hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace6e415296d6db48b679ed1c1fae5db03061ce3 | 4,181 | py | Python | metadata-ingestion/src/datahub_provider/lineage/datahub.py | sunkickr/datahub | 5ed410635d033a6dbbab1cd19c24a83ce3c9262c | [
"Apache-2.0"
] | null | null | null | metadata-ingestion/src/datahub_provider/lineage/datahub.py | sunkickr/datahub | 5ed410635d033a6dbbab1cd19c24a83ce3c9262c | [
"Apache-2.0"
] | null | null | null | metadata-ingestion/src/datahub_provider/lineage/datahub.py | sunkickr/datahub | 5ed410635d033a6dbbab1cd19c24a83ce3c9262c | [
"Apache-2.0"
] | null | null | null | import json
from typing import TYPE_CHECKING, Dict, List, Optional
from airflow.configuration import conf
from airflow.lineage.backend import LineageBackend
from datahub_provider._lineage_core import (
DatahubBasicLineageConfig,
send_lineage_to_datahub,
)
if TYPE_CHECKING:
from airflow.models.baseoperator import BaseOperator
class DatahubLineageConfig(DatahubBasicLineageConfig):
    """Lineage-backend configuration, extending the basic DataHub lineage
    config with backend-specific behavior flags. Instances are built from
    the ``[lineage]`` section of airflow.cfg via ``get_lineage_config``."""

    # If set to true, most runtime errors in the lineage backend will be
    # suppressed and will not cause the overall task to fail. Note that
    # configuration issues will still throw exceptions.
    graceful_exceptions: bool = True
def get_lineage_config() -> DatahubLineageConfig:
    """Build the lineage configuration from the ``[lineage]`` section of
    airflow.cfg.

    The ``datahub_kwargs`` option holds a JSON object of keyword arguments
    (the same pattern used for secret backends); a top-level
    ``datahub_conn_id`` option is still honored for backwards compatibility.
    """
    raw_kwargs = conf.get("lineage", "datahub_kwargs", fallback="{}")
    parsed_kwargs = json.loads(raw_kwargs)

    # Continue to support top-level datahub_conn_id config.
    conn_id = conf.get("lineage", "datahub_conn_id", fallback=None)
    if conn_id:
        parsed_kwargs["datahub_conn_id"] = conn_id

    return DatahubLineageConfig.parse_obj(parsed_kwargs)
class DatahubLineageBackend(LineageBackend):
    """Airflow lineage backend that pushes task lineage to DataHub.

    Supports both Airflow 1.10.x (where ``send_lineage`` is invoked
    statically) and Airflow 2.x (where the backend is instantiated and
    ``__init__`` runs).
    """

    def __init__(self) -> None:
        super().__init__()

        # By attempting to get and parse the config, we can detect configuration errors
        # ahead of time. The init method is only called in Airflow 2.x.
        _ = get_lineage_config()

    # With Airflow 2.0, this can be an instance method. However, with Airflow 1.10.x, this
    # method is used statically, even though LineageBackend declares it as an instance variable.
    @staticmethod
    def send_lineage(
        operator: "BaseOperator",
        inlets: Optional[List] = None,  # unused
        outlets: Optional[List] = None,  # unused
        context: Optional[Dict] = None,  # Optional: default is None
    ) -> None:
        """Emit lineage for *operator* to DataHub.

        When ``graceful_exceptions`` is enabled in the lineage config,
        runtime errors are logged and suppressed so that lineage failures
        do not fail the task itself.
        """
        config = get_lineage_config()

        try:
            # This is necessary to avoid issues with circular imports.
            from airflow.lineage import prepare_lineage
            from datahub_provider.hooks.datahub import AIRFLOW_1

            # Detect Airflow 1.10.x inlet/outlet configurations in Airflow 2.x, and
            # convert to the newer version. This code path will only be triggered
            # when 2.x receives a 1.10.x inlet/outlet config.
            needs_repeat_preparation = False
            if (
                not AIRFLOW_1
                and isinstance(operator._inlets, list)
                and len(operator._inlets) == 1
                and isinstance(operator._inlets[0], dict)
            ):
                from airflow.lineage import AUTO

                operator._inlets = [
                    # See https://airflow.apache.org/docs/apache-airflow/1.10.15/lineage.html.
                    *operator._inlets[0].get(
                        "datasets", []
                    ),  # assumes these are attr-annotated
                    *operator._inlets[0].get("task_ids", []),
                    *([AUTO] if operator._inlets[0].get("auto", False) else []),
                ]
                needs_repeat_preparation = True
            if (
                not AIRFLOW_1
                and isinstance(operator._outlets, list)
                and len(operator._outlets) == 1
                and isinstance(operator._outlets[0], dict)
            ):
                operator._outlets = [*operator._outlets[0].get("datasets", [])]
                needs_repeat_preparation = True
            if needs_repeat_preparation:
                # Rerun the lineage preparation routine, now that the old format has been translated to the new one.
                prepare_lineage(lambda self, ctx: None)(operator, context)

            context = context or {}  # ensure not None to satisfy mypy

            send_lineage_to_datahub(
                config, operator, operator.inlets, operator.outlets, context
            )
        except Exception as e:
            if config.graceful_exceptions:
                operator.log.error(e)
                # Fixed typo in the log message ("Supressing" -> "Suppressing").
                operator.log.info(
                    "Suppressing error because graceful_exceptions is set"
                )
            else:
                raise
| 39.443396 | 116 | 0.626644 |
ace6e4a995ac4c04f9ffaccea66f7ed772bf01fc | 1,425 | py | Python | external-deps/spyder-kernels/spyder_kernels/utils/test_utils.py | Earthman100/spyder | 949ce0f9100a69504c70a5678e8589a05aee7d38 | [
"MIT"
] | 7,956 | 2015-02-17T01:19:09.000Z | 2022-03-31T21:52:15.000Z | external-deps/spyder-kernels/spyder_kernels/utils/test_utils.py | Earthman100/spyder | 949ce0f9100a69504c70a5678e8589a05aee7d38 | [
"MIT"
] | 16,326 | 2015-02-16T23:15:21.000Z | 2022-03-31T23:34:34.000Z | external-deps/spyder-kernels/spyder_kernels/utils/test_utils.py | Earthman100/spyder | 949ce0f9100a69504c70a5678e8589a05aee7d38 | [
"MIT"
] | 1,918 | 2015-02-20T19:26:26.000Z | 2022-03-31T19:03:25.000Z | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2018- Spyder Kernels Contributors
# Taken from the tests utils in the Metakernel package
# See utils.py at https://github.com/Calysto/metakernel/metakernel/tests
# Licensed under the terms of the BSD License
# (see spyder_kernels/__init__.py for details)
# -----------------------------------------------------------------------------
try:
from jupyter_client import session as ss
except ImportError:
from IPython.kernel.zmq import session as ss
import zmq
import logging
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from spyder_kernels.console.kernel import SpyderKernel
def get_kernel(kernel_class=SpyderKernel):
    """Get an instance of a kernel with the kernel class given.

    The kernel is wired to a fresh DEBUG-level logger whose single handler
    writes into a StringIO buffer (see ``get_log_text``) and to a new ZMQ
    PUB socket for its iopub channel.
    """
    log = logging.getLogger('test')
    log.setLevel(logging.DEBUG)

    # Iterate over a copy of the handler list: removing handlers from the
    # very list being iterated skips every other handler.
    for hdlr in list(log.handlers):
        log.removeHandler(hdlr)

    hdlr = logging.StreamHandler(StringIO())
    hdlr.setLevel(logging.DEBUG)
    log.addHandler(hdlr)

    context = zmq.Context.instance()
    iopub_socket = context.socket(zmq.PUB)

    kernel = kernel_class(session=ss.Session(), iopub_socket=iopub_socket,
                          log=log)
    return kernel
def get_log_text(kernel):
    """Return everything the given kernel has written to its log stream."""
    first_handler = kernel.log.handlers[0]
    return first_handler.stream.getvalue()
| 29.6875 | 79 | 0.638596 |
ace6e531c391cf4ecdf7fac9ae397d66dfb9bf8e | 192 | py | Python | tests/test_parametrizer.py | davidemoro/parametrizer | e5fe3b6276d30b41402b24fac61520b8e5e198a0 | [
"Apache-2.0"
] | null | null | null | tests/test_parametrizer.py | davidemoro/parametrizer | e5fe3b6276d30b41402b24fac61520b8e5e198a0 | [
"Apache-2.0"
] | 2 | 2019-03-14T12:41:32.000Z | 2019-03-14T12:45:21.000Z | tests/test_parametrizer.py | davidemoro/parametrizer | e5fe3b6276d30b41402b24fac61520b8e5e198a0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
def test_parametrizer():
    """Verify that Parametrizer substitutes mapping values into templates."""
    from parametrizer import Parametrizer
    mapping = {'foo': 'bar'}
    parametrizer = Parametrizer(mapping)
    assert parametrizer.parametrize('$foo') == 'bar'
ace6e5a202d49fb559c0820e95bc7186d1058c1d | 2,103 | gyp | Python | gyp/codec.gyp | Perspex/skia | e25fe5a294e9cee8f23207eef63fad6cffa9ced4 | [
"Apache-2.0"
] | 7 | 2016-01-12T23:32:32.000Z | 2021-12-03T11:21:26.000Z | gyp/codec.gyp | AvaloniaUI/skia | e25fe5a294e9cee8f23207eef63fad6cffa9ced4 | [
"Apache-2.0"
] | null | null | null | gyp/codec.gyp | AvaloniaUI/skia | e25fe5a294e9cee8f23207eef63fad6cffa9ced4 | [
"Apache-2.0"
] | 6 | 2015-12-09T14:00:19.000Z | 2021-12-06T03:08:43.000Z | # Copyright 2015 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Copyright 2015 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# GYP file for codec project.
{
'targets': [
{
'target_name': 'codec',
'product_name': 'skia_codec',
'type': 'static_library',
'standalone_static_library': 1,
'dependencies': [
'core.gyp:*',
'giflib.gyp:giflib',
'libjpeg-turbo-selector.gyp:libjpeg-turbo-selector',
'libpng.gyp:libpng',
'libwebp.gyp:libwebp',
],
'cflags':[
# FIXME: This gets around a longjmp warning. See
# http://build.chromium.org/p/client.skia.compile/builders/Build-Ubuntu-GCC-x86_64-Release-Trybot/builds/113/steps/build%20most/logs/stdio
'-Wno-clobbered',
],
'include_dirs': [
'../include/codec',
'../include/private',
'../src/codec',
'../src/core',
],
'sources': [
'../src/codec/SkAndroidCodec.cpp',
'../src/codec/SkBmpCodec.cpp',
'../src/codec/SkBmpMaskCodec.cpp',
'../src/codec/SkBmpRLECodec.cpp',
'../src/codec/SkBmpStandardCodec.cpp',
'../src/codec/SkCodec.cpp',
'../src/codec/SkCodec_libgif.cpp',
'../src/codec/SkCodec_libico.cpp',
'../src/codec/SkCodec_libpng.cpp',
'../src/codec/SkCodec_wbmp.cpp',
'../src/codec/SkJpegCodec.cpp',
'../src/codec/SkJpegDecoderMgr.cpp',
'../src/codec/SkJpegUtility_codec.cpp',
'../src/codec/SkMaskSwizzler.cpp',
'../src/codec/SkMasks.cpp',
'../src/codec/SkSampler.cpp',
'../src/codec/SkSampledCodec.cpp',
'../src/codec/SkSwizzler.cpp',
'../src/codec/SkWebpAdapterCodec.cpp',
'../src/codec/SkWebpCodec.cpp',
],
'direct_dependent_settings': {
'include_dirs': [
'../include/codec',
],
},
'defines': [
'TURBO_HAS_SKIP',
],
},
],
}
| 30.478261 | 146 | 0.568236 |
ace6e5e139fd38412570ede76d40ddb21de43c6e | 1,066 | py | Python | distributor.py | IrwinDong/lambdaproxy | d3c8591c824a958d6cdbecd9e26f2b7c43ce80b2 | [
"Apache-2.0"
] | 1 | 2020-03-01T00:35:07.000Z | 2020-03-01T00:35:07.000Z | distributor.py | IrwinDong/lambdaproxy | d3c8591c824a958d6cdbecd9e26f2b7c43ce80b2 | [
"Apache-2.0"
] | null | null | null | distributor.py | IrwinDong/lambdaproxy | d3c8591c824a958d6cdbecd9e26f2b7c43ce80b2 | [
"Apache-2.0"
] | null | null | null | from queue import Queue, Full
from threading import Event
from datetime import datetime
class LambdaRequest:
    """Record describing a single proxied request handed to a worker.

    The class-level attributes document the fields and provide defaults;
    ``__init__`` fills in ``path``, ``waithandler`` and the ``arrival``
    timestamp. ``url``, ``timeout`` and ``depature`` (sic) are updated by
    whoever services the request.
    """

    # Field defaults shared by all instances until overridden.
    path = None
    waithandler: Event = None      # signalled once the request is answered
    url: str = None                # result URL filled in by the worker
    timeout: bool = False          # True when queueing or waiting timed out
    arrival: datetime = datetime.max
    depature: datetime = datetime.min  # NOTE: "departure" typo kept — public attribute name

    def __init__(self, path: str, waithandler: Event):
        self.arrival = datetime.now()
        self.path = path
        self.waithandler = waithandler
class Distributor:
    """Hands incoming request paths to workers through a shared queue and
    blocks the caller until the request is answered or times out."""

    requestqueue = None  # shared Queue of pending LambdaRequest objects

    def __init__(self, queue: Queue):
        self.requestqueue = queue

    def distribute(self, path: str,):
        """Queue *path* for processing and wait for the result.

        Returns a ``(timeout, url, elapsed)`` tuple: ``timeout`` is True if
        the queue was full for 5 s or no worker answered within 10 s.
        """
        done_event = Event()
        request = LambdaRequest(path, done_event)
        try:
            # Block up to 5 seconds for a free slot in the queue.
            self.requestqueue.put(request, True, 5)
        except Full:
            request.timeout = True
        else:
            answered = done_event.wait(10)
            if answered:
                request.depature = datetime.now()
            else:
                request.timeout = True
        return request.timeout, request.url, request.depature - request.arrival
Instance:Distributor = None | 28.052632 | 77 | 0.626642 |
ace6e6bf11faec630f7a4d2fd630b7651d32f5c8 | 2,338 | py | Python | setup.py | rubenvdg/dask | 85f0b14bd36a5135ce51aeee067b6207374b00c4 | [
"BSD-3-Clause"
] | null | null | null | setup.py | rubenvdg/dask | 85f0b14bd36a5135ce51aeee067b6207374b00c4 | [
"BSD-3-Clause"
] | null | null | null | setup.py | rubenvdg/dask | 85f0b14bd36a5135ce51aeee067b6207374b00c4 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import sys
from os.path import exists
from setuptools import setup
import versioneer
# Optional dependency groups, installable as ``pip install dask[<extra>]``.
# NOTE: These are tested in `continuous_integration/test_imports.sh` If
# you modify these, make sure to change the corresponding line there.
extras_require = {
    "array": ["numpy >= 1.18"],
    "bag": [], # keeping for backwards compatibility
    "dataframe": ["numpy >= 1.18", "pandas >= 1.0"],
    "distributed": ["distributed == 2021.08.1"],
    "diagnostics": [
        "bokeh >= 1.0.0, != 2.0.0",
        "jinja2",
    ],
    "delayed": [], # keeping for backwards compatibility
}
# "complete" is the union of every extra above (deduplicated via the set).
extras_require["complete"] = sorted({v for req in extras_require.values() for v in req})
# after complete is set, add in test
extras_require["test"] = ["pytest", "pytest-rerunfailures", "pytest-xdist"]
# Unconditional runtime dependencies for any dask install.
install_requires = [
    "cloudpickle >= 1.1.1",
    "fsspec >= 0.6.0",
    "packaging >= 20.0",
    "partd >= 0.3.10",
    "pyyaml",
    "toolz >= 0.8.2",
]
# Python packages shipped in the distribution; each also ships its tests.
packages = [
    "dask",
    "dask.array",
    "dask.bag",
    "dask.bytes",
    "dask.dataframe",
    "dask.dataframe.io",
    "dask.dataframe.tseries",
    "dask.diagnostics",
]
tests = [p + ".tests" for p in packages]
# Only include pytest-runner in setup_requires if we're invoking tests
if {"pytest", "test", "ptr"}.intersection(sys.argv):
    setup_requires = ["pytest-runner"]
else:
    setup_requires = []
setup(
    name="dask",
    # Version and build commands are managed by versioneer (VCS-derived).
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description="Parallel PyData with Task Scheduling",
    url="https://github.com/dask/dask/",
    maintainer="Matthew Rocklin",
    maintainer_email="mrocklin@gmail.com",
    license="BSD",
    keywords="task-scheduling parallel numpy pandas pydata",
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "License :: OSI Approved :: BSD License",
    ],
    packages=packages + tests,
    # README may be absent in some build contexts (e.g. sdist variations).
    long_description=open("README.rst").read() if exists("README.rst") else "",
    python_requires=">=3.7",
    install_requires=install_requires,
    setup_requires=setup_requires,
    tests_require=["pytest"],
    extras_require=extras_require,
    include_package_data=True,
    zip_safe=False,
)
| 28.512195 | 88 | 0.641146 |
ace6e77af06a92a3b55122d03a844ef1f4676e71 | 10,234 | py | Python | realpgm_dataset/kerasLayers.py | santacml/Detecting_Malicious_Assembly | 868f291286bb2be2a4232d1a0ce2f7ea59355408 | [
"MIT"
] | null | null | null | realpgm_dataset/kerasLayers.py | santacml/Detecting_Malicious_Assembly | 868f291286bb2be2a4232d1a0ce2f7ea59355408 | [
"MIT"
] | null | null | null | realpgm_dataset/kerasLayers.py | santacml/Detecting_Malicious_Assembly | 868f291286bb2be2a4232d1a0ce2f7ea59355408 | [
"MIT"
] | null | null | null | import tensorflow as tf
from keras import backend as K
from keras.engine.topology import Layer
from keras.engine import InputSpec
from keras.layers import Wrapper
from keras.utils.generic_utils import has_arg
import numpy as np
'''
class MyLayer(Layer):
def __init__(self, output_dim, **kwargs):
self.output_dim = output_dim
super(MyLayer, self).__init__(**kwargs)
def build(self, input_shape):
# Create a trainable weight variable for this layer.
self.kernel = self.add_weight(name='kernel',
shape=(input_shape[1], self.output_dim),
initializer='uniform',
trainable=True)
super(MyLayer, self).build(input_shape) # Be sure to call this somewhere!
def call(self, x):
return K.dot(x, self.kernel)
def compute_output_shape(self, input_shape):
return (input_shape[0], self.output_dim)
'''
class DFTLayer(Layer):
    """Keras layer that applies a 2D fast Fourier transform to its input.

    The layer has no trainable weights: the input is cast to complex64,
    transformed with ``K.fft2d``, and cast back to float32 (the cast keeps
    only the real component). Input and output shapes are identical.

    Fix: removed leftover debug ``print`` calls (and the unused
    ``K.int_shape`` lookups feeding them) from the forward pass, plus dead
    commented-out code.
    """

    def __init__(self, **kwargs):
        super(DFTLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # No weights to create; just mark the layer as built.
        super(DFTLayer, self).build(input_shape)  # Be sure to call this somewhere!

    def call(self, input):
        # K.fft2d operates on complex tensors.
        input = tf.cast(input, tf.complex64)
        out = K.fft2d(input)
        # Casting complex -> float keeps the real part of the spectrum.
        out = tf.cast(out, tf.float32)
        return out

    def compute_output_shape(self, input_shape):
        # The transform is shape-preserving.
        return input_shape
class OnceOnLayer(Layer):
    """Simple dense-style layer: multiplies the input by a trainable
    ``(input_dim, output_dim)`` kernel initialized uniformly.

    Fix: the methods previously called ``super(MyLayer, self)`` — a
    copy-paste from the template in the comment block above — but
    ``MyLayer`` is not defined in this module, so construction raised
    NameError. They now reference ``OnceOnLayer``.
    """

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(OnceOnLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[1], self.output_dim),
                                      initializer='uniform',
                                      trainable=True)
        super(OnceOnLayer, self).build(input_shape)  # Be sure to call this somewhere!

    def call(self, x):
        return K.dot(x, self.kernel)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)
# '''
class CPUWrapper(Wrapper):
    """Keras layer wrapper that runs the wrapped layer's forward pass under
    a ``/cpu:0`` device scope. Shapes pass through to the wrapped layer
    unchanged.

    NOTE(review): ``K.device`` is assumed to come from the TensorFlow
    backend and to pin op placement to the CPU — confirm against the
    backend in use.
    """

    def __init__(self, layer, **kwargs):
        super(CPUWrapper, self).__init__(layer, **kwargs)
        self.supports_masking = False

    def build(self, input_shape):
        # Expects at least a rank-3 (batch, time, features...) input.
        assert len(input_shape) >= 3
        self.input_spec = InputSpec(shape=input_shape)
        # The child layer sees the same shape the wrapper receives.
        child_input_shape = input_shape
        if not self.layer.built:
            self.layer.build(child_input_shape)
            self.layer.built = True
        super(CPUWrapper, self).build()

    def compute_output_shape(self, input_shape):
        # Delegate entirely to the wrapped layer; the wrapper is shape-neutral.
        child_input_shape = input_shape
        child_output_shape = self.layer.compute_output_shape(child_input_shape)
        return child_output_shape

    def call(self, inputs, training=None):
        # Run the wrapped layer's computation inside a CPU device scope.
        with K.device('/cpu:0'):
            out = self.layer.call(inputs)
        return out
class TimeDistributedRAM(Wrapper):
    """Variant of Keras ``TimeDistributed`` aimed at lowering memory use.

    Instead of reshaping ``(batch, time, ...)`` into one large
    ``(batch*time, ...)`` batch, ``call`` maps the wrapped layer over the
    time axis with ``tf.map_fn(..., swap_memory=True)``, placing the outer
    loop under ``/cpu:0`` and the wrapped layer's computation under
    ``/gpu:0``. Contains large blocks of dead/commented reference code.
    """

    def __init__(self, layer, **kwargs):
        super(TimeDistributedRAM, self).__init__(layer, **kwargs)
        self.supports_masking = False

    def build(self, input_shape):
        # Requires at least (batch, time, features...) input.
        assert len(input_shape) >= 3
        self.input_spec = InputSpec(shape=input_shape)
        # The wrapped layer is built on the per-timestep shape (time axis removed).
        child_input_shape = (input_shape[0],) + input_shape[2:]
        if not self.layer.built:
            self.layer.build(child_input_shape)
            self.layer.built = True
        super(TimeDistributedRAM, self).build()

    def compute_output_shape(self, input_shape):
        # Compute the child's shape without the time axis, then re-insert timesteps.
        child_input_shape = (input_shape[0],) + input_shape[2:]
        child_output_shape = self.layer.compute_output_shape(child_input_shape)
        timesteps = input_shape[1]
        return (child_output_shape[0], timesteps) + child_output_shape[1:]

    def call(self, inputs, training=None):
        """Apply the wrapped layer to every timestep of ``inputs``.

        The first branch is the original K.rnn-based implementation, kept
        for reference but disabled via ``if False``; the active branch uses
        tf.map_fn over a time-major transpose of the input.
        """
        kwargs = {}
        if has_arg(self.layer.call, 'training'):
            kwargs['training'] = training
        uses_learning_phase = False

        input_shape = K.int_shape(inputs)
        # Disabled branch: `if False and ...` can never be true.
        if False and input_shape[0]:
            # batch size matters, use rnn-based implementation
            def step(x, _):
                global uses_learning_phase
                output = self.layer.call(x, **kwargs)
                if hasattr(output, '_uses_learning_phase'):
                    uses_learning_phase = (output._uses_learning_phase or
                                           uses_learning_phase)
                return output, []

            _, outputs, _ = K.rnn(step, inputs,
                                  initial_states=[],
                                  input_length=input_shape[1],
                                  unroll=False)
            y = outputs
        else:
            # Dead reference code (original reshape-based TimeDistributed
            # implementation) kept below as a string literal.
            '''
            # No batch size specified, therefore the layer will be able
            # to process batches of any size.
            # We can go with reshape-based implementation for performance.
            input_length = input_shape[1]
            if not input_length:
                input_length = K.shape(inputs)[1]
            # Shape: (num_samples * timesteps, ...). And track the
            # transformation in self._input_map.
            input_uid = _object_list_uid(inputs)
            inputs = K.reshape(inputs, (-1,) + input_shape[2:])
            self._input_map[input_uid] = inputs
            # (num_samples * timesteps, ...)
            y = self.layer.call(inputs, **kwargs)
            if hasattr(y, '_uses_learning_phase'):
                uses_learning_phase = y._uses_learning_phase
            # Shape: (num_samples, timesteps, ...)
            output_shape = self.compute_output_shape(input_shape)
            y = K.reshape(y, (-1, input_length) + output_shape[2:])
            '''
            input_length = input_shape[1]
            output_shape = self.compute_output_shape(input_shape)
            # self.gpuInputVar = tf.zeros( (input_shape[0],) + input_shape[2:])
            # NOTE(review): gpuInputVar merely aliases the current timestep
            # inside foo(); why it is kept as an instance attribute is unclear.
            self.gpuInputVar = None
            # with K.device('/cpu:0'):
                # self.cpuOutputVar = tf.zeros(output_shape)
            # tsteps = None
            # with K.device('/cpu:0'):
                # tsteps = K.unstack(inputs, axis=1)
            # outList = []
            # ins = K.reshape(inputs, (input_length, input_shape[0]) + input_shape[2:])
            numDims = len(input_shape)
            # Make the input time-major: (time, batch, ...).
            with K.device('/cpu:0'):
                ins = tf.transpose(inputs, (1, 0) + tuple([i for i in range(2, numDims)]))
            def foo(timestepPlusDim):
                self.gpuInputVar = timestepPlusDim
                # with tf.device('/gpu:0'):
                    # self.gpuInputVar = tf.squeeze(timestepPlusDim)
                # with tf.device('/cpu:0'):
                    # return self.layer.call(self.gpuInputVar)
                with tf.device('/gpu:0'):
                    out = self.layer.call(self.gpuInputVar)
                with tf.device('/cpu:0'):
                    return out
            # Map the wrapped layer over timesteps; swap_memory=True lets TF
            # spill intermediate tensors between devices.
            with tf.device('/cpu:0'):
                y = tf.map_fn(foo,
                              ins,
                              back_prop = True,
                              swap_memory=True
                              )
            # More dead reference code (explicit per-timestep Python loop).
            '''
            # this should be on the device we are otherwise using... gpu...
            # for i, timestep in enumerate(tsteps):
            for timestep in tsteps:
                # out = None
                # with tf.device('/gpu:0'):
                    # test = tsteps[i]
                # with tf.device('/cpu:0'):
                    # gpuVar = tf.identity(tsteps[i])
                gpuVar = timestep * 1
                # with K.device('/cpu:0'):
                    # out = self.layer.call(gpuVar, **kwargs)
                out = self.layer.call(gpuVar, **kwargs)
                with tf.device('/cpu:0'):
                    cpuVar = out * 1
                # outList.append(self.layer.call(tsteps[i] , **kwargs))
                outList.append(cpuVar)
            # tsteps[i] = K.reshape(tsteps[i], (-1, 1) + output_shape[2:])
            # '''
            # ins = K.reshape(inputs, (-1,) + input_shape[2:])
            # ins = tf.transpose(inputs, (1, 0,2,3))
            # def test(timestep):
                # with tf.device('/cpu:0'):
                    # return self.layer.call(timestep)
            # y = tf.map_fn(test, ins)
            # print(len(outList))
            # with tf.device('/cpu:0'):
            # with K.device('/cpu:0'):
                # y = K.stack(outList, axis=1)
        # Apply activity regularizer if any:
        if (hasattr(self.layer, 'activity_regularizer') and
                self.layer.activity_regularizer is not None):
            regularization_loss = self.layer.activity_regularizer(y)
            self.add_loss(regularization_loss, inputs)

        if uses_learning_phase:
            y._uses_learning_phase = True
        return y
# ''' | 36.81295 | 90 | 0.527653 |
ace6e7a6c71cfd3f9198a2f50f88acdc1ba9928b | 5,991 | py | Python | scripts/experiments/patterns/main_patterns.py | NECOTIS/CRITICAL | eba2dc9c90936f9cf51e04374081509be433ed10 | [
"BSD-3-Clause"
] | 1 | 2022-02-16T00:59:50.000Z | 2022-02-16T00:59:50.000Z | scripts/experiments/patterns/main_patterns.py | NECOTIS/CRITICAL | eba2dc9c90936f9cf51e04374081509be433ed10 | [
"BSD-3-Clause"
] | null | null | null | scripts/experiments/patterns/main_patterns.py | NECOTIS/CRITICAL | eba2dc9c90936f9cf51e04374081509be433ed10 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2018, NECOTIS
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Authors: Simon Brodeur, Jean Rouat (advisor)
# Date: April 18th, 2019
# Organization: Groupe de recherche en Neurosciences Computationnelles et Traitement Intelligent des Signaux (NECOTIS),
# Université de Sherbrooke, Canada
import random
import logging
import numpy as np
import matplotlib.pyplot as plt
from brian2.units import ms, Hz
from brian2.synapses.synapses import Synapses
from brian2.core.clocks import defaultclock
from brian2.monitors.spikemonitor import SpikeMonitor
from brian2.core.network import Network
from brian2.units.allunits import second
from brian2.monitors.statemonitor import StateMonitor
from brian2.input.spikegeneratorgroup import SpikeGeneratorGroup
from critical.microcircuit import Microcircuit
from critical.rankorder import generateRankOrderCodedPatterns, plotPatterns, generateRankOrderCodedData
logger = logging.getLogger(__name__)
def main():
    """Run the CRITICAL plasticity experiment on rank-order coded patterns.

    Builds a small-world microcircuit, drives it with rank-order coded
    input spike patterns, trains toward a target critical branching factor,
    then reports firing statistics and saves convergence/raster figures.
    """
    # Choose the duration of the training
    duration = 10 * second
    targetCbf = 1.0
    logger.info('Simulating for target branching factor of %f' % (targetCbf))

    # Create the microcircuit
    # NOTE: p_max is chosen so as to have an out-degree of N=16
    m = Microcircuit(connectivity='small-world', macrocolumnShape=[2, 2, 2], minicolumnShape=[4, 4, 4],
                     p_max=0.056, srate=1 * Hz, excitatoryProb=0.8, delay='1*ms + 2*ms * rand()')

    # Configure CRITICAL learning rule
    m.S.c_out_ref = targetCbf  # target critical branching factor
    m.S.alpha = 0.1  # learning rate

    # Define the inputs to the microcircuit
    # NOTE: Number of average input synaptic connections is fixed to 1% of reservoir links
    nbInputs = 8
    nbPatterns = 4
    patterns = generateRankOrderCodedPatterns(nbInputs, nbPatterns, widthEpoch=50 * ms, padding=5 * ms, refractory=5 * ms)
    indices, times = generateRankOrderCodedData(patterns, duration, delayEpoch=100 * ms)
    fig = plotPatterns(patterns)
    fig.savefig('patterns.eps')

    # Input spike generator and plastic input synapses into the reservoir.
    P = SpikeGeneratorGroup(nbInputs, indices, times)
    Si = Synapses(P, m.G, model='w : 1', on_pre='''v_post += w * int(not_refractory_post)
                                                   c_in_tot_post += w * int(not_refractory_post)''')
    Si.connect(p=0.01 * len(m.S) / (nbInputs * len(m.G)))
    Si.w = '0.5 + 1.5 * rand()'
    logger.info('Number of neurons in the population: %d' % (len(m.G)))
    logger.info('Number of synapses in the population: %d' % (len(m.S)))

    # Configure the monitors and simulation
    # NOTE: setting a high time resolution increases the stability of the learning rule
    M = SpikeMonitor(m.G, record=True)
    Mi = SpikeMonitor(P, record=True)
    Mg = StateMonitor(m.G, variables=['cbf'], record=True)
    defaultclock.dt = 0.1 * ms
    net = Network(m.G, m.S, P, Si, M, Mi, Mg)

    # Run the simulation with input stimuli and plasticity enabled
    m.S.plastic = True
    net.run(duration, report='text')

    # Compute population average firing rate
    avgInputFiringRate = len(Mi.i) / (nbInputs * duration)
    avgOutputFiringRate = len(M.i) / (len(m.G) * duration)
    logger.info('Average input firing rate: %4.2f Hz' % (avgInputFiringRate))
    logger.info('Average output firing rate: %4.2f Hz' % (avgOutputFiringRate))

    # NOTE: compute statistics on excitatory neurons only
    meanCbf = np.mean(Mg.cbf.T[:, m.G.ntype > 0], axis=-1)
    fig = plt.figure(facecolor='white', figsize=(6, 5))
    ax = fig.add_subplot(1, 1, 1)
    ax.set_xlabel('Time [sec]')
    ax.set_ylabel('Average output contributions')
    ax.plot(Mg.t, meanCbf, color='k')
    fig.tight_layout()
    fig.savefig('convergence_pattern.eps')

    # Visualization of the simulation
    # NOTE: show only the last 10 sec of the simulation
    fig = plt.figure(facecolor='white', figsize=(6, 5))
    plt.subplot(211)
    plt.title('Spiking activity (input)')
    plt.plot(Mi.t / ms, Mi.i, '.', color='b')
    plt.ylabel('Neurons')
    plt.xlabel('Time [ms]')
    plt.xlim([0.0, duration / ms])
    plt.subplot(212)
    plt.title('Spiking activity (output)')
    plt.plot(M.t / ms, M.i, '.', color='b')
    plt.ylabel('Neurons')
    plt.xlabel('Time [ms]')
    plt.xlim([0.0, duration / ms])
    fig.tight_layout()
    plt.show()
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)

    # Fix the seed of all random number generators, for reproducibility
    seed = 0
    random.seed(seed)
    np.random.seed(seed)

    main()
    logger.info('All done.')
| 41.317241 | 122 | 0.705892 |
ace6e7d27cc5032f1fa614d1323a9e42c706575a | 123,991 | py | Python | tests/conftest.py | kesavanvt/ocs-ci | f120044486631f49133c9f3a137842673d765a1c | [
"MIT"
] | null | null | null | tests/conftest.py | kesavanvt/ocs-ci | f120044486631f49133c9f3a137842673d765a1c | [
"MIT"
] | null | null | null | tests/conftest.py | kesavanvt/ocs-ci | f120044486631f49133c9f3a137842673d765a1c | [
"MIT"
] | null | null | null | import logging
import os
import random
import time
import tempfile
import threading
from concurrent.futures.thread import ThreadPoolExecutor
from datetime import datetime
from math import floor
from shutil import copyfile
from functools import partial
from botocore.exceptions import ClientError
import pytest
from ocs_ci.deployment import factory as dep_factory
from ocs_ci.framework import config
from ocs_ci.framework.pytest_customization.marks import (
deployment,
ignore_leftovers,
tier_marks,
ignore_leftover_label,
)
from ocs_ci.ocs import constants, defaults, fio_artefacts, node, ocp, platform_nodes
from ocs_ci.ocs.bucket_utils import craft_s3_command
from ocs_ci.ocs.exceptions import (
CommandFailed,
TimeoutExpiredError,
CephHealthException,
ResourceWrongStatusException,
UnsupportedPlatformError,
)
from ocs_ci.ocs.mcg_workload import mcg_job_factory as mcg_job_factory_implementation
from ocs_ci.ocs.node import get_node_objs, schedule_nodes
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources import pvc
from ocs_ci.ocs.utils import setup_ceph_toolbox, collect_ocs_logs
from ocs_ci.ocs.resources.backingstore import (
backingstore_factory as backingstore_factory_implementation,
)
from ocs_ci.ocs.resources.namespacestore import (
namespace_store_factory as namespacestore_factory_implementation,
)
from ocs_ci.ocs.resources.bucketclass import (
bucket_class_factory as bucketclass_factory_implementation,
)
from ocs_ci.ocs.resources.cloud_manager import CloudManager
from ocs_ci.ocs.resources.cloud_uls import (
cloud_uls_factory as cloud_uls_factory_implementation,
)
from ocs_ci.ocs.node import check_nodes_specs
from ocs_ci.ocs.resources.mcg import MCG
from ocs_ci.ocs.resources.objectbucket import BUCKET_MAP
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs.resources.pod import (
get_rgw_pods,
delete_deploymentconfig_pods,
get_pods_having_label,
get_deployments_having_label,
Pod,
)
from ocs_ci.ocs.resources.pvc import PVC, create_restore_pvc
from ocs_ci.ocs.version import get_ocs_version, report_ocs_version
from ocs_ci.ocs.cluster_load import ClusterLoad, wrap_msg
from ocs_ci.utility import aws
from ocs_ci.utility import deployment_openshift_logging as ocp_logging_obj
from ocs_ci.utility import templating
from ocs_ci.utility import users, kms as KMS
from ocs_ci.utility.environment_check import (
get_status_before_execution,
get_status_after_execution,
)
from ocs_ci.utility.flexy import load_cluster_info
from ocs_ci.utility.prometheus import PrometheusAPI
from ocs_ci.utility.uninstall_openshift_logging import uninstall_cluster_logging
from ocs_ci.utility.utils import (
ceph_health_check,
ceph_health_check_base,
get_running_ocp_version,
get_openshift_client,
get_system_architecture,
get_testrun_name,
load_auth_config,
ocsci_log_path,
skipif_ocp_version,
skipif_ocs_version,
TimeoutSampler,
skipif_upgraded_from,
update_container_with_mirrored_image,
)
from ocs_ci.helpers import helpers
from ocs_ci.helpers.helpers import create_unique_resource_name
from ocs_ci.ocs.bucket_utils import get_rgw_restart_counts
from ocs_ci.ocs.pgsql import Postgresql
from ocs_ci.ocs.resources.rgw import RGW
from ocs_ci.ocs.jenkins import Jenkins
from ocs_ci.ocs.couchbase import CouchBase
from ocs_ci.ocs.amq import AMQ
from ocs_ci.ocs.elasticsearch import ElasticSearch
from ocs_ci.ocs.ui.base_ui import login_ui, close_browser
from ocs_ci.ocs.ripsaw import RipSaw
log = logging.getLogger(__name__)
class OCSLogFormatter(logging.Formatter):
    """Log record formatter used across the OCS-CI run.

    Prefixes every message with timestamp, thread, level and the emitting
    logger/function/line for easier post-mortem grepping.
    """

    def __init__(self):
        super().__init__(
            "%(asctime)s - %(threadName)s - %(levelname)s - %(name)s.%(funcName)s.%(lineno)d "
            "- %(message)s"
        )
def pytest_logger_config(logger_config):
    """
    pytest-logger plugin hook: collect all loggers to stdout at INFO level,
    split log files by test outcome, and install the ocs-ci log format.
    """
    # "" is the root logger, so every message from every module is captured.
    logger_config.add_loggers([""], stdout_level="info")
    logger_config.set_log_option_default("")
    logger_config.split_by_outcome()
    logger_config.set_formatter_class(OCSLogFormatter)
def pytest_collection_modifyitems(session, items):
    """
    A pytest hook to filter out skipped tests satisfying
    skipif_ocp_version, skipif_ocs_version or skipif_upgraded_from.

    The `items` list is mutated in place: any test whose skip-marker
    condition evaluates true is removed from the collection.

    Args:
        session: pytest session
        items: list of collected tests
    """
    teardown = config.RUN["cli_params"].get("teardown")
    deploy = config.RUN["cli_params"].get("deploy")
    # Version-based filtering needs a reachable cluster, so it is bypassed
    # for deployment/teardown-only runs.
    if not (teardown or deploy):
        # Iterate over a copy since items is mutated inside the loop.
        for item in items[:]:
            skipif_ocp_version_marker = item.get_closest_marker("skipif_ocp_version")
            skipif_ocs_version_marker = item.get_closest_marker("skipif_ocs_version")
            skipif_upgraded_from_marker = item.get_closest_marker(
                "skipif_upgraded_from"
            )
            if skipif_ocp_version_marker:
                skip_condition = skipif_ocp_version_marker.args
                # skip_condition will be a tuple
                # and condition will be first element in the tuple
                if skipif_ocp_version(skip_condition[0]):
                    log.info(
                        f"Test: {item} will be skipped due to OCP {skip_condition}"
                    )
                    items.remove(item)
                    continue
            if skipif_ocs_version_marker:
                skip_condition = skipif_ocs_version_marker.args
                # skip_condition will be a tuple
                # and condition will be first element in the tuple
                if skipif_ocs_version(skip_condition[0]):
                    log.info(f"Test: {item} will be skipped due to {skip_condition}")
                    items.remove(item)
                    continue
            if skipif_upgraded_from_marker:
                skip_args = skipif_upgraded_from_marker.args
                if skipif_upgraded_from(skip_args[0]):
                    log.info(
                        f"Test: {item} will be skipped because the OCS cluster is"
                        f" upgraded from one of these versions: {skip_args[0]}"
                    )
                    items.remove(item)
@pytest.fixture()
def supported_configuration():
    """
    Check that cluster nodes have enough CPU and Memory as described in:
    https://access.redhat.com/documentation/en-us/red_hat_openshift_container_storage/4.2/html-single/planning_your_deployment/index#infrastructure-requirements_rhocs
    This fixture is intended as a prerequisite for tests or fixtures that
    run flaky on configurations that don't meet minimal requirements.
    Minimum requirements for each starting node (OSD+MON):
    16 CPUs
    64 GB memory
    Last documentation check: 2020-02-21
    """
    required_cpu = constants.MIN_NODE_CPU
    required_memory = constants.MIN_NODE_MEMORY
    log.info("Checking if system meets minimal requirements")
    if check_nodes_specs(min_memory=required_memory, min_cpu=required_cpu):
        return
    # Specs are insufficient: mark dependent tests as expected failures.
    pytest.xfail(
        f"At least one of the worker nodes doesn't meet the "
        f"required minimum specs of {required_cpu} vCPUs and {required_memory} RAM"
    )
@pytest.fixture(scope="session", autouse=True)
def auto_load_auth_config():
    """Load the auth config into the framework config, when it exists."""
    try:
        config.update({"AUTH": load_auth_config()})
    except FileNotFoundError:
        # The auth file is optional; its absence is silently ignored.
        pass
@pytest.fixture(scope="class")
def secret_factory_class(request):
    # Class-scoped variant of the secret factory.
    return secret_factory_fixture(request)
@pytest.fixture(scope="session")
def secret_factory_session(request):
    # Session-scoped variant of the secret factory.
    return secret_factory_fixture(request)
@pytest.fixture(scope="function")
def secret_factory(request):
    # Function-scoped variant of the secret factory.
    return secret_factory_fixture(request)
def secret_factory_fixture(request):
    """
    Secret factory. Calling this fixture creates a new secret.
    RBD based is default.
    ** This method should not be used anymore **
    ** This method is for internal testing only **
    """
    created_secrets = []
    def factory(interface=constants.CEPHBLOCKPOOL):
        """
        Args:
            interface (str): CephBlockPool or CephFileSystem. This decides
                whether a RBD based or CephFS resource is created.
                RBD is default.
        """
        secret_obj = helpers.create_secret(interface_type=interface)
        assert secret_obj, "Failed to create a secret"
        # Track the secret so the finalizer can remove it.
        created_secrets.append(secret_obj)
        return secret_obj
    def finalizer():
        """
        Delete the RBD secrets
        """
        for secret_obj in created_secrets:
            secret_obj.delete()
            secret_obj.ocp.wait_for_delete(secret_obj.name)
    request.addfinalizer(finalizer)
    return factory
@pytest.fixture(scope="session", autouse=True)
def log_ocs_version(cluster):
    """
    Fixture handling version reporting for OCS.
    This fixture handles alignment of the version reporting, so that we:
    * report version for each test run (no matter if just deployment, just
      test or both deployment and tests are executed)
    * prevent conflict of version reporting with deployment/teardown (eg. we
      should not run the version logging before actual deployment, or after
      a teardown)
    Version is reported in:
    * log entries of INFO log level during test setup phase
    * ocs_version file in cluster path directory (for copy pasting into bug
      reports)

    Depends on the ``cluster`` fixture so reporting happens only after any
    requested deployment has completed.
    """
    teardown = config.RUN["cli_params"].get("teardown")
    deploy = config.RUN["cli_params"].get("deploy")
    dev_mode = config.RUN["cli_params"].get("dev_mode")
    skip_ocs_deployment = config.ENV_DATA["skip_ocs_deployment"]
    if teardown and not deploy:
        log.info("Skipping version reporting for teardown.")
        return
    elif dev_mode:
        log.info("Skipping version reporting for development mode.")
        return
    elif skip_ocs_deployment:
        log.info("Skipping version reporting since OCS deployment is skipped.")
        return
    cluster_version, image_dict = get_ocs_version()
    # Timestamped file name so repeated runs never overwrite each other.
    file_name = os.path.join(
        config.ENV_DATA["cluster_path"], "ocs_version." + datetime.now().isoformat()
    )
    with open(file_name, "w") as file_obj:
        report_ocs_version(cluster_version, image_dict, file_obj)
    log.info("human readable ocs version info written into %s", file_name)
@pytest.fixture(scope="class")
def ceph_pool_factory_class(request, replica=3, compression=None):
    # Class-scoped variant; default-valued params are plain defaults,
    # not fixture lookups.
    return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
@pytest.fixture(scope="session")
def ceph_pool_factory_session(request, replica=3, compression=None):
    # Session-scoped variant of the Ceph pool factory.
    return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
@pytest.fixture(scope="function")
def ceph_pool_factory(request, replica=3, compression=None):
    # Function-scoped variant of the Ceph pool factory.
    return ceph_pool_factory_fixture(request, replica=replica, compression=compression)
def ceph_pool_factory_fixture(request, replica=3, compression=None):
    """
    Create a Ceph pool factory.
    Calling this fixture creates new Ceph pool instance.
    ** This method should not be used anymore **
    ** This method is for internal testing only **
    """
    instances = []
    def factory(
        interface=constants.CEPHBLOCKPOOL, replica=replica, compression=compression
    ):
        # NOTE(review): an interface other than CephBlockPool/CephFileSystem
        # leaves ceph_pool_obj unbound (NameError at the assert below) —
        # confirm callers only ever pass these two values.
        if interface == constants.CEPHBLOCKPOOL:
            ceph_pool_obj = helpers.create_ceph_block_pool(
                replica=replica, compression=compression
            )
        elif interface == constants.CEPHFILESYSTEM:
            # CephFS wraps the pre-existing shared filesystem object rather
            # than creating a new pool.
            cfs = ocp.OCP(
                kind=constants.CEPHFILESYSTEM, namespace=defaults.ROOK_CLUSTER_NAMESPACE
            ).get(defaults.CEPHFILESYSTEM_NAME)
            ceph_pool_obj = OCS(**cfs)
        assert ceph_pool_obj, f"Failed to create {interface} pool"
        # The shared CephFS object must survive the test, so only block
        # pools are queued for deletion.
        if interface != constants.CEPHFILESYSTEM:
            instances.append(ceph_pool_obj)
        return ceph_pool_obj
    def finalizer():
        """
        Delete the Ceph block pool
        """
        for instance in instances:
            instance.delete()
            instance.ocp.wait_for_delete(instance.name)
    request.addfinalizer(finalizer)
    return factory
@pytest.fixture(scope="class")
def storageclass_factory_class(request, ceph_pool_factory_class, secret_factory_class):
    # Class-scoped variant of the storage class factory.
    return storageclass_factory_fixture(
        request, ceph_pool_factory_class, secret_factory_class
    )
@pytest.fixture(scope="session")
def storageclass_factory_session(
    request, ceph_pool_factory_session, secret_factory_session
):
    # Session-scoped variant of the storage class factory.
    return storageclass_factory_fixture(
        request, ceph_pool_factory_session, secret_factory_session
    )
@pytest.fixture(scope="function")
def storageclass_factory(request, ceph_pool_factory, secret_factory):
    # Function-scoped variant of the storage class factory.
    return storageclass_factory_fixture(request, ceph_pool_factory, secret_factory)
def storageclass_factory_fixture(
    request,
    ceph_pool_factory,
    secret_factory,
):
    """
    Create a storage class factory. Default is RBD based.
    Calling this fixture creates new storage class instance.
    ** This method should not be used anymore **
    ** This method is for internal testing only **
    """
    instances = []
    def factory(
        interface=constants.CEPHBLOCKPOOL,
        secret=None,
        custom_data=None,
        sc_name=None,
        reclaim_policy=constants.RECLAIM_POLICY_DELETE,
        replica=3,
        compression=None,
        new_rbd_pool=False,
        pool_name=None,
    ):
        """
        Args:
            interface (str): CephBlockPool or CephFileSystem. This decides
                whether a RBD based or CephFS resource is created.
                RBD is default.
            secret (object): An OCS instance for the secret.
            custom_data (dict): If provided then storageclass object is created
                by using these data. Parameters `block_pool` and `secret`
                are not used but references are set if provided.
            sc_name (str): Name of the storage class
            reclaim_policy (str): Reclaim policy for the storage class
            replica (int): Replica size for a pool
            compression (str): Compression type option for a pool
            new_rbd_pool (bool): True if user wants to create new rbd pool for SC
            pool_name (str): Existing pool name to create the storageclass other
                then the default rbd pool.
        Returns:
            object: helpers.create_storage_class instance with links to
                block_pool and secret.
        """
        if custom_data:
            sc_obj = helpers.create_resource(**custom_data)
        else:
            secret = secret or secret_factory(interface=interface)
            if interface == constants.CEPHBLOCKPOOL:
                # Only create a fresh pool when asked; otherwise reuse the
                # default (or caller-supplied) block pool.
                if config.ENV_DATA.get("new_rbd_pool") or new_rbd_pool:
                    pool_obj = ceph_pool_factory(
                        interface=interface,
                        replica=config.ENV_DATA.get("replica") or replica,
                        compression=config.ENV_DATA.get("compression") or compression,
                    )
                    interface_name = pool_obj.name
                else:
                    if pool_name is None:
                        interface_name = helpers.default_ceph_block_pool()
                    else:
                        interface_name = pool_name
            elif interface == constants.CEPHFILESYSTEM:
                interface_name = helpers.get_cephfs_data_pool_name()
            sc_obj = helpers.create_storage_class(
                interface_type=interface,
                interface_name=interface_name,
                secret_name=secret.name,
                sc_name=sc_name,
                reclaim_policy=reclaim_policy,
            )
            assert sc_obj, f"Failed to create {interface} storage class"
            sc_obj.secret = secret
        instances.append(sc_obj)
        return sc_obj
    def finalizer():
        """
        Delete the storageclass
        """
        for instance in instances:
            instance.delete()
            instance.ocp.wait_for_delete(instance.name)
    request.addfinalizer(finalizer)
    return factory
@pytest.fixture(scope="class")
def project_factory_class(request):
    # Class-scoped variant of the project factory.
    return project_factory_fixture(request)
@pytest.fixture(scope="session")
def project_factory_session(request):
    # Session-scoped variant of the project factory.
    return project_factory_fixture(request)
@pytest.fixture()
def project_factory(request):
    # Function-scoped variant of the project factory.
    return project_factory_fixture(request)
@pytest.fixture()
def project(project_factory):
    """
    This fixture creates a single project instance.
    """
    return project_factory()
def project_factory_fixture(request):
    """
    Create a new project factory.
    Calling this fixture creates new project.
    """
    instances = []
    def factory(project_name=None):
        """
        Args:
            project_name (str): The name for the new project
        Returns:
            object: ocs_ci.ocs.resources.ocs instance of 'Project' kind.
        """
        proj_obj = helpers.create_project(project_name=project_name)
        instances.append(proj_obj)
        return proj_obj
    def finalizer():
        """
        Delete the project
        """
        for instance in instances:
            # Log an event summary for the namespace before it disappears,
            # to help post-mortem debugging of failed runs.
            try:
                ocp_event = ocp.OCP(kind="Event", namespace=instance.namespace)
                events = ocp_event.get()
                event_count = len(events["items"])
                warn_event_count = 0
                for event in events["items"]:
                    if event["type"] == "Warning":
                        warn_event_count += 1
                log.info(
                    (
                        "There were %d events in %s namespace before it's"
                        " removal (out of which %d were of type Warning)."
                        " For a full dump of this event list, see DEBUG logs."
                    ),
                    event_count,
                    instance.namespace,
                    warn_event_count,
                )
            except Exception:
                # we don't want any problem to disrupt the teardown itself
                log.exception("Failed to get events for project %s", instance.namespace)
            # Leave the namespace being deleted before removing it.
            ocp.switch_to_default_rook_cluster_project()
            instance.delete(resource_name=instance.namespace)
            instance.wait_for_delete(instance.namespace, timeout=300)
    request.addfinalizer(finalizer)
    return factory
@pytest.fixture(scope="class")
def pvc_factory_class(request, project_factory_class):
    # Class-scoped variant of the PVC factory.
    return pvc_factory_fixture(request, project_factory_class)
@pytest.fixture(scope="session")
def pvc_factory_session(request, project_factory_session):
    # Session-scoped variant of the PVC factory.
    return pvc_factory_fixture(request, project_factory_session)
@pytest.fixture(scope="function")
def pvc_factory(request, project_factory):
    # Function-scoped variant of the PVC factory.
    return pvc_factory_fixture(
        request,
        project_factory,
    )
def pvc_factory_fixture(request, project_factory):
    """
    Create a persistent Volume Claim factory. Calling this fixture creates new
    PVC. For custom PVC provide 'storageclass' parameter.
    """
    instances = []
    # The first project created is cached and reused by subsequent calls.
    active_project = None
    # NOTE(review): the two cached storageclass variables below are written
    # but never read back when choosing a storageclass — confirm whether the
    # caching was intended to be used like active_project.
    active_rbd_storageclass = None
    active_cephfs_storageclass = None
    def factory(
        interface=constants.CEPHBLOCKPOOL,
        project=None,
        storageclass=None,
        size=None,
        access_mode=constants.ACCESS_MODE_RWO,
        custom_data=None,
        status=constants.STATUS_BOUND,
        volume_mode=None,
    ):
        """
        Args:
            interface (str): CephBlockPool or CephFileSystem. This decides
                whether a RBD based or CephFS resource is created.
                RBD is default.
            project (object): ocs_ci.ocs.resources.ocs.OCS instance
                of 'Project' kind.
            storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
                of 'StorageClass' kind.
            size (int): The requested size for the PVC
            access_mode (str): ReadWriteOnce, ReadOnlyMany or ReadWriteMany.
                This decides the access mode to be used for the PVC.
                ReadWriteOnce is default.
            custom_data (dict): If provided then PVC object is created
                by using these data. Parameters `project` and `storageclass`
                are not used but reference is set if provided.
            status (str): If provided then factory waits for object to reach
                desired state.
            volume_mode (str): Volume mode for PVC.
                eg: volume_mode='Block' to create rbd `block` type volume
        Returns:
            object: helpers.create_pvc instance.
        """
        if custom_data:
            pvc_obj = PVC(**custom_data)
            pvc_obj.create(do_reload=False)
        else:
            nonlocal active_project
            nonlocal active_rbd_storageclass
            nonlocal active_cephfs_storageclass
            project = project or active_project or project_factory()
            active_project = project
            if interface == constants.CEPHBLOCKPOOL:
                storageclass = storageclass or helpers.default_storage_class(
                    interface_type=interface
                )
                active_rbd_storageclass = storageclass
            elif interface == constants.CEPHFILESYSTEM:
                storageclass = storageclass or helpers.default_storage_class(
                    interface_type=interface
                )
                active_cephfs_storageclass = storageclass
            pvc_size = f"{size}Gi" if size else None
            pvc_obj = helpers.create_pvc(
                sc_name=storageclass.name,
                namespace=project.namespace,
                size=pvc_size,
                do_reload=False,
                access_mode=access_mode,
                volume_mode=volume_mode,
            )
            assert pvc_obj, "Failed to create PVC"
        if status:
            helpers.wait_for_resource_state(pvc_obj, status)
        pvc_obj.storageclass = storageclass
        pvc_obj.project = project
        pvc_obj.access_mode = access_mode
        instances.append(pvc_obj)
        return pvc_obj
    def finalizer():
        """
        Delete the PVC
        """
        pv_objs = []
        # Get PV form PVC instances and delete PVCs
        for instance in instances:
            if not instance.is_deleted:
                pv_objs.append(instance.backed_pv_obj)
                instance.delete()
                instance.ocp.wait_for_delete(instance.name)
        # Wait for PVs to delete
        # If they have ReclaimPolicy set to Retain then delete them manually
        for pv_obj in pv_objs:
            if (
                pv_obj.data.get("spec").get("persistentVolumeReclaimPolicy")
                == constants.RECLAIM_POLICY_RETAIN
            ):
                helpers.wait_for_resource_state(pv_obj, constants.STATUS_RELEASED)
                pv_obj.delete()
                pv_obj.ocp.wait_for_delete(pv_obj.name)
            else:
                # Workaround for bug 1915706, increasing timeout from 180 to 720
                timeout = (
                    720
                    if config.ENV_DATA["platform"].lower() == constants.AZURE_PLATFORM
                    else 180
                )
                pv_obj.ocp.wait_for_delete(resource_name=pv_obj.name, timeout=timeout)
    request.addfinalizer(finalizer)
    return factory
@pytest.fixture(scope="class")
def pod_factory_class(request, pvc_factory_class):
    # Class-scoped variant of the pod factory.
    return pod_factory_fixture(request, pvc_factory_class)
@pytest.fixture(scope="session")
def pod_factory_session(request, pvc_factory_session):
    # Session-scoped variant of the pod factory.
    return pod_factory_fixture(request, pvc_factory_session)
@pytest.fixture(scope="function")
def pod_factory(request, pvc_factory):
    # Function-scoped variant of the pod factory.
    return pod_factory_fixture(request, pvc_factory)
def pod_factory_fixture(request, pvc_factory):
    """
    Create a Pod factory. Calling this fixture creates new Pod.
    For custom Pods provide 'pvc' parameter.
    """
    instances = []
    def factory(
        interface=constants.CEPHBLOCKPOOL,
        pvc=None,
        custom_data=None,
        status=constants.STATUS_RUNNING,
        node_name=None,
        pod_dict_path=None,
        raw_block_pv=False,
        deployment_config=False,
        service_account=None,
        replica_count=1,
        command=None,
        command_args=None,
    ):
        """
        Args:
            interface (str): CephBlockPool or CephFileSystem. This decides
                whether a RBD based or CephFS resource is created.
                RBD is default.
            pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
            custom_data (dict): If provided then Pod object is created
                by using these data. Parameter `pvc` is not used but reference
                is set if provided.
            status (str): If provided then factory waits for object to reach
                desired state.
            node_name (str): The name of specific node to schedule the pod
            pod_dict_path (str): YAML path for the pod.
            raw_block_pv (bool): True for creating raw block pv based pod,
                False otherwise.
            deployment_config (bool): True for DeploymentConfig creation,
                False otherwise
            service_account (OCS): Service account object, in case DeploymentConfig
                is to be created
            replica_count (int): The replica count for deployment config
            command (list): The command to be executed on the pod
            command_args (list): The arguments to be sent to the command running
                on the pod
        Returns:
            object: helpers.create_pod instance
        """
        sa_name = service_account.name if service_account else None
        if custom_data:
            pod_obj = helpers.create_resource(**custom_data)
        else:
            pvc = pvc or pvc_factory(interface=interface)
            pod_obj = helpers.create_pod(
                pvc_name=pvc.name,
                namespace=pvc.namespace,
                interface_type=interface,
                node_name=node_name,
                pod_dict_path=pod_dict_path,
                raw_block_pv=raw_block_pv,
                dc_deployment=deployment_config,
                sa_name=sa_name,
                replica_count=replica_count,
                command=command,
                command_args=command_args,
            )
            assert pod_obj, "Failed to create pod"
        if deployment_config:
            # For DCs, teardown targets the DeploymentConfig object rather
            # than the (transient) pod it spawned.
            dc_name = pod_obj.get_labels().get("name")
            dc_ocp_dict = ocp.OCP(
                kind=constants.DEPLOYMENTCONFIG, namespace=pod_obj.namespace
            ).get(resource_name=dc_name)
            dc_obj = OCS(**dc_ocp_dict)
            instances.append(dc_obj)
        else:
            instances.append(pod_obj)
        if status:
            helpers.wait_for_resource_state(pod_obj, status)
            pod_obj.reload()
        # NOTE(review): when custom_data is given and no pvc argument is
        # passed, this sets pod_obj.pvc to None — confirm that is intended.
        pod_obj.pvc = pvc
        if deployment_config:
            return dc_obj
        return pod_obj
    def finalizer():
        """
        Delete the Pod or the DeploymentConfig
        """
        for instance in instances:
            instance.delete()
            instance.ocp.wait_for_delete(instance.name)
    request.addfinalizer(finalizer)
    return factory
@pytest.fixture(scope="class")
def teardown_factory_class(request):
    # Class-scoped variant of the teardown factory.
    return teardown_factory_fixture(request)
@pytest.fixture(scope="session")
def teardown_factory_session(request):
    # Session-scoped variant of the teardown factory.
    return teardown_factory_fixture(request)
@pytest.fixture(scope="function")
def teardown_factory(request):
    # Function-scoped variant of the teardown factory.
    return teardown_factory_fixture(request)
def teardown_factory_fixture(request):
    """
    Tearing down a resource that was created during the test
    To use this factory, you'll need to pass 'teardown_factory' to your test
    function and call it in your test when a new resource was created and you
    want it to be removed in teardown phase:
    def test_example(self, teardown_factory):
        pvc_obj = create_pvc()
        teardown_factory(pvc_obj)
    """
    queued_resources = []
    def factory(resource_obj):
        """
        Args:
            resource_obj (OCS object or list of OCS objects) : Object to teardown after the test
        """
        if isinstance(resource_obj, list):
            queued_resources.extend(resource_obj)
        else:
            queued_resources.append(resource_obj)
    def finalizer():
        """
        Delete the resources created in the test
        """
        # Remove in reverse registration order so dependents go first.
        for resource in reversed(queued_resources):
            if resource.is_deleted:
                continue
            reclaim_policy = (
                resource.reclaim_policy if resource.kind == constants.PVC else None
            )
            resource.delete()
            resource.ocp.wait_for_delete(resource.name)
            if reclaim_policy == constants.RECLAIM_POLICY_DELETE:
                helpers.validate_pv_delete(resource.backed_pv)
    request.addfinalizer(finalizer)
    return factory
@pytest.fixture(scope="class")
def service_account_factory_class(request):
    # Class-scoped variant of the service account factory.
    return service_account_factory_fixture(request)
@pytest.fixture(scope="session")
def service_account_factory_session(request):
    # Session-scoped variant of the service account factory.
    return service_account_factory_fixture(request)
@pytest.fixture(scope="function")
def service_account_factory(request):
    # Function-scoped variant of the service account factory.
    return service_account_factory_fixture(request)
def service_account_factory_fixture(request):
    """
    Create a service account
    """
    instances = []
    # Cache of the last created/fetched SA; re-returned by no-arg calls.
    active_service_account_obj = None
    def factory(project=None, service_account=None):
        """
        Args:
            project (object): ocs_ci.ocs.resources.ocs.OCS instance
                of 'Project' kind.
            service_account (str): service_account_name
        Returns:
            object: serviceaccount instance.
        """
        nonlocal active_service_account_obj
        # Without an explicit name, reuse the previously produced SA.
        if active_service_account_obj and not service_account:
            return active_service_account_obj
        elif service_account:
            sa_obj = helpers.get_serviceaccount_obj(
                sa_name=service_account, namespace=project.namespace
            )
            # Ensure the SA has the required SCC policy attached.
            if not helpers.validate_scc_policy(
                sa_name=service_account, namespace=project.namespace
            ):
                helpers.add_scc_policy(
                    sa_name=service_account, namespace=project.namespace
                )
            sa_obj.project = project
            active_service_account_obj = sa_obj
            instances.append(sa_obj)
            return sa_obj
        else:
            sa_obj = helpers.create_serviceaccount(
                namespace=project.namespace,
            )
            sa_obj.project = project
            active_service_account_obj = sa_obj
            helpers.add_scc_policy(sa_name=sa_obj.name, namespace=project.namespace)
            assert sa_obj, "Failed to create serviceaccount"
            instances.append(sa_obj)
            return sa_obj
    def finalizer():
        """
        Delete the service account
        """
        for instance in instances:
            # Drop the SCC policy before removing the SA itself.
            helpers.remove_scc_policy(
                sa_name=instance.name, namespace=instance.namespace
            )
            instance.delete()
            instance.ocp.wait_for_delete(resource_name=instance.name)
    request.addfinalizer(finalizer)
    return factory
@pytest.fixture()
def dc_pod_factory(request, pvc_factory, service_account_factory):
    """
    Create deploymentconfig pods
    """
    instances = []
    def factory(
        interface=constants.CEPHBLOCKPOOL,
        pvc=None,
        service_account=None,
        size=None,
        custom_data=None,
        node_name=None,
        node_selector=None,
        replica_count=1,
        raw_block_pv=False,
        sa_obj=None,
        wait=True,
    ):
        """
        Args:
            interface (str): CephBlockPool or CephFileSystem. This decides
                whether a RBD based or CephFS resource is created.
                RBD is default.
            pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
            service_account (str): service account name for dc_pods
            size (int): The requested size for the PVC
            custom_data (dict): If provided then Pod object is created
                by using these data. Parameter `pvc` is not used but reference
                is set if provided.
            node_name (str): The name of specific node to schedule the pod
            node_selector (dict): dict of key-value pair to be used for nodeSelector field
                eg: {'nodetype': 'app-pod'}
            replica_count (int): Replica count for deployment config
            raw_block_pv (str): True if pod with raw block pvc
            sa_obj (object) : If specific service account is needed
            wait (bool): True to wait for the pod to reach Running state,
                False to return immediately after creation.
        """
        if custom_data:
            dc_pod_obj = helpers.create_resource(**custom_data)
        else:
            pvc = pvc or pvc_factory(interface=interface, size=size)
            sa_obj = sa_obj or service_account_factory(
                project=pvc.project, service_account=service_account
            )
            dc_pod_obj = helpers.create_pod(
                interface_type=interface,
                pvc_name=pvc.name,
                do_reload=False,
                namespace=pvc.namespace,
                sa_name=sa_obj.name,
                dc_deployment=True,
                replica_count=replica_count,
                node_name=node_name,
                node_selector=node_selector,
                raw_block_pv=raw_block_pv,
                pod_dict_path=constants.FEDORA_DC_YAML,
            )
        instances.append(dc_pod_obj)
        log.info(dc_pod_obj.name)
        if wait:
            helpers.wait_for_resource_state(
                dc_pod_obj, constants.STATUS_RUNNING, timeout=180
            )
        dc_pod_obj.pvc = pvc
        return dc_pod_obj
    def finalizer():
        """
        Delete dc pods
        """
        for instance in instances:
            delete_deploymentconfig_pods(instance)
    request.addfinalizer(finalizer)
    return factory
@pytest.fixture(scope="session", autouse=True)
def polarion_testsuite_properties(record_testsuite_property, pytestconfig):
    """
    Configures polarion testsuite properties for junit xml
    """
    record_testsuite_property(
        "polarion-project-id", config.REPORTING["polarion"]["project_id"]
    )
    build_url = config.RUN.get("jenkins_build_url")
    if build_url:
        record_testsuite_property("polarion-custom-description", build_url)
    record_testsuite_property("polarion-testrun-id", get_testrun_name())
    record_testsuite_property("polarion-testrun-status-id", "inprogress")
    record_testsuite_property("polarion-custom-isautomated", "True")
@pytest.fixture(scope="session", autouse=True)
def additional_testsuite_properties(record_testsuite_property, pytestconfig):
    """
    Configures additional custom testsuite properties for junit xml
    """
    # Record the logs url, when one is configured for this run.
    configured_logs_url = config.RUN.get("logs_url")
    if configured_logs_url:
        record_testsuite_property("logs-url", configured_logs_url)
@pytest.fixture(scope="session")
def tier_marks_name():
    """
    Gets the tier mark names
    Returns:
        list: list of tier mark names
    """
    tier_marks_name = []
    # `tier_marks` is a module-level list defined earlier in this file.
    for each_tier in tier_marks:
        try:
            tier_marks_name.append(each_tier.name)
        except AttributeError:
            # NOTE(review): fallback presumably handles mark factories whose
            # first call argument carries the actual mark — confirm against
            # the tier_marks definition.
            tier_marks_name.append(each_tier().args[0].name)
    return tier_marks_name
@pytest.fixture(scope="function", autouse=True)
def health_checker(request, tier_marks_name):
    """
    Verify Ceph health at setup of every tier-marked test and again at
    teardown. A test whose setup check fails is skipped, and the teardown
    check is suppressed for it via the `skipped` flag.
    """
    skipped = False
    dev_mode = config.RUN["cli_params"].get("dev_mode")
    if dev_mode:
        log.info("Skipping health checks for development mode")
        return
    def finalizer():
        if not skipped:
            try:
                teardown = config.RUN["cli_params"]["teardown"]
                skip_ocs_deployment = config.ENV_DATA["skip_ocs_deployment"]
                if not (teardown or skip_ocs_deployment):
                    ceph_health_check_base()
                    log.info("Ceph health check passed at teardown")
            except CephHealthException:
                log.info("Ceph health check failed at teardown")
                # Retrying to increase the chance the cluster health will be OK
                # for next test
                ceph_health_check()
                raise
    node = request.node
    request.addfinalizer(finalizer)
    # Only tier-marked tests get the setup-time health gate.
    for mark in node.iter_markers():
        if mark.name in tier_marks_name:
            log.info("Checking for Ceph Health OK ")
            try:
                status = ceph_health_check_base()
                if status:
                    log.info("Ceph health check passed at setup")
                    return
            except CephHealthException:
                skipped = True
                # skip because ceph is not in good health
                pytest.skip("Ceph health check failed at setup")
@pytest.fixture(scope="session", autouse=True)
def cluster(request, log_cli_level):
    """
    This fixture initiates deployment for both OCP and OCS clusters.
    Specific platform deployment classes will handle the fine details
    of action
    """
    log.info(f"All logs located at {ocsci_log_path()}")
    teardown = config.RUN["cli_params"]["teardown"]
    deploy = config.RUN["cli_params"]["deploy"]
    if teardown or deploy:
        factory = dep_factory.DeploymentFactory()
        deployer = factory.get_deployment()
        # Add a finalizer to teardown the cluster after test execution is finished
        if teardown:
            def cluster_teardown_finalizer():
                # If KMS is configured, clean up the backend resources
                # we are doing it before OCP cleanup
                if config.DEPLOYMENT.get("kms_deployment"):
                    kms = KMS.get_kms_deployment()
                    kms.cleanup()
                deployer.destroy_cluster(log_cli_level)
            request.addfinalizer(cluster_teardown_finalizer)
            log.info("Will teardown cluster because --teardown was provided")
    # Download client
    force_download = (
        config.RUN["cli_params"].get("deploy")
        and config.DEPLOYMENT["force_download_client"]
    )
    get_openshift_client(force_download=force_download)
    # set environment variable for early testing of RHCOS
    if config.ENV_DATA.get("early_testing"):
        release_img = config.ENV_DATA["RELEASE_IMG"]
        log.info(f"Running early testing of RHCOS with release image: {release_img}")
        os.environ["RELEASE_IMG"] = release_img
        os.environ["OPENSHIFT_INSTALL_RELEASE_IMAGE_OVERRIDE"] = release_img
    if deploy:
        # Deploy cluster
        deployer.deploy_cluster(log_cli_level)
@pytest.fixture(scope="class")
def environment_checker(request):
    """
    Record the cluster resource state before a test class runs and verify
    at teardown that no unexpected leftovers remain.
    """
    node = request.node
    # List of marks for which we will ignore the leftover checker
    marks_to_ignore = [m.mark for m in [deployment, ignore_leftovers]]
    # app labels of resources to be excluded for leftover check
    exclude_labels = [constants.must_gather_pod_label]
    for mark in node.iter_markers():
        if mark in marks_to_ignore:
            return
        if mark.name == ignore_leftover_label.name:
            exclude_labels.extend(list(mark.args))
    request.addfinalizer(
        partial(get_status_after_execution, exclude_labels=exclude_labels)
    )
    get_status_before_execution(exclude_labels=exclude_labels)
@pytest.fixture(scope="session")
def log_cli_level(pytestconfig):
    """
    Retrieves the log_cli_level set in pytest.ini
    Returns:
        str: log_cli_level set in pytest.ini or DEBUG if not set
    """
    configured_level = pytestconfig.getini("log_cli_level")
    return configured_level if configured_level else "DEBUG"
@pytest.fixture(scope="session", autouse=True)
def cluster_load(
    request,
    project_factory_session,
    pvc_factory_session,
    service_account_factory_session,
    pod_factory_session,
):
    """
    Run IO during the test execution
    """
    cl_load_obj = None
    io_in_bg = config.RUN.get("io_in_bg")
    log_utilization = config.RUN.get("log_utilization")
    io_load = config.RUN.get("io_load")
    # A startup failure is remembered and re-raised at session teardown.
    cluster_load_error = None
    cluster_load_error_msg = (
        "Cluster load might not work correctly during this run, because "
        "it failed with an exception: %s"
    )
    # IO load should not happen during deployment
    deployment_test = (
        True if ("deployment" in request.node.items[0].location[0]) else False
    )
    if io_in_bg and not deployment_test:
        # io_load is given as a percentage; convert to a 0-1 fraction.
        io_load = int(io_load) * 0.01
        log.info(wrap_msg("Tests will be running while IO is in the background"))
        log.info(
            "Start running IO in the background. The amount of IO that "
            "will be written is going to be determined by the cluster "
            "capabilities according to its limit"
        )
        try:
            cl_load_obj = ClusterLoad(
                project_factory=project_factory_session,
                sa_factory=service_account_factory_session,
                pvc_factory=pvc_factory_session,
                pod_factory=pod_factory_session,
                target_percentage=io_load,
            )
            cl_load_obj.reach_cluster_load_percentage()
        except Exception as ex:
            log.error(cluster_load_error_msg, ex)
            cluster_load_error = ex
    if (log_utilization or io_in_bg) and not deployment_test:
        if not cl_load_obj:
            try:
                cl_load_obj = ClusterLoad()
            except Exception as ex:
                log.error(cluster_load_error_msg, ex)
                cluster_load_error = ex
        config.RUN["load_status"] = "running"
        def finalizer():
            """
            Stop the thread that executed watch_load()
            """
            config.RUN["load_status"] = "finished"
            # `thread` is created further below; the closure resolves it
            # at teardown time, after it has been started.
            if thread:
                thread.join()
            if cluster_load_error:
                raise cluster_load_error
        request.addfinalizer(finalizer)
        def watch_load():
            """
            Watch the cluster load by monitoring the cluster latency.
            Print the cluster utilization metrics every 15 seconds.
            If IOs are running in the test background, dynamically adjust
            the IO load based on the cluster latency.
            """
            while config.RUN["load_status"] != "finished":
                time.sleep(20)
                try:
                    cl_load_obj.print_metrics(mute_logs=True)
                    if io_in_bg:
                        # load_status acts as a tiny state machine shared
                        # with the pause/reduce/resume fixtures below.
                        if config.RUN["load_status"] == "running":
                            cl_load_obj.adjust_load_if_needed()
                        elif config.RUN["load_status"] == "to_be_paused":
                            cl_load_obj.reduce_load(pause=True)
                            config.RUN["load_status"] = "paused"
                        elif config.RUN["load_status"] == "to_be_reduced":
                            cl_load_obj.reduce_load(pause=False)
                            config.RUN["load_status"] = "reduced"
                        elif config.RUN["load_status"] == "to_be_resumed":
                            cl_load_obj.resume_load()
                            config.RUN["load_status"] = "running"
                # Any type of exception should be caught and we should continue.
                # We don't want any test to fail
                except Exception:
                    continue
        thread = threading.Thread(target=watch_load)
        thread.start()
def resume_cluster_load_implementation():
    """
    Signal the background cluster-load thread to resume, then wait (up to
    300 seconds) until it reports the "running" state.
    """
    config.RUN["load_status"] = "to_be_resumed"
    sampler = TimeoutSampler(300, 3, config.RUN.get, "load_status")
    try:
        for status in sampler:
            if status == "running":
                break
    except TimeoutExpiredError:
        log.error("Cluster load was not resumed successfully")
def reduce_cluster_load_implementation(request, pause, resume=True):
    """
    Pause/reduce the background cluster load
    Args:
        request: pytest request object, used to register the resume finalizer
        pause (bool): True for completely pausing the cluster load, False for reducing it by 50%
        resume (bool): True for resuming the cluster load upon teardown, False for not resuming
    """
    # No-op when background IO was not requested for this run.
    if config.RUN.get("io_in_bg"):
        def finalizer():
            """
            Resume the cluster load
            """
            if resume:
                resume_cluster_load_implementation()
        request.addfinalizer(finalizer)
        config.RUN["load_status"] = "to_be_paused" if pause else "to_be_reduced"
        # Wait for the watch_load thread to acknowledge the state change.
        try:
            for load_status in TimeoutSampler(300, 3, config.RUN.get, "load_status"):
                if load_status in ["paused", "reduced"]:
                    break
        except TimeoutExpiredError:
            log.error(
                f"Cluster load was not {'paused' if pause else 'reduced'} successfully"
            )
@pytest.fixture()
def pause_cluster_load(request):
    """
    Pause the background cluster load without resuming it
    """
    reduce_cluster_load_implementation(request=request, pause=True, resume=False)


@pytest.fixture()
def resume_cluster_load(request):
    """
    Resume the background cluster load
    """
    # Only meaningful when background IO is actually running
    if config.RUN.get("io_in_bg"):

        def finalizer():
            """
            Resume the cluster load
            """
            resume_cluster_load_implementation()

        request.addfinalizer(finalizer)


@pytest.fixture()
def pause_and_resume_cluster_load(request):
    """
    Pause the background cluster load and resume it in teardown to the original load value
    """
    reduce_cluster_load_implementation(request=request, pause=True)


@pytest.fixture()
def reduce_and_resume_cluster_load(request):
    """
    Reduce the background cluster load to be 50% of what it is and resume the load in teardown
    to the original load value
    """
    reduce_cluster_load_implementation(request=request, pause=False)
@pytest.fixture(
    params=[
        pytest.param({"interface": constants.CEPHBLOCKPOOL}),
        pytest.param({"interface": constants.CEPHFILESYSTEM}),
    ],
    ids=["RBD", "CephFS"],
)
def interface_iterate(request):
    """
    Iterate over interfaces - CephBlockPool and CephFileSystem

    Returns:
        str: The current parametrized interface constant
    """
    return request.param["interface"]
# Scope-specific wrappers around the shared multi-PVC factory implementation.
@pytest.fixture(scope="class")
def multi_pvc_factory_class(project_factory_class, pvc_factory_class):
    return multi_pvc_factory_fixture(project_factory_class, pvc_factory_class)


@pytest.fixture(scope="session")
def multi_pvc_factory_session(project_factory_session, pvc_factory_session):
    return multi_pvc_factory_fixture(project_factory_session, pvc_factory_session)


@pytest.fixture(scope="function")
def multi_pvc_factory(project_factory, pvc_factory):
    return multi_pvc_factory_fixture(project_factory, pvc_factory)
def multi_pvc_factory_fixture(project_factory, pvc_factory):
    """
    Create a Persistent Volume Claims factory. Calling this fixture creates a
    set of new PVCs. Options for PVC creation based on provided access modes:
    1. For each PVC, choose random value from the list of access modes
    2. Create PVCs based on the specified distribution number of access modes.
       Create sets of PVCs based on the order of access modes.
    3. Create PVCs based on the specified distribution number of access modes.
       The order of PVC creation is independent of access mode.
    """

    def factory(
        interface=constants.CEPHBLOCKPOOL,
        project=None,
        storageclass=None,
        size=None,
        access_modes=None,
        access_modes_selection="distribute_sequential",
        access_mode_dist_ratio=None,
        status=constants.STATUS_BOUND,
        num_of_pvc=1,
        wait_each=False,
        timeout=60,
    ):
        """
        Args:
            interface (str): CephBlockPool or CephFileSystem. This decides
                whether a RBD based or CephFS resource is created.
                RBD is default.
            project (object): ocs_ci.ocs.resources.ocs.OCS instance
                of 'Project' kind.
            storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
                of 'StorageClass' kind.
            size (int): The requested size for the PVC
            access_modes (list): List of access modes. One of the access modes
                will be chosen for creating each PVC. If not specified,
                ReadWriteOnce will be selected for all PVCs. To specify
                volume mode, append volume mode in the access mode name
                separated by '-'.
                eg: ['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany',
                'ReadWriteMany-Block']
            access_modes_selection (str): Decides how to select accessMode for
                each PVC from the options given in 'access_modes' list.
                Values are 'select_random', 'distribute_random'
                'select_random' : While creating each PVC, one access mode will
                    be selected from the 'access_modes' list.
                'distribute_random' : The access modes in the list
                    'access_modes' will be distributed based on the values in
                    'distribute_ratio' and the order in which PVCs are created
                    will not be based on the access modes. For example, 1st and
                    6th PVC might have same access mode.
                'distribute_sequential' : The access modes in the list
                    'access_modes' will be distributed based on the values in
                    'distribute_ratio' and the order in which PVCs are created
                    will be as sets of PVCs of same access mode. For example,
                    first set of 10 will be having same access mode followed by
                    next set of 13 with a different access mode.
            access_mode_dist_ratio (list): Contains the number of PVCs to be
                created for each access mode. If not specified, the given list
                of access modes will be equally distributed among the PVCs.
                eg: [10,12] for num_of_pvc=22 and
                access_modes=['ReadWriteOnce', 'ReadWriteMany']
            status (str): If provided then factory waits for object to reach
                desired state.
            num_of_pvc(int): Number of PVCs to be created
            wait_each(bool): True to wait for each PVC to be in status 'status'
                before creating next PVC, False otherwise
            timeout(int): Time in seconds to wait

        Returns:
            list: objects of PVC class.
        """
        pvc_list = []
        # When waiting per-PVC, pass the desired status straight to pvc_factory;
        # otherwise create all PVCs first and wait for them in bulk at the end.
        if wait_each:
            status_tmp = status
        else:
            status_tmp = ""

        project = project or project_factory()
        storageclass = storageclass or helpers.default_storage_class(
            interface_type=interface
        )
        access_modes = access_modes or [constants.ACCESS_MODE_RWO]

        # Build the per-PVC access-mode list according to the selection policy
        access_modes_list = []
        if access_modes_selection == "select_random":
            for _ in range(num_of_pvc):
                mode = random.choice(access_modes)
                access_modes_list.append(mode)
        else:
            if not access_mode_dist_ratio:
                # Distribute modes evenly; the last mode absorbs the remainder
                num_of_modes = len(access_modes)
                dist_val = floor(num_of_pvc / num_of_modes)
                access_mode_dist_ratio = [dist_val] * num_of_modes
                access_mode_dist_ratio[-1] = dist_val + (num_of_pvc % num_of_modes)
            zipped_share = list(zip(access_modes, access_mode_dist_ratio))
            for mode, share in zipped_share:
                access_modes_list.extend([mode] * share)

        if access_modes_selection == "distribute_random":
            random.shuffle(access_modes_list)

        for access_mode in access_modes_list:
            # 'Mode-VolumeMode' entries carry an explicit volume mode suffix
            if "-" in access_mode:
                access_mode, volume_mode = access_mode.split("-")
            else:
                volume_mode = ""
            pvc_obj = pvc_factory(
                interface=interface,
                project=project,
                storageclass=storageclass,
                size=size,
                access_mode=access_mode,
                status=status_tmp,
                volume_mode=volume_mode,
            )
            pvc_list.append(pvc_obj)
            pvc_obj.project = project
        # Bulk wait when per-PVC waiting was skipped above
        if status and not wait_each:
            for pvc_obj in pvc_list:
                helpers.wait_for_resource_state(pvc_obj, status, timeout=timeout)
        return pvc_list

    return factory
@pytest.fixture(scope="function")
def memory_leak_function(request):
    """
    Function to start Memory leak thread which will be executed parallel with test run

    Memory leak data will be captured in all worker nodes for ceph-osd process
    Data will be appended in /tmp/(worker)-top-output.txt file for each worker
    During teardown created tmp files will be deleted

    Usage:
        test_case(.., memory_leak_function):
            .....
            median_dict = helpers.get_memory_leak_median_value()
            .....
            TC execution part, memory_leak_fun will capture data
            ....
            helpers.memory_leak_analysis(median_dict)
            ....
    """

    def finalizer():
        """
        Finalizer to stop memory leak data capture thread and cleanup the files
        """
        # Signal the background thread (via the flag file) to stop
        set_flag_status("terminated")
        try:
            for status in TimeoutSampler(90, 3, get_flag_status):
                if status == "terminated":
                    break
        except TimeoutExpiredError:
            # NOTE(review): these two literals concatenate without a space;
            # kept as-is since it is a runtime log message.
            log.warning(
                "Background test execution still in progress before"
                "memory leak thread terminated"
            )
        if thread:
            thread.join()
        # Preserve the captured data in the run's log directory, then clean up
        log_path = ocsci_log_path()
        for worker in node.get_worker_nodes():
            if os.path.exists(f"/tmp/{worker}-top-output.txt"):
                copyfile(
                    f"/tmp/{worker}-top-output.txt",
                    f"{log_path}/{worker}-top-output.txt",
                )
                os.remove(f"/tmp/{worker}-top-output.txt")
        log.info("Memory leak capture has stopped")

    request.addfinalizer(finalizer)

    # Flag file shared between the test process and the capture thread
    temp_file = tempfile.NamedTemporaryFile(
        mode="w+", prefix="test_status", delete=False
    )

    def get_flag_status():
        with open(temp_file.name, "r") as t_file:
            return t_file.readline()

    def set_flag_status(value):
        with open(temp_file.name, "w") as t_file:
            t_file.writelines(value)

    set_flag_status("running")

    def run_memory_leak_in_bg():
        """
        Function to run memory leak in background thread
        Memory leak data is written in below format
        date time PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
        """
        oc = ocp.OCP(namespace=config.ENV_DATA["cluster_namespace"])
        while get_flag_status() == "running":
            for worker in node.get_worker_nodes():
                filename = f"/tmp/{worker}-top-output.txt"
                top_cmd = f"debug nodes/{worker} -- chroot /host top -n 2 b"
                with open("/tmp/file.txt", "w+") as temp:
                    temp.write(
                        str(oc.exec_oc_cmd(command=top_cmd, out_yaml_format=False))
                    )
                    temp.seek(0)
                    # Keep only the ceph-osd lines, timestamped per sample
                    for line in temp:
                        if line.__contains__("ceph-osd"):
                            with open(filename, "a+") as f:
                                f.write(str(datetime.now()))
                                f.write(" ")
                                f.write(line)

    log.info("Start memory leak data capture in the test background")
    thread = threading.Thread(target=run_memory_leak_in_bg)
    thread.start()
@pytest.fixture()
def aws_obj():
    """
    Initialize AWS instance

    Returns:
        AWS: An instance of AWS class
    """
    return aws.AWS()
@pytest.fixture()
def ec2_instances(request, aws_obj):
    """
    Get cluster instances

    Returns:
        dict: The ID keys and the name values of the instances
    """
    # Get all cluster nodes objects
    nodes = node.get_node_objs()

    # Get the cluster nodes ec2 instances
    ec2_instances = aws.get_instances_ids_and_names(nodes)
    assert (
        ec2_instances
    ), f"Failed to get ec2 instances for node {[n.name for n in nodes]}"

    def finalizer():
        """
        Make sure all instances are running
        """
        # Getting the instances that are in status 'stopping' (if there are any), to wait for them to
        # get to status 'stopped' so it will be possible to start them
        stopping_instances = {
            key: val
            for key, val in ec2_instances.items()
            if (aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPING)
        }

        # Waiting for the instances that are in status 'stopping'
        # (if there are any) to reach 'stopped'.
        # BUG FIX: iterating a dict yields its keys (instance-ID strings),
        # which have no `.key()` method — the previous `.key()` call would
        # have raised AttributeError. Pass the ID itself.
        for instance_id in stopping_instances:
            instance = aws_obj.get_ec2_instance(instance_id)
            instance.wait_until_stopped()

        stopped_instances = {
            key: val
            for key, val in ec2_instances.items()
            if (aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPED)
        }

        # Start the instances
        if stopped_instances:
            aws_obj.start_ec2_instances(instances=stopped_instances, wait=True)

    request.addfinalizer(finalizer)
    return ec2_instances
@pytest.fixture(scope="session")
def cld_mgr(request, rgw_endpoint):
    """
    Returns a cloud manager instance that'll be used throughout the session

    Returns:
        CloudManager: A CloudManager resource
    """
    cloud_manager = CloudManager()

    def _delete_client_secrets():
        # Best-effort cleanup: clients without a secret raise AttributeError
        for client_name in vars(cloud_manager):
            try:
                getattr(cloud_manager, client_name).secret.delete()
            except AttributeError:
                log.info(f"{client_name} secret not found")

    request.addfinalizer(_delete_client_secrets)
    return cloud_manager
# Scope-specific wrappers around the shared RGW resource implementation.
@pytest.fixture()
def rgw_obj(request):
    return rgw_obj_fixture(request)


@pytest.fixture(scope="session")
def rgw_obj_session(request):
    return rgw_obj_fixture(request)
def rgw_obj_fixture(request):
    """
    Returns an RGW resource that represents RGW in the cluster

    Returns:
        RGW: An RGW resource, or None when no RGW deployment exists
    """
    deployments = get_deployments_having_label(
        label=constants.RGW_APP_LABEL, namespace=config.ENV_DATA["cluster_namespace"]
    )
    return RGW() if deployments else None
@pytest.fixture()
def rgw_deployments(request):
    """
    Return RGW deployments or skip the test.
    """
    deployments = get_deployments_having_label(
        label=constants.RGW_APP_LABEL, namespace=config.ENV_DATA["cluster_namespace"]
    )
    if not deployments:
        pytest.skip("There is no RGW deployment available for this test.")
    return deployments
@pytest.fixture(scope="session")
def rgw_endpoint(request):
    """
    Expose RGW service and return external RGW endpoint address if available.

    Returns:
        string: external RGW endpoint URL, or None (implicitly) when no RGW
            service is found in the cluster
    """
    log.info("Looking for RGW service to expose")
    oc = ocp.OCP(kind=constants.SERVICE, namespace=config.ENV_DATA["cluster_namespace"])
    rgw_service = oc.get(selector=constants.RGW_APP_LABEL)["items"]
    if rgw_service:
        # Pick the well-known service name matching the deployment mode
        if config.DEPLOYMENT["external_mode"]:
            rgw_service = constants.RGW_SERVICE_EXTERNAL_MODE
        else:
            rgw_service = constants.RGW_SERVICE_INTERNAL_MODE
        log.info(f"Service {rgw_service} found and will be exposed")
        # custom hostname is provided because default hostname from rgw service
        # is too long and OCP rejects it
        oc = ocp.OCP(
            kind=constants.ROUTE, namespace=config.ENV_DATA["cluster_namespace"]
        )
        # Reuse the router canonical hostname from the noobaa-mgmt route
        route = oc.get(resource_name="noobaa-mgmt")
        router_hostname = route["status"]["ingress"][0]["routerCanonicalHostname"]
        rgw_hostname = f"rgw.{router_hostname}"
        oc.exec_oc_cmd(f"expose service/{rgw_service} --hostname {rgw_hostname}")
        # new route is named after service
        rgw_endpoint = oc.get(resource_name=rgw_service)
        endpoint_obj = OCS(**rgw_endpoint)

        def _finalizer():
            # Remove the route created above at session teardown
            endpoint_obj.delete()

        request.addfinalizer(_finalizer)
        return f"http://{rgw_hostname}"
    else:
        log.info("RGW service is not available")
# Scope-specific wrappers around the shared MCG resource implementation.
@pytest.fixture()
def mcg_obj(request):
    return mcg_obj_fixture(request)


@pytest.fixture(scope="session")
def mcg_obj_session(request):
    return mcg_obj_fixture(request)
def mcg_obj_fixture(request, *args, **kwargs):
    """
    Returns an MCG resource that's connected to the S3 endpoint

    Returns:
        MCG: An MCG resource, or None on OpenShift Dedicated
    """
    platform = config.ENV_DATA["platform"].lower()
    if platform == constants.OPENSHIFT_DEDICATED_PLATFORM:
        log.warning("As openshift dedicated is used, no MCG resource is returned")
        return None

    mcg = MCG(*args, **kwargs)

    def _delete_creds_request():
        # The credentials request object only exists on AWS deployments
        if config.ENV_DATA["platform"].lower() == "aws":
            mcg.cred_req_obj.delete()

    if kwargs.get("create_aws_creds"):
        request.addfinalizer(_delete_creds_request)
    return mcg
# Scope-specific wrappers around the shared AWSCLI pod implementation.
@pytest.fixture()
def awscli_pod(request):
    return awscli_pod_fixture(request, scope_name="function")


@pytest.fixture(scope="session")
def awscli_pod_session(request):
    return awscli_pod_fixture(request, scope_name="session")
def awscli_pod_fixture(request, scope_name):
    """
    Creates a new AWSCLI pod for relaying commands

    Args:
        scope_name (str): The name of the fixture's scope,
            used for giving a descriptive name to the pod and configmap

    Returns:
        pod: A pod running the AWS CLI
    """
    # Create the service-ca configmap to be mounted upon pod creation
    service_ca_data = templating.load_yaml(constants.AWSCLI_SERVICE_CA_YAML)
    service_ca_configmap_name = create_unique_resource_name(
        constants.AWSCLI_SERVICE_CA_CONFIGMAP_NAME, scope_name
    )
    service_ca_data["metadata"]["name"] = service_ca_configmap_name
    log.info("Trying to create the AWS CLI service CA")
    service_ca_configmap = helpers.create_resource(**service_ca_data)
    # Pick the pod template matching the node CPU architecture
    arch = get_system_architecture()
    if arch.startswith("x86"):
        pod_dict_path = constants.AWSCLI_POD_YAML
    else:
        pod_dict_path = constants.AWSCLI_MULTIARCH_POD_YAML
    awscli_pod_dict = templating.load_yaml(pod_dict_path)
    # Point the pod's configMap volume at the configmap created above
    awscli_pod_dict["spec"]["volumes"][0]["configMap"][
        "name"
    ] = service_ca_configmap_name
    awscli_pod_name = create_unique_resource_name(
        constants.AWSCLI_RELAY_POD_NAME, scope_name
    )
    awscli_pod_dict["metadata"]["name"] = awscli_pod_name
    update_container_with_mirrored_image(awscli_pod_dict)
    awscli_pod_obj = Pod(**awscli_pod_dict)
    assert awscli_pod_obj.create(
        do_reload=True
    ), f"Failed to create Pod {awscli_pod_name}"
    # Wait for the service-ca data to be injected before relying on the pod
    OCP(namespace=defaults.ROOK_CLUSTER_NAMESPACE, kind="ConfigMap").wait_for_resource(
        resource_name=service_ca_configmap.name, column="DATA", condition="1"
    )
    helpers.wait_for_resource_state(awscli_pod_obj, constants.STATUS_RUNNING)

    def _awscli_pod_cleanup():
        # Delete both resources created by this fixture
        awscli_pod_obj.delete()
        service_ca_configmap.delete()

    request.addfinalizer(_awscli_pod_cleanup)
    return awscli_pod_obj
@pytest.fixture()
def nodes():
    """
    Return an instance of the relevant platform nodes class
    (e.g. AWSNodes, VMWareNodes) to be later used in the test
    for nodes related operations, like nodes restart,
    detach/attach volume, etc.
    """
    return platform_nodes.PlatformNodesFactory().get_nodes_platform()
# Scope-specific wrappers around the shared uploaded-objects implementation.
@pytest.fixture()
def uploaded_objects(request, mcg_obj, awscli_pod, verify_rgw_restart_count):
    return uploaded_objects_fixture(
        request, mcg_obj, awscli_pod, verify_rgw_restart_count
    )


@pytest.fixture(scope="session")
def uploaded_objects_session(
    request, mcg_obj_session, awscli_pod_session, verify_rgw_restart_count_session
):
    return uploaded_objects_fixture(
        request, mcg_obj_session, awscli_pod_session, verify_rgw_restart_count_session
    )
def uploaded_objects_fixture(request, mcg_obj, awscli_pod, verify_rgw_restart_count):
    """
    Deletes all objects that were created as part of the test

    Args:
        mcg_obj (MCG): An MCG object containing the MCG S3 connection
            credentials
        awscli_pod (Pod): A pod running the AWSCLI tools

    Returns:
        list: An empty list of objects
    """
    object_paths = []

    def _delete_uploaded_objects():
        # Remove every object path the test appended to the shared list
        for object_path in object_paths:
            log.info(f"Deleting object {object_path}")
            awscli_pod.exec_cmd_on_pod(
                command=craft_s3_command("rm " + object_path, mcg_obj),
                secrets=[
                    mcg_obj.access_key_id,
                    mcg_obj.access_key,
                    mcg_obj.s3_internal_endpoint,
                ],
            )

    request.addfinalizer(_delete_uploaded_objects)
    return object_paths
# Scope-specific wrappers around the shared RGW restart-count check.
@pytest.fixture()
def verify_rgw_restart_count(request):
    return verify_rgw_restart_count_fixture(request)


@pytest.fixture(scope="session")
def verify_rgw_restart_count_session(request):
    return verify_rgw_restart_count_fixture(request)
def verify_rgw_restart_count_fixture(request):
    """
    Verifies the RGW restart count at start and end of a test
    """
    # RGW pods only exist on on-prem platforms
    if config.ENV_DATA["platform"].lower() not in constants.ON_PREM_PLATFORMS:
        return

    log.info("Getting RGW pod restart count before executing the test")
    initial_counts = get_rgw_restart_counts()

    def _check_restart_counts():
        pods = get_rgw_pods()
        for rgw_pod in pods:
            rgw_pod.reload()
        log.info("Verifying whether RGW pods changed after executing the test")
        for rgw_pod in pods:
            assert rgw_pod.restart_count in initial_counts, "RGW pod restarted"

    request.addfinalizer(_check_restart_counts)
# RGW-backed bucket factories; None when the cluster has no RGW deployment.
@pytest.fixture()
def rgw_bucket_factory(request, rgw_obj):
    if rgw_obj:
        return bucket_factory_fixture(request, rgw_obj=rgw_obj)
    else:
        return None


@pytest.fixture(scope="session")
def rgw_bucket_factory_session(request, rgw_obj_session):
    if rgw_obj_session:
        return bucket_factory_fixture(request, rgw_obj=rgw_obj_session)
    else:
        return None
@pytest.fixture()
def bucket_factory(request, bucket_class_factory, mcg_obj):
    """
    Returns an MCG bucket factory.
    If MCG object not found returns None
    """
    if mcg_obj:
        return bucket_factory_fixture(request, bucket_class_factory, mcg_obj)
    else:
        return None


@pytest.fixture(scope="session")
def bucket_factory_session(request, bucket_class_factory_session, mcg_obj_session):
    """
    Returns a session-scoped MCG bucket factory.
    If session-scoped MCG object not found returns None
    """
    if mcg_obj_session:
        return bucket_factory_fixture(
            request, bucket_class_factory_session, mcg_obj_session
        )
    else:
        return None
def bucket_factory_fixture(
    request, bucket_class_factory=None, mcg_obj=None, rgw_obj=None
):
    """
    Create a bucket factory. Calling this fixture creates a new bucket(s).
    For a custom amount, provide the 'amount' parameter.

    ***Please note***
    Creation of buckets by utilizing the S3 interface *does not* support bucketclasses.
    Only OC/CLI buckets can support different bucketclasses.
    By default, all S3 buckets utilize the default bucketclass.

    Args:
        bucket_class_factory: creates a new Bucket Class
        mcg_obj (MCG): An MCG object containing the MCG S3 connection
            credentials
        rgw_obj (RGW): An RGW object
    """
    # Shared across calls so that teardown deletes every bucket created
    created_buckets = []

    def _create_buckets(
        amount=1,
        interface="S3",
        verify_health=True,
        bucketclass=None,
        *args,
        **kwargs,
    ):
        """
        Creates and deletes all buckets that were created as part of the test

        Args:
            amount (int): The amount of buckets to create
            interface (str): The interface to use for creation of buckets.
                S3 | OC | CLI | NAMESPACE
            verify_health (bool): Whether to verify the created bucket's health
                post-creation
            bucketclass (dict): A dictionary describing a new
                bucketclass to be created.
                When None, the default bucketclass is used.

        Returns:
            list: A list of s3.Bucket objects, containing all the created
                buckets
        """
        if interface.lower() not in BUCKET_MAP:
            raise RuntimeError(
                f"Invalid interface type received: {interface}. "
                f'available types: {", ".join(BUCKET_MAP.keys())}'
            )
        # A provided bucketclass dict is materialized via the factory;
        # None is passed through (the default bucketclass is used)
        bucketclass = (
            bucketclass if bucketclass is None else bucket_class_factory(bucketclass)
        )
        for i in range(amount):
            bucket_name = helpers.create_unique_resource_name(
                resource_description="bucket", resource_type=interface.lower()
            )
            created_bucket = BUCKET_MAP[interface.lower()](
                bucket_name,
                mcg=mcg_obj,
                rgw=rgw_obj,
                bucketclass=bucketclass,
                *args,
                **kwargs,
            )
            created_buckets.append(created_bucket)
            if verify_health:
                created_bucket.verify_health()
        return created_buckets

    def bucket_cleanup():
        for bucket in created_buckets:
            log.info(f"Cleaning up bucket {bucket.name}")
            try:
                bucket.delete()
            except ClientError as e:
                # Tolerate buckets already removed by the test itself
                if e.response["Error"]["Code"] == "NoSuchBucket":
                    log.warning(f"{bucket.name} could not be found in cleanup")
                else:
                    raise

    request.addfinalizer(bucket_cleanup)
    return _create_buckets
@pytest.fixture(scope="class")
def cloud_uls_factory(request, cld_mgr):
    """
    Create an Underlying Storage factory.
    Calling this fixture creates a new underlying storage(s).

    Returns:
        func: Factory method - each call to this function creates
            an Underlying Storage factory
    """
    return cloud_uls_factory_implementation(request, cld_mgr)


@pytest.fixture(scope="session")
def cloud_uls_factory_session(request, cld_mgr):
    """
    Create an Underlying Storage factory.
    Calling this fixture creates a new underlying storage(s).

    Returns:
        func: Factory method - each call to this function creates
            an Underlying Storage factory
    """
    return cloud_uls_factory_implementation(request, cld_mgr)
@pytest.fixture(scope="function")
def mcg_job_factory(request, bucket_factory, project_factory, mcg_obj, tmp_path):
    """
    Create a Job factory.
    Calling this fixture creates a new Job(s) that utilize MCG bucket.

    Returns:
        func: Factory method - each call to this function creates
            a job
    """
    return mcg_job_factory_implementation(
        request, bucket_factory, project_factory, mcg_obj, tmp_path
    )


@pytest.fixture(scope="session")
def mcg_job_factory_session(
    request, bucket_factory_session, project_factory_session, mcg_obj_session, tmp_path
):
    """
    Create a Job factory.
    Calling this fixture creates a new Job(s) that utilize MCG bucket.

    Returns:
        func: Factory method - each call to this function creates
            a job
    """
    return mcg_job_factory_implementation(
        request,
        bucket_factory_session,
        project_factory_session,
        mcg_obj_session,
        tmp_path,
    )
@pytest.fixture()
def backingstore_factory(request, cld_mgr, mcg_obj, cloud_uls_factory):
    """
    Create a Backing Store factory.
    Calling this fixture creates a new Backing Store(s).

    Returns:
        func: Factory method - each call to this function creates
            a backingstore
        None: If MCG object not found
    """
    if mcg_obj:
        return backingstore_factory_implementation(
            request, cld_mgr, mcg_obj, cloud_uls_factory
        )
    else:
        return None


@pytest.fixture(scope="session")
def backingstore_factory_session(
    request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
):
    """
    Create a Backing Store factory.
    Calling this fixture creates a new Backing Store(s).

    Returns:
        func: Factory method - each call to this function creates
            a backingstore
        None: If session-scoped MCG object not found
    """
    if mcg_obj_session:
        return backingstore_factory_implementation(
            request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
        )
    else:
        return None
@pytest.fixture()
def bucket_class_factory(
    request, mcg_obj, backingstore_factory, namespace_store_factory
):
    """
    Create a Bucket Class factory.
    Calling this fixture creates a new Bucket Class.

    Returns:
        func: Factory method - each call to this function creates
            a bucketclass
        None: If MCG object not found
    """
    if mcg_obj:
        return bucketclass_factory_implementation(
            request, mcg_obj, backingstore_factory, namespace_store_factory
        )
    else:
        return None


@pytest.fixture(scope="session")
def bucket_class_factory_session(
    request,
    mcg_obj_session,
    backingstore_factory_session,
    namespace_store_factory_session,
):
    """
    Create a Bucket Class factory.
    Calling this fixture creates a new Bucket Class.

    Returns:
        func: Factory method - each call to this function creates
            a bucketclass
        None: If session-scoped MCG object not found
    """
    if mcg_obj_session:
        return bucketclass_factory_implementation(
            request,
            mcg_obj_session,
            backingstore_factory_session,
            namespace_store_factory_session,
        )
    else:
        return None
# Scope-specific wrappers around the shared multiregion-mirror setup.
@pytest.fixture()
def multiregion_mirror_setup(bucket_factory):
    return multiregion_mirror_setup_fixture(bucket_factory)


@pytest.fixture(scope="session")
def multiregion_mirror_setup_session(bucket_factory_session):
    return multiregion_mirror_setup_fixture(bucket_factory_session)
def multiregion_mirror_setup_fixture(bucket_factory):
    """
    Create a single mirrored NooBaa bucket backed by two AWS regions.

    Args:
        bucket_factory (func): Bucket factory (see bucket_factory_fixture)

    Returns:
        tuple: (bucket, list of the bucket class' backingstores)
    """
    # TODO: add region and amount parametrization - note that `us-east-1`
    # will cause an error as it is the default region. If usage of `us-east-1`
    # needs to be tested, keep the 'region' field out.
    mirror_bucketclass = {
        "interface": "CLI",
        "backingstore_dict": {"aws": [(1, "us-west-1"), (1, "us-east-2")]},
        "placement_policy": "Mirror",
    }

    # A NooBucket using this bucket class exercises the mirroring policy
    created = bucket_factory(1, "OC", bucketclass=mirror_bucketclass)
    mirrored_bucket = created[0]
    return mirrored_bucket, mirrored_bucket.bucketclass.backingstores
@pytest.fixture(scope="session")
def default_storageclasses(request, teardown_factory_session):
    """
    Returns dictionary with storageclasses. Keys represent reclaim policy of
    storageclass. There are two storageclasses for each key. First is RBD based
    and the second one is CephFS based. Storageclasses with Retain Reclaim
    Policy are created from default storageclasses.
    """
    scs = {constants.RECLAIM_POLICY_DELETE: [], constants.RECLAIM_POLICY_RETAIN: []}

    # TODO(fbalak): Use proper constants after
    # https://github.com/red-hat-storage/ocs-ci/issues/1056
    # is resolved
    for sc_name in ("ocs-storagecluster-ceph-rbd", "ocs-storagecluster-cephfs"):
        sc = OCS(kind=constants.STORAGECLASS, metadata={"name": sc_name})
        sc.reload()
        scs[constants.RECLAIM_POLICY_DELETE].append(sc)
        # NOTE(review): the same OCS object is appended to the DELETE list and
        # then mutated below into the '-retain' variant — both lists end up
        # holding the mutated object. Confirm this is the intended behavior.
        sc.data["reclaimPolicy"] = constants.RECLAIM_POLICY_RETAIN
        sc.data["metadata"]["name"] += "-retain"
        sc._name = sc.data["metadata"]["name"]
        sc.create()
        teardown_factory_session(sc)
        scs[constants.RECLAIM_POLICY_RETAIN].append(sc)
    return scs
@pytest.fixture(scope="class")
def install_logging(request):
    """
    Setup and teardown
    * The setup will deploy openshift-logging in the cluster
    * The teardown will uninstall cluster-logging from the cluster
    """

    def finalizer():
        uninstall_cluster_logging()

    request.addfinalizer(finalizer)

    # Skip installation when a logging CSV already exists
    csv = ocp.OCP(
        kind=constants.CLUSTER_SERVICE_VERSION,
        namespace=constants.OPENSHIFT_LOGGING_NAMESPACE,
    )
    logging_csv = csv.get().get("items")
    if logging_csv:
        log.info("Logging is already configured, Skipping Installation")
        return

    log.info("Configuring Openshift-logging")

    # Checks OCP version (used as the operator subscription channel)
    ocp_version = get_running_ocp_version()

    # Creates namespace opensift-operators-redhat
    ocp_logging_obj.create_namespace(yaml_file=constants.EO_NAMESPACE_YAML)

    # Creates an operator-group for elasticsearch
    assert ocp_logging_obj.create_elasticsearch_operator_group(
        yaml_file=constants.EO_OG_YAML, resource_name="openshift-operators-redhat"
    )

    # Set RBAC policy on the project
    assert ocp_logging_obj.set_rbac(
        yaml_file=constants.EO_RBAC_YAML, resource_name="prometheus-k8s"
    )

    # Creates subscription for elastic-search operator
    subscription_yaml = templating.load_yaml(constants.EO_SUB_YAML)
    subscription_yaml["spec"]["channel"] = ocp_version
    helpers.create_resource(**subscription_yaml)
    assert ocp_logging_obj.get_elasticsearch_subscription()

    # Creates a namespace openshift-logging
    ocp_logging_obj.create_namespace(yaml_file=constants.CL_NAMESPACE_YAML)

    # Creates an operator-group for cluster-logging
    assert ocp_logging_obj.create_clusterlogging_operator_group(
        yaml_file=constants.CL_OG_YAML
    )

    # Creates subscription for cluster-logging
    cl_subscription = templating.load_yaml(constants.CL_SUB_YAML)
    cl_subscription["spec"]["channel"] = ocp_version
    helpers.create_resource(**cl_subscription)
    assert ocp_logging_obj.get_clusterlogging_subscription()

    # Creates instance in namespace openshift-logging
    cluster_logging_operator = OCP(
        kind=constants.POD, namespace=constants.OPENSHIFT_LOGGING_NAMESPACE
    )
    log.info(f"The cluster-logging-operator {cluster_logging_operator.get()}")
    ocp_logging_obj.create_instance()
# Template fixtures for fio workloads, in function- and session-scoped pairs.
@pytest.fixture
def fio_pvc_dict():
    """
    PVC template for fio workloads.
    Note that all 'None' values needs to be defined before usage.
    """
    return fio_artefacts.get_pvc_dict()


@pytest.fixture(scope="session")
def fio_pvc_dict_session():
    """
    PVC template for fio workloads.
    Note that all 'None' values needs to be defined before usage.
    """
    return fio_artefacts.get_pvc_dict()


@pytest.fixture
def fio_configmap_dict():
    """
    ConfigMap template for fio workloads.
    Note that you need to add actual configuration to workload.fio file.
    """
    return fio_artefacts.get_configmap_dict()


@pytest.fixture(scope="session")
def fio_configmap_dict_session():
    """
    ConfigMap template for fio workloads.
    Note that you need to add actual configuration to workload.fio file.
    """
    return fio_artefacts.get_configmap_dict()


@pytest.fixture
def fio_job_dict():
    """
    Job template for fio workloads.
    """
    return fio_artefacts.get_job_dict()


@pytest.fixture(scope="session")
def fio_job_dict_session():
    """
    Job template for fio workloads.
    """
    return fio_artefacts.get_job_dict()
@pytest.fixture(scope="function")
def pgsql_factory_fixture(request):
    """
    Pgsql factory fixture
    """
    pgsql = Postgresql()

    # Register teardown up front so cleanup runs even if the factory fails
    def finalizer():
        """
        Clean up
        """
        pgsql.cleanup()

    request.addfinalizer(finalizer)

    def factory(
        replicas,
        clients=None,
        threads=None,
        transactions=None,
        scaling_factor=None,
        timeout=None,
        sc_name=None,
    ):
        """
        Factory to start pgsql workload

        Args:
            replicas (int): Number of pgbench pods to be deployed
            clients (int): Number of clients
            threads (int): Number of threads
            transactions (int): Number of transactions
            scaling_factor (int): scaling factor
            timeout (int): Time in seconds to wait

        Returns:
            Postgresql: the configured pgsql workload object
        """
        # Deploy postgres, then launch the pgbench benchmark against it
        pgsql.setup_postgresql(replicas=replicas, sc_name=sc_name)
        pgsql.create_pgbench_benchmark(
            replicas=replicas,
            clients=clients,
            threads=threads,
            transactions=transactions,
            scaling_factor=scaling_factor,
            timeout=timeout,
        )
        # Block until the pgbench pods have initialized and completed
        pgsql.wait_for_pgbench_status(status=constants.STATUS_COMPLETED)
        # Parse the pgbench pod logs and validate the run
        benchmark_pods = pgsql.get_pgbench_pods()
        pgsql.validate_pgbench_run(benchmark_pods)
        return pgsql

    return factory
@pytest.fixture(scope="function")
def jenkins_factory_fixture(request):
    """
    Jenkins factory fixture
    """
    jenkins = Jenkins()

    # Register teardown up front so cleanup runs even if the factory fails
    def finalizer():
        """
        Clean up
        """
        jenkins.cleanup()

    request.addfinalizer(finalizer)

    def factory(num_projects=1, num_of_builds=1):
        """
        Factory to start jenkins workload

        Args:
            num_projects (int): Number of Jenkins projects
            num_of_builds (int): Number of builds per project

        Returns:
            Jenkins: the configured jenkins workload object
        """
        # Create the OCS Jenkins template and the requested projects
        jenkins.create_ocs_jenkins_template()
        jenkins.number_projects = num_projects
        jenkins.create_app_jenkins()
        jenkins.create_jenkins_pvc()
        jenkins.create_jenkins_build_config()
        # Wait for the jenkins deploy pod to reach the completed state
        jenkins.wait_for_jenkins_deploy_status(status=constants.STATUS_COMPLETED)
        # Run the requested number of builds per project and wait for them
        jenkins.number_builds_per_project = num_of_builds
        jenkins.start_build()
        jenkins.wait_for_build_to_complete()
        jenkins.print_completed_builds_results()
        return jenkins

    return factory
@pytest.fixture(scope="function")
def couchbase_factory_fixture(request):
    """
    Couchbase factory fixture
    """
    couchbase = CouchBase()

    # Register teardown up front so cleanup runs even if the factory fails
    def finalizer():
        """
        Clean up
        """
        couchbase.teardown()

    request.addfinalizer(finalizer)

    def factory(replicas=3, run_in_bg=False, skip_analyze=True, sc_name=None):
        """
        Factory to start couchbase workload

        Args:
            replicas (int): Number of couchbase workers to be deployed
            run_in_bg (bool): Run IOs in background as option
            skip_analyze (bool): Skip logs analysis as option

        Returns:
            CouchBase: the configured couchbase workload object
        """
        # Deploy couchbase and its workers, then run the workload
        couchbase.setup_cb()
        couchbase.create_couchbase_worker(replicas=replicas, sc_name=sc_name)
        couchbase.run_workload(replicas=replicas, run_in_bg=run_in_bg)
        # Sanity check on the data logs (optionally skipped)
        couchbase.analyze_run(skip_analyze=skip_analyze)
        return couchbase

    return factory
@pytest.fixture(scope="function")
def amq_factory_fixture(request):
"""
AMQ factory fixture
"""
amq = AMQ()
def factory(
sc_name,
kafka_namespace=constants.AMQ_NAMESPACE,
size=100,
replicas=3,
topic_name="my-topic",
user_name="my-user",
partitions=1,
topic_replicas=1,
num_of_producer_pods=1,
num_of_consumer_pods=1,
value="10000",
since_time=1800,
):
"""
Factory to start amq workload
Args:
sc_name (str): Name of storage clase
kafka_namespace (str): Namespace where kafka cluster to be created
size (int): Size of the storage
replicas (int): Number of kafka and zookeeper pods to be created
topic_name (str): Name of the topic to be created
user_name (str): Name of the user to be created
partitions (int): Number of partitions of topic
topic_replicas (int): Number of replicas of topic
num_of_producer_pods (int): Number of producer pods to be created
num_of_consumer_pods (int): Number of consumer pods to be created
value (str): Number of messages to be sent and received
since_time (int): Number of seconds to required to sent the msg
"""
# Setup kafka cluster
amq.setup_amq_cluster(
sc_name=sc_name, namespace=kafka_namespace, size=size, replicas=replicas
)
# Run open messages
amq.create_messaging_on_amq(
topic_name=topic_name,
user_name=user_name,
partitions=partitions,
replicas=topic_replicas,
num_of_producer_pods=num_of_producer_pods,
num_of_consumer_pods=num_of_consumer_pods,
value=value,
)
# Wait for some time to generate msg
waiting_time = 60
log.info(f"Waiting for {waiting_time}sec to generate msg")
time.sleep(waiting_time)
# Check messages are sent and received
threads = amq.run_in_bg(
namespace=kafka_namespace, value=value, since_time=since_time
)
return amq, threads
def finalizer():
"""
Clean up
"""
# Clean up
amq.cleanup()
request.addfinalizer(finalizer)
return factory
@pytest.fixture
def measurement_dir(tmp_path):
    """
    Returns directory path where should be stored all results related
    to measurement. If 'measurement_dir' is provided by config then use it,
    otherwise new directory is generated.

    Args:
        tmp_path (pathlib.Path): pytest-provided per-test temporary directory

    Returns:
        str: Path to measurement directory
    """
    if config.ENV_DATA.get("measurement_dir"):
        measurement_dir = config.ENV_DATA.get("measurement_dir")
        log.info(f"Using measurement dir from configuration: {measurement_dir}")
    else:
        measurement_dir = os.path.join(os.path.dirname(tmp_path), "measurement_results")
    if not os.path.exists(measurement_dir):
        log.info(f"Measurement dir {measurement_dir} doesn't exist. Creating it.")
    # makedirs(exist_ok=True) removes the check-then-create race that the
    # previous exists()/mkdir() pair had when tests run in parallel, and it
    # also creates any missing parent directories.
    os.makedirs(measurement_dir, exist_ok=True)
    return measurement_dir
@pytest.fixture()
def multi_dc_pod(multi_pvc_factory, dc_pod_factory, service_account_factory):
    """
    Prepare multiple dc pods for the test

    Returns:
        list: Pod instances
    """
    def factory(
        num_of_pvcs=1,
        pvc_size=100,
        project=None,
        access_mode="RWO",
        pool_type="rbd",
        timeout=60,
    ):
        """
        Create ``num_of_pvcs`` PVCs and attach one DeploymentConfig pod to
        each of them in parallel, then wait for all pods to reach Running.

        Args:
            num_of_pvcs (int): Number of PVCs (and pods) to create
            pvc_size (int): Size of each PVC in GiB
            project (OCP): Project in which to create the resources
            access_mode (str): One of "RWO", "RWX" or "RWX-BLK"
            pool_type (str): "rbd" or "cephfs"
            timeout (int): Timeout for PVC creation

        Returns:
            list: Pod instances in Running state
        """
        dict_modes = {
            "RWO": "ReadWriteOnce",
            "RWX": "ReadWriteMany",
            "RWX-BLK": "ReadWriteMany-Block",
        }
        dict_types = {"rbd": "CephBlockPool", "cephfs": "CephFileSystem"}
        # Bug fix: use equality instead of substring matching. The previous
        # `access_mode in "RWX-BLK"` check wrongly routed plain "RWX"
        # requests (a substring of "RWX-BLK") into the raw-block branch.
        if access_mode == "RWX-BLK" and pool_type == "rbd":
            modes = dict_modes["RWX-BLK"]
            create_rbd_block_rwx_pod = True
        else:
            modes = dict_modes[access_mode]
            create_rbd_block_rwx_pod = False
        pvc_objs = multi_pvc_factory(
            interface=dict_types[pool_type],
            access_modes=[modes],
            size=pvc_size,
            num_of_pvc=num_of_pvcs,
            project=project,
            timeout=timeout,
        )
        dc_pods = []
        dc_pods_res = []
        sa_obj = service_account_factory(project=project)
        # Create the pods concurrently; raw-block RWX pods need
        # raw_block_pv=True and always use the RBD interface
        with ThreadPoolExecutor() as p:
            for pvc_obj in pvc_objs:
                if create_rbd_block_rwx_pod:
                    dc_pods_res.append(
                        p.submit(
                            dc_pod_factory,
                            interface=constants.CEPHBLOCKPOOL,
                            pvc=pvc_obj,
                            raw_block_pv=True,
                            sa_obj=sa_obj,
                        )
                    )
                else:
                    dc_pods_res.append(
                        p.submit(
                            dc_pod_factory,
                            interface=dict_types[pool_type],
                            pvc=pvc_obj,
                            sa_obj=sa_obj,
                        )
                    )
        for dc in dc_pods_res:
            pod_obj = dc.result()
            if create_rbd_block_rwx_pod:
                log.info(
                    "#### setting attribute pod_type since "
                    f"create_rbd_block_rwx_pod = {create_rbd_block_rwx_pod}"
                )
                setattr(pod_obj, "pod_type", "rbd_block_rwx")
            else:
                setattr(pod_obj, "pod_type", "")
            dc_pods.append(pod_obj)
        # Wait for all pods to reach Running in parallel
        with ThreadPoolExecutor() as p:
            for dc in dc_pods:
                p.submit(
                    helpers.wait_for_resource_state,
                    resource=dc,
                    state=constants.STATUS_RUNNING,
                    timeout=120,
                )
        return dc_pods
    return factory
@pytest.fixture(scope="session")
def htpasswd_path(tmpdir_factory):
"""
Returns:
string: Path to HTPasswd file with additional usernames
"""
return str(tmpdir_factory.mktemp("idp_data").join("users.htpasswd"))
@pytest.fixture(scope="session")
def htpasswd_identity_provider(request):
"""
Creates HTPasswd Identity provider.
Returns:
object: OCS object representing OCP OAuth object with HTPasswd IdP
"""
users.create_htpasswd_idp()
cluster = OCS(kind=constants.OAUTH, metadata={"name": "cluster"})
cluster.reload()
def finalizer():
"""
Remove HTPasswd IdP
"""
# TODO(fbalak): remove HTPasswd identityProvider
# cluster.ocp.patch(
# resource_name='cluster',
# params=f'[{ "op": "remove", "path": "/spec/identityProviders" }]'
# )
# users.delete_htpasswd_secret()
request.addfinalizer(finalizer)
return cluster
@pytest.fixture(scope="function")
def user_factory(request, htpasswd_identity_provider, htpasswd_path):
return users.user_factory(request, htpasswd_path)
@pytest.fixture(scope="session")
def user_factory_session(request, htpasswd_identity_provider, htpasswd_path):
return users.user_factory(request, htpasswd_path)
@pytest.fixture(autouse=True)
def log_alerts(request):
    """
    Log alerts at the beginning and end of each test case. At the end of test
    case print a difference: what new alerts are in place after the test is
    complete.
    """
    teardown = config.RUN["cli_params"].get("teardown")
    if teardown:
        # Nothing to compare during cluster teardown runs
        return
    alerts_before = []
    prometheus = None
    try:
        prometheus = PrometheusAPI()
    except Exception:
        log.exception("There was a problem with connecting to Prometheus")
    def _collect_alerts():
        # Return the list of currently firing (non-silenced, non-inhibited)
        # alerts, or False when Prometheus could not be queried. Note that a
        # failed PrometheusAPI() above leaves `prometheus` as None; the
        # resulting AttributeError is caught by the broad except here.
        try:
            alerts_response = prometheus.get(
                "alerts", payload={"silenced": False, "inhibited": False}
            )
            if alerts_response.ok:
                alerts = alerts_response.json().get("data").get("alerts")
                log.debug(f"Found alerts: {alerts}")
                return alerts
            else:
                log.warning(
                    f"There was a problem with collecting alerts for analysis: {alerts_response.text}"
                )
                return False
        except Exception:
            log.exception("There was a problem with collecting alerts for analysis")
            return False
    def _print_diff():
        # Finalizer: compare the alerts captured before the test with the
        # current ones and report only newly raised alerts.
        if alerts_before:
            alerts_after = _collect_alerts()
            if alerts_after:
                alerts_new = [
                    alert for alert in alerts_after if alert not in alerts_before
                ]
                if alerts_new:
                    log.warning("During test were raised new alerts")
                    log.warning(alerts_new)
    # Snapshot the alert state before the test body runs
    alerts_before = _collect_alerts()
    request.addfinalizer(_print_diff)
@pytest.fixture(scope="session", autouse=True)
def ceph_toolbox(request):
"""
This fixture initiates ceph toolbox pod for manually created deployment
and if it does not already exist.
"""
deploy = config.RUN["cli_params"]["deploy"]
teardown = config.RUN["cli_params"].get("teardown")
skip_ocs = config.ENV_DATA["skip_ocs_deployment"]
deploy_teardown = deploy or teardown
ocp_dedicated = (
config.ENV_DATA["platform"].lower() == constants.OPENSHIFT_DEDICATED_PLATFORM
)
if not (deploy_teardown or skip_ocs) or (ocp_dedicated and not deploy_teardown):
try:
# Creating toolbox pod
setup_ceph_toolbox()
except CommandFailed:
log.info("Failed to create toolbox")
@pytest.fixture(scope="function")
def node_drain_teardown(request):
"""
Tear down function after Node drain
"""
def finalizer():
"""
Make sure that all cluster's nodes are in 'Ready' state and if not,
change them back to 'Ready' state by marking them as schedulable
"""
scheduling_disabled_nodes = [
n.name
for n in get_node_objs()
if n.ocp.get_resource_status(n.name)
== constants.NODE_READY_SCHEDULING_DISABLED
]
if scheduling_disabled_nodes:
schedule_nodes(scheduling_disabled_nodes)
ceph_health_check(tries=60)
request.addfinalizer(finalizer)
@pytest.fixture(scope="function")
def node_restart_teardown(request, nodes):
"""
Make sure all nodes are up again
Make sure that all cluster's nodes are in 'Ready' state and if not,
change them back to 'Ready' state by restarting the nodes
"""
def finalizer():
# Start the powered off nodes
nodes.restart_nodes_by_stop_and_start_teardown()
try:
node.wait_for_nodes_status(status=constants.NODE_READY)
except ResourceWrongStatusException:
# Restart the nodes if in NotReady state
not_ready_nodes = [
n
for n in node.get_node_objs()
if n.ocp.get_resource_status(n.name) == constants.NODE_NOT_READY
]
if not_ready_nodes:
log.info(
f"Nodes in NotReady status found: {[n.name for n in not_ready_nodes]}"
)
nodes.restart_nodes(not_ready_nodes)
node.wait_for_nodes_status(status=constants.NODE_READY)
request.addfinalizer(finalizer)
@pytest.fixture()
def mcg_connection_factory(request, mcg_obj, cld_mgr):
    """
    Create a new MCG connection for given platform. If there already exists
    a connection for the platform then return this previously created
    connection.
    """
    # Per-platform cache: at most one connection is created per platform
    created_connections = {}
    def _create_connection(platform=constants.AWS_PLATFORM, name=None):
        """
        Args:
            platform (str): Platform used for connection
            name (str): New connection name. If not provided then new name will
                be generated. New name will be used only if there is not
                existing connection for given platform

        Returns:
            str: connection name
        """
        if platform not in created_connections:
            connection_name = name or create_unique_resource_name(
                constants.MCG_CONNECTION, platform
            )
            mcg_obj.create_connection(cld_mgr, platform, connection_name)
            created_connections[platform] = connection_name
        return created_connections[platform]
    def _connections_cleanup():
        # Delete every connection created during the test
        for platform in created_connections:
            mcg_obj.delete_ns_connection(created_connections[platform])
    request.addfinalizer(_connections_cleanup)
    return _create_connection
@pytest.fixture()
def ns_resource_factory(
    request, mcg_obj, cld_mgr, cloud_uls_factory, mcg_connection_factory
):
    """
    Create a namespace resource factory. Calling this fixture creates a new namespace resource.
    """
    # Track created resources so the finalizer can delete them
    created_ns_resources = []
    def _create_ns_resources(platform=constants.AWS_PLATFORM):
        """
        Create one namespace resource (and its connection) on the given
        platform and validate it.

        Returns:
            tuple: (target bucket name, namespace resource name)
        """
        # Create random connection_name
        rand_connection = mcg_connection_factory(platform)
        # Create the actual namespace resource
        rand_ns_resource = create_unique_resource_name(
            constants.MCG_NS_RESOURCE, platform
        )
        if platform == constants.RGW_PLATFORM:
            region = None
        else:
            # TODO: fix this when https://github.com/red-hat-storage/ocs-ci/issues/3338
            # is resolved
            region = "us-east-2"
        target_bucket_name = mcg_obj.create_namespace_resource(
            rand_ns_resource,
            rand_connection,
            region,
            cld_mgr,
            cloud_uls_factory,
            platform,
        )
        log.info(f"Check validity of NS resource {rand_ns_resource}")
        # Pick the endpoint matching the platform; RGW resolves its endpoint
        # from the cluster credentials
        if platform == constants.AWS_PLATFORM:
            endpoint = constants.MCG_NS_AWS_ENDPOINT
        elif platform == constants.AZURE_PLATFORM:
            endpoint = constants.MCG_NS_AZURE_ENDPOINT
        elif platform == constants.RGW_PLATFORM:
            rgw_conn = RGW()
            endpoint, _, _ = rgw_conn.get_credentials()
        else:
            raise UnsupportedPlatformError(f"Unsupported Platform: {platform}")
        mcg_obj.check_ns_resource_validity(
            rand_ns_resource, target_bucket_name, endpoint
        )
        created_ns_resources.append(rand_ns_resource)
        return target_bucket_name, rand_ns_resource
    def ns_resources_cleanup():
        # Delete every namespace resource created during the test
        for ns_resource in created_ns_resources:
            mcg_obj.delete_ns_resource(ns_resource)
    request.addfinalizer(ns_resources_cleanup)
    return _create_ns_resources
@pytest.fixture()
def namespace_store_factory(request, cld_mgr, mcg_obj, cloud_uls_factory):
    """
    Create a Namespace Store factory.
    Calling this fixture creates a new Namespace Store(s).

    Returns:
        func: Factory method - each call to this function creates
            a namespacestore
    """
    # Delegates to the shared implementation with function-scoped
    # dependencies; cleanup is registered inside the implementation.
    return namespacestore_factory_implementation(
        request, cld_mgr, mcg_obj, cloud_uls_factory
    )
@pytest.fixture(scope="session")
def namespace_store_factory_session(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
):
"""
Create a Namespace Store factory.
Calling this fixture creates a new Namespace Store(s).
Returns:
func: Factory method - each call to this function creates
a namespacestore
"""
return namespacestore_factory_implementation(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session
)
@pytest.fixture()
def snapshot_factory(request):
    """
    Snapshot factory. Calling this fixture creates a volume snapshot from the
    specified PVC
    """
    # Track created snapshots so the finalizer can delete them
    instances = []
    def factory(pvc_obj, wait=True, snapshot_name=None):
        """
        Args:
            pvc_obj (PVC): PVC object from which snapshot has to be created
            wait (bool): True to wait for snapshot to be ready, False otherwise
            snapshot_name (str): Name to be provided for snapshot

        Returns:
            OCS: OCS instance of kind VolumeSnapshot
        """
        snap_obj = pvc_obj.create_snapshot(snapshot_name=snapshot_name, wait=wait)
        # Bug fix: register the snapshot for teardown. Previously nothing was
        # ever appended here, so the finalizer iterated an empty list and the
        # created snapshots leaked after the test.
        instances.append(snap_obj)
        return snap_obj
    def finalizer():
        """
        Delete the snapshots
        """
        snapcontent_objs = []
        # Get VolumeSnapshotContent form VolumeSnapshots and delete
        # VolumeSnapshots
        for instance in instances:
            if not instance.is_deleted:
                snapcontent_objs.append(
                    helpers.get_snapshot_content_obj(snap_obj=instance)
                )
                instance.delete()
                instance.ocp.wait_for_delete(instance.name)
        # Wait for VolumeSnapshotContents to be deleted
        for snapcontent_obj in snapcontent_objs:
            snapcontent_obj.ocp.wait_for_delete(
                resource_name=snapcontent_obj.name, timeout=240
            )
    request.addfinalizer(finalizer)
    return factory
@pytest.fixture()
def multi_snapshot_factory(snapshot_factory):
    """
    Snapshot factory. Calling this fixture creates volume snapshots of each
    PVC in the provided list
    """
    def factory(pvc_obj, wait=True, snapshot_name_suffix=None):
        """
        Args:
            pvc_obj (list): List PVC object from which snapshot has to be created
            wait (bool): True to wait for snapshot to be ready, False otherwise
            snapshot_name_suffix (str): Suffix to be added to snapshot

        Returns:
            OCS: List of OCS instances of kind VolumeSnapshot
        """
        snapshot = []
        for obj in pvc_obj:
            log.info(f"Creating snapshot of PVC {obj.name}")
            if snapshot_name_suffix:
                snapshot_name = f"{obj.name}-{snapshot_name_suffix}"
            else:
                snapshot_name = None
            snapshot.append(
                snapshot_factory(pvc_obj=obj, snapshot_name=snapshot_name, wait=wait)
            )
        return snapshot
    return factory
@pytest.fixture()
def snapshot_restore_factory(request):
    """
    Snapshot restore factory. Calling this fixture creates new PVC out of the
    specified VolumeSnapshot.
    """
    # Track restored PVCs so the finalizer can delete them
    instances = []
    def factory(
        snapshot_obj,
        restore_pvc_name=None,
        storageclass=None,
        size=None,
        volume_mode=None,
        restore_pvc_yaml=None,
        access_mode=constants.ACCESS_MODE_RWO,
        status=constants.STATUS_BOUND,
    ):
        """
        Args:
            snapshot_obj (OCS): OCS instance of kind VolumeSnapshot which has
                to be restored to new PVC
            restore_pvc_name (str): Name to be provided for restored pvc
            storageclass (str): Name of storageclass
            size (str): Size of PVC being created. eg: 5Gi. Ideally, this
                should be same as the restore size of snapshot. Adding this
                parameter to consider negative test scenarios.
            volume_mode (str): Volume mode for PVC. This should match the
                volume mode of parent PVC.
            restore_pvc_yaml (str): The location of pvc-restore.yaml
            access_mode (str): This decides the access mode to be used for the
                PVC. ReadWriteOnce is default.
            status (str): If provided then factory waits for the PVC to reach
                desired state.

        Returns:
            PVC: Restored PVC object
        """
        snapshot_info = snapshot_obj.get()
        # Default the restore size to the snapshot's recorded restoreSize
        size = size or snapshot_info["status"]["restoreSize"]
        restore_pvc_name = restore_pvc_name or (
            helpers.create_unique_resource_name(snapshot_obj.name, "restore")
        )
        # Derive storage class / restore yaml / interface from the snapshot
        # class (RBD vs CephFS).
        # NOTE(review): if the snapshot class matches neither default class,
        # storageclass/restore_pvc_yaml/interface stay unbound and the call
        # below raises NameError — confirm whether other classes can occur.
        if snapshot_info["spec"]["volumeSnapshotClassName"] == (
            helpers.default_volumesnapshotclass(constants.CEPHBLOCKPOOL).name
        ):
            storageclass = (
                storageclass
                or helpers.default_storage_class(constants.CEPHBLOCKPOOL).name
            )
            restore_pvc_yaml = restore_pvc_yaml or constants.CSI_RBD_PVC_RESTORE_YAML
            interface = constants.CEPHBLOCKPOOL
        elif snapshot_info["spec"]["volumeSnapshotClassName"] == (
            helpers.default_volumesnapshotclass(constants.CEPHFILESYSTEM).name
        ):
            storageclass = (
                storageclass
                or helpers.default_storage_class(constants.CEPHFILESYSTEM).name
            )
            restore_pvc_yaml = restore_pvc_yaml or constants.CSI_CEPHFS_PVC_RESTORE_YAML
            interface = constants.CEPHFILESYSTEM
        restored_pvc = create_restore_pvc(
            sc_name=storageclass,
            snap_name=snapshot_obj.name,
            namespace=snapshot_obj.namespace,
            size=size,
            pvc_name=restore_pvc_name,
            volume_mode=volume_mode,
            restore_pvc_yaml=restore_pvc_yaml,
            access_mode=access_mode,
        )
        instances.append(restored_pvc)
        # Attach bookkeeping attributes used by callers
        restored_pvc.snapshot = snapshot_obj
        restored_pvc.interface = interface
        if status:
            helpers.wait_for_resource_state(restored_pvc, status)
        return restored_pvc
    def finalizer():
        """
        Delete the PVCs
        """
        pv_objs = []
        # Get PV form PVC instances and delete PVCs
        for instance in instances:
            if not instance.is_deleted:
                pv_objs.append(instance.backed_pv_obj)
                instance.delete()
                instance.ocp.wait_for_delete(instance.name)
        # Wait for PVs to delete
        helpers.wait_for_pv_delete(pv_objs)
    request.addfinalizer(finalizer)
    return factory
@pytest.fixture()
def multi_snapshot_restore_factory(snapshot_restore_factory):
    """
    Snapshot restore factory. Calling this fixture creates set of new PVC out of the
    each VolumeSnapshot provided in the list.
    """
    def factory(
        snapshot_obj,
        restore_pvc_suffix=None,
        storageclass=None,
        size=None,
        volume_mode=None,
        restore_pvc_yaml=None,
        access_mode=constants.ACCESS_MODE_RWO,
        status=constants.STATUS_BOUND,
        wait_each=False,
    ):
        """
        Args:
            snapshot_obj (list): List OCS instance of kind VolumeSnapshot which has
                to be restored to new PVC
            restore_pvc_suffix (str): Suffix to be added to pvc name
            storageclass (str): Name of storageclass
            size (str): Size of PVC being created. eg: 5Gi. Ideally, this
                should be same as the restore size of snapshot. Adding this
                parameter to consider negative test scenarios.
            volume_mode (str): Volume mode for PVC. This should match the
                volume mode of parent PVC.
            restore_pvc_yaml (str): The location of pvc-restore.yaml
            access_mode (str): This decides the access mode to be used for the
                PVC. ReadWriteOnce is default.
            status (str): If provided then factory waits for the PVC to reach
                desired state.
            wait_each(bool): True to wait for each PVC to be in status 'status'
                before creating next PVC, False otherwise

        Returns:
            PVC: List of restored PVC object
        """
        new_pvcs = []
        # When not waiting per-PVC, create everything first and wait at the end
        status_tmp = status if wait_each else ""
        for snap_obj in snapshot_obj:
            log.info(f"Creating a PVC from snapshot {snap_obj.name}")
            restore_pvc_name = (
                f"{snap_obj.name}-{restore_pvc_suffix}" if restore_pvc_suffix else None
            )
            restored_pvc = snapshot_restore_factory(
                snapshot_obj=snap_obj,
                restore_pvc_name=restore_pvc_name,
                storageclass=storageclass,
                size=size,
                volume_mode=volume_mode,
                restore_pvc_yaml=restore_pvc_yaml,
                access_mode=access_mode,
                status=status_tmp,
            )
            # Bug fix: associate each restored PVC with its own snapshot
            # object. Previously the whole input list was assigned here,
            # which clobbered the correct per-PVC `.snapshot` attribute set
            # by snapshot_restore_factory.
            restored_pvc.snapshot = snap_obj
            new_pvcs.append(restored_pvc)
        if status and not wait_each:
            for restored_pvc in new_pvcs:
                helpers.wait_for_resource_state(restored_pvc, status)
        return new_pvcs
    return factory
@pytest.fixture(scope="session", autouse=True)
def collect_logs_fixture(request):
"""
This fixture collects ocs logs after tier execution and this will allow
to see the cluster's status after the execution on all execution status options.
"""
def finalizer():
"""
Tracking both logs separately reduce changes of collision
"""
if not config.RUN["cli_params"].get("deploy") and not config.RUN[
"cli_params"
].get("teardown"):
if config.REPORTING["collect_logs_on_success_run"]:
collect_ocs_logs("testcases", ocs=False, status_failure=False)
collect_ocs_logs("testcases", ocp=False, status_failure=False)
request.addfinalizer(finalizer)
def get_ready_noobaa_endpoint_count(namespace):
    """
    Get the number of ready nooobaa endpoints

    Args:
        namespace (str): Namespace in which to look for endpoint pods

    Returns:
        int: Count of endpoint pods whose first container reports ready
    """
    endpoint_pods = get_pods_having_label(
        label=constants.NOOBAA_ENDPOINT_POD_LABEL, namespace=namespace
    )
    def _is_ready(pod_info):
        statuses = pod_info.get("status", {}).get("containerStatuses")
        return bool(statuses) and bool(statuses[0].get("ready"))
    return sum(1 for pod_info in endpoint_pods if _is_ready(pod_info))
@pytest.fixture(scope="function")
def nb_ensure_endpoint_count(request):
"""
Validate and ensure the number of running noobaa endpoints
"""
cls = request.cls
min_ep_count = cls.MIN_ENDPOINT_COUNT
max_ep_count = cls.MAX_ENDPOINT_COUNT
assert min_ep_count <= max_ep_count
namespace = defaults.ROOK_CLUSTER_NAMESPACE
should_wait = False
# prior to 4.6 we configured the ep count directly on the noobaa cr.
if float(config.ENV_DATA["ocs_version"]) < 4.6:
noobaa = OCP(kind="noobaa", namespace=namespace)
resource = noobaa.get()["items"][0]
endpoints = resource.get("spec", {}).get("endpoints", {})
if endpoints.get("minCount", -1) != min_ep_count:
log.info(f"Changing minimum Noobaa endpoints to {min_ep_count}")
params = f'{{"spec":{{"endpoints":{{"minCount":{min_ep_count}}}}}}}'
noobaa.patch(resource_name="noobaa", params=params, format_type="merge")
should_wait = True
if endpoints.get("maxCount", -1) != max_ep_count:
log.info(f"Changing maximum Noobaa endpoints to {max_ep_count}")
params = f'{{"spec":{{"endpoints":{{"maxCount":{max_ep_count}}}}}}}'
noobaa.patch(resource_name="noobaa", params=params, format_type="merge")
should_wait = True
else:
storage_cluster = OCP(kind=constants.STORAGECLUSTER, namespace=namespace)
resource = storage_cluster.get()["items"][0]
resource_name = resource["metadata"]["name"]
endpoints = (
resource.get("spec", {}).get("multiCloudGateway", {}).get("endpoints", {})
)
if endpoints.get("minCount", -1) != min_ep_count:
log.info(f"Changing minimum Noobaa endpoints to {min_ep_count}")
params = f'{{"spec":{{"multiCloudGateway":{{"endpoints":{{"minCount":{min_ep_count}}}}}}}}}'
storage_cluster.patch(
resource_name=resource_name, params=params, format_type="merge"
)
should_wait = True
if endpoints.get("maxCount", -1) != max_ep_count:
log.info(f"Changing maximum Noobaa endpoints to {max_ep_count}")
params = f'{{"spec":{{"multiCloudGateway":{{"endpoints":{{"maxCount":{max_ep_count}}}}}}}}}'
storage_cluster.patch(
resource_name=resource_name, params=params, format_type="merge"
)
should_wait = True
if should_wait:
# Wait for the NooBaa endpoint pods to stabilize
try:
for ready_nb_ep_count in TimeoutSampler(
300, 30, get_ready_noobaa_endpoint_count, namespace
):
if min_ep_count <= ready_nb_ep_count <= max_ep_count:
log.info(
f"NooBaa endpoints stabilized. Ready endpoints: {ready_nb_ep_count}"
)
break
log.info(
f"Waiting for the NooBaa endpoints to stabilize. "
f"Current ready count: {ready_nb_ep_count}"
)
except TimeoutExpiredError:
raise TimeoutExpiredError(
"NooBaa endpoints did not stabilize in time.\n"
f"Min count: {min_ep_count}, max count: {max_ep_count}, ready count: {ready_nb_ep_count}"
)
@pytest.fixture()
def pvc_clone_factory(request):
    """
    Calling this fixture creates a clone from the specified PVC
    """
    # Track created clones so the finalizer can delete them
    instances = []
    def factory(
        pvc_obj,
        status=constants.STATUS_BOUND,
        clone_name=None,
        storageclass=None,
        size=None,
        access_mode=None,
        volume_mode=None,
    ):
        """
        Args:
            pvc_obj (PVC): PVC object from which clone has to be created
            status (str): If provided then factory waits for cloned PVC to
                reach the desired state
            clone_name (str): Name to be provided for cloned PVC
            storageclass (str): storage class to be used for cloned PVC
            size (int): The requested size for the cloned PVC. This should
                be same as the size of parent PVC for a successful clone
            access_mode (str): This decides the access mode to be used for
                the cloned PVC. eg: ReadWriteOnce, ReadOnlyMany, ReadWriteMany
            volume_mode (str): Volume mode for PVC. This should match the
                volume mode of parent PVC

        Returns:
            PVC: PVC instance
        """
        assert (
            pvc_obj.provisioner in constants.OCS_PROVISIONERS
        ), f"Unknown provisioner in PVC {pvc_obj.name}"
        # Pick the clone yaml and interface according to the provisioner.
        # NOTE(review): a provisioner that is in OCS_PROVISIONERS but matches
        # neither branch would leave clone_yaml/interface unbound — confirm
        # OCS_PROVISIONERS only contains these two values.
        if pvc_obj.provisioner == "openshift-storage.rbd.csi.ceph.com":
            clone_yaml = constants.CSI_RBD_PVC_CLONE_YAML
            interface = constants.CEPHBLOCKPOOL
        elif pvc_obj.provisioner == "openshift-storage.cephfs.csi.ceph.com":
            clone_yaml = constants.CSI_CEPHFS_PVC_CLONE_YAML
            interface = constants.CEPHFILESYSTEM
        # Default every unspecified parameter from the parent PVC
        size = size or pvc_obj.get().get("spec").get("resources").get("requests").get(
            "storage"
        )
        storageclass = storageclass or pvc_obj.backed_sc
        access_mode = access_mode or pvc_obj.get_pvc_access_mode
        volume_mode = volume_mode or getattr(pvc_obj, "volume_mode", None)
        # Create clone
        clone_pvc_obj = pvc.create_pvc_clone(
            sc_name=storageclass,
            parent_pvc=pvc_obj.name,
            clone_yaml=clone_yaml,
            pvc_name=clone_name,
            storage_size=size,
            access_mode=access_mode,
            volume_mode=volume_mode,
        )
        instances.append(clone_pvc_obj)
        # Attach bookkeeping attributes used by callers
        clone_pvc_obj.parent = pvc_obj
        clone_pvc_obj.volume_mode = volume_mode
        clone_pvc_obj.interface = interface
        if status:
            helpers.wait_for_resource_state(clone_pvc_obj, status)
        return clone_pvc_obj
    def finalizer():
        """
        Delete the cloned PVCs
        """
        pv_objs = []
        # Get PV form PVC instances and delete PVCs
        for instance in instances:
            if not instance.is_deleted:
                pv_objs.append(instance.backed_pv_obj)
                instance.delete()
                instance.ocp.wait_for_delete(instance.name)
        # Wait for PVs to delete
        helpers.wait_for_pv_delete(pv_objs)
    request.addfinalizer(finalizer)
    return factory
@pytest.fixture(scope="session", autouse=True)
def reportportal_customization(request):
if hasattr(request.node.config, "py_test_service"):
rp_service = request.node.config.py_test_service
if not hasattr(rp_service.RP, "rp_client"):
request.config._metadata[
"RP Launch URL:"
] = "Problem with RP, launch URL is not available!"
return
launch_id = rp_service.RP.rp_client.launch_id
project = rp_service.RP.rp_client.project
endpoint = rp_service.RP.rp_client.endpoint
launch_url = f"{endpoint}/ui/#{project}/launches/all/{launch_id}/{launch_id}"
config.REPORTING["rp_launch_url"] = launch_url
config.REPORTING["rp_launch_id"] = launch_id
config.REPORTING["rp_endpoint"] = endpoint
config.REPORTING["rp_project"] = project
request.config._metadata["RP Launch URL:"] = launch_url
@pytest.fixture()
def multi_pvc_clone_factory(pvc_clone_factory):
    """
    Calling this fixture creates clone from each PVC in the provided list of PVCs
    """
    def factory(
        pvc_obj,
        status=constants.STATUS_BOUND,
        clone_name=None,
        storageclass=None,
        size=None,
        access_mode=None,
        volume_mode=None,
        wait_each=False,
    ):
        """
        Args:
            pvc_obj (list): List PVC object from which clone has to be created
            status (str): If provided then factory waits for cloned PVC to
                reach the desired state
            clone_name (str): Name to be provided for cloned PVC
            storageclass (str): storage class to be used for cloned PVC
            size (int): The requested size for the cloned PVC. This should
                be same as the size of parent PVC for a successful clone
            access_mode (str): This decides the access mode to be used for
                the cloned PVC. eg: ReadWriteOnce, ReadOnlyMany, ReadWriteMany
            volume_mode (str): Volume mode for PVC. This should match the
                volume mode of parent PVC
            wait_each(bool): True to wait for each PVC to be in status 'status'
                before creating next PVC, False otherwise

        Returns:
            PVC: List PVC instance
        """
        cloned_pvcs = []
        # When not waiting per-PVC, create everything first and wait at the end
        status_tmp = status if wait_each else ""
        for obj in pvc_obj:
            # Create clone
            clone_pvc_obj = pvc_clone_factory(
                pvc_obj=obj,
                clone_name=clone_name,
                storageclass=storageclass,
                size=size,
                access_mode=access_mode,
                volume_mode=volume_mode,
                status=status_tmp,
            )
            cloned_pvcs.append(clone_pvc_obj)
        if status and not wait_each:
            for cloned_pvc in cloned_pvcs:
                helpers.wait_for_resource_state(cloned_pvc, status)
        return cloned_pvcs
    return factory
@pytest.fixture(scope="function")
def multiple_snapshot_and_clone_of_postgres_pvc_factory(
request,
multi_snapshot_factory,
multi_snapshot_restore_factory,
multi_pvc_clone_factory,
):
"""
Calling this fixture creates multiple snapshots & clone of postgres PVC
"""
instances = []
def factory(pvc_size_new, pgsql):
"""
Args:
pvc_size_new (int): Resize/Expand the pvc size
pgsql (obj): Pgsql obj
Returns:
Postgres pod: Pod instances
"""
# Get postgres pvc list obj
postgres_pvcs_obj = pgsql.get_postgres_pvc()
snapshots = multi_snapshot_factory(pvc_obj=postgres_pvcs_obj)
log.info("Created snapshots from all the PVCs and snapshots are in Ready state")
restored_pvc_objs = multi_snapshot_restore_factory(snapshot_obj=snapshots)
log.info("Created new PVCs from all the snapshots")
cloned_pvcs = multi_pvc_clone_factory(
pvc_obj=restored_pvc_objs, volume_mode=constants.VOLUME_MODE_FILESYSTEM
)
log.info("Created new PVCs from all restored volumes")
# Attach a new pgsql pod cloned pvcs
sset_list = pgsql.attach_pgsql_pod_to_claim_pvc(
pvc_objs=cloned_pvcs, postgres_name="postgres-clone", run_benchmark=False
)
instances.extend(sset_list)
# Resize cloned PVCs
for pvc_obj in cloned_pvcs:
log.info(f"Expanding size of PVC {pvc_obj.name} to {pvc_size_new}G")
pvc_obj.resize_pvc(pvc_size_new, True)
new_snapshots = multi_snapshot_factory(pvc_obj=cloned_pvcs)
log.info(
"Created snapshots from all the cloned PVCs"
" and snapshots are in Ready state"
)
new_restored_pvc_objs = multi_snapshot_restore_factory(
snapshot_obj=new_snapshots
)
log.info("Created new PVCs from all the snapshots and in Bound state")
# Attach a new pgsql pod restored pvcs
pgsql_obj_list = pgsql.attach_pgsql_pod_to_claim_pvc(
pvc_objs=new_restored_pvc_objs,
postgres_name="postgres-clone-restore",
run_benchmark=False,
)
instances.extend(pgsql_obj_list)
# Resize restored PVCs
for pvc_obj in new_restored_pvc_objs:
log.info(f"Expanding size of PVC {pvc_obj.name} to {pvc_size_new}G")
pvc_obj.resize_pvc(pvc_size_new, True)
return instances
def finalizer():
"""
Delete the list of pod objects created
"""
for instance in instances:
if not instance.is_deleted:
instance.delete()
instance.ocp.wait_for_delete(instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture()
def es(request):
    """
    Create In-cluster elastic-search deployment for benchmark-operator tests.
    using the name es - as shortcut for elastic-search for simplicity
    """
    # Instantiate before registering the finalizer: with the previous
    # ordering, a failing ElasticSearch() constructor would run teardown()
    # against an unbound name and mask the original error with a NameError.
    es = ElasticSearch()
    def teardown():
        es.cleanup()
    request.addfinalizer(teardown)
    return es
@pytest.fixture(scope="function")
def setup_ui(request):
driver = login_ui()
def finalizer():
close_browser(driver)
request.addfinalizer(finalizer)
return driver
@pytest.fixture(scope="session", autouse=True)
def load_cluster_info_file(request):
"""
This fixture tries to load cluster_info.json file if exists (on cluster
installed via Flexy) and apply the information to the config object (for
example related to disconnected cluster)
"""
load_cluster_info()
@pytest.fixture(scope="function")
def ripsaw(request):
# Create benchmark Operator (formerly ripsaw)
ripsaw = RipSaw()
def teardown():
ripsaw.cleanup()
time.sleep(10)
request.addfinalizer(teardown)
return ripsaw
| 32.871421 | 166 | 0.637119 |
ace6e828a09957e8459441b2aeabea3c98109d41 | 1,933 | py | Python | UsefulTools/Detect/Tools/eval/recallV2.py | CharlesPikachu/CharlesFace | 90bfe38c58068228d0069dce43b55b2570acaa16 | [
"MIT"
] | 13 | 2018-05-23T07:07:28.000Z | 2021-05-28T07:37:30.000Z | UsefulTools/Detect/Tools/eval/recallV2.py | CharlesPikachu/CharlesFace | 90bfe38c58068228d0069dce43b55b2570acaa16 | [
"MIT"
] | null | null | null | UsefulTools/Detect/Tools/eval/recallV2.py | CharlesPikachu/CharlesFace | 90bfe38c58068228d0069dce43b55b2570acaa16 | [
"MIT"
] | null | null | null | # paper:
# yolo1: https://arxiv.org/abs/1506.02640
# yolo2: https://arxiv.org/abs/1612.08242
# yolo3: https://pjreddie.com/media/files/papers/YOLOv3.pdf
# Author: Charles
from PIL import Image
from utils.standard_utils import *
from nets.darknet import Darknet
# compute recall
def eval_list(cfgfile, weightfile, imglist, use_cuda=True):
    """Measure detection recall/precision over an image list.

    Loads a Darknet model from ``cfgfile``/``weightfile``, runs detection on
    every image listed in ``imglist`` (one path per line, lines starting with
    '#' are skipped) and prints running IOU / recall / precision / F-score
    after each image.

    Ground-truth label files are found by mirroring the images/JPEGImages
    directory layout into a ``labels`` directory with ``.txt`` files.
    """
    m = Darknet(cfgfile)
    m.eval()
    m.load_weights(weightfile)
    eval_wid = m.width
    eval_hei = m.height
    if use_cuda:
        m.cuda()
    # Detection and matching thresholds.
    conf_thresh = 0.25
    nms_thresh = 0.4
    iou_thresh = 0.5
    min_box_scale = 8. / m.width
    with open(imglist) as fp:
        lines = fp.readlines()
    total = 0.0       # ground-truth boxes seen so far
    proposals = 0.0   # predictions above conf_thresh
    correct = 0.0     # predictions matched to a truth box (IoU > iou_thresh)
    lineId = 0
    avg_iou = 0.0
    for line in lines:
        img_path = line.rstrip()
        # Robustness fix: also skip blank lines (the original indexed
        # img_path[0] unconditionally and crashed on an empty line).
        if not img_path or img_path[0] == '#':
            continue
        lineId = lineId + 1
        lab_path = img_path.replace('images', 'labels')
        lab_path = lab_path.replace('JPEGImages', 'labels')
        lab_path = lab_path.replace('.jpg', '.txt').replace('.png', '.txt')
        truths = read_truths_args(lab_path, min_box_scale)
        img = Image.open(img_path).convert('RGB').resize((eval_wid, eval_hei))
        boxes = do_detect(m, img, conf_thresh, nms_thresh, use_cuda)
        if False:  # debug switch: dump images with the predicted boxes drawn
            savename = "tmp/%06d.jpg" % (lineId)
            print("save %s" % savename)
            plot_boxes(img, boxes, savename)
        total = total + truths.shape[0]
        for i in range(len(boxes)):
            if boxes[i][4] > conf_thresh:
                proposals += 1
        for i in range(truths.shape[0]):
            box_gt = [truths[i][1], truths[i][2], truths[i][3], truths[i][4], 1.0]
            best_iou = 0
            for j in range(len(boxes)):
                iou = bbox_iou(box_gt, boxes[j], x1y1x2y2=False)
                best_iou = max(iou, best_iou)
            if best_iou > iou_thresh:
                avg_iou += best_iou
                correct += 1
        # Robustness fix: guard the divisions — the original raised
        # ZeroDivisionError whenever proposals, total or correct was still 0.
        precision = 1.0 * correct / proposals if proposals else 0.0
        recall = 1.0 * correct / total if total else 0.0
        denom = precision + recall
        fscore = 2.0 * precision * recall / denom if denom else 0.0
        mean_iou = avg_iou / correct if correct else 0.0
        print("%d IOU: %f, Recal: %f, Precision: %f, Fscore: %f\n" % (lineId - 1, mean_iou, recall, precision, fscore))
ace6eac99b234f0a37363c1b23418475fc5cd113 | 1,387 | py | Python | ts_unittest/case_api/test_example_yaml_api_case.py | carter-gao/AutoTestFramework | 3dcf4fac3da02db8ddd27c0cc18a2fa02064871e | [
"Apache-2.0"
] | 1 | 2022-01-23T06:52:01.000Z | 2022-01-23T06:52:01.000Z | ts_unittest/case_api/test_example_yaml_api_case.py | carter-gao/AutoTestFramework | 3dcf4fac3da02db8ddd27c0cc18a2fa02064871e | [
"Apache-2.0"
] | null | null | null | ts_unittest/case_api/test_example_yaml_api_case.py | carter-gao/AutoTestFramework | 3dcf4fac3da02db8ddd27c0cc18a2fa02064871e | [
"Apache-2.0"
] | 3 | 2020-03-31T03:44:03.000Z | 2021-01-10T13:42:32.000Z | # coding:utf-8
import unittest
from common.readYaml import ReadApi
from common.api.baseTestCase import BaseTestCase
from common.api.requestMethod import SendRequest
class ExampleApiCase(BaseTestCase):
    """API test cases for the example weather API defined in example.yaml."""

    @classmethod
    def setUpClass(cls) -> None:
        super().setUpClass()
        # Read the API information
        cls.api = ReadApi('example.yaml').read('weatherApi')  # For a different API, only this line needs to change
        # Instantiate the request helper
        cls.req = SendRequest(cls.api)

    def tearDown(self) -> None:
        # After each case finishes, complete the remaining write-back tasks
        self.back_fill.fill_api_name(self.api.get('name'))
        self.back_fill.fill_api_url(self.api.get('url'))
        self.back_fill.fill_case_name(self.api.get(self.count).get('title'))
        self.back_fill.fill_test_data(self.req.current_data)

    # # If none of this API's cases need extra steps (database operations,
    # # context write-back, re-assigning dynamic parameters, etc.), they
    # # could be written like this instead:
    # def setUp(self) -> None:
    #     super().setUp()
    #     self.check_result(self.req.excepted(self.count), self.req.request(self.count, {'timestamp': self.timestamp}))
    #
    # def test_01(self):
    #     """Parameter city is not empty"""
    #
    # def test_02(self):
    #     """Parameter city is empty"""

    def test_01(self):
        """Parameter city is not empty"""
        self.check_result(self.req.excepted(1), self.req.request(1))

    def test_02(self):
        """Parameter city is empty"""
        self.check_result(self.req.excepted(2), self.req.request(2))
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 28.306122 | 119 | 0.641673 |
ace6eb30c151be91c9f714e2694b9ffa1fcf30e9 | 774 | py | Python | ScotlandPYard/spyengine/StupidAIDetective.py | fkarg/ScotlandPYard | 768ecbf20357f5cde8d669f05d11cacaf3299dbb | [
"MIT"
] | null | null | null | ScotlandPYard/spyengine/StupidAIDetective.py | fkarg/ScotlandPYard | 768ecbf20357f5cde8d669f05d11cacaf3299dbb | [
"MIT"
] | null | null | null | ScotlandPYard/spyengine/StupidAIDetective.py | fkarg/ScotlandPYard | 768ecbf20357f5cde8d669f05d11cacaf3299dbb | [
"MIT"
] | null | null | null | from numpy.random import choice
from .aidetective import AIDetective
class StupidAIDetective(AIDetective):
    """Detective that plays a uniformly random legal move each turn."""

    def play_next(self):
        """Gather every (node, ticket) pair still playable with the remaining
        tickets, then submit one chosen at random; submit (None, None) when
        no move is available."""
        legal_moves = [
            (target, ticket_kind)
            for ticket_kind, remaining in self.tickets.items()
            if remaining > 0
            for target in self.engine.get_valid_nodes(self.name, ticket_kind)
        ]
        if legal_moves:
            node, ticket = legal_moves[choice(len(legal_moves))]
        else:
            node, ticket = None, None
        self.engine.sendNextMove(node, ticket)
| 30.96 | 102 | 0.549096 |
ace6ebcd003ef8d0681a84504da5974a34f76fcb | 1,254 | py | Python | src/tests/test_time_calculations.py | lbiragnet/covid_dashboard_lbiragnet | 0716af54bd126f41b9135767640d226689111506 | [
"MIT"
] | null | null | null | src/tests/test_time_calculations.py | lbiragnet/covid_dashboard_lbiragnet | 0716af54bd126f41b9135767640d226689111506 | [
"MIT"
] | null | null | null | src/tests/test_time_calculations.py | lbiragnet/covid_dashboard_lbiragnet | 0716af54bd126f41b9135767640d226689111506 | [
"MIT"
] | null | null | null | import time
from time_calculations import current_time_hhmm
from time_calculations import minutes_to_seconds
from time_calculations import hours_to_minutes
from time_calculations import hhmm_to_seconds
from time_calculations import calc_update_interval
from time_calculations import calc_update_epoch_interval
def test_current_time_hhmm():
    """current_time_hhmm returns the local time as an 'HH:MM' string."""
    current_time = current_time_hhmm()
    assert isinstance(current_time, str)
    # NOTE(review): computing the reference time in a second call can race
    # across a minute boundary and flake — consider freezing the clock.
    actual_time = time.strftime("%H:%M", time.localtime())
    assert current_time == actual_time
def test_minutes_to_seconds():
    """60 minutes convert to 3600 seconds (int)."""
    seconds = minutes_to_seconds(60)
    assert isinstance(seconds, int)
    assert seconds == 3600
def test_hours_to_minutes():
    """60 hours convert to 3600 minutes (int)."""
    minutes = hours_to_minutes(60)
    assert isinstance(minutes, int)
    assert minutes == 3600
def test_hhmm_to_seconds():
    """'15:15' is 15*3600 + 15*60 = 54900 seconds past midnight."""
    seconds = hhmm_to_seconds("15:15")
    assert isinstance(seconds, int)
    assert seconds == 54900
def test_calc_update_interval():
    """calc_update_interval returns an integer number of seconds.

    Only the return type is pinned here; the value depends on the current time.
    """
    interval = calc_update_interval("15:15")
    assert isinstance(interval, int)
def test_calc_update_epoch_interval():
    """calc_update_epoch_interval returns a float epoch time no earlier than now."""
    # Capture "now" before the call so the >= comparison cannot race backwards.
    current_epoch_time = round(time.time(), 0)
    epoch_time = calc_update_epoch_interval("18:15")
    assert isinstance(epoch_time, float)
    # Fixed: the original final line had stray text appended
    # ("| 30.585366 | 58 | 0.777512 |"), which made it a syntax error.
    assert epoch_time >= current_epoch_time
ace6ec07ac86f41775f1111436d1e74597174be4 | 664 | py | Python | wildfire/data/goes_level_2/__init__.py | Ferrumofomega/goes | db04c3749832ff77ffc618dd2380f8ea23dda53d | [
"MIT"
] | 1 | 2020-01-15T03:18:08.000Z | 2020-01-15T03:18:08.000Z | wildfire/data/goes_level_2/__init__.py | joyprojects/wildfire | db04c3749832ff77ffc618dd2380f8ea23dda53d | [
"MIT"
] | 60 | 2019-11-24T01:57:48.000Z | 2020-04-19T05:07:17.000Z | wildfire/data/goes_level_2/__init__.py | Ferrumofomega/wildfire | db04c3749832ff77ffc618dd2380f8ea23dda53d | [
"MIT"
] | 1 | 2020-02-29T01:24:35.000Z | 2020-02-29T01:24:35.000Z | """Methods for downloading, parsing, and analyzing GOES Level 2 Wildfire data.
Full Scans of Fire for GOES-17
https://s3.console.aws.amazon.com/s3/buckets/noaa-goes17/ABI-L2-FDCF/?region=us-east-1&tab=overview
CONUS Scans of Fire for GOES-17
https://s3.console.aws.amazon.com/s3/buckets/noaa-goes17/ABI-L2-FDCC/?region=us-east-1&tab=overview
Full Scans of Fire for GOES-16
https://s3.console.aws.amazon.com/s3/buckets/noaa-goes16/ABI-L2-FDCF/?region=us-east-1&tab=overview
CONUS Scans of Fire for GOES-16
https://s3.console.aws.amazon.com/s3/buckets/noaa-goes16/ABI-L2-FDCC/?region=us-east-1&tab=overview
"""
from .utilities import *
from .downloader import *
| 39.058824 | 99 | 0.769578 |
ace6eeecbfa4a248231e4cc0381b57f9e5f21356 | 308 | py | Python | pylib/utils.py | zachwood0s/pylib | 264737285a13245c99b57474fc44316e0e69332f | [
"MIT"
] | null | null | null | pylib/utils.py | zachwood0s/pylib | 264737285a13245c99b57474fc44316e0e69332f | [
"MIT"
] | null | null | null | pylib/utils.py | zachwood0s/pylib | 264737285a13245c99b57474fc44316e0e69332f | [
"MIT"
] | null | null | null | import functools
import unittest
def require(condition, fail_value=None):
    """Decorator factory: call the wrapped function only when *condition*
    (invoked with the same arguments) is truthy; otherwise return *fail_value*.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if not condition(*args, **kwargs):
                return fail_value
            return func(*args, **kwargs)
        return wrapper
    return decorator
ace6ef6f1cbab83ecd238501824e6f41ac1ab084 | 9,384 | py | Python | utils/model_manager.py | sari-rev00/pytorch_image_clissifier | 08698b1023e08cdde561d492074e7ee8c41be8ac | [
"Apache-2.0"
] | 1 | 2022-01-25T01:43:44.000Z | 2022-01-25T01:43:44.000Z | utils/model_manager.py | sari-rev00/pytorch_image_clissifier | 08698b1023e08cdde561d492074e7ee8c41be8ac | [
"Apache-2.0"
] | 3 | 2022-02-13T13:46:12.000Z | 2022-02-14T01:20:43.000Z | utils/model_manager.py | sari-rev00/pytorch_image_classifier | 08698b1023e08cdde561d492074e7ee8c41be8ac | [
"Apache-2.0"
] | null | null | null | import os
from datetime import datetime
import json
# from tqdm import tqdm
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from datetime import datetime
import torch
import torch.optim as optim
from torch.autograd import Variable
from utils.dataloader import gen_transform
from utils.optimizer import default_optimizer
from utils.criterion import default_criterion
from config.config import ConfManager, ConfOptimizer, TransformParam
# Module-level shortcuts for the configuration values used below.
ACC_TH = ConfManager.ACC_TH                    # accuracy threshold gating model checkpoints in train()
SAVE_DIR_BASE = ConfManager.SAVE_DIR_BASE      # root directory for saved runs
FIG_SAVE_DIR = ConfManager.FIG_SAVE_DIR        # default directory for result figures
FIG_COLOR_TRAIN = ConfManager.FIG_COLOR_TRAIN  # plot color for the train curves
FIG_COLOR_TEST = ConfManager.FIG_COLOR_TEST    # plot color for the test curves
ROUND_DIGIT = ConfManager.ROUND_DIGIT          # decimal places for reported metrics
def round_with_floor(num, digit):
    """Round *num* to *digit* decimal places, but never return less than
    the smallest representable step 10**-digit."""
    smallest = 10 ** (-1 * digit)
    if num > smallest:
        return round(num, digit)
    return smallest
class Manager():
    """Drives training, persistence, prediction and plotting for a model.

    The wrapped ``model`` is expected to expose ``model_descriptions()``,
    ``save_model_info()`` and a ``label_idx_dict`` attribute (see usage below).
    """

    def __init__(self, model):
        # The model under management and the result of the last train() run.
        self.model = model
        self.training_result = None
        return None

    def train(
            self,
            num_epochs,
            dataloader,
            optimizer=None,
            criterion=None,
            acc_th=ACC_TH,
            auto_save=True,
            print_epoch_step=None):
        """Run the train/test loop for ``num_epochs`` epochs.

        Per epoch it runs a "train" and a "test" pass over ``dataloader``
        (switched via ``dataloader.set_mode``), records loss/accuracy into
        ``self.training_result`` and, when ``auto_save`` is on, checkpoints
        the model whenever accuracy exceeds ``acc_th`` and the loss improves,
        then saves the training info and a result figure at the end.
        """
        self.model.label_idx_dict = dataloader.dataset.label_idx_dict
        self.batch_size = dataloader.batch_size
        self.shuffle = dataloader.shuffle
        self.drop_last = dataloader.drop_last
        # Fall back to the project defaults when not supplied by the caller.
        if not optimizer:
            optimizer = default_optimizer(self.model)
        if not criterion:
            criterion = default_criterion()
        if not print_epoch_step:
            print_epoch_step = int(1)
        dt_start = datetime.now()
        model_desc = self.model.model_descriptions()
        if auto_save:
            # Per-run output directory named <model>_<timestamp>/.
            save_dir = "{}_{}/".format(
                model_desc["name"],
                dt_start.strftime('%Y%m%d%H%M%S'))
        result = {
            "start": dt_start.strftime('%Y-%m-%d %H:%M:%S'),
            "model_descriptions": model_desc,
            "scores": list()}
        result["label_idx_dict"] = self.model.label_idx_dict
        best_loss = None
        print("Training: {} {}".format(
            model_desc["name"],
            dt_start.strftime('%Y%m%d%H%M%S')))
        print(f"batch size: {self.batch_size}\n")
        for ep in range(1, num_epochs + 1):
            if (ep % print_epoch_step) == 0:
                print("Epoch:{}/{} ============".format(ep, num_epochs))
            d_score = dict()
            d_score["epoch"] = ep
            for mode in ["train", "test"]:
                if mode == "train":
                    self.model.train()
                else:
                    self.model.eval()
                ep_loss = float(0)
                ep_corrects = int(0)
                ep_data_num = int(0)
                dataloader.set_mode(mode=mode)
                for inputs, labels in dataloader:
                    ep_data_num += dataloader.batch_size
                    optimizer.zero_grad()
                    # Gradients are only tracked during the training pass.
                    with torch.set_grad_enabled(mode == "train"):
                        outputs = self.model(inputs)
                        loss = criterion(outputs, labels)
                        _, preds = torch.max(outputs, 1)
                        if mode == "train":
                            loss.backward()
                            optimizer.step()
                    ep_loss += loss.item() * inputs.size(0)
                    ep_corrects += torch.sum(preds == labels.data).item()
                # Per-sample loss, floored so it never rounds to zero.
                ep_loss_per_data = round_with_floor(
                    num=ep_loss / ep_data_num,
                    digit=ROUND_DIGIT)
                ep_acc = round(ep_corrects / ep_data_num, ROUND_DIGIT)
                if mode == "train":
                    d_score["train_loss"] = ep_loss_per_data
                    d_score["train_acc"] = ep_acc
                else:
                    d_score["test_loss"] = ep_loss_per_data
                    d_score["test_acc"] = ep_acc
                    # Checkpoint on the test metrics: save whenever accuracy
                    # clears acc_th and the loss improves on the best so far.
                    if not best_loss:
                        best_loss = ep_loss_per_data
                    elif (ep_acc > acc_th) and (ep_loss_per_data < best_loss) and auto_save:
                        best_loss = ep_loss_per_data
                        fname = "{}_{}_{}".format(
                            model_desc["name"],
                            dt_start.strftime('%Y%m%d%H%M%S'),
                            str(ep).zfill(3))
                        self.save_model_info(dir=save_dir, fname=fname)
                if (ep % print_epoch_step) == 0:
                    print(" Mode: {}, Loss: {}, Acc: {}".format(
                        mode,
                        ep_loss_per_data,
                        ep_acc))
            result["scores"].append(d_score)
        result["end"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        self.training_result = result
        if auto_save:
            print("saved: training information")
            self.save_training_info(dir=save_dir, fname="training_info")
            self.make_result_fig(save=True, save_dir=SAVE_DIR_BASE + save_dir)
        return None

    def save_training_info(self, dir, fname):
        """Dump the last training result plus dataloader/optimizer/transform
        settings to ``SAVE_DIR_BASE + dir + fname`` as pretty-printed JSON."""
        if not os.path.exists(SAVE_DIR_BASE + dir):
            os.mkdir(SAVE_DIR_BASE + dir)
        dict_info = {
            "training_result": self.training_result,
            "dataloader": {
                "batch_size": self.batch_size,
                "shuffle": self.shuffle,
                "drop_last": self.drop_last},
            "optimizer": {
                "learning_rate": ConfOptimizer.LEARNING_RATE,
                "momentum": ConfOptimizer.MOMENTUM,
                "weight_decay": ConfOptimizer.WEIGHT_DECAY},
            "transform": {
                "resize": TransformParam.resize,
                "color_mean": TransformParam.color_mean,
                "color_std": TransformParam.color_std}}
        # Normalize the path pieces before writing.
        if dir[-1] != "/":
            dir += "/"
        if not ".json" in fname:
            fname += ".json"
        with open(SAVE_DIR_BASE + dir + fname, mode='w') as f:
            json.dump(dict_info, f, indent=4)
        return None

    def save_model_info(self, dir, fname):
        """Delegate checkpointing to the managed model."""
        self.model.save_model_info(dir=dir, fname=fname)
        return None

    def load_model_info(self, fname):
        """Re-initialize the managed model from a saved info file."""
        self.model.__init__(model_info_fname=fname)
        return None

    def predict(self, fpath, pos=False):
        """Classify the image at ``fpath``.

        Returns the raw output vector (as a list) when ``pos`` is True,
        otherwise the label string looked up via ``label_idx_dict``.
        Raises Exception when the predicted index has no label mapping.
        """
        input_channel = self.model.model_descriptions()["input_channel"]
        input_size = self.model.model_descriptions()["input_size"]
        img = Image.open(fpath)
        transform = gen_transform()
        x = transform(img, "test")
        # Shape the single image into a (1, C, H, W) batch for the model.
        x = torch.reshape(x, (-1, input_channel, input_size, input_size))
        self.model.eval()
        pred_pos = self.model(x)
        if pos:
            return pred_pos[0].tolist()
        else:
            pred_label = torch.max(pred_pos, 1).indices.item()
            for k, v in self.model.label_idx_dict.items():
                if int(v) == int(pred_label):
                    return str(k)
            raise Exception(f"Error: predicted label {pred_label} is not included in label_idx_dict.")

    def make_result_fig(self, save=False, save_dir=FIG_SAVE_DIR):
        """Plot loss (log scale) and accuracy curves for the last training run.

        Returns the matplotlib figure, or None when there is no result yet;
        optionally saves it as <model>_<timestamp>.jpg under ``save_dir``.
        """
        if not self.training_result:
            return None
        color_train = FIG_COLOR_TRAIN
        color_test = FIG_COLOR_TEST
        df = pd.DataFrame(self.training_result["scores"])
        ep = df["epoch"].astype(int).values.tolist()
        train_loss = df["train_loss"].values.tolist()
        train_acc = df["train_acc"].values.tolist()
        test_loss = df["test_loss"].values.tolist()
        test_acc = df["test_acc"].values.tolist()
        model_name = self.model.model_descriptions()["name"]
        fig = plt.figure(figsize=(16, 6))
        # Left panel: loss curves on a log scale.
        ax_0 = fig.add_subplot(1,2,1)
        ax_0.plot(ep, train_loss, marker="o", markersize=6, color=color_train, label="train")
        ax_0.plot(ep, test_loss, marker="o", markersize=6, color=color_test, label="test")
        ax_0.set_yscale('log')
        ax_0.set_title('loss: ' + model_name + self.training_result["start"])
        ax_0.set_xlabel('epoch')
        ax_0.set_ylabel('loss')
        ax_0.grid(True)
        ax_0.legend(bbox_to_anchor=(1, 1), loc='upper right', borderaxespad=1, fontsize=12)
        # Right panel: accuracy curves, zoomed into the high-accuracy band.
        ax_1 = fig.add_subplot(1,2,2)
        ax_1.plot(ep, train_acc, marker="o", markersize=6, color=color_train, label="train")
        ax_1.plot(ep, test_acc, marker="o", markersize=6, color=color_test, label="test")
        ax_1.set_ylim([0.95, 1.005])
        ax_1.set_title('acc: ' + model_name + self.training_result["start"])
        ax_1.set_xlabel('epoch')
        ax_1.set_ylabel('acc')
        ax_1.grid(True)
        ax_1.legend(bbox_to_anchor=(1, 0), loc='lower right', borderaxespad=1, fontsize=12)
        if save:
            dt = datetime.strptime(self.training_result["start"], '%Y-%m-%d %H:%M:%S')
            fig.savefig(save_dir + "{}_{}.jpg".format(
                self.training_result["model_descriptions"]["name"],
                dt.strftime('%Y%m%d%H%M%S')))
        return fig
| 41.706667 | 103 | 0.543372 |
ace6efc4b91268267e4891e48ea470dd77ba0191 | 491 | py | Python | profiles_api/urls.py | LaiZiSen/profiles_REST_API_course | 83662a33b3a318dc7e52c5d56b577e4863ed7c5d | [
"MIT"
] | null | null | null | profiles_api/urls.py | LaiZiSen/profiles_REST_API_course | 83662a33b3a318dc7e52c5d56b577e4863ed7c5d | [
"MIT"
] | null | null | null | profiles_api/urls.py | LaiZiSen/profiles_REST_API_course | 83662a33b3a318dc7e52c5d56b577e4863ed7c5d | [
"MIT"
] | null | null | null | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from profiles_api import views
# Register the viewsets with a DRF router, which generates their URL routes.
router = DefaultRouter()
router.register('hello-viewset',views.HelloViewSet,basename = 'hello-viewset')
router.register('profile',views.UserProfileViewSet)
router.register('feed',views.UserProfileFeedViewSet)
# Plain APIView endpoints, plus all router-generated routes at the root.
urlpatterns = [
    path('hello-view/',views.HelloApiView.as_view()),
    path('login/',views.UserLoginApiView.as_view()),
    path('',include(router.urls))
]
| 27.277778 | 78 | 0.771894 |
ace6f020dd2d058da58ef7dbf4cf7d5cb619af10 | 1,116 | py | Python | kubernetes/test/test_v1beta1_mutating_webhook_configuration.py | sgwilliams-ebsco/python | 35e6406536c96d4769ff7e2a02bf0fdcb902a509 | [
"Apache-2.0"
] | 1 | 2021-06-10T23:44:11.000Z | 2021-06-10T23:44:11.000Z | kubernetes/test/test_v1beta1_mutating_webhook_configuration.py | sgwilliams-ebsco/python | 35e6406536c96d4769ff7e2a02bf0fdcb902a509 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1beta1_mutating_webhook_configuration.py | sgwilliams-ebsco/python | 35e6406536c96d4769ff7e2a02bf0fdcb902a509 | [
"Apache-2.0"
] | 1 | 2018-11-06T16:33:43.000Z | 2018-11-06T16:33:43.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_mutating_webhook_configuration import V1beta1MutatingWebhookConfiguration
class TestV1beta1MutatingWebhookConfiguration(unittest.TestCase):
    """V1beta1MutatingWebhookConfiguration unit test stubs (generated)."""

    def setUp(self):
        # The generated stub needs no fixtures.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1beta1MutatingWebhookConfiguration(self):
        """Test V1beta1MutatingWebhookConfiguration."""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes.client.models.v1beta1_mutating_webhook_configuration.V1beta1MutatingWebhookConfiguration()
        pass
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 24.8 | 118 | 0.749104 |
ace6f1a0c9dc283eb5b97468629b4b75d9102470 | 1,858 | py | Python | HeavyFlavorAnalysis/SpecificDecay/test/cfg_recoCheck.py | gputtley/cmssw | c1ef8454804e4ebea8b65f59c4a952a6c94fde3b | [
"Apache-2.0"
] | 6 | 2017-09-08T14:12:56.000Z | 2022-03-09T23:57:01.000Z | HeavyFlavorAnalysis/SpecificDecay/test/cfg_recoCheck.py | gputtley/cmssw | c1ef8454804e4ebea8b65f59c4a952a6c94fde3b | [
"Apache-2.0"
] | 545 | 2017-09-19T17:10:19.000Z | 2022-03-07T16:55:27.000Z | HeavyFlavorAnalysis/SpecificDecay/test/cfg_recoCheck.py | gputtley/cmssw | c1ef8454804e4ebea8b65f59c4a952a6c94fde3b | [
"Apache-2.0"
] | 14 | 2017-10-04T09:47:21.000Z | 2019-10-23T18:04:45.000Z | import FWCore.ParameterSet.Config as cms
# CMSSW job configuration: run the CheckBPHWriteDecay analyzer over the
# candidate collections previously produced by bphWriteSpecificDecay.
process = cms.Process("bckAnalysis")
# Process every event in the input file (-1 = no limit).
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
# Standard geometry / magnetic field / conditions sequences.
process.load("Configuration.Geometry.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
process.load("TrackingTools/TransientTrack/TransientTrackBuilder_cfi")
# Throttle the framework progress report to every 100 events.
process.MessageLogger.cerr.FwkReport.reportEvery = 100
process.source = cms.Source("PoolSource",fileNames = cms.untracked.vstring(
    'file:reco.root'
))
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_data', '')
process.checkBPHWriteDecay = cms.EDAnalyzer('CheckBPHWriteDecay',
    ### to dump only one event
    # runNumber = cms.uint32( 275371 ),
    # evtNumber = cms.uint32( 783544498 ),
    # Candidate collections written by the bphWriteSpecificDecay module.
    candsLabel = cms.vstring('bphWriteSpecificDecay:oniaFitted:bphAnalysis'
                            ,'bphWriteSpecificDecay:kx0Cand:bphAnalysis'
                            ,'bphWriteSpecificDecay:phiCand:bphAnalysis'
                            ,'bphWriteSpecificDecay:buFitted:bphAnalysis'
                            ,'bphWriteSpecificDecay:bdFitted:bphAnalysis'
                            ,'bphWriteSpecificDecay:bsFitted:bphAnalysis')
)
process.p = cms.Path(
    process.checkBPHWriteDecay
)
| 41.288889 | 89 | 0.76211 |
ace6f41a27ffed1107066e293e03f11d043f338a | 744 | py | Python | examples/run_dnpmshde.py | eltociear/NiaPy | 7884aefec8f013d9f8db5c1af7080a61dd19a31d | [
"MIT"
] | null | null | null | examples/run_dnpmshde.py | eltociear/NiaPy | 7884aefec8f013d9f8db5c1af7080a61dd19a31d | [
"MIT"
] | null | null | null | examples/run_dnpmshde.py | eltociear/NiaPy | 7884aefec8f013d9f8db5c1af7080a61dd19a31d | [
"MIT"
] | null | null | null | # encoding=utf8
# This is temporary fix to import module from parent folder
# It will be removed when package is published on PyPI
import sys
sys.path.append('../')
# End of fix
from niapy.algorithms.modified import DynNpMultiStrategyDifferentialEvolutionMTS
from niapy.task import StoppingTask
from niapy.problems import Sphere
# we will run Differential Evolution for 5 independent runs
for i in range(5):
    task = StoppingTask(problem=Sphere(dimension=10), max_evals=10000)
    algo = DynNpMultiStrategyDifferentialEvolutionMTS(population_size=50, differential_weight=0.5, crossover_probability=0.9, p_max=10)
    best = algo.run(task)
    # algo.run returns a 2-tuple; print both parts (solution and fitness,
    # presumably — confirm against niapy's Algorithm.run documentation).
    print('%s -> %s' % (best[0], best[1]))
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
| 35.428571 | 135 | 0.771505 |
ace6f490aa6c71a15e5f17ea94e693d17579149b | 91,261 | py | Python | src/azure-cli-core/azure/cli/core/tests/test_profile.py | bim-msft/azure-cli | c673f94fdef812f6cbd46118b62584d3169d1d38 | [
"MIT"
] | null | null | null | src/azure-cli-core/azure/cli/core/tests/test_profile.py | bim-msft/azure-cli | c673f94fdef812f6cbd46118b62584d3169d1d38 | [
"MIT"
] | null | null | null | src/azure-cli-core/azure/cli/core/tests/test_profile.py | bim-msft/azure-cli | c673f94fdef812f6cbd46118b62584d3169d1d38 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=protected-access
import json
import os
import sys
import unittest
import mock
import re
from copy import deepcopy
from adal import AdalError
from azure.mgmt.resource.subscriptions.models import \
(SubscriptionState, Subscription, SubscriptionPolicies, SpendingLimit)
from azure.cli.core._profile import (Profile, CredsCache, SubscriptionFinder,
ServicePrincipalAuth, _AUTH_CTX_FACTORY)
from azure.cli.core.mock import DummyCli
from knack.util import CLIError
class TestProfile(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        """Build the shared fixture data used across the test methods:
        two fake users with one subscription each, a cached ADAL token entry,
        and a (fake, expired) MSI JWT access token."""
        cls.tenant_id = 'microsoft.com'
        cls.user1 = 'foo@foo.com'
        cls.id1 = 'subscriptions/1'
        cls.display_name1 = 'foo account'
        cls.state1 = SubscriptionState.enabled
        cls.subscription1 = SubscriptionStub(cls.id1,
                                             cls.display_name1,
                                             cls.state1,
                                             cls.tenant_id)
        cls.raw_token1 = 'some...secrets'
        # Shape mirrors an ADAL token-cache entry for user1.
        cls.token_entry1 = {
            "_clientId": "04b07795-8ddb-461a-bbee-02f9e1bf7b46",
            "resource": "https://management.core.windows.net/",
            "tokenType": "Bearer",
            "expiresOn": "2016-03-31T04:26:56.610Z",
            "expiresIn": 3599,
            "identityProvider": "live.com",
            "_authority": "https://login.microsoftonline.com/common",
            "isMRRT": True,
            "refreshToken": "faked123",
            "accessToken": cls.raw_token1,
            "userId": cls.user1
        }
        cls.user2 = 'bar@bar.com'
        cls.id2 = 'subscriptions/2'
        cls.display_name2 = 'bar account'
        cls.state2 = SubscriptionState.past_due
        cls.subscription2 = SubscriptionStub(cls.id2,
                                             cls.display_name2,
                                             cls.state2,
                                             cls.tenant_id)
        cls.test_msi_tenant = '54826b22-38d6-4fb2-bad9-b7b93a3e9c5a'
        # A full (expired) JWT used by the MSI-related tests.
        cls.test_msi_access_token = ('eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6IlZXVkljMVdEMVRrc2JiMzAxc2FzTTVrT3E1'
                                     'USIsImtpZCI6IlZXVkljMVdEMVRrc2JiMzAxc2FzTTVrT3E1USJ9.eyJhdWQiOiJodHRwczovL21hbmF'
                                     'nZW1lbnQuY29yZS53aW5kb3dzLm5ldC8iLCJpc3MiOiJodHRwczovL3N0cy53aW5kb3dzLm5ldC81NDg'
                                     'yNmIyMi0zOGQ2LTRmYjItYmFkOS1iN2I5M2EzZTljNWEvIiwiaWF0IjoxNTAzMzU0ODc2LCJuYmYiOjE'
                                     '1MDMzNTQ4NzYsImV4cCI6MTUwMzM1ODc3NiwiYWNyIjoiMSIsImFpbyI6IkFTUUEyLzhFQUFBQTFGL1k'
                                     '0VVR3bFI1Y091QXJxc1J0OU5UVVc2MGlsUHZna0daUC8xczVtdzg9IiwiYW1yIjpbInB3ZCJdLCJhcHB'
                                     'pZCI6IjA0YjA3Nzk1LThkZGItNDYxYS1iYmVlLTAyZjllMWJmN2I0NiIsImFwcGlkYWNyIjoiMCIsImV'
                                     'fZXhwIjoyNjI4MDAsImZhbWlseV9uYW1lIjoic2RrIiwiZ2l2ZW5fbmFtZSI6ImFkbWluMyIsImdyb3V'
                                     'wcyI6WyJlNGJiMGI1Ni0xMDE0LTQwZjgtODhhYi0zZDhhOGNiMGUwODYiLCI4YTliMTYxNy1mYzhkLTR'
                                     'hYTktYTQyZi05OTg2OGQzMTQ2OTkiLCI1NDgwMzkxNy00YzcxLTRkNmMtOGJkZi1iYmQ5MzEwMTBmOGM'
                                     'iXSwiaXBhZGRyIjoiMTY3LjIyMC4xLjIzNCIsIm5hbWUiOiJhZG1pbjMiLCJvaWQiOiJlN2UxNThkMy0'
                                     '3Y2RjLTQ3Y2QtODgyNS01ODU5ZDdhYjJiNTUiLCJwdWlkIjoiMTAwMzNGRkY5NUQ0NEU4NCIsInNjcCI'
                                     '6InVzZXJfaW1wZXJzb25hdGlvbiIsInN1YiI6ImhRenl3b3FTLUEtRzAySTl6ZE5TRmtGd3R2MGVwZ2l'
                                     'WY1Vsdm1PZEZHaFEiLCJ0aWQiOiI1NDgyNmIyMi0zOGQ2LTRmYjItYmFkOS1iN2I5M2EzZTljNWEiLCJ'
                                     '1bmlxdWVfbmFtZSI6ImFkbWluM0BBenVyZVNES1RlYW0ub25taWNyb3NvZnQuY29tIiwidXBuIjoiYWR'
                                     'taW4zQEF6dXJlU0RLVGVhbS5vbm1pY3Jvc29mdC5jb20iLCJ1dGkiOiJuUEROYm04UFkwYUdELWhNeWx'
                                     'rVEFBIiwidmVyIjoiMS4wIiwid2lkcyI6WyI2MmU5MDM5NC02OWY1LTQyMzctOTE5MC0wMTIxNzcxNDV'
                                     'lMTAiXX0.Pg4cq0MuP1uGhY_h51ZZdyUYjGDUFgTW2EfIV4DaWT9RU7GIK_Fq9VGBTTbFZA0pZrrmP-z'
                                     '7DlN9-U0A0nEYDoXzXvo-ACTkm9_TakfADd36YlYB5aLna-yO0B7rk5W9ANelkzUQgRfidSHtCmV6i4V'
                                     'e-lOym1sH5iOcxfIjXF0Tp2y0f3zM7qCq8Cp1ZxEwz6xYIgByoxjErNXrOME5Ld1WizcsaWxTXpwxJn_'
                                     'Q8U2g9kXHrbYFeY2gJxF_hnfLvNKxUKUBnftmyYxZwKi0GDS0BvdJnJnsqSRSpxUx__Ra9QJkG1IaDzj'
                                     'ZcSZPHK45T6ohK9Hk9ktZo0crVl7Tmw')
    def test_normalize(self):
        """_normalize_properties flattens a subscription into the dict shape
        the CLI stores, and the result is JSON-serializable."""
        cli = DummyCli()
        storage_mock = {'subscriptions': None}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        consolidated = profile._normalize_properties(self.user1,
                                                     [self.subscription1],
                                                     False)
        expected = {
            'environmentName': 'AzureCloud',
            'id': '1',
            'name': self.display_name1,
            'state': self.state1.value,
            'user': {
                'name': self.user1,
                'type': 'user'
            },
            'isDefault': False,
            'tenantId': self.tenant_id
        }
        self.assertEqual(expected, consolidated[0])
        # verify serialization works
        self.assertIsNotNone(json.dumps(consolidated[0]))
    def test_normalize_with_unicode_in_subscription_name(self):
        """A non-ASCII character in the display name is tolerated: the
        normalized name is either the original or a '?'-substituted form."""
        cli = DummyCli()
        storage_mock = {'subscriptions': None}
        test_display_name = 'sub' + chr(255)
        polished_display_name = 'sub?'
        test_subscription = SubscriptionStub('sub1',
                                             test_display_name,
                                             SubscriptionState.enabled,
                                             'tenant1')
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        consolidated = profile._normalize_properties(self.user1,
                                                     [test_subscription],
                                                     False)
        self.assertTrue(consolidated[0]['name'] in [polished_display_name, test_display_name])
    def test_normalize_with_none_subscription_name(self):
        """A None display name is normalized to an empty string."""
        cli = DummyCli()
        storage_mock = {'subscriptions': None}
        test_display_name = None
        polished_display_name = ''
        test_subscription = SubscriptionStub('sub1',
                                             test_display_name,
                                             SubscriptionState.enabled,
                                             'tenant1')
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        consolidated = profile._normalize_properties(self.user1,
                                                     [test_subscription],
                                                     False)
        self.assertTrue(consolidated[0]['name'] == polished_display_name)
    def test_update_add_two_different_subscriptions(self):
        """Adding subscriptions for two different users keeps both entries,
        making the most recently added one the default."""
        cli = DummyCli()
        storage_mock = {'subscriptions': None}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        # add the first and verify
        consolidated = profile._normalize_properties(self.user1,
                                                     [self.subscription1],
                                                     False)
        profile._set_subscriptions(consolidated)
        self.assertEqual(len(storage_mock['subscriptions']), 1)
        subscription1 = storage_mock['subscriptions'][0]
        self.assertEqual(subscription1, {
            'environmentName': 'AzureCloud',
            'id': '1',
            'name': self.display_name1,
            'state': self.state1.value,
            'user': {
                'name': self.user1,
                'type': 'user'
            },
            'isDefault': True,
            'tenantId': self.tenant_id
        })
        # add the second and verify
        consolidated = profile._normalize_properties(self.user2,
                                                     [self.subscription2],
                                                     False)
        profile._set_subscriptions(consolidated)
        self.assertEqual(len(storage_mock['subscriptions']), 2)
        subscription2 = storage_mock['subscriptions'][1]
        self.assertEqual(subscription2, {
            'environmentName': 'AzureCloud',
            'id': '2',
            'name': self.display_name2,
            'state': self.state2.value,
            'user': {
                'name': self.user2,
                'type': 'user'
            },
            'isDefault': True,
            'tenantId': self.tenant_id
        })
        # verify the old one stays, but no longer active
        self.assertEqual(storage_mock['subscriptions'][0]['name'],
                         subscription1['name'])
        self.assertFalse(storage_mock['subscriptions'][0]['isDefault'])
    def test_update_with_same_subscription_added_twice(self):
        """Re-adding the same subscription keeps a single entry, which
        remains the default."""
        cli = DummyCli()
        storage_mock = {'subscriptions': None}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        # add one twice and verify we will have one but with new token
        consolidated = profile._normalize_properties(self.user1,
                                                     [self.subscription1],
                                                     False)
        profile._set_subscriptions(consolidated)
        new_subscription1 = SubscriptionStub(self.id1,
                                             self.display_name1,
                                             self.state1,
                                             self.tenant_id)
        consolidated = profile._normalize_properties(self.user1,
                                                     [new_subscription1],
                                                     False)
        profile._set_subscriptions(consolidated)
        self.assertEqual(len(storage_mock['subscriptions']), 1)
        self.assertTrue(storage_mock['subscriptions'][0]['isDefault'])
    def test_set_active_subscription(self):
        """set_active_subscription moves the isDefault flag from the most
        recently added subscription to the requested one."""
        cli = DummyCli()
        storage_mock = {'subscriptions': None}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        consolidated = profile._normalize_properties(self.user1,
                                                     [self.subscription1],
                                                     False)
        profile._set_subscriptions(consolidated)
        consolidated = profile._normalize_properties(self.user2,
                                                     [self.subscription2],
                                                     False)
        profile._set_subscriptions(consolidated)
        self.assertTrue(storage_mock['subscriptions'][1]['isDefault'])
        profile.set_active_subscription(storage_mock['subscriptions'][0]['id'])
        self.assertFalse(storage_mock['subscriptions'][1]['isDefault'])
        self.assertTrue(storage_mock['subscriptions'][0]['isDefault'])
    def test_default_active_subscription_to_non_disabled_one(self):
        """When the first listed subscription is past-due, the next enabled
        subscription becomes the default."""
        cli = DummyCli()
        storage_mock = {'subscriptions': None}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        subscriptions = profile._normalize_properties(
            self.user2, [self.subscription2, self.subscription1], False)
        profile._set_subscriptions(subscriptions)
        # verify we skip the overdued subscription and default to the 2nd one in the list
        self.assertEqual(storage_mock['subscriptions'][1]['name'], self.subscription1.display_name)
        self.assertTrue(storage_mock['subscriptions'][1]['isDefault'])
    def test_get_subscription(self):
        """get_subscription resolves the default subscription, lookups by
        name and by id, and raises CLIError for an unknown id."""
        cli = DummyCli()
        storage_mock = {'subscriptions': None}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        consolidated = profile._normalize_properties(self.user1,
                                                     [self.subscription1],
                                                     False)
        profile._set_subscriptions(consolidated)
        self.assertEqual(self.display_name1, profile.get_subscription()['name'])
        self.assertEqual(self.display_name1,
                         profile.get_subscription(subscription=self.display_name1)['name'])
        sub_id = self.id1.split('/')[-1]
        self.assertEqual(sub_id, profile.get_subscription()['id'])
        self.assertEqual(sub_id, profile.get_subscription(subscription=sub_id)['id'])
        self.assertRaises(CLIError, profile.get_subscription, "random_id")
def test_get_auth_info_fail_on_user_account(self):
cli = DummyCli()
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1,
[self.subscription1],
False)
profile._set_subscriptions(consolidated)
# testing dump of existing logged in account
self.assertRaises(CLIError, profile.get_sp_auth_info)
@mock.patch('azure.cli.core.profiles.get_api_version', autospec=True)
def test_subscription_finder_constructor(self, get_api_mock):
cli = DummyCli()
get_api_mock.return_value = '2016-06-01'
cli.cloud.endpoints.resource_manager = 'http://foo_arm'
finder = SubscriptionFinder(cli, None, None, arm_client_factory=None)
result = finder._arm_client_factory(mock.MagicMock())
self.assertEqual(result.config.base_url, 'http://foo_arm')
    @mock.patch('adal.AuthenticationContext', autospec=True)
    def test_get_auth_info_for_logged_in_service_principal(self, mock_auth_context):
        """get_sp_auth_info dumps secret-based credentials for a logged-in service principal."""
        cli = DummyCli()
        mock_auth_context.acquire_token_with_client_credentials.return_value = self.token_entry1
        mock_arm_client = mock.MagicMock()
        mock_arm_client.subscriptions.list.return_value = [self.subscription1]
        # inject stubbed auth-context and ARM-client factories so no network I/O occurs
        finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
        storage_mock = {'subscriptions': []}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        profile._management_resource_uri = 'https://management.core.windows.net/'
        # log in as SP '1234' with a client secret
        profile.find_subscriptions_on_login(False, '1234', 'my-secret', True, self.tenant_id, use_device_code=False,
                                            allow_no_subscriptions=False, subscription_finder=finder)
        # action
        extended_info = profile.get_sp_auth_info()
        # assert: the dumped info carries the subscription, SP credentials and cloud endpoints
        self.assertEqual(self.id1.split('/')[-1], extended_info['subscriptionId'])
        self.assertEqual('1234', extended_info['clientId'])
        self.assertEqual('my-secret', extended_info['clientSecret'])
        self.assertEqual('https://login.microsoftonline.com', extended_info['activeDirectoryEndpointUrl'])
        self.assertEqual('https://management.azure.com/', extended_info['resourceManagerEndpointUrl'])
def test_get_auth_info_for_newly_created_service_principal(self):
cli = DummyCli()
storage_mock = {'subscriptions': []}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1, [self.subscription1], False)
profile._set_subscriptions(consolidated)
# action
extended_info = profile.get_sp_auth_info(name='1234', cert_file='/tmp/123.pem')
# assert
self.assertEqual(self.id1.split('/')[-1], extended_info['subscriptionId'])
self.assertEqual(self.tenant_id, extended_info['tenantId'])
self.assertEqual('1234', extended_info['clientId'])
self.assertEqual('/tmp/123.pem', extended_info['clientCertificate'])
self.assertIsNone(extended_info.get('clientSecret', None))
self.assertEqual('https://login.microsoftonline.com', extended_info['activeDirectoryEndpointUrl'])
self.assertEqual('https://management.azure.com/', extended_info['resourceManagerEndpointUrl'])
    @mock.patch('adal.AuthenticationContext', autospec=True)
    def test_create_account_without_subscriptions_thru_service_principal(self, mock_auth_context):
        """An SP login with no subscriptions and allow_no_subscriptions creates a tenant-level account."""
        mock_auth_context.acquire_token_with_client_credentials.return_value = self.token_entry1
        cli = DummyCli()
        mock_arm_client = mock.MagicMock()
        # ARM reports no subscriptions for this service principal
        mock_arm_client.subscriptions.list.return_value = []
        # stub the auth-context and ARM-client factories so no network I/O occurs
        finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
        storage_mock = {'subscriptions': []}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        profile._management_resource_uri = 'https://management.core.windows.net/'
        # action
        result = profile.find_subscriptions_on_login(False,
                                                     '1234',
                                                     'my-secret',
                                                     True,
                                                     self.tenant_id,
                                                     use_device_code=False,
                                                     allow_no_subscriptions=True,
                                                     subscription_finder=finder)
        # assert: a placeholder account at the tenant level is recorded
        self.assertEqual(1, len(result))
        self.assertEqual(result[0]['id'], self.tenant_id)
        self.assertEqual(result[0]['state'], 'Enabled')
        self.assertEqual(result[0]['tenantId'], self.tenant_id)
        self.assertEqual(result[0]['name'], 'N/A(tenant level account)')
        self.assertTrue(profile.is_tenant_level_account())
    @mock.patch('adal.AuthenticationContext', autospec=True)
    def test_create_account_with_subscriptions_allow_no_subscriptions_thru_service_principal(self, mock_auth_context):
        """test subscription is returned even with --allow-no-subscriptions. """
        mock_auth_context.acquire_token_with_client_credentials.return_value = self.token_entry1
        cli = DummyCli()
        mock_arm_client = mock.MagicMock()
        # ARM reports one real subscription, so no tenant-level placeholder is needed
        mock_arm_client.subscriptions.list.return_value = [self.subscription1]
        # stub the auth-context and ARM-client factories so no network I/O occurs
        finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
        storage_mock = {'subscriptions': []}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        profile._management_resource_uri = 'https://management.core.windows.net/'
        # action
        result = profile.find_subscriptions_on_login(False,
                                                     '1234',
                                                     'my-secret',
                                                     True,
                                                     self.tenant_id,
                                                     use_device_code=False,
                                                     allow_no_subscriptions=True,
                                                     subscription_finder=finder)
        # assert: the real subscription (not a tenant-level account) is returned
        self.assertEqual(1, len(result))
        self.assertEqual(result[0]['id'], self.id1.split('/')[-1])
        self.assertEqual(result[0]['state'], 'Enabled')
        self.assertEqual(result[0]['tenantId'], self.tenant_id)
        self.assertEqual(result[0]['name'], self.display_name1)
        self.assertFalse(profile.is_tenant_level_account())
    @mock.patch('adal.AuthenticationContext', autospec=True)
    def test_create_account_without_subscriptions_thru_common_tenant(self, mock_auth_context):
        """A user login via the common tenant that finds no subscriptions records a tenant-level account."""
        mock_auth_context.acquire_token.return_value = self.token_entry1
        mock_auth_context.acquire_token_with_username_password.return_value = self.token_entry1
        cli = DummyCli()
        # one tenant is discoverable through the common endpoint
        tenant_object = mock.MagicMock()
        tenant_object.id = "foo-bar"
        tenant_object.tenant_id = self.tenant_id
        mock_arm_client = mock.MagicMock()
        mock_arm_client.subscriptions.list.return_value = []
        # generator mimics the one-shot paged tenants listing
        mock_arm_client.tenants.list.return_value = (x for x in [tenant_object])
        finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
        storage_mock = {'subscriptions': []}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        profile._management_resource_uri = 'https://management.core.windows.net/'
        # action: no tenant given (None) -> common-tenant flow
        result = profile.find_subscriptions_on_login(False,
                                                     '1234',
                                                     'my-secret',
                                                     False,
                                                     None,
                                                     use_device_code=False,
                                                     allow_no_subscriptions=True,
                                                     subscription_finder=finder)
        # assert: a tenant-level placeholder account was created
        self.assertEqual(1, len(result))
        self.assertEqual(result[0]['id'], self.tenant_id)
        self.assertEqual(result[0]['state'], 'Enabled')
        self.assertEqual(result[0]['tenantId'], self.tenant_id)
        self.assertEqual(result[0]['name'], 'N/A(tenant level account)')
@mock.patch('adal.AuthenticationContext', autospec=True)
def test_create_account_without_subscriptions_without_tenant(self, mock_auth_context):
cli = DummyCli()
finder = mock.MagicMock()
finder.find_through_interactive_flow.return_value = []
storage_mock = {'subscriptions': []}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
# action
result = profile.find_subscriptions_on_login(True,
'1234',
'my-secret',
False,
None,
use_device_code=False,
allow_no_subscriptions=True,
subscription_finder=finder)
# assert
self.assertTrue(0 == len(result))
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
def test_get_current_account_user(self, mock_read_cred_file):
cli = DummyCli()
# setup
mock_read_cred_file.return_value = [TestProfile.token_entry1]
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1,
[self.subscription1],
False)
profile._set_subscriptions(consolidated)
# action
user = profile.get_current_account_user()
# verify
self.assertEqual(user, self.user1)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', return_value=None)
def test_create_token_cache(self, mock_read_file):
cli = DummyCli()
mock_read_file.return_value = []
profile = Profile(cli_ctx=cli, use_global_creds_cache=False, async_persist=False)
cache = profile._creds_cache.adal_token_cache
self.assertFalse(cache.read_items())
self.assertTrue(mock_read_file.called)
    @mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
    def test_load_cached_tokens(self, mock_read_file):
        """Tokens loaded from the credential file are queryable through the ADAL token cache."""
        cli = DummyCli()
        mock_read_file.return_value = [TestProfile.token_entry1]
        profile = Profile(cli_ctx=cli, use_global_creds_cache=False, async_persist=False)
        cache = profile._creds_cache.adal_token_cache
        # query by authority / client id / user, mirroring how ADAL looks tokens up
        matched = cache.find({
            "_authority": "https://login.microsoftonline.com/common",
            "_clientId": "04b07795-8ddb-461a-bbee-02f9e1bf7b46",
            "userId": self.user1
        })
        self.assertEqual(len(matched), 1)
        self.assertEqual(matched[0]['accessToken'], self.raw_token1)
    @mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
    @mock.patch('azure.cli.core._profile.CredsCache.retrieve_token_for_user', autospec=True)
    def test_get_login_credentials(self, mock_get_token, mock_read_cred_file):
        """get_login_credentials returns the active subscription and a lazy token retriever."""
        cli = DummyCli()
        some_token_type = 'Bearer'
        mock_read_cred_file.return_value = [TestProfile.token_entry1]
        mock_get_token.return_value = (some_token_type, TestProfile.raw_token1)
        # setup
        storage_mock = {'subscriptions': None}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        test_subscription_id = '12345678-1bf0-4dda-aec3-cb9272f09590'
        test_tenant_id = '12345678-38d6-4fb2-bad9-b7b93a3e1234'
        test_subscription = SubscriptionStub('/subscriptions/{}'.format(test_subscription_id),
                                             'MSI-DEV-INC', self.state1, '12345678-38d6-4fb2-bad9-b7b93a3e1234')
        consolidated = profile._normalize_properties(self.user1,
                                                     [test_subscription],
                                                     False)
        profile._set_subscriptions(consolidated)
        # action
        cred, subscription_id, _ = profile.get_login_credentials()
        # verify
        self.assertEqual(subscription_id, test_subscription_id)
        # verify the cred._tokenRetriever is a working lambda
        token_type, token = cred._token_retriever()
        self.assertEqual(token, self.raw_token1)
        self.assertEqual(some_token_type, token_type)
        # the token must have been fetched once, for the subscription's tenant, against ARM
        mock_get_token.assert_called_once_with(mock.ANY, self.user1, test_tenant_id,
                                               'https://management.core.windows.net/')
        self.assertEqual(mock_get_token.call_count, 1)
    @mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
    @mock.patch('azure.cli.core._profile.CredsCache.retrieve_token_for_user', autospec=True)
    def test_get_login_credentials_aux_subscriptions(self, mock_get_token, mock_read_cred_file):
        """Auxiliary subscriptions in another tenant get their own token via the external retriever."""
        cli = DummyCli()
        raw_token2 = 'some...secrets2'
        token_entry2 = {
            "resource": "https://management.core.windows.net/",
            "tokenType": "Bearer",
            "_authority": "https://login.microsoftonline.com/common",
            "accessToken": raw_token2,
        }
        some_token_type = 'Bearer'
        mock_read_cred_file.return_value = [TestProfile.token_entry1, token_entry2]
        # first call serves the primary tenant, second call the auxiliary tenant
        mock_get_token.side_effect = [(some_token_type, TestProfile.raw_token1), (some_token_type, raw_token2)]
        # setup: two subscriptions in two different tenants
        storage_mock = {'subscriptions': None}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        test_subscription_id = '12345678-1bf0-4dda-aec3-cb9272f09590'
        test_subscription_id2 = '12345678-1bf0-4dda-aec3-cb9272f09591'
        test_tenant_id = '12345678-38d6-4fb2-bad9-b7b93a3e1234'
        test_tenant_id2 = '12345678-38d6-4fb2-bad9-b7b93a3e4321'
        test_subscription = SubscriptionStub('/subscriptions/{}'.format(test_subscription_id),
                                             'MSI-DEV-INC', self.state1, test_tenant_id)
        test_subscription2 = SubscriptionStub('/subscriptions/{}'.format(test_subscription_id2),
                                              'MSI-DEV-INC2', self.state1, test_tenant_id2)
        consolidated = profile._normalize_properties(self.user1,
                                                     [test_subscription, test_subscription2],
                                                     False)
        profile._set_subscriptions(consolidated)
        # action
        cred, subscription_id, _ = profile.get_login_credentials(subscription_id=test_subscription_id,
                                                                 aux_subscriptions=[test_subscription_id2])
        # verify
        self.assertEqual(subscription_id, test_subscription_id)
        # verify the cred._tokenRetriever is a working lambda
        token_type, token = cred._token_retriever()
        self.assertEqual(token, self.raw_token1)
        self.assertEqual(some_token_type, token_type)
        # the external retriever returns one (type, token, ...) tuple per aux tenant
        token2 = cred._external_tenant_token_retriever()
        self.assertEqual(len(token2), 1)
        self.assertEqual(token2[0][1], raw_token2)
        self.assertEqual(mock_get_token.call_count, 2)
    @mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
    @mock.patch('msrestazure.azure_active_directory.MSIAuthentication', autospec=True)
    def test_get_login_credentials_msi_system_assigned(self, mock_msi_auth, mock_read_cred_file):
        """A system-assigned MSI subscription yields an MSI-backed credential object."""
        mock_read_cred_file.return_value = []
        # setup an existing msi subscription
        profile = Profile(cli_ctx=DummyCli(), storage={'subscriptions': None}, use_global_creds_cache=False,
                          async_persist=False)
        test_subscription_id = '12345678-1bf0-4dda-aec3-cb9272f09590'
        test_tenant_id = '12345678-38d6-4fb2-bad9-b7b93a3e1234'
        test_user = 'systemAssignedIdentity'
        # display name 'MSI' marks a system-assigned identity account
        msi_subscription = SubscriptionStub('/subscriptions/' + test_subscription_id, 'MSI', self.state1, test_tenant_id)
        consolidated = profile._normalize_properties(test_user,
                                                     [msi_subscription],
                                                     True)
        profile._set_subscriptions(consolidated)
        mock_msi_auth.side_effect = MSRestAzureAuthStub
        # action
        cred, subscription_id, _ = profile.get_login_credentials()
        # assert
        self.assertEqual(subscription_id, test_subscription_id)
        # sniff test the msi_auth object; the bare `cred.token` access deliberately
        # triggers the stub's token-read counter
        cred.set_token()
        cred.token
        self.assertTrue(cred.set_token_invoked_count)
        self.assertTrue(cred.token_read_count)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('msrestazure.azure_active_directory.MSIAuthentication', autospec=True)
def test_get_login_credentials_msi_user_assigned_with_client_id(self, mock_msi_auth, mock_read_cred_file):
mock_read_cred_file.return_value = []
# setup an existing msi subscription
profile = Profile(cli_ctx=DummyCli(), storage={'subscriptions': None}, use_global_creds_cache=False,
async_persist=False)
test_subscription_id = '12345678-1bf0-4dda-aec3-cb9272f09590'
test_tenant_id = '12345678-38d6-4fb2-bad9-b7b93a3e1234'
test_user = 'userAssignedIdentity'
test_client_id = '12345678-38d6-4fb2-bad9-b7b93a3e8888'
msi_subscription = SubscriptionStub('/subscriptions/' + test_subscription_id, 'MSIClient-{}'.format(test_client_id), self.state1, test_tenant_id)
consolidated = profile._normalize_properties(test_user, [msi_subscription], True)
profile._set_subscriptions(consolidated, secondary_key_name='name')
mock_msi_auth.side_effect = MSRestAzureAuthStub
# action
cred, subscription_id, _ = profile.get_login_credentials()
# assert
self.assertEqual(subscription_id, test_subscription_id)
# sniff test the msi_auth object
cred.set_token()
cred.token
self.assertTrue(cred.set_token_invoked_count)
self.assertTrue(cred.token_read_count)
self.assertTrue(cred.client_id, test_client_id)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('msrestazure.azure_active_directory.MSIAuthentication', autospec=True)
def test_get_login_credentials_msi_user_assigned_with_object_id(self, mock_msi_auth, mock_read_cred_file):
mock_read_cred_file.return_value = []
# setup an existing msi subscription
profile = Profile(cli_ctx=DummyCli(), storage={'subscriptions': None}, use_global_creds_cache=False,
async_persist=False)
test_subscription_id = '12345678-1bf0-4dda-aec3-cb9272f09590'
test_object_id = '12345678-38d6-4fb2-bad9-b7b93a3e9999'
msi_subscription = SubscriptionStub('/subscriptions/12345678-1bf0-4dda-aec3-cb9272f09590',
'MSIObject-{}'.format(test_object_id),
self.state1, '12345678-38d6-4fb2-bad9-b7b93a3e1234')
consolidated = profile._normalize_properties('userAssignedIdentity', [msi_subscription], True)
profile._set_subscriptions(consolidated, secondary_key_name='name')
mock_msi_auth.side_effect = MSRestAzureAuthStub
# action
cred, subscription_id, _ = profile.get_login_credentials()
# assert
self.assertEqual(subscription_id, test_subscription_id)
# sniff test the msi_auth object
cred.set_token()
cred.token
self.assertTrue(cred.set_token_invoked_count)
self.assertTrue(cred.token_read_count)
self.assertTrue(cred.object_id, test_object_id)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('msrestazure.azure_active_directory.MSIAuthentication', autospec=True)
def test_get_login_credentials_msi_user_assigned_with_res_id(self, mock_msi_auth, mock_read_cred_file):
mock_read_cred_file.return_value = []
# setup an existing msi subscription
profile = Profile(cli_ctx=DummyCli(), storage={'subscriptions': None}, use_global_creds_cache=False,
async_persist=False)
test_subscription_id = '12345678-1bf0-4dda-aec3-cb9272f09590'
test_res_id = ('/subscriptions/{}/resourceGroups/r1/providers/Microsoft.ManagedIdentity/'
'userAssignedIdentities/id1').format(test_subscription_id)
msi_subscription = SubscriptionStub('/subscriptions/{}'.format(test_subscription_id),
'MSIResource-{}'.format(test_res_id),
self.state1, '12345678-38d6-4fb2-bad9-b7b93a3e1234')
consolidated = profile._normalize_properties('userAssignedIdentity', [msi_subscription], True)
profile._set_subscriptions(consolidated, secondary_key_name='name')
mock_msi_auth.side_effect = MSRestAzureAuthStub
# action
cred, subscription_id, _ = profile.get_login_credentials()
# assert
self.assertEqual(subscription_id, test_subscription_id)
# sniff test the msi_auth object
cred.set_token()
cred.token
self.assertTrue(cred.set_token_invoked_count)
self.assertTrue(cred.token_read_count)
self.assertTrue(cred.msi_res_id, test_res_id)
    @mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
    @mock.patch('azure.cli.core._profile.CredsCache.retrieve_token_for_user', autospec=True)
    def test_get_raw_token(self, mock_get_token, mock_read_cred_file):
        """get_raw_token returns (type, token, full-entry) for a user, with or without an explicit tenant."""
        cli = DummyCli()
        some_token_type = 'Bearer'
        mock_read_cred_file.return_value = [TestProfile.token_entry1]
        mock_get_token.return_value = (some_token_type, TestProfile.raw_token1,
                                       TestProfile.token_entry1)
        # setup
        storage_mock = {'subscriptions': None}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        consolidated = profile._normalize_properties(self.user1,
                                                     [self.subscription1],
                                                     False)
        profile._set_subscriptions(consolidated)
        # action
        creds, sub, tenant = profile.get_raw_token(resource='https://foo')
        # verify
        self.assertEqual(creds[0], self.token_entry1['tokenType'])
        self.assertEqual(creds[1], self.raw_token1)
        # the last in the tuple is the whole token entry which has several fields
        self.assertEqual(creds[2]['expiresOn'], self.token_entry1['expiresOn'])
        mock_get_token.assert_called_once_with(mock.ANY, self.user1, self.tenant_id,
                                               'https://foo')
        self.assertEqual(mock_get_token.call_count, 1)
        self.assertEqual(sub, '1')
        self.assertEqual(tenant, self.tenant_id)
        # Test get_raw_token with tenant: the subscription slot is None in that case
        creds, sub, tenant = profile.get_raw_token(resource='https://foo', tenant=self.tenant_id)
        self.assertEqual(creds[0], self.token_entry1['tokenType'])
        self.assertEqual(creds[1], self.raw_token1)
        self.assertEqual(creds[2]['expiresOn'], self.token_entry1['expiresOn'])
        mock_get_token.assert_called_with(mock.ANY, self.user1, self.tenant_id, 'https://foo')
        self.assertEqual(mock_get_token.call_count, 2)
        self.assertIsNone(sub)
        self.assertEqual(tenant, self.tenant_id)
    @mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
    @mock.patch('azure.cli.core._profile.CredsCache.retrieve_token_for_service_principal', autospec=True)
    def test_get_raw_token_for_sp(self, mock_get_token, mock_read_cred_file):
        """get_raw_token for a service-principal account goes through the SP token path."""
        cli = DummyCli()
        some_token_type = 'Bearer'
        mock_read_cred_file.return_value = [TestProfile.token_entry1]
        mock_get_token.return_value = (some_token_type, TestProfile.raw_token1,
                                       TestProfile.token_entry1)
        # setup: third arg True marks the account as a service principal
        storage_mock = {'subscriptions': None}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        consolidated = profile._normalize_properties('sp1',
                                                     [self.subscription1],
                                                     True)
        profile._set_subscriptions(consolidated)
        # action
        creds, sub, tenant = profile.get_raw_token(resource='https://foo')
        # verify
        self.assertEqual(creds[0], self.token_entry1['tokenType'])
        self.assertEqual(creds[1], self.raw_token1)
        # the last in the tuple is the whole token entry which has several fields
        self.assertEqual(creds[2]['expiresOn'], self.token_entry1['expiresOn'])
        # note the SP retriever takes (sp, resource, tenant) — resource before tenant
        mock_get_token.assert_called_once_with(mock.ANY, 'sp1', 'https://foo', self.tenant_id)
        self.assertEqual(mock_get_token.call_count, 1)
        self.assertEqual(sub, '1')
        self.assertEqual(tenant, self.tenant_id)
        # Test get_raw_token with tenant: the subscription slot is None in that case
        creds, sub, tenant = profile.get_raw_token(resource='https://foo', tenant=self.tenant_id)
        self.assertEqual(creds[0], self.token_entry1['tokenType'])
        self.assertEqual(creds[1], self.raw_token1)
        self.assertEqual(creds[2]['expiresOn'], self.token_entry1['expiresOn'])
        mock_get_token.assert_called_with(mock.ANY, 'sp1', 'https://foo', self.tenant_id)
        self.assertEqual(mock_get_token.call_count, 2)
        self.assertIsNone(sub)
        self.assertEqual(tenant, self.tenant_id)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('msrestazure.azure_active_directory.MSIAuthentication', autospec=True)
def test_get_raw_token_msi_system_assigned(self, mock_msi_auth, mock_read_cred_file):
mock_read_cred_file.return_value = []
# setup an existing msi subscription
profile = Profile(cli_ctx=DummyCli(), storage={'subscriptions': None}, use_global_creds_cache=False,
async_persist=False)
test_subscription_id = '12345678-1bf0-4dda-aec3-cb9272f09590'
test_tenant_id = '12345678-38d6-4fb2-bad9-b7b93a3e1234'
test_user = 'systemAssignedIdentity'
msi_subscription = SubscriptionStub('/subscriptions/' + test_subscription_id,
'MSI', self.state1, test_tenant_id)
consolidated = profile._normalize_properties(test_user,
[msi_subscription],
True)
profile._set_subscriptions(consolidated)
mock_msi_auth.side_effect = MSRestAzureAuthStub
# action
cred, subscription_id, tenant_id = profile.get_raw_token(resource='http://test_resource')
# assert
self.assertEqual(subscription_id, test_subscription_id)
self.assertEqual(cred[0], 'Bearer')
self.assertEqual(cred[1], TestProfile.test_msi_access_token)
self.assertEqual(subscription_id, test_subscription_id)
self.assertEqual(tenant_id, test_tenant_id)
# verify tenant shouldn't be specified for MSI account
with self.assertRaisesRegexp(CLIError, "MSI"):
cred, subscription_id, _ = profile.get_raw_token(resource='http://test_resource', tenant=self.tenant_id)
@mock.patch('azure.cli.core._profile.in_cloud_console', autospec=True)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('msrestazure.azure_active_directory.MSIAuthentication', autospec=True)
def test_get_raw_token_in_cloud_console(self, mock_msi_auth, mock_read_cred_file, mock_in_cloud_console):
mock_read_cred_file.return_value = []
mock_in_cloud_console.return_value = True
# setup an existing msi subscription
profile = Profile(cli_ctx=DummyCli(), storage={'subscriptions': None}, use_global_creds_cache=False,
async_persist=False)
test_subscription_id = '12345678-1bf0-4dda-aec3-cb9272f09590'
test_tenant_id = '12345678-38d6-4fb2-bad9-b7b93a3e1234'
msi_subscription = SubscriptionStub('/subscriptions/' + test_subscription_id,
self.display_name1, self.state1, test_tenant_id)
consolidated = profile._normalize_properties(self.user1,
[msi_subscription],
True)
consolidated[0]['user']['cloudShellID'] = True
profile._set_subscriptions(consolidated)
mock_msi_auth.side_effect = MSRestAzureAuthStub
# action
cred, subscription_id, tenant_id = profile.get_raw_token(resource='http://test_resource')
# assert
self.assertEqual(subscription_id, test_subscription_id)
self.assertEqual(cred[0], 'Bearer')
self.assertEqual(cred[1], TestProfile.test_msi_access_token)
self.assertEqual(subscription_id, test_subscription_id)
self.assertEqual(tenant_id, test_tenant_id)
# verify tenant shouldn't be specified for Cloud Shell account
with self.assertRaisesRegexp(CLIError, 'Cloud Shell'):
cred, subscription_id, _ = profile.get_raw_token(resource='http://test_resource', tenant=self.tenant_id)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('azure.cli.core._profile.CredsCache.retrieve_token_for_user', autospec=True)
def test_get_login_credentials_for_graph_client(self, mock_get_token, mock_read_cred_file):
cli = DummyCli()
some_token_type = 'Bearer'
mock_read_cred_file.return_value = [TestProfile.token_entry1]
mock_get_token.return_value = (some_token_type, TestProfile.raw_token1)
# setup
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1, [self.subscription1],
False)
profile._set_subscriptions(consolidated)
# action
cred, _, tenant_id = profile.get_login_credentials(
resource=cli.cloud.endpoints.active_directory_graph_resource_id)
_, _ = cred._token_retriever()
# verify
mock_get_token.assert_called_once_with(mock.ANY, self.user1, self.tenant_id,
'https://graph.windows.net/')
self.assertEqual(tenant_id, self.tenant_id)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('azure.cli.core._profile.CredsCache.retrieve_token_for_user', autospec=True)
def test_get_login_credentials_for_data_lake_client(self, mock_get_token, mock_read_cred_file):
cli = DummyCli()
some_token_type = 'Bearer'
mock_read_cred_file.return_value = [TestProfile.token_entry1]
mock_get_token.return_value = (some_token_type, TestProfile.raw_token1)
# setup
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1, [self.subscription1],
False)
profile._set_subscriptions(consolidated)
# action
cred, _, tenant_id = profile.get_login_credentials(
resource=cli.cloud.endpoints.active_directory_data_lake_resource_id)
_, _ = cred._token_retriever()
# verify
mock_get_token.assert_called_once_with(mock.ANY, self.user1, self.tenant_id,
'https://datalake.azure.net/')
self.assertEqual(tenant_id, self.tenant_id)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('azure.cli.core._profile.CredsCache.persist_cached_creds', autospec=True)
def test_logout(self, mock_persist_creds, mock_read_cred_file):
cli = DummyCli()
# setup
mock_read_cred_file.return_value = [TestProfile.token_entry1]
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1,
[self.subscription1],
False)
profile._set_subscriptions(consolidated)
self.assertEqual(1, len(storage_mock['subscriptions']))
# action
profile.logout(self.user1)
# verify
self.assertEqual(0, len(storage_mock['subscriptions']))
self.assertEqual(mock_read_cred_file.call_count, 1)
self.assertEqual(mock_persist_creds.call_count, 1)
@mock.patch('azure.cli.core._profile._delete_file', autospec=True)
def test_logout_all(self, mock_delete_cred_file):
cli = DummyCli()
# setup
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1,
[self.subscription1],
False)
consolidated2 = profile._normalize_properties(self.user2,
[self.subscription2],
False)
profile._set_subscriptions(consolidated + consolidated2)
self.assertEqual(2, len(storage_mock['subscriptions']))
# action
profile.logout_all()
# verify
self.assertEqual([], storage_mock['subscriptions'])
self.assertEqual(mock_delete_cred_file.call_count, 1)
    @mock.patch('adal.AuthenticationContext', autospec=True)
    def test_find_subscriptions_thru_username_password(self, mock_auth_context):
        """find_from_user_account acquires tokens via username/password and lists subscriptions."""
        cli = DummyCli()
        mock_auth_context.acquire_token_with_username_password.return_value = self.token_entry1
        mock_auth_context.acquire_token.return_value = self.token_entry1
        mock_arm_client = mock.MagicMock()
        mock_arm_client.tenants.list.return_value = [TenantStub(self.tenant_id)]
        mock_arm_client.subscriptions.list.return_value = [self.subscription1]
        # inject stub factories so no network traffic occurs
        finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
        mgmt_resource = 'https://management.core.windows.net/'
        # action
        subs = finder.find_from_user_account(self.user1, 'bar', None, mgmt_resource)
        # assert
        self.assertEqual([self.subscription1], subs)
        # initial auth uses username/password; the per-tenant follow-up uses the cached token
        mock_auth_context.acquire_token_with_username_password.assert_called_once_with(
            mgmt_resource, self.user1, 'bar', mock.ANY)
        mock_auth_context.acquire_token.assert_called_once_with(
            mgmt_resource, self.user1, mock.ANY)
@mock.patch('adal.AuthenticationContext', autospec=True)
def test_find_subscriptions_thru_username_non_password(self, mock_auth_context):
cli = DummyCli()
mock_auth_context.acquire_token_with_username_password.return_value = None
finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: None)
# action
subs = finder.find_from_user_account(self.user1, 'bar', None, 'http://goo-resource')
# assert
self.assertEqual([], subs)
    @mock.patch('msrestazure.azure_active_directory.MSIAuthentication', autospec=True)
    @mock.patch('azure.cli.core.profiles._shared.get_client_class', autospec=True)
    @mock.patch('azure.cli.core._profile._get_cloud_console_token_endpoint', autospec=True)
    @mock.patch('azure.cli.core._profile.SubscriptionFinder', autospec=True)
    def test_find_subscriptions_in_cloud_console(self, mock_subscription_finder, mock_get_token_endpoint,
                                                 mock_get_client_class, mock_msi_auth):
        """Cloud Shell login uses the console's MSI token and tags accounts with cloudShellID."""
        class SubscriptionFinderStub:
            # stands in for the real finder; validates the token plumbed through from MSI auth
            def find_from_raw_token(self, tenant, token):
                # make sure the tenant and token args match 'TestProfile.test_msi_access_token'
                if token != TestProfile.test_msi_access_token or tenant != '54826b22-38d6-4fb2-bad9-b7b93a3e9c5a':
                    raise AssertionError('find_from_raw_token was not invoked with expected tenant or token')
                return [TestProfile.subscription1]

        mock_subscription_finder.return_value = SubscriptionFinderStub()
        mock_get_token_endpoint.return_value = "http://great_endpoint"
        mock_msi_auth.return_value = MSRestAzureAuthStub()
        profile = Profile(cli_ctx=DummyCli(), storage={'subscriptions': None}, use_global_creds_cache=False,
                          async_persist=False)
        # action
        subscriptions = profile.find_subscriptions_in_cloud_console()
        # assert: the account is a user account flagged as a Cloud Shell identity
        self.assertEqual(len(subscriptions), 1)
        s = subscriptions[0]
        self.assertEqual(s['user']['name'], 'admin3@AzureSDKTeam.onmicrosoft.com')
        self.assertEqual(s['user']['cloudShellID'], True)
        self.assertEqual(s['user']['type'], 'user')
        self.assertEqual(s['name'], self.display_name1)
        self.assertEqual(s['id'], self.id1.split('/')[-1])
    @mock.patch('requests.get', autospec=True)
    @mock.patch('azure.cli.core.profiles._shared.get_client_class', autospec=True)
    def test_find_subscriptions_in_vm_with_msi_system_assigned(self, mock_get_client_class, mock_get):
        """VM MSI login (system-assigned) turns the IMDS token response into a subscription account."""
        class ClientStub:
            # minimal stand-in for the ARM SubscriptionClient
            def __init__(self, *args, **kwargs):
                self.subscriptions = mock.MagicMock()
                self.subscriptions.list.return_value = [TestProfile.subscription1]
                self.config = mock.MagicMock()
                self._client = mock.MagicMock()

        mock_get_client_class.return_value = ClientStub
        cli = DummyCli()
        storage_mock = {'subscriptions': None}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        # fake the IMDS token endpoint response
        test_token_entry = {
            'token_type': 'Bearer',
            'access_token': TestProfile.test_msi_access_token
        }
        encoded_test_token = json.dumps(test_token_entry).encode()
        good_response = mock.MagicMock()
        good_response.status_code = 200
        good_response.content = encoded_test_token
        mock_get.return_value = good_response

        subscriptions = profile.find_subscriptions_in_vm_with_msi()

        # assert: account is recorded as a system-assigned MSI service principal
        self.assertEqual(len(subscriptions), 1)
        s = subscriptions[0]
        self.assertEqual(s['user']['name'], 'systemAssignedIdentity')
        self.assertEqual(s['user']['type'], 'servicePrincipal')
        self.assertEqual(s['user']['assignedIdentityInfo'], 'MSI')
        self.assertEqual(s['name'], self.display_name1)
        self.assertEqual(s['id'], self.id1.split('/')[-1])
        self.assertEqual(s['tenantId'], '54826b22-38d6-4fb2-bad9-b7b93a3e9c5a')
    @mock.patch('requests.get', autospec=True)
    @mock.patch('azure.cli.core.profiles._shared.get_client_class', autospec=True)
    def test_find_subscriptions_in_vm_with_msi_no_subscriptions(self, mock_get_client_class, mock_get):
        """With allow_no_subscriptions=True and an empty subscription list, MSI
        discovery still yields one tenant-level placeholder account."""
        class ClientStub:
            # ARM client stub whose subscription listing is empty.
            def __init__(self, *args, **kwargs):
                self.subscriptions = mock.MagicMock()
                self.subscriptions.list.return_value = []
                self.config = mock.MagicMock()
                self._client = mock.MagicMock()
        mock_get_client_class.return_value = ClientStub
        cli = DummyCli()
        storage_mock = {'subscriptions': None}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        test_token_entry = {
            'token_type': 'Bearer',
            'access_token': TestProfile.test_msi_access_token
        }
        encoded_test_token = json.dumps(test_token_entry).encode()
        good_response = mock.MagicMock()
        good_response.status_code = 200
        good_response.content = encoded_test_token
        mock_get.return_value = good_response
        subscriptions = profile.find_subscriptions_in_vm_with_msi(allow_no_subscriptions=True)
        # assert
        self.assertEqual(len(subscriptions), 1)
        s = subscriptions[0]
        self.assertEqual(s['user']['name'], 'systemAssignedIdentity')
        self.assertEqual(s['user']['type'], 'servicePrincipal')
        self.assertEqual(s['user']['assignedIdentityInfo'], 'MSI')
        # Placeholder account: name is fixed and id equals the MSI tenant.
        self.assertEqual(s['name'], 'N/A(tenant level account)')
        self.assertEqual(s['id'], self.test_msi_tenant)
        self.assertEqual(s['tenantId'], self.test_msi_tenant)
    @mock.patch('requests.get', autospec=True)
    @mock.patch('azure.cli.core.profiles._shared.get_client_class', autospec=True)
    def test_find_subscriptions_in_vm_with_msi_user_assigned_with_client_id(self, mock_get_client_class, mock_get):
        """Passing identity_id as a client id marks the account as a user-assigned
        identity with assignedIdentityInfo 'MSIClient-<client_id>'."""
        class ClientStub:
            # Minimal ARM client returning one canned subscription.
            def __init__(self, *args, **kwargs):
                self.subscriptions = mock.MagicMock()
                self.subscriptions.list.return_value = [TestProfile.subscription1]
                self.config = mock.MagicMock()
                self._client = mock.MagicMock()
        mock_get_client_class.return_value = ClientStub
        cli = DummyCli()
        storage_mock = {'subscriptions': None}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        test_token_entry = {
            'token_type': 'Bearer',
            'access_token': TestProfile.test_msi_access_token
        }
        test_client_id = '54826b22-38d6-4fb2-bad9-b7b93a3e9999'
        encoded_test_token = json.dumps(test_token_entry).encode()
        good_response = mock.MagicMock()
        good_response.status_code = 200
        good_response.content = encoded_test_token
        mock_get.return_value = good_response
        subscriptions = profile.find_subscriptions_in_vm_with_msi(identity_id=test_client_id)
        # assert
        self.assertEqual(len(subscriptions), 1)
        s = subscriptions[0]
        self.assertEqual(s['user']['name'], 'userAssignedIdentity')
        self.assertEqual(s['user']['type'], 'servicePrincipal')
        self.assertEqual(s['name'], self.display_name1)
        self.assertEqual(s['user']['assignedIdentityInfo'], 'MSIClient-{}'.format(test_client_id))
        self.assertEqual(s['id'], self.id1.split('/')[-1])
        self.assertEqual(s['tenantId'], '54826b22-38d6-4fb2-bad9-b7b93a3e9c5a')
    @mock.patch('msrestazure.azure_active_directory.MSIAuthentication', autospec=True)
    @mock.patch('azure.cli.core.profiles._shared.get_client_class', autospec=True)
    @mock.patch('azure.cli.core._profile.SubscriptionFinder', autospec=True)
    def test_find_subscriptions_in_vm_with_msi_user_assigned_with_object_id(self, mock_subscription_finder, mock_get_client_class,
                                                                            mock_msi_auth):
        """Passing identity_id as an object id: the client_id probe raises HTTP 400,
        the object_id probe succeeds, and the account is labelled 'MSIObject-<id>'."""
        from requests import HTTPError
        class SubscriptionFinderStub:
            def find_from_raw_token(self, tenant, token):
                # make sure the tenant and token args match 'TestProfile.test_msi_access_token'
                if token != TestProfile.test_msi_access_token or tenant != '54826b22-38d6-4fb2-bad9-b7b93a3e9c5a':
                    raise AssertionError('find_from_raw_token was not invoked with expected tenant or token')
                return [TestProfile.subscription1]
        class AuthStub:
            def __init__(self, **kwargs):
                self.token = None
                self.client_id = kwargs.get('client_id')
                self.object_id = kwargs.get('object_id')
                # since msrestazure 0.4.34, set_token in init
                self.set_token()
            def set_token(self):
                # here we will reject the 1st sniffing of trying with client_id and then acccept the 2nd
                if self.object_id:
                    self.token = {
                        'token_type': 'Bearer',
                        'access_token': TestProfile.test_msi_access_token
                    }
                else:
                    mock_obj = mock.MagicMock()
                    mock_obj.status, mock_obj.reason = 400, 'Bad Request'
                    raise HTTPError(response=mock_obj)
        profile = Profile(cli_ctx=DummyCli(), storage={'subscriptions': None}, use_global_creds_cache=False,
                          async_persist=False)
        mock_subscription_finder.return_value = SubscriptionFinderStub()
        mock_msi_auth.side_effect = AuthStub
        test_object_id = '54826b22-38d6-4fb2-bad9-b7b93a3e9999'
        # action
        subscriptions = profile.find_subscriptions_in_vm_with_msi(identity_id=test_object_id)
        # assert
        self.assertEqual(subscriptions[0]['user']['assignedIdentityInfo'], 'MSIObject-{}'.format(test_object_id))
    @mock.patch('requests.get', autospec=True)
    @mock.patch('azure.cli.core.profiles._shared.get_client_class', autospec=True)
    def test_find_subscriptions_in_vm_with_msi_user_assigned_with_res_id(self, mock_get_client_class, mock_get):
        """Passing identity_id as an ARM resource id labels the account
        'MSIResource-<resource_id>'."""
        class ClientStub:
            # Minimal ARM client returning one canned subscription.
            def __init__(self, *args, **kwargs):
                self.subscriptions = mock.MagicMock()
                self.subscriptions.list.return_value = [TestProfile.subscription1]
                self.config = mock.MagicMock()
                self._client = mock.MagicMock()
        mock_get_client_class.return_value = ClientStub
        cli = DummyCli()
        storage_mock = {'subscriptions': None}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        test_token_entry = {
            'token_type': 'Bearer',
            'access_token': TestProfile.test_msi_access_token
        }
        test_res_id = ('/subscriptions/0b1f6471-1bf0-4dda-aec3-cb9272f09590/resourcegroups/g1/'
                       'providers/Microsoft.ManagedIdentity/userAssignedIdentities/id1')
        encoded_test_token = json.dumps(test_token_entry).encode()
        good_response = mock.MagicMock()
        good_response.status_code = 200
        good_response.content = encoded_test_token
        mock_get.return_value = good_response
        subscriptions = profile.find_subscriptions_in_vm_with_msi(identity_id=test_res_id)
        # assert
        self.assertEqual(subscriptions[0]['user']['assignedIdentityInfo'], 'MSIResource-{}'.format(test_res_id))
    @mock.patch('adal.AuthenticationContext.acquire_token_with_username_password', autospec=True)
    @mock.patch('adal.AuthenticationContext.acquire_token', autospec=True)
    def test_find_subscriptions_thru_username_password_adfs(self, mock_acquire_token,
                                                            mock_acquire_token_username_password):
        """Username/password login against an ADFS cloud initializes the adal
        AuthenticationContext with the ADFS authority URL."""
        cli = DummyCli()
        TEST_ADFS_AUTH_URL = 'https://adfs.local.azurestack.external/adfs'
        def test_acquire_token(self, resource, username, password, client_id):
            # Record the call via a module-level flag so the outer test can assert on it.
            global acquire_token_invoked
            acquire_token_invoked = True
            # Only succeed if the AuthenticationContext was built for the ADFS authority.
            if (self.authority.url == TEST_ADFS_AUTH_URL and self.authority.is_adfs_authority):
                return TestProfile.token_entry1
            else:
                raise ValueError('AuthContext was not initialized correctly for ADFS')
        mock_acquire_token_username_password.side_effect = test_acquire_token
        mock_acquire_token.return_value = self.token_entry1
        mock_arm_client = mock.MagicMock()
        mock_arm_client.tenants.list.return_value = [TenantStub(self.tenant_id)]
        mock_arm_client.subscriptions.list.return_value = [self.subscription1]
        cli.cloud.endpoints.active_directory = TEST_ADFS_AUTH_URL
        finder = SubscriptionFinder(cli, _AUTH_CTX_FACTORY, None, lambda _: mock_arm_client)
        mgmt_resource = 'https://management.core.windows.net/'
        # action
        subs = finder.find_from_user_account(self.user1, 'bar', None, mgmt_resource)
        # assert
        self.assertEqual([self.subscription1], subs)
        self.assertTrue(acquire_token_invoked)
    @mock.patch('adal.AuthenticationContext', autospec=True)
    @mock.patch('azure.cli.core._profile.logger', autospec=True)
    def test_find_subscriptions_thru_username_password_with_account_disabled(self, mock_logger, mock_auth_context):
        """An AdalError('Account is disabled') during token acquisition yields an
        empty subscription list plus exactly one warning log."""
        cli = DummyCli()
        mock_auth_context.acquire_token_with_username_password.return_value = self.token_entry1
        mock_auth_context.acquire_token.side_effect = AdalError('Account is disabled')
        mock_arm_client = mock.MagicMock()
        mock_arm_client.tenants.list.return_value = [TenantStub(self.tenant_id)]
        finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
        mgmt_resource = 'https://management.core.windows.net/'
        # action
        subs = finder.find_from_user_account(self.user1, 'bar', None, mgmt_resource)
        # assert
        self.assertEqual([], subs)
        mock_logger.warning.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY)
    @mock.patch('adal.AuthenticationContext', autospec=True)
    def test_find_subscriptions_from_particular_tenent(self, mock_auth_context):
        """When an explicit tenant is supplied, the finder must not enumerate
        tenants (tenants.list raising proves it was never called).
        NOTE: the 'tenent' typo is preserved to keep the public test name stable."""
        def just_raise(ex):
            raise ex
        cli = DummyCli()
        mock_arm_client = mock.MagicMock()
        mock_arm_client.tenants.list.side_effect = lambda: just_raise(
            ValueError("'tenants.list' should not occur"))
        mock_arm_client.subscriptions.list.return_value = [self.subscription1]
        finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
        # action
        subs = finder.find_from_user_account(self.user1, 'bar', self.tenant_id, 'http://someresource')
        # assert
        self.assertEqual([self.subscription1], subs)
    @mock.patch('adal.AuthenticationContext', autospec=True)
    def test_find_subscriptions_through_device_code_flow(self, mock_auth_context):
        """Device-code flow: user code is acquired, exchanged for a token, and the
        resulting subscriptions are returned; each adal call happens exactly once."""
        cli = DummyCli()
        test_nonsense_code = {'message': 'magic code for you'}
        mock_auth_context.acquire_user_code.return_value = test_nonsense_code
        mock_auth_context.acquire_token_with_device_code.return_value = self.token_entry1
        mock_arm_client = mock.MagicMock()
        mock_arm_client.tenants.list.return_value = [TenantStub(self.tenant_id)]
        mock_arm_client.subscriptions.list.return_value = [self.subscription1]
        finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
        mgmt_resource = 'https://management.core.windows.net/'
        # action
        subs = finder.find_through_interactive_flow(None, mgmt_resource)
        # assert
        self.assertEqual([self.subscription1], subs)
        mock_auth_context.acquire_user_code.assert_called_once_with(
            mgmt_resource, mock.ANY)
        mock_auth_context.acquire_token_with_device_code.assert_called_once_with(
            mgmt_resource, test_nonsense_code, mock.ANY)
        mock_auth_context.acquire_token.assert_called_once_with(
            mgmt_resource, self.user1, mock.ANY)
    @mock.patch('adal.AuthenticationContext', autospec=True)
    @mock.patch('azure.cli.core._profile._get_authorization_code', autospec=True)
    def test_find_subscriptions_through_authorization_code_flow(self, _get_authorization_code_mock, mock_auth_context):
        """Authorization-code flow: the auth code from the local redirect is
        exchanged for a token and subscriptions are listed from it."""
        import adal
        cli = DummyCli()
        mock_arm_client = mock.MagicMock()
        mock_arm_client.tenants.list.return_value = [TenantStub(self.tenant_id)]
        mock_arm_client.subscriptions.list.return_value = [self.subscription1]
        token_cache = adal.TokenCache()
        finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, token_cache, lambda _: mock_arm_client)
        _get_authorization_code_mock.return_value = {
            'code': 'code1',
            'reply_url': 'http://localhost:8888'
        }
        mgmt_resource = 'https://management.core.windows.net/'
        # Attach an empty token cache to the mocked AuthenticationContext class.
        temp_token_cache = mock.MagicMock()
        type(mock_auth_context).cache = temp_token_cache
        temp_token_cache.read_items.return_value = []
        mock_auth_context.acquire_token_with_authorization_code.return_value = self.token_entry1
        # action
        subs = finder.find_through_authorization_code_flow(None, mgmt_resource, 'https:/some_aad_point/common')
        # assert
        self.assertEqual([self.subscription1], subs)
        mock_auth_context.acquire_token.assert_called_once_with(mgmt_resource, self.user1, mock.ANY)
        mock_auth_context.acquire_token_with_authorization_code.assert_called_once_with('code1',
                                                                                        'http://localhost:8888',
                                                                                        mgmt_resource, mock.ANY,
                                                                                        None)
        _get_authorization_code_mock.assert_called_once_with(mgmt_resource, 'https:/some_aad_point/common')
    @mock.patch('adal.AuthenticationContext', autospec=True)
    def test_find_subscriptions_interactive_from_particular_tenent(self, mock_auth_context):
        """Interactive flow with an explicit tenant must not enumerate tenants
        (tenants.list raising proves it was never called)."""
        def just_raise(ex):
            raise ex
        cli = DummyCli()
        mock_arm_client = mock.MagicMock()
        mock_arm_client.tenants.list.side_effect = lambda: just_raise(
            ValueError("'tenants.list' should not occur"))
        mock_arm_client.subscriptions.list.return_value = [self.subscription1]
        finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
        # action
        subs = finder.find_through_interactive_flow(self.tenant_id, 'http://someresource')
        # assert
        self.assertEqual([self.subscription1], subs)
    @mock.patch('adal.AuthenticationContext', autospec=True)
    def test_find_subscriptions_from_service_principal_id(self, mock_auth_context):
        """Secret-based service principal login uses client-credential token
        acquisition only; no tenant listing or user token calls occur."""
        cli = DummyCli()
        mock_auth_context.acquire_token_with_client_credentials.return_value = self.token_entry1
        mock_arm_client = mock.MagicMock()
        mock_arm_client.subscriptions.list.return_value = [self.subscription1]
        finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
        mgmt_resource = 'https://management.core.windows.net/'
        # action
        subs = finder.find_from_service_principal_id('my app', ServicePrincipalAuth('my secret'),
                                                     self.tenant_id, mgmt_resource)
        # assert
        self.assertEqual([self.subscription1], subs)
        mock_arm_client.tenants.list.assert_not_called()
        mock_auth_context.acquire_token.assert_not_called()
        mock_auth_context.acquire_token_with_client_credentials.assert_called_once_with(
            mgmt_resource, 'my app', 'my secret')
    @mock.patch('adal.AuthenticationContext', autospec=True)
    def test_find_subscriptions_from_service_principal_using_cert(self, mock_auth_context):
        """Certificate-based service principal login acquires a token via
        client-certificate credentials (no SN+issuer public cert passed)."""
        cli = DummyCli()
        mock_auth_context.acquire_token_with_client_certificate.return_value = self.token_entry1
        mock_arm_client = mock.MagicMock()
        mock_arm_client.subscriptions.list.return_value = [self.subscription1]
        finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
        mgmt_resource = 'https://management.core.windows.net/'
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        test_cert_file = os.path.join(curr_dir, 'sp_cert.pem')
        # action
        subs = finder.find_from_service_principal_id('my app', ServicePrincipalAuth(test_cert_file),
                                                     self.tenant_id, mgmt_resource)
        # assert
        self.assertEqual([self.subscription1], subs)
        mock_arm_client.tenants.list.assert_not_called()
        mock_auth_context.acquire_token.assert_not_called()
        # Last positional arg None: no public certificate supplied.
        mock_auth_context.acquire_token_with_client_certificate.assert_called_once_with(
            mgmt_resource, 'my app', mock.ANY, mock.ANY, None)
    @mock.patch('adal.AuthenticationContext', autospec=True)
    def test_find_subscriptions_from_service_principal_using_cert_sn_issuer(self, mock_auth_context):
        """With use_cert_sn_issuer=True the PEM's public certificate body is
        forwarded to adal's client-certificate token call."""
        cli = DummyCli()
        mock_auth_context.acquire_token_with_client_certificate.return_value = self.token_entry1
        mock_arm_client = mock.MagicMock()
        mock_arm_client.subscriptions.list.return_value = [self.subscription1]
        finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
        mgmt_resource = 'https://management.core.windows.net/'
        curr_dir = os.path.dirname(os.path.realpath(__file__))
        test_cert_file = os.path.join(curr_dir, 'sp_cert.pem')
        with open(test_cert_file) as cert_file:
            cert_file_string = cert_file.read()
        # Extract the base64 body between the BEGIN/END CERTIFICATE markers.
        match = re.search(r'\-+BEGIN CERTIFICATE.+\-+(?P<public>[^-]+)\-+END CERTIFICATE.+\-+',
                          cert_file_string, re.I)
        public_certificate = match.group('public').strip()
        # action
        subs = finder.find_from_service_principal_id('my app', ServicePrincipalAuth(test_cert_file, use_cert_sn_issuer=True),
                                                     self.tenant_id, mgmt_resource)
        # assert
        self.assertEqual([self.subscription1], subs)
        mock_arm_client.tenants.list.assert_not_called()
        mock_auth_context.acquire_token.assert_not_called()
        mock_auth_context.acquire_token_with_client_certificate.assert_called_once_with(
            mgmt_resource, 'my app', mock.ANY, mock.ANY, public_certificate)
    @mock.patch('adal.AuthenticationContext', autospec=True)
    def test_refresh_accounts_one_user_account(self, mock_auth_context):
        """Refreshing a single user account replaces the stored list with the two
        subscriptions now visible, keeping the first as default."""
        cli = DummyCli()
        storage_mock = {'subscriptions': None}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        consolidated = profile._normalize_properties(self.user1, deepcopy([self.subscription1]), False)
        profile._set_subscriptions(consolidated)
        mock_auth_context.acquire_token_with_username_password.return_value = self.token_entry1
        mock_auth_context.acquire_token.return_value = self.token_entry1
        mock_arm_client = mock.MagicMock()
        mock_arm_client.tenants.list.return_value = [TenantStub(self.tenant_id)]
        mock_arm_client.subscriptions.list.return_value = deepcopy([self.subscription1, self.subscription2])
        finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
        # action
        profile.refresh_accounts(finder)
        # assert
        result = storage_mock['subscriptions']
        self.assertEqual(2, len(result))
        self.assertEqual(self.id1.split('/')[-1], result[0]['id'])
        self.assertEqual(self.id2.split('/')[-1], result[1]['id'])
        self.assertTrue(result[0]['isDefault'])
    @mock.patch('adal.AuthenticationContext', autospec=True)
    def test_refresh_accounts_one_user_account_one_sp_account(self, mock_auth_context):
        """Refreshing a mixed user + service-principal profile merges subscriptions
        from both credentials into one stored list of three accounts."""
        cli = DummyCli()
        storage_mock = {'subscriptions': None}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        sp_subscription1 = SubscriptionStub('sp-sub/3', 'foo-subname', self.state1, 'foo_tenant.onmicrosoft.com')
        consolidated = profile._normalize_properties(self.user1, deepcopy([self.subscription1]), False)
        consolidated += profile._normalize_properties('http://foo', [sp_subscription1], True)
        profile._set_subscriptions(consolidated)
        mock_auth_context.acquire_token_with_username_password.return_value = self.token_entry1
        mock_auth_context.acquire_token.return_value = self.token_entry1
        mock_auth_context.acquire_token_with_client_credentials.return_value = self.token_entry1
        mock_arm_client = mock.MagicMock()
        mock_arm_client.tenants.list.return_value = [TenantStub(self.tenant_id)]
        # side_effect: first list() call serves the user account, second serves the SP.
        mock_arm_client.subscriptions.list.side_effect = deepcopy([[self.subscription1], [self.subscription2, sp_subscription1]])
        finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
        profile._creds_cache.retrieve_secret_of_service_principal = lambda _: 'verySecret'
        profile._creds_cache.flush_to_disk = lambda _: ''
        # action
        profile.refresh_accounts(finder)
        # assert
        result = storage_mock['subscriptions']
        self.assertEqual(3, len(result))
        self.assertEqual(self.id1.split('/')[-1], result[0]['id'])
        self.assertEqual(self.id2.split('/')[-1], result[1]['id'])
        self.assertEqual('3', result[2]['id'])
        self.assertTrue(result[0]['isDefault'])
    @mock.patch('adal.AuthenticationContext', autospec=True)
    def test_refresh_accounts_with_nothing(self, mock_auth_context):
        """If the refreshed listing is empty, the stored subscription list
        becomes empty too."""
        cli = DummyCli()
        storage_mock = {'subscriptions': None}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        consolidated = profile._normalize_properties(self.user1, deepcopy([self.subscription1]), False)
        profile._set_subscriptions(consolidated)
        mock_auth_context.acquire_token_with_username_password.return_value = self.token_entry1
        mock_auth_context.acquire_token.return_value = self.token_entry1
        mock_arm_client = mock.MagicMock()
        mock_arm_client.tenants.list.return_value = [TenantStub(self.tenant_id)]
        mock_arm_client.subscriptions.list.return_value = []
        finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
        # action
        profile.refresh_accounts(finder)
        # assert
        result = storage_mock['subscriptions']
        self.assertEqual(0, len(result))
    @mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
    def test_credscache_load_tokens_and_sp_creds_with_secret(self, mock_read_file):
        """Loading a cache file containing a user token and a secret-based SP entry
        splits them into the adal token cache and the SP credential list."""
        cli = DummyCli()
        test_sp = {
            "servicePrincipalId": "myapp",
            "servicePrincipalTenant": "mytenant",
            "accessToken": "Secret"
        }
        mock_read_file.return_value = [self.token_entry1, test_sp]
        # action
        creds_cache = CredsCache(cli, async_persist=False)
        # assert
        token_entries = [entry for _, entry in creds_cache.load_adal_token_cache().read_items()]
        self.assertEqual(token_entries, [self.token_entry1])
        self.assertEqual(creds_cache._service_principal_creds, [test_sp])
    @mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
    def test_credscache_load_tokens_and_sp_creds_with_cert(self, mock_read_file):
        """A certificate-based SP entry in the cache file is loaded into the SP
        credential list."""
        cli = DummyCli()
        test_sp = {
            "servicePrincipalId": "myapp",
            "servicePrincipalTenant": "mytenant",
            "certificateFile": 'junkcert.pem'
        }
        mock_read_file.return_value = [test_sp]
        # action
        creds_cache = CredsCache(cli, async_persist=False)
        creds_cache.load_adal_token_cache()
        # assert
        self.assertEqual(creds_cache._service_principal_creds, [test_sp])
    @mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
    def test_credscache_retrieve_sp_secret_with_cert(self, mock_read_file):
        """Asking for the secret of a certificate-based SP returns None (cert
        entries carry no accessToken)."""
        cli = DummyCli()
        test_sp = {
            "servicePrincipalId": "myapp",
            "servicePrincipalTenant": "mytenant",
            "certificateFile": 'junkcert.pem'
        }
        mock_read_file.return_value = [test_sp]
        # action
        creds_cache = CredsCache(cli, async_persist=False)
        creds_cache.load_adal_token_cache()
        # assert
        self.assertEqual(creds_cache.retrieve_secret_of_service_principal(test_sp['servicePrincipalId']), None)
    @mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
    @mock.patch('os.fdopen', autospec=True)
    @mock.patch('os.open', autospec=True)
    def test_credscache_add_new_sp_creds(self, _, mock_open_for_write, mock_read_file):
        """Saving a brand-new SP credential appends it to the list and persists
        the cache to disk (file opened 'w+')."""
        cli = DummyCli()
        test_sp = {
            "servicePrincipalId": "myapp",
            "servicePrincipalTenant": "mytenant",
            "accessToken": "Secret"
        }
        test_sp2 = {
            "servicePrincipalId": "myapp2",
            "servicePrincipalTenant": "mytenant2",
            "accessToken": "Secret2"
        }
        mock_open_for_write.return_value = FileHandleStub()
        mock_read_file.return_value = [self.token_entry1, test_sp]
        creds_cache = CredsCache(cli, async_persist=False)
        # action
        creds_cache.save_service_principal_cred(test_sp2)
        # assert
        token_entries = [e for _, e in creds_cache.adal_token_cache.read_items()]  # noqa: F812
        self.assertEqual(token_entries, [self.token_entry1])
        self.assertEqual(creds_cache._service_principal_creds, [test_sp, test_sp2])
        mock_open_for_write.assert_called_with(mock.ANY, 'w+')
    @mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
    @mock.patch('os.fdopen', autospec=True)
    @mock.patch('os.open', autospec=True)
    def test_credscache_add_preexisting_sp_creds(self, _, mock_open_for_write, mock_read_file):
        """Re-saving an identical SP credential is a no-op: no duplicate entry
        and no disk write."""
        cli = DummyCli()
        test_sp = {
            "servicePrincipalId": "myapp",
            "servicePrincipalTenant": "mytenant",
            "accessToken": "Secret"
        }
        mock_open_for_write.return_value = FileHandleStub()
        mock_read_file.return_value = [test_sp]
        creds_cache = CredsCache(cli, async_persist=False)
        # action
        creds_cache.save_service_principal_cred(test_sp)
        # assert
        self.assertEqual(creds_cache._service_principal_creds, [test_sp])
        self.assertFalse(mock_open_for_write.called)
    @mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
    @mock.patch('os.fdopen', autospec=True)
    @mock.patch('os.open', autospec=True)
    def test_credscache_add_preexisting_sp_new_secret(self, _, mock_open_for_write, mock_read_file):
        """Saving an existing SP with a changed secret replaces the stored entry
        and writes the cache back to disk."""
        cli = DummyCli()
        test_sp = {
            "servicePrincipalId": "myapp",
            "servicePrincipalTenant": "mytenant",
            "accessToken": "Secret"
        }
        mock_open_for_write.return_value = FileHandleStub()
        mock_read_file.return_value = [test_sp]
        creds_cache = CredsCache(cli, async_persist=False)
        new_creds = test_sp.copy()
        new_creds['accessToken'] = 'Secret2'
        # action
        creds_cache.save_service_principal_cred(new_creds)
        # assert
        self.assertEqual(creds_cache._service_principal_creds, [new_creds])
        self.assertTrue(mock_open_for_write.called)
    @mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
    @mock.patch('os.fdopen', autospec=True)
    @mock.patch('os.open', autospec=True)
    def test_credscache_match_service_principal_correctly(self, _, mock_open_for_write, mock_read_file):
        """SP lookup matches by app id; a planted factory exception proves the
        credential was found even when the tenant only loosely matches."""
        cli = DummyCli()
        test_sp = {
            "servicePrincipalId": "myapp",
            "servicePrincipalTenant": "mytenant",
            "accessToken": "Secret"
        }
        mock_open_for_write.return_value = FileHandleStub()
        mock_read_file.return_value = [test_sp]
        factory = mock.MagicMock()
        factory.side_effect = ValueError('SP was found')
        creds_cache = CredsCache(cli, factory, async_persist=False)
        # action and verify(we plant an exception to throw after the SP was found; so if the exception is thrown,
        # we know the matching did go through)
        self.assertRaises(ValueError, creds_cache.retrieve_token_for_service_principal,
                          'myapp', 'resource1', 'mytenant', False)
        # tenant doesn't exactly match, but it still succeeds
        # before fully migrating to pytest and utilizing capsys fixture, use `pytest -o log_cli=True` to manually
        # verify the warning log
        self.assertRaises(ValueError, creds_cache.retrieve_token_for_service_principal,
                          'myapp', 'resource1', 'mytenant2', False)
    @mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
    @mock.patch('os.fdopen', autospec=True)
    @mock.patch('os.open', autospec=True)
    def test_credscache_remove_creds(self, _, mock_open_for_write, mock_read_file):
        """Logging out a user clears their token entries; logging out an SP clears
        its credentials; each removal persists the cache (two writes total)."""
        cli = DummyCli()
        test_sp = {
            "servicePrincipalId": "myapp",
            "servicePrincipalTenant": "mytenant",
            "accessToken": "Secret"
        }
        mock_open_for_write.return_value = FileHandleStub()
        mock_read_file.return_value = [self.token_entry1, test_sp]
        creds_cache = CredsCache(cli, async_persist=False)
        # action #1, logout a user
        creds_cache.remove_cached_creds(self.user1)
        # assert #1
        token_entries = [e for _, e in creds_cache.adal_token_cache.read_items()]  # noqa: F812
        self.assertEqual(token_entries, [])
        # action #2 logout a service principal
        creds_cache.remove_cached_creds('myapp')
        # assert #2
        self.assertEqual(creds_cache._service_principal_creds, [])
        mock_open_for_write.assert_called_with(mock.ANY, 'w+')
        self.assertEqual(mock_open_for_write.call_count, 2)
    @mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
    @mock.patch('os.fdopen', autospec=True)
    @mock.patch('os.open', autospec=True)
    @mock.patch('adal.AuthenticationContext', autospec=True)
    def test_credscache_new_token_added_by_adal(self, mock_adal_auth_context, _, mock_open_for_write, mock_read_file):  # pylint: disable=line-too-long
        """When adal refreshes a token (cache marked changed), CredsCache persists
        the cache to disk and returns the new token."""
        cli = DummyCli()
        token_entry2 = {
            "accessToken": "new token",
            "tokenType": "Bearer",
            "userId": self.user1
        }
        def acquire_token_side_effect(*args):  # pylint: disable=unused-argument
            # Simulate adal updating its cache, which should trigger a flush.
            creds_cache.adal_token_cache.has_state_changed = True
            return token_entry2
        def get_auth_context(_, authority, **kwargs):  # pylint: disable=unused-argument
            mock_adal_auth_context.cache = kwargs['cache']
            return mock_adal_auth_context
        mock_adal_auth_context.acquire_token.side_effect = acquire_token_side_effect
        mock_open_for_write.return_value = FileHandleStub()
        mock_read_file.return_value = [self.token_entry1]
        creds_cache = CredsCache(cli, auth_ctx_factory=get_auth_context, async_persist=False)
        # action
        mgmt_resource = 'https://management.core.windows.net/'
        token_type, token, _ = creds_cache.retrieve_token_for_user(self.user1, self.tenant_id,
                                                                   mgmt_resource)
        mock_adal_auth_context.acquire_token.assert_called_once_with(
            'https://management.core.windows.net/',
            self.user1,
            mock.ANY)
        # assert
        mock_open_for_write.assert_called_with(mock.ANY, 'w+')
        self.assertEqual(token, 'new token')
        self.assertEqual(token_type, token_entry2['tokenType'])
    @mock.patch('azure.cli.core._profile.get_file_json', autospec=True)
    def test_credscache_good_error_on_file_corruption(self, mock_read_file):
        """A corrupt cache file surfaces as a CLIError whose message includes the
        underlying parse error."""
        mock_read_file.side_effect = ValueError('a bad error for you')
        cli = DummyCli()
        # action
        creds_cache = CredsCache(cli, async_persist=False)
        # assert
        with self.assertRaises(CLIError) as context:
            creds_cache.load_adal_token_cache()
        self.assertTrue(re.findall(r'bad error for you', str(context.exception)))
def test_service_principal_auth_client_secret(self):
sp_auth = ServicePrincipalAuth('verySecret!')
result = sp_auth.get_entry_to_persist('sp_id1', 'tenant1')
self.assertEqual(result, {
'servicePrincipalId': 'sp_id1',
'servicePrincipalTenant': 'tenant1',
'accessToken': 'verySecret!'
})
def test_service_principal_auth_client_cert(self):
curr_dir = os.path.dirname(os.path.realpath(__file__))
test_cert_file = os.path.join(curr_dir, 'sp_cert.pem')
sp_auth = ServicePrincipalAuth(test_cert_file)
result = sp_auth.get_entry_to_persist('sp_id1', 'tenant1')
self.assertEqual(result, {
'servicePrincipalId': 'sp_id1',
'servicePrincipalTenant': 'tenant1',
'certificateFile': test_cert_file,
'thumbprint': 'F0:6A:53:84:8B:BE:71:4A:42:90:D6:9D:33:52:79:C1:D0:10:73:FD'
})
    def test_detect_adfs_authority_url(self):
        """ADFS authority URLs keep their full path (minus trailing slash), while
        regular AAD authorities get '/<tenant>' appended."""
        cli = DummyCli()
        adfs_url_1 = 'https://adfs.redmond.ext-u15f2402.masd.stbtest.microsoft.com/adfs/'
        cli.cloud.endpoints.active_directory = adfs_url_1
        storage_mock = {'subscriptions': None}
        profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
        # test w/ trailing slash
        r = profile.auth_ctx_factory(cli, 'common', None)
        self.assertEqual(r.authority.url, adfs_url_1.rstrip('/'))
        # test w/o trailing slash
        adfs_url_2 = 'https://adfs.redmond.ext-u15f2402.masd.stbtest.microsoft.com/adfs'
        cli.cloud.endpoints.active_directory = adfs_url_2
        r = profile.auth_ctx_factory(cli, 'common', None)
        self.assertEqual(r.authority.url, adfs_url_2)
        # test w/ regular aad
        aad_url = 'https://login.microsoftonline.com'
        cli.cloud.endpoints.active_directory = aad_url
        r = profile.auth_ctx_factory(cli, 'common', None)
        self.assertEqual(r.authority.url, aad_url + '/common')
class FileHandleStub(object):  # pylint: disable=too-few-public-methods
    """No-op writable file stand-in that also works as a context manager."""

    def write(self, content):
        # Swallow the data; tests only need the call to succeed.
        return None

    def __enter__(self):
        # Support `with FileHandleStub() as f: ...`.
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Returning None (falsy) never suppresses exceptions.
        return None
class SubscriptionStub(Subscription):  # pylint: disable=too-few-public-methods
    """Concrete Subscription with fixed policies and caller-chosen identity fields."""

    def __init__(self, id, display_name, state, tenant_id):  # pylint: disable=redefined-builtin
        sub_policies = SubscriptionPolicies()
        sub_policies.quota_id = 'some quota'
        sub_policies.spending_limit = SpendingLimit.current_period_off
        super(SubscriptionStub, self).__init__(
            subscription_policies=sub_policies,
            authorization_source='some_authorization_source')
        # Identity fields the tests assert on.
        self.id = id
        self.display_name = display_name
        self.state = state
        self.tenant_id = tenant_id
class TenantStub(object):  # pylint: disable=too-few-public-methods
    """Bare tenant record exposing only the `tenant_id` attribute."""
    def __init__(self, tenant_id):
        self.tenant_id = tenant_id
class MSRestAzureAuthStub:
    """Fake MSIAuthentication object that counts token writes and reads."""

    def __init__(self, *args, **kwargs):
        # Counters the tests inspect.
        self.set_token_invoked_count = 0
        self.token_read_count = 0
        # Identity selectors forwarded by the caller, if any.
        self.client_id = kwargs.get('client_id')
        self.object_id = kwargs.get('object_id')
        self.msi_res_id = kwargs.get('msi_res_id')
        self._token = {
            'token_type': 'Bearer',
            'access_token': TestProfile.test_msi_access_token
        }

    def set_token(self):
        # Only record the call; the canned token is left untouched.
        self.set_token_invoked_count += 1

    @property
    def token(self):
        # Count every read so tests can assert on access patterns.
        self.token_read_count += 1
        return self._token

    @token.setter
    def token(self, value):
        self._token = value
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 50.588137 | 153 | 0.649237 |
ace6f4ef7b6062f91e08f330639efcd529c64151 | 7,208 | py | Python | glue/core/visual.py | nabobalis/glue | 1c718378b5527e64d85cc6a6f9a0330652e5cf4b | [
"BSD-3-Clause"
] | null | null | null | glue/core/visual.py | nabobalis/glue | 1c718378b5527e64d85cc6a6f9a0330652e5cf4b | [
"BSD-3-Clause"
] | null | null | null | glue/core/visual.py | nabobalis/glue | 1c718378b5527e64d85cc6a6f9a0330652e5cf4b | [
"BSD-3-Clause"
] | null | null | null | from matplotlib.colors import ColorConverter, Colormap
from matplotlib.cm import get_cmap
from glue.config import settings
from glue.config import colormaps
from echo import callback_property, HasCallbackProperties
# Define acceptable line styles
VALID_LINESTYLES = ['solid', 'dashed', 'dash-dot', 'dotted', 'none']
__all__ = ['VisualAttributes']
class VisualAttributes(HasCallbackProperties):
    """
    This class is used to define visual attributes for any kind of objects.

    Parameters
    ----------
    parent : `QObject`, optional
        The object that this visual attributes object is attached to. Default is `None`.
    color : `str`, optional
        A matplotlib color string. Default is set from :class:`~glue.config.SettingRegistry`.
    alpha : `float`, optional
        Opacity, between 0-1. Default is set from :class:`~glue.config.SettingRegistry`.
    preferred_cmap : `str` or :class:`~matplotlib.colors.Colormap`, optional
        A colormap to be used as the preferred colormap, by name or instance. Default is `None`.
    linewidth : `float`, optional
        The linewidth. Default is 1.
    linestyle : `str`, optional
        The linestyle. Default is `'solid'`.
    marker : `str`, optional
        The matplotlib marker shape. Default is `'o'`.
    markersize : `float`, optional
        The size of the marker. Default is 3.
    """

    # Attributes that may be assigned on instances (enforced in __setattr__).
    # Defined once at class level so the set is not rebuilt on every
    # attribute assignment.
    _ALLOWED_ATTRIBUTES = frozenset(('color', 'linewidth', 'linestyle',
                                     'alpha', 'parent', 'marker', 'markersize',
                                     'preferred_cmap'))

    def __init__(self, parent=None, color=None, alpha=None, preferred_cmap=None,
                 linewidth=1, linestyle='solid', marker='o', markersize=3):

        super().__init__()

        # We have to resolve the defaults here, otherwise the settings are
        # fixed once the class is defined.  Use explicit `is None` tests so
        # that falsy-but-valid values are not silently replaced — the
        # previous `alpha or settings.DATA_ALPHA` discarded alpha=0
        # (fully transparent).
        color = settings.DATA_COLOR if color is None else color
        alpha = settings.DATA_ALPHA if alpha is None else alpha

        self.parent = parent

        # Attributes that participate in __eq__, set() and copy().
        self._atts = ['color', 'alpha', 'linewidth', 'linestyle', 'marker',
                      'markersize', 'preferred_cmap']
        self.color = color
        self.alpha = alpha
        self.preferred_cmap = preferred_cmap
        self.linewidth = linewidth
        self.linestyle = linestyle
        self.marker = marker
        self.markersize = markersize

    def __eq__(self, other):
        # Two instances are equal when every attribute listed in _atts
        # compares equal.
        if not isinstance(other, VisualAttributes):
            return False
        elif self is other:
            return True
        else:
            return all(getattr(self, a) == getattr(other, a) for a in self._atts)

    # If __eq__ is defined, then __hash__ has to be re-defined
    __hash__ = object.__hash__

    def set(self, other):
        """
        Update this instance's properties based on another VisualAttributes instance.
        """
        for att in self._atts:
            setattr(self, att, getattr(other, att))

    def copy(self, new_parent=None):
        """
        Create a new instance with the same visual properties.

        Parameters
        ----------
        new_parent : `QObject`, optional
            If given, used as the parent of the copy instead of this
            instance's parent.
        """
        result = VisualAttributes()
        result.set(self)
        if new_parent is not None:
            result.parent = new_parent
        return result

    @callback_property
    def color(self):
        """
        Color specified using Matplotlib notation

        Specifically, it can be:

         * A string with a common color (e.g. 'black', 'red', 'orange')
         * A string containing a float in the rng [0:1] for a shade of
           gray ('0.0' = black,'1.0' = white)
         * A tuple of three floats in the rng [0:1] for (R, G, B)
         * An HTML hexadecimal string (e.g. '#eeefff')
        """
        return self._color

    @color.setter
    def color(self, value):
        # Normalize string colors to lower case so e.g. 'RED' == 'red'.
        if isinstance(value, str):
            self._color = value.lower()
        else:
            self._color = value

    @callback_property
    def preferred_cmap(self):
        """
        A preferred colormap specified using Matplotlib notation
        """
        return self._preferred_cmap

    @preferred_cmap.setter
    def preferred_cmap(self, value):
        if isinstance(value, str):
            try:
                self._preferred_cmap = get_cmap(value)
            except ValueError:
                # get_cmap only knows registered Matplotlib names; fall back
                # to the glue colormap registry, whose members are
                # (label, colormap) pairs (e.g. 'Viridis' for 'viridis').
                for element in colormaps.members:
                    if element[0] == value:
                        self._preferred_cmap = element[1]
                        break
                else:
                    # Neither Matplotlib nor the glue registry know the name.
                    raise ValueError(f"{value} is not a valid colormap name.")
        elif isinstance(value, Colormap) or value is None:
            self._preferred_cmap = value
        else:
            raise TypeError("`preferred_cmap` must be a string or an instance of a matplotlib.colors.Colormap")

    @callback_property
    def alpha(self):
        """
        Transparency, given as a floating point value between 0 and 1.
        """
        return self._alpha

    @alpha.setter
    def alpha(self, value):
        self._alpha = value

    @property
    def rgba(self):
        """
        The (red, green, blue, alpha) tuple for the current color and alpha.
        """
        r, g, b = ColorConverter().to_rgb(self.color)
        return (r, g, b, self.alpha)

    @callback_property
    def linestyle(self):
        """
        The line style, which can be one of 'solid', 'dashed', 'dash-dot',
        'dotted', or 'none'.
        """
        return self._linestyle

    @linestyle.setter
    def linestyle(self, value):
        if value not in VALID_LINESTYLES:
            # ValueError (a subclass of the bare Exception raised before) is
            # more precise for an invalid value; existing `except Exception`
            # handlers still catch it.
            raise ValueError("Line style should be one of %s" %
                             '/'.join(VALID_LINESTYLES))
        self._linestyle = value

    @callback_property
    def linewidth(self):
        """
        The line width, in points.
        """
        return self._linewidth

    @linewidth.setter
    def linewidth(self, value):
        # isinstance (rather than an exact type() comparison) also accepts
        # int/float subclasses such as numpy scalar types.
        if not isinstance(value, (float, int)):
            raise TypeError("Line width should be a float or an int")
        if value < 0:
            raise ValueError("Line width should be positive")
        self._linewidth = value

    @callback_property
    def marker(self):
        """
        The marker symbol.
        """
        return self._marker

    @marker.setter
    def marker(self, value):
        self._marker = value

    @callback_property
    def markersize(self):
        """
        The size of the marker.
        """
        return self._markersize

    @markersize.setter
    def markersize(self, value):
        # Marker sizes are stored as integers.
        self._markersize = int(value)

    def __setattr__(self, attribute, value):

        # Check that the attribute exists (don't allow new attributes);
        # private "_" names are always allowed.  AttributeError is a
        # backward-compatible narrowing of the bare Exception raised before.
        if attribute not in self._ALLOWED_ATTRIBUTES and not attribute.startswith('_'):
            raise AttributeError("Attribute %s does not exist" % attribute)

        changed = getattr(self, attribute, None) != value
        super().__setattr__(attribute, value)

        # if parent has a broadcast method, broadcast the change of any
        # public style attribute whose value actually changed
        if (changed and hasattr(self, 'parent') and
                hasattr(self.parent, 'broadcast') and
                attribute != 'parent' and not attribute.startswith('_')):
            self.parent.broadcast('style')
| 32.913242 | 139 | 0.600305 |
ace6f537a98238be6fd2517a08d7ebaea76dce8d | 857 | py | Python | wijn/migrations/0003_score.py | nruigrok/wijn | a43d4226f65a571f8123840caa862efe15c42524 | [
"MIT"
] | null | null | null | wijn/migrations/0003_score.py | nruigrok/wijn | a43d4226f65a571f8123840caa862efe15c42524 | [
"MIT"
] | null | null | null | wijn/migrations/0003_score.py | nruigrok/wijn | a43d4226f65a571f8123840caa862efe15c42524 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated schema migration introducing the Score model.

    dependencies = [
        # Depend on whichever user model the project configured as swappable.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('wijn', '0002_auto_20150116_1527'),
    ]
    operations = [
        migrations.CreateModel(
            name='Score',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('vraag', models.CharField(max_length=255)),
                ('region', models.CharField(max_length=255, null=True)),
                # Nullable FK to the (swappable) user model.
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| 29.551724 | 114 | 0.590432 |
ace6f5f00b52222a75684f110c4fdb2248238020 | 2,327 | py | Python | noxfile.py | elastic/enterprise-search-python | 1788413218badc01e2da23ac290698de40117f8c | [
"Apache-2.0"
] | 19 | 2019-09-05T21:14:37.000Z | 2022-03-13T00:55:48.000Z | noxfile.py | elastic/enterprise-search-python | 1788413218badc01e2da23ac290698de40117f8c | [
"Apache-2.0"
] | 77 | 2019-08-19T19:02:09.000Z | 2022-03-29T18:32:27.000Z | noxfile.py | elastic/enterprise-search-python | 1788413218badc01e2da23ac290698de40117f8c | [
"Apache-2.0"
] | 15 | 2019-10-17T14:04:09.000Z | 2022-03-22T14:04:27.000Z | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from os.path import abspath, dirname, join
import nox
# Files and directories that the format/lint sessions below operate on.
SOURCE_FILES = (
    "noxfile.py",
    "setup.py",
    "elastic_enterprise_search/",
    "utils/",
    "tests/",
)
@nox.session()
def format(session):
    """Auto-format the code base, fix license headers, then re-lint."""
    session.install("black", "isort")
    black_args = ("black", "--target-version=py27", "--target-version=py37")
    session.run(*(black_args + SOURCE_FILES))
    session.run("isort", *SOURCE_FILES)
    session.run("python", "utils/license-headers.py", "fix", *SOURCE_FILES)
    lint(session)
@nox.session
def lint(session):
    """Check formatting, import order, flake8 rules and license headers."""
    session.install("flake8", "black", "isort")
    black_check = (
        "black",
        "--check",
        "--target-version=py27",
        "--target-version=py37",
    )
    session.run(*(black_check + SOURCE_FILES))
    session.run("isort", "--check", *SOURCE_FILES)
    session.run("flake8", "--ignore=E501,W503,E203", *SOURCE_FILES)
    session.run("python", "utils/license-headers.py", "check", *SOURCE_FILES)
def tests_impl(session):
    """Install the package plus test extras and run pytest with coverage."""
    report_path = join(
        abspath(dirname(__file__)),
        "junit/enterprise-search-python-junit.xml",
    )
    session.install("git+https://github.com/elastic/elastic-transport-python")
    session.install(".[develop]")
    # Positional args select test targets; default to the whole tests/ tree.
    pytest_targets = session.posargs or ("tests/",)
    session.run(
        "pytest",
        "--junitxml=%s" % report_path,
        "--cov=elastic_enterprise_search",
        *pytest_targets,
        env={"PYTHONWARNINGS": "always::DeprecationWarning"}
    )
    session.run("coverage", "report", "-m")
@nox.session(python=["2.7", "3.6", "3.7", "3.8", "3.9"])
def test(session):
    # Delegate to tests_impl so every Python version runs the same suite.
    tests_impl(session)
| 29.833333 | 80 | 0.660507 |
ace6f91eb51132914e86b0256d9bc4c86e446f84 | 21,745 | py | Python | google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/apis/bio/v1/bio_v1_messages.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | 1 | 2017-11-29T18:52:27.000Z | 2017-11-29T18:52:27.000Z | google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/apis/bio/v1/bio_v1_messages.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/apis/bio/v1/bio_v1_messages.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | 1 | 2020-07-25T12:09:01.000Z | 2020-07-25T12:09:01.000Z | """Generated message classes for bio version v1.
Stores, processes, explores and shares biological data.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
# Protobuf package name shared by the generated message classes below.
package = 'bio'
class BioProjectsOperationsCancelRequest(_messages.Message):
  """A BioProjectsOperationsCancelRequest object.
  Fields:
    name: The name of the operation resource to be cancelled.
  """
  # Wire field 1; mandatory operation resource name.
  name = _messages.StringField(1, required=True)
class BioProjectsOperationsGetRequest(_messages.Message):
  """A BioProjectsOperationsGetRequest object.
  Fields:
    name: The name of the operation resource.
  """
  # Wire field 1; mandatory operation resource name.
  name = _messages.StringField(1, required=True)
class BioProjectsOperationsListRequest(_messages.Message):
  """A BioProjectsOperationsListRequest object.
  Fields:
    filter: The standard list filter.
    name: The name of the operation collection.
    pageSize: The standard list page size.
    pageToken: The standard list page token.
  """
  # Wire fields 1-4; only 'name' is required.
  filter = _messages.StringField(1)
  name = _messages.StringField(2, required=True)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
class BioProjectsPipelinesRunDeepVariantV1alphaRequest(_messages.Message):
  """A BioProjectsPipelinesRunDeepVariantV1alphaRequest object.
  Fields:
    projectId: Required. The project associated with this DeepVariant pipeline
      run.
    runDeepVariantV1alphaRequest: A RunDeepVariantV1alphaRequest resource to
      be passed as the request body.
  """
  projectId = _messages.StringField(1, required=True)
  # The nested message type is referenced by name and resolved lazily.
  runDeepVariantV1alphaRequest = _messages.MessageField('RunDeepVariantV1alphaRequest', 2)
class Empty(_messages.Message):
  """A generic empty message that you can re-use to avoid defining duplicated
  empty messages in your APIs. A typical example is to use it as the request
  or the response type of an API method. For instance:     service Foo {
  rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);     } The
  JSON representation for `Empty` is empty JSON object `{}`.
  """
  # Deliberately declares no fields.
class ListOperationsResponse(_messages.Message):
  """The response message for Operations.ListOperations.
  Fields:
    nextPageToken: The standard List next-page token.
    operations: A list of operations that matches the specified filter in the
      request.
  """
  # An empty nextPageToken marks the last page of results.
  nextPageToken = _messages.StringField(1)
  operations = _messages.MessageField('Operation', 2, repeated=True)
class Operation(_messages.Message):
  """This resource represents a long-running operation that is the result of a
  network API call.
  Messages:
    MetadataValue: Service-specific metadata associated with the operation.
      It typically contains progress information and common metadata such as
      create time. Some services might not provide such metadata. Any method
      that returns a long-running operation should document the metadata type,
      if any.
    ResponseValue: The normal response of the operation in case of success.
      If the original method returns no data on success, such as `Delete`, the
      response is `google.protobuf.Empty`. If the original method is standard
      `Get`/`Create`/`Update`, the response should be the resource. For other
      methods, the response should have the type `XxxResponse`, where `Xxx` is
      the original method name. For example, if the original method name is
      `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
  Fields:
    done: If the value is `false`, it means the operation is still in
      progress. If true, the operation is completed, and either `error` or
      `response` is available.
    error: The error result of the operation in case of failure or
      cancellation.
    metadata: Service-specific metadata associated with the operation. It
      typically contains progress information and common metadata such as
      create time. Some services might not provide such metadata. Any method
      that returns a long-running operation should document the metadata type,
      if any.
    name: The server-assigned name, which is only unique within the same
      service that originally returns it. If you use the default HTTP mapping,
      the `name` should have the format of `operations/some/unique/name`.
    response: The normal response of the operation in case of success. If the
      original method returns no data on success, such as `Delete`, the
      response is `google.protobuf.Empty`. If the original method is standard
      `Get`/`Create`/`Update`, the response should be the resource. For other
      methods, the response should have the type `XxxResponse`, where `Xxx` is
      the original method name. For example, if the original method name is
      `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
  """
  # The decorator folds unrecognized JSON keys into the repeated
  # 'additionalProperties' field so arbitrary payloads round-trip.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataValue(_messages.Message):
    """Service-specific metadata associated with the operation.  It typically
    contains progress information and common metadata such as create time.
    Some services might not provide such metadata.  Any method that returns a
    long-running operation should document the metadata type, if any.
    Messages:
      AdditionalProperty: An additional property for a MetadataValue object.
    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """
    class AdditionalProperty(_messages.Message):
      """An additional property for a MetadataValue object.
      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  @encoding.MapUnrecognizedFields('additionalProperties')
  class ResponseValue(_messages.Message):
    """The normal response of the operation in case of success.  If the
    original method returns no data on success, such as `Delete`, the response
    is `google.protobuf.Empty`.  If the original method is standard
    `Get`/`Create`/`Update`, the response should be the resource.  For other
    methods, the response should have the type `XxxResponse`, where `Xxx` is
    the original method name.  For example, if the original method name is
    `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
    Messages:
      AdditionalProperty: An additional property for a ResponseValue object.
    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """
    class AdditionalProperty(_messages.Message):
      """An additional property for a ResponseValue object.
      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  # Wire fields 1-5; per the docstring, once 'done' is true either
  # 'error' or 'response' is populated.
  done = _messages.BooleanField(1)
  error = _messages.MessageField('Status', 2)
  metadata = _messages.MessageField('MetadataValue', 3)
  name = _messages.StringField(4)
  response = _messages.MessageField('ResponseValue', 5)
class OperationEvent(_messages.Message):
  """An event that occurred during an Operation.
  Fields:
    description: A description of event in JSON-LD format.
    endTime: Optional time of when event finished. An event can have a start
      time and no finish time. If an event has a finish time, there must be a
      start time.
    startTime: Optional time of when event started.
  """
  # Wire fields 1-3; timestamps are transported as strings.
  description = _messages.StringField(1)
  endTime = _messages.StringField(2)
  startTime = _messages.StringField(3)
class OperationMetadata(_messages.Message):
  """Metadata describing an Operation.
  Messages:
    LabelsValue: User-settable labels.
    RequestValue: The original request that started the operation.
    RuntimeMetadataValue: Runtime metadata on this Operation.
  Fields:
    createTime: The time at which the job was submitted to the Genomics
      service.
    endTime: The time at which the job stopped running.
    events: Optional event messages that were generated during the job's
      execution. This also contains any warnings that were generated during
      import or export.
    labels: User-settable labels.
    projectId: The Google Cloud Project in which the job is scoped.
    request: The original request that started the operation.
    runtimeMetadata: Runtime metadata on this Operation.
    startTime: The time at which the job began to run.
  """
  # The decorator folds unrecognized JSON keys into 'additionalProperties'
  # so free-form maps round-trip through JSON.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    """User-settable labels.
    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.
    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """
    class AdditionalProperty(_messages.Message):
      """An additional property for a LabelsValue object.
      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """
      key = _messages.StringField(1)
      value = _messages.StringField(2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  @encoding.MapUnrecognizedFields('additionalProperties')
  class RequestValue(_messages.Message):
    """The original request that started the operation.
    Messages:
      AdditionalProperty: An additional property for a RequestValue object.
    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """
    class AdditionalProperty(_messages.Message):
      """An additional property for a RequestValue object.
      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  @encoding.MapUnrecognizedFields('additionalProperties')
  class RuntimeMetadataValue(_messages.Message):
    """Runtime metadata on this Operation.
    Messages:
      AdditionalProperty: An additional property for a RuntimeMetadataValue
        object.
    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """
    class AdditionalProperty(_messages.Message):
      """An additional property for a RuntimeMetadataValue object.
      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  # Wire fields 1-8.
  createTime = _messages.StringField(1)
  endTime = _messages.StringField(2)
  events = _messages.MessageField('OperationEvent', 3, repeated=True)
  labels = _messages.MessageField('LabelsValue', 4)
  projectId = _messages.StringField(5)
  request = _messages.MessageField('RequestValue', 6)
  runtimeMetadata = _messages.MessageField('RuntimeMetadataValue', 7)
  startTime = _messages.StringField(8)
class PipelineOptions(_messages.Message):
  """Common pipeline options.
  Messages:
    LabelsValue: User-settable labels. Applied to the Operation and any
      associated pipeline resources, e.g. GCE VMs (if any).
  Fields:
    computeZones: Google Compute Engine availability zones in which the
      workflow should start worker virtual machines, if any are needed for
      this particular workflow. Must be valid Google Compute Engine zone
      names, for example "us-east1-d".
    labels: User-settable labels. Applied to the Operation and any associated
      pipeline resources, e.g. GCE VMs (if any).
    requestId: Optional. If non-empty then requests are idempotent in that
      sending a second RunWorkflowRequest with the same project_id and
      request_id will return the name of the same already-running operation,
      instead of starting another. Do not reuse request_ids. Reusing a
      (project_id, request_id) for a different request will result in an
      error. A common way of filling this value is with a random 64-bit
      number.
  """
  # The decorator folds unrecognized JSON keys into 'additionalProperties'
  # so the free-form label map round-trips through JSON.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    """User-settable labels. Applied to the Operation and any associated
    pipeline resources, e.g. GCE VMs (if any).
    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.
    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """
    class AdditionalProperty(_messages.Message):
      """An additional property for a LabelsValue object.
      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """
      key = _messages.StringField(1)
      value = _messages.StringField(2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  # Wire fields 1-3.
  computeZones = _messages.StringField(1, repeated=True)
  labels = _messages.MessageField('LabelsValue', 2)
  requestId = _messages.StringField(3)
class RunDeepVariantV1alphaRequest(_messages.Message):
  """A RunDeepVariantV1alphaRequest object.
  Fields:
    inputFastq1: List of Google Cloud Storage paths of forward strand FASTQ.
      The pairs of FASTQ files must occur at the same position in both lists.
      e.g.:   input_fastq1s = ['lane1_1.fastq', 'lane2_1.fastq',
      'lane3_1.fastq']   input_fastq2s = ['lane1_2.fastq', 'lane2_2.fastq',
      'lane3_2.fastq']
    inputFastq2: List of Google Cloud Storage paths of reverse strand FASTQ.
      The pairs of FASTQ files must occur at the same position in both lists.
      e.g.:   input_fastq1s = ['lane1_1.fastq', 'lane2_1.fastq',
      'lane3_1.fastq']   input_fastq2s = ['lane1_2.fastq', 'lane2_2.fastq',
      'lane3_2.fastq']
    options: Common pipeline options.
    outputPath: Required. The Google Cloud Storage path for copying the final
      output files. For example, 'gs://<user_bucket>/<sample_name>/'.
    sampleName: Required. Sample name.
  """
  # Wire fields 1-5; the two FASTQ lists are positionally paired.
  inputFastq1 = _messages.StringField(1, repeated=True)
  inputFastq2 = _messages.StringField(2, repeated=True)
  options = _messages.MessageField('PipelineOptions', 3)
  outputPath = _messages.StringField(4)
  sampleName = _messages.StringField(5)
class RuntimeMetadata(_messages.Message):
  """Runtime metadata that will be populated in the runtime_metadata field of
  an Operation associated with a RunWorkflow execution.
  """
  # Deliberately declares no fields.
class StandardQueryParameters(_messages.Message):
  """Query parameters accepted by all methods.
  Enums:
    FXgafvValueValuesEnum: V1 error format.
    AltValueValuesEnum: Data format for response.
  Fields:
    f__xgafv: V1 error format.
    access_token: OAuth access token.
    alt: Data format for response.
    bearer_token: OAuth bearer token.
    callback: JSONP
    fields: Selector specifying which fields to include in a partial response.
    key: API key. Your API key identifies your project and provides you with
      API access, quota, and reports. Required unless you provide an OAuth 2.0
      token.
    oauth_token: OAuth 2.0 token for the current user.
    pp: Pretty-print response.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: Available to use for quota purposes for server-side
      applications. Can be any arbitrary string assigned to a user, but should
      not exceed 40 characters.
    trace: A tracing token of the form "token:<tokenid>" to include in api
      requests.
    uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
    upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
  """
  class AltValueValuesEnum(_messages.Enum):
    """Data format for response.
    Values:
      json: Responses with Content-Type of application/json
      media: Media download with context-dependent Content-Type
      proto: Responses with Content-Type of application/x-protobuf
    """
    json = 0
    media = 1
    proto = 2
  class FXgafvValueValuesEnum(_messages.Enum):
    """V1 error format.
    Values:
      _1: v1 error format
      _2: v2 error format
    """
    # Leading underscores because '1'/'2' are not valid Python identifiers;
    # the JSON names are restored by the AddCustomJsonEnumMapping calls at
    # the bottom of this module.
    _1 = 0
    _2 = 1
  # Wire fields 1-14.
  f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
  access_token = _messages.StringField(2)
  alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
  bearer_token = _messages.StringField(4)
  callback = _messages.StringField(5)
  fields = _messages.StringField(6)
  key = _messages.StringField(7)
  oauth_token = _messages.StringField(8)
  pp = _messages.BooleanField(9, default=True)
  prettyPrint = _messages.BooleanField(10, default=True)
  quotaUser = _messages.StringField(11)
  trace = _messages.StringField(12)
  uploadType = _messages.StringField(13)
  upload_protocol = _messages.StringField(14)
class Status(_messages.Message):
  """The `Status` type defines a logical error model that is suitable for
  different programming environments, including REST APIs and RPC APIs. It is
  used by [gRPC](https://github.com/grpc). The error model is designed to be:
  - Simple to use and understand for most users - Flexible enough to meet
  unexpected needs  # Overview  The `Status` message contains three pieces of
  data: error code, error message, and error details. The error code should be
  an enum value of google.rpc.Code, but it may accept additional error codes
  if needed. The error message should be a developer-facing English message
  that helps developers *understand* and *resolve* the error. If a localized
  user-facing error message is needed, put the localized message in the error
  details or localize it in the client. The optional error details may contain
  arbitrary information about the error. There is a predefined set of error
  detail types in the package `google.rpc` which can be used for common error
  conditions.  # Language mapping  The `Status` message is the logical
  representation of the error model, but it is not necessarily the actual wire
  format. When the `Status` message is exposed in different client libraries
  and different wire protocols, it can be mapped differently. For example, it
  will likely be mapped to some exceptions in Java, but more likely mapped to
  some error codes in C.  # Other uses  The error model and the `Status`
  message can be used in a variety of environments, either with or without
  APIs, to provide a consistent developer experience across different
  environments. Example uses of this error model include: - Partial errors.
  If a service needs to return partial errors to the client, it may embed
  the `Status` in the normal response to indicate the partial errors. -
  Workflow errors. A typical workflow has multiple steps. Each step may
  have a `Status` message for error reporting purpose. - Batch operations. If
  a client uses batch request and batch response, the `Status` message
  should be used directly inside batch response, one for each error sub-
  response. - Asynchronous operations. If an API call embeds asynchronous
  operation results in its response, the status of those operations should
  be represented directly using the `Status` message. - Logging. If some
  API errors are stored in logs, the message `Status` could be used
  directly after any stripping needed for security/privacy reasons.
  Messages:
    DetailsValueListEntry: A DetailsValueListEntry object.
  Fields:
    code: The status code, which should be an enum value of google.rpc.Code.
    details: A list of messages that carry the error details. There will be a
      common set of message types for APIs to use.
    message: A developer-facing error message, which should be in English. Any
      user-facing error message should be localized and sent in the
      google.rpc.Status.details field, or localized by the client.
  """
  # The decorator folds unrecognized JSON keys into 'additionalProperties'
  # so arbitrary detail payloads round-trip through JSON.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class DetailsValueListEntry(_messages.Message):
    """A DetailsValueListEntry object.
    Messages:
      AdditionalProperty: An additional property for a DetailsValueListEntry
        object.
    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """
    class AdditionalProperty(_messages.Message):
      """An additional property for a DetailsValueListEntry object.
      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  # Wire fields 1-3.
  code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
  message = _messages.StringField(3)
# Wire-format aliases: 'f__xgafv' and the enum members '_1'/'_2' are not
# valid Python identifiers in their canonical JSON forms ('$.xgafv', '1',
# '2'), so register explicit mappings used during (de)serialization.
encoding.AddCustomJsonFieldMapping(
    StandardQueryParameters, 'f__xgafv', '$.xgafv',
    package=u'bio')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1',
    package=u'bio')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2',
    package=u'bio')
| 38.899821 | 90 | 0.734652 |
ace6f922810861a47539d411c6f643b68f2a0310 | 1,031 | py | Python | OpenAttack/data/nltk_wordnet.py | ZJU-ZhangY/OpenAttack | 5698fe7d494d85068ccfb644d4c76b920a61082c | [
"MIT"
] | 1 | 2020-09-27T23:10:14.000Z | 2020-09-27T23:10:14.000Z | OpenAttack/data/nltk_wordnet.py | nishiwen1214/OpenAttack | c1d095b595257caa226e902b2c89b36845164f6a | [
"MIT"
] | null | null | null | OpenAttack/data/nltk_wordnet.py | nishiwen1214/OpenAttack | c1d095b595257caa226e902b2c89b36845164f6a | [
"MIT"
] | 1 | 2020-09-01T11:14:42.000Z | 2020-09-01T11:14:42.000Z | """
:type: nltk.WordNetCorpusReader
:Size: 10.283MB
Model files for wordnet in nltk.
`[page] <http://wordnet.princeton.edu/>`__
"""
from OpenAttack.utils import make_zip_downloader
NAME = "TProcess.NLTKWordNet"
URL = "https://thunlp.oss-cn-qingdao.aliyuncs.com/TAADToolbox/wordnet.zip"
DOWNLOAD = make_zip_downloader(URL)
def LOAD(path):
wnc = __import__("nltk").corpus.WordNetCorpusReader(path, None)
def lemma(word, pos):
pp = "n"
if pos in ["a", "r", "n", "v", "s"]:
pp = pos
else:
if pos[:2] == "JJ":
pp = "a"
elif pos[:2] == "VB":
pp = "v"
elif pos[:2] == "NN":
pp = "n"
elif pos[:2] == "RB":
pp = "r"
else:
pp = None
if pp is None: # do not need lemmatization
return word
lemmas = wnc._morphy(word, pp)
return min(lemmas, key=len) if len(lemmas) > 0 else word
wnc.lemma = lemma
return wnc
| 25.146341 | 74 | 0.518914 |
ace6f99c90217d24fc505383af69568bdd569417 | 4,937 | py | Python | aws/iot/basicShadowDeltaListener.py | JoseIbanez/testing | 4d6ff310cd63a8b2f8e1abcfbea0f17b23220021 | [
"MIT"
] | 1 | 2016-09-15T03:58:30.000Z | 2016-09-15T03:58:30.000Z | aws/iot/basicShadowDeltaListener.py | JoseIbanez/testing | 4d6ff310cd63a8b2f8e1abcfbea0f17b23220021 | [
"MIT"
] | 1 | 2020-09-13T08:44:50.000Z | 2020-09-13T08:44:50.000Z | aws/iot/basicShadowDeltaListener.py | JoseIbanez/testing | 4d6ff310cd63a8b2f8e1abcfbea0f17b23220021 | [
"MIT"
] | null | null | null | '''
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
'''
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient
import logging
import time
import json
import argparse
from os.path import expanduser
# Shadow JSON schema:
#
# Name: Bot
# {
# "state": {
# "desired":{
# "property":<INT VALUE>
# }
# }
# }
# Custom Shadow callback
def customShadowCallback_Delta(payload, responseStatus, token):
    """Shadow-delta callback: print the status and the delta document.

    ``payload`` is a JSON string ready to be parsed using ``json.loads(...)``
    in both Py2.x and Py3.x; ``token`` is accepted but unused.
    """
    print(responseStatus)
    delta = json.loads(payload)
    print("++++++++DELTA++++++++++")
    print("property: " + str(delta["state"]))
    print("property: " + str(delta["state"]["property"]))
    print("version: " + str(delta["version"]))
    print("+++++++++++++++++++++++\n\n")
# Read in command-line parameters
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--endpoint", action="store", dest="host", help="Your AWS IoT custom endpoint",
                    default="a1o7eza7uxtx5n-ats.iot.eu-west-1.amazonaws.com")
parser.add_argument("-r", "--rootCA", action="store", dest="rootCAPath", help="Root CA file path",
                    default=expanduser("~/.secrets/iot/AmazonRootCA1.pem"))
parser.add_argument("-c", "--cert", action="store", dest="certificatePath", help="Certificate file path",
                    default=expanduser("~/.secrets/iot/1c7e71bb92-certificate.pem.crt"))
parser.add_argument("-k", "--key", action="store", dest="privateKeyPath", help="Private key file path",
                    default=expanduser("~/.secrets/iot/1c7e71bb92-private.pem.key"))
parser.add_argument("-p", "--port", action="store", dest="port", type=int, help="Port number override")
parser.add_argument("-w", "--websocket", action="store_true", dest="useWebsocket", default=False,
                    help="Use MQTT over WebSocket")
parser.add_argument("-n", "--thingName", action="store", dest="thingName", default="Bot", help="Targeted thing name")
parser.add_argument("-id", "--clientId", action="store", dest="clientId", default="basicShadowDeltaListener",
                    help="Targeted client id")
args = parser.parse_args()

# Unpack parsed arguments into the module-level names used below.
host = args.host
rootCAPath = args.rootCAPath
certificatePath = args.certificatePath
privateKeyPath = args.privateKeyPath
port = args.port
useWebsocket = args.useWebsocket
thingName = args.thingName
clientId = args.clientId

# X.509 certificate auth and WebSocket auth are mutually exclusive modes.
# NOTE(review): exit(2) after parser.error() is unreachable — parser.error()
# already exits; kept byte-identical here.
if args.useWebsocket and args.certificatePath and args.privateKeyPath:
    parser.error("X.509 cert authentication and WebSocket are mutual exclusive. Please pick one.")
    exit(2)

# Non-WebSocket mode requires both the certificate and the private key.
if not args.useWebsocket and (not args.certificatePath or not args.privateKeyPath):
    parser.error("Missing credentials for authentication.")
    exit(2)

# Port defaults
if args.useWebsocket and not args.port:  # When no port override for WebSocket, default to 443
    port = 443
if not args.useWebsocket and not args.port:  # When no port override for non-WebSocket, default to 8883
    port = 8883
# Configure logging
logger = logging.getLogger("AWSIoTPythonSDK.core")
logger.setLevel(logging.DEBUG)
streamHandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)

# Init AWSIoTMQTTShadowClient
myAWSIoTMQTTShadowClient = None
if useWebsocket:
    # WebSocket transport: only the root CA is passed to configureCredentials.
    myAWSIoTMQTTShadowClient = AWSIoTMQTTShadowClient(clientId, useWebsocket=True)
    myAWSIoTMQTTShadowClient.configureEndpoint(host, port)
    myAWSIoTMQTTShadowClient.configureCredentials(rootCAPath)
else:
    # Certificate transport: root CA plus client key/certificate.
    myAWSIoTMQTTShadowClient = AWSIoTMQTTShadowClient(clientId)
    myAWSIoTMQTTShadowClient.configureEndpoint(host, port)
    myAWSIoTMQTTShadowClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath)

# AWSIoTMQTTShadowClient configuration: reconnect backoff (base 1s, max 32s,
# stable window 20s) and operation timeouts.
myAWSIoTMQTTShadowClient.configureAutoReconnectBackoffTime(1, 32, 20)
myAWSIoTMQTTShadowClient.configureConnectDisconnectTimeout(10)  # 10 sec
myAWSIoTMQTTShadowClient.configureMQTTOperationTimeout(5)  # 5 sec

# Connect to AWS IoT
myAWSIoTMQTTShadowClient.connect()

# Create a deviceShadow with persistent subscription
deviceShadowHandler = myAWSIoTMQTTShadowClient.createShadowHandlerWithName(thingName, True)

# Listen on deltas
deviceShadowHandler.shadowRegisterDeltaCallback(customShadowCallback_Delta)

# Loop forever; the SDK delivers delta callbacks on its own threads.
while True:
    time.sleep(1)
ace6f9c98c4bb78d97d8985f859543ca0eca285d | 2,016 | py | Python | docs/rtd/conf.py | avast-tl/yaramod | 93c95793f3b3cee514d9e9aa0a93bc4dc5c64a70 | [
"MIT",
"BSD-3-Clause"
] | 31 | 2017-12-12T21:10:19.000Z | 2019-03-09T03:28:49.000Z | docs/rtd/conf.py | avast-tl/yaramod | 93c95793f3b3cee514d9e9aa0a93bc4dc5c64a70 | [
"MIT",
"BSD-3-Clause"
] | 20 | 2017-12-27T22:23:48.000Z | 2019-04-16T15:28:10.000Z | docs/rtd/conf.py | avast-tl/yaramod | 93c95793f3b3cee514d9e9aa0a93bc4dc5c64a70 | [
"MIT",
"BSD-3-Clause"
] | 9 | 2017-12-16T14:01:04.000Z | 2019-04-16T13:27:42.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'yaramod'
copyright = '2020, Avast'
author = 'Avast'

# The full version, including alpha/beta/rc tags
release = 'v3.12.8'

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx_rtd_theme',
    'sphinx_tabs.tabs',
    'sphinx.ext.autodoc'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# Intentionally empty: this project ships no custom static assets.
#html_static_path = ['_static']
html_static_path = []
| 33.6 | 79 | 0.662202 |
ace6fa6b33fd0a51aab85e5b53e14e092450c27c | 642 | py | Python | test/null_allocator.py | inaccel/numpy-allocator | 282cf6bd86a148daca1851c2b82065526e6038bf | [
"Apache-2.0"
] | 11 | 2022-01-01T22:19:38.000Z | 2022-01-12T21:44:15.000Z | test/null_allocator.py | inaccel/numpy-allocator | 282cf6bd86a148daca1851c2b82065526e6038bf | [
"Apache-2.0"
] | null | null | null | test/null_allocator.py | inaccel/numpy-allocator | 282cf6bd86a148daca1851c2b82065526e6038bf | [
"Apache-2.0"
] | 1 | 2021-08-30T08:21:01.000Z | 2021-08-30T08:21:01.000Z | from ctypes import *
import numpy_allocator
class null_allocator(metaclass=numpy_allocator.type):
    # NumPy allocator whose every allocation request fails: each C-level
    # hook below returns None, which ctypes converts to a NULL pointer,
    # so NumPy raises MemoryError for any allocation while this allocator
    # is active (see main() below).

    @CFUNCTYPE(c_void_p, c_size_t, c_size_t)
    def _calloc_(nelem, elsize):
        # Simulate calloc failure: NULL.
        return None

    @CFUNCTYPE(None, c_void_p, c_size_t)
    def _free_(ptr, size):
        # Nothing is ever successfully allocated, so nothing to release.
        pass

    @CFUNCTYPE(c_void_p, c_size_t)
    def _malloc_(size):
        # Simulate malloc failure: NULL.
        return None

    @CFUNCTYPE(c_void_p, c_void_p, c_size_t)
    def _realloc_(ptr, new_size):
        # Simulate realloc failure: NULL.
        return None
def main():
    """Verify that allocating an array under null_allocator raises MemoryError."""
    import numpy as np

    with np.testing.assert_raises(MemoryError):
        # While the allocator context is active, even a 0-d array allocation
        # must fail because every allocation hook returns NULL.
        with null_allocator:
            np.ndarray(())


if __name__ == '__main__':
    main()
| 19.454545 | 53 | 0.657321 |
ace6fb0fa883dc7a43e4e717f9dcb1442ae7f352 | 6,172 | py | Python | machine_learning/decision_tree.py | jenia90/Python | 696fb4a681ad9e4d84e0d2b894daf449a3e30b24 | [
"MIT"
] | 145,614 | 2016-07-21T05:40:05.000Z | 2022-03-31T22:17:22.000Z | machine_learning/decision_tree.py | Agha-Muqarib/Python | 04f156a8973d6156a4357e0717d9eb0aa264d086 | [
"MIT"
] | 3,987 | 2016-07-28T17:31:25.000Z | 2022-03-30T23:07:46.000Z | machine_learning/decision_tree.py | Agha-Muqarib/Python | 04f156a8973d6156a4357e0717d9eb0aa264d086 | [
"MIT"
] | 40,014 | 2016-07-26T15:14:41.000Z | 2022-03-31T22:23:03.000Z | """
Implementation of a basic regression decision tree.
Input data set: The input data set must be 1-dimensional with continuous labels.
Output: The decision tree maps a real number input to a real number output.
"""
import numpy as np
class Decision_Tree:
    """1-dimensional regression decision tree.

    Internal nodes split on a threshold over the input values; leaves
    predict the mean of the training labels that reached them.
    """

    def __init__(self, depth=5, min_leaf_size=5):
        """
        @param depth: maximum remaining depth; a depth of 1 forces a leaf
        @param min_leaf_size: minimum number of samples on each side of a split
        """
        self.depth = depth
        self.decision_boundary = 0
        self.left = None
        self.right = None
        self.min_leaf_size = min_leaf_size
        self.prediction = None

    def mean_squared_error(self, labels, prediction):
        """
        mean_squared_error:
        @param labels: a one dimensional numpy array
        @param prediction: a floating point value
        return value: mean_squared_error calculates the error if prediction is used to
            estimate the labels
        >>> tester = Decision_Tree()
        >>> test_labels = np.array([1,2,3,4,5,6,7,8,9,10])
        >>> test_prediction = float(6)
        >>> tester.mean_squared_error(test_labels, test_prediction) == (
        ...     Test_Decision_Tree.helper_mean_squared_error_test(test_labels,
        ...         test_prediction))
        True
        >>> test_labels = np.array([1,2,3])
        >>> test_prediction = float(2)
        >>> tester.mean_squared_error(test_labels, test_prediction) == (
        ...     Test_Decision_Tree.helper_mean_squared_error_test(test_labels,
        ...         test_prediction))
        True
        """
        # Fix: the doctests above used np.float, which was removed in
        # NumPy 1.24; the builtin float behaves identically here.
        if labels.ndim != 1:
            # Kept as a printed warning (not an exception) to preserve the
            # original non-raising interface.
            print("Error: Input labels must be one dimensional")
        return np.mean((labels - prediction) ** 2)

    def train(self, X, y):
        """
        train:
        @param X: a one dimensional numpy array
        @param y: a one dimensional numpy array.
            The contents of y are the labels for the corresponding X values
        train does not have a return value
        """
        # Check that the inputs conform to our dimensionality constraints.
        if X.ndim != 1:
            print("Error: Input data set must be one dimensional")
            return
        if len(X) != len(y):
            print("Error: X and y have different lengths")
            return
        if y.ndim != 1:
            print("Error: Data set labels must be one dimensional")
            return

        # Too few samples to split, or depth exhausted: become a leaf.
        if len(X) < 2 * self.min_leaf_size:
            self.prediction = np.mean(y)
            return
        if self.depth == 1:
            self.prediction = np.mean(y)
            return

        best_split = 0
        # Bug fix: the split criterion must be measured on the *labels* y,
        # not on the feature values X (the original computed the error of X
        # against the mean of y). A split is only accepted if it beats twice
        # the unsplit label error; otherwise this node becomes a leaf.
        min_error = self.mean_squared_error(y, np.mean(y)) * 2

        # Scan all split positions that leave at least min_leaf_size samples
        # on each side, keeping the one with the lowest summed label error.
        for i in range(len(X)):
            if len(X[:i]) < self.min_leaf_size:
                continue
            elif len(X[i:]) < self.min_leaf_size:
                continue
            else:
                error_left = self.mean_squared_error(y[:i], np.mean(y[:i]))
                error_right = self.mean_squared_error(y[i:], np.mean(y[i:]))
                error = error_left + error_right
                if error < min_error:
                    best_split = i
                    min_error = error

        if best_split != 0:
            left_X = X[:best_split]
            left_y = y[:best_split]
            right_X = X[best_split:]
            right_y = y[best_split:]

            self.decision_boundary = X[best_split]
            self.left = Decision_Tree(
                depth=self.depth - 1, min_leaf_size=self.min_leaf_size
            )
            self.right = Decision_Tree(
                depth=self.depth - 1, min_leaf_size=self.min_leaf_size
            )
            self.left.train(left_X, left_y)
            self.right.train(right_X, right_y)
        else:
            self.prediction = np.mean(y)
        return

    def predict(self, x):
        """
        predict:
        @param x: a floating point value to predict the label of
        the prediction function works by recursively calling the predict function
        of the appropriate subtree based on the tree's decision boundary
        """
        if self.prediction is not None:
            return self.prediction
        elif self.left is not None and self.right is not None:
            # Bug fix: the original condition `self.left or self.right is not
            # None` relied on accidental operator precedence; an internal
            # node always has both children set, so test both explicitly.
            if x >= self.decision_boundary:
                return self.right.predict(x)
            else:
                return self.left.predict(x)
        else:
            print("Error: Decision tree not yet trained")
            return None
class Test_Decision_Tree:
    """Decision Tree test class"""

    @staticmethod
    def helper_mean_squared_error_test(labels, prediction):
        """
        helper_mean_squared_error_test:
        @param labels: a one dimensional numpy array
        @param prediction: a floating point value
        return value: helper_mean_squared_error_test calculates the mean squared error
        """
        # Fix: np.float was removed in NumPy 1.24; plain Python floats
        # behave identically for this accumulation.
        squared_error_sum = 0.0
        for label in labels:
            squared_error_sum += (label - prediction) ** 2
        return float(squared_error_sum / labels.size)
def main():
    """
    In this demonstration we're generating a sample data set from the sin function in
    numpy. We then train a decision tree on the data set and use the decision tree to
    predict the label of 10 different test values. Then the mean squared error over
    this test is displayed.
    """
    inputs = np.arange(-1.0, 1.0, 0.005)
    targets = np.sin(inputs)

    regressor = Decision_Tree(depth=10, min_leaf_size=10)
    regressor.train(inputs, targets)

    test_cases = (np.random.rand(10) * 2) - 1
    predictions = np.array([regressor.predict(value) for value in test_cases])
    avg_error = np.mean((predictions - test_cases) ** 2)

    print("Test values: " + str(test_cases))
    print("Predictions: " + str(predictions))
    print("Average error: " + str(avg_error))
if __name__ == "__main__":
    main()

    import doctest

    # NOTE: "mean_squarred_error" (sic) is the original runtime-visible
    # report name; kept unchanged.
    doctest.testmod(name="mean_squarred_error", verbose=True)
| 33.912088 | 86 | 0.591542 |
ace6fbdb9b64e6ce0eb1cfb4383a9db1ee306f16 | 976 | py | Python | tests/test_enums.py | pixxelspace/titiler | 54d2b203860df35aff7fe9b01beaa2e35939d0e9 | [
"MIT"
] | null | null | null | tests/test_enums.py | pixxelspace/titiler | 54d2b203860df35aff7fe9b01beaa2e35939d0e9 | [
"MIT"
] | null | null | null | tests/test_enums.py | pixxelspace/titiler | 54d2b203860df35aff7fe9b01beaa2e35939d0e9 | [
"MIT"
] | null | null | null | """test titiler enums."""
import pytest
from rio_tiler.profiles import img_profiles
from titiler.resources.enums import ImageType
@pytest.mark.parametrize(
    "value,driver,mimetype",
    [
        ("png", "PNG", "image/png"),
        ("npy", "NPY", "application/x-binary"),
        ("tif", "GTiff", "image/tiff; application=geotiff"),
        ("jpeg", "JPEG", "image/jpeg"),
        ("jp2", "JP2OpenJPEG", "image/jp2"),
        ("webp", "WEBP", "image/webp"),
        ("pngraw", "PNG", "image/png"),
    ],
)
def test_imagetype(value, driver, mimetype):
    """Test driver and mimetype values."""
    # Each ImageType member must expose the matching output driver name
    # and HTTP Content-Type string.
    assert ImageType[value].driver == driver
    assert ImageType[value].mimetype == mimetype
def test_imageprofile():
    """Test that each ImageType exposes the matching rio-tiler image profile."""
    # Bug fix: the original body used bare `==` expressions whose results
    # were discarded, so this test could never fail. Assert the comparisons.
    assert ImageType.png.profile == img_profiles.get("png")
    assert ImageType.pngraw.profile == img_profiles.get("pngraw")
    assert ImageType.jpeg.profile == img_profiles.get("jpeg")
    assert ImageType.webp.profile == img_profiles.get("webp")
| 29.575758 | 60 | 0.633197 |
ace6fc012670dfd73468a5d9e12fef6db9e648ef | 8,890 | py | Python | src/main/resources/pytz/zoneinfo/Atlantic/Madeira.py | TheEin/swagger-maven-plugin | cf93dce2d5c8d3534f4cf8c612b11e2d2313871b | [
"Apache-2.0"
] | 65 | 2015-11-14T13:46:01.000Z | 2021-08-14T05:54:04.000Z | lib/pytz/zoneinfo/Atlantic/Madeira.py | tjsavage/polymer-dashboard | 19bc467f1206613f8eec646b6f2bc43cc319ef75 | [
"CNRI-Python",
"Linux-OpenIB"
] | 13 | 2016-03-31T20:00:17.000Z | 2021-08-20T14:52:31.000Z | lib/pytz/zoneinfo/Atlantic/Madeira.py | tjsavage/polymer-dashboard | 19bc467f1206613f8eec646b6f2bc43cc319ef75 | [
"CNRI-Python",
"Linux-OpenIB"
] | 20 | 2015-03-18T08:41:37.000Z | 2020-12-18T02:58:30.000Z | '''tzinfo timezone information for Atlantic/Madeira.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Madeira(DstTzInfo):
'''Atlantic/Madeira timezone definition. See datetime.tzinfo for details'''
zone = 'Atlantic/Madeira'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1911,5,24,1,7,36),
d(1916,6,18,0,0,0),
d(1916,11,1,1,0,0),
d(1917,3,1,0,0,0),
d(1917,10,15,0,0,0),
d(1918,3,2,0,0,0),
d(1918,10,15,0,0,0),
d(1919,3,1,0,0,0),
d(1919,10,15,0,0,0),
d(1920,3,1,0,0,0),
d(1920,10,15,0,0,0),
d(1921,3,1,0,0,0),
d(1921,10,15,0,0,0),
d(1924,4,17,0,0,0),
d(1924,10,15,0,0,0),
d(1926,4,18,0,0,0),
d(1926,10,3,0,0,0),
d(1927,4,10,0,0,0),
d(1927,10,2,0,0,0),
d(1928,4,15,0,0,0),
d(1928,10,7,0,0,0),
d(1929,4,21,0,0,0),
d(1929,10,6,0,0,0),
d(1931,4,19,0,0,0),
d(1931,10,4,0,0,0),
d(1932,4,3,0,0,0),
d(1932,10,2,0,0,0),
d(1934,4,8,0,0,0),
d(1934,10,7,0,0,0),
d(1935,3,31,0,0,0),
d(1935,10,6,0,0,0),
d(1936,4,19,0,0,0),
d(1936,10,4,0,0,0),
d(1937,4,4,0,0,0),
d(1937,10,3,0,0,0),
d(1938,3,27,0,0,0),
d(1938,10,2,0,0,0),
d(1939,4,16,0,0,0),
d(1939,11,19,0,0,0),
d(1940,2,25,0,0,0),
d(1940,10,6,0,0,0),
d(1941,4,6,0,0,0),
d(1941,10,6,0,0,0),
d(1942,3,15,0,0,0),
d(1942,4,25,23,0,0),
d(1942,8,15,23,0,0),
d(1942,10,25,0,0,0),
d(1943,3,14,0,0,0),
d(1943,4,17,23,0,0),
d(1943,8,28,23,0,0),
d(1943,10,31,0,0,0),
d(1944,3,12,0,0,0),
d(1944,4,22,23,0,0),
d(1944,8,26,23,0,0),
d(1944,10,29,0,0,0),
d(1945,3,11,0,0,0),
d(1945,4,21,23,0,0),
d(1945,8,25,23,0,0),
d(1945,10,28,0,0,0),
d(1946,4,7,0,0,0),
d(1946,10,6,0,0,0),
d(1947,4,6,3,0,0),
d(1947,10,5,3,0,0),
d(1948,4,4,3,0,0),
d(1948,10,3,3,0,0),
d(1949,4,3,3,0,0),
d(1949,10,2,3,0,0),
d(1951,4,1,3,0,0),
d(1951,10,7,3,0,0),
d(1952,4,6,3,0,0),
d(1952,10,5,3,0,0),
d(1953,4,5,3,0,0),
d(1953,10,4,3,0,0),
d(1954,4,4,3,0,0),
d(1954,10,3,3,0,0),
d(1955,4,3,3,0,0),
d(1955,10,2,3,0,0),
d(1956,4,1,3,0,0),
d(1956,10,7,3,0,0),
d(1957,4,7,3,0,0),
d(1957,10,6,3,0,0),
d(1958,4,6,3,0,0),
d(1958,10,5,3,0,0),
d(1959,4,5,3,0,0),
d(1959,10,4,3,0,0),
d(1960,4,3,3,0,0),
d(1960,10,2,3,0,0),
d(1961,4,2,3,0,0),
d(1961,10,1,3,0,0),
d(1962,4,1,3,0,0),
d(1962,10,7,3,0,0),
d(1963,4,7,3,0,0),
d(1963,10,6,3,0,0),
d(1964,4,5,3,0,0),
d(1964,10,4,3,0,0),
d(1965,4,4,3,0,0),
d(1965,10,3,3,0,0),
d(1966,4,3,3,0,0),
d(1977,3,27,0,0,0),
d(1977,9,25,0,0,0),
d(1978,4,2,0,0,0),
d(1978,10,1,0,0,0),
d(1979,4,1,0,0,0),
d(1979,9,30,1,0,0),
d(1980,3,30,0,0,0),
d(1980,9,28,1,0,0),
d(1981,3,29,1,0,0),
d(1981,9,27,1,0,0),
d(1982,3,28,1,0,0),
d(1982,9,26,1,0,0),
d(1983,3,27,2,0,0),
d(1983,9,25,1,0,0),
d(1984,3,25,1,0,0),
d(1984,9,30,1,0,0),
d(1985,3,31,1,0,0),
d(1985,9,29,1,0,0),
d(1986,3,30,1,0,0),
d(1986,9,28,1,0,0),
d(1987,3,29,1,0,0),
d(1987,9,27,1,0,0),
d(1988,3,27,1,0,0),
d(1988,9,25,1,0,0),
d(1989,3,26,1,0,0),
d(1989,9,24,1,0,0),
d(1990,3,25,1,0,0),
d(1990,9,30,1,0,0),
d(1991,3,31,1,0,0),
d(1991,9,29,1,0,0),
d(1992,3,29,1,0,0),
d(1992,9,27,1,0,0),
d(1993,3,28,1,0,0),
d(1993,9,26,1,0,0),
d(1994,3,27,1,0,0),
d(1994,9,25,1,0,0),
d(1995,3,26,1,0,0),
d(1995,9,24,1,0,0),
d(1996,3,31,1,0,0),
d(1996,10,27,1,0,0),
d(1997,3,30,1,0,0),
d(1997,10,26,1,0,0),
d(1998,3,29,1,0,0),
d(1998,10,25,1,0,0),
d(1999,3,28,1,0,0),
d(1999,10,31,1,0,0),
d(2000,3,26,1,0,0),
d(2000,10,29,1,0,0),
d(2001,3,25,1,0,0),
d(2001,10,28,1,0,0),
d(2002,3,31,1,0,0),
d(2002,10,27,1,0,0),
d(2003,3,30,1,0,0),
d(2003,10,26,1,0,0),
d(2004,3,28,1,0,0),
d(2004,10,31,1,0,0),
d(2005,3,27,1,0,0),
d(2005,10,30,1,0,0),
d(2006,3,26,1,0,0),
d(2006,10,29,1,0,0),
d(2007,3,25,1,0,0),
d(2007,10,28,1,0,0),
d(2008,3,30,1,0,0),
d(2008,10,26,1,0,0),
d(2009,3,29,1,0,0),
d(2009,10,25,1,0,0),
d(2010,3,28,1,0,0),
d(2010,10,31,1,0,0),
d(2011,3,27,1,0,0),
d(2011,10,30,1,0,0),
d(2012,3,25,1,0,0),
d(2012,10,28,1,0,0),
d(2013,3,31,1,0,0),
d(2013,10,27,1,0,0),
d(2014,3,30,1,0,0),
d(2014,10,26,1,0,0),
d(2015,3,29,1,0,0),
d(2015,10,25,1,0,0),
d(2016,3,27,1,0,0),
d(2016,10,30,1,0,0),
d(2017,3,26,1,0,0),
d(2017,10,29,1,0,0),
d(2018,3,25,1,0,0),
d(2018,10,28,1,0,0),
d(2019,3,31,1,0,0),
d(2019,10,27,1,0,0),
d(2020,3,29,1,0,0),
d(2020,10,25,1,0,0),
d(2021,3,28,1,0,0),
d(2021,10,31,1,0,0),
d(2022,3,27,1,0,0),
d(2022,10,30,1,0,0),
d(2023,3,26,1,0,0),
d(2023,10,29,1,0,0),
d(2024,3,31,1,0,0),
d(2024,10,27,1,0,0),
d(2025,3,30,1,0,0),
d(2025,10,26,1,0,0),
d(2026,3,29,1,0,0),
d(2026,10,25,1,0,0),
d(2027,3,28,1,0,0),
d(2027,10,31,1,0,0),
d(2028,3,26,1,0,0),
d(2028,10,29,1,0,0),
d(2029,3,25,1,0,0),
d(2029,10,28,1,0,0),
d(2030,3,31,1,0,0),
d(2030,10,27,1,0,0),
d(2031,3,30,1,0,0),
d(2031,10,26,1,0,0),
d(2032,3,28,1,0,0),
d(2032,10,31,1,0,0),
d(2033,3,27,1,0,0),
d(2033,10,30,1,0,0),
d(2034,3,26,1,0,0),
d(2034,10,29,1,0,0),
d(2035,3,25,1,0,0),
d(2035,10,28,1,0,0),
d(2036,3,30,1,0,0),
d(2036,10,26,1,0,0),
d(2037,3,29,1,0,0),
d(2037,10,25,1,0,0),
]
_transition_info = [
i(-4080,0,'FMT'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(3600,7200,'MADMT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(3600,7200,'MADMT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(3600,7200,'MADMT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(3600,7200,'MADMT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,3600,'MADST'),
i(-3600,0,'MADT'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
i(3600,3600,'WEST'),
i(0,0,'WET'),
]
Madeira = Madeira()
| 19.284165 | 79 | 0.558943 |
ace6fcc015326d2b92b8091aea790c01355b98e7 | 3,114 | py | Python | ambari-common/src/main/python/ambari_ws4py/client/geventclient.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | [
"Apache-2.0"
] | 1,664 | 2015-01-03T09:35:21.000Z | 2022-03-31T04:55:24.000Z | ambari-common/src/main/python/ambari_ws4py/client/geventclient.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | [
"Apache-2.0"
] | 3,018 | 2015-02-19T20:16:10.000Z | 2021-11-13T20:47:48.000Z | ambari-common/src/main/python/ambari_ws4py/client/geventclient.py | likenamehaojie/Apache-Ambari-ZH | 5973025bd694cdbb4b49fb4c4e0d774782811ff6 | [
"Apache-2.0"
] | 1,673 | 2015-01-06T14:14:42.000Z | 2022-03-31T07:22:30.000Z | # -*- coding: utf-8 -*-
import copy
import gevent
from gevent import Greenlet
from gevent.queue import Queue
from ambari_ws4py.client import WebSocketBaseClient
__all__ = ['WebSocketClient']
class WebSocketClient(WebSocketBaseClient):
    def __init__(self, url, protocols=None, extensions=None, heartbeat_freq=None, ssl_options=None, headers=None, exclude_headers=None):
        """
        WebSocket client that executes the
        :meth:`run() <ws4py.websocket.WebSocket.run>` into a gevent greenlet.

        .. code-block:: python

          ws = WebSocketClient('ws://localhost:9000/echo', protocols=['http-only', 'chat'])
          ws.connect()

          ws.send("Hello world")

          def incoming():
            while True:
               m = ws.receive()
               if m is not None:
                  print str(m)
               else:
                  break

          def outgoing():
            for i in range(0, 40, 5):
               ws.send("*" * i)

          greenlets = [
             gevent.spawn(incoming),
             gevent.spawn(outgoing),
          ]
          gevent.joinall(greenlets)
        """
        WebSocketBaseClient.__init__(self, url, protocols, extensions, heartbeat_freq,
                                     ssl_options=ssl_options, headers=headers, exclude_headers=exclude_headers)
        # Greenlet created now but only started from handshake_ok().
        self._th = Greenlet(self.run)

        self.messages = Queue()
        """
        Queue that will hold received messages.
        """

    def handshake_ok(self):
        """
        Called when the upgrade handshake has completed
        successfully.

        Starts the client's thread.
        """
        self._th.start()

    def received_message(self, message):
        """
        Override the base class to store the incoming message
        in the `messages` queue.
        """
        # deepcopy so the queued message stays valid after the transport
        # reuses or releases its internal buffers.
        self.messages.put(copy.deepcopy(message))

    def closed(self, code, reason=None):
        """
        Puts a :exc:`StopIteration` as a message into the
        `messages` queue.
        """
        # When the connection is closed, put a StopIteration
        # on the message queue to signal there's nothing left
        # to wait for
        self.messages.put(StopIteration)

    def receive(self, block=True):
        """
        Returns messages that were stored into the
        `messages` queue and returns `None` when the
        websocket is terminated or closed.
        `block` is passed through the gevent queue `.get()` method, which if
        True will block until an item in the queue is available. Set this to
        False if you just want to check the queue, which will raise an
        Empty exception you need to handle if there is no message to return.
        """
        # If the websocket was terminated and there are no messages
        # left in the queue, return None immediately otherwise the client
        # will block forever
        if self.terminated and self.messages.empty():
            return None
        message = self.messages.get(block=block)
        # StopIteration is the closed() sentinel meaning "no more messages".
        if message is StopIteration:
            return None
        return message
| 32.103093 | 136 | 0.593128 |
ace6fcedc09a14c8c677c9f6529a9881bdb2e787 | 1,444 | py | Python | src/exts/custom_checks.py | DJStompZone/emojis | 398435e3d8f235c5856f1bae9d42e0a9be377ceb | [
"MIT"
] | 26 | 2020-08-29T20:17:15.000Z | 2022-01-11T21:57:16.000Z | src/exts/custom_checks.py | DJStompZone/emojis | 398435e3d8f235c5856f1bae9d42e0a9be377ceb | [
"MIT"
] | 10 | 2020-08-31T13:50:36.000Z | 2021-05-23T09:28:17.000Z | src/exts/custom_checks.py | DJStompZone/emojis | 398435e3d8f235c5856f1bae9d42e0a9be377ceb | [
"MIT"
] | 19 | 2020-08-29T20:18:06.000Z | 2021-10-17T02:39:30.000Z | from src.common.common import *
class CustomChecks(Cog):
__slots__ = ["bot"]
def __init__(self, bot):
self.bot = bot
async def bot_check(self, ctx):
"""
Checks that affect the entire bot.
Checks implemented:
- cooldown: A global cooldown for every command.
"""
async def cooldown_check() -> bool:
""" Implement a global cooldown for every command, defined in bot.cooldown. """
whitelist = ("help",)
if ctx.command.name in whitelist:
return True
# Get current cooldown
bucket = self.bot.cooldown.get_bucket(ctx.message)
retry_after = bucket.update_rate_limit()
if retry_after: # On cooldown
await ctx.error(
"You're on cooldown. Try again in %d seconds." % int(retry_after)
)
return False
else: # Not on cooldown
return True
# Checks not in this tuple will be ignored
active_checks = (cooldown_check,)
# Loop through every check
# Every check must return True for the command to continue
# When adding new checks, use ctx.error and then return False on fail
for c in active_checks:
if not await c():
return False
return True
def setup(bot):
    # Extension entry point: register the cog when the extension is loaded.
    bot.add_cog(CustomChecks(bot))
| 27.245283 | 91 | 0.560942 |
ace6fcf7696a0cca68b12324f8b7526966c9b4c8 | 1,324 | py | Python | observations/r/sp500.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 199 | 2017-07-24T01:34:27.000Z | 2022-01-29T00:50:55.000Z | observations/r/sp500.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 46 | 2017-09-05T19:27:20.000Z | 2019-01-07T09:47:26.000Z | observations/r/sp500.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 45 | 2017-07-26T00:10:44.000Z | 2022-03-16T20:44:59.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def sp500(path):
    """Returns on Standard \\& Poor's 500 Index

    daily observations from 1981-01 to 1991-04

    *number of observations* : 2783

    A dataframe containing :

    r500
        daily return S\\&P500 (change in log index)

    Args:
      path: str.
        Path to directory which either stores file or otherwise file will
        be downloaded and extracted there.
        Filename is `sp500.csv`.

    Returns:
      Tuple of np.ndarray `x_train` with 2783 rows and 1 columns and
      dictionary `metadata` of column headers (feature names).
    """
    import pandas as pd

    path = os.path.expanduser(path)
    filename = 'sp500.csv'
    filepath = os.path.join(path, filename)

    # Download the CSV on first use only.
    if not os.path.exists(filepath):
        url = 'http://dustintran.com/data/r/Ecdat/SP500.csv'
        maybe_download_and_extract(path, url,
                                   save_file_name='sp500.csv',
                                   resume=False)

    data = pd.read_csv(filepath, index_col=0, parse_dates=True)
    metadata = {'columns': data.columns}
    return data.values, metadata
| 25.461538 | 71 | 0.675982 |
ace6fd2effc8a39f25a46fe42f3dfbfa7fcb64dc | 779 | py | Python | discordplus/classes/configs.py | Ashenguard/DiscordPlus | 94d2226c4e12cb2f4215cd956f19c7adf4420e9a | [
"MIT"
] | null | null | null | discordplus/classes/configs.py | Ashenguard/DiscordPlus | 94d2226c4e12cb2f4215cd956f19c7adf4420e9a | [
"MIT"
] | null | null | null | discordplus/classes/configs.py | Ashenguard/DiscordPlus | 94d2226c4e12cb2f4215cd956f19c7adf4420e9a | [
"MIT"
] | null | null | null | from typing import Optional, Union, Callable
from discord import Message, Color
from discord.ext.commands import Bot, DefaultHelpCommand
from discordplus.lib import Config, RequiredValue
class SlashConfig(Config, auto_setup=True):
    # NOTE(review): Config(auto_setup=True) presumably consumes these
    # annotated class attributes as config fields — confirm against
    # discordplus.lib.Config before reordering or retyping them.
    sync_commands: bool = False
    debug_guild: Optional[int] = None
    delete_from_unused_guilds: bool = False
    sync_on_cog_reload: bool = False
    override_type: bool = False
    application_id: Optional[int] = None
class BotPlusConfig(Config, auto_setup=True):
    """Configuration used to construct a bot instance.

    ``token`` is the only field marked required (it is assigned a
    ``RequiredValue`` sentinel); every other field has a usable default.
    """

    token: str = RequiredValue()
    command_prefix: Union[str, Callable[[Bot, Message], str]] = None
    log_channel_id: int = None
    help_command = DefaultHelpCommand()
    description = None
    color = Color.default()
    slash_config: Optional[SlashConfig] = None
ace6fda934a6a04d60709391f23de20e5431b961 | 129 | py | Python | bot/common.py | Supportiii/telegram-report-bot | 6a050caafb1c205c0fd58f91be9264f1190ea706 | [
"MIT"
] | null | null | null | bot/common.py | Supportiii/telegram-report-bot | 6a050caafb1c205c0fd58f91be9264f1190ea706 | [
"MIT"
] | null | null | null | bot/common.py | Supportiii/telegram-report-bot | 6a050caafb1c205c0fd58f91be9264f1190ea706 | [
"MIT"
] | null | null | null | from aiogram.utils.callback_data import CallbackData
# Callback-data factory with prefix "delmsg": packs the chosen option, the
# reported user's id and the message ids into inline-button callback payloads.
report_msg_cb = CallbackData("delmsg", "option", "user_id", "message_ids")
| 32.25 | 74 | 0.790698 |
ace6fdd62b0a486234070f2e7eb76c33541040e3 | 4,797 | py | Python | crypten/nn/onnx_helper.py | gmuraru/CrypTen | e39a7aaf65436706321fe4e3fc055308c78b6b92 | [
"MIT"
] | null | null | null | crypten/nn/onnx_helper.py | gmuraru/CrypTen | e39a7aaf65436706321fe4e3fc055308c78b6b92 | [
"MIT"
] | null | null | null | crypten/nn/onnx_helper.py | gmuraru/CrypTen | e39a7aaf65436706321fe4e3fc055308c78b6b92 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.onnx.symbolic_helper as sym_help
import torch.onnx.symbolic_registry as sym_registry
from onnx import numpy_helper
def get_parameter_name(name):
    """Return the final dotted component of a parameter key.

    For a key such as ``"layer1.weight"`` this yields ``"weight"``;
    a key containing no dot is returned unchanged.
    """
    return name.rsplit(".", 1)[-1]
def get_attribute_value(attr):
    """Extract the Python value stored in an ONNX graph attribute.

    Scalar fields are checked first (float, int, string, tensor), then the
    repeated fields (int list, float list); only one field of an attribute
    proto is ever populated.

    Raises:
        ValueError: if no recognized field is populated.
    """
    if attr.HasField("f"):
        # floating-point attribute
        return attr.f
    if attr.HasField("i"):
        # integer attribute
        return attr.i
    if attr.HasField("s"):
        # string attribute
        return attr.s  # TODO: Sanitize string.
    if attr.HasField("t"):
        # tensor attribute: convert through numpy into a torch tensor
        return torch.from_numpy(numpy_helper.to_array(attr.t))
    if len(attr.ints) > 0:
        return list(attr.ints)
    if len(attr.floats) > 0:
        return list(attr.floats)
    raise ValueError("Unknown attribute type for attribute %s." % attr.name)
def _update_onnx_symbolic_registry():
    """
    Updates the ONNX symbolic registry for operators that need a
    CrypTen-specific implementation and custom operators.

    The table below maps the operator name used by torch's symbolic
    registry to the CrypTen-safe symbolic function defined in this module.
    """
    # One dispatch table instead of four copy-pasted if-blocks; adding a new
    # override now only requires a new entry here.
    crypten_overrides = {
        "softmax": _onnx_crypten_softmax,
        "log_softmax": _onnx_crypten_logsoftmax,
        "dropout": _onnx_crypten_dropout,
        "feature_dropout": _onnx_crypten_feature_dropout,
    }
    for version_registry in sym_registry._registry.values():
        # Iterate a snapshot of the keys: only existing keys are reassigned,
        # but a list() makes the loop robust against dict-view semantics.
        for function_key in list(version_registry.keys()):
            override = crypten_overrides.get(function_key)
            if override is not None:
                version_registry[function_key] = override
@sym_help.parse_args("v", "i", "none")
def _onnx_crypten_softmax(g, input, dim, dtype=None):
    """Emit a single ONNX Softmax node for PyTorch's Softmax module.

    Overrides PyTorch's default lowering of softmax to an Exp / ReduceSum /
    Div sequence, which can overflow numerically when run on CrypTensors.
    A Cast is appended only when a non-constant dtype argument is supplied.
    """
    softmax_value = g.op("Softmax", input, axis_i=dim)
    if not (dtype and dtype.node().kind() != "prim::Constant"):
        return softmax_value
    scalar_type = sym_help._get_const(dtype, "i", "dtype")
    return g.op("Cast", softmax_value, to_i=sym_help.scalar_type_to_onnx[scalar_type])
@sym_help.parse_args("v", "i", "none")
def _onnx_crypten_logsoftmax(g, input, dim, dtype=None):
    """Emit a single ONNX LogSoftmax node for PyTorch's LogSoftmax module.

    Overrides PyTorch's default lowering, which may otherwise introduce
    Transpose operators. A Cast is appended only when a non-constant dtype
    argument is supplied.
    """
    logsoftmax_value = g.op("LogSoftmax", input, axis_i=dim)
    if not (dtype and dtype.node().kind() != "prim::Constant"):
        return logsoftmax_value
    scalar_type = sym_help._get_const(dtype, "i", "dtype")
    return g.op("Cast", logsoftmax_value, to_i=sym_help.scalar_type_to_onnx[scalar_type])
@sym_help.parse_args("v", "f", "i")
def _onnx_crypten_dropout(g, input, p, train):
    """Keep PyTorch's Dropout module in the exported ONNX graph.

    PyTorch normally strips Dropout on export because it assumes the ONNX
    model is inference-only; CrypTen converts ONNX models into trainable
    models, so the node must survive. Only the first output (the data
    tensor) is returned; the mask output is discarded.
    """
    outputs = g.op("Dropout", input, ratio_f=p, outputs=2)
    return outputs[0]
@sym_help.parse_args("v", "f", "i")
def _onnx_crypten_feature_dropout(g, input, p, train):
    """Keep PyTorch's DropoutNd module in the exported ONNX graph.

    PyTorch normally strips DropoutNd on export because it assumes the ONNX
    model is inference-only; CrypTen converts ONNX models into trainable
    models, so a custom DropoutNd node is emitted instead. Only the first
    output (the data tensor) is returned; the mask output is discarded.
    """
    outputs = g.op("DropoutNd", input, ratio_f=p, outputs=2)
    return outputs[0]
| 39 | 87 | 0.6798 |
ace6ff6db88dc4fa4a1af9bc1ab9648c33b3dda7 | 4,590 | py | Python | 4-bandit/code/policyHybridLinUCB.py | lukaselmer/ethz-data-mining | cb4215c202efc37f3626a25c8301a4ac36813493 | [
"MIT"
] | 2 | 2015-01-24T18:22:33.000Z | 2019-08-14T06:30:58.000Z | 4-bandit/code/policyHybridLinUCB.py | lukaselmer/ethz-data-mining | cb4215c202efc37f3626a25c8301a4ac36813493 | [
"MIT"
] | null | null | null | 4-bandit/code/policyHybridLinUCB.py | lukaselmer/ethz-data-mining | cb4215c202efc37f3626a25c8301a4ac36813493 | [
"MIT"
] | 2 | 2016-01-15T21:12:32.000Z | 2019-08-14T06:30:59.000Z | #!/usr/bin/env python2.7
import numpy as np
#with alpha = 0.71 we get Online: CTR=?? Took ??
# Offline: TAKES TOO LONG!!!! Evaluated 51586/1040000 lines. CTR = 0.055907
# Implementation of Linear UCB
class LinUCB:
    """Hybrid LinUCB contextual-bandit policy (Li et al. 2010, Algorithm 2).

    Maintains a shared ("hybrid") ridge-regression component (A_zero,
    b_zero, beta_hat) over the 36-dimensional outer product of article and
    user features, plus per-article components (A, B, b, theta_hat) over
    the 6-dimensional user features.

    Fixes over the previous version: all mutable state now lives on the
    instance (it used to be shared class attributes, which ``+=`` on numpy
    arrays mutated in place across instances), the duplicate
    ``current_article``/``current_user`` declarations are gone, and the
    per-article ``A``/``A_inverse`` and ``b``/``theta_hat`` entries no
    longer alias the same array objects.
    """

    # Exploration weight. Per the original author's note, alpha = 0.71 gave
    # an offline CTR of about 0.0559 on the evaluation log.
    alpha = 0.71

    def __init__(self):
        # Shared (hybrid) model state over the 36-d combined feature space.
        self.all_articles = []
        self.A_zero = np.identity(36)
        self.A_zero_inverse = np.identity(36)
        self.b_zero = np.zeros((36, 1))
        self.beta_hat = np.zeros((36, 1))
        # Per-article model state, keyed by article id.
        self.all_A = {}
        self.all_A_inverse = {}
        self.all_B = {}
        self.all_b = {}
        self.all_theta_hat = {}
        # Context of the most recent recommendation, consumed by update().
        self.current_article = None
        self.current_user = None
        self.current_z = None

    def set_articles(self, articles):
        """Store the article features and initialise per-article state.

        Args:
            articles: mapping from article id to its 6-dimensional feature
                vector.
        """
        self.all_articles = articles
        for article in self.all_articles:
            # Each entry gets its own array (no aliasing between A and its
            # cached inverse, or between b and theta_hat).
            self.all_A[article] = np.identity(6)
            self.all_A_inverse[article] = np.identity(6)
            self.all_B[article] = np.zeros((6, 36))
            self.all_b[article] = np.zeros((6, 1))
            self.all_theta_hat[article] = np.zeros((6, 1))

    def recommend(self, timestamp, user_features, articles):
        """Return the article id with the highest upper confidence bound.

        Also remembers the chosen article, its combined feature vector z_t
        and the user features so that update() can apply the reward.
        """
        user_features = np.reshape(user_features, (6, 1))
        best_ucb = -np.inf
        for article in articles:
            # z_t: flattened 36-d outer product of article and user features.
            outer = np.outer(np.reshape(np.array(self.all_articles[article]), (6, 1)), user_features)
            z_t = np.reshape(outer, (36, 1))
            A_inv = self.all_A_inverse[article]
            B = self.all_B[article]
            # Variance estimate s_{t,a} (Algorithm 2, line 12 of the paper).
            first_term = np.dot(np.dot(z_t.T, self.A_zero_inverse), z_t)
            second_term = np.dot(np.dot(np.dot(np.dot(2 * z_t.T, self.A_zero_inverse), B.T), A_inv), user_features)
            third_term = np.dot(np.dot(user_features.T, A_inv), user_features)
            fourth_term = np.dot(np.dot(np.dot(np.dot(np.dot(np.dot(user_features.T, A_inv), B), self.A_zero_inverse), B.T), A_inv), user_features)
            s = first_term - second_term + third_term + fourth_term
            # UCB = shared estimate + per-article estimate + exploration bonus.
            current_ucb = np.dot(z_t.T, self.beta_hat) + np.dot(user_features.T, self.all_theta_hat[article]) + self.alpha * np.sqrt(s)
            if current_ucb > best_ucb:
                best_ucb = current_ucb
                self.current_article = article
                self.current_z = z_t
                self.current_user = user_features
        return self.current_article

    def update(self, reward):
        """Fold the observed reward for the last recommendation into the model.

        Rewards other than 0 or 1 are ignored.
        """
        if reward == 0 or reward == 1:
            article = self.current_article
            user = self.current_user
            z = self.current_z
            A = self.all_A[article]
            B = self.all_B[article]
            b = self.all_b[article]
            # Shared-model step using the pre-update per-article inverse.
            self.A_zero += np.dot(np.dot(B.T, self.all_A_inverse[article]), B)
            self.b_zero += np.dot(np.dot(B.T, self.all_A_inverse[article]), b)
            # Per-article updates with the new observation.
            self.all_A[article] = A + np.dot(user, user.T)
            self.all_B[article] = B + np.dot(user, z.T)
            self.all_b[article] = b + reward * user
            self.all_A_inverse[article] = np.linalg.inv(self.all_A[article])
            A_inv = self.all_A_inverse[article]
            # Shared-model step using the post-update per-article values.
            self.A_zero += np.dot(z, z.T) - np.dot(np.dot(self.all_B[article].T, A_inv), self.all_B[article])
            self.b_zero += reward * z - np.dot(np.dot(self.all_B[article].T, A_inv), self.all_b[article])
            # Refresh cached inverses and coefficient estimates.
            self.A_zero_inverse = np.linalg.inv(self.A_zero)
            self.beta_hat = np.dot(self.A_zero_inverse, self.b_zero)
            self.all_theta_hat[article] = np.dot(self.all_A_inverse[article], self.all_b[article] - np.dot(self.all_B[article], self.beta_hat))
# Module-level singleton policy instance shared by the evaluator hook
# functions below.
linucb = LinUCB()


# Evaluator will call this function and pass the article features.
# Check evaluator.py description for details.
def set_articles(art):
    """Pass the article-id -> feature-vector mapping to the policy."""
    linucb.set_articles(art)
# This function will be called by the evaluator.
# Check task description for details.
def update(reward):
    """Feed the observed reward for the last recommendation back to the policy."""
    linucb.update(reward)
# This function will be called by the evaluator.
# Check task description for details.
# NOTE(review): the misspelling "reccomend" is presumably the exact name the
# evaluator looks up — confirm before renaming.
def reccomend(timestamp, user_features, articles):
    """Return the article id the policy recommends for this user/time step."""
    return linucb.recommend(timestamp, user_features, articles)
ace6ffb7d63e7a6d6dceb2bbfce6b963551dfd76 | 1,989 | py | Python | sdk/python/pulumi_azure/compute/image.py | Frassle/pulumi-azure | 593dd1020b09b83422928913d06bf91538926155 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/compute/image.py | Frassle/pulumi-azure | 593dd1020b09b83422928913d06bf91538926155 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/compute/image.py | Frassle/pulumi-azure | 593dd1020b09b83422928913d06bf91538926155 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
from .. import utilities, tables
class Image(pulumi.CustomResource):
"""
Manage a custom virtual machine image that can be used to create virtual machines.
"""
def __init__(__self__, __name__, __opts__=None, data_disks=None, location=None, name=None, os_disk=None, resource_group_name=None, source_virtual_machine_id=None, tags=None):
"""Create a Image resource with the given unique name, props, and options."""
if not __name__:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(__name__, str):
raise TypeError('Expected resource name to be a string')
if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['data_disks'] = data_disks
if not location:
raise TypeError('Missing required property location')
__props__['location'] = location
__props__['name'] = name
__props__['os_disk'] = os_disk
if not resource_group_name:
raise TypeError('Missing required property resource_group_name')
__props__['resource_group_name'] = resource_group_name
__props__['source_virtual_machine_id'] = source_virtual_machine_id
__props__['tags'] = tags
super(Image, __self__).__init__(
'azure:compute/image:Image',
__name__,
__props__,
__opts__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 36.163636 | 178 | 0.681247 |
ace700ac2d13625c42b660af90008d13aa163a10 | 2,292 | py | Python | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/ModifySnapshotGroupRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/ModifySnapshotGroupRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/ModifySnapshotGroupRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ModifySnapshotGroupRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'ModifySnapshotGroup')
self.set_method('POST')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_Description(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_SnapshotGroupId(self):
return self.get_query_params().get('SnapshotGroupId')
def set_SnapshotGroupId(self,SnapshotGroupId):
self.add_query_param('SnapshotGroupId',SnapshotGroupId)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name) | 33.705882 | 72 | 0.770942 |
ace700bc42b3676bc055e9d5cac4d39276392b78 | 4,486 | py | Python | selection_pipeline/haps_to_hapmap.py | MerrimanLab/selectionTools | b2f49c6f52c84a751791dff6a753a13ca830831b | [
"MIT"
] | 31 | 2016-02-20T16:29:56.000Z | 2022-02-16T09:39:09.000Z | selection_pipeline/haps_to_hapmap.py | MerrimanLab/selectionTools | b2f49c6f52c84a751791dff6a753a13ca830831b | [
"MIT"
] | 14 | 2016-10-06T15:50:11.000Z | 2022-02-16T20:46:11.000Z | selection_pipeline/haps_to_hapmap.py | MerrimanLab/selectionTools | b2f49c6f52c84a751791dff6a753a13ca830831b | [
"MIT"
] | 18 | 2016-05-12T19:03:44.000Z | 2021-07-16T01:58:58.000Z | import re
from optparse import OptionParser
from pyfasta import Fasta
#
# $1 haps format to be converted to hapmap format
# $2 sample format file
#
# 11 columns of precursor
# regex for determining we have a valid SNP #
def aa_seq(options):
f = Fasta(options.ancestralfasta)
keyz = (f.keys())
match = ''
if (options.single_chromosome):
# Single chromosome fasta should only have one sequence.
# that sequence should be the sequence of interest.
keyz = (list(keyz))
key = keyz[0]
else:
get_chromosome_from_header = options.header
get_chromosome_from_header.replace('?', options.chromosome)
for key in keyz:
if(re.match(get_chromosome_from_header, key) is not None):
match = key
if(match is ''):
raise Exception("No match possible is something wrong with "
" the regex specified to the program as "
"--header-regex")
aaSeq = f[key]
return(aaSeq)
def main():
header = ("rs# alleles chrom pos strand assembly# center protLSID "
"assayLSID panelLSID QCcode")
parser = OptionParser()
parser.add_option('-i', dest="haps_file", help="Haps Input File")
parser.add_option('-s', dest="sample_file", help="Sample Input File")
parser.add_option('-c', dest="chromosome", help="Chromosome")
parser.add_option('-o', dest="output_file_name", help="Output File name")
parser.add_option('-a', dest="ancestralfasta", help="Outgroup fasta file")
parser.add_option('--id', dest="ancestral_indivdual_id",
help="Name of the ancestral Individual")
parser.add_option('--header-regex', dest='header',
help=("To determine which chromosome to extract"
"is a regex with a ? for the chromosome number"))
parser.add_option('--single-chromosome', action="store_true",
dest="single_chromosome")
(options, args) = parser.parse_args()
options.chromosome = str(options.chromosome)
if(options.single_chromosome is None):
options.single_chromosome = False
assert options.header is None, \
"Option header_regex required if the fasta file is"\
"split by chromosome"
# Set default ancestral ID#
if (options.ancestral_indivdual_id is None):
options.ancestral_indivdual_id = 'ANCESTOR'
sample_ids = []
output = open(options.output_file_name, 'w')
failed_snps = open('failed_snps.txt', 'w')
aaSeq = aa_seq(options)
with open(options.sample_file, 'r') as f:
for i, line in enumerate(f):
if(i > 1):
line = line.split()
sample_ids.append(line[1])
# Construct the header line.
sample_ids.append(options.ancestral_indivdual_id)
header = header + ' ' + ' '.join(sample_ids) + '\n'
output.write(header)
with open(options.haps_file, 'r') as f:
for line in f:
output_line = ''
line = line.split()
rsid = line[1]
pos = line[2]
ancestral_allele = aaSeq[int(pos)-1]
if not (re.match('[ACTGactg]', ancestral_allele)):
failed_snps.write(rsid + ' ' + pos + '\n')
else:
a1 = line[3]
a2 = line[4]
ancestral_genotypes = ancestral_allele.upper() + \
ancestral_allele.upper()
def check_alleles(x):
try:
x = int(x)
if(x == 0):
return a1
else:
return a2
except:
return "0"
change_alleles = map(check_alleles, line[5:])
change_alleles = list(change_alleles)
zipa = change_alleles[0::2]
zipb = change_alleles[1::2]
change_alleles = zip(zipa, zipb)
change_alleles = [''.join(row) for row in change_alleles]
output_line = rsid + ' ' + a1 + '/' + a2 + \
' ' + options.chromosome + ' ' + pos
output_line = output_line + ' + -9 -9 -9 -9 -9 -9 ' +\
' '.join(change_alleles) + ' ' + ancestral_genotypes
output.write(output_line + '\n')
output.close()
failed_snps.close()
if __name__ == "__main__":
main()
| 38.34188 | 78 | 0.557958 |
ace700c16b171e4d1162bcd20f524b01f4848f28 | 2,128 | py | Python | stats.py | JoseAlanis/dpx_tools | bf203dc727ccc07491ec810beeaa16aa4a32411e | [
"BSD-3-Clause"
] | null | null | null | stats.py | JoseAlanis/dpx_tools | bf203dc727ccc07491ec810beeaa16aa4a32411e | [
"BSD-3-Clause"
] | null | null | null | stats.py | JoseAlanis/dpx_tools | bf203dc727ccc07491ec810beeaa16aa4a32411e | [
"BSD-3-Clause"
] | null | null | null | # Authors: Jose C. Garcia Alanis <alanis.jcg@gmail.com>
#
# License: BSD-3-Clause
import numpy as np
def sliding_window_correlation(data, sampling_frequency=256.0, time_step=1.0):
"""
Parameters
----------
data : np.ndarray
Should be a 2-dimensional array of shape channel x samples.
sampling_frequency : float
The sampling frequency of the data (in Hz). Defaults to 256.
time_step : float | int
Window length for analysis (in seconds). Defaults to 1.0.
Returns
-------
channel_correlations : np.ndarray
Numpy array containing the channel by channel correlations.
"""
# get data dimensions
n_channels, n_samples = data.shape
# based on the sampling rate and window length (in seconds):
# determine the number of data point that should be included
# in the analysis
samples_for_corr = int(time_step * sampling_frequency)
# get the index of the samples that marks the start of each window
# for correlation analysis
sample_idx = np.arange(0, n_samples, samples_for_corr)
# number of windows to use for analysis
n_corr_steps = len(sample_idx)
# reshape data to individual windows
dat_windowed = data.reshape((n_channels, n_corr_steps, samples_for_corr))
# placeholder for results
channel_correlations = np.zeros((n_corr_steps, n_channels, n_channels))
# compute correlations for windowed data
for step in range(n_corr_steps):
# get window data
eeg_portion = np.squeeze(dat_windowed[:, step, :])
# compute correlation coefficients
corrs = np.corrcoef(eeg_portion)
channel_correlations[step, :, :] = corrs
return channel_correlations
# -- WIP --
# def noise_correlation:
# noise_covs = mne.compute_covariance(
# epochs, tmax=0., method=('empirical', 'shrunk'),
# return_estimators=True, rank=None)
#
# noise_diag = np.diag(noise_covs[0].data)
# np.sqrt(noise_diag)
# noise_corr = np.linalg.inv(np.sqrt(np.diag(noise_diag))) @ noise_covs[
# 0].data @ np.linalg.inv(np.sqrt(np.diag(noise_diag)))
| 33.25 | 78 | 0.678571 |
ace700c8cd995ee6ebada38122a11d269e6811e0 | 49,429 | py | Python | docassemble_webapp/docassemble/webapp/socketserver.py | amsclark/docassemble | ae5c194831faabb52681a6c827ec30c106273eb7 | [
"MIT"
] | 1 | 2019-03-25T08:22:37.000Z | 2019-03-25T08:22:37.000Z | docassemble_webapp/docassemble/webapp/socketserver.py | amsclark/docassemble | ae5c194831faabb52681a6c827ec30c106273eb7 | [
"MIT"
] | null | null | null | docassemble_webapp/docassemble/webapp/socketserver.py | amsclark/docassemble | ae5c194831faabb52681a6c827ec30c106273eb7 | [
"MIT"
] | null | null | null | from six import string_types, text_type, PY2
import sys
import docassemble.base.config
docassemble.base.config.load(arguments=sys.argv)
from docassemble.base.config import daconfig
import docassemble.base.functions
import eventlet
eventlet.sleep()
eventlet.monkey_patch()
from flask_socketio import join_room, disconnect
from docassemble.webapp.app_socket import app, db, socketio
from sqlalchemy import create_engine, MetaData, or_, and_
from simplekv.memory.redisstore import RedisStore
import docassemble.base.util
import redis
import json
import datetime
import pytz
if PY2:
import cPickle as pickle
else:
import pickle
import re
import time
import random
from docassemble.webapp.backend import initial_dict, can_access_file_number, get_info_from_file_number, get_info_from_file_reference, get_new_file_number, nice_utc_date, nice_date_from_utc, fetch_user_dict, get_chat_log, encrypt_phrase, pack_phrase, fix_pickle_obj
from docassemble.webapp.users.models import UserModel, ChatLog
from docassemble.base.functions import get_default_timezone, word
from flask import session, request
from flask_kvsession import KVSessionExtension
import docassemble.webapp.daredis
from docassemble.webapp.daredis import redis_host, redis_port, redis_offset
store = RedisStore(docassemble.webapp.daredis.r_store)
kv_session = KVSessionExtension(store, app)
from docassemble.webapp.daredis import r as rr
threads = dict()
secrets = dict()
def obtain_lock(user_code, filename):
    """Acquire the Redis advisory lock for a session/interview pair.

    Polls up to four times, one second apart, waiting for the lock to
    clear. If it never clears, the lock is treated as stale
    ("deadlocked"), forcibly released, and then taken anyway. The lock
    auto-expires after four seconds as a further safety net.
    """
    key = 'da:lock:' + user_code + ':' + filename
    for _attempt in range(4):
        if rr.get(key):
            sys.stderr.write("obtain_lock: waiting for " + key + "\n")
            time.sleep(1.0)
        else:
            break
    else:
        # Never observed the lock free: assume the holder died and clear it.
        sys.stderr.write("Request for " + key + " deadlocked\n")
        release_lock(user_code, filename)
    pipe = rr.pipeline()
    pipe.set(key, 1)
    pipe.expire(key, 4)
    pipe.execute()
def release_lock(user_code, filename):
    """Drop the Redis advisory lock held for this session/interview pair."""
    rr.delete('da:lock:' + user_code + ':' + filename)
def background_thread(sid=None, user_id=None, temp_user_id=None):
    """Per-connection listener that bridges Redis pub/sub to Socket.IO.

    Subscribes to the Redis channel named after the client's socket id
    (``sid``) and relays each published message to the browser in the
    room of the same name, until a matching "KILL" message arrives.

    Args:
        sid: the Socket.IO session id of the connected client.
        user_id: id of the logged-in user, or None for anonymous users.
        temp_user_id: id of the temporary (anonymous) user, if any.
    """
    if user_id is not None:
        user_id = int(user_id)
    if temp_user_id is not None:
        temp_user_id = int(temp_user_id)
    with app.app_context():
        sys.stderr.write("Started client thread for " + str(sid) + " who is " + str(user_id) + " or " + str(temp_user_id) + "\n")
        if user_id is None:
            person = None
            user_is_temp = True
        else:
            person = UserModel.query.filter_by(id=user_id).first()
            user_is_temp = False
        # NOTE(review): the_timezone is computed but never used below —
        # presumably retained for parity with similar threads; confirm.
        if person is not None and person.timezone is not None:
            the_timezone = pytz.timezone(person.timezone)
        else:
            the_timezone = pytz.timezone(get_default_timezone())
        # A dedicated Redis connection: pubsub.listen() blocks, so the
        # shared module-level client cannot be used here.
        r = redis.StrictRedis(host=redis_host, port=redis_port, db=redis_offset)
        partners = set()
        pubsub = r.pubsub()
        pubsub.subscribe([sid])
        for item in pubsub.listen():
            sys.stderr.write("0\n" + repr(item) + "\n")
            # Skip subscribe/unsubscribe notifications.
            if item['type'] != 'message':
                continue
            #sys.stderr.write("sid: " + str(sid) + ":\n")
            data = None
            try:
                data = json.loads(item['data'].decode())
            except:
                sys.stderr.write(" JSON parse error: " + str(item['data'].decode()) + "\n")
                continue
            # A KILL addressed to this sid (or broadcast with no sid) ends
            # the thread.
            if data.get('message', None) == "KILL" and (('sid' in data and data['sid'] == sid) or 'sid' not in data):
                pubsub.unsubscribe(sid)
                sys.stderr.write(" interview unsubscribed and finished for " + str(sid) + "\n")
                break
            else:
                sys.stderr.write(" Got something for sid " + str(sid) + " from " + data.get('origin', "Unknown origin") + "\n")
                if 'messagetype' in data:
                    if data['messagetype'] == 'chat':
                        #sys.stderr.write(" Emitting interview chat message: " + str(data['message']['message']) + "\n")
                        # Mark whether this chat message was authored by the
                        # connected user so the client can style it.
                        if (user_is_temp is True and str(temp_user_id) == str(data['message'].get('temp_user_id', None))) or (user_is_temp is False and str(user_id) == str(data['message'].get('user_id', None))):
                            data['message']['is_self'] = True
                        else:
                            data['message']['is_self'] = False
                        socketio.emit('chatmessage', {'i': data['yaml_filename'], 'uid': data['uid'], 'userid': data['user_id'], 'data': data['message']}, namespace='/wsinterview', room=sid)
                    elif data['messagetype'] == 'chatready':
                        # A chat partner is available: also listen on the
                        # partner's channel.
                        pubsub.subscribe(data['sid'])
                        partners.add(data['sid'])
                        sys.stderr.write("chatready 2")
                        socketio.emit('chatready', {}, namespace='/wsinterview', room=sid)
                    elif data['messagetype'] == 'departure':
                        if data['sid'] in partners:
                            partners.remove(data['sid'])
                        socketio.emit('departure', {'numpartners': len(partners)}, namespace='/wsinterview', room=sid)
                    elif data['messagetype'] == 'block':
                        # Being blocked is reported to the client as a
                        # departure.
                        if data['sid'] in partners:
                            partners.remove(data['sid'])
                        socketio.emit('departure', {'numpartners': len(partners)}, namespace='/wsinterview', room=sid)
                    elif data['messagetype'] == 'chatpartner':
                        partners.add(data['sid'])
                    elif data['messagetype'] == 'controllerchanges':
                        socketio.emit('controllerchanges', {'parameters': data['parameters'], 'clicked': data['clicked']}, namespace='/wsinterview', room=sid)
                    elif data['messagetype'] == 'controllerstart':
                        socketio.emit('controllerstart', {}, namespace='/wsinterview', room=sid)
                    elif data['messagetype'] == 'controllerexit':
                        socketio.emit('controllerexit', {}, namespace='/wsinterview', room=sid)
                    # elif data['messagetype'] == "newpage":
                    #     sys.stderr.write(" Got new page for interview\n")
                    #     try:
                    #         obj = json.loads(r.get(data['key']))
                    #     except:
                    #         sys.stderr.write(" newpage JSON parse error\n")
                    #         continue
                    #     socketio.emit('newpage', {'obj': obj}, namespace='/wsinterview', room=sid)
        sys.stderr.write(' exiting interview thread for sid ' + str(sid) + '\n')
@socketio.on('start_being_controlled', namespace='/wsinterview')
def interview_start_being_controlled(message):
    """Announce that this interview session is ready to be remote-controlled.

    Publishes to the session's ``da:input:...`` channel a payload naming
    the corresponding ``da:session:...`` channel, so a listening operator
    knows where to send control messages.
    """
    uid = session.get('uid', None)
    filename = session.get('i', None)
    controlled_user = session.get('user_id', 't' + str(session.get('tempuser', None)))
    channel_suffix = 'uid:' + str(uid) + ':i:' + str(filename) + ':userid:' + str(controlled_user)
    payload = json.dumps(dict(message='start_being_controlled', key='da:session:' + channel_suffix))
    rr.publish('da:input:' + channel_suffix, payload)
@socketio.on('message', namespace='/wsinterview')
def handle_message(message):
    # Liveness reply: echo a fixed greeting back to the sender only.
    # NOTE(review): the name handle_message is re-used by the 'transmit'
    # handler below; Socket.IO registration still works because the
    # decorator runs at definition time, but the duplicate name is
    # confusing — consider renaming one of them.
    socketio.emit('mymessage', {'data': "Hello"}, namespace='/wsinterview', room=request.sid)
    #sys.stderr.write('received message from ' + str(session.get('uid', 'NO UID')) + ': ' + message['data'] + "\n")
@socketio.on('chat_log', namespace='/wsinterview')
def chat_log(message):
    """Send the interview's chat history back to the requesting client."""
    interview_state = get_dict()
    if interview_state is None:
        return
    mode = interview_state['_internal']['livehelp']['mode']
    filename = session.get('i', None)
    uid = session.get('uid', None)
    uid_user = session.get('user_id', None)
    # Anonymous visitors are identified by a temporary user id instead.
    if uid_user is None:
        uid_temp = session.get('tempuser', None)
    else:
        uid_temp = None
    if uid_user is not None:
        uid_user = int(uid_user)
    if uid_temp is not None:
        uid_temp = int(uid_temp)
    # Decryption secret for encrypted chat entries, from the cookie.
    secret = request.cookies.get('secret', None)
    if secret is not None:
        secret = str(secret)
    history = get_chat_log(mode, filename, uid, uid_user, uid_temp, secret, uid_user, uid_temp)
    socketio.emit('chat_log', {'data': history}, namespace='/wsinterview', room=request.sid)
@socketio.on('transmit', namespace='/wsinterview')
def handle_message(message):
    #sys.stderr.write('received transmission from ' + str(session.get('uid', 'NO UID')) + ': ' + message['data'] + "\n")
    # Relay the client's payload onto the Redis channel named after the
    # session uid, so any listener for this session receives it.
    # NOTE(review): this re-defines the module-level name handle_message
    # (also used for the 'message' event above); registration is
    # unaffected, but the duplicate name is confusing.
    session_id = session.get('uid', None)
    if session_id is not None:
        rr.publish(session_id, json.dumps(dict(origin='client', room=request.sid, message=message['data'])))
@socketio.on('terminate', namespace='/wsinterview')
def terminate_interview_connection():
    """Tell the client to shut down its interview socket connection."""
    sys.stderr.write("terminate_interview_connection\n")
    # hopefully the disconnect will be triggered
    # if request.sid in threads:
    #     rr.publish(request.sid, json.dumps(dict(origin='client', message='KILL', sid=request.sid)))
    # Emitting 'terminate' prompts the browser to disconnect; server-side
    # cleanup is left to the disconnect path rather than done here.
    socketio.emit('terminate', {}, namespace='/wsinterview', room=request.sid)
    #disconnect()
@socketio.on('chatmessage', namespace='/wsinterview')
def chat_message(data):
    """Persist an incoming chat message and broadcast it to this session.

    The message text is stored in the ChatLog table (encrypted with the
    cookie secret when the session is encrypted) and then published on the
    sender's own Redis channel, from which the background thread relays it
    to the browser.
    """
    nowtime = datetime.datetime.utcnow()
    session_id = session.get('uid', None)
    yaml_filename = session.get('i', None)
    encrypted = session.get('encrypted', True)
    secret = request.cookies.get('secret', None)
    if secret is not None:
        secret = str(secret)
    # Store the text encrypted or merely packed, matching the session's
    # encryption state.
    if encrypted:
        message = encrypt_phrase(data['data'], secret)
    else:
        message = pack_phrase(data['data'])
    user_id = session.get('user_id', None)
    # Anonymous visitors are identified by a temporary user id instead.
    if user_id is None:
        temp_user_id = session.get('tempuser', None)
    else:
        temp_user_id = None
    if user_id is not None:
        user_id = int(user_id)
    if temp_user_id is not None:
        temp_user_id = int(temp_user_id)
    user_dict = get_dict()
    chat_mode = user_dict['_internal']['livehelp']['mode']
    # Peer modes make the stored message visible to other interviewees.
    if chat_mode in ['peer', 'peerhelp']:
        open_to_peer = True
    else:
        open_to_peer = False
    record = ChatLog(filename=yaml_filename, key=session_id, message=message, encrypted=encrypted, modtime=nowtime, temp_user_id=temp_user_id, user_id=user_id, open_to_peer=open_to_peer, temp_owner_id=temp_user_id, owner_id=user_id)
    db.session.add(record)
    db.session.commit()
    if user_id is not None:
        person = UserModel.query.filter_by(id=user_id).first()
    else:
        person = None
    modtime = nice_utc_date(nowtime)
    # Publish the plaintext message on this client's channel; the payload
    # differs only in the identity fields (temporary vs registered user).
    if person is None:
        rr.publish(request.sid, json.dumps(dict(origin='client', messagetype='chat', sid=request.sid, yaml_filename=yaml_filename, uid=session_id, user_id='t' + str(temp_user_id), message=dict(id=record.id, temp_user_id=record.temp_user_id, modtime=modtime, message=data['data'], roles=['user'], mode=chat_mode))))
    else:
        rr.publish(request.sid, json.dumps(dict(origin='client', messagetype='chat', sid=request.sid, yaml_filename=yaml_filename, uid=session_id, user_id=user_id, message=dict(id=record.id, user_id=record.user_id, first_name=person.first_name, last_name=person.last_name, email=person.email, modtime=modtime, message=data['data'], roles=[role.name for role in person.roles], mode=chat_mode))))
    #sys.stderr.write('received chat message from sid ' + str(request.sid) + ': ' + data['data'] + "\n")
def wait_for_channel(rr, channel):
    """Ping *channel* on redis until a subscriber answers.

    Publishes a 'ping' message up to five times, sleeping half a second
    after each unanswered attempt.  Returns True as soon as at least one
    listener receives the ping, False when all five attempts go unheard.
    """
    ping_payload = json.dumps(dict(messagetype='ping'))
    for _attempt in range(5):
        if rr.publish(channel, ping_payload) != 0:
            return True
        time.sleep(0.5)
    return False
@socketio.on('connect', namespace='/wsinterview')
def on_interview_connect():
    """Handle a new websocket connection from an interview page.

    Joins the socket's private room, runs the shared connect logic, and
    tells all monitors to refresh their session lists.
    """
    sys.stderr.write("Client connected on interview\n")
    join_room(request.sid)
    interview_connect()
    rr.publish('da:monitor', json.dumps(dict(messagetype='refreshsessions')))
@socketio.on('connectagain', namespace='/wsinterview')
def on_interview_reconnect(data):
    """Handle a client-initiated reconnect on the interview namespace.

    Re-runs the shared connect logic, refreshes monitor session lists, and
    acknowledges the client with a 'reconnected' event.
    """
    sys.stderr.write("Client reconnected on interview\n")
    interview_connect()
    rr.publish('da:monitor', json.dumps(dict(messagetype='refreshsessions')))
    socketio.emit('reconnected', {}, namespace='/wsinterview', room=request.sid)
def interview_connect():
    """Shared connect/reconnect logic for the /wsinterview namespace.

    Validates that live help is available for the session, starts the
    per-socket background listener thread, waits for its channel to come up,
    and then attempts to pair this interview with waiting chat partners
    (operators and/or peers) found in the 'da:ready:...' redis list.
    Emits 'terminate' to the client and returns early on any fatal problem.
    """
    session_id = session.get('uid', None)
    if session_id is not None:
        user_dict, is_encrypted = get_dict_encrypt()
        # The decryption secret is only needed when answers are encrypted.
        if is_encrypted:
            secret = request.cookies.get('secret', None)
        else:
            secret = None
        if secret is not None:
            secret = str(secret)
        if user_dict is None:
            sys.stderr.write("user_dict did not exist.\n")
            socketio.emit('terminate', {}, namespace='/wsinterview', room=request.sid)
            return
        chat_info = user_dict['_internal']['livehelp']
        if chat_info['availability'] == 'unavailable':
            sys.stderr.write("Socket started but chat is unavailable.\n")
            socketio.emit('terminate', {}, namespace='/wsinterview', room=request.sid)
            return
        #sys.stderr.write('chat info is ' + str(chat_info) + "\n")
        # Peer modes relax several failure conditions below: an interview may
        # stay connected even when no operator can be reached.
        if user_dict['_internal']['livehelp']['mode'] in ['peer', 'peerhelp']:
            peer_ok = True
        else:
            peer_ok = False
        yaml_filename = session.get('i', None)
        the_user_id = session.get('user_id', 't' + str(session.get('tempuser', None)))
        # One background redis-listener thread per socket sid.
        if request.sid not in threads:
            #sys.stderr.write('Starting thread for sid ' + str(request.sid) + "\n")
            threads[request.sid] = socketio.start_background_task(target=background_thread, sid=request.sid, user_id=session.get('user_id', None), temp_user_id=session.get('tempuser', None))
        channel_up = wait_for_channel(rr, request.sid)
        if not channel_up:
            sys.stderr.write("Channel did not come up.\n")
            socketio.emit('terminate', {}, namespace='/wsinterview', room=request.sid)
            return
        # 'da:ready' holds the keys of potential chat partners queued for this session.
        lkey = 'da:ready:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id)
        #sys.stderr.write("Searching: " + lkey + "\n")
        if rr.exists(lkey):
            lkey_exists = True
        else:
            lkey_exists = False
        if lkey_exists is False and peer_ok is False:
            sys.stderr.write("Key does not exist: " + lkey + ".\n")
            #socketio.emit('terminate', {}, namespace='/wsinterview', room=request.sid)
            #return
        failed_to_find_partner = True
        found_help = False
        if lkey_exists:
            partner_keys = rr.lrange(lkey, 0, -1)
            #sys.stderr.write("partner_keys is: " + str(type(partner_keys)) + " " + str(partner_keys) + "\n")
            if partner_keys is None and not peer_ok:
                sys.stderr.write("No partner keys: " + lkey + ".\n")
                socketio.emit('terminate', {}, namespace='/wsinterview', room=request.sid)
                return
            # The ready list is consumed; partners must re-register to be found again.
            rr.delete(lkey)
            for pkey in partner_keys:
                pkey = pkey.decode()
                #sys.stderr.write("Considering: " + pkey + "\n")
                partner_sid = rr.get(pkey)
                if partner_sid is not None:
                    partner_sid = partner_sid.decode()
                # Operator (monitor) partners are distinguished from peer interviews;
                # only the first reachable operator is used.
                if re.match(r'^da:monitor:available:.*', pkey):
                    is_help = True
                else:
                    is_help = False
                if is_help and found_help:
                    continue
                #sys.stderr.write("Trying to pub to " + str(partner_sid) + " from " + str(pkey) + "\n")
                listeners = rr.publish(partner_sid, json.dumps(dict(messagetype='chatready', uid=session_id, i=yaml_filename, userid=the_user_id, secret=secret, sid=request.sid)))
                #sys.stderr.write("Listeners: " + str(listeners) + "\n")
                if re.match(r'^da:interviewsession.*', pkey):
                    rr.publish(request.sid, json.dumps(dict(messagetype='chatready', sid=partner_sid)))
                else:
                    rr.publish(request.sid, json.dumps(dict(messagetype='chatpartner', sid=partner_sid)))
                # publish() returns the number of subscribers that received the message.
                if listeners > 0:
                    if is_help:
                        found_help = True
                    failed_to_find_partner = False
        if failed_to_find_partner and peer_ok is False:
            sys.stderr.write("Unable to reach any potential chat partners.\n")
            #socketio.emit('terminate', {}, namespace='/wsinterview', room=request.sid)
            #return
        # Advertise this socket's sid so partners and monitors can reach it.
        key = 'da:interviewsession:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:' + str(the_user_id)
        rr.set(key, request.sid)
@socketio.on('disconnect', namespace='/wsinterview')
def on_interview_disconnect():
    """Clean up when an interview websocket disconnects.

    Drops the cached secret for this socket, removes the interview-session
    pointer from redis, lets the session key expire shortly, and tells the
    background thread (via its channel) to shut down.
    """
    sys.stderr.write('Client disconnected from interview\n')
    yaml_filename = session.get('i', None)
    session_id = session.get('uid', None)
    the_user_id = session.get('user_id', 't' + str(session.get('tempuser', None)))
    if request.sid in secrets:
        del secrets[request.sid]
    if session_id is not None:
        rr.delete('da:interviewsession:uid:' + str(session.get('uid', None)) + ':i:' + str(session.get('i', None)) + ':userid:' + str(the_user_id))
        # Short TTL instead of delete: a quick reconnect can revive the session entry.
        key = 'da:session:uid:' + str(session.get('uid', None)) + ':i:' + str(session.get('i', None)) + ':userid:' + str(the_user_id)
        rr.expire(key, 10)
    rr.publish(request.sid, json.dumps(dict(origin='client', message='KILL', sid=request.sid)))
def get_dict():
    """Fetch the interview answer dictionary for the current session.

    Reads the session id and interview filename from the Flask session and
    the decryption secret from the browser cookie.  Returns the user
    dictionary, or None (after logging to stderr) when the session is not
    fully defined or the fetch fails.
    """
    session_id = session.get('uid', None)
    yaml_filename = session.get('i', None)
    if session_id is None or yaml_filename is None:
        sys.stderr.write('Attempt to get dictionary where session not defined\n')
        return None
    secret = request.cookies.get('secret', None)
    if secret is not None:
        secret = str(secret)
    try:
        # fetch_user_dict returns (steps, user_dict, is_encrypted); only the
        # dictionary itself is needed here.
        user_dict = fetch_user_dict(session_id, yaml_filename, secret=secret)[1]
    except Exception as err:
        sys.stderr.write('get_dict: attempt to get dictionary failed: ' + text_type(err) + '\n')
        return None
    return user_dict
def get_dict_encrypt():
    """Like get_dict(), but also report whether the stored answers are encrypted.

    Returns a (user_dict, is_encrypted) pair, or (None, None) when the
    session is incomplete or the fetch raises.
    """
    session_id = session.get('uid', None)
    yaml_filename = session.get('i', None)
    if session_id is None or yaml_filename is None:
        sys.stderr.write('Attempt to get dictionary where session not defined\n')
        return None, None
    secret = request.cookies.get('secret', None)
    if secret is not None:
        secret = str(secret)
    try:
        # fetch_user_dict returns (steps, user_dict, is_encrypted).
        result = fetch_user_dict(session_id, yaml_filename, secret=secret)
    except Exception as err:
        sys.stderr.write('get_dict_encrypt: attempt to get dictionary failed: ' + text_type(err) + '\n')
        return None, None
    return result[1], result[2]
#monitor
#monitor
def monitor_thread(sid=None, user_id=None):
    """Background redis listener for one connected monitor (operator) socket.

    Subscribes to the global 'da:monitor' channel plus the monitor's own sid
    channel and translates incoming redis messages into Socket.IO events
    emitted to the monitor's room.  Runs until a KILL message arrives on the
    sid channel.

    :param sid: Socket.IO session id of the monitor connection.
    :param user_id: database id of the operator, or None.
    """
    with app.app_context():
        sys.stderr.write("Started monitor thread for " + str(sid) + " who is " + str(user_id) + "\n")
        if user_id is not None:
            person = UserModel.query.filter_by(id=user_id).first()
        else:
            person = None
        # the_timezone is computed but not otherwise referenced in this
        # function body — presumably kept for parity with similar threads.
        if person is not None and person.timezone is not None:
            the_timezone = pytz.timezone(person.timezone)
        else:
            the_timezone = pytz.timezone(get_default_timezone())
        # A dedicated connection: pubsub.listen() blocks, so the shared
        # client cannot be used here.
        r = redis.StrictRedis(host=redis_host, port=redis_port, db=redis_offset)
        listening_sids = set()
        pubsub = r.pubsub()
        pubsub.subscribe(['da:monitor', sid])
        for item in pubsub.listen():
            sys.stderr.write("1\n" + repr(item) + "\n")
            if item['type'] != 'message':
                continue
            #sys.stderr.write("monitor sid: " + str(sid) + ":\n")
            data = None
            try:
                data = json.loads(item['data'].decode())
            except:
                sys.stderr.write(" monitor JSON parse error: " + item['data'].decode() + "\n")
                continue
            # KILL on our own channel ends the thread; KILL on a partner
            # channel just unsubscribes from that partner.
            if 'message' in data and data['message'] == "KILL":
                if item['channel'].decode() == str(sid):
                    sys.stderr.write(" monitor unsubscribed from all\n")
                    pubsub.unsubscribe()
                    for interview_sid in listening_sids:
                        r.publish(interview_sid, json.dumps(dict(messagetype='departure', sid=sid)))
                    break
                elif item['channel'].decode() != 'da:monitor':
                    pubsub.unsubscribe(item['channel'].decode())
                    if data['sid'] in listening_sids:
                        listening_sids.remove(data['sid'])
                    sys.stderr.write(" monitor unsubscribed from " + item['channel'].decode() + "\n")
                continue
            else:
                sys.stderr.write(" Got something for monitor\n")
            if 'messagetype' in data:
                #if data['messagetype'] == 'abortcontroller':
                # socketio.emit('abortcontroller', {'key': data['key']}, namespace='/monitor', room=sid)
                if data['messagetype'] == 'sessionupdate':
                    #sys.stderr.write(" Got a session update: " + str(data['session']) + "\n")
                    #sys.stderr.write(" Got a session update\n")
                    socketio.emit('sessionupdate', {'key': data['key'], 'session': data['session']}, namespace='/monitor', room=sid)
                # An interview wants to chat: subscribe to its channel,
                # remember its secret, record the partnership, and notify the UI.
                if data['messagetype'] == 'chatready':
                    pubsub.subscribe(data['sid'])
                    listening_sids.add(data['sid'])
                    secrets[data['sid']] = data['secret']
                    r.hset('da:monitor:chatpartners:' + str(user_id), 'da:interviewsession:uid:' + str(data['uid']) + ':i:' + str(data['i']) + ':userid:' + str(data['userid']), 1)
                    if str(data['userid']).startswith('t'):
                        name = word("anonymous visitor") + ' ' + str(data['userid'])[1:]
                    else:
                        person = UserModel.query.filter_by(id=data['userid']).first()
                        if person.first_name:
                            name = str(person.first_name) + ' ' + str(person.last_name)
                        else:
                            name = str(person.email)
                    sys.stderr.write("chatready 1")
                    socketio.emit('chatready', {'uid': data['uid'], 'i': data['i'], 'userid': data['userid'], 'name': name}, namespace='/monitor', room=sid)
                if data['messagetype'] == 'block':
                    pubsub.unsubscribe(item['channel'].decode())
                    if item['channel'].decode() in listening_sids:
                        listening_sids.remove(item['channel'].decode())
                    sys.stderr.write(" monitor unsubscribed from " + item['channel'].decode() + "\n")
                if data['messagetype'] == 'refreshsessions':
                    socketio.emit('refreshsessions', {}, namespace='/monitor', room=sid)
                if data['messagetype'] == 'chat':
                    #sys.stderr.write(" Emitting monitor chat message: " + str(data['message']['message']) + "\n")
                    # Mark whether the message came from this operator so the
                    # UI can style it as "self".
                    if str(user_id) == str(data['message'].get('user_id', None)):
                        data['message']['is_self'] = True
                    else:
                        data['message']['is_self'] = False
                    socketio.emit('chatmessage', {'i': data['yaml_filename'], 'uid': data['uid'], 'userid': data['user_id'], 'data': data['message']}, namespace='/monitor', room=sid)
                if data['messagetype'] == 'chatstop':
                    sys.stderr.write(" Chat termination for sid " + data['sid'] + "\n")
                    pubsub.unsubscribe(data['sid'])
                    if data['sid'] in secrets:
                        del secrets[data['sid']]
                    r.hdel('da:monitor:chatpartners:' + str(user_id), 'da:interviewsession:uid:' + str(data['uid']) + ':i:' + str(data['i']) + ':userid:' + data['userid'])
                    socketio.emit('chatstop', {'uid': data['uid'], 'i': data['i'], 'userid': data['userid']}, namespace='/monitor', room=sid)
        sys.stderr.write(' exiting monitor thread for sid ' + str(sid) + '\n')
@socketio.on('connect', namespace='/monitor')
def on_monitor_connect():
    """Handle a new websocket connection from the monitor (operator) page.

    Rejects connections without the 'monitor' session flag, registers a
    short-lived presence key in redis, joins the shared and private rooms,
    and starts the per-socket monitor_thread listener.
    """
    if 'monitor' not in session:
        socketio.emit('terminate', {}, namespace='/monitor', room=request.sid)
        return
    sys.stderr.write('Client connected on monitor and will join room monitor\n')
    key = 'da:monitor:' + str(request.sid)
    # Presence key with 60s TTL; refreshed by updatemonitor heartbeats.
    pipe = rr.pipeline()
    pipe.set(key, 1)
    pipe.expire(key, 60)
    pipe.execute()
    join_room('monitor')
    join_room(request.sid)
    user_id = session.get('user_id', None)
    if request.sid not in threads:
        threads[request.sid] = socketio.start_background_task(target=monitor_thread, sid=request.sid, user_id=user_id)
@socketio.on('disconnect', namespace='/monitor')
def on_monitor_disconnect():
    """Tear down a monitor connection.

    Removes the presence key, sets short TTLs on the operator's availability,
    role, phone-forwarding, and chat-partner keys (so a quick reconnect can
    revive them), and tells the monitor_thread to exit via KILL.
    """
    user_id = session.get('user_id', None)
    sys.stderr.write('Client disconnected from monitor\n')
    rr.delete('da:monitor:' + str(request.sid))
    rr.expire('da:monitor:available:' + str(user_id), 5)
    for key in rr.keys('da:monitor:role:*:userid:' + str(user_id)):
        key = key.decode()
        rr.expire(key, 5)
    # Expire any phone-forwarding codes set up for this operator.
    for key in rr.keys('da:phonecode:monitor:' + str(user_id) + ':uid:*'):
        key = key.decode()
        the_code = rr.get(key)
        if the_code is not None:
            the_code = the_code.decode()
            rr.expire('da:callforward:' + the_code, 5)
        rr.expire(key, 5)
    rr.expire('da:monitor:chatpartners:' + str(user_id), 5)
    rr.publish(request.sid, json.dumps(dict(message='KILL', sid=request.sid)))
@socketio.on('terminate', namespace='/monitor')
def terminate_monitor_connection():
    """Ask the monitor client to terminate; cleanup happens on disconnect."""
    sys.stderr.write("terminate_monitor_connection\n")
    # hopefully the disconnect will be triggered
    # if request.sid in threads:
    # rr.publish(request.sid, json.dumps(dict(origin='client', message='KILL', sid=request.sid)))
    socketio.emit('terminate', {}, namespace='/monitor', room=request.sid)
    #disconnect()
@socketio.on('block', namespace='/monitor')
def monitor_block(data):
    """Block chat for the session identified by data['key'].

    Sets a 'da:block:...' flag in redis, notifies the interview's socket
    (if reachable) with a 'block' message, and confirms to the monitor UI.
    """
    if 'monitor' not in session:
        socketio.emit('terminate', {}, namespace='/monitor', room=request.sid)
        return
    key = data.get('key', None)
    if key is None:
        sys.stderr.write("No key provided\n")
        return
    rr.set(re.sub(r'^da:session:', 'da:block:', key), 1)
    sid = rr.get(re.sub(r'^da:session:', 'da:interviewsession:', key))
    if sid is not None:
        sid = sid.decode()
        rr.publish(sid, json.dumps(dict(messagetype='block', sid=request.sid)))
        sys.stderr.write("Blocking\n")
    else:
        sys.stderr.write("Could not block because could not get sid\n")
    socketio.emit('block', {'key': key}, namespace='/monitor', room=request.sid)
@socketio.on('unblock', namespace='/monitor')
def monitor_unblock(data):
    """Remove the chat block for the session identified by data['key'].

    Deletes the 'da:block:...' flag, re-offers this monitor as a chat
    partner to the interview's socket, and confirms to the monitor UI.
    """
    if 'monitor' not in session:
        socketio.emit('terminate', {}, namespace='/monitor', room=request.sid)
        return
    key = data.get('key', None)
    if key is None:
        sys.stderr.write("No key provided\n")
        return
    sys.stderr.write("Unblocking\n")
    rr.delete(re.sub(r'^da:session:', 'da:block:', key))
    sid = rr.get(re.sub(r'^da:session:', 'da:interviewsession:', key))
    if sid is not None:
        sid = sid.decode()
        rr.publish(sid, json.dumps(dict(messagetype='chatpartner', sid=request.sid)))
    socketio.emit('unblock', {'key': key}, namespace='/monitor', room=request.sid)
def decode_dict(the_dict):
    """Decode a bytes-keyed/bytes-valued mapping into a plain str dict.

    redis-py's hgetall() returns bytes for both keys and values; this
    converts them with the default (UTF-8) codec.

    :param the_dict: mapping of bytes keys to bytes values.
    :return: new dict with both keys and values decoded to str.
    """
    return {k.decode(): v.decode() for k, v in the_dict.items()}
@socketio.on('updatemonitor', namespace='/monitor')
def update_monitor(message):
    """Periodic heartbeat/state-sync from the monitor (operator) UI.

    Synchronizes redis with the operator's declared state — chat
    availability, subscribed roles, phone number, and phone-forwarding
    partners — refreshes the short-TTL presence keys, gathers the current
    set of sessions and chat partners, and replies with an 'updatemonitor'
    event describing everything back to the client.
    """
    if 'monitor' not in session:
        socketio.emit('terminate', {}, namespace='/monitor', room=request.sid)
        return
    #sys.stderr.write('received message from ' + str(request.sid) + "\n")
    available_for_chat = message['available_for_chat']
    new_subscribed_roles = message['subscribed_roles']
    new_phone_partners = message['phone_partners_to_add']
    term_phone_partners = message['phone_partners_to_terminate']
    phone_number = message['phone_number']
    # --- phone number registration (30-day TTL) ---
    phone_number_key = 'da:monitor:phonenumber:' + str(session['user_id'])
    if phone_number is None or phone_number == '':
        rr.delete(phone_number_key)
    else:
        pipe = rr.pipeline()
        pipe.set(phone_number_key, phone_number)
        pipe.expire(phone_number_key, 2592000)
        pipe.execute()
    phone_partners = dict()
    prefix = 'da:phonecode:monitor:' + str(session['user_id']) + ':uid:'
    # Remove forwarding codes the operator explicitly terminated.
    for key in term_phone_partners:
        the_code = rr.get(key)
        if the_code is not None:
            the_code = the_code.decode()
            rr.delete(re.sub(r'da:session:uid:', prefix, key))
            rr.delete('da:callforward:' + the_code)
    if phone_number is None or phone_number == '':
        # No phone number: tear down all of this operator's forwarding codes.
        for key in rr.keys(prefix + '*'):
            key = key.decode()
            the_code = rr.get(key)
            if the_code is not None:
                the_code = the_code.decode()
                rr.delete(key)
                rr.delete('da:callforward:' + the_code)
    else:
        # Collect codes already in use so new ones do not collide.
        codes_in_use = set()
        for key in rr.keys('da:callforward:*'):
            key = key.decode()
            code = re.sub(r'^da:callforward:', '', key)
            codes_in_use.add(code)
        for key in rr.keys(prefix + '*'):
            key = key.decode()
            phone_partners[re.sub(r'^da:phonecode:monitor:[0-9]*:uid:', 'da:session:uid:', key)] = 1
        # Allocate a fresh 4-digit code (5-minute TTL) for each new partner.
        for key in new_phone_partners:
            if key in phone_partners:
                continue
            times = 0
            ok = False
            while times < 1000:
                times += 1
                code = "%04d" % random.randint(1000, 9999)
                if code in codes_in_use:
                    continue
                ok = True
                the_code = code
                new_key = re.sub(r'^da:session:uid:', prefix, key)
                code_key = 'da:callforward:' + str(code)
                pipe = rr.pipeline()
                pipe.set(new_key, code)
                pipe.set(code_key, phone_number)
                pipe.expire(new_key, 300)
                pipe.expire(code_key, 300)
                pipe.execute()
                phone_partners[key] = 1
                break
            if times >= 1000:
                logmessage("update_monitor: could not get a random integer")
    #sys.stderr.write('subscribed roles are type ' + str(type(new_subscribed_roles)) + " which is " + str(new_subscribed_roles) + "\n")
    # --- refresh this socket's presence key (60s TTL heartbeat) ---
    monitor_key = 'da:monitor:' + str(request.sid)
    pipe = rr.pipeline()
    pipe.set(monitor_key, 1)
    pipe.expire(monitor_key, 60)
    pipe.execute()
    key = 'da:monitor:available:' + str(session['user_id'])
    key_exists = rr.exists(key)
    # --- prune stale chat partners; keep live ones for the reply ---
    chat_partners = dict()
    for cp_key in rr.hgetall('da:monitor:chatpartners:' + str(session['user_id'])):
        cp_key = cp_key.decode()
        if rr.get(cp_key) is None:
            rr.hdel('da:monitor:chatpartners:' + str(session['user_id']), cp_key)
        else:
            chat_partners[re.sub('^da:interviewsession:uid:', r'da:session:uid:', cp_key)] = 1
    #sys.stderr.write('daAvailableForChat is ' + str(available_for_chat) + " for key " + key + "\n")
    # --- availability flag (maps user_id -> sid while available) ---
    if available_for_chat:
        pipe = rr.pipeline()
        pipe.set(key, request.sid)
        pipe.expire(key, 60)
        pipe.execute()
    elif key_exists:
        #sys.stderr.write("Deleting shit\n")
        pipe = rr.pipeline()
        pipe.delete(key)
        for avail_key in rr.keys('da:monitor:role:*:userid:' + str(session['user_id'])):
            pipe.delete(avail_key.decode())
        pipe.execute()
    # --- reconcile subscribed roles against the roles that exist ---
    avail_roles = list()
    for key in rr.keys('da:chat:roletype:*'):
        avail_roles.append(re.sub(r'^da:chat:roletype:', r'', key.decode()))
    sub_role_key = 'da:monitor:userrole:' + str(session['user_id'])
    if rr.exists(sub_role_key):
        subscribed_roles = decode_dict(rr.hgetall(sub_role_key))
    else:
        subscribed_roles = dict()
    del_mon_role_keys = list()
    # Drop requested roles that no longer exist.
    for role_key in [k for k in new_subscribed_roles.keys()]:
        if role_key not in avail_roles:
            #sys.stderr.write("role_key is " + str(role_key) + " which is " + str(type(role_key)) + "\n")
            del new_subscribed_roles[role_key]
    # Drop stored subscriptions to roles that no longer exist.
    for role_key in [k for k in subscribed_roles.keys()]:
        if role_key not in avail_roles:
            rr.hdel(sub_role_key, role_key)
            del_mon_role_keys.append('da:monitor:role:' + role_key + ':userid:' + str(session['user_id']))
    # Add newly requested subscriptions.
    for role_key in [k for k in new_subscribed_roles.keys()]:
        if role_key not in subscribed_roles:
            rr.hset(sub_role_key, role_key, 1)
            subscribed_roles[role_key] = 1
    # Remove subscriptions the client no longer requests.
    for role_key in [k for k in subscribed_roles.keys()]:
        if role_key not in new_subscribed_roles:
            rr.hdel(sub_role_key, role_key)
            del_mon_role_keys.append('da:monitor:role:' + role_key + ':userid:' + str(session['user_id']))
            del subscribed_roles[role_key]
    if len(del_mon_role_keys):
        pipe = rr.pipeline()
        for key in del_mon_role_keys:
            pipe.delete(key)
        pipe.execute()
    # Advertise per-role availability keys (60s TTL) while available.
    if available_for_chat and len(subscribed_roles):
        pipe = rr.pipeline()
        for role_key in [k for k in subscribed_roles.keys()]:
            key = 'da:monitor:role:' + role_key + ':userid:' + str(session['user_id'])
            pipe.set(key, 1)
            pipe.expire(key, 60)
        pipe.execute()
    # --- collect the active sessions to show in the monitor UI ---
    keylist = list()
    for key in rr.keys('da:session:*'):
        keylist.append(key.decode())
    sessions = dict()
    for key in keylist:
        try:
            sessobj = fix_pickle_obj(rr.get(key))
        except:
            sys.stderr.write('error parsing value of ' + str(key) + " which was " + repr(rr.get(key)) + "\n")
            continue
        if sessobj.get('chatstatus', None) != 'off':
            html = rr.get(re.sub(r'^da:session:', 'da:html:', key))
            if html is not None:
                html = html.decode()
                obj = json.loads(html)
                sessobj['browser_title'] = obj.get('browser_title', 'not available')
                if rr.exists(re.sub(r'^da:session:', 'da:block:', key)):
                    sessobj['blocked'] = True
                else:
                    sessobj['blocked'] = False
                sessions[key] = sessobj
    socketio.emit('updatemonitor', {'available_for_chat': available_for_chat, 'subscribedRoles': subscribed_roles, 'sessions': sessions, 'availRoles': sorted(avail_roles), 'chatPartners': chat_partners, 'phonePartners': phone_partners}, namespace='/monitor', room=request.sid)
@socketio.on('chatmessage', namespace='/monitor')
def monitor_chat_message(data):
    """Store a chat message sent by an operator and relay it to the interview.

    Looks up the interview socket for the session named in data['key'],
    encrypts the message with the secret cached for that socket (when the
    interview's answers are encrypted), logs it to ChatLog, and publishes it
    on the interview's channel.
    """
    if 'monitor' not in session:
        socketio.emit('terminate', {}, namespace='/monitor', room=request.sid)
        return
    key = data.get('key', None)
    #sys.stderr.write("Key is " + str(key) + "\n")
    if key is None:
        sys.stderr.write("No key provided\n")
        return
    m = re.match(r'da:session:uid:(.*):i:(.*):userid:(.*)', key)
    if not m:
        sys.stderr.write("Invalid key provided\n")
        return
    session_id = m.group(1)
    yaml_filename = m.group(2)
    chat_user_id = m.group(3)
    key = 'da:interviewsession:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:' + str(chat_user_id)
    sid = rr.get(key)
    if sid is None:
        sys.stderr.write("No sid for monitor chat message with key " + str(key) + "\n")
        return
    sid = sid.decode()
    # Secret cached by monitor_thread when the chat partnership was set up.
    secret = secrets.get(sid, None)
    if secret is not None:
        secret = str(secret)
    #obtain_lock(session_id, yaml_filename)
    try:
        steps, user_dict, encrypted = fetch_user_dict(session_id, yaml_filename, secret=secret)
    except Exception as err:
        #release_lock(session_id, yaml_filename)
        sys.stderr.write("monitor_chat_message: could not get dictionary: " + text_type(err) + "\n")
        return
    #release_lock(session_id, yaml_filename)
    nowtime = datetime.datetime.utcnow()
    if encrypted:
        message = encrypt_phrase(data['data'], secret)
    else:
        message = pack_phrase(data['data'])
    user_id = session.get('user_id', None)
    if user_id is not None:
        user_id = int(user_id)
    person = UserModel.query.filter_by(id=user_id).first()
    chat_mode = user_dict['_internal']['livehelp']['mode']
    # The chat "owner" is the interviewee; a 't'-prefixed id means a temporary user.
    m = re.match('t([0-9]+)', chat_user_id)
    if m:
        temp_owner_id = m.group(1)
        owner_id = None
    else:
        temp_owner_id = None
        owner_id = chat_user_id
    if chat_mode in ['peer', 'peerhelp']:
        open_to_peer = True
    else:
        open_to_peer = False
    record = ChatLog(filename=yaml_filename, key=session_id, message=message, encrypted=encrypted, modtime=nowtime, user_id=user_id, temp_owner_id=temp_owner_id, owner_id=owner_id, open_to_peer=open_to_peer)
    db.session.add(record)
    db.session.commit()
    modtime = nice_utc_date(nowtime)
    rr.publish(sid, json.dumps(dict(origin='client', messagetype='chat', sid=request.sid, yaml_filename=yaml_filename, uid=session_id, user_id=chat_user_id, message=dict(id=record.id, user_id=record.user_id, first_name=person.first_name, last_name=person.last_name, email=person.email, modtime=modtime, message=data['data'], roles=[role.name for role in person.roles], mode=chat_mode))))
    #sys.stderr.write('received chat message on monitor from sid ' + str(request.sid) + ': ' + data['data'] + "\n")
@socketio.on('chat_log', namespace='/monitor')
def monitor_chat_log(data):
    """Send an operator the full chat history for the session in data['key'].

    Resolves the interview socket to find the cached decryption secret,
    loads the interview dictionary for the chat mode, fetches the decrypted
    log via get_chat_log(), and emits it back as a 'chat_log' event.
    """
    if 'monitor' not in session:
        socketio.emit('terminate', {}, namespace='/monitor', room=request.sid)
        return
    key = data.get('key', None)
    #sys.stderr.write("Key is " + str(key) + "\n")
    if key is None:
        sys.stderr.write("No key provided\n")
        return
    m = re.match(r'da:session:uid:(.*):i:(.*):userid:(.*)', key)
    if not m:
        sys.stderr.write("Invalid key provided\n")
        return
    session_id = m.group(1)
    yaml_filename = m.group(2)
    chat_user_id = m.group(3)
    key = 'da:interviewsession:uid:' + str(session_id) + ':i:' + str(yaml_filename) + ':userid:' + str(chat_user_id)
    sid = rr.get(key)
    if sid is None:
        sys.stderr.write("No sid for monitor chat message with key " + str(key) + "\n")
        return
    sid = sid.decode()
    secret = secrets.get(sid, None)
    if secret is not None:
        secret = str(secret)
    #obtain_lock(session_id, yaml_filename)
    try:
        steps, user_dict, encrypted = fetch_user_dict(session_id, yaml_filename, secret=secret)
    except Exception as err:
        #release_lock(session_id, yaml_filename)
        sys.stderr.write("monitor_chat_log: could not get dictionary: " + text_type(err) + "\n")
        return
    #release_lock(session_id, yaml_filename)
    chat_mode = user_dict['_internal']['livehelp']['mode']
    # 't'-prefixed ids denote temporary (anonymous) users.
    m = re.match('t([0-9]+)', chat_user_id)
    if m:
        temp_user_id = m.group(1)
        user_id = None
    else:
        temp_user_id = None
        user_id = chat_user_id
    self_user_id = session.get('user_id', None)
    if user_id is not None:
        user_id = int(user_id)
    if temp_user_id is not None:
        temp_user_id = int(temp_user_id)
    if self_user_id is not None:
        self_user_id = int(self_user_id)
    messages = get_chat_log(chat_mode, yaml_filename, session_id, user_id, temp_user_id, secret, self_user_id, None)
    socketio.emit('chat_log', {'uid': session_id, 'i': yaml_filename, 'userid': chat_user_id, 'mode': chat_mode, 'data': messages}, namespace='/monitor', room=request.sid)
    #sys.stderr.write("Monitor: sending back " + str(len(messages)) + " messages")
#sys.stderr.write("Monitor: sending back " + str(len(messages)) + " messages")
#observer
def observer_thread(sid=None, key=None):
    """Background redis listener for one observer socket.

    Subscribes to the observed interview's input channel (*key*) and the
    observer's own sid channel, forwarding 'newpage', 'start_being_controlled',
    and raw parameter-change messages to the observer UI as Socket.IO events.
    Runs until a matching KILL message arrives.

    :param sid: Socket.IO session id of the observer connection.
    :param key: 'da:input:...' channel of the interview being observed.
    """
    with app.app_context():
        sys.stderr.write("Started observer thread for " + str(sid) + "\n")
        # Dedicated connection: pubsub.listen() blocks.
        r = redis.StrictRedis(host=redis_host, port=redis_port, db=redis_offset)
        pubsub = r.pubsub()
        pubsub.subscribe([key, sid])
        for item in pubsub.listen():
            sys.stderr.write("2\n" + repr(item) + "\n")
            if item['type'] != 'message':
                continue
            #sys.stderr.write("observer sid: " + str(sid) + ":\n")
            data = None
            try:
                data = json.loads(item['data'].decode())
            except:
                sys.stderr.write(" observer JSON parse error: " + item['data'].decode() + "\n")
                continue
            # A KILL addressed to this sid (or with no sid at all) ends the thread.
            if 'message' in data and data['message'] == "KILL" and (('sid' in data and data['sid'] == sid) or 'sid' not in data):
                pubsub.unsubscribe()
                sys.stderr.write(" observer unsubscribed and finished for " + str(sid) + "\n")
                break
            elif 'message' in data:
                if data['message'] == "newpage":
                    #sys.stderr.write(" Got new page for observer\n")
                    # The rendered page snapshot is stored in redis under data['key'].
                    try:
                        obj = json.loads(r.get(data['key']).decode())
                    except:
                        sys.stderr.write(" newpage JSON parse error\n")
                        continue
                    socketio.emit('newpage', {'obj': obj}, namespace='/observer', room=sid)
                elif data['message'] == "start_being_controlled":
                    #sys.stderr.write(" got start_being_controlled message with key " + str(data['key']) + "\n")
                    socketio.emit('start_being_controlled', {'key': data['key']}, namespace='/observer', room=sid)
            else:
                # Anything without a 'message' field is a raw form-parameter update.
                #sys.stderr.write(" Got parameters for observer\n")
                socketio.emit('pushchanges', {'parameters': data}, namespace='/observer', room=sid)
        sys.stderr.write(' exiting observer thread for sid ' + str(sid) + '\n')
@socketio.on('connect', namespace='/observer')
def on_observer_connect():
    """Admit an observer websocket; reject sessions without the 'observer' flag."""
    if 'observer' not in session:
        socketio.emit('terminate', {}, namespace='/observer', room=request.sid)
        return
    join_room(request.sid)
@socketio.on('observe', namespace='/observer')
def on_observe(message):
    """Start observing the interview session identified in *message*.

    Spawns an observer_thread subscribed to that session's 'da:input' channel.
    """
    if 'observer' not in session:
        socketio.emit('terminate', {}, namespace='/observer', room=request.sid)
        return
    if request.sid not in threads:
        key = 'da:input:uid:' + str(message['uid']) + ':i:' + str(message['i']) + ':userid:' + str(message['userid'])
        #sys.stderr.write('Observing ' + key + '\n')
        threads[request.sid] = socketio.start_background_task(target=observer_thread, sid=request.sid, key=key)
@socketio.on('observerStartControl', namespace='/observer')
def start_control(message):
    """Attempt to take remote control of an interview session.

    Succeeds when no other observer holds the control key (or this observer
    already holds it): records the claim in redis with a 12-second TTL and
    notifies the interview socket with 'controllerstart'.  Otherwise emits
    'abortcontrolling' back to the observer.
    """
    if 'observer' not in session:
        socketio.emit('terminate', {}, namespace='/observer', room=request.sid)
        return
    self_key = 'da:control:sid:' + str(request.sid)
    key = 'da:control:uid:' + str(message['uid']) + ':i:' + str(message['i']) + ':userid:' + str(message['userid'])
    existing_sid = rr.get(key)
    if existing_sid is None or existing_sid.decode() == request.sid:
        #sys.stderr.write('Controlling ' + key + '\n')
        # Both directions of the claim expire in 12s; observerChanges refreshes them.
        pipe = rr.pipeline()
        pipe.set(self_key, key)
        pipe.expire(self_key, 12)
        pipe.set(key, request.sid)
        pipe.expire(key, 12)
        pipe.execute()
        int_key = 'da:interviewsession:uid:' + str(message['uid']) + ':i:' + str(message['i']) + ':userid:' + str(message['userid'])
        int_sid = rr.get(int_key)
        if int_sid is not None:
            int_sid = int_sid.decode()
            rr.publish(int_sid, json.dumps(dict(messagetype='controllerstart')))
    else:
        sys.stderr.write('That key ' + key + ' is already taken\n')
        key = 'da:session:uid:' + str(message['uid']) + ':i:' + str(message['i']) + ':userid:' + str(message['userid'])
        #rr.publish('da:monitor', json.dumps(dict(messagetype='abortcontroller', key=key)))
        socketio.emit('abortcontrolling', {'key': key}, namespace='/observer', room=request.sid)
@socketio.on('observerStopControl', namespace='/observer')
def stop_control(message):
    """Release remote control of an interview session.

    Deletes this observer's control bookkeeping keys and, when this observer
    actually held control, notifies the interview socket with 'controllerexit'.
    """
    if 'observer' not in session:
        socketio.emit('terminate', {}, namespace='/observer', room=request.sid)
        return
    self_key = 'da:control:sid:' + str(request.sid)
    key = 'da:control:uid:' + str(message['uid']) + ':i:' + str(message['i']) + ':userid:' + str(message['userid'])
    sys.stderr.write('Stop controlling ' + key + '\n')
    existing_sid = rr.get(key)
    pipe = rr.pipeline()
    pipe.delete(self_key)
    # Only delete the shared control key if we are the observer that owns it.
    if existing_sid is not None and existing_sid.decode() == request.sid:
        pipe.delete(key)
        pipe.execute()
        sid = rr.get('da:interviewsession:uid:' + str(message['uid']) + ':i:' + str(message['i']) + ':userid:' + str(message['userid']))
        if sid is not None:
            sid = sid.decode()
            sys.stderr.write("Calling controllerexit 1");
            rr.publish(sid, json.dumps(dict(messagetype='controllerexit', sid=request.sid)))
    else:
        pipe.execute()
@socketio.on('observerChanges', namespace='/observer')
def observer_changes(message):
    """Forward a controlling observer's form changes to the interview session.

    Publishes the changed parameters on the interview socket's channel (or
    tells the observer to stop/pause when the interview socket or session is
    gone), then refreshes the 12-second control claim in redis, mirroring
    the key layout established by start_control().
    """
    sys.stderr.write('observerChanges\n')
    if 'observer' not in session:
        socketio.emit('terminate', {}, namespace='/observer', room=request.sid)
        return
    sid = rr.get('da:interviewsession:uid:' + str(message['uid']) + ':i:' + str(message['i']) + ':userid:' + str(message['userid']))
    if sid is None:
        key = 'da:session:uid:' + str(message['uid']) + ':i:' + str(message['i']) + ':userid:' + str(message['userid'])
        sys.stderr.write('observerChanges: sid is none.\n')
        if rr.get(key) is None:
            sys.stderr.write('observerChanges: session has gone away for good.  Sending stopcontrolling.\n')
            socketio.emit('stopcontrolling', {'key': key}, namespace='/observer', room=request.sid)
        else:
            # Session still exists but its socket is down; tell the observer to wait.
            socketio.emit('noconnection', {'key': key}, namespace='/observer', room=request.sid)
    else:
        sid = sid.decode()
        sys.stderr.write('observerChanges: sid exists at ' + time.strftime("%Y-%m-%d %H:%M:%S") + '\n')
        rr.publish(sid, json.dumps(dict(messagetype='controllerchanges', sid=request.sid, clicked=message.get('clicked', None), parameters=message['parameters'])))
    # sid=request.sid, yaml_filename=str(message['i']), uid=str(message['uid']), user_id=str(message['userid'])
    # Refresh the control claim.  Redis SET clears any existing TTL, so each
    # key must be re-expired after it is set.
    self_key = 'da:control:sid:' + str(request.sid)
    key = 'da:control:uid:' + str(message['uid']) + ':i:' + str(message['i']) + ':userid:' + str(message['userid'])
    #sys.stderr.write('Controlling ' + key + '\n')
    pipe = rr.pipeline()
    pipe.set(self_key, key)
    # Fix: previously this expired `key` twice and never `self_key`, leaving
    # da:control:sid:<sid> persistent (it leaked if the observer's disconnect
    # handler never ran).  Match the pattern used in start_control().
    pipe.expire(self_key, 12)
    pipe.set(key, request.sid)
    pipe.expire(key, 12)
    pipe.execute()
@socketio.on('disconnect', namespace='/observer')
def on_observer_disconnect():
    """Clean up when an observer websocket disconnects.

    Releases any control claim this observer held, notifies the controlled
    interview (if any) with 'controllerexit', and KILLs the observer_thread.
    """
    sys.stderr.write('Client disconnected from observer\n')
    self_key = 'da:control:sid:' + str(request.sid)
    int_key = rr.get(self_key)
    if int_key is not None:
        int_key = int_key.decode()
        rr.delete(int_key)
        other_sid = rr.get(re.sub(r'^da:control:uid:', 'da:interviewsession:uid:', int_key))
    else:
        other_sid = None
    rr.delete(self_key)
    if other_sid is not None:
        other_sid = other_sid.decode()
        sys.stderr.write("Calling controllerexit 2");
        rr.publish(other_sid, json.dumps(dict(messagetype='controllerexit', sid=request.sid)))
    rr.publish(request.sid, json.dumps(dict(message='KILL', sid=request.sid)))
@socketio.on('terminate', namespace='/observer')
def terminate_observer_connection():
    """Ask the observer client to terminate; cleanup happens on disconnect."""
    sys.stderr.write("terminate_observer_connection\n")
    # hopefully the disconnect will be triggered
    # if request.sid in threads:
    # rr.publish(request.sid, json.dumps(dict(origin='client', message='KILL', sid=request.sid)))
    socketio.emit('terminate', {}, namespace='/observer', room=request.sid)
    #disconnect()
if __name__ == '__main__':
    # Run the Socket.IO development server when executed directly.
    socketio.run(app)
| 48.223415 | 394 | 0.599183 |
ace70237eb1f9ab4d029f1331b22514fb0668830 | 59 | py | Python | ruco/__init__.py | nizig/ruco | 1cee1d8f4f1155cf99b5ba71061d3c0a76c5e672 | [
"MIT"
] | 10 | 2017-02-22T05:33:35.000Z | 2021-11-27T17:05:12.000Z | ruco/__init__.py | nizig/ruco | 1cee1d8f4f1155cf99b5ba71061d3c0a76c5e672 | [
"MIT"
] | 3 | 2018-12-25T12:04:32.000Z | 2019-08-22T14:44:36.000Z | ruco/__init__.py | nizig/ruco | 1cee1d8f4f1155cf99b5ba71061d3c0a76c5e672 | [
"MIT"
] | 1 | 2021-03-05T21:36:03.000Z | 2021-03-05T21:36:03.000Z | from . import bits
from . import service
from . import cli
| 14.75 | 21 | 0.745763 |
ace703db25ab459aee013d042b46dcd9d2dce890 | 1,666 | py | Python | sdk/python/pulumi_azure_nextgen/maintenance/v20210401preview/__init__.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/maintenance/v20210401preview/__init__.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/maintenance/v20210401preview/__init__.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .configuration_assignment import *
from .configuration_assignment_parent import *
from .get_configuration_assignment import *
from .get_configuration_assignment_parent import *
from .get_maintenance_configuration import *
from .maintenance_configuration import *
from ._inputs import *
from . import outputs
def _register_module():
    """Register this package's resource classes with the Pulumi runtime.

    This lets the engine rehydrate resources of this module from their
    type token + URN during stack operations.
    """
    import pulumi
    from ... import _utilities

    class Module(pulumi.runtime.ResourceModule):
        _version = _utilities.get_semver_version()

        def version(self):
            return Module._version

        def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
            # Dispatch on the fully-qualified type token.
            token_to_class = {
                "azure-nextgen:maintenance/v20210401preview:ConfigurationAssignment": ConfigurationAssignment,
                "azure-nextgen:maintenance/v20210401preview:ConfigurationAssignmentParent": ConfigurationAssignmentParent,
                "azure-nextgen:maintenance/v20210401preview:MaintenanceConfiguration": MaintenanceConfiguration,
            }
            resource_class = token_to_class.get(typ)
            if resource_class is None:
                raise Exception(f"unknown resource type {typ}")
            return resource_class(name, pulumi.ResourceOptions(urn=urn))

    pulumi.runtime.register_resource_module("azure-nextgen", "maintenance/v20210401preview", Module())


_register_module()
| 39.666667 | 110 | 0.72569 |
ace703df78341adee53f940b2732603f959d50d6 | 15,342 | py | Python | amc_search.py | Sharingsky/FORMERAMC | 11ffacdd3dd03d78fa9d25f891b92afefec204e3 | [
"MIT"
] | null | null | null | amc_search.py | Sharingsky/FORMERAMC | 11ffacdd3dd03d78fa9d25f891b92afefec204e3 | [
"MIT"
] | null | null | null | amc_search.py | Sharingsky/FORMERAMC | 11ffacdd3dd03d78fa9d25f891b92afefec204e3 | [
"MIT"
] | null | null | null | # Code for "AMC: AutoML for Model Compression and Acceleration on Mobile Devices"
# Yihui He*, Ji Lin*, Zhijian Liu, Hanrui Wang, Li-Jia Li, Song Han
# {jilin, songhan}@mit.edu
import os
import numpy as np
import argparse
from copy import deepcopy
import torch
import torch.nn as nn
torch.backends.cudnn.deterministic = True
from env.channel_pruning_env import ChannelPruningEnv
from lib.agent import DDPG
from lib.utils import get_output_folder
from tensorboardX import SummaryWriter
def parse_args():
    """Build and parse the command-line arguments for the AMC search script.

    Covers job selection (train/export/gates_train), the pruning environment
    (model, dataset, preserve-ratio bounds), DDPG agent hyper-parameters,
    training schedule, export options and gate-training options.
    """
    parser = argparse.ArgumentParser(description='AMC search script')
    parser.add_argument('--job', default='gates_train', type=str, help='support option: train/export')
    parser.add_argument('--suffix', default=None, type=str, help='suffix to help you remember what experiment you ran')
    # env
    parser.add_argument('--model', default='plain20', type=str, help='model to prune')
    parser.add_argument('--dataset', default='cifar10', type=str, help='dataset to use (cifar/imagenet)')
    parser.add_argument('--data_root', default='D:\_1work\pycharmcode', type=str, help='dataset path')
    parser.add_argument('--preserve_ratio', default=0.5, type=float, help='preserve ratio of the model')
    parser.add_argument('--lbound', default=0.2, type=float, help='minimum preserve ratio')
    parser.add_argument('--rbound', default=1.0, type=float, help='maximum preserve ratio')
    parser.add_argument('--reward', default='acc_reward', type=str, help='Setting the reward')
    parser.add_argument('--acc_metric', default='acc1', type=str, help='use acc1 or acc5')
    parser.add_argument('--use_real_val', dest='use_real_val', action='store_true')
    parser.add_argument('--ckpt_path', default='./checkpoints/ckpt.pth.tar', type=str, help='manual path of checkpoint')
    # parser.add_argument('--pruning_method', default='cp', type=str,
    #                     help='method to prune (fg/cp for fine-grained and channel pruning)')
    # only for channel pruning
    parser.add_argument('--n_calibration_batches', default=60, type=int,
                        help='n_calibration_batches')
    parser.add_argument('--n_points_per_layer', default=10, type=int,
                        help='method to prune (fg/cp for fine-grained and channel pruning)')
    parser.add_argument('--channel_round', default=8, type=int, help='Round channel to multiple of channel_round')
    # ddpg
    parser.add_argument('--hidden1', default=300, type=int, help='hidden num of first fully connect layer')
    parser.add_argument('--hidden2', default=300, type=int, help='hidden num of second fully connect layer')
    parser.add_argument('--lr_c', default=1e-3, type=float, help='learning rate for actor')
    parser.add_argument('--lr_a', default=1e-4, type=float, help='learning rate for actor')
    parser.add_argument('--warmup', default=100, type=int,
                        help='time without training but only filling the replay memory')
    parser.add_argument('--discount', default=1., type=float, help='')
    parser.add_argument('--bsize', default=64, type=int, help='minibatch size')
    parser.add_argument('--rmsize', default=100, type=int, help='memory size for each layer')
    parser.add_argument('--window_length', default=1, type=int, help='')
    parser.add_argument('--tau', default=0.01, type=float, help='moving average for target network')
    # noise (truncated normal distribution)
    parser.add_argument('--init_delta', default=0.5, type=float,
                        help='initial variance of truncated normal distribution')
    parser.add_argument('--delta_decay', default=0.95, type=float,
                        help='delta decay during exploration')
    # training
    parser.add_argument('--max_episode_length', default=1e9, type=int, help='')
    parser.add_argument('--output', default='./logs', type=str, help='')
    parser.add_argument('--debug', dest='debug', action='store_true')
    parser.add_argument('--init_w', default=0.003, type=float, help='')
    parser.add_argument('--train_episode', default=300, type=int, help='train iters each timestep')
    parser.add_argument('--epsilon', default=50000, type=int, help='linear decay of exploration policy')
    parser.add_argument('--seed', default=3, type=int, help='random seed to set')
    parser.add_argument('--n_gpu', default=1, type=int, help='number of gpu to use')
    parser.add_argument('--n_worker', default=0, type=int, help='number of data loader worker')
    parser.add_argument('--data_bsize', default=64, type=int, help='number of data batch size')
    parser.add_argument('--resume', default='default', type=str, help='Resuming model path for testing')
    # export
    parser.add_argument('--ratios', default=None, type=str, help='ratios for pruning')
    parser.add_argument('--channels', default='3, 16, 8, 8, 16, 8, 16, 16, 24, 24, 24, 32, 32, 40, 48, 48, 56, 16, 16', type=str, help='channels after pruning')
    # parser.add_argument('--channels', default='3, 16, 8, 16, 8, 16, 8, 16, 8, 16, 8, 16, 8, 16, 8, 16, 8, 16, 8, 16, 16, 16, 16, 32, 16, 32, 16, 24, 16, 24, 16, 24, 16, 24, 16, 24, 16, 24, 32, 64, 32, 56, 56, 48, 56, 48, 56, 48, 56, 48, 56, 40, 56, 40, 56, 24, 16, 16',
    #                     type=str, help='channels after pruning')
    parser.add_argument('--export_path', default='./prunedmodel/palin20pruned10.pkl', type=str, help='path for exporting models')
    parser.add_argument('--use_new_input', dest='use_new_input', action='store_true', help='use new input feature')
    #compact
    parser.add_argument('--compa', default=False, type=bool)
    parser.add_argument('--model_cp',default='plain20pr')
    parser.add_argument('--ckpt_path_cp', default='./prunedmodel/palin20pruned10.pkl', type=str,)
    #gates
    parser.add_argument('--kesi',default=0.1,type=float)
    parser.add_argument('--wd', default=4e-5, type=float, help='weight decay')
    parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
    parser.add_argument('--n_epoch', default=150, type=int, help='number of epochs to train')

    return parser.parse_args()
def get_model_and_checkpoint(model, dataset, checkpoint_path, n_gpu=1):
    """Instantiate the requested network, load its checkpoint and move it to GPU.

    Args:
        model: architecture name ('mobilenet', 'vgg', 'plain20', ...).
        dataset: dataset name the architecture is paired with.
        checkpoint_path: path to a state_dict or a checkpoint containing one.
        n_gpu: wrap the net in DataParallel when > 1.

    Returns:
        (net, state_dict_copy) — the loaded network and a deep copy of its
        weights (used later to reset the pruning environment).
    """
    if model == 'mobilenet' and dataset == 'imagenet':
        from models.mobilenet import MobileNet
        net = MobileNet(n_class=1000)
    elif model == 'vgg' and dataset == 'imagenet':
        from models.vgg import vgg16
        net = vgg16(pretrained=False)
    elif model == 'plain20' and dataset == 'cifar10':
        from models.cifar_plain import plain20
        net = plain20(10)
    elif model == 'mobilenetv2' and dataset == 'imagenet':
        from models.mobilenet_v2 import MobileNetV2
        net = MobileNetV2(n_class=1000)
    elif model == 'plain20pr' and dataset=='cifar10':
        from models.cifar_plain import plain20pr
        net = plain20pr(10)
    elif model == 'resnet56' and dataset == 'cifar10':
        from models.pytorch_cifar_models.resnetcifar import cifar10_resnet56
        net = cifar10_resnet56()
        print('load model over')
    else:
        raise NotImplementedError
    sd = torch.load(checkpoint_path)
    if 'state_dict' in sd:  # a checkpoint but not a state_dict
        sd = sd['state_dict']
    # Strip the DataParallel 'module.' prefix so the keys match a bare model.
    sd = {k.replace('module.', ''): v for k, v in sd.items()}
    net.load_state_dict(sd)
    net = net.cuda()
    if n_gpu > 1:
        net = torch.nn.DataParallel(net, range(n_gpu))

    return net, deepcopy(net.state_dict())
class Gatemodel(nn.Module):
    """Wrap `model`, inserting a learnable per-channel gate before every
    regular (non-depthwise) Conv2d.

    The gate for layer i is a vector g of shape (in_channels, 1); the input
    is scaled channel-wise by g^2 / (g^2 + kesi) before the convolution.

    NOTE(review): the gates are created as `nn.Parameter(...).cuda()` —
    calling .cuda() on a Parameter returns a non-leaf tensor, and they are
    kept in a plain dict rather than registered on the module, so they will
    not appear in .parameters() nor be moved by .to(); confirm the optimizer
    is handed these tensors explicitly, otherwise they are never trained.
    NOTE(review): `self.kesi = args.kesi` reads the module-level `args`
    global from the script's __main__ block — confirm intended.
    """
    def __init__(self,model):
        super(Gatemodel, self).__init__()
        self.model=model
        self.gate_dic= {}       # position in m_list -> gate tensor
        self.m_list = []        # flat list of the layers we re-run in forward()
        self.validlayeridx = 0
        self.add_gates(self.model)
        self.kesi = args.kesi
    def add_gates(self, model):
        # Walk the wrapped model; gated convolutions and plain pass-through
        # layers are both appended to m_list, in traversal order.
        for i,m in enumerate(model.modules()):
            if type(m)==nn.Conv2d and not m.groups== m.in_channels:
                # Regular conv (depthwise convs, where groups == in_channels,
                # are skipped): attach a gate keyed by this layer's position.
                self.gate_dic[self.validlayeridx]=torch.nn.Parameter(torch.randn((m.in_channels,1)),requires_grad=True).cuda()
                self.m_list.append(m)
                self.validlayeridx+=1
            elif type(m) in [nn.BatchNorm2d,nn.ReLU,nn.Linear,nn.AdaptiveAvgPool2d,
                             nn.MaxPool2d]:
                self.m_list.append(m)
                self.validlayeridx+=1
    def forward(self,x):
        for i,m in enumerate(self.m_list):
            if i in self.gate_dic:
                # Channel-wise soft gate: x * g^2 / (g^2 + kesi), broadcast
                # over the flattened spatial dimension.
                n,c,h,w = x.size()
                x = x.reshape(n,c,-1)
                x = x.mul(self.gate_dic[i]*self.gate_dic[i]/(self.gate_dic[i]*self.gate_dic[i]+self.kesi))
                x = x.reshape(n,c,h,w)
            if type(self.m_list[i]) == nn.Linear:
                # Global average pool (N,C,H,W) -> (N,C) before the classifier.
                x=x.mean(2).mean(2)
            x = self.m_list[i](x)
        return x
def gates_train():
    """Attach channel gates to the global `model` and train them jointly.

    Uses the script-level globals `model`, `args` and `train_loader`.
    """
    from amc_fine_tune import newtrain as trainmodel
    # 1. Load the model and attach the gates.
    gatemodel=Gatemodel(model)
    # Walk the model and place a gate (a learnable vector) before each regular
    # convolution; note that the two convolutions before/after a shortcut
    # should share the same gate.
    trainmodel(args.n_epoch,train_loader,gatemodel)
    # 2. Train the network together with the gates.
    # 3. (unfinished)
def train(num_episode, agent, env, output):
    """Run the DDPG agent against the channel-pruning environment.

    Each episode is one full pass over the prunable layers; the final
    accuracy-based reward is propagated to every transition of the episode.
    Uses the script-level globals `args`, `tfwriter` and `text_writer`.

    Args:
        num_episode: number of pruning episodes to run.
        agent: DDPG agent.
        env: ChannelPruningEnv instance.
        output: directory for intermediate agent checkpoints.
    """
    agent.is_training = True
    step = episode = episode_steps = 0
    episode_reward = 0.
    observation = None
    T = []  # trajectory
    while episode < num_episode:  # counting based on episode
        # reset if it is the start of episode
        if observation is None:
            observation = deepcopy(env.reset())
            agent.reset(observation)

        # agent pick action ...
        if episode <= args.warmup:
            # Warm-up phase: random actions to fill the replay memory.
            action = agent.random_action()
            # action = sample_from_truncated_normal_distribution(lower=0., upper=1., mu=env.preserve_ratio, sigma=0.5)
        else:
            action = agent.select_action(observation, episode=episode)

        # env response with next_observation, reward, terminate_info
        observation2, reward, done, info = env.step(action)
        observation2 = deepcopy(observation2)

        T.append([reward, deepcopy(observation), deepcopy(observation2), action, done])

        # [optional] save intermediate model
        if episode % int(num_episode / 3) == 0:
            agent.save_model(output)

        # update
        step += 1
        episode_steps += 1
        episode_reward += reward
        observation = deepcopy(observation2)

        if done:  # end of episode
            print('#{}: episode_reward:{:.4f} acc: {:.4f}, ratio: {:.4f}'.format(episode, episode_reward,
                                                                                 info['accuracy'],
                                                                                 info['compress_ratio']))
            text_writer.write(
                '#{}: episode_reward:{:.4f} acc: {:.4f}, ratio: {:.4f}\n'.format(episode, episode_reward,
                                                                                 info['accuracy'],
                                                                                 info['compress_ratio']))
            final_reward = T[-1][0]
            # print('final_reward: {}'.format(final_reward))
            # agent observe and update policy: every transition of the episode
            # receives the same terminal (accuracy-based) reward.
            for r_t, s_t, s_t1, a_t, done in T:
                agent.observe(final_reward, s_t, s_t1, a_t, done)
                if episode > args.warmup:
                    agent.update_policy()

            # reset
            observation = None
            episode_steps = 0
            episode_reward = 0.
            episode += 1
            T = []

            tfwriter.add_scalar('reward/last', final_reward, episode)
            tfwriter.add_scalar('reward/best', env.best_reward, episode)
            tfwriter.add_scalar('info/accuracy', info['accuracy'], episode)
            tfwriter.add_scalar('info/compress_ratio', info['compress_ratio'], episode)
            tfwriter.add_text('info/best_policy', str(env.best_strategy), episode)
            # record the preserve rate for each layer
            for i, preserve_rate in enumerate(env.strategy):
                tfwriter.add_scalar('preserve_rate/{}'.format(i), preserve_rate, episode)

    text_writer.write('best reward: {}\n'.format(env.best_reward))
    text_writer.write('best policy: {}\n'.format(env.best_strategy))
    text_writer.write('best d_prime: {}\n'.format(env.best_d_prime_list))
    text_writer.close()
def export_model(env, args):
    """Replay a pruning policy on `env` and export the pruned model.

    The policy comes either from args.ratios (per-layer preserve ratios,
    comma-separated) or from args.channels (absolute channel counts, from
    which the ratios are derived).  Each ratio is then fed to env.step().
    """
    assert args.ratios is not None or args.channels is not None, 'Please provide a valid ratio list or pruned channels'
    assert args.export_path is not None, 'Please provide a valid export path'

    env.set_export_path(args.export_path)
    print('=> Original model channels: {}'.format(env.org_channels))

    if args.ratios:
        # Ratios given directly; derive the resulting channel counts.
        ratios = [float(token) for token in args.ratios.split(',')]
        assert len(ratios) == len(env.org_channels)
        channels = [int(ratio * c) for ratio, c in zip(ratios, env.org_channels)]
    else:
        # Channel counts given; derive the per-layer ratios.
        channels = [int(token) for token in args.channels.split(',')]
        ratios = [pruned / orig for pruned, orig in zip(channels, env.org_channels)]

    print('=> Pruning with ratios: {}'.format(ratios))
    print('=> Channels after pruning: {}'.format(channels))

    for ratio in ratios:
        env.step(ratio)
if __name__ == "__main__":
args = parse_args()
if args.seed is not None:
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
model, checkpoint = get_model_and_checkpoint(args.model, args.dataset, checkpoint_path=args.ckpt_path,
n_gpu=args.n_gpu)
if not args.job == 'gates_train':
env = ChannelPruningEnv(model, checkpoint, args.dataset,
preserve_ratio=1. if args.job == 'export' else args.preserve_ratio,
n_data_worker=args.n_worker, batch_size=args.data_bsize,
args=args, export_model=args.job == 'export', use_new_input=args.use_new_input)
if args.job == 'train':
# build folder and logs
base_folder_name = '{}_{}_r{}_search'.format(args.model, args.dataset, args.preserve_ratio)
if args.suffix is not None:
base_folder_name = base_folder_name + '_' + args.suffix
args.output = get_output_folder(args.output, base_folder_name)
print('=> Saving logs to {}'.format(args.output))
tfwriter = SummaryWriter(logdir=args.output)
text_writer = open(os.path.join(args.output, 'log.txt'), 'w')
print('=> Output path: {}...'.format(args.output))
nb_states = env.layer_embedding.shape[1]
nb_actions = 1 # just 1 action here
args.rmsize = args.rmsize * len(env.prunable_idx) # for each layer
print('** Actual replay buffer size: {}'.format(args.rmsize))
agent = DDPG(nb_states, nb_actions, args)
train(args.train_episode, agent, env, args.output)
elif args.job == 'export':
export_model(env, args)
elif args.job == 'gates_train':
from lib.data import get_dataset
train_loader,val_loader,n_classes = get_dataset(args.dataset,
args.bsize,args.n_worker,data_root=args.data_root)
gates_train()
else:
raise RuntimeError('Undefined job {}'.format(args.job))
| 47.498452 | 271 | 0.627363 |
ace703e6ae362e0de562351f1363722831014f07 | 17,432 | py | Python | pointcnn.py | jiezhangxl/PointCNN-FI-Conv | b861692530fefd86e95a5bbcd0570b92cd112747 | [
"MIT"
] | 1 | 2021-11-29T06:39:43.000Z | 2021-11-29T06:39:43.000Z | pointcnn.py | jiezhangxl/PointCNN-FI-Conv | b861692530fefd86e95a5bbcd0570b92cd112747 | [
"MIT"
] | null | null | null | pointcnn.py | jiezhangxl/PointCNN-FI-Conv | b861692530fefd86e95a5bbcd0570b92cd112747 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import pointfly as pf
import tensorflow as tf
def ficonv(pts, fts, qrs, tag, N, K1, mm, sigma, scale, K, D, P, C, C_pts_fts, kernel_num, is_training, with_kernel_registering, with_kernel_shape_comparison,
          with_point_transformation, with_feature_transformation, with_learning_feature_transformation, kenel_initialization_method, depth_multiplier, sorting_method=None, with_global=False):
    """Field-interpolation convolution (FI-Conv) layer (TF1 graph builder).

    For each query point in `qrs`, gathers its K (dilation-D) nearest
    neighbors from `pts`, optionally applies the learned X-transformation
    to points/features, then correlates the neighborhood against
    `kernel_num` learnable kernel point sets via Gaussian distance weights
    (bandwidth `sigma`, kernel extent `scale`) and applies a separable
    convolution per kernel set.

    Returns features of shape (N, P, C) — concatenated with a small global
    feature when `with_global` is set.
    """
    Dis, indices_dilated = pf.knn_indices_general(qrs, pts, K*D, True)
    indices = indices_dilated[:, :, ::D, :]

    if sorting_method is not None:
        indices = pf.sort_points(pts, indices, sorting_method)

    nn_pts = tf.gather_nd(pts, indices, name=tag + 'nn_pts')  # (N, P, K, 3)
    nn_pts_center = tf.expand_dims(qrs, axis=2, name=tag + 'nn_pts_center')  # (N, P, 1, 3)
    nn_pts_local = tf.subtract(nn_pts, nn_pts_center, name=tag+'nn_pts_local')  # (N, P, K, 3)
    if with_point_transformation or with_feature_transformation:
        # Learn a per-neighborhood K x K transformation matrix (X-transform).
        X_0 = pf.conv2d(nn_pts_local, K * K, tag + 'X_0', is_training, (1, K))
        X_0_KK = tf.reshape(X_0, (N, P, K, K), name=tag + 'X_0_KK')
        X_1 = pf.depthwise_conv2d(X_0_KK, K, tag + 'X_1', is_training, (1, K))
        X_1_KK = tf.reshape(X_1, (N, P, K, K), name=tag + 'X_1_KK')
        X_2 = pf.depthwise_conv2d(X_1_KK, K, tag + 'X_2', is_training, (1, K), activation=None)
        X_2_KK = tf.reshape(X_2, (N, P, K, K), name=tag + 'X_2_KK')
    if with_point_transformation:
        if with_learning_feature_transformation:
            # Transform points first, then lift them to features.
            nn_pts_local = tf.matmul(X_2_KK, nn_pts_local)
            # Prepare features to be transformed
            nn_fts_from_pts_0 = pf.dense(nn_pts_local, C_pts_fts, tag + 'nn_fts_from_pts_0', is_training)
            nn_fts_from_pts = pf.dense(nn_fts_from_pts_0, C_pts_fts, tag + 'nn_fts_from_pts', is_training)
        else:
            # Prepare features to be transformed (from the untransformed
            # points), then transform the points for the distance computation.
            nn_fts_from_pts_0 = pf.dense(nn_pts_local, C_pts_fts, tag + 'nn_fts_from_pts_0', is_training)
            nn_fts_from_pts = pf.dense(nn_fts_from_pts_0, C_pts_fts, tag + 'nn_fts_from_pts', is_training)
            nn_pts_local = tf.matmul(X_2_KK, nn_pts_local)
    else:
        if with_learning_feature_transformation:
            # Transformed copy used only as the source of lifted features.
            nn_pts_local_ = tf.matmul(X_2_KK, nn_pts_local, name=tag+'nn_pts_local_')
            # Prepare features to be transformed
            nn_fts_from_pts_0 = pf.dense(nn_pts_local_, C_pts_fts, tag + 'nn_fts_from_pts_0', is_training)
            nn_fts_from_pts = pf.dense(nn_fts_from_pts_0, C_pts_fts, tag + 'nn_fts_from_pts', is_training)
        else:
            nn_fts_from_pts_0 = pf.dense(nn_pts_local, C_pts_fts, tag + 'nn_fts_from_pts_0', is_training)
            nn_fts_from_pts = pf.dense(nn_fts_from_pts_0, C_pts_fts, tag + 'nn_fts_from_pts', is_training)

    if fts is None:
        nn_fts_input = nn_fts_from_pts
    else:
        # Concatenate lifted point features with the previous layer's features.
        nn_fts_from_prev = tf.gather_nd(fts, indices, name=tag + 'nn_fts_from_prev')
        nn_fts_input = tf.concat([nn_fts_from_pts, nn_fts_from_prev], axis=-1, name=tag + 'nn_fts_input')
    P1 = tf.shape(nn_pts_local)[1]
    dim1 = 3
    if with_kernel_registering:
        ######################## preparing #########################
        if with_feature_transformation:
            nn_fts_input = tf.matmul(X_2_KK, nn_fts_input)

        # Squared norm of each local neighbor point, reused for every kernel.
        r_data = tf.reduce_sum(nn_pts_local * nn_pts_local, axis=3, keep_dims=True, name=tag+'kernel_pow')
        ######################## kernel-registering #########################
        # First kernel set (shape_id 0); the remaining kernel_num-1 sets are
        # built in the loop below with identical structure.
        shape_id = 0
        if kenel_initialization_method == 'random':
            kernel_shape=tf.Variable(tf.random_uniform([K1,dim1], minval=-0.5, maxval=0.5, dtype=tf.float32), name=tag+'kernel_shape'+str(shape_id))
        else:
            kernel_shape=tf.Variable(tf.random_normal([K1,dim1], mean=0.0, stddev=1.0, dtype=tf.float32), name=tag+'kernel_shape'+str(shape_id))
        # Normalize the kernel points to the ball of radius `scale`.
        kernel_shape_dis = tf.sqrt(tf.reduce_sum(kernel_shape * kernel_shape, axis=1), name=tag+'kernel_shape_dis'+str(shape_id))
        kernel_shape_normal = scale * tf.div(kernel_shape,tf.reduce_max(kernel_shape_dis), name=tag+'kernel_shape_normal'+str(shape_id))

        # Pairwise squared distances between neighbors and kernel points via
        # ||a||^2 - 2 a.b + ||b||^2, then Gaussian weighting.
        r_kernel = tf.reduce_sum(kernel_shape_normal * kernel_shape_normal, axis=1, keep_dims=True, name=tag+'kernel_pow'+str(shape_id))
        reshape_data = tf.reshape(nn_pts_local, [N*P1*K,dim1], name=tag+'reshape_kernel'+str(shape_id))
        m = tf.reshape( tf.matmul(reshape_data, tf.transpose(kernel_shape_normal)), [N, P1, K, K1], name=tag+'mm'+str(shape_id))
        dis_matrix = tf.transpose(r_data-2*m+tf.transpose(r_kernel),perm=[0,1,3,2],name=tag+'dis_matrix'+str(shape_id))
        coef_matrix = tf.exp(tf.div(-dis_matrix,sigma), name=tag+'coef_matrix'+str(shape_id))
        #coef_matrix = tf.transpose(r_data-2*m+tf.transpose(r_kernel),perm=[0,1,3,2],name=tag+'coef_matrix'+str(shape_id))

        if with_kernel_shape_comparison:
            # Scale by a per-neighborhood global agreement score.
            coef_global = tf.reduce_sum(coef_matrix, axis=[2,3], keep_dims=True)/K
            coef_normal = coef_global * tf.div(coef_matrix,tf.reduce_sum(coef_matrix , axis = 3 , keep_dims=True), name=tag+'coef_normal'+str(shape_id))
        else:
            coef_normal = tf.div(coef_matrix,tf.reduce_sum(coef_matrix , axis = 3 , keep_dims=True), name=tag+'coef_normal'+str(shape_id))
        # Interpolate neighborhood features onto the kernel points.
        fts_X = tf.matmul(coef_normal, nn_fts_input, name=tag+'fts_X'+str(shape_id))
        ###################################################################
        fts_conv = pf.separable_conv2d(fts_X, math.ceil(mm*C/kernel_num), tag+'fts_conv'+str(shape_id), is_training, (1, K1), depth_multiplier=depth_multiplier)
        fts_conv_3d = tf.squeeze(fts_conv, axis=2, name=tag+'fts_conv_3d'+str(shape_id))

        for shape_id in range(kernel_num - 1):
            shape_id = shape_id + 1
            if kenel_initialization_method == 'random':
                kernel_shape=tf.Variable(tf.random_uniform([K1,dim1], minval=-0.5, maxval=0.5, dtype=tf.float32), name=tag+'kernel_shape'+str(shape_id))
            else:
                kernel_shape=tf.Variable(tf.random_normal([K1,dim1], mean=0.0, stddev=1.0, dtype=tf.float32), name=tag+'kernel_shape'+str(shape_id))
            kernel_shape_dis = tf.sqrt(tf.reduce_sum(kernel_shape * kernel_shape, axis=1), name=tag+'kernel_shape_dis'+str(shape_id))
            kernel_shape_normal = scale * tf.div(kernel_shape,tf.reduce_max(kernel_shape_dis), name=tag+'kernel_shape_normal'+str(shape_id))

            r_kernel = tf.reduce_sum(kernel_shape_normal * kernel_shape_normal, axis=1, keep_dims=True, name=tag+'kernel_pow'+str(shape_id))
            reshape_data = tf.reshape(nn_pts_local, [N*P1*K,dim1], name=tag+'reshape_kernel'+str(shape_id))
            m = tf.reshape( tf.matmul(reshape_data, tf.transpose(kernel_shape_normal)), [N, P1, K, K1], name=tag+'mm'+str(shape_id))
            dis_matrix = tf.transpose(r_data-2*m+tf.transpose(r_kernel),perm=[0,1,3,2],name=tag+'dis_matrix'+str(shape_id))
            coef_matrix = tf.exp(tf.div(-dis_matrix,sigma), name=tag+'coef_matrix'+str(shape_id))
            #coef_matrix = tf.transpose(r_data-2*m+tf.transpose(r_kernel),perm=[0,1,3,2],name=tag+'coef_matrix'+str(shape_id))

            if with_kernel_shape_comparison:
                coef_global = tf.reduce_sum(coef_matrix, axis=[2,3], keep_dims=True)/K
                coef_normal = coef_global * tf.div(coef_matrix,tf.reduce_sum(coef_matrix , axis = 3 , keep_dims=True), name=tag+'coef_normal'+str(shape_id))
            else:
                coef_normal = tf.div(coef_matrix,tf.reduce_sum(coef_matrix , axis = 3 , keep_dims=True), name=tag+'coef_normal'+str(shape_id))
            fts_X = tf.matmul(coef_normal, nn_fts_input, name=tag+'fts_X'+str(shape_id))
            ###################################################################
            fts_conv = pf.separable_conv2d(fts_X, math.ceil(mm*C/kernel_num), tag+'fts_conv'+str(shape_id), is_training, (1, K1), depth_multiplier=depth_multiplier)
            # Concatenate the output of every kernel set along channels.
            fts_conv_3d = tf.concat([fts_conv_3d, tf.squeeze(fts_conv, axis=2)], axis = -1 , name=tag+'fts_conv_3d'+str(shape_id))
    else:
        # No kernel registering: plain separable conv over the raw features.
        fts_X = nn_fts_input
        fts_conv = pf.separable_conv2d(fts_X, C, tag + 'fts_conv', is_training, (1, K), depth_multiplier=depth_multiplier)
        fts_conv_3d = tf.squeeze(fts_conv, axis=2, name=tag + 'fts_conv_3d')

    if with_global:
        fts_global_0 = pf.dense(qrs, C // 4, tag + 'fts_global_0', is_training)
        fts_global = pf.dense(fts_global_0, C // 4, tag + 'fts_global', is_training)
        return tf.concat([fts_global, fts_conv_3d], axis=-1, name=tag + 'fts_conv_3d_with_global')
    else:
        return fts_conv_3d
def xdeconv(pts, fts, qrs, tag, N, K, D, P, C, C_pts_fts, is_training, with_X_transformation, depth_multiplier,
            sorting_method=None, with_global=False):
    """X-Conv layer used for the decoder (upsampling) path (TF1 graph builder).

    Gathers the K (dilation-D) nearest neighbors of each query point,
    lifts local coordinates to features, optionally applies the learned
    X-transformation, and runs a separable convolution.  Returns (N, P, C)
    features (plus a global feature when `with_global` is set).
    """
    _, indices_dilated = pf.knn_indices_general(qrs, pts, K * D, True)
    indices = indices_dilated[:, :, ::D, :]

    if sorting_method is not None:
        indices = pf.sort_points(pts, indices, sorting_method)

    nn_pts = tf.gather_nd(pts, indices, name=tag + 'nn_pts')  # (N, P, K, 3)
    nn_pts_center = tf.expand_dims(qrs, axis=2, name=tag + 'nn_pts_center')  # (N, P, 1, 3)
    nn_pts_local = tf.subtract(nn_pts, nn_pts_center, name=tag + 'nn_pts_local')  # (N, P, K, 3)

    # Prepare features to be transformed
    nn_fts_from_pts_0 = pf.dense(nn_pts_local, C_pts_fts, tag + 'nn_fts_from_pts_0', is_training)
    nn_fts_from_pts = pf.dense(nn_fts_from_pts_0, C_pts_fts, tag + 'nn_fts_from_pts', is_training)
    if fts is None:
        nn_fts_input = nn_fts_from_pts
    else:
        nn_fts_from_prev = tf.gather_nd(fts, indices, name=tag + 'nn_fts_from_prev')
        nn_fts_input = tf.concat([nn_fts_from_pts, nn_fts_from_prev], axis=-1, name=tag + 'nn_fts_input')

    if with_X_transformation:
        ######################## X-transformation #########################
        # Learn a per-neighborhood K x K matrix and apply it to the features.
        X_0 = pf.conv2d(nn_pts_local, K * K, tag + 'X_0', is_training, (1, K))
        X_0_KK = tf.reshape(X_0, (N, P, K, K), name=tag + 'X_0_KK')
        X_1 = pf.depthwise_conv2d(X_0_KK, K, tag + 'X_1', is_training, (1, K))
        X_1_KK = tf.reshape(X_1, (N, P, K, K), name=tag + 'X_1_KK')
        X_2 = pf.depthwise_conv2d(X_1_KK, K, tag + 'X_2', is_training, (1, K), activation=None)
        X_2_KK = tf.reshape(X_2, (N, P, K, K), name=tag + 'X_2_KK')
        fts_X = tf.matmul(X_2_KK, nn_fts_input, name=tag + 'fts_X')
        ###################################################################
    else:
        fts_X = nn_fts_input
    fts_conv = pf.separable_conv2d(fts_X, C, tag + 'fts_conv', is_training, (1, K), depth_multiplier=depth_multiplier)
    fts_conv_3d = tf.squeeze(fts_conv, axis=2, name=tag + 'fts_conv_3d')

    if with_global:
        fts_global_0 = pf.dense(qrs, C // 4, tag + 'fts_global_0', is_training)
        fts_global = pf.dense(fts_global_0, C // 4, tag + 'fts_global', is_training)
        return tf.concat([fts_global, fts_conv_3d], axis=-1, name=tag + 'fts_conv_3d_with_global')
    else:
        return fts_conv_3d
class PointCNN:
    """Build the full PointCNN / FI-Conv network graph (TF1).

    Constructs encoder layers (ficonv) according to setting.xconv_params,
    optional decoder layers (xdeconv) from setting.xdconv_params, and a
    final stack of fully-connected + dropout layers from setting.fc_params.
    The intermediate tensors are kept in self.layer_pts / self.layer_fts,
    and the classifier head outputs are in self.fc_layers.
    """
    def __init__(self, points, features, is_training, setting):
        xconv_params = setting.xconv_params
        fc_params = setting.fc_params
        with_X_transformation = setting.with_X_transformation
        with_kernel_registering = setting.with_kernel_registering
        with_kernel_shape_comparison = setting.with_kernel_shape_comparison
        with_point_transformation = setting.with_point_transformation
        with_feature_transformation = setting.with_feature_transformation
        with_learning_feature_transformation = setting.with_learning_feature_transformation
        kenel_initialization_method = setting.kenel_initialization_method
        sorting_method = setting.sorting_method
        N = tf.shape(points)[0]
        kernel_num = setting.kernel_num

        if setting.sampling == 'fps':
            # Farthest-point sampling needs the compiled custom op.
            from sampling import tf_sampling

        self.layer_pts = [points]
        if features is None:
            self.layer_fts = [features]
        else:
            # Lift the extra per-point input features before the first layer.
            features = tf.reshape(features, (N, -1, setting.data_dim - 3), name='features_reshape')
            C_fts = xconv_params[0]['C'] // 2
            features_hd = pf.dense(features, C_fts, 'features_hd', is_training)
            self.layer_fts = [features_hd]
        # self.Dis = []
        # self.nn_pts_local = []
        # Encoder: one ficonv layer per entry in xconv_params.
        for layer_idx, layer_param in enumerate(xconv_params):
            tag = 'xconv_' + str(layer_idx + 1) + '_'
            K1 = layer_param['K1']
            mm = layer_param['mm']
            sigma = layer_param['sigma']
            scale = layer_param['scale']
            K = layer_param['K']
            D = layer_param['D']
            P = layer_param['P']
            C = layer_param['C']
            links = layer_param['links']
            if setting.sampling != 'random' and links:
                print('Error: flexible links are supported only when random sampling is used!')
                exit()

            # get k-nearest points
            pts = self.layer_pts[-1]
            fts = self.layer_fts[-1]
            # Reuse the previous query set when the layer keeps the same P.
            if P == -1 or (layer_idx > 0 and P == xconv_params[layer_idx - 1]['P']):
                qrs = self.layer_pts[-1]
            else:
                if setting.sampling == 'fps':
                    fps_indices = tf_sampling.farthest_point_sample(P, pts)
                    batch_indices = tf.tile(tf.reshape(tf.range(N), (-1, 1, 1)), (1, P, 1))
                    indices = tf.concat([batch_indices, tf.expand_dims(fps_indices,-1)], axis=-1)
                    qrs = tf.gather_nd(pts, indices, name= tag + 'qrs')  # (N, P, 3)
                elif setting.sampling == 'ids':
                    indices = pf.inverse_density_sampling(pts, K, P)
                    qrs = tf.gather_nd(pts, indices)
                elif setting.sampling == 'random':
                    qrs = tf.slice(pts, (0, 0, 0), (-1, P, -1), name=tag + 'qrs')  # (N, P, 3)
                else:
                    print('Unknown sampling method!')
                    exit()
            self.layer_pts.append(qrs)

            if layer_idx == 0:
                C_pts_fts = C // 2 if fts is None else C // 4
                depth_multiplier = 4
            else:
                C_prev = xconv_params[layer_idx - 1]['C']
                C_pts_fts = C_prev // 4
                depth_multiplier = math.ceil(C / C_prev)
            with_global = (setting.with_global and layer_idx == len(xconv_params) - 1)
            fts_xconv= ficonv(pts, fts, qrs, tag, N, K1, mm, sigma, scale, K, D, P, C, C_pts_fts, kernel_num, is_training, with_kernel_registering, with_kernel_shape_comparison,
                              with_point_transformation, with_feature_transformation, with_learning_feature_transformation, kenel_initialization_method, depth_multiplier, sorting_method, with_global)
            #self.Dis.append(Dis_)
            #self.nn_pts_local.append(nn_pts_local_)
            # Optional skip links: concatenate features from earlier layers.
            fts_list = []
            for link in links:
                fts_from_link = self.layer_fts[link]
                if fts_from_link is not None:
                    fts_slice = tf.slice(fts_from_link, (0, 0, 0), (-1, P, -1), name=tag + 'fts_slice_' + str(-link))
                    fts_list.append(fts_slice)
            if fts_list:
                fts_list.append(fts_xconv)
                self.layer_fts.append(tf.concat(fts_list, axis=-1, name=tag + 'fts_list_concat'))
            else:
                self.layer_fts.append(fts_xconv)

        # Decoder (segmentation settings only): xdeconv layers that fuse
        # upsampled features with the matching encoder layer's features.
        if hasattr(setting, 'xdconv_params'):
            for layer_idx, layer_param in enumerate(setting.xdconv_params):
                tag = 'xdconv_' + str(layer_idx + 1) + '_'
                K = layer_param['K']
                D = layer_param['D']
                pts_layer_idx = layer_param['pts_layer_idx']
                qrs_layer_idx = layer_param['qrs_layer_idx']

                pts = self.layer_pts[pts_layer_idx + 1]
                fts = self.layer_fts[pts_layer_idx + 1] if layer_idx == 0 else self.layer_fts[-1]
                qrs = self.layer_pts[qrs_layer_idx + 1]
                fts_qrs = self.layer_fts[qrs_layer_idx + 1]
                P = xconv_params[qrs_layer_idx]['P']
                C = xconv_params[qrs_layer_idx]['C']
                C_prev = xconv_params[pts_layer_idx]['C']
                C_pts_fts = C_prev // 4
                depth_multiplier = 1
                fts_xdconv = xdeconv(pts, fts, qrs, tag, N, K, D, P, C, C_pts_fts, is_training, with_X_transformation,
                                     depth_multiplier, sorting_method)
                fts_concat = tf.concat([fts_xdconv, fts_qrs], axis=-1, name=tag + 'fts_concat')
                fts_fuse = pf.dense(fts_concat, C, tag + 'fts_fuse', is_training)
                self.layer_pts.append(qrs)
                self.layer_fts.append(fts_fuse)

        # Classifier head: dense + dropout stack over the last features.
        self.fc_layers = [self.layer_fts[-1]]
        for layer_idx, layer_param in enumerate(fc_params):
            C = layer_param['C']
            dropout_rate = layer_param['dropout_rate']
            fc = pf.dense(self.fc_layers[-1], C, 'fc{:d}'.format(layer_idx), is_training)
            fc_drop = tf.layers.dropout(fc, dropout_rate, training=is_training, name='fc{:d}_drop'.format(layer_idx))
            self.fc_layers.append(fc_drop)
| 59.494881 | 199 | 0.623623 |
ace704105547d32b8b0738488749d364efb3febb | 6,400 | py | Python | ferrox/lib/helpers.py | hsuaz/ferrox | ac89b698e6c12c57c7a3128b6a25a3dc100bfc15 | [
"MIT"
] | 3 | 2017-01-03T20:55:16.000Z | 2022-03-01T15:21:53.000Z | ferrox/lib/helpers.py | hsuaz/ferrox | ac89b698e6c12c57c7a3128b6a25a3dc100bfc15 | [
"MIT"
] | null | null | null | ferrox/lib/helpers.py | hsuaz/ferrox | ac89b698e6c12c57c7a3128b6a25a3dc100bfc15 | [
"MIT"
] | 4 | 2017-01-03T20:48:09.000Z | 2022-03-01T15:21:58.000Z | """Helper functions
Consists of functions to typically be used within templates, but also
available to Controllers. This module is available to both as 'h'.
"""
from webhelpers.util import html_escape
from webhelpers.html import *
from routes import url_for, redirect_to, request_config
import pylons.config
from pylons import tmpl_context as c
import os
import re
import time
def javascript_include_tag(src):
    """Return a <script> element sourcing the external JavaScript file `src`."""
    attributes = {'src': src, 'type': 'text/javascript'}
    return HTML.tag('script', **attributes)
def link_to(text, url, **kwargs):
    """Removed helper; always raises RuntimeError pointing callers at h.HTML.a()."""
    message = """
    h.link_to() is depricated. Use h.HTML.a() instead.
    Syntax: h.HTML.a(href='url://example.com/', *content, **attrs)
    *content can be strings and/or h.HTML.tag()s. Strings will be escaped.
    """
    raise RuntimeError(message)
def escape_once(data):
    """Removed helper: escaping is handled by h.html_escape().

    Always raises RuntimeError.  (The original ``raise ("...")`` raised a
    bare string, which in Python 3 is a TypeError — "exceptions must derive
    from BaseException" — hiding the intended deprecation message.  Raising
    RuntimeError matches the sibling stubs link_to() and form().)
    """
    raise RuntimeError("h.escape_once has been depricated in favor of h.html_escape()")
def normalize_newlines(string):
    """Adjust all line endings to be the Linux line break, \\x0a."""
    # Collapse CRLF pairs first, then any remaining lone CRs.
    return string.replace("\x0d\x0a", "\x0a").replace("\x0d", "\x0a")
def to_dict(model):
    '''Convert a SQLAlchemy model instance into a dictionary'''
    column_names = model.__table__.c.keys()
    return {name: getattr(model, name) for name in column_names}
def embed_flash(url,dims=None):
    """Return the <object>/<embed> markup embedding the Flash movie at `url`.

    `dims`, when given, is a (height, width) tuple inserted verbatim into
    both the object and embed attributes.  The markup literals below are
    output verbatim — do not reformat them.
    """
    rv = """
    <object classid="clsid:D27CDB6E-AE6D-11cf-96B8-444553540000" codebase="http://download.macromedia.com/pub/shockwave/cabs/flash/swflash.cab#version=6,0,40,0" id="page_content" """
    if dims != None:
        rv = rv + "height=\"%d\" width=\"%d\"" % dims
    rv = rv + """>
    <param name="movie" value="%s" />
    <param name="quality" value="high" />
    <param name="bgcolor" value="#FFFFFF" />
    <embed src="%s" quality="high" bgcolor="#FFFFFF" name="myMoviename" align="" type="application/x-shockwave-flash" pluginspage="http://www.macromedia.com/go/getflashplayer" """ % (url,url)
    if dims != None:
        rv = rv + "height=\"%d\" width=\"%d\"" % dims
    rv = rv + """></embed>
    </object>
    """
    return rv
def embed_mp3(url):
    """Return QuickTime-style ``<object>``/``<embed>`` markup playing *url*."""
    template = """
<object width="300" height="42">
<param name="src" value="%(url)s">
<param name="autoplay" value="true">
<param name="controller" value="true">
<param name="bgcolor" value="#FF9900">
<embed src="%(url)s" autostart="true" loop="false" width="300" height="42" controller="true" bgcolor="#FF9900"></embed>
</object>
"""
    return template % {'url': url}
def dict_to_option(opts=None, default=None):
    """Render ``<option>`` tags for a ``{value: label}`` mapping.

    An empty label means "use the key as the label"; ``default`` marks the
    matching key as selected.  NOTE: the old default of ``()`` crashed on
    ``.keys()``; calling with no arguments now yields an empty string.
    """
    if opts is None:
        opts = {}
    output = ''
    for key, label in opts.items():
        if label == '':
            label = key
        selected = ' selected="selected"' if default == key else ''
        output = "%s\n<option value=\"%s\"%s>%s</option>" % (output, key, selected, label)
    return output
def format_time(datetime):
    """Format a datetime object standardly."""
    format_string = '%m/%d/%y %I:%M %p'
    strftime = getattr(datetime, 'strftime', None)
    if strftime is None:
        # A plain number: treat it as seconds since the epoch, in UTC.
        return time.strftime(format_string, time.gmtime(datetime))
    return strftime(format_string)
def image_tag(source, alt=None, size=None, **options):
    """
    Copied from the default pylons webhelpers, to fix alt='' not working.
    Also copies alt into title, if one isn't specified.

    ``size`` is a "WIDTHxHEIGHT" string; either side may be omitted, and
    the lone string "x" is ignored entirely.
    """
    options['src'] = source
    if alt is None:
        # Derive a human-readable alt from the file name.
        alt = os.path.splitext(os.path.basename(source))[0].title()
    options['alt'] = alt
    if 'title' not in options:
        options['title'] = options['alt']
    # Raw string: \d was previously an un-escaped sequence in a plain literal.
    if size and re.match(r'^(\d+|)x(\d+|)$', size) and size != 'x':
        width, height = size.split('x')
        if width:
            options['width'] = width
        if height:
            options['height'] = height
    return HTML.tag('img', **options)
def form(*args, **kwargs):
    """Guard against the webhelpers form helpers; always raises."""
    message = (
        "Do not use the built-in webhelpers form tags "
        "functions. Use formgen instead. If you don't need "
        "errors or defaults, use c.empty_form.")
    raise RuntimeError(message)
# Route every webhelpers form-tag entry point through the guard above so
# any accidental use raises immediately.  ("radio_buttom" [sic] is a
# long-standing typo -- presumably imported elsewhere under that name, so
# verify all call sites before renaming it.)
start_form = form
end_form = form
text_field = form
submit = form
password_field = form
check_box = form
radio_buttom = form
hidden_field = form
file_field = form
def indented_comments(comments):
    """Given a list of comment rows, returns them with an indent property set
    corresponding to the depth relative to the first (presumably the root).

    The comments should be in order by left.  This will always put them in
    the correct order.

    Implementation note: the rows appear to use nested-set (left/right)
    numbering; ``right_ancestry`` is the stack of still-open ancestors.
    (Py2 code: uses ``xrange``.)
    """
    last_comment = None
    indent = 0
    right_ancestry = []
    for comment in comments:
        # A comment nested inside the previous one: descend one level.
        if last_comment \
                and comment.left < last_comment.right:
            indent = indent + 1
            right_ancestry.append(last_comment)
        # Pop every ancestor this comment has already stepped out of.
        # Iterating backwards keeps the indices valid while popping.
        for i in xrange(len(right_ancestry) - 1, -1, -1):
            if comment.left > right_ancestry[i].right:
                indent = indent - 1
                right_ancestry.pop(i)
        if len(right_ancestry):
            # NOTE(review): annotates the row with its parent comment --
            # assumed consumed by templates; confirm before removing.
            comment._parent = right_ancestry[-1]
        comment.indent = indent
        last_comment = comment
    return comments
def get_avatar_url(object = None):
    """Return the avatar image URL for *object*, or the configured default.

    Prefers the object's own avatar; otherwise falls back to the default
    avatar of its primary_artist / author / user, or of the object itself.
    """
    if hasattr(object, 'avatar') and object.avatar:
        return url_for(controller='gallery', action='file', filename=object.avatar.mogile_key)
    else:
        av = None
        if hasattr(object, 'primary_artist'):
            av = object.primary_artist.default_avatar
        elif hasattr(object, 'author'):
            av = object.author.default_avatar
        elif hasattr(object, 'user'):
            av = object.user.default_avatar
        elif hasattr(object, 'default_avatar') and object.default_avatar:
            # Bug fix: this branch previously read object.user.default_avatar,
            # which cannot exist here (the 'user' case was handled above) --
            # use the object's own default_avatar instead.
            av = object.default_avatar
        if av:
            return url_for(controller='gallery', action='file', filename=av.mogile_key)
    return pylons.config.get('avatar.default', '/default_avatar.png')
def objects_to_option_tags(objects, default=None, id_attr='id', name_attr='name'):
    """Render one ``<option>`` per object, selecting the one whose id
    attribute equals *default*."""
    tags = []
    for obj in objects:
        obj_id = getattr(obj, id_attr)
        marker = ' selected="selected"' if default == obj_id else ''
        tags.append('<option value="%d"%s>%s</option>'
                    % (obj_id, marker, getattr(obj, name_attr)))
    return ''.join(tags)
def implicit_url_for(**kwargs):
    """Build a URL from the current route, with *kwargs* overriding fields.

    Raises RuntimeError if the resulting route lacks a controller or action.
    """
    new_route = c.route.copy()
    new_route.update(kwargs)
    if new_route['controller'] is None or new_route['action'] is None:
        # Fixed the garbled message ("Try create url without ...").
        raise RuntimeError("Cannot create a url without 'controller' or 'action'")
    return url_for(**new_route)
| 31.840796 | 192 | 0.629688 |
ace7049624f178085705f35f2a81c22d3d4d5f69 | 3,192 | py | Python | utils/common.py | ryo-currency/ryo-gui-wallet | d72495fb456ff0a15000d3cf214b5765a1555c4b | [
"BSD-3-Clause"
] | 18 | 2018-06-03T19:13:56.000Z | 2020-01-16T19:43:58.000Z | utils/common.py | ryo-currency/ryo-gui-wallet | d72495fb456ff0a15000d3cf214b5765a1555c4b | [
"BSD-3-Clause"
] | 7 | 2018-06-06T13:39:01.000Z | 2018-12-10T06:19:23.000Z | utils/common.py | ryo-currency/ryo-gui-wallet | d72495fb456ff0a15000d3cf214b5765a1555c4b | [
"BSD-3-Clause"
] | 7 | 2018-06-03T13:42:41.000Z | 2022-03-09T09:09:47.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
## Copyright (c) 2017, The Sumokoin Project (www.sumokoin.org)
'''
Misc utility classes/functions for application
'''
import os, sys, string
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class DummyStream:
    ''' dummyStream behaves like a stream but does nothing. '''

    def __init__(self):
        pass

    def write(self, data):
        # Discard everything.
        pass

    def read(self, data):
        # Nothing to produce.
        pass

    def flush(self):
        pass

    def close(self):
        pass
def getAppPath():
    '''Get the path to this script no matter how it's run.'''
    # Determine if the application is a py/pyw or a frozen exe.
    if hasattr(sys, 'frozen'):
        # Frozen exe: use the directory holding the executable.
        dir_path = os.path.dirname(unicode(sys.executable, sys.getfilesystemencoding()))
    elif '__file__' in globals():
        # Plain .py run: use this module's own location.  (Bug fix: the
        # old code tested ``'__file__' in locals()``, which is never true
        # inside a function, so this branch was unreachable.)
        dir_path = os.path.dirname(unicode(__file__, sys.getfilesystemencoding()))
    else:
        # Interactive / unusual invocation: fall back to the CWD.
        dir_path = os.getcwdu()
    return dir_path
def getResourcesPath():
    """Locate the Resources directory relative to the application path."""
    app_path = getAppPath()
    if sys.platform == 'darwin' and hasattr(sys, 'frozen'):
        # Inside a frozen .app bundle, Resources sits beside the MacOS dir.
        relative = ("..", "Resources")
    else:
        relative = ("Resources",)
    return os.path.normpath(os.path.abspath(os.path.join(app_path, *relative)))
def getHomeDir():
    """Home directory: common appdata on Windows, ``~`` elsewhere."""
    if sys.platform != 'win32':
        return os.path.expanduser("~")
    import winpaths
    return winpaths.get_common_appdata()  # = e.g 'C:\ProgramData'
def getSockDir():
    """Socket directory: per-user appdata on Windows, ``~`` elsewhere."""
    if sys.platform != 'win32':
        return os.path.expanduser("~")
    import winpaths
    return winpaths.get_appdata()
def makeDir(d):
    """Create directory *d* (with parents) if it is missing, then return it."""
    if os.path.exists(d):
        return d
    os.makedirs(d)
    return d
def ensureDir(f):
    """Create the parent directory of path *f* if needed; return *f*."""
    parent = os.path.dirname(f)
    if not os.path.exists(parent):
        os.makedirs(parent)
    return f
def _xorData(data):
    """Xor Method, Take a data Xor all bytes and return.

    Every character is XORed with 10; the transform is its own inverse.
    Fixed: ``string.join(data, '')`` is Python-2-only (removed from the
    ``string`` module in Python 3); ``''.join`` behaves identically on both.
    """
    return ''.join(chr(ord(c) ^ 10) for c in data)
def readFile(path, offset=0, size=-1, xor_data=False):
    """Read specified block from file, using the given size and offset"""
    with open(path, 'rb') as fd:
        fd.seek(offset)
        data = fd.read(size)
    if xor_data:
        return _xorData(data)
    return data
def writeFile(path, buf, offset=0, xor_data=False):
    """Write specified block on file at the given offset"""
    payload = _xorData(buf) if xor_data else buf
    with open(path, 'wb') as fd:
        fd.seek(offset)
        fd.write(payload)
    return len(payload)
def print_money(amount):
    """Format an atomic-unit amount (1e-9 RYO) as an HTML money string.

    Raises Exception if *amount* cannot be converted to an integer.
    """
    try:
        amount = int(amount)
    except (TypeError, ValueError):
        # Was a bare ``except:`` -- narrowed to the conversion failures.
        raise Exception("Error parsing amount. Money amount must be an integer.")
    return "%s <small>RYO</small>" % ("{:,.9f}".format(amount/1000000000.))
def print_money2(amount):
    """Format an atomic-unit amount (1e-9 RYO) as a plain numeric string.

    Raises Exception if *amount* cannot be converted to an integer.
    """
    try:
        amount = int(amount)
    except (TypeError, ValueError):
        # Was a bare ``except:`` -- narrowed to the conversion failures.
        raise Exception("Error parsing amount. Money amount must be an integer.")
    return "%s" % ("{:,.9f}".format(amount/1000000000.))
ace704cfcc46d5152741a5620b141377148266a6 | 4,365 | py | Python | tests/unit/task/contexts/network/test_allow_ssh.py | DavidLiu506/rally-openstack-alcor | 8fbaf6517fd9818ee569f9c3061d66b869026159 | [
"Apache-2.0"
] | null | null | null | tests/unit/task/contexts/network/test_allow_ssh.py | DavidLiu506/rally-openstack-alcor | 8fbaf6517fd9818ee569f9c3061d66b869026159 | [
"Apache-2.0"
] | null | null | null | tests/unit/task/contexts/network/test_allow_ssh.py | DavidLiu506/rally-openstack-alcor | 8fbaf6517fd9818ee569f9c3061d66b869026159 | [
"Apache-2.0"
] | 1 | 2021-08-10T03:11:51.000Z | 2021-08-10T03:11:51.000Z | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
from rally_openstack.task.contexts.network import allow_ssh
from tests.unit import test
CTX = "rally_openstack.task.contexts.network.allow_ssh"
class AllowSSHContextTestCase(test.TestCase):
    """Unit tests for the allow_ssh.AllowSSH context's setup()."""

    def setUp(self):
        super(AllowSSHContextTestCase, self).setUp()
        self.users_count = 3
        self.ctx = test.get_test_context()
        # Users 1..3 land in tenants "uuid0", "uuid0", "uuid1" (i // 3),
        # so two users share a tenant and the per-tenant de-duplication
        # path in setup() is exercised.
        self.ctx.update(
            users=[
                {
                    "tenant_id": f"uuid{i // 3}",
                    "credential": mock.MagicMock()
                }
                for i in range(1, self.users_count + 1)
            ],
            admin={
                "tenant_id": "uuid2",
                "credential": mock.MagicMock()},
            tenants={
                "uuid1": {"id": "uuid1", "name": "uuid1"},
                "uuid2": {"id": "uuid2", "name": "uuid1"}
            }
        )

    def test_setup(self):
        # Arrange: every user's mocked neutron client advertises the
        # security-group extension and returns a distinct security group.
        for i, user in enumerate(self.ctx["users"]):
            clients = user["credential"].clients.return_value
            nc = clients.neutron.return_value
            nc.list_extensions.return_value = {
                "extensions": [{"alias": "security-group"}]
            }
            nc.create_security_group.return_value = {
                "security_group": {
                    "name": "xxx",
                    "id": f"security-group-{i}",
                    "security_group_rules": []
                }
            }

        allow_ssh.AllowSSH(self.ctx).setup()

        # admin user should not be used
        self.assertFalse(self.ctx["admin"]["credential"].clients.called)

        processed_tenants = {}
        for i, user in enumerate(self.ctx["users"]):
            clients = user["credential"].clients.return_value
            nc = clients.neutron.return_value
            # Only the first user's client should probe the extension list.
            if i == 0:
                nc.list_extensions.assert_called_once_with()
            else:
                self.assertFalse(nc.list_extensions.called)

            if user["tenant_id"] in processed_tenants:
                # Tenant already handled: no new group and no new rules.
                self.assertFalse(nc.create_security_group.called)
                self.assertFalse(nc.create_security_group_rule.called)
            else:
                nc.create_security_group.assert_called_once_with({
                    "security_group": {
                        "name": mock.ANY,
                        "description": mock.ANY
                    }
                })
                secgroup = nc.create_security_group.return_value
                secgroup = secgroup["security_group"]
                # One rule-creation call per entry in _RULES_TO_ADD, each
                # stamped with the freshly created group's id.
                rules = copy.deepcopy(allow_ssh._RULES_TO_ADD)
                for rule in rules:
                    rule["security_group_id"] = secgroup["id"]
                self.assertEqual(
                    [mock.call({"security_group_rule": rule})
                     for rule in rules],
                    nc.create_security_group_rule.call_args_list
                )
                processed_tenants[user["tenant_id"]] = secgroup

            self.assertEqual(processed_tenants[user["tenant_id"]]["id"],
                             user["secgroup"]["id"])

    def test_setup_no_security_group_extension(self):
        # Without the security-group extension, setup() must do nothing
        # beyond the single extension-list probe on the first user.
        clients = self.ctx["users"][0]["credential"].clients.return_value
        nc = clients.neutron.return_value
        nc.list_extensions.return_value = {"extensions": []}

        allow_ssh.AllowSSH(self.ctx).setup()

        # admin user should not be used
        self.assertFalse(self.ctx["admin"]["credential"].clients.called)

        nc.list_extensions.assert_called_once_with()
        for i, user in enumerate(self.ctx["users"]):
            if i == 0:
                continue
            self.assertFalse(user["credential"].clients.called)
ace705353147bb4859228e4274b9273df3e38714 | 321 | py | Python | share/migrations/0019_merge.py | felliott/SHARE | 8fd60ff4749349c9b867f6188650d71f4f0a1a56 | [
"Apache-2.0"
] | null | null | null | share/migrations/0019_merge.py | felliott/SHARE | 8fd60ff4749349c9b867f6188650d71f4f0a1a56 | [
"Apache-2.0"
] | null | null | null | share/migrations/0019_merge.py | felliott/SHARE | 8fd60ff4749349c9b867f6188650d71f4f0a1a56 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-02-03 19:26
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration joining the 0018_fuzzycount and 0018_store_favicons
    branches; it performs no schema changes of its own."""

    dependencies = [
        ('share', '0018_fuzzycount'),
        ('share', '0018_store_favicons'),
    ]

    operations = [
    ]
| 18.882353 | 47 | 0.64486 |
ace70568143ad69666d0d12ddbf6dec00b1edb69 | 1,962 | py | Python | src/pymap3d/utils.py | lvcarlosja/pymap3d | 4a20f1e269dd748eb55c47dae2ed1a9d0c5c6cd7 | [
"BSD-2-Clause"
] | null | null | null | src/pymap3d/utils.py | lvcarlosja/pymap3d | 4a20f1e269dd748eb55c47dae2ed1a9d0c5c6cd7 | [
"BSD-2-Clause"
] | null | null | null | src/pymap3d/utils.py | lvcarlosja/pymap3d | 4a20f1e269dd748eb55c47dae2ed1a9d0c5c6cd7 | [
"BSD-2-Clause"
] | null | null | null | """Utility functions
all assume radians"""
import typing
from .ellipsoid import Ellipsoid
try:
from numpy import hypot, cos, sin, arctan2 as atan2, radians, pi, asarray
except ImportError:
from math import atan2, hypot, cos, sin, radians, pi
asarray = None
__all__ = ["cart2pol", "pol2cart", "cart2sph", "sph2cart", "sign"]
if typing.TYPE_CHECKING:
from numpy import ndarray
def sign(x: "ndarray") -> "ndarray":
    """ signum function """
    if x > 0:
        return 1.0
    if x < 0:
        return -1.0
    return 0.0
def cart2pol(x: "ndarray", y: "ndarray") -> typing.Tuple["ndarray", "ndarray"]:
    """Transform Cartesian to polar coordinates"""
    theta = atan2(y, x)
    rho = hypot(x, y)
    return theta, rho
def pol2cart(theta: "ndarray", rho: "ndarray") -> typing.Tuple["ndarray", "ndarray"]:
    """Transform polar to Cartesian coordinates"""
    x = rho * cos(theta)
    y = rho * sin(theta)
    return x, y
def cart2sph(x: "ndarray", y: "ndarray", z: "ndarray") -> typing.Tuple["ndarray", "ndarray", "ndarray"]:
    """Transform Cartesian to spherical coordinates"""
    # Planar (xy) distance feeds both the radius and the elevation.
    hxy = hypot(x, y)
    return atan2(y, x), atan2(z, hxy), hypot(hxy, z)
def sph2cart(az: "ndarray", el: "ndarray", r: "ndarray") -> typing.Tuple["ndarray", "ndarray", "ndarray"]:
    """Transform spherical to Cartesian coordinates"""
    # Projection of the radius onto the xy-plane.
    planar = r * cos(el)
    return planar * cos(az), planar * sin(az), r * sin(el)
def sanitize(lat: "ndarray", ell: Ellipsoid, deg: bool) -> typing.Tuple["ndarray", Ellipsoid]:
    """Normalise latitude to radians, default the ellipsoid, and validate
    that latitude lies in [-pi/2, pi/2]."""
    if ell is None:
        ell = Ellipsoid()
    if asarray is not None:
        lat = asarray(lat)
    if deg:
        lat = radians(lat)
    # With numpy available the check must reduce elementwise results.
    if asarray is not None:
        out_of_range = (abs(lat) > pi / 2).any()
    else:
        out_of_range = abs(lat) > pi / 2
    if out_of_range:
        raise ValueError("-pi/2 <= latitude <= pi/2")
    return lat, ell
| 25.815789 | 106 | 0.594801 |
ace7061645f8c41eaf96e0628632f58c2f33984a | 541 | py | Python | beancount_gmail/uk_amazon_email/__init__.py | kubauk/beancount-import-gmail | cf462d0dfb774d26c21a633bd460a0dfb1b2476b | [
"MIT"
] | 1 | 2022-01-10T01:52:21.000Z | 2022-01-10T01:52:21.000Z | beancount_gmail/uk_amazon_email/__init__.py | kubauk/beancount-import-gmail | cf462d0dfb774d26c21a633bd460a0dfb1b2476b | [
"MIT"
] | 1 | 2022-01-13T22:00:18.000Z | 2022-01-13T23:03:49.000Z | beancount_gmail/uk_amazon_email/__init__.py | kubauk/beancount-import-gmail | cf462d0dfb774d26c21a633bd460a0dfb1b2476b | [
"MIT"
] | 1 | 2022-01-13T21:42:20.000Z | 2022-01-13T21:42:20.000Z | from datetime import datetime
from bs4 import BeautifulSoup
from beancount_gmail.email_parser_protocol import EmailParser
from beancount_gmail.receipt import Receipt
from beancount_gmail.uk_amazon_email.parsing import extract_receipts
class UKAmazonParser(EmailParser):
    """EmailParser implementation for Amazon UK order-confirmation emails."""

    def extract_receipts(self, message_date: datetime, soup: BeautifulSoup) -> list[Receipt]:
        # Thin delegation to the module-level parsing helper.
        return extract_receipts(message_date, soup)

    def search_query(self) -> str:
        # Gmail search expression used to locate the confirmation emails.
        return r'\'Your Amazon.co.uk order confirmation\' auto-confirm@amazon.co.uk'
| 33.8125 | 93 | 0.796673 |
ace7066ff7fbc60b2b11e821619a47703d6fc9cc | 773 | py | Python | paddleseg3d/models/losses/__init__.py | parap1uie-s/PaddleSeg3D | 419e8158f057c98e3c78b2a5f80254259ec8478a | [
"Apache-2.0"
] | null | null | null | paddleseg3d/models/losses/__init__.py | parap1uie-s/PaddleSeg3D | 419e8158f057c98e3c78b2a5f80254259ec8478a | [
"Apache-2.0"
] | null | null | null | paddleseg3d/models/losses/__init__.py | parap1uie-s/PaddleSeg3D | 419e8158f057c98e3c78b2a5f80254259ec8478a | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .dice_loss import DiceLoss
from .binary_cross_entropy_loss import BCELoss
from .cross_entropy_loss import CrossEntropyLoss
from .mixes_losses import MixedLoss
| 42.944444 | 74 | 0.789133 |
ace70914a0255667e2cd21dc6d22037801ee7453 | 826 | py | Python | hwtest/automated/otg_test.py | crvallance/wlanpi-hwtest | 8858ef6e8fa78767238b968b121b4d5ab2155701 | [
"MIT"
] | null | null | null | hwtest/automated/otg_test.py | crvallance/wlanpi-hwtest | 8858ef6e8fa78767238b968b121b4d5ab2155701 | [
"MIT"
] | null | null | null | hwtest/automated/otg_test.py | crvallance/wlanpi-hwtest | 8858ef6e8fa78767238b968b121b4d5ab2155701 | [
"MIT"
] | null | null | null | from hwtest.shell_utils import is_module_present, run_command
# Further reading: https://michael.stapelberg.ch/posts/2021-04-27-linux-usb-virtual-serial-cdc-acm/
def test_RNDIS_gadget():
    """
    Test for idProduct 0xa4a2 Linux-USB Ethernet/RNDIS Gadget in `lsusb` output
    """
    lsusb_output = run_command(["lsusb"])
    for marker in (":a4a2 ", "RNDIS"):
        assert marker in lsusb_output
def test_cdc_ether_mod():
    """
    Test command:
    lsmod | grep cdc_ether
    Results:
    True - cdc_ether module detected in lsmod
    False - not detected
    Description:
    g_ether is used on the device/peripheral side, cdc_ether is used on the host side.
    If we see cdc_ether loaded then we know communication is established between the 2 USB ports.
    """
    # is_module_present already yields a boolean; the former `== True`
    # comparison was redundant.
    assert is_module_present("cdc_ether")
| 25.030303 | 101 | 0.684019 |
ace709e95f3399fac7fbdb38df2899695be32a99 | 77 | py | Python | mercury/nnet/__init__.py | ludius0/Mercury | 19831025a7325c59d77e9d430df4fd9167d36846 | [
"MIT"
] | null | null | null | mercury/nnet/__init__.py | ludius0/Mercury | 19831025a7325c59d77e9d430df4fd9167d36846 | [
"MIT"
] | null | null | null | mercury/nnet/__init__.py | ludius0/Mercury | 19831025a7325c59d77e9d430df4fd9167d36846 | [
"MIT"
] | null | null | null | from .activation import *
from .loss_function import *
from .optim import * | 25.666667 | 29 | 0.753247 |
ace70a41480ae55d979a80ae62484c00ab6fea49 | 2,313 | py | Python | hack/macros.py | dustinsmith1024/docs-1 | 74773b1201e459cc90e55c0fc951d84c62e82581 | [
"Apache-2.0"
] | 3,383 | 2018-07-23T21:00:17.000Z | 2022-03-30T17:13:52.000Z | hack/macros.py | dustinsmith1024/docs-1 | 74773b1201e459cc90e55c0fc951d84c62e82581 | [
"Apache-2.0"
] | 4,617 | 2018-07-23T21:55:06.000Z | 2022-03-31T21:52:36.000Z | hack/macros.py | dustinsmith1024/docs-1 | 74773b1201e459cc90e55c0fc951d84c62e82581 | [
"Apache-2.0"
] | 1,240 | 2018-07-23T20:36:04.000Z | 2022-03-30T20:03:07.000Z | import os
def define_env(env):
    """Register the `feature` and `artifact` macros with mkdocs-macros."""

    @env.macro
    def feature(alpha="", beta="", stable=""):
        """Render a feature-availability admonition for the given versions."""
        versions = []
        descriptions = []
        if alpha != "":
            versions.append('<span class="feature-alpha">alpha</span> since Knative v{version}'.format(version=alpha))
            descriptions.append(' - <span class="feature-alpha">alpha</span> features are experimental, and may change or be removed without notice.')
        if beta != "":
            versions.append('<span class="feature-beta">beta</span> since Knative v{version}'.format(version=beta))
            descriptions.append(' - <span class="feature-beta">beta</span> features are well-tested and enabling them is considered safe. Support for the overall feature will not be dropped, though details may change in incompatible ways.')
        if stable != "":
            versions.append('<span class="feature-stable">stable</span> since Knative v{version}'.format(version=stable))
            descriptions.append(' - <span class="feature-stable">stable</span> features will be maintained for many future versions.')
        return '??? info "Feature Availability: ' + ', '.join(versions) + '"\n' + '\n'.join(descriptions)

    @env.macro
    def artifact(repo, file, org="knative"):
        """Generates a download link for the current release version.

        When the version in the KNATIVE_VERSION environment variable is
        empty this links to googlestorage, otherwise it links via
        the matching release in github.
        """
        version = os.environ.get("KNATIVE_VERSION")
        # Bug fix: treat an UNSET *and* an empty variable as "nightly",
        # matching the docstring; the old `== None` check sent an empty
        # string down the GitHub branch and produced a broken URL.
        if not version:
            return 'https://storage.googleapis.com/knative-nightly/{repo}/latest/{file}'.format(
                repo=repo,
                file=file)
        if version.startswith("v1."):
            # v1.x releases are tagged "knative-v1.x.y" on GitHub.
            return 'https://github.com/{org}/{repo}/releases/download/knative-{version}/{file}'.format(
                repo=repo,
                file=file,
                version=version,
                org=org)
        return 'https://github.com/{org}/{repo}/releases/download/{version}/{file}'.format(
            repo=repo,
            file=file,
            version=version,
            org=org)
| 50.282609 | 243 | 0.586684 |
ace70ad1a08da82b1a9bc8f6b036d62d60beef04 | 5,555 | py | Python | build/android/pylib/local/device/local_device_test_run_test.py | google-ar/chromium | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 777 | 2017-08-29T15:15:32.000Z | 2022-03-21T05:29:41.000Z | build/android/pylib/local/device/local_device_test_run_test.py | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 66 | 2017-08-30T18:31:18.000Z | 2021-08-02T10:59:35.000Z | build/android/pylib/local/device/local_device_test_run_test.py | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 123 | 2017-08-30T01:19:34.000Z | 2022-03-17T22:55:31.000Z | #!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=protected-access
import unittest
from pylib.base import base_test_result
from pylib.constants import host_paths
from pylib.local.device import local_device_test_run
with host_paths.SysPath(host_paths.PYMOCK_PATH):
import mock # pylint: disable=import-error
class SubstituteDeviceRootTest(unittest.TestCase):
  """Tests for local_device_test_run.SubstituteDeviceRoot."""

  def testNoneDevicePath(self):
    # A None device path resolves to the device root itself.
    substituted = local_device_test_run.SubstituteDeviceRoot(
        None, '/fake/device/root')
    self.assertEquals('/fake/device/root', substituted)

  def testStringDevicePath(self):
    # A plain string device path passes through untouched.
    substituted = local_device_test_run.SubstituteDeviceRoot(
        '/another/fake/device/path', '/fake/device/root')
    self.assertEquals('/another/fake/device/path', substituted)

  def testListWithNoneDevicePath(self):
    # None inside a list component is replaced by the device root.
    substituted = local_device_test_run.SubstituteDeviceRoot(
        [None, 'subpath'], '/fake/device/root')
    self.assertEquals('/fake/device/root/subpath', substituted)

  def testListWithoutNoneDevicePath(self):
    # A list with no None is simply joined into a path.
    substituted = local_device_test_run.SubstituteDeviceRoot(
        ['/', 'another', 'fake', 'device', 'path'],
        '/fake/device/root')
    self.assertEquals('/another/fake/device/path', substituted)
class TestLocalDeviceTestRun(local_device_test_run.LocalDeviceTestRun):
  # Minimal concrete subclass for exercising base-class logic; the
  # abstract members are deliberately left unimplemented.
  # pylint: disable=abstract-method

  def __init__(self):
    super(TestLocalDeviceTestRun, self).__init__(
        mock.MagicMock(), mock.MagicMock())
class TestLocalDeviceNonStringTestRun(
    local_device_test_run.LocalDeviceTestRun):
  # Variant whose tests are dicts instead of strings; unique names come
  # from the 'name' key.
  # pylint: disable=abstract-method

  def __init__(self):
    super(TestLocalDeviceNonStringTestRun, self).__init__(
        mock.MagicMock(), mock.MagicMock())

  def _GetUniqueTestName(self, test):
    # Tests are dict-shaped in this variant.
    return test['name']
class LocalDeviceTestRunTest(unittest.TestCase):
  """Tests for LocalDeviceTestRun._GetTestsToRetry."""

  def testGetTestsToRetry_allTestsPassed(self):
    # No failures: the retry list must be empty.
    results = [
        base_test_result.BaseTestResult(
            'Test1', base_test_result.ResultType.PASS),
        base_test_result.BaseTestResult(
            'Test2', base_test_result.ResultType.PASS),
    ]

    tests = [r.GetName() for r in results]
    try_results = base_test_result.TestRunResults()
    try_results.AddResults(results)

    test_run = TestLocalDeviceTestRun()
    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
    self.assertEquals(0, len(tests_to_retry))

  def testGetTestsToRetry_testFailed(self):
    # A FAIL result puts that test (and only it) on the retry list.
    results = [
        base_test_result.BaseTestResult(
            'Test1', base_test_result.ResultType.FAIL),
        base_test_result.BaseTestResult(
            'Test2', base_test_result.ResultType.PASS),
    ]

    tests = [r.GetName() for r in results]
    try_results = base_test_result.TestRunResults()
    try_results.AddResults(results)

    test_run = TestLocalDeviceTestRun()
    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
    self.assertEquals(1, len(tests_to_retry))
    self.assertIn('Test1', tests_to_retry)

  def testGetTestsToRetry_testUnknown(self):
    # A test with no recorded result at all must also be retried.
    results = [
        base_test_result.BaseTestResult(
            'Test2', base_test_result.ResultType.PASS),
    ]

    tests = ['Test1'] + [r.GetName() for r in results]
    try_results = base_test_result.TestRunResults()
    try_results.AddResults(results)

    test_run = TestLocalDeviceTestRun()
    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
    self.assertEquals(1, len(tests_to_retry))
    self.assertIn('Test1', tests_to_retry)

  def testGetTestsToRetry_wildcardFilter_allPass(self):
    # A wildcard entry whose expansions all passed is not retried.
    results = [
        base_test_result.BaseTestResult(
            'TestCase.Test1', base_test_result.ResultType.PASS),
        base_test_result.BaseTestResult(
            'TestCase.Test2', base_test_result.ResultType.PASS),
    ]

    tests = ['TestCase.*']
    try_results = base_test_result.TestRunResults()
    try_results.AddResults(results)

    test_run = TestLocalDeviceTestRun()
    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
    self.assertEquals(0, len(tests_to_retry))

  def testGetTestsToRetry_wildcardFilter_oneFails(self):
    # A single failing expansion keeps the whole wildcard on the retry list.
    results = [
        base_test_result.BaseTestResult(
            'TestCase.Test1', base_test_result.ResultType.PASS),
        base_test_result.BaseTestResult(
            'TestCase.Test2', base_test_result.ResultType.FAIL),
    ]

    tests = ['TestCase.*']
    try_results = base_test_result.TestRunResults()
    try_results.AddResults(results)

    test_run = TestLocalDeviceTestRun()
    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
    self.assertEquals(1, len(tests_to_retry))
    self.assertIn('TestCase.*', tests_to_retry)

  def testGetTestsToRetry_nonStringTests(self):
    # Dict-shaped tests: the retry entries are the original dict objects.
    results = [
        base_test_result.BaseTestResult(
            'TestCase.Test1', base_test_result.ResultType.PASS),
        base_test_result.BaseTestResult(
            'TestCase.Test2', base_test_result.ResultType.FAIL),
    ]

    tests = [
        {'name': 'TestCase.Test1'},
        {'name': 'TestCase.Test2'},
    ]
    try_results = base_test_result.TestRunResults()
    try_results.AddResults(results)

    test_run = TestLocalDeviceNonStringTestRun()
    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
    self.assertEquals(1, len(tests_to_retry))
    self.assertIsInstance(tests_to_retry[0], dict)
    self.assertEquals(tests[1], tests_to_retry[0])
if __name__ == '__main__':
  # Allow running this test module directly, with verbose output.
  unittest.main(verbosity=2)
| 31.742857 | 72 | 0.712871 |
ace70b3996830333aed623bd863c1e6dd4ee765a | 1,102 | py | Python | django/solution/addressbook/addressbook/contact/migrations/0001_initial.py | giserh/book-python | ebd4e70cea1dd56986aa8efbae3629ba3f1ba087 | [
"MIT"
] | 1 | 2019-01-02T15:04:08.000Z | 2019-01-02T15:04:08.000Z | django/solution/addressbook/addressbook/contact/migrations/0001_initial.py | giserh/book-python | ebd4e70cea1dd56986aa8efbae3629ba3f1ba087 | [
"MIT"
] | null | null | null | django/solution/addressbook/addressbook/contact/migrations/0001_initial.py | giserh/book-python | ebd4e70cea1dd56986aa8efbae3629ba3f1ba087 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.6 on 2018-06-13 09:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the contact app: creates the Contact table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Contact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('first_name', models.CharField(max_length=30, verbose_name='First Name')),
                ('last_name', models.CharField(db_index=True, max_length=30, verbose_name='Last Name')),
                ('date_of_birth', models.DateField(blank=True, default=None, null=True, verbose_name='Date of birth')),
                ('email', models.EmailField(blank=True, default=None, max_length=254, null=True, verbose_name='Email')),
                ('bio', models.TextField(blank=True, default=None, null=True, verbose_name='Bio')),
            ],
        ),
    ]
| 39.357143 | 120 | 0.611615 |
ace70ca96a953bf5cc9204bfabbdb4410604c755 | 1,058 | py | Python | ax/exceptions/data_provider.py | mpolson64/Ax-1 | cf9e12cc1253efe0fc893f2620e99337e0927a26 | [
"MIT"
] | 1 | 2022-02-10T10:51:40.000Z | 2022-02-10T10:51:40.000Z | ax/exceptions/data_provider.py | mpolson64/Ax-1 | cf9e12cc1253efe0fc893f2620e99337e0927a26 | [
"MIT"
] | null | null | null | ax/exceptions/data_provider.py | mpolson64/Ax-1 | cf9e12cc1253efe0fc893f2620e99337e0927a26 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any
class DataProviderError(Exception):
    """Base Exception for Ax DataProviders.

    The type of the data provider must be included.
    The raw error is stored in the data_provider_error section,
    and an Ax-friendly message is stored as the actual error message.
    """

    def __init__(
        self, message: str, data_provider: str, data_provider_error: Any
    ) -> None:
        self.message = message
        self.data_provider = data_provider
        self.data_provider_error = data_provider_error

    def __str__(self) -> str:
        dp = self.data_provider
        return (
            f"{self.message}. \n Error thrown by: {dp} data provider \n"
            f"Native {dp} data provider error: {self.data_provider_error}"
        )
| 31.117647 | 72 | 0.65879 |
ace70d641b62437041ab531174dcf1119849304f | 16,009 | py | Python | defense/gin.py | Harshitha-Nagapudi/NN_Project | f0df170a33b6b35a00929a0104dc6ee04c5062a9 | [
"MIT"
] | 28 | 2020-10-18T06:21:09.000Z | 2022-03-28T07:48:11.000Z | defense/gin.py | Harshitha-Nagapudi/NN_Project | f0df170a33b6b35a00929a0104dc6ee04c5062a9 | [
"MIT"
] | 8 | 2020-12-21T09:20:13.000Z | 2021-09-15T09:58:23.000Z | defense/gin.py | Harshitha-Nagapudi/NN_Project | f0df170a33b6b35a00929a0104dc6ee04c5062a9 | [
"MIT"
] | 9 | 2021-02-15T15:16:48.000Z | 2022-03-09T04:21:13.000Z | import torch.nn as nn
import torch.nn.functional as F
import math
import torch
import torch.optim as optim
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from deeprobust.graph import utils
from copy import deepcopy
from sklearn.metrics import jaccard_score
from sklearn.metrics.pairwise import cosine_similarity,euclidean_distances
import numpy as np
from deeprobust.graph.utils import *
from torch_geometric.nn import GINConv, global_add_pool, GATConv, GCNConv, ChebConv, JumpingKnowledge
from torch.nn import Sequential, Linear, ReLU
from scipy.sparse import lil_matrix
from sklearn.preprocessing import normalize
from sklearn.metrics import f1_score
from deeprobust.graph.defense.basicfunction import att_coef
class GraphConvolution(Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    """

    def __init__(self, in_features, out_features, with_bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if with_bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init in [-1/sqrt(fan_out), 1/sqrt(fan_out)].
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input, adj, edge_weight=None):
        # Feature transform: sparse or dense matmul depending on the input.
        matmul = torch.spmm if input.data.is_sparse else torch.mm
        support = matmul(input, self.weight)
        # Aggregation over neighbours via the (sparse) adjacency matrix.
        output = torch.spmm(adj, support)
        if self.bias is None:
            return output
        return output + self.bias

    def __repr__(self):
        return '%s (%s -> %s)' % (
            self.__class__.__name__, self.in_features, self.out_features)
class GIN(nn.Module):
    """Graph Isomorphism Network (GIN) node classifier with optional
    similarity-based edge re-weighting.

    Two ``GINConv`` layers (each built from a small Linear+ReLU MLP) followed
    by a linear classifier; the output is ``log_softmax`` over classes and
    training uses ``F.nll_loss``. When ``attention=True`` is passed to
    :meth:`fit`, edge weights are recomputed from feature cosine similarity
    (:meth:`att_coef`) and mixed with the raw weights through the learnable
    ``self.gate`` parameter.

    NOTE(review): ``self.attention`` is assigned only inside :meth:`fit`, so
    calling :meth:`forward`/:meth:`predict` before :meth:`fit` raises
    ``AttributeError``. ``self.gc3``, ``self.fc1`` and ``self.jump`` are
    created but never used in :meth:`forward`.
    """
    def __init__(self, nfeat, nhid, nclass, dropout=0.5, lr=0.01, weight_decay=5e-4, n_edge=1,with_relu=True, drop=False,
                 with_bias=True, device=None):
        """Build the network.

        Parameters
        ----------
        nfeat : int
            Number of input features per node.
        nhid : int
            Hidden dimension of the GIN MLPs.
        nclass : int
            Number of output classes (cast to ``int``).
        dropout : float
            Dropout probability applied between layers in :meth:`forward`.
        lr : float
            Learning rate for the Adam optimizer created in the training helpers.
        weight_decay : float
            L2 penalty; forced to 0 when ``with_relu`` is False.
        n_edge : int
            Stored on the instance but not used by the visible code.
        drop : bool
            If True, :meth:`att_coef` runs a learnable edge-drop step.
            NOTE(review): that path calls ``self.drop_learn_1``, which is never
            defined in this class -- it would raise ``AttributeError``; confirm.
        with_bias : bool
            Stored but not used by the visible code (GINConv MLPs use defaults).
        device : torch.device or str
            Required; tensors are moved to this device in :meth:`fit`.
        """
        super(GIN, self).__init__()
        assert device is not None, "Please specify 'device'!"
        self.device = device
        self.nfeat = nfeat
        self.hidden_sizes = [nhid]
        self.nclass = int(nclass)
        self.dropout = dropout
        self.lr = lr
        self.drop = drop
        if not with_relu:
            # Without ReLU the model is treated as linear; disable L2 penalty.
            self.weight_decay = 0
        else:
            self.weight_decay = weight_decay
        self.with_relu = with_relu
        self.with_bias = with_bias
        self.n_edge = n_edge
        self.output = None
        self.best_model = None
        self.best_output = None
        self.adj_norm = None
        self.features = None
        self.gate = Parameter(torch.rand(1))  # learnable gate in [0, 1) mixing raw vs. attention edge weights
        nclass = int(nclass)
        """GIN from torch-geometric"""
        num_features = nfeat
        dim = nhid
        # Each GINConv wraps a one-hidden-layer MLP (Linear + ReLU).
        nn1 = Sequential(Linear(num_features, dim), ReLU(), )
        self.gc1 = GINConv(nn1)
        # self.bn1 = torch.nn.BatchNorm1d(dim)
        nn2 = Sequential(Linear(dim, dim), ReLU(), )
        self.gc2 = GINConv(nn2)
        nn3 = Sequential(Linear(dim, dim), ReLU(), )
        self.gc3 = GINConv(nn3)
        self.jump = JumpingKnowledge(mode='cat')
        # self.bn2 = torch.nn.BatchNorm1d(dim)
        self.fc1 = Linear(dim, dim)
        self.fc2 = Linear(dim*1, int(nclass))
    def forward(self, x, adj):
        """Run two GINConv layers and return per-node log-probabilities.

        The edge_index taken from ``adj`` is never changed; only the edge
        weights are updated, and an edge whose weight becomes zero is
        effectively removed.

        NOTE(review): reads ``self.attention`` which is set in :meth:`fit`.
        """
        x = x.to_dense()
        edge_index = adj._indices()
        """GIN"""
        if self.attention:
            # First attention pass: rebuild edge weights from raw features.
            adj = self.att_coef(x, adj, i=0)
        x = F.relu(self.gc1(x, edge_index=edge_index, edge_weight=adj._values()))
        if self.attention:  # if attention=True, use attention mechanism
            # Second pass on hidden features; blend with layer-1 weights via the gate.
            adj_2 = self.att_coef(x, adj, i=1)
            adj_values = self.gate * adj._values() + (1 - self.gate) * adj_2._values()
        else:
            adj_values = adj._values()
        x = F.dropout(x, self.dropout, training=self.training)
        x = F.relu(self.gc2(x, edge_index=edge_index, edge_weight=adj_values))
        x = F.dropout(x, self.dropout,training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
    def initialize(self):
        """Reset parameters of all layers before (re)training."""
        self.gc1.reset_parameters()
        self.gc2.reset_parameters()
        self.fc2.reset_parameters()
        # These layers are unused in forward(); resetting them is best-effort.
        try:
            self.jump.reset_parameters()
            self.gc3.reset_parameters()
            self.fc1.reset_parameters()
        except:
            pass
    def att_coef(self, fea, edge_index, is_lil=False, i=0):
        """Recompute edge weights from pairwise cosine similarity of node
        features and return them as a new sparse adjacency tensor.

        Similarities below 0.1 are zeroed (edge pruning); rows are then
        L1-normalized, a degree-based self-loop weight is added when the
        diagonal is empty, and the resulting weights are exponentiated.

        NOTE(review): the output tensor is moved with ``.cuda()`` regardless
        of ``self.device`` -- breaks on CPU-only machines; confirm intent.
        NOTE(review): ``i`` is accepted but unused here.
        """
        if is_lil == False:
            edge_index = edge_index._indices()
        else:
            edge_index = edge_index.tocoo()
        n_node = fea.shape[0]
        row, col = edge_index[0].cpu().data.numpy()[:], edge_index[1].cpu().data.numpy()[:]
        # row, col = edge_index[0], edge_index[1]
        fea_copy = fea.cpu().data.numpy()
        sim_matrix = cosine_similarity(X=fea_copy, Y=fea_copy)  # full N x N cosine similarity
        # sim_matrix = torch.from_numpy(sim_matrix)
        sim = sim_matrix[row, col]
        # Prune weakly similar edges (threshold 0.1).
        sim[sim<0.1] = 0
        # print('dropped {} edges'.format(1-sim.nonzero()[0].shape[0]/len(sim)))
        # """use jaccard for binary features and cosine for numeric features"""
        # fea_start, fea_end = fea[edge_index[0]], fea[edge_index[1]]
        # isbinray = np.array_equal(fea_copy, fea_copy.astype(bool))  # check is the fea are binary
        # np.seterr(divide='ignore', invalid='ignore')
        # if isbinray:
        #     fea_start, fea_end = fea_start.T, fea_end.T
        #     sim = jaccard_score(fea_start, fea_end, average=None)  # similarity scores of each edge
        # else:
        #     fea_copy[np.isinf(fea_copy)] = 0
        #     fea_copy[np.isnan(fea_copy)] = 0
        #     sim_matrix = cosine_similarity(X=fea_copy, Y=fea_copy)  # try cosine similarity
        #     sim = sim_matrix[edge_index[0], edge_index[1]]
        #     sim[sim < 0.01] = 0
        """build a attention matrix"""
        att_dense = lil_matrix((n_node, n_node), dtype=np.float32)
        att_dense[row, col] = sim
        # Strip an existing self-loop diagonal before normalizing.
        if att_dense[0, 0] == 1:
            att_dense = att_dense - sp.diags(att_dense.diagonal(), offsets=0, format="lil")
        # normalization, make the sum of each row is 1
        att_dense_norm = normalize(att_dense, axis=1, norm='l1')
        """add learnable dropout, make character vector"""
        if self.drop:
            # NOTE(review): self.drop_learn_1 is never defined in this class;
            # this branch would raise AttributeError -- confirm.
            character = np.vstack((att_dense_norm[row, col].A1,
                                   att_dense_norm[col, row].A1))
            character = torch.from_numpy(character.T)
            drop_score = self.drop_learn_1(character)
            drop_score = torch.sigmoid(drop_score)  # do not use softmax since we only have one element
            # Two thresholds binarize the score: keep (1) if > 0.5, drop (0) otherwise.
            mm = torch.nn.Threshold(0.5, 0)
            drop_score = mm(drop_score)
            mm_2 = torch.nn.Threshold(-0.49, 1)
            drop_score = mm_2(-drop_score)
            drop_decision = drop_score.clone().requires_grad_()
            # print('rate of left edges', drop_decision.sum().data/drop_decision.shape[0])
            drop_matrix = lil_matrix((n_node, n_node), dtype=np.float32)
            drop_matrix[row, col] = drop_decision.cpu().data.numpy().squeeze(-1)
            att_dense_norm = att_dense_norm.multiply(drop_matrix.tocsr())  # update, remove the 0 edges
        if att_dense_norm[0, 0] == 0:  # add the weights of self-loop; only add self-loop at the first layer
            degree = (att_dense_norm != 0).sum(1).A1
            # degree = degree.squeeze(-1).squeeze(-1)
            lam = 1 / (degree + 1)  # degree +1 is to add itself
            self_weight = sp.diags(np.array(lam), offsets=0, format="lil")
            att = att_dense_norm + self_weight  # add the self loop
        else:
            att = att_dense_norm
        att_adj = edge_index
        att_edge_weight = att[row, col]
        att_edge_weight = np.exp(att_edge_weight)  # exponent, kind of softmax
        att_edge_weight = torch.tensor(np.array(att_edge_weight)[0], dtype=torch.float32).cuda()
        shape = (n_node, n_node)
        new_adj = torch.sparse.FloatTensor(att_adj, att_edge_weight, shape)
        return new_adj
    def add_loop_sparse(self, adj, fill_value=1):
        """Return ``adj + I`` as a sparse tensor (adds unit self-loops).

        NOTE(review): ``fill_value`` is unused -- the identity always uses 1.
        NOTE(review): ``torch.range`` is deprecated; ``torch.arange`` would
        produce the same int64 indices here.
        """
        # make identify sparse tensor
        row = torch.range(0, int(adj.shape[0]-1), dtype=torch.int64)
        i = torch.stack((row, row), dim=0)
        v = torch.ones(adj.shape[0], dtype=torch.float32)
        shape = adj.shape
        I_n = torch.sparse.FloatTensor(i, v, shape)
        return adj + I_n.to(self.device)
    def fit(self, features, adj, labels, idx_train, idx_val=None, idx_test=None, train_iters=81, att_0=None,
            attention=False, model_name=None, initialize=True, verbose=False, normalize=False, patience=500, ):
        '''
        Train the model. When `idx_val` is not None, pick the best model
        according to the validation loss (or early stopping when
        `patience < train_iters`).

        Inputs that are not torch.Tensor are converted via
        `utils.to_tensor`; everything is moved to `self.device` and
        self-loops are added to `adj` before training.

        NOTE(review): `att_0`, `model_name` and `normalize` are accepted
        but unused in the visible code.
        '''
        self.sim = None
        self.attention = attention
        self.idx_test = idx_test
        # self.device = self.gc1.weight.device
        if initialize:
            self.initialize()
        if type(adj) is not torch.Tensor:
            features, adj, labels = utils.to_tensor(features, adj, labels, device=self.device)
        else:
            features = features.to(self.device)
            adj = adj.to(self.device)
            labels = labels.to(self.device)
        # normalize = False  # we don't need normalize here, the norm is conducted in the GCN (self.gcn1) model
        # if normalize:
        #     if utils.is_sparse_tensor(adj):
        #         adj_norm = utils.normalize_adj_tensor(adj, sparse=True)
        #     else:
        #         adj_norm = utils.normalize_adj_tensor(adj)
        # else:
        #     adj_norm = adj
        adj = self.add_loop_sparse(adj)
        """Make the coefficient D^{-1/2}(A+I)D^{-1/2}"""
        self.adj_norm = adj
        self.features = features
        self.labels = labels
        if idx_val is None:
            self._train_without_val(labels, idx_train, train_iters, verbose)
        else:
            if patience < train_iters:
                self._train_with_early_stopping(labels, idx_train, idx_val, train_iters, patience, verbose)
            else:
                self._train_with_val(labels, idx_train, idx_val, train_iters, verbose)
    def _train_without_val(self, labels, idx_train, train_iters, verbose):
        """Train for a fixed number of iterations; keep the final model output."""
        self.train()
        optimizer = optim.Adam(self.parameters(), lr=self.lr, weight_decay=self.weight_decay)
        for i in range(train_iters):
            optimizer.zero_grad()
            output = self.forward(self.features, self.adj_norm)
            loss_train = F.nll_loss(output[idx_train], labels[idx_train], weight=None)  # this weight is the weight of each training nodes
            loss_train.backward()
            optimizer.step()
            if verbose and i % 10 == 0:
                print('Epoch {}, training loss: {}'.format(i, loss_train.item()))
        self.eval()
        output = self.forward(self.features, self.adj_norm)
        self.output = output
    def _train_with_val(self, labels, idx_train, idx_val, train_iters, verbose):
        """Train and keep the weights of the best model on the validation set
        (best by loss, then by accuracy -- the later criterion wins)."""
        if verbose:
            print('=== training gcn model ===')
        optimizer = optim.Adam(self.parameters(), lr=self.lr, weight_decay=self.weight_decay)
        best_loss_val = 100
        best_acc_val = 0
        for i in range(train_iters):
            self.train()
            optimizer.zero_grad()
            output = self.forward(self.features, self.adj_norm)
            loss_train = F.nll_loss(output[idx_train], labels[idx_train])
            loss_train.backward()
            optimizer.step()
            # pred = output[self.idx_test].max(1)[1]
            # acc_test =accuracy(output[self.idx_test], labels[self.idx_test])
            # acc_test = pred.eq(labels[self.idx_test]).sum().item() / self.idx_test.shape[0]
            self.eval()
            output = self.forward(self.features, self.adj_norm)
            loss_val = F.nll_loss(output[idx_val], labels[idx_val])
            acc_val = utils.accuracy(output[idx_val], labels[idx_val])
            # if verbose and i % 20 == 0:
            #     print('Epoch {}, training loss: {}, val acc: {}'.format(i, loss_train.item(), acc_val))
            if best_loss_val > loss_val:
                best_loss_val = loss_val
                self.output = output
                weights = deepcopy(self.state_dict())
            if acc_val > best_acc_val:
                best_acc_val = acc_val
                self.output = output
                weights = deepcopy(self.state_dict())
        if verbose:
            print('=== picking the best model according to the performance on validation ===')
        self.load_state_dict(weights)
    def _train_with_early_stopping(self, labels, idx_train, idx_val, train_iters, patience, verbose):
        """Train with patience-based early stopping on the validation loss.

        NOTE(review): `weights` is only bound when the validation loss first
        drops below 100 -- if it never does, `load_state_dict(weights)` raises
        UnboundLocalError; confirm this is acceptable.
        """
        if verbose:
            print('=== training gcn model ===')
        optimizer = optim.Adam(self.parameters(), lr=self.lr, weight_decay=self.weight_decay)
        early_stopping = patience
        best_loss_val = 100
        for i in range(train_iters):
            self.train()
            optimizer.zero_grad()
            output = self.forward(self.features, self.adj_norm)
            loss_train = F.nll_loss(output[idx_train], labels[idx_train])
            loss_train.backward()
            optimizer.step()
            self.eval()
            output = self.forward(self.features, self.adj_norm)
            if verbose and i % 10 == 0:
                print('Epoch {}, training loss: {}'.format(i, loss_train.item()))
            loss_val = F.nll_loss(output[idx_val], labels[idx_val])
            if best_loss_val > loss_val:
                # Improvement: snapshot weights and reset the patience counter.
                best_loss_val = loss_val
                self.output = output
                weights = deepcopy(self.state_dict())
                patience = early_stopping
            else:
                patience -= 1
            if i > early_stopping and patience <= 0:
                break
        if verbose:
            print('=== early stopping at {0}, loss_val = {1} ==='.format(i, best_loss_val) )
        self.load_state_dict(weights)
    def test(self, idx_test, model_name=None):
        """Evaluate on `idx_test` using the stored features/adjacency.

        Returns (accuracy, log-probability output). `model_name` is unused.
        """
        # self.model_name = model_name
        self.eval()
        output = self.predict()
        # output = self.output
        loss_test = F.nll_loss(output[idx_test], self.labels[idx_test])
        acc_test = utils.accuracy(output[idx_test], self.labels[idx_test])
        # print("Test set results:",
        #       "loss= {:.4f}".format(loss_test.item()),
        #       "accuracy= {:.4f}".format(acc_test.item()))
        return acc_test, output
    def _set_parameters(self):
        # TODO: placeholder, intentionally not implemented yet.
        pass
    def predict(self, features=None, adj=None):
        '''Return log-probabilities for `features`/`adj` (unnormalized
        inputs by default); with no arguments, reuse the tensors stored
        by `fit`. New inputs replace `self.features`/`self.adj_norm`.'''
        # self.eval()
        if features is None and adj is None:
            return self.forward(self.features, self.adj_norm)
        else:
            if type(adj) is not torch.Tensor:
                features, adj = utils.to_tensor(features, adj, device=self.device)
            self.features = features
            if utils.is_sparse_tensor(adj):
                self.adj_norm = utils.normalize_adj_tensor(adj, sparse=True)
            else:
                self.adj_norm = utils.normalize_adj_tensor(adj)
            return self.forward(self.features, self.adj_norm)
| 39.431034 | 138 | 0.601224 |
ace70dd8086e76f3d5973552befb6fc26686047b | 417 | py | Python | src/tk_mvc/window.py | pladams9/hexsheets | 115d722a90964dd9c02bb79ab71e25f69292d10c | [
"MIT"
] | 2 | 2020-06-05T00:23:00.000Z | 2022-02-27T18:15:34.000Z | src/tk_mvc/window.py | pladams9/hexsheets | 115d722a90964dd9c02bb79ab71e25f69292d10c | [
"MIT"
] | 39 | 2020-06-04T03:39:01.000Z | 2022-03-12T00:34:37.000Z | src/tk_mvc/window.py | pladams9/hex-spreadsheet | 633191cff5d4f3e3bcb28652d00c7d480d1875e9 | [
"MIT"
] | 2 | 2020-06-05T06:04:10.000Z | 2020-10-28T03:45:46.000Z | from tkinter import Frame
from tkinter import Toplevel
class BaseWindow(Frame):
    """
    Common base for every tk_mvc window.

    A BaseWindow is simply a Frame; the View creates a Toplevel and places
    this Frame inside it upon creation.
    """
    def __init__(self, view, parent_toplevel: Toplevel, *args, **kwargs) -> None:
        # Parent this Frame to the Toplevel that hosts the window.
        super().__init__(parent_toplevel)
        # Handle to the hosting Toplevel, then a back-reference to the View.
        # NOTE(review): *args/**kwargs are accepted but not forwarded to
        # Frame -- presumably reserved for subclasses; confirm.
        self._window = parent_toplevel
        self._view = view
| 27.8 | 109 | 0.690647 |
ace70ddefe2c9f775b5136e4644327aafe29fed8 | 27,325 | py | Python | scipy/special/__init__.py | mwtoews/scipy | 3edebe0cb4831ffd52cbd4a5b5550fa16789e441 | [
"BSD-3-Clause"
] | null | null | null | scipy/special/__init__.py | mwtoews/scipy | 3edebe0cb4831ffd52cbd4a5b5550fa16789e441 | [
"BSD-3-Clause"
] | null | null | null | scipy/special/__init__.py | mwtoews/scipy | 3edebe0cb4831ffd52cbd4a5b5550fa16789e441 | [
"BSD-3-Clause"
] | null | null | null | """
========================================
Special functions (:mod:`scipy.special`)
========================================
.. currentmodule:: scipy.special
Nearly all of the functions below are universal functions and follow
broadcasting and automatic array-looping rules. Exceptions are
noted.
.. seealso::
`scipy.special.cython_special` -- Typed Cython versions of special functions
Error handling
==============
Errors are handled by returning NaNs or other appropriate values.
Some of the special function routines can emit warnings or raise
exceptions when an error occurs. By default this is disabled; to
query and control the current error handling state the following
functions are provided.
.. autosummary::
:toctree: generated/
geterr -- Get the current way of handling special-function errors.
seterr -- Set how special-function errors are handled.
errstate -- Context manager for special-function error handling.
SpecialFunctionWarning -- Warning that can be emitted by special functions.
SpecialFunctionError -- Exception that can be raised by special functions.
Available functions
===================
Airy functions
--------------
.. autosummary::
:toctree: generated/
airy -- Airy functions and their derivatives.
airye -- Exponentially scaled Airy functions and their derivatives.
ai_zeros -- [+]Compute `nt` zeros and values of the Airy function Ai and its derivative.
bi_zeros -- [+]Compute `nt` zeros and values of the Airy function Bi and its derivative.
itairy -- Integrals of Airy functions
Elliptic Functions and Integrals
--------------------------------
.. autosummary::
:toctree: generated/
ellipj -- Jacobian elliptic functions
ellipk -- Complete elliptic integral of the first kind.
ellipkm1 -- Complete elliptic integral of the first kind around `m` = 1
ellipkinc -- Incomplete elliptic integral of the first kind
ellipe -- Complete elliptic integral of the second kind
ellipeinc -- Incomplete elliptic integral of the second kind
Bessel Functions
----------------
.. autosummary::
:toctree: generated/
jv -- Bessel function of the first kind of real order and complex argument.
jve -- Exponentially scaled Bessel function of order `v`.
yn -- Bessel function of the second kind of integer order and real argument.
yv -- Bessel function of the second kind of real order and complex argument.
yve -- Exponentially scaled Bessel function of the second kind of real order.
kn -- Modified Bessel function of the second kind of integer order `n`
kv -- Modified Bessel function of the second kind of real order `v`
kve -- Exponentially scaled modified Bessel function of the second kind.
iv -- Modified Bessel function of the first kind of real order.
ive -- Exponentially scaled modified Bessel function of the first kind
hankel1 -- Hankel function of the first kind
hankel1e -- Exponentially scaled Hankel function of the first kind
hankel2 -- Hankel function of the second kind
hankel2e -- Exponentially scaled Hankel function of the second kind
The following is not a universal function:
.. autosummary::
:toctree: generated/
lmbda -- [+]Jahnke-Emden Lambda function, Lambdav(x).
Zeros of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
jnjnp_zeros -- [+]Compute zeros of integer-order Bessel functions Jn and Jn'.
jnyn_zeros -- [+]Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
jn_zeros -- [+]Compute zeros of integer-order Bessel function Jn(x).
jnp_zeros -- [+]Compute zeros of integer-order Bessel function derivative Jn'(x).
yn_zeros -- [+]Compute zeros of integer-order Bessel function Yn(x).
ynp_zeros -- [+]Compute zeros of integer-order Bessel function derivative Yn'(x).
y0_zeros -- [+]Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
y1_zeros -- [+]Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
y1p_zeros -- [+]Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
Faster versions of common Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
j0 -- Bessel function of the first kind of order 0.
j1 -- Bessel function of the first kind of order 1.
y0 -- Bessel function of the second kind of order 0.
y1 -- Bessel function of the second kind of order 1.
i0 -- Modified Bessel function of order 0.
i0e -- Exponentially scaled modified Bessel function of order 0.
i1 -- Modified Bessel function of order 1.
i1e -- Exponentially scaled modified Bessel function of order 1.
k0 -- Modified Bessel function of the second kind of order 0, :math:`K_0`.
k0e -- Exponentially scaled modified Bessel function K of order 0
k1 -- Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
k1e -- Exponentially scaled modified Bessel function K of order 1
Integrals of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
itj0y0 -- Integrals of Bessel functions of order 0
it2j0y0 -- Integrals related to Bessel functions of order 0
iti0k0 -- Integrals of modified Bessel functions of order 0
it2i0k0 -- Integrals related to modified Bessel functions of order 0
besselpoly -- [+]Weighted integral of a Bessel function.
Derivatives of Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
jvp -- Compute nth derivative of Bessel function Jv(z) with respect to `z`.
yvp -- Compute nth derivative of Bessel function Yv(z) with respect to `z`.
kvp -- Compute nth derivative of real-order modified Bessel function Kv(z)
ivp -- Compute nth derivative of modified Bessel function Iv(z) with respect to `z`.
h1vp -- Compute nth derivative of Hankel function H1v(z) with respect to `z`.
h2vp -- Compute nth derivative of Hankel function H2v(z) with respect to `z`.
Spherical Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
spherical_jn -- Spherical Bessel function of the first kind or its derivative.
spherical_yn -- Spherical Bessel function of the second kind or its derivative.
spherical_in -- Modified spherical Bessel function of the first kind or its derivative.
spherical_kn -- Modified spherical Bessel function of the second kind or its derivative.
Riccati-Bessel Functions
^^^^^^^^^^^^^^^^^^^^^^^^
These are not universal functions:
.. autosummary::
:toctree: generated/
riccati_jn -- [+]Compute Ricatti-Bessel function of the first kind and its derivative.
riccati_yn -- [+]Compute Ricatti-Bessel function of the second kind and its derivative.
Struve Functions
----------------
.. autosummary::
:toctree: generated/
struve -- Struve function.
modstruve -- Modified Struve function.
itstruve0 -- Integral of the Struve function of order 0.
it2struve0 -- Integral related to the Struve function of order 0.
itmodstruve0 -- Integral of the modified Struve function of order 0.
Raw Statistical Functions
-------------------------
.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.
.. autosummary::
:toctree: generated/
bdtr -- Binomial distribution cumulative distribution function.
bdtrc -- Binomial distribution survival function.
bdtri -- Inverse function to `bdtr` with respect to `p`.
bdtrik -- Inverse function to `bdtr` with respect to `k`.
bdtrin -- Inverse function to `bdtr` with respect to `n`.
btdtr -- Cumulative distribution function of the beta distribution.
btdtri -- The `p`-th quantile of the beta distribution.
btdtria -- Inverse of `btdtr` with respect to `a`.
   btdtrib -- Inverse of `btdtr` with respect to `b`.
fdtr -- F cumulative distribution function.
fdtrc -- F survival function.
fdtri -- The `p`-th quantile of the F-distribution.
fdtridfd -- Inverse to `fdtr` vs dfd
gdtr -- Gamma distribution cumulative distribution function.
gdtrc -- Gamma distribution survival function.
gdtria -- Inverse of `gdtr` vs a.
gdtrib -- Inverse of `gdtr` vs b.
gdtrix -- Inverse of `gdtr` vs x.
nbdtr -- Negative binomial cumulative distribution function.
nbdtrc -- Negative binomial survival function.
nbdtri -- Inverse of `nbdtr` vs `p`.
nbdtrik -- Inverse of `nbdtr` vs `k`.
nbdtrin -- Inverse of `nbdtr` vs `n`.
ncfdtr -- Cumulative distribution function of the non-central F distribution.
ncfdtridfd -- Calculate degrees of freedom (denominator) for the noncentral F-distribution.
ncfdtridfn -- Calculate degrees of freedom (numerator) for the noncentral F-distribution.
ncfdtri -- Inverse cumulative distribution function of the non-central F distribution.
ncfdtrinc -- Calculate non-centrality parameter for non-central F distribution.
nctdtr -- Cumulative distribution function of the non-central `t` distribution.
nctdtridf -- Calculate degrees of freedom for non-central t distribution.
nctdtrit -- Inverse cumulative distribution function of the non-central t distribution.
nctdtrinc -- Calculate non-centrality parameter for non-central t distribution.
nrdtrimn -- Calculate mean of normal distribution given other params.
nrdtrisd -- Calculate standard deviation of normal distribution given other params.
pdtr -- Poisson cumulative distribution function
pdtrc -- Poisson survival function
pdtri -- Inverse to `pdtr` vs m
pdtrik -- Inverse to `pdtr` vs k
stdtr -- Student t distribution cumulative distribution function
stdtridf -- Inverse of `stdtr` vs df
stdtrit -- Inverse of `stdtr` vs `t`
chdtr -- Chi square cumulative distribution function
chdtrc -- Chi square survival function
chdtri -- Inverse to `chdtrc`
chdtriv -- Inverse to `chdtr` vs `v`
ndtr -- Gaussian cumulative distribution function.
log_ndtr -- Logarithm of Gaussian cumulative distribution function.
ndtri -- Inverse of `ndtr` vs x
chndtr -- Non-central chi square cumulative distribution function
chndtridf -- Inverse to `chndtr` vs `df`
chndtrinc -- Inverse to `chndtr` vs `nc`
chndtrix -- Inverse to `chndtr` vs `x`
smirnov -- Kolmogorov-Smirnov complementary cumulative distribution function
smirnovi -- Inverse to `smirnov`
kolmogorov -- Complementary cumulative distribution function of Kolmogorov distribution
kolmogi -- Inverse function to `kolmogorov`
tklmbda -- Tukey-Lambda cumulative distribution function
logit -- Logit ufunc for ndarrays.
expit -- Expit ufunc for ndarrays.
boxcox -- Compute the Box-Cox transformation.
boxcox1p -- Compute the Box-Cox transformation of 1 + `x`.
inv_boxcox -- Compute the inverse of the Box-Cox transformation.
inv_boxcox1p -- Compute the inverse of the Box-Cox transformation.
owens_t -- Owen's T Function.
Information Theory Functions
----------------------------
.. autosummary::
:toctree: generated/
entr -- Elementwise function for computing entropy.
rel_entr -- Elementwise function for computing relative entropy.
kl_div -- Elementwise function for computing Kullback-Leibler divergence.
huber -- Huber loss function.
pseudo_huber -- Pseudo-Huber loss function.
Gamma and Related Functions
---------------------------
.. autosummary::
:toctree: generated/
gamma -- Gamma function.
gammaln -- Logarithm of the absolute value of the Gamma function for real inputs.
loggamma -- Principal branch of the logarithm of the Gamma function.
gammasgn -- Sign of the gamma function.
gammainc -- Regularized lower incomplete gamma function.
gammaincinv -- Inverse to `gammainc`
gammaincc -- Regularized upper incomplete gamma function.
gammainccinv -- Inverse to `gammaincc`
beta -- Beta function.
betaln -- Natural logarithm of absolute value of beta function.
betainc -- Incomplete beta integral.
betaincinv -- Inverse function to beta integral.
psi -- The digamma function.
rgamma -- Gamma function inverted
polygamma -- Polygamma function n.
multigammaln -- Returns the log of multivariate gamma, also sometimes called the generalized gamma.
digamma -- psi(x[, out])
poch -- Rising factorial (z)_m
Error Function and Fresnel Integrals
------------------------------------
.. autosummary::
:toctree: generated/
erf -- Returns the error function of complex argument.
erfc -- Complementary error function, ``1 - erf(x)``.
erfcx -- Scaled complementary error function, ``exp(x**2) * erfc(x)``.
erfi -- Imaginary error function, ``-i erf(i z)``.
erfinv -- Inverse function for erf.
erfcinv -- Inverse function for erfc.
wofz -- Faddeeva function
dawsn -- Dawson's integral.
fresnel -- Fresnel sin and cos integrals
fresnel_zeros -- Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
modfresnelp -- Modified Fresnel positive integrals
modfresnelm -- Modified Fresnel negative integrals
These are not universal functions:
.. autosummary::
:toctree: generated/
erf_zeros -- [+]Compute nt complex zeros of error function erf(z).
fresnelc_zeros -- [+]Compute nt complex zeros of cosine Fresnel integral C(z).
fresnels_zeros -- [+]Compute nt complex zeros of sine Fresnel integral S(z).
Legendre Functions
------------------
.. autosummary::
:toctree: generated/
lpmv -- Associated Legendre function of integer order and real degree.
sph_harm -- Compute spherical harmonics.
These are not universal functions:
.. autosummary::
:toctree: generated/
clpmn -- [+]Associated Legendre function of the first kind for complex arguments.
lpn -- [+]Legendre function of the first kind.
lqn -- [+]Legendre function of the second kind.
lpmn -- [+]Sequence of associated Legendre functions of the first kind.
lqmn -- [+]Sequence of associated Legendre functions of the second kind.
Ellipsoidal Harmonics
---------------------
.. autosummary::
:toctree: generated/
ellip_harm -- Ellipsoidal harmonic functions E^p_n(l)
ellip_harm_2 -- Ellipsoidal harmonic functions F^p_n(l)
ellip_normal -- Ellipsoidal harmonic normalization constants gamma^p_n
Orthogonal polynomials
----------------------
The following functions evaluate values of orthogonal polynomials:
.. autosummary::
:toctree: generated/
assoc_laguerre -- Compute the generalized (associated) Laguerre polynomial of degree n and order k.
eval_legendre -- Evaluate Legendre polynomial at a point.
eval_chebyt -- Evaluate Chebyshev polynomial of the first kind at a point.
eval_chebyu -- Evaluate Chebyshev polynomial of the second kind at a point.
eval_chebyc -- Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a point.
eval_chebys -- Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a point.
eval_jacobi -- Evaluate Jacobi polynomial at a point.
eval_laguerre -- Evaluate Laguerre polynomial at a point.
eval_genlaguerre -- Evaluate generalized Laguerre polynomial at a point.
eval_hermite -- Evaluate physicist's Hermite polynomial at a point.
eval_hermitenorm -- Evaluate probabilist's (normalized) Hermite polynomial at a point.
eval_gegenbauer -- Evaluate Gegenbauer polynomial at a point.
eval_sh_legendre -- Evaluate shifted Legendre polynomial at a point.
eval_sh_chebyt -- Evaluate shifted Chebyshev polynomial of the first kind at a point.
eval_sh_chebyu -- Evaluate shifted Chebyshev polynomial of the second kind at a point.
eval_sh_jacobi -- Evaluate shifted Jacobi polynomial at a point.
The following functions compute roots and quadrature weights for
orthogonal polynomials:
.. autosummary::
:toctree: generated/
roots_legendre -- Gauss-Legendre quadrature.
roots_chebyt -- Gauss-Chebyshev (first kind) quadrature.
roots_chebyu -- Gauss-Chebyshev (second kind) quadrature.
roots_chebyc -- Gauss-Chebyshev (first kind) quadrature.
roots_chebys -- Gauss-Chebyshev (second kind) quadrature.
roots_jacobi -- Gauss-Jacobi quadrature.
roots_laguerre -- Gauss-Laguerre quadrature.
roots_genlaguerre -- Gauss-generalized Laguerre quadrature.
roots_hermite -- Gauss-Hermite (physicst's) quadrature.
roots_hermitenorm -- Gauss-Hermite (statistician's) quadrature.
roots_gegenbauer -- Gauss-Gegenbauer quadrature.
roots_sh_legendre -- Gauss-Legendre (shifted) quadrature.
roots_sh_chebyt -- Gauss-Chebyshev (first kind, shifted) quadrature.
roots_sh_chebyu -- Gauss-Chebyshev (second kind, shifted) quadrature.
roots_sh_jacobi -- Gauss-Jacobi (shifted) quadrature.
The functions below, in turn, return the polynomial coefficients in
``orthopoly1d`` objects, which function similarly as `numpy.poly1d`.
The ``orthopoly1d`` class also has an attribute ``weights`` which returns
the roots, weights, and total weights for the appropriate form of Gaussian
quadrature. These are returned in an ``n x 3`` array with roots in the first
column, weights in the second column, and total weights in the final column.
Note that ``orthopoly1d`` objects are converted to `~numpy.poly1d` when doing
arithmetic, and lose information of the original orthogonal polynomial.
.. autosummary::
:toctree: generated/
legendre -- [+]Legendre polynomial.
chebyt -- [+]Chebyshev polynomial of the first kind.
chebyu -- [+]Chebyshev polynomial of the second kind.
chebyc -- [+]Chebyshev polynomial of the first kind on :math:`[-2, 2]`.
chebys -- [+]Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
jacobi -- [+]Jacobi polynomial.
laguerre -- [+]Laguerre polynomial.
genlaguerre -- [+]Generalized (associated) Laguerre polynomial.
hermite -- [+]Physicist's Hermite polynomial.
hermitenorm -- [+]Normalized (probabilist's) Hermite polynomial.
gegenbauer -- [+]Gegenbauer (ultraspherical) polynomial.
sh_legendre -- [+]Shifted Legendre polynomial.
sh_chebyt -- [+]Shifted Chebyshev polynomial of the first kind.
sh_chebyu -- [+]Shifted Chebyshev polynomial of the second kind.
sh_jacobi -- [+]Shifted Jacobi polynomial.
.. warning::
Computing values of high-order polynomials (around ``order > 20``) using
polynomial coefficients is numerically unstable. To evaluate polynomial
values, the ``eval_*`` functions should be used instead.
Hypergeometric Functions
------------------------
.. autosummary::
:toctree: generated/
hyp2f1 -- Gauss hypergeometric function 2F1(a, b; c; z).
hyp1f1 -- Confluent hypergeometric function 1F1(a, b; x)
hyperu -- Confluent hypergeometric function U(a, b, x) of the second kind
hyp0f1 -- Confluent hypergeometric limit function 0F1.
Parabolic Cylinder Functions
----------------------------
.. autosummary::
:toctree: generated/
pbdv -- Parabolic cylinder function D
pbvv -- Parabolic cylinder function V
pbwa -- Parabolic cylinder function W
These are not universal functions:
.. autosummary::
:toctree: generated/
pbdv_seq -- [+]Parabolic cylinder functions Dv(x) and derivatives.
pbvv_seq -- [+]Parabolic cylinder functions Vv(x) and derivatives.
pbdn_seq -- [+]Parabolic cylinder functions Dn(z) and derivatives.
Mathieu and Related Functions
-----------------------------
.. autosummary::
:toctree: generated/
mathieu_a -- Characteristic value of even Mathieu functions
mathieu_b -- Characteristic value of odd Mathieu functions
These are not universal functions:
.. autosummary::
:toctree: generated/
mathieu_even_coef -- [+]Fourier coefficients for even Mathieu and modified Mathieu functions.
   mathieu_odd_coef -- [+]Fourier coefficients for odd Mathieu and modified Mathieu functions.
The following return both function and first derivative:
.. autosummary::
:toctree: generated/
mathieu_cem -- Even Mathieu function and its derivative
mathieu_sem -- Odd Mathieu function and its derivative
mathieu_modcem1 -- Even modified Mathieu function of the first kind and its derivative
mathieu_modcem2 -- Even modified Mathieu function of the second kind and its derivative
mathieu_modsem1 -- Odd modified Mathieu function of the first kind and its derivative
mathieu_modsem2 -- Odd modified Mathieu function of the second kind and its derivative
Spheroidal Wave Functions
-------------------------
.. autosummary::
:toctree: generated/
pro_ang1 -- Prolate spheroidal angular function of the first kind and its derivative
pro_rad1 -- Prolate spheroidal radial function of the first kind and its derivative
   pro_rad2 -- Prolate spheroidal radial function of the second kind and its derivative
obl_ang1 -- Oblate spheroidal angular function of the first kind and its derivative
obl_rad1 -- Oblate spheroidal radial function of the first kind and its derivative
obl_rad2 -- Oblate spheroidal radial function of the second kind and its derivative.
pro_cv -- Characteristic value of prolate spheroidal function
obl_cv -- Characteristic value of oblate spheroidal function
pro_cv_seq -- Characteristic values for prolate spheroidal wave functions.
obl_cv_seq -- Characteristic values for oblate spheroidal wave functions.
The following functions require pre-computed characteristic value:
.. autosummary::
:toctree: generated/
pro_ang1_cv -- Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
pro_rad1_cv -- Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
pro_rad2_cv -- Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
obl_ang1_cv -- Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
obl_rad1_cv -- Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
obl_rad2_cv -- Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Kelvin Functions
----------------
.. autosummary::
:toctree: generated/
kelvin -- Kelvin functions as complex numbers
kelvin_zeros -- [+]Compute nt zeros of all Kelvin functions.
ber -- Kelvin function ber.
bei -- Kelvin function bei
berp -- Derivative of the Kelvin function `ber`
beip -- Derivative of the Kelvin function `bei`
ker -- Kelvin function ker
   kei -- Kelvin function kei
kerp -- Derivative of the Kelvin function ker
keip -- Derivative of the Kelvin function kei
These are not universal functions:
.. autosummary::
:toctree: generated/
ber_zeros -- [+]Compute nt zeros of the Kelvin function ber(x).
bei_zeros -- [+]Compute nt zeros of the Kelvin function bei(x).
berp_zeros -- [+]Compute nt zeros of the Kelvin function ber'(x).
beip_zeros -- [+]Compute nt zeros of the Kelvin function bei'(x).
ker_zeros -- [+]Compute nt zeros of the Kelvin function ker(x).
kei_zeros -- [+]Compute nt zeros of the Kelvin function kei(x).
kerp_zeros -- [+]Compute nt zeros of the Kelvin function ker'(x).
keip_zeros -- [+]Compute nt zeros of the Kelvin function kei'(x).
Combinatorics
-------------
.. autosummary::
:toctree: generated/
comb -- [+]The number of combinations of N things taken k at a time.
perm -- [+]Permutations of N things taken k at a time, i.e., k-permutations of N.
Lambert W and Related Functions
-------------------------------
.. autosummary::
:toctree: generated/
lambertw -- Lambert W function.
wrightomega -- Wright Omega function.
Other Special Functions
-----------------------
.. autosummary::
:toctree: generated/
agm -- Arithmetic, Geometric Mean.
bernoulli -- Bernoulli numbers B0..Bn (inclusive).
binom -- Binomial coefficient
diric -- Periodic sinc function, also called the Dirichlet function.
euler -- Euler numbers E0..En (inclusive).
expn -- Exponential integral E_n
exp1 -- Exponential integral E_1 of complex argument z
expi -- Exponential integral Ei
factorial -- The factorial of a number or array of numbers.
factorial2 -- Double factorial.
factorialk -- [+]Multifactorial of n of order k, n(!!...!).
shichi -- Hyperbolic sine and cosine integrals.
sici -- Sine and cosine integrals.
softmax -- Softmax function.
spence -- Spence's function, also known as the dilogarithm.
zeta -- Riemann zeta function.
zetac -- Riemann zeta function minus 1.
Convenience Functions
---------------------
.. autosummary::
:toctree: generated/
cbrt -- Cube root of `x`
exp10 -- 10**x
exp2 -- 2**x
radian -- Convert from degrees to radians
cosdg -- Cosine of the angle `x` given in degrees.
sindg -- Sine of angle given in degrees
tandg -- Tangent of angle x given in degrees.
cotdg -- Cotangent of the angle `x` given in degrees.
log1p -- Calculates log(1+x) for use when `x` is near zero
expm1 -- exp(x) - 1 for use when `x` is near zero.
cosm1 -- cos(x) - 1 for use when `x` is near zero.
round -- Round to nearest integer
xlogy -- Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
xlog1py -- Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
logsumexp -- Compute the log of the sum of exponentials of input elements.
exprel -- Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero.
sinc -- Return the sinc function.
.. [+] in the description indicates a function which is not a universal
.. function and does not follow broadcasting and automatic
.. array-looping rules.
"""
from __future__ import division, print_function, absolute_import
# Pull the whole subpackage API into the `scipy.special` namespace. The order
# matters: later star imports may rely on names defined by earlier ones.
from .sf_error import SpecialFunctionWarning, SpecialFunctionError
from ._ufuncs import *
from .basic import *
from ._logsumexp import logsumexp, softmax
from . import specfun
from . import orthogonal
from .orthogonal import *
from .spfun_stats import multigammaln
from ._ellip_harm import ellip_harm, ellip_harm_2, ellip_normal
from .lambertw import lambertw
from ._spherical_bessel import (spherical_jn, spherical_yn, spherical_in,
                                spherical_kn)
# Public API: everything imported above that is not underscore-private.
__all__ = [s for s in dir() if not s.startswith('_')]
# Register scipy's i0 with numpy.dual so numpy.dual users pick up this
# implementation; drop the helper afterwards so it stays out of the namespace.
from numpy.dual import register_func
register_func('i0',i0)
del register_func
# Expose `scipy.special.test()` as the subpackage's test-suite entry point.
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 41.401515 | 104 | 0.688271 |
ace70e42ba87d051d5ab296ea793bae201419e01 | 17,216 | py | Python | documentation/test_doxygen/test_compound.py | DarkContact/m.css | a56227e89de90d0ea5751d0ebfa96734a5e55b96 | [
"MIT"
] | null | null | null | documentation/test_doxygen/test_compound.py | DarkContact/m.css | a56227e89de90d0ea5751d0ebfa96734a5e55b96 | [
"MIT"
] | null | null | null | documentation/test_doxygen/test_compound.py | DarkContact/m.css | a56227e89de90d0ea5751d0ebfa96734a5e55b96 | [
"MIT"
] | null | null | null | #
# This file is part of m.css.
#
# Copyright © 2017, 2018, 2019 Vladimír Vondruš <mosra@centrum.cz>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import os
import unittest
from distutils.version import LooseVersion
from . import IntegrationTestCase, doxygen_version
class Listing(IntegrationTestCase):
    """Output tests for basic symbol, page and file listing pages."""

    def __init__(self, *args, **kwargs):
        super().__init__(__file__, 'listing', *args, **kwargs)

    def _compare(self, page):
        # Every rendered page has to match its checked-in expected version.
        self.assertEqual(*self.actual_expected_contents(page))

    def test_index_pages(self):
        self.run_doxygen(wildcard='index.xml', index_pages=['annotated', 'namespaces', 'pages'])
        for page in ('annotated.html', 'namespaces.html', 'pages.html'):
            self._compare(page)

    def test_index_pages_custom_expand_level(self):
        self.run_doxygen(wildcard='index.xml', index_pages=['files'])
        self._compare('files.html')

    def test_dir(self):
        self.run_doxygen(wildcard='dir_*.xml')
        for page in ('dir_4b0d5f8864bf89936129251a2d32609b.html',
                     'dir_bbe5918fe090eee9db2d9952314b6754.html'):
            self._compare(page)

    def test_file(self):
        self.run_doxygen(wildcard='*_8h.xml')
        for page in ('File_8h.html', 'Class_8h.html'):
            self._compare(page)

    def test_namespace(self):
        self.run_doxygen(wildcard='namespaceRoot_1_1Directory.xml')
        self._compare('namespaceRoot_1_1Directory.html')

    def test_namespace_empty(self):
        self.run_doxygen(wildcard='namespaceAnother.xml')
        self._compare('namespaceAnother.html')

    def test_class(self):
        self.run_doxygen(wildcard='classRoot_1_1Directory_1_1Sub_1_1Class.xml')
        self._compare('classRoot_1_1Directory_1_1Sub_1_1Class.html')

    def test_page_no_toc(self):
        self.run_doxygen(wildcard='page-no-toc.xml')
        self._compare('page-no-toc.html')
class Detailed(IntegrationTestCase):
    """Tests for detailed documentation of individual symbol kinds."""

    def __init__(self, *args, **kwargs):
        super().__init__(__file__, 'detailed', *args, **kwargs)

    def _render_and_compare(self, wildcard, page):
        # Convert just the given XML and diff the page against the baseline.
        self.run_doxygen(wildcard=wildcard)
        self.assertEqual(*self.actual_expected_contents(page))

    def test_namespace(self):
        self._render_and_compare('namespaceNamee.xml', 'namespaceNamee.html')

    def test_class_template(self):
        self._render_and_compare('structTemplate.xml', 'structTemplate.html')

    def test_class_template_specialized(self):
        self._render_and_compare('structTemplate_3_01void_01_4.xml',
                                 'structTemplate_3_01void_01_4.html')

    def test_class_template_warnings(self):
        self._render_and_compare('structTemplateWarning.xml',
                                 'structTemplateWarning.html')

    def test_function(self):
        self._render_and_compare('namespaceFoo.xml', 'namespaceFoo.html')

    def test_enum(self):
        self._render_and_compare('namespaceEno.xml', 'namespaceEno.html')

    def test_function_enum_warnings(self):
        self._render_and_compare('namespaceWarning.xml',
                                 'namespaceWarning.html')

    def test_typedef(self):
        self._render_and_compare('namespaceType.xml', 'namespaceType.html')

    def test_var(self):
        self._render_and_compare('namespaceVar.xml', 'namespaceVar.html')

    def test_define(self):
        self._render_and_compare('File_8h.xml', 'File_8h.html')
class Ignored(IntegrationTestCase):
    """Private and undocumented compounds must produce no output files."""

    def __init__(self, *args, **kwargs):
        super().__init__(__file__, 'ignored', *args, **kwargs)

    def test(self):
        self.run_doxygen(index_pages=[], wildcard='*.xml')
        html = os.path.join(self.path, 'html')
        # The public class gets rendered ...
        self.assertTrue(os.path.exists(os.path.join(html, 'classA.html')))
        # ... while private classes, source files, undocumented headers and
        # the odd namespace are all skipped.
        for page in ('classA_1_1PrivateClass.html', 'File_8cpp.html',
                     'input_8h.html', 'namespace_0D0.html'):
            self.assertFalse(os.path.exists(os.path.join(html, page)))

    @unittest.expectedFailure
    def test_empty_class_doc_not_generated(self):
        # The page has to be generated anyway in order to be compatible with
        # tag files, hence the expected failure.
        self.run_doxygen(index_pages=[], wildcard='classBrief.xml')
        self.assertFalse(os.path.exists(
            os.path.join(self.path, 'html', 'classBrief.html')))
class Warnings(IntegrationTestCase):
    """Output produced from XML that triggers converter warnings."""

    def __init__(self, *args, **kwargs):
        super().__init__(__file__, 'warnings', *args, **kwargs)

    def test(self):
        # A warning about an export macro being present in the XML is
        # expected during conversion; the rendered page must still match.
        self.run_doxygen(wildcard='namespaceMagnum.xml')
        actual, expected = self.actual_expected_contents('namespaceMagnum.html')
        self.assertEqual(actual, expected)
class Modules(IntegrationTestCase):
    """Rendering of module (group) pages and the module tree."""

    def __init__(self, *args, **kwargs):
        super().__init__(__file__, 'modules', *args, **kwargs)

    def test(self):
        self.run_doxygen(wildcard='*.xml')
        for page in ('group__group.html', 'group__group2.html',
                     'group__subgroup.html', 'modules.html'):
            self.assertEqual(*self.actual_expected_contents(page))
class ModulesInNamespace(IntegrationTestCase):
    """Modules whose members live inside a namespace."""

    def __init__(self, *args, **kwargs):
        super().__init__(__file__, 'modules_in_namespace', *args, **kwargs)

    def test(self):
        self.run_doxygen(wildcard='*.xml')
        for page in ('group__group1.html', 'group__group2.html',
                     'namespaceNamespace.html', 'file3_8h.html'):
            self.assertEqual(*self.actual_expected_contents(page))
class Deprecated(IntegrationTestCase):
    """The [deprecated] label has to appear everywhere a symbol is listed."""

    def __init__(self, *args, **kwargs):
        super().__init__(__file__, 'deprecated', *args, **kwargs)

    def test(self):
        self.run_doxygen(wildcard='*.xml')
        # In order: class tree; member namespace + define listing; member
        # file + directory listing; file + directory tree; member module
        # listing; module tree; member namespace/class/function/variable/
        # typedef/enum listing; namespace tree; page tree; base + derived
        # class listing.
        for page in (
                'annotated.html',
                'DeprecatedFile_8h.html',
                'dir_da5033def2d0db76e9883b31b76b3d0c.html',
                'files.html',
                'group__group.html',
                'modules.html',
                'namespaceDeprecatedNamespace.html',
                'namespaces.html',
                'pages.html',
                'structDeprecatedNamespace_1_1BaseDeprecatedClass.html',
                'structDeprecatedNamespace_1_1DeprecatedClass.html'):
            self.assertEqual(*self.actual_expected_contents(page))
class NamespaceMembersInFileScope(IntegrationTestCase):
    """Namespace members that are documented in file scope."""

    def __init__(self, *args, **kwargs):
        super().__init__(__file__, 'namespace_members_in_file_scope', *args, **kwargs)

    def test(self):
        # The detailed docs live on the namespace page itself.
        self.run_doxygen(wildcard='namespaceNamespace.xml')
        actual, expected = self.actual_expected_contents('namespaceNamespace.html')
        self.assertEqual(actual, expected)

    @unittest.skipUnless(LooseVersion(doxygen_version()) > LooseVersion("1.8.14"),
                         "https://github.com/doxygen/doxygen/pull/653")
    def test_file(self):
        # The file page should contain only links to the detailed docs.
        self.run_doxygen(wildcard='File_8h.xml')
        actual, expected = self.actual_expected_contents('File_8h.html')
        self.assertEqual(actual, expected)
class NamespaceMembersInFileScopeDefineBaseUrl(IntegrationTestCase):
    """Namespace members in file scope, with a base URL define in the input."""

    def __init__(self, *args, **kwargs):
        super().__init__(__file__, 'namespace_members_in_file_scope_define_base_url', *args, **kwargs)

    @unittest.skipUnless(LooseVersion(doxygen_version()) > LooseVersion("1.8.14"),
                         "https://github.com/doxygen/doxygen/pull/653")
    def test(self):
        # The file page should contain only links to the detailed docs.
        self.run_doxygen(wildcard='File_8h.xml')
        actual, expected = self.actual_expected_contents('File_8h.html')
        self.assertEqual(actual, expected)
class FilenameCase(IntegrationTestCase):
    """Lowercased output filenames with consistent links and page tree."""

    def __init__(self, *args, **kwargs):
        super().__init__(__file__, 'filename_case', *args, **kwargs)

    def test(self):
        self.run_doxygen(wildcard='*.xml')
        # All output filenames get "converted" to lowercase; the links and
        # the page tree have to stay consistent with that.
        for page in ('index.html', 'pages.html', '_u_p_p_e_r_c_a_s_e.html',
                     'class_u_p_p_e_r_c_l_a_s_s.html'):
            self.assertEqual(*self.actual_expected_contents(page))
class CrazyTemplateParams(IntegrationTestCase):
    """Unusual template parameters have to survive as one type string."""

    def __init__(self, *args, **kwargs):
        super().__init__(__file__, 'crazy_template_params', *args, **kwargs)

    def test(self):
        # The whole template argument is expected to end up as the type.
        self.run_doxygen(wildcard='*.xml')
        actual, expected = self.actual_expected_contents('File_8h.html')
        self.assertEqual(actual, expected)
class Includes(IntegrationTestCase):
    """Include information shown for namespaces, classes and groups."""

    def __init__(self, *args, **kwargs):
        super().__init__(__file__, 'includes', *args, **kwargs)

    def test(self):
        self.run_doxygen(wildcard='*.xml')
        # The Contained namespace gets just the global include, Spread only
        # the local includes, the class a global include and the group --
        # even though it sits in a single file -- local includes.
        for page in ('namespaceContained.html', 'namespaceSpread.html',
                     'classClass.html', 'group__group.html'):
            self.assertEqual(*self.actual_expected_contents(page))
        # These two get local includes because a global one would be
        # misleading; the Empty namespace gets a global one.
        for page in ('namespaceContainsNamespace.html',
                     'namespaceContainsNamespace_1_1ContainsClass.html',
                     'namespaceEmpty.html'):
            self.assertEqual(*self.actual_expected_contents(page))
class IncludesDisabled(IntegrationTestCase):
    """No include info when SHOW_INCLUDE_FILES is disabled globally."""

    def __init__(self, *args, **kwargs):
        super().__init__(__file__, 'includes_disabled', *args, **kwargs)

    def test(self):
        self.run_doxygen(wildcard='*.xml')
        for page in ('namespaceContained.html', 'namespaceSpread.html',
                     'classClass.html', 'group__group.html'):
            self.assertEqual(*self.actual_expected_contents(page))
class IncludesUndocumentedFiles(IntegrationTestCase):
    """Undocumented files must contribute no include information at all."""

    def __init__(self, *args, **kwargs):
        super().__init__(__file__, 'includes_undocumented_files', *args, **kwargs)

    def test(self):
        self.run_doxygen(wildcard='*.xml')
        # With the files undocumented, the output is effectively identical to
        # having SHOW_INCLUDE_FILES disabled globally, so compare against the
        # expected files of that test case.
        for page in ('namespaceContained.html', 'namespaceSpread.html',
                     'classClass.html', 'group__group.html'):
            self.assertEqual(*self.actual_expected_contents(
                page, '../compound_includes_disabled/' + page))
class IncludesTemplated(IntegrationTestCase):
    """Placement of include info on templated entries."""

    def __init__(self, *args, **kwargs):
        super().__init__(__file__, 'includes_templated', *args, **kwargs)

    def test(self):
        self.run_doxygen(wildcard='*.xml')
        # Every entry is expected to show its includes next to the template.
        for page in ('namespaceSpread.html', 'structStruct.html'):
            self.assertEqual(*self.actual_expected_contents(page))
class BaseDerivedInRootNamespace(IntegrationTestCase):
    """Base and derived classes both living in the root namespace."""

    def __init__(self, *args, **kwargs):
        super().__init__(__file__, 'base_derived_in_root_namespace', *args, **kwargs)

    def test(self):
        # Mainly a regression check -- conversion must not crash here.
        self.run_doxygen(wildcard='*.xml')
        actual, expected = self.actual_expected_contents(
            'structNamespace_1_1BothBaseAndDerivedInRootNamespace.html')
        self.assertEqual(actual, expected)
class Since(IntegrationTestCase):
    """Since / deprecation badges on entries, linking to the changelog."""

    def __init__(self, *args, **kwargs):
        super().__init__(__file__, 'since', *args, **kwargs)

    def test(self):
        self.run_doxygen(wildcard='*.xml')
        # Entries and details get the Since badge with a changelog link. Not
        # class/namespace/file/dir entries yet -- those aren't propagated to
        # at the moment.
        for page in ('dir_4b0d5f8864bf89936129251a2d32609b.html',
                     'Class_8h.html',
                     'group__group.html',
                     'namespaceFoo.html',
                     'classFoo_1_1Class.html',
                     'structFoo_1_1Subclass.html',
                     'a.html'):
            self.assertEqual(*self.actual_expected_contents(page))
        # The deprecated counterparts get an extended deprecation badge.
        for page in ('dir_73d1500434dee6f1c83b12ee799c54af.html',
                     'DeprecatedClass_8h.html',
                     'group__deprecated-group.html',
                     'namespaceDeprecatedFoo.html',
                     'classDeprecatedFoo_1_1DeprecatedClass.html',
                     'structDeprecatedFoo_1_1DeprecatedSubclass.html',
                     'deprecated-a.html'):
            self.assertEqual(*self.actual_expected_contents(page))
        # The listing pages contain both badge variants.
        for page in ('annotated.html', 'files.html', 'modules.html',
                     'namespaces.html', 'pages.html'):
            self.assertEqual(*self.actual_expected_contents(page))
| 47.426997 | 140 | 0.724965 |
ace70f849fa4df0aca31db05c6cc6c221f24769f | 387 | py | Python | Copy_SFTP_to_SharePoint.py | ali-senguel/Data-Management | a3d999f749aca6db3f62067dff12bd46368407e0 | [
"MIT"
] | null | null | null | Copy_SFTP_to_SharePoint.py | ali-senguel/Data-Management | a3d999f749aca6db3f62067dff12bd46368407e0 | [
"MIT"
] | null | null | null | Copy_SFTP_to_SharePoint.py | ali-senguel/Data-Management | a3d999f749aca6db3f62067dff12bd46368407e0 | [
"MIT"
] | null | null | null | #import all the libraries
import shutil
# One-shot copy of a tracker workbook into a SharePoint-synced folder.
# NOTE(review): despite the file's "SFTP" name, the source is a UNC network
# share path, not an SFTP URL -- confirm the share is mounted before running.
directory_r = r"\\frlcork-storage.thefacebook.com\oresearch_cork_001\ExternalData\evg\incoming\SOAP_Bonding_Process_Tracker_Architecture_A 28.10.2020.xlsx"
# Destination: local OneDrive-synced SharePoint library path.
directory_w = r"C:\Users\senguel\OneDrive - Facebook\Architecture A\Test\SOAP_Bonding_Process_Tracker_Architecture_A 28.10.2020.xlsx"
# copyfile overwrites the destination; file contents only, no metadata.
shutil.copyfile(directory_r, directory_w)
| 35.181818 | 156 | 0.821705 |
ace710560c5d9a3f485011ee36d4544efb6ba734 | 168 | py | Python | AGC001/AGC001b.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | [
"Unlicense"
] | null | null | null | AGC001/AGC001b.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | [
"Unlicense"
] | null | null | null | AGC001/AGC001b.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | [
"Unlicense"
] | null | null | null | #AGC001b
def main():
    """Solution template for AtCoder AGC001 problem B.

    Sets up the usual competitive-programming boilerplate -- fast stdin
    reading and a deep recursion limit. The actual solving logic was
    never filled in.
    """
    import sys
    # Deliberately shadow the builtin: sys.stdin.readline is much faster
    # than input() for bulk reads.
    input = sys.stdin.readline
    sys.setrecursionlimit(10**6)
    # TODO: parse the input here, e.g. map(int, input().split())


if __name__ == '__main__':
    main()
ace7115d8bdc8495fb0ce152734b087b4365760f | 2,687 | py | Python | django/goal/tests/test_views.py | andreyvpng/lifelog | 42802ba8759d9e4ce5bf73e45bfb3a41ce1c4137 | [
"Apache-2.0"
] | null | null | null | django/goal/tests/test_views.py | andreyvpng/lifelog | 42802ba8759d9e4ce5bf73e45bfb3a41ce1c4137 | [
"Apache-2.0"
] | 1 | 2018-10-11T17:06:01.000Z | 2018-10-14T00:39:07.000Z | django/goal/tests/test_views.py | andreyvpng/lifelog | 42802ba8759d9e4ce5bf73e45bfb3a41ce1c4137 | [
"Apache-2.0"
] | 1 | 2018-10-13T21:43:35.000Z | 2018-10-13T21:43:35.000Z | from django.test import TestCase
from goal.models import Goal
from utils.factory import ActionFactory, GoalFactory, UserFactory
class GoalCreateViewTest(TestCase):
    """Tests for the goal-creation view at ``/goal/create/action/<id>``."""

    def setUp(self):
        # Owner of the action vs. an unrelated user who must not be able
        # to attach a goal to it.
        self.user = UserFactory(username='test', password='12345')
        self.other_user = UserFactory(username='test1', password='12345')
        self.action = ActionFactory(user=self.user)
        self.url = '/goal/create/action/{}'.format(
            self.action.id)
        self.url_update = '/goal/update/action/{}'.format(
            self.action.id)
        # POST payload shared by all tests.
        self.object = {'action': self.action,
                       'daily_value': 100}

    def test_create_goal_by_user(self):
        """The owner can create a goal and is redirected to the dashboard."""
        self.client.login(username='test', password='12345')
        resp = self.client.post(self.url, self.object)
        goal_qs = Goal.objects.filter(action__user=self.user)
        # Check that the user has created our goal
        self.assertTrue(goal_qs)
        self.assertEqual(resp.status_code, 302)
        self.assertRedirects(resp, '/dashboard/')

    def test_create_goal_by_other_user(self):
        """An unrelated user gets a 400 and no goal is created."""
        self.client.login(username='test1', password='12345')
        resp = self.client.post(self.url, self.object)
        goal_qs = Goal.objects.filter(action__user=self.user)
        # Check that the other user has not created a goal on our action
        self.assertFalse(goal_qs)
        self.assertEqual(resp.status_code, 400)

    def test_create_goal_if_goal_exists(self):
        """Creating a goal that already exists redirects to the update view."""
        GoalFactory(action=self.action)
        self.client.login(username='test', password='12345')
        resp = self.client.post(self.url, self.object)
        self.assertEqual(resp.status_code, 302)
        self.assertRedirects(resp, self.url_update)
class GoalUpdateViewTest(TestCase):
    """Tests for the goal-update view at ``/goal/update/action/<id>``."""

    def setUp(self):
        self.user = UserFactory(username='test', password='12345')
        self.other = UserFactory(username='test1', password='12345')
        self.action = ActionFactory(user=self.user)
        self.goal = GoalFactory(action=self.action)
        self.url = '/goal/update/action/{}'.format(
            self.action.id)
        # POST payload shared by both tests.
        self.object = {'action': self.action,
                       'daily_value': 100}

    def test_update_goal_by_user(self):
        """The owner may update the goal and lands on the dashboard."""
        self.client.login(username='test', password='12345')
        response = self.client.post(self.url, self.object)
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, '/dashboard/')

    def test_update_goal_by_other_user(self):
        """Anyone else is forbidden (403) from updating the goal."""
        self.client.login(username='test1', password='12345')
        response = self.client.post(self.url, self.object)
        self.assertEqual(response.status_code, 403)
| 35.826667 | 73 | 0.655006 |
ace7123a5e412afe9c58b4347364bd69d4284d2e | 395 | py | Python | gatspy/datasets/__init__.py | abhimat/gatspy | 5aba05a839347eef1552cd108b8d3301d3ce63e0 | [
"BSD-2-Clause"
] | 66 | 2015-02-07T00:13:17.000Z | 2022-01-29T03:33:25.000Z | gatspy/datasets/__init__.py | abhimat/gatspy | 5aba05a839347eef1552cd108b8d3301d3ce63e0 | [
"BSD-2-Clause"
] | 34 | 2015-05-28T04:54:17.000Z | 2021-05-30T02:42:40.000Z | gatspy/datasets/__init__.py | abhimat/gatspy | 5aba05a839347eef1552cd108b8d3301d3ce63e0 | [
"BSD-2-Clause"
] | 32 | 2015-02-08T05:19:17.000Z | 2021-04-05T06:33:43.000Z | """
Datasets for Astronomical Time Series
=====================================
"""
from __future__ import absolute_import
# Public API of the datasets subpackage; the concrete objects are re-exported
# by the star imports from the implementation modules below.
__all__ = ['fetch_rrlyrae_templates', 'fetch_rrlyrae',
           'fetch_rrlyrae_lc_params', 'fetch_rrlyrae_fitdata',
           'RRLyraeLC', 'PartialRRLyraeLC', 'RRLyraeTemplates',
           'RRLyraeGenerated']
from .rrlyrae import *
from .rrlyrae_generated import *
| 26.333333 | 63 | 0.648101 |
ace71319e8958f8bd7e8b8ef35a1139e5c3fc0b3 | 1,471 | py | Python | conanfile.py | tao-cpp/algorithm | 156655aed1c522a3386cb82fb4aa2b3a302ee7e8 | [
"MIT"
] | 2 | 2017-01-13T09:20:58.000Z | 2019-06-28T15:27:13.000Z | conanfile.py | tao-cpp/algorithm | 156655aed1c522a3386cb82fb4aa2b3a302ee7e8 | [
"MIT"
] | null | null | null | conanfile.py | tao-cpp/algorithm | 156655aed1c522a3386cb82fb4aa2b3a302ee7e8 | [
"MIT"
] | 2 | 2017-05-31T12:05:26.000Z | 2019-10-13T22:36:32.000Z | from conans import ConanFile, CMake
class TaoCppAlgorithm(ConanFile):
    """Conan recipe for the header-only tao-cpp algorithm library."""

    name = "algorithm"
    license = "MIT"
    url = "https://github.com/tao-cpp/algorithm"
    description = "C++ general purpose algorithms library"
    settings = "os", "compiler", "arch", "build_type"
    exports_sources = "CMakeLists.txt", "include/*", "test/*", "benchmark/*", "src/*"
    no_copy_source = True
    options = {
        "tests": [True, False],
    }
    default_options = {
        "tests": False,
    }

    def configure(self):
        # Header-only: unless the test suite is being built, the toolchain
        # settings are irrelevant and must not influence the package.
        if not self.options.tests:
            self.settings.clear()

    def build(self):
        # There is nothing to compile for the library itself -- building is
        # only needed to configure, compile and run the test suite.
        if not self.options.tests:
            return
        cmake = CMake(self)
        cmake.configure()
        cmake.build()
        cmake.test()

    def package(self):
        # Ship only the public headers.
        for pattern in ("*.h", "*.hpp", "*.ipp"):
            self.copy(pattern, dst="include", src="include")

    def package_id(self):
        # Single package id for every configuration: mark the package as
        # header-only and ignore the tests option when hashing.
        self.info.header_only()
        self.info.options.tests = "ANY"
| 28.843137 | 101 | 0.557444 |
ace7131e1e8606d7796d60de94b1918812bc8ee0 | 22,331 | py | Python | generated/python/googleapis-common-protos/google/rpc/error_details_pb2.py | software-dov/api-client-staging | bbd2a32529bba73e26ac430b745360b4a8af0c53 | [
"BSD-3-Clause"
] | 18 | 2016-12-08T20:47:57.000Z | 2022-01-29T19:36:04.000Z | generated/python/googleapis-common-protos/google/rpc/error_details_pb2.py | software-dov/api-client-staging | bbd2a32529bba73e26ac430b745360b4a8af0c53 | [
"BSD-3-Clause"
] | 252 | 2016-09-21T20:51:36.000Z | 2021-03-25T23:02:36.000Z | generated/python/googleapis-common-protos/google/rpc/error_details_pb2.py | software-dov/api-client-staging | bbd2a32529bba73e26ac430b745360b4a8af0c53 | [
"BSD-3-Clause"
] | 37 | 2016-09-19T21:13:16.000Z | 2022-01-29T19:36:07.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/rpc/error_details.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/rpc/error_details.proto',
package='google.rpc',
syntax='proto3',
serialized_options=_b('\n\016com.google.rpcB\021ErrorDetailsProtoP\001Z?google.golang.org/genproto/googleapis/rpc/errdetails;errdetails\242\002\003RPC'),
serialized_pb=_b('\n\x1egoogle/rpc/error_details.proto\x12\ngoogle.rpc\x1a\x1egoogle/protobuf/duration.proto\";\n\tRetryInfo\x12.\n\x0bretry_delay\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"2\n\tDebugInfo\x12\x15\n\rstack_entries\x18\x01 \x03(\t\x12\x0e\n\x06\x64\x65tail\x18\x02 \x01(\t\"y\n\x0cQuotaFailure\x12\x36\n\nviolations\x18\x01 \x03(\x0b\x32\".google.rpc.QuotaFailure.Violation\x1a\x31\n\tViolation\x12\x0f\n\x07subject\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\"\x95\x01\n\x13PreconditionFailure\x12=\n\nviolations\x18\x01 \x03(\x0b\x32).google.rpc.PreconditionFailure.Violation\x1a?\n\tViolation\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x0f\n\x07subject\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\"\x83\x01\n\nBadRequest\x12?\n\x10\x66ield_violations\x18\x01 \x03(\x0b\x32%.google.rpc.BadRequest.FieldViolation\x1a\x34\n\x0e\x46ieldViolation\x12\r\n\x05\x66ield\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\"7\n\x0bRequestInfo\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x14\n\x0cserving_data\x18\x02 \x01(\t\"`\n\x0cResourceInfo\x12\x15\n\rresource_type\x18\x01 \x01(\t\x12\x15\n\rresource_name\x18\x02 \x01(\t\x12\r\n\x05owner\x18\x03 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x04 \x01(\t\"V\n\x04Help\x12$\n\x05links\x18\x01 \x03(\x0b\x32\x15.google.rpc.Help.Link\x1a(\n\x04Link\x12\x13\n\x0b\x64\x65scription\x18\x01 \x01(\t\x12\x0b\n\x03url\x18\x02 \x01(\t\"3\n\x10LocalizedMessage\x12\x0e\n\x06locale\x18\x01 \x01(\t\x12\x0f\n\x07message\x18\x02 \x01(\tBl\n\x0e\x63om.google.rpcB\x11\x45rrorDetailsProtoP\x01Z?google.golang.org/genproto/googleapis/rpc/errdetails;errdetails\xa2\x02\x03RPCb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,])
_RETRYINFO = _descriptor.Descriptor(
name='RetryInfo',
full_name='google.rpc.RetryInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='retry_delay', full_name='google.rpc.RetryInfo.retry_delay', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=78,
serialized_end=137,
)
_DEBUGINFO = _descriptor.Descriptor(
name='DebugInfo',
full_name='google.rpc.DebugInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='stack_entries', full_name='google.rpc.DebugInfo.stack_entries', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='detail', full_name='google.rpc.DebugInfo.detail', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=139,
serialized_end=189,
)
_QUOTAFAILURE_VIOLATION = _descriptor.Descriptor(
name='Violation',
full_name='google.rpc.QuotaFailure.Violation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='subject', full_name='google.rpc.QuotaFailure.Violation.subject', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='google.rpc.QuotaFailure.Violation.description', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=263,
serialized_end=312,
)
_QUOTAFAILURE = _descriptor.Descriptor(
name='QuotaFailure',
full_name='google.rpc.QuotaFailure',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='violations', full_name='google.rpc.QuotaFailure.violations', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_QUOTAFAILURE_VIOLATION, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=191,
serialized_end=312,
)
_PRECONDITIONFAILURE_VIOLATION = _descriptor.Descriptor(
name='Violation',
full_name='google.rpc.PreconditionFailure.Violation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='google.rpc.PreconditionFailure.Violation.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subject', full_name='google.rpc.PreconditionFailure.Violation.subject', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='google.rpc.PreconditionFailure.Violation.description', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=401,
serialized_end=464,
)
_PRECONDITIONFAILURE = _descriptor.Descriptor(
name='PreconditionFailure',
full_name='google.rpc.PreconditionFailure',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='violations', full_name='google.rpc.PreconditionFailure.violations', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_PRECONDITIONFAILURE_VIOLATION, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=315,
serialized_end=464,
)
_BADREQUEST_FIELDVIOLATION = _descriptor.Descriptor(
name='FieldViolation',
full_name='google.rpc.BadRequest.FieldViolation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='field', full_name='google.rpc.BadRequest.FieldViolation.field', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='google.rpc.BadRequest.FieldViolation.description', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=546,
serialized_end=598,
)
_BADREQUEST = _descriptor.Descriptor(
name='BadRequest',
full_name='google.rpc.BadRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='field_violations', full_name='google.rpc.BadRequest.field_violations', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_BADREQUEST_FIELDVIOLATION, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=467,
serialized_end=598,
)
_REQUESTINFO = _descriptor.Descriptor(
name='RequestInfo',
full_name='google.rpc.RequestInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_id', full_name='google.rpc.RequestInfo.request_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='serving_data', full_name='google.rpc.RequestInfo.serving_data', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=600,
serialized_end=655,
)
_RESOURCEINFO = _descriptor.Descriptor(
name='ResourceInfo',
full_name='google.rpc.ResourceInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_type', full_name='google.rpc.ResourceInfo.resource_type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.rpc.ResourceInfo.resource_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='owner', full_name='google.rpc.ResourceInfo.owner', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='google.rpc.ResourceInfo.description', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=657,
serialized_end=753,
)
_HELP_LINK = _descriptor.Descriptor(
name='Link',
full_name='google.rpc.Help.Link',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='description', full_name='google.rpc.Help.Link.description', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='url', full_name='google.rpc.Help.Link.url', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=801,
serialized_end=841,
)
_HELP = _descriptor.Descriptor(
name='Help',
full_name='google.rpc.Help',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='links', full_name='google.rpc.Help.links', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_HELP_LINK, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=755,
serialized_end=841,
)
_LOCALIZEDMESSAGE = _descriptor.Descriptor(
name='LocalizedMessage',
full_name='google.rpc.LocalizedMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='locale', full_name='google.rpc.LocalizedMessage.locale', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='google.rpc.LocalizedMessage.message', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=843,
serialized_end=894,
)
_RETRYINFO.fields_by_name['retry_delay'].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_QUOTAFAILURE_VIOLATION.containing_type = _QUOTAFAILURE
_QUOTAFAILURE.fields_by_name['violations'].message_type = _QUOTAFAILURE_VIOLATION
_PRECONDITIONFAILURE_VIOLATION.containing_type = _PRECONDITIONFAILURE
_PRECONDITIONFAILURE.fields_by_name['violations'].message_type = _PRECONDITIONFAILURE_VIOLATION
_BADREQUEST_FIELDVIOLATION.containing_type = _BADREQUEST
_BADREQUEST.fields_by_name['field_violations'].message_type = _BADREQUEST_FIELDVIOLATION
_HELP_LINK.containing_type = _HELP
_HELP.fields_by_name['links'].message_type = _HELP_LINK
DESCRIPTOR.message_types_by_name['RetryInfo'] = _RETRYINFO
DESCRIPTOR.message_types_by_name['DebugInfo'] = _DEBUGINFO
DESCRIPTOR.message_types_by_name['QuotaFailure'] = _QUOTAFAILURE
DESCRIPTOR.message_types_by_name['PreconditionFailure'] = _PRECONDITIONFAILURE
DESCRIPTOR.message_types_by_name['BadRequest'] = _BADREQUEST
DESCRIPTOR.message_types_by_name['RequestInfo'] = _REQUESTINFO
DESCRIPTOR.message_types_by_name['ResourceInfo'] = _RESOURCEINFO
DESCRIPTOR.message_types_by_name['Help'] = _HELP
DESCRIPTOR.message_types_by_name['LocalizedMessage'] = _LOCALIZEDMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RetryInfo = _reflection.GeneratedProtocolMessageType('RetryInfo', (_message.Message,), {
'DESCRIPTOR' : _RETRYINFO,
'__module__' : 'google.rpc.error_details_pb2'
# @@protoc_insertion_point(class_scope:google.rpc.RetryInfo)
})
_sym_db.RegisterMessage(RetryInfo)
DebugInfo = _reflection.GeneratedProtocolMessageType('DebugInfo', (_message.Message,), {
'DESCRIPTOR' : _DEBUGINFO,
'__module__' : 'google.rpc.error_details_pb2'
# @@protoc_insertion_point(class_scope:google.rpc.DebugInfo)
})
_sym_db.RegisterMessage(DebugInfo)
QuotaFailure = _reflection.GeneratedProtocolMessageType('QuotaFailure', (_message.Message,), {
'Violation' : _reflection.GeneratedProtocolMessageType('Violation', (_message.Message,), {
'DESCRIPTOR' : _QUOTAFAILURE_VIOLATION,
'__module__' : 'google.rpc.error_details_pb2'
# @@protoc_insertion_point(class_scope:google.rpc.QuotaFailure.Violation)
})
,
'DESCRIPTOR' : _QUOTAFAILURE,
'__module__' : 'google.rpc.error_details_pb2'
# @@protoc_insertion_point(class_scope:google.rpc.QuotaFailure)
})
_sym_db.RegisterMessage(QuotaFailure)
_sym_db.RegisterMessage(QuotaFailure.Violation)
PreconditionFailure = _reflection.GeneratedProtocolMessageType('PreconditionFailure', (_message.Message,), {
'Violation' : _reflection.GeneratedProtocolMessageType('Violation', (_message.Message,), {
'DESCRIPTOR' : _PRECONDITIONFAILURE_VIOLATION,
'__module__' : 'google.rpc.error_details_pb2'
# @@protoc_insertion_point(class_scope:google.rpc.PreconditionFailure.Violation)
})
,
'DESCRIPTOR' : _PRECONDITIONFAILURE,
'__module__' : 'google.rpc.error_details_pb2'
# @@protoc_insertion_point(class_scope:google.rpc.PreconditionFailure)
})
_sym_db.RegisterMessage(PreconditionFailure)
_sym_db.RegisterMessage(PreconditionFailure.Violation)
BadRequest = _reflection.GeneratedProtocolMessageType('BadRequest', (_message.Message,), {
'FieldViolation' : _reflection.GeneratedProtocolMessageType('FieldViolation', (_message.Message,), {
'DESCRIPTOR' : _BADREQUEST_FIELDVIOLATION,
'__module__' : 'google.rpc.error_details_pb2'
# @@protoc_insertion_point(class_scope:google.rpc.BadRequest.FieldViolation)
})
,
'DESCRIPTOR' : _BADREQUEST,
'__module__' : 'google.rpc.error_details_pb2'
# @@protoc_insertion_point(class_scope:google.rpc.BadRequest)
})
_sym_db.RegisterMessage(BadRequest)
_sym_db.RegisterMessage(BadRequest.FieldViolation)
RequestInfo = _reflection.GeneratedProtocolMessageType('RequestInfo', (_message.Message,), {
'DESCRIPTOR' : _REQUESTINFO,
'__module__' : 'google.rpc.error_details_pb2'
# @@protoc_insertion_point(class_scope:google.rpc.RequestInfo)
})
_sym_db.RegisterMessage(RequestInfo)
ResourceInfo = _reflection.GeneratedProtocolMessageType('ResourceInfo', (_message.Message,), {
'DESCRIPTOR' : _RESOURCEINFO,
'__module__' : 'google.rpc.error_details_pb2'
# @@protoc_insertion_point(class_scope:google.rpc.ResourceInfo)
})
_sym_db.RegisterMessage(ResourceInfo)
Help = _reflection.GeneratedProtocolMessageType('Help', (_message.Message,), {
'Link' : _reflection.GeneratedProtocolMessageType('Link', (_message.Message,), {
'DESCRIPTOR' : _HELP_LINK,
'__module__' : 'google.rpc.error_details_pb2'
# @@protoc_insertion_point(class_scope:google.rpc.Help.Link)
})
,
'DESCRIPTOR' : _HELP,
'__module__' : 'google.rpc.error_details_pb2'
# @@protoc_insertion_point(class_scope:google.rpc.Help)
})
_sym_db.RegisterMessage(Help)
_sym_db.RegisterMessage(Help.Link)
LocalizedMessage = _reflection.GeneratedProtocolMessageType('LocalizedMessage', (_message.Message,), {
'DESCRIPTOR' : _LOCALIZEDMESSAGE,
'__module__' : 'google.rpc.error_details_pb2'
# @@protoc_insertion_point(class_scope:google.rpc.LocalizedMessage)
})
_sym_db.RegisterMessage(LocalizedMessage)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 35.786859 | 1,686 | 0.741883 |
ace713d992baddd96ef8122c5925a164b1bfccea | 15,909 | py | Python | cclib/io/ccio.py | kunalsharma05/cclib | 6976824bcbf810ecd3e7f3dc25c4b6910c1e3b56 | [
"BSD-3-Clause"
] | 3 | 2018-05-30T18:14:35.000Z | 2018-11-06T21:22:07.000Z | cclib/io/ccio.py | kunalsharma05/cclib | 6976824bcbf810ecd3e7f3dc25c4b6910c1e3b56 | [
"BSD-3-Clause"
] | null | null | null | cclib/io/ccio.py | kunalsharma05/cclib | 6976824bcbf810ecd3e7f3dc25c4b6910c1e3b56 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Tools for identifying, reading and writing files and streams."""
from __future__ import print_function
import atexit
import io
import os
import sys
import re
from tempfile import NamedTemporaryFile
# Python 2->3 changes the default file object hierarchy.
if sys.version_info[0] == 2:
fileclass = file
from urllib2 import urlopen, URLError
else:
fileclass = io.IOBase
from urllib.request import urlopen
from urllib.error import URLError
from cclib.parser import logfileparser
from cclib.parser import data
from cclib.parser.adfparser import ADF
from cclib.parser.daltonparser import DALTON
from cclib.parser.gamessparser import GAMESS
from cclib.parser.gamessukparser import GAMESSUK
from cclib.parser.gaussianparser import Gaussian
from cclib.parser.jaguarparser import Jaguar
from cclib.parser.molcasparser import Molcas
from cclib.parser.molproparser import Molpro
from cclib.parser.mopacparser import MOPAC
from cclib.parser.nwchemparser import NWChem
from cclib.parser.orcaparser import ORCA
from cclib.parser.psi3parser import Psi3
from cclib.parser.psi4parser import Psi4
from cclib.parser.qchemparser import QChem
from cclib.parser.turbomoleparser import Turbomole
from cclib.io import cjsonreader
from cclib.io import cjsonwriter
from cclib.io import cmlwriter
from cclib.io import moldenwriter
from cclib.io import wfxwriter
from cclib.io import xyzreader
from cclib.io import xyzwriter
try:
from cclib.bridge import cclib2openbabel
_has_cclib2openbabel = True
except ImportError:
_has_cclib2openbabel = False
# Regular expression for validating URLs
URL_PATTERN = re.compile(
    r'^(?:http|ftp)s?://' # http:// or https://
    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
    r'localhost|' # localhost...
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
    r'(?::\d+)?' # optional port
    r'(?:/?|[/?]\S+)$', re.IGNORECASE
)

# Parser choice is triggered by certain phrases occurring in the logfile. Where these
# strings are unique, we can set the parser and break. In other cases, the situation
# is a little bit more complicated. Here are the exceptions:
#   1. The GAMESS trigger also works for GAMESS-UK files, so we can't break
#      after finding GAMESS in case the more specific phrase is found.
#   2. Molpro log files don't have the program header, but always contain
#      the generic string 1PROGRAM, so don't break here either to be cautious.
#   3. "MOPAC" is used in some packages like GAMESS, so match MOPAC20##
#
# The triggers are defined by the tuples in the list below like so:
#   (parser, phrases, flag whether we should break)
triggers = [
    (ADF, ["Amsterdam Density Functional"], True),
    (DALTON, ["Dalton - An Electronic Structure Program"], True),
    (GAMESS, ["GAMESS"], False),
    (GAMESS, ["GAMESS VERSION"], True),
    (GAMESSUK, ["G A M E S S - U K"], True),
    (Gaussian, ["Gaussian, Inc."], True),
    (Jaguar, ["Jaguar"], True),
    (Molcas, ["MOLCAS"], True),
    (Molpro, ["PROGRAM SYSTEM MOLPRO"], True),
    (Molpro, ["1PROGRAM"], False),
    (MOPAC, ["MOPAC20"], True),
    (NWChem, ["Northwest Computational Chemistry Package"], True),
    (ORCA, ["O R C A"], True),
    (Psi3, ["PSI3: An Open-Source Ab Initio Electronic Structure Package"], True),
    (Psi4, ["Psi4: An Open-Source Ab Initio Electronic Structure Package"], True),
    (QChem, ["A Quantum Leap Into The Future Of Chemistry"], True),
    (Turbomole, ["TURBOMOLE"], True),
]

# Reader classes keyed by lowercase file extension; used by ccopen when a
# file is not recognized as a standard compchem log file.
readerclasses = {
    'cjson': cjsonreader.CJSON,
    'json': cjsonreader.CJSON,
    'xyz': xyzreader.XYZ,
}

# Writer classes keyed by lowercase output format / file extension; used by
# ccwrite via _determine_output_format.
writerclasses = {
    'cjson': cjsonwriter.CJSON,
    'json': cjsonwriter.CJSON,
    'cml': cmlwriter.CML,
    'molden': moldenwriter.MOLDEN,
    'wfx': wfxwriter.WFXWriter,
    'xyz': xyzwriter.XYZ,
}
class UnknownOutputFormatError(Exception):
    """Raised when an unknown output format is encountered.

    Raisers normally pass the offending extension/format string as the
    exception argument.
    """
def guess_filetype(inputfile):
    """Try to guess the filetype by searching for trigger strings.

    Inputs:
        inputfile - an iterable of logfile lines (or a falsy value)

    Returns:
        the parser class of the last (or, for unambiguous triggers, first)
        matching entry in `triggers`, or None if nothing matched or the
        input is falsy.
    """
    if not inputfile:
        return None

    filetype = None
    for line in inputfile:
        # Lowercase once per line instead of once per phrase comparison.
        line_lower = line.lower()
        for parser, phrases, do_break in triggers:
            # Substring membership is the idiomatic (and faster) form of
            # the original `find(...) >= 0`; a generator avoids building
            # a throwaway list for all().
            if all(p.lower() in line_lower for p in phrases):
                filetype = parser
                if do_break:
                    return filetype
    return filetype
def ccread(source, *args, **kargs):
    """Attempt to open and read computational chemistry data from a file.

    If the file is not appropriate for cclib parsers, a fallback mechanism
    will try to recognize some common chemistry formats and read those using
    the appropriate bridge such as OpenBabel.

    Inputs:
        source - a single logfile, a list of logfiles (for a single job),
                 an input stream, or an URL pointing to a log file.
        *args, **kargs - arguments and keyword arguments passed to ccopen

    Returns:
        a ccData object containing cclib data attributes
    """
    job = ccopen(source, *args, **kargs)
    verbose = kargs.get('verbose', None)

    if not job:
        # Nothing recognized the file; try the bridge-based fallback.
        if verbose:
            print('Attempting to use fallback mechanism to read file')
        return fallback(source)

    if verbose:
        print('Identified logfile to be in %s format' % job.logname)

    # A CJSON input is read directly rather than parsed as a compchem log.
    if kargs.get("cjson", False):
        return job.read_cjson()
    return job.parse()
def ccopen(source, *args, **kargs):
    """Guess the identity of a particular log file and return an instance of it.

    Inputs:
        source - a single logfile, a list of logfiles (for a single job),
                 an input stream, or an URL pointing to a log file.
        *args, **kargs - arguments and keyword arguments passed to filetype

    Returns:
        one of ADF, DALTON, GAMESS, GAMESS UK, Gaussian, Jaguar,
        Molpro, MOPAC, NWChem, ORCA, Psi3, Psi/Psi4, QChem, CJSON or None
        (if it cannot figure it out or the file does not exist).
    """

    inputfile = None
    is_stream = False

    # Check if source is a link or contains links. Retrieve their content.
    # Try to open the logfile(s), using openlogfile, if the source is a string (filename)
    # or list of filenames. If it can be read, assume it is an open file object/stream.
    is_string = isinstance(source, str)
    is_url = True if is_string and URL_PATTERN.match(source) else False
    is_listofstrings = isinstance(source, list) and all([isinstance(s, str) for s in source])
    if is_string or is_listofstrings:

        # Process links from list (download contents into temporary location)
        if is_listofstrings:
            filelist = []
            for filename in source:
                if not URL_PATTERN.match(filename):
                    filelist.append(filename)
                else:
                    try:
                        response = urlopen(filename)
                        tfile = NamedTemporaryFile(delete=False)
                        tfile.write(response.read())
                        # Close the file because Windows won't let open it second time
                        tfile.close()
                        filelist.append(tfile.name)
                        # Delete temporary file when the program finishes
                        atexit.register(os.remove, tfile.name)
                    except (ValueError, URLError) as error:
                        if not kargs.get('quiet', False):
                            # NOTE(review): errno/strerror are unpacked but never
                            # used or reported; presumably a message was intended.
                            (errno, strerror) = error.args
                        return None
            source = filelist

        if not is_url:
            try:
                inputfile = logfileparser.openlogfile(source)
            except IOError as error:
                if not kargs.get('quiet', False):
                    (errno, strerror) = error.args
                return None
        else:
            try:
                response = urlopen(source)
                is_stream = True

                # Retrieve filename from URL if possible.
                # NOTE(review): the non-raw "\w+\.\w+" pattern works but uses
                # deprecated escape sequences; an r-string would be safer.
                filename = re.findall("\w+\.\w+", source.split('/')[-1])
                filename = filename[0] if filename else ""

                inputfile = logfileparser.openlogfile(filename, object=response.read())
            except (ValueError, URLError) as error:
                if not kargs.get('quiet', False):
                    (errno, strerror) = error.args
                return None

    elif hasattr(source, "read"):
        inputfile = source
        is_stream = True

    # Streams are tricky since they don't have seek methods or seek won't work
    # by design even if it is present. We solve this now by reading in the
    # entire stream and using a StringIO buffer for parsing. This might be
    # problematic for very large streams. Slow streams might also be an issue if
    # the parsing is not instantaneous, but we'll deal with such edge cases
    # as they arise. Ideally, in the future we'll create a class dedicated to
    # dealing with these issues, supporting both files and streams.
    if is_stream:
        try:
            inputfile.seek(0, 0)
        except (AttributeError, IOError):
            contents = inputfile.read()
            try:
                inputfile = io.StringIO(contents)
            except:
                # Python 2 fallback: byte contents need decoding via unicode().
                inputfile = io.StringIO(unicode(contents))
            inputfile.seek(0, 0)

    # Guess the filetype from trigger phrases in the file contents.
    filetype = guess_filetype(inputfile)

    # If the input file isn't a standard compchem log file, try one of
    # the readers, falling back to Open Babel.
    if not filetype:
        if kargs.get("cjson"):
            filetype = readerclasses['cjson']
        elif source and not is_stream:
            ext = os.path.splitext(source)[1][1:].lower()
            for extension in readerclasses:
                if ext == extension:
                    filetype = readerclasses[extension]

    # Proceed to return an instance of the logfile parser only if the filetype
    # could be guessed. Need to make sure the input file is closed before creating
    # an instance, because parsers will handle opening/closing on their own.
    if filetype:
        # We're going to close and reopen below anyway, so this is just to avoid
        # the missing seek method for fileinput.FileInput. In the long run
        # we need to refactor to support for various input types in a more
        # centralized fashion.
        if is_listofstrings:
            pass
        else:
            inputfile.seek(0, 0)

        if not is_stream:
            inputfile.close()
            return filetype(source, *args, **kargs)
        return filetype(inputfile, *args, **kargs)
def fallback(source):
    """Attempt to read standard molecular formats using other libraries.

    Currently this will read XYZ files with OpenBabel, but this can easily
    be extended to other formats and libraries, too.
    """
    if not isinstance(source, str):
        return None

    extension = os.path.splitext(source)[1][1:].lower()
    if _has_cclib2openbabel:
        if extension in ('xyz', ):
            return cclib2openbabel.readfile(source, extension)
    else:
        print("Could not import openbabel, fallback mechanism might not work.")
def ccwrite(ccobj, outputtype=None, outputdest=None,
            indices=None, terse=False, returnstr=False,
            *args, **kwargs):
    """Write the parsed data from an outputfile to a standard chemical
    representation.

    Inputs:
        ccobj - Either a job (from ccopen) or a data (from job.parse()) object
        outputtype - The output format (should be a string)
        outputdest - A filename or file object for writing
        indices - One or more indices for extracting specific geometries/etc. (zero-based)
        terse - This option is currently limited to the cjson/json format. Whether to indent the cjson/json or not
        returnstr - Whether or not to return a string representation.

    The different writers may take additional arguments, which are
    documented in their respective docstrings.

    Returns:
        the string representation of the chemical datatype
        requested, or None.

    Raises:
        ValueError - if ccobj is neither a Logfile nor a ccData, or if
            outputdest is neither a string nor a file object.
    """
    # Determine the correct output format.
    outputclass = _determine_output_format(outputtype, outputdest)

    # Is ccobj a job object (unparsed), or is it a ccdata object (parsed)?
    if isinstance(ccobj, logfileparser.Logfile):
        jobfilename = ccobj.filename
        ccdata = ccobj.parse()
    elif isinstance(ccobj, data.ccData):
        jobfilename = None
        ccdata = ccobj
    else:
        raise ValueError

    # If the logfile name has been passed in through kwargs (such as
    # in the ccwrite script), make sure it has precedence. `pop` both
    # reads and removes it, so it is not passed into the writer twice.
    if 'jobfilename' in kwargs:
        jobfilename = kwargs.pop('jobfilename')

    outputobj = outputclass(ccdata, jobfilename=jobfilename,
                            indices=indices, terse=terse,
                            *args, **kwargs)
    output = outputobj.generate_repr()

    # If outputdest isn't None, write the output to disk.
    if outputdest is not None:
        if isinstance(outputdest, str):
            with open(outputdest, 'w') as outputobj:
                outputobj.write(output)
        elif isinstance(outputdest, fileclass):
            outputdest.write(output)
        else:
            raise ValueError
    # If outputdest is None, return a string representation of the output.
    else:
        return output

    if returnstr:
        return output
def _determine_output_format(outputtype, outputdest):
    """
    Determine the correct output format.

    Inputs:
      outputtype - a string corresponding to the file type
      outputdest - a filename string or file handle

    Returns:
      outputclass - the class corresponding to the correct output format

    Raises:
      UnknownOutputFormatError for unsupported file writer extensions
    """
    # Priority for determining the correct output format:
    # 1. outputtype
    # 2. outputdest
    # First check outputtype.
    if isinstance(outputtype, str):
        extension = outputtype.lower()
    # Then check outputdest.
    elif isinstance(outputdest, str):
        # os.path.splitext keeps the leading dot (".cjson"); strip it so the
        # lookup matches the bare-extension keys used on the outputtype path.
        extension = os.path.splitext(outputdest)[1].lstrip('.').lower()
    elif isinstance(outputdest, fileclass):
        extension = os.path.splitext(outputdest.name)[1].lstrip('.').lower()
    else:
        raise UnknownOutputFormatError("neither an output type nor an output destination was given")
    if extension in writerclasses:
        return writerclasses[extension]
    raise UnknownOutputFormatError(extension)
| 37.968974 | 115 | 0.623232 |
ace714151c3db8831bbb6922e7ee84e79801623e | 5,558 | py | Python | appium/sample-scripts/python/testdroid_ios.py | spedepekka/testdroid-samples | 4d925fda21980d82d1e4276208676ff9424095b0 | [
"Apache-2.0"
] | null | null | null | appium/sample-scripts/python/testdroid_ios.py | spedepekka/testdroid-samples | 4d925fda21980d82d1e4276208676ff9424095b0 | [
"Apache-2.0"
] | null | null | null | appium/sample-scripts/python/testdroid_ios.py | spedepekka/testdroid-samples | 4d925fda21980d82d1e4276208676ff9424095b0 | [
"Apache-2.0"
] | null | null | null | ##
## For help on setting up your machine and configuring this TestScript go to
## http://docs.testdroid.com/appium/
##
import os
import time
import unittest
from time import sleep
from appium import webdriver
from device_finder import DeviceFinder
from selenium.common.exceptions import NoSuchElementException
def log(msg):
    """Print *msg* prefixed with the current HH:MM:SS timestamp."""
    timestamp = time.strftime("%H:%M:%S")
    print("{}: {}".format(timestamp, msg))
class TestdroidIOS(unittest.TestCase):
    """Appium sample test that runs the Bitbar iOS sample app on a
    Testdroid cloud device and captures screenshots along the way."""

    def screenshot(self, name):
        """
        Take screenshot and store files to defined location, with numbering prefix

        :Args:
        - name - files are stored as #_name
        """
        screenshot_name = str(self.screenshot_count) + "_" + name + ".png"
        log("Taking screenshot: " + screenshot_name)
        self.driver.save_screenshot(self.screenshot_dir + "/" + screenshot_name)
        self.screenshot_count += 1

    def setUp(self):
        """Reserve a free cloud device and open the remote Appium session."""
        ##
        ## IMPORTANT: Set the following parameters.
        ##
        testdroid_url = os.environ.get('TESTDROID_URL') or "https://cloud.testdroid.com"
        appium_url = os.environ.get('TESTDROID_APPIUM_URL') or 'http://appium.testdroid.com/wd/hub'
        testdroid_apiKey = os.environ.get('TESTDROID_APIKEY') or ""
        testdroid_project_name = os.environ.get('TESTDROID_PROJECT') or "iOS sample project"
        testdroid_testrun_name = os.environ.get('TESTDROID_TESTRUN') or "My testrun"
        testdroid_app = os.environ.get('TESTDROID_APP') or ""
        testdroid_bundle_id = os.environ.get('TESTDROID_BUNDLE_ID') or "com.bitbar.testdroid.BitbarIOSSample"
        new_command_timeout = os.environ.get('TESTDROID_CMD_TIMEOUT') or '60'
        testdroid_test_timeout = os.environ.get('TESTDROID_TEST_TIMEOUT') or '600'
        self.screenshot_dir = os.environ.get('TESTDROID_SCREENSHOTS') or os.getcwd() + "/screenshots"
        log("Will save screenshots at: " + self.screenshot_dir)
        self.screenshot_count = 1
        # Options to select device
        # 1) Set environment variable TESTDROID_DEVICE
        # 2) Set device name to this python script
        # 3) Do not set #1 and #2 and let DeviceFinder to find free device for you
        testdroid_device = os.environ.get('TESTDROID_DEVICE') or ""
        deviceFinder = DeviceFinder(url=testdroid_url)
        # Loop will not exit until a free device is found.
        while testdroid_device == "":
            testdroid_device = deviceFinder.available_ios_device()
        # print() call form: the original used a Python-2-only print
        # statement here, inconsistent with the rest of the file and
        # a syntax error under Python 3.
        print("Starting Appium test using device '%s'" % testdroid_device)
        desired_capabilities_cloud = {
            'testdroid_apiKey': testdroid_apiKey,
            'testdroid_target': 'ios',
            'testdroid_project': testdroid_project_name,
            'testdroid_testrun': testdroid_testrun_name,
            'testdroid_device': testdroid_device,
            'testdroid_app': testdroid_app,
            'platformName': 'iOS',
            'deviceName': 'iPhone device',
            'bundleId': testdroid_bundle_id,
            'newCommandTimeout': new_command_timeout,
            'testdroid_testTimeout': testdroid_test_timeout,
        }
        # set up webdriver
        log("WebDriver request initiated. Waiting for response, this typically takes 2-3 mins")
        self.driver = webdriver.Remote(command_executor=appium_url, desired_capabilities=desired_capabilities_cloud)
        log("WebDriver response received")

    def tearDown(self):
        """Close the remote Appium session."""
        log("Quitting")
        self.driver.quit()

    def testSample(self):
        """Walk through the sample app: fill the form, submit, navigate back,
        pick the second radio button and submit again."""
        # view1
        log("view1: Finding buttons")
        buttons = self.driver.find_elements_by_class_name('UIAButton')
        log("view1: Clicking button [0] - RadioButton 1")
        buttons[0].click()
        log("view1: Typing in textfield[0]: Testdroid user")
        elem = self.driver.find_element_by_class_name('UIATextField')
        elem.clear()
        elem.send_keys('Testdroid user')
        log("view1: Taking screenshot screenshot1.png")
        self.screenshot("screenshot1")
        log("view1: Hiding Keyboard")
        self.driver.find_element_by_xpath("//*[contains(@name, 'Return')]").click()
        log("view1: Taking screenshot screenshot2.png")
        self.screenshot("screenshot2")
        log("view1: Clicking button[6] - OK Button")
        buttons[6].click()
        log("view2: Taking screenshot screenshot3.png")
        self.screenshot("screenshot3")
        # view2
        log("view2: Finding buttons")
        buttons = self.driver.find_elements_by_class_name('UIAButton')
        log("view2: Clicking button[0] - Back/OK button")
        buttons[0].click()
        # view 1
        log("view1: Finding buttons")
        buttons = self.driver.find_elements_by_class_name('UIAButton')
        log("view1: Clicking button[2] - RadioButton 2")
        buttons[2].click()
        log("view1: Clicking button[6] - OK Button")
        buttons[6].click()
        log("view1: Taking screenshot screenshot4.png")
        self.screenshot("screenshot4")
        log("view1: Sleeping 3 before quitting webdriver.")
        sleep(3)
def initialize():
    # Hook used by external test runners to look up the TestCase class.
    return TestdroidIOS
if __name__ == "__main__":
    # Build a suite from the TestCase above and run it with verbose output.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestdroidIOS)
    unittest.TextTestRunner(verbosity=2).run(suite)
| 38.597222 | 116 | 0.675783 |
ace716ea09a8873863a014e8ef1f6fbc75cb7a67 | 2,026 | py | Python | gig_server.py | nuuuwan/gig-server | 40625a2ff52b029ae6689d583600bbd5d9d99ea3 | [
"MIT"
] | null | null | null | gig_server.py | nuuuwan/gig-server | 40625a2ff52b029ae6689d583600bbd5d9d99ea3 | [
"MIT"
] | null | null | null | gig_server.py | nuuuwan/gig-server | 40625a2ff52b029ae6689d583600bbd5d9d99ea3 | [
"MIT"
] | 1 | 2021-08-18T06:46:07.000Z | 2021-08-18T06:46:07.000Z | """GIGServer."""
import logging
from flask import Flask
from flask_caching import Cache
from flask_cors import CORS
from waitress import serve
from utils.sysx import log_metrics
import gig.ents
import gig.nearby
import gig.ext_data
# Default TTL for the per-view response cache (flask-caching `timeout`).
DEFAULT_CACHE_TIMEOUT = 120
logging.basicConfig(level=logging.INFO)
# Flask app with permissive CORS; responses cached in-process (SimpleCache).
app = Flask(__name__)
CORS(app)
cache = Cache(config={'CACHE_TYPE': 'SimpleCache'})
cache.init_app(app)
# ----------------------------------------------------------------
# Handlers
# ----------------------------------------------------------------
@app.route('/status')
@cache.cached(timeout=DEFAULT_CACHE_TIMEOUT)
def status():
    """Report server metrics plus this server's name."""
    metrics = log_metrics()
    metrics['server'] = 'gig_server'
    return metrics
@app.route('/entities/<string:entity_ids_str>')
@cache.cached(timeout=DEFAULT_CACHE_TIMEOUT)
def entities(entity_ids_str):
    """Return entity data for a semicolon-separated list of entity IDs."""
    id_list = entity_ids_str.split(';')
    return gig.ents.multiget_entities(id_list)
@app.route('/entity_ids/<string:entity_type>')
@cache.cached(timeout=DEFAULT_CACHE_TIMEOUT)
def entity_ids(entity_type):
    """Return all entity IDs of the given entity type."""
    ids = gig.ents.get_entity_ids(entity_type)
    return {'entity_ids': ids}
@app.route('/nearby/<string:latlng_str>')
@cache.cached(timeout=DEFAULT_CACHE_TIMEOUT)
def nearby(latlng_str):
    """Return entities near a "lat,lng" coordinate string."""
    lat_str, _, lng_str = latlng_str.partition(',')
    lat_lng = (float(lat_str), float(lng_str))
    entity_info_list = gig.nearby.get_nearby_entities(lat_lng)
    return {'nearby_entity_info_list': entity_info_list}
@app.route(
    '/ext_data/<string:data_group>/<string:table_id>/<string:entity_id>'
)
@cache.cached(timeout=DEFAULT_CACHE_TIMEOUT)
def ext_data(data_group, table_id, entity_id):
    """Return extended table data for a single entity."""
    entity_ids = [entity_id]
    return gig.ext_data.get_table_data(data_group, table_id, entity_ids)
if __name__ == '__main__':
    # Serve the Flask app with waitress when run directly.
    PORT = 4001
    HOST = '0.0.0.0'
    logging.info('Starting gig_server on %s:%d...', HOST, PORT)
    serve(
        app,
        host=HOST,
        port=PORT,
        threads=8,
    )
ace717e987391bb81fde2f7e2fbcc1981c12b4b4 | 212 | py | Python | Coding Club India/Asked Google Interview Questions/PatternSearch.py | AbhiSaphire/Codechef.Practice | f671292dad2695e37458866442a6b951ba4e1a71 | [
"MIT"
] | 27 | 2020-05-19T06:46:45.000Z | 2022-02-06T20:29:58.000Z | Coding Club India/Asked Google Interview Questions/PatternSearch.py | AbhiSaphire/Codechef.Practice | f671292dad2695e37458866442a6b951ba4e1a71 | [
"MIT"
] | 1 | 2020-06-23T13:08:08.000Z | 2020-10-06T06:27:15.000Z | Coding Club India/Asked Google Interview Questions/PatternSearch.py | AbhiSaphire/Codechef.Practice | f671292dad2695e37458866442a6b951ba4e1a71 | [
"MIT"
] | 4 | 2020-05-19T06:47:52.000Z | 2021-07-09T02:49:09.000Z | def PatternSearch(pat, text):
i, j = 0, len(pat)
while j < len(text):
j = i+len(pat)
if text[i:j] == pat:
print("Found at index :", i+1)
i+=1
PatternSearch("ABC", "ABCDEFABCDDGHSGABHHUABC")
# 1, 7, 21 | 21.2 | 47 | 0.603774 |
ace718d3cdfc2924874f1bbfc07a6eb898deccee | 938 | py | Python | tests/unit/models/test_candidateset_model.py | der-ofenmeister/recommendation-api | e32fb360c5da05df284c3a1e03e5e2e6b993ce66 | [
"Apache-2.0"
] | 14 | 2021-03-03T15:43:39.000Z | 2022-03-27T02:45:50.000Z | tests/unit/models/test_candidateset_model.py | Pocket/recommendation-api | f13fc101054b102b0467b3c0ff31f3e091b2818f | [
"Apache-2.0"
] | 325 | 2021-03-03T22:07:45.000Z | 2022-03-31T16:07:35.000Z | tests/unit/models/test_candidateset_model.py | der-ofenmeister/recommendation-api | e32fb360c5da05df284c3a1e03e5e2e6b993ce66 | [
"Apache-2.0"
] | 2 | 2021-07-25T16:41:32.000Z | 2021-08-06T13:15:28.000Z | import unittest
import json
import os
from app.models.candidate_set import RecItCandidateSet
from app.config import ROOT_DIR
class TestCandidateSetModel(unittest.TestCase):
    """Unit tests for RecItCandidateSet response parsing and id validation."""

    def test_recit_parse(self):
        """The parsed candidate set mirrors the RecIt response fixture."""
        fixture_path = os.path.join(ROOT_DIR, "tests/assets/json/recit_response.json")
        with open(fixture_path) as fixture:
            response = json.load(fixture)
        candidate_set = RecItCandidateSet.parse_recit_response("test-id", response)
        items = response['items']
        self.assertEqual(len(candidate_set.candidates), len(items))
        self.assertEqual(candidate_set.candidates[0].item_id, items[0]["resolved_id"])

    def test_recit_validate_id(self):
        """Only known recit-personalized modules pass validation."""
        self.assertTrue(RecItCandidateSet._verify_candidate_set("recit-personalized/bestof"))
        for bad_id in ("recit-personalized/not-a-real-module",
                       "wrackit-personalized/bestof"):
            self.assertFalse(RecItCandidateSet._verify_candidate_set(bad_id))
| 49.368421 | 105 | 0.765458 |
ace71918f90c33c68a541cf2aba9c1426387318e | 4,164 | py | Python | scripts/lib/node_cache.py | rogersouza/zulip | 6de6b0ed3118820f7823d1575e2c7909ffab4fef | [
"Apache-2.0"
] | 3 | 2018-12-04T01:44:43.000Z | 2019-05-13T06:16:21.000Z | scripts/lib/node_cache.py | hcxiong/zulip | bf22eefedebd50b25f32b22988217c13a89b65d1 | [
"Apache-2.0"
] | 58 | 2018-11-27T15:18:54.000Z | 2018-12-09T13:43:07.000Z | scripts/lib/node_cache.py | hcxiong/zulip | bf22eefedebd50b25f32b22988217c13a89b65d1 | [
"Apache-2.0"
] | 9 | 2019-11-04T18:59:29.000Z | 2022-03-22T17:46:37.000Z |
import os
import hashlib
# Typing names are only needed for mypy's comment-style annotations below;
# the `if False` guard keeps them from being imported at runtime.
if False:
    from typing import Optional, List, IO, Tuple, Any
from scripts.lib.zulip_tools import subprocess_text_output, run
# Repository root (three levels up from this file).
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Root under which the shared npm cache and the vendored yarn live.
ZULIP_SRV_PATH = "/srv"
if 'TRAVIS' in os.environ:
    # In Travis CI, we don't have root access
    ZULIP_SRV_PATH = "/home/travis"
NODE_MODULES_CACHE_PATH = os.path.join(ZULIP_SRV_PATH, 'zulip-npm-cache')
YARN_BIN = os.path.join(ZULIP_SRV_PATH, 'zulip-yarn/bin/yarn')
# Whether yarn installs production-only dependencies by default (see get_yarn_args).
DEFAULT_PRODUCTION = False
def get_yarn_args(production):
    # type: (bool) -> List[str]
    """Return the extra yarn CLI flags for the requested install mode."""
    return ["--prod"] if production else []
def generate_sha1sum_node_modules(setup_dir=None, production=DEFAULT_PRODUCTION):
    # type: (Optional[str], bool) -> str
    """Fingerprint everything that affects the result of `yarn install`.

    The digest covers package.json, yarn.lock (when present), the yarn and
    node versions, and the install flags, so a cache entry keyed on it is
    safe to reuse.
    """
    if setup_dir is None:
        setup_dir = os.path.realpath(os.getcwd())
    package_json_path = os.path.join(setup_dir, 'package.json')
    yarn_lock_path = os.path.join(setup_dir, 'yarn.lock')
    digest = hashlib.sha1()
    digest.update(subprocess_text_output(['cat', package_json_path]).encode('utf8'))
    if os.path.exists(yarn_lock_path):
        # For backwards compatibility, we can't assume yarn.lock exists
        digest.update(subprocess_text_output(['cat', yarn_lock_path]).encode('utf8'))
    for version_cmd in ([YARN_BIN, '--version'], ['node', '--version']):
        digest.update(subprocess_text_output(version_cmd).encode('utf8'))
    install_args = get_yarn_args(production=production)
    digest.update(''.join(sorted(install_args)).encode('utf8'))
    return digest.hexdigest()
def setup_node_modules(production=DEFAULT_PRODUCTION, stdout=None, stderr=None, copy_modules=False,
                       prefer_offline=False):
    # type: (bool, Optional[IO[Any]], Optional[IO[Any]], bool, bool) -> None
    """Ensure node_modules matches package.json/yarn.lock via the shared cache.

    Populates the cache entry with yarn when missing, then points the local
    ``node_modules`` symlink at the cached copy.
    """
    install_args = get_yarn_args(production=production)
    if prefer_offline:
        install_args.append("--prefer-offline")
    cache_key = generate_sha1sum_node_modules(production=production)
    cache_dir = os.path.join(NODE_MODULES_CACHE_PATH, cache_key)
    cached_node_modules = os.path.join(cache_dir, 'node_modules')
    success_stamp = os.path.join(cache_dir, '.success-stamp')
    # Populate the cache entry unless a previous install completed successfully.
    if not os.path.exists(success_stamp):
        do_yarn_install(cache_dir,
                        install_args,
                        success_stamp,
                        stdout=stdout,
                        stderr=stderr,
                        copy_modules=copy_modules)
    print("Using cached node modules from %s" % (cached_node_modules,))
    # Replace the working-tree node_modules with a symlink into the cache.
    run(['rm', '-rf', 'node_modules'], stdout=stdout, stderr=stderr)
    run(['ln', '-nsf', cached_node_modules, 'node_modules'], stdout=stdout, stderr=stderr)
def do_yarn_install(target_path, yarn_args, success_stamp, stdout=None, stderr=None,
                    copy_modules=False):
    # type: (str, List[str], str, Optional[IO[Any]], Optional[IO[Any]], bool) -> None
    """Populate the cache directory at target_path, either by copying an
    existing node_modules tree or by running `yarn install` inside it, and
    touch success_stamp once everything completed without error.
    """
    # Commands are collected first and executed in order at the end.
    cmds = [
        ['mkdir', '-p', target_path],
        ['cp', 'package.json', "yarn.lock", target_path],
    ]
    cached_node_modules = os.path.join(target_path, 'node_modules')
    if copy_modules:
        print("Cached version not found! Copying node modules.")
        cmds.append(["cp", "-rT", "prod-static/serve/node_modules", cached_node_modules])
    else:
        print("Cached version not found! Installing node modules.")
        # Copy the existing node_modules to speed up install
        if os.path.exists("node_modules"):
            cmds.append(["cp", "-R", "node_modules/", cached_node_modules])
        # cd_exec runs the yarn install with target_path as working directory.
        cd_exec = os.path.join(ZULIP_PATH, "scripts/lib/cd_exec")
        if os.environ.get('CUSTOM_CA_CERTIFICATES'):
            cmds.append([YARN_BIN, "config", "set", "cafile", os.environ['CUSTOM_CA_CERTIFICATES']])
        cmds.append([cd_exec, target_path, YARN_BIN, "install", "--non-interactive"] +
                    yarn_args)
    # The stamp is written last, so a failed run leaves no stamp and the
    # install is retried next time.
    cmds.append(['touch', success_stamp])
    for cmd in cmds:
        run(cmd, stdout=stdout, stderr=stderr)
ace719e8187f4cd881309d5ea203c563e6ad4884 | 32,990 | py | Python | pylxd/models/instance.py | k3idii/pylxd | 196df08cbe0d018e8fd1f2c6b79cd6526718d862 | [
"Apache-2.0"
] | 247 | 2015-05-26T21:39:38.000Z | 2022-03-23T23:56:12.000Z | pylxd/models/instance.py | k3idii/pylxd | 196df08cbe0d018e8fd1f2c6b79cd6526718d862 | [
"Apache-2.0"
] | 417 | 2015-05-31T12:57:55.000Z | 2022-03-28T14:35:09.000Z | pylxd/models/instance.py | k3idii/pylxd | 196df08cbe0d018e8fd1f2c6b79cd6526718d862 | [
"Apache-2.0"
] | 170 | 2015-05-31T11:10:59.000Z | 2022-01-18T01:36:17.000Z | # Copyright (c) 2016-2020 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import stat
import time
from typing import IO, NamedTuple
from urllib import parse
from ws4py.client import WebSocketBaseClient
from ws4py.manager import WebSocketManager
from ws4py.messaging import BinaryMessage
from pylxd import managers
from pylxd.exceptions import LXDAPIException
from pylxd.models import _model as model
from pylxd.models.operation import Operation
class InstanceState(model.AttributeDict):
    """Attribute-style wrapper around the JSON returned by the
    instance state endpoint (see Instance.state)."""
class _InstanceExecuteResult(NamedTuple):
    """Result of Instance.execute(): exit code plus the stdout/stderr data."""
    exit_code: int  # value of operation.metadata["return"] reported by LXD
    stdout: IO  # captured stdout websocket data
    stderr: IO  # captured stderr websocket data
class Instance(model.Model):
    """An LXD Instance.
    This class is not intended to be used directly, but rather to be used
    via `Client.instance.create`.
    """
    # Attributes mirrored from the LXD instance representation; the
    # readonly=True ones are server-managed and never written by clients.
    architecture = model.Attribute()
    config = model.Attribute()
    created_at = model.Attribute()
    devices = model.Attribute()
    ephemeral = model.Attribute()
    expanded_config = model.Attribute()
    expanded_devices = model.Attribute()
    name = model.Attribute(readonly=True)
    description = model.Attribute()
    profiles = model.Attribute()
    status = model.Attribute(readonly=True)
    last_used_at = model.Attribute(readonly=True)
    location = model.Attribute(readonly=True)
    type = model.Attribute(readonly=True)
    status_code = model.Attribute(readonly=True)
    stateful = model.Attribute(readonly=True)
    # Manager placeholders; replaced with live managers in __init__.
    snapshots = model.Manager()
    files = model.Manager()
    _endpoint = "instances"
    @property
    def api(self):
        """The client API node scoped to this instance (api[_endpoint][name])."""
        return self.client.api[self._endpoint][self.name]
    class FilesManager:
        """A pseudo-manager for namespacing file operations."""
        def __init__(self, instance):
            # Keep a handle on the owning instance and its /files API node.
            self._instance = instance
            self._endpoint = instance.client.api[instance._endpoint][
                instance.name
            ].files
        def put(self, filepath, data, mode=None, uid=None, gid=None):
            """Push a file to the instance.
            This pushes a single file to the instances file system named by
            the `filepath`.
            :param filepath: The path in the instance to store the data in.
            :type filepath: str
            :param data: The data to store in the file.
            :type data: bytes or str
            :param mode: The unix mode to store the file with. The default of
                None stores the file with the current mask of 0700, which is
                the lxd default.
            :type mode: Union[oct, int, str]
            :param uid: The uid to use inside the instance. Default of None
                results in 0 (root).
            :type uid: int
            :param gid: The gid to use inside the instance. Default of None
                results in 0 (root).
            :type gid: int
            :raises: LXDAPIException if something goes wrong
            """
            headers = self._resolve_headers(mode=mode, uid=uid, gid=gid)
            # `headers or None` avoids sending an empty headers dict.
            response = self._endpoint.post(
                params={"path": filepath}, data=data, headers=headers or None
            )
            if response.status_code == 200:
                return
            raise LXDAPIException(response)
        def mk_dir(self, path, mode=None, uid=None, gid=None):
            """Creates an empty directory on the container.
            This pushes an empty directory to the containers file system
            named by the `filepath`.
            :param path: The path in the container to store the data in.
            :type path: str
            :param mode: The unix mode to store the file with. The default of
                None stores the file with the current mask of 0700, which is
                the lxd default.
            :type mode: Union[oct, int, str]
            :param uid: The uid to use inside the container. Default of None
                results in 0 (root).
            :type uid: int
            :param gid: The gid to use inside the container. Default of None
                results in 0 (root).
            :type gid: int
            :raises: LXDAPIException if something goes wrong
            """
            headers = self._resolve_headers(mode=mode, uid=uid, gid=gid)
            headers["X-LXD-type"] = "directory"
            response = self._endpoint.post(params={"path": path}, headers=headers)
            if response.status_code == 200:
                return
            raise LXDAPIException(response)
        @staticmethod
        def _resolve_headers(headers=None, mode=None, uid=None, gid=None):
            # Build the X-LXD-* headers from the optional mode/uid/gid values.
            if headers is None:
                headers = {}
            if mode is not None:
                if isinstance(mode, int):
                    # Render int modes in octal (0o644 -> "644").
                    mode = format(mode, "o")
                if not isinstance(mode, str):
                    raise ValueError("'mode' parameter must be int or string")
                if not mode.startswith("0"):
                    # LXD expects an octal string with a leading zero.
                    mode = "0{}".format(mode)
                headers["X-LXD-mode"] = mode
            if uid is not None:
                headers["X-LXD-uid"] = str(uid)
            if gid is not None:
                headers["X-LXD-gid"] = str(gid)
            return headers
        def delete_available(self):
            """File deletion is an extension API and may not be available.
            https://github.com/lxc/lxd/blob/master/doc/api-extensions.md#file_delete
            """
            return self._instance.client.has_api_extension("file_delete")
        def delete(self, filepath):
            # Delete a single file inside the instance; requires the
            # "file_delete" API extension (see delete_available).
            self._instance.client.assert_has_api_extension("file_delete")
            response = self._endpoint.delete(params={"path": filepath})
            if response.status_code != 200:
                raise LXDAPIException(response)
        def get(self, filepath):
            # Return the raw bytes of a file inside the instance.
            response = self._endpoint.get(params={"path": filepath}, is_api=False)
            return response.content
        def recursive_put(self, src, dst, mode=None, uid=None, gid=None):
            """Recursively push directory to the instance.
            Recursively pushes directory to the instances
            named by the `dst`
            :param src: The source path of directory to copy.
            :type src: str
            :param dst: The destination path in the instance
                of directory to copy
            :type dst: str
            :param mode: The unix mode to store the file with. The default of
                None stores the file with the current mask of 0700, which is
                the lxd default.
            :type mode: Union[oct, int, str]
            :param uid: The uid to use inside the instance. Default of None
                results in 0 (root).
            :type uid: int
            :param gid: The gid to use inside the instance. Default of None
                results in 0 (root).
            :type gid: int
            :raises: NotADirectoryError if src is not a directory
            :raises: LXDAPIException if an error occurs
            """
            norm_src = os.path.normpath(src)
            if not os.path.isdir(norm_src):
                raise NotADirectoryError("'src' parameter must be a directory ")
            # Length of the source prefix; stripped to rebase paths onto dst.
            idx = len(norm_src)
            dst_items = set()
            for path, dirname, files in os.walk(norm_src):
                dst_path = os.path.normpath(
                    os.path.join(dst, path[idx:].lstrip(os.path.sep))
                )
                # create directory or symlink (depending on what's there)
                if path not in dst_items:
                    dst_items.add(path)
                    headers = self._resolve_headers(mode=mode, uid=uid, gid=gid)
                    # determine what the file is: a directory or a symlink
                    fmode = os.stat(path).st_mode
                    if stat.S_ISLNK(fmode):
                        headers["X-LXD-type"] = "symlink"
                    else:
                        headers["X-LXD-type"] = "directory"
                    # NOTE(review): unlike the file POSTs below, this response
                    # is not checked for errors — confirm if intentional.
                    self._endpoint.post(params={"path": dst_path}, headers=headers)
                # copy files
                for f in files:
                    src_file = os.path.join(path, f)
                    with open(src_file, "rb") as fp:
                        filepath = os.path.join(dst_path, f)
                        headers = self._resolve_headers(mode=mode, uid=uid, gid=gid)
                        response = self._endpoint.post(
                            params={"path": filepath},
                            data=fp.read(),
                            headers=headers or None,
                        )
                        if response.status_code != 200:
                            raise LXDAPIException(response)
        def recursive_get(self, remote_path, local_path):
            """Recursively pulls a directory from the container.
            Pulls the directory named `remote_path` from the container and
            creates a local folder named `local_path` with the
            content of `remote_path`.
            If `remote_path` is a file, it will be copied to `local_path`.
            :param remote_path: The directory path on the container.
            :type remote_path: str
            :param local_path: The path at which the directory will be stored.
            :type local_path: str
            :return:
            :raises: LXDAPIException if an error occurs
            """
            response = self._endpoint.get(params={"path": remote_path}, is_api=False)
            # The X-LXD-type header distinguishes directories from files.
            if "X-LXD-type" in response.headers:
                if response.headers["X-LXD-type"] == "directory":
                    # TODO: We considered using the X-LXD-uid, X-LXD-gid,
                    # and X-LXD-mode header information, but it was
                    # beyond the scope of this change.
                    os.mkdir(local_path)
                    # Directory listings come back as JSON metadata entries.
                    content = json.loads(response.content)
                    if "metadata" in content and content["metadata"]:
                        for file in content["metadata"]:
                            self.recursive_get(
                                os.path.join(remote_path, file),
                                os.path.join(local_path, file),
                            )
                elif response.headers["X-LXD-type"] == "file":
                    with open(local_path, "wb") as f:
                        # TODO: Same thoughts on file permissions as above.
                        f.write(response.content)
@classmethod
def exists(cls, client, name):
"""Determine whether a instance exists."""
try:
getattr(client, cls._endpoint).get(name)
return True
except cls.NotFound:
return False
@classmethod
def get(cls, client, name):
"""Get a instance by name."""
response = client.api[cls._endpoint][name].get()
return cls(client, **response.json()["metadata"])
@classmethod
def all(cls, client):
"""Get all instances.
Instances returned from this method will only have the name
set, as that is the only property returned from LXD. If more
information is needed, `Instance.sync` is the method call
that should be used.
"""
response = client.api[cls._endpoint].get()
instances = []
for url in response.json()["metadata"]:
name = url.split("/")[-1]
instances.append(cls(client, name=name))
return instances
@classmethod
def create(cls, client, config, wait=False, target=None):
"""Create a new instance config.
:param client: client instance
:type client: Client
:param config: The configuration for the new instance.
:type config: dict
:param wait: Whether to wait for async operations to complete.
:type wait: bool
:param target: If in cluster mode, the target member.
:type target: str
:raises LXDAPIException: if something goes wrong.
:returns: an instance if successful
:rtype: :class:`Instance`
"""
response = client.api[cls._endpoint].post(json=config, target=target)
if wait:
client.operations.wait_for_operation(response.json()["operation"])
return cls(client, name=config["name"])
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Replace the class-level Manager placeholders with live managers
        # bound to this instance.
        self.snapshots = managers.SnapshotManager(self.client, self)
        self.files = self.FilesManager(self)
def rename(self, name, wait=False):
"""Rename an instance."""
response = self.api.post(json={"name": name})
if wait:
self.client.operations.wait_for_operation(response.json()["operation"])
self.name = name
    def _set_state(self, state, timeout=30, force=True, wait=False):
        """Request a state change ("start", "stop", ...) via the state endpoint
        and, when waiting, refresh this object from the server afterwards."""
        response = self.api.state.put(
            json={"action": state, "timeout": timeout, "force": force}
        )
        if wait:
            self.client.operations.wait_for_operation(response.json()["operation"])
            # Discard any local, unsaved "status" change: the server-side
            # state now supersedes it.
            if "status" in self.__dirty__:
                self.__dirty__.remove("status")
            if self.ephemeral and state == "stop":
                # Ephemeral instances go away on stop (per LXD semantics),
                # so detach this object rather than syncing a gone instance.
                self.client = None
            else:
                self.sync()
def state(self):
response = self.api.state.get()
state = InstanceState(response.json()["metadata"])
return state
    def start(self, timeout=30, force=True, wait=False):
        """Start the instance.

        :param timeout: timeout for the state change, passed to the LXD API
        :param force: whether to force the state change
        :param wait: block until the background operation completes
        """
        return self._set_state("start", timeout=timeout, force=force, wait=wait)
    def stop(self, timeout=30, force=True, wait=False):
        """Stop the instance.

        :param timeout: timeout for the state change, passed to the LXD API
        :param force: whether to force the state change
        :param wait: block until the background operation completes
        """
        return self._set_state("stop", timeout=timeout, force=force, wait=wait)
    def restart(self, timeout=30, force=True, wait=False):
        """Restart the instance.

        :param timeout: timeout for the state change, passed to the LXD API
        :param force: whether to force the state change
        :param wait: block until the background operation completes
        """
        return self._set_state("restart", timeout=timeout, force=force, wait=wait)
    def freeze(self, timeout=30, force=True, wait=False):
        """Freeze the instance.

        :param timeout: timeout for the state change, passed to the LXD API
        :param force: whether to force the state change
        :param wait: block until the background operation completes
        """
        return self._set_state("freeze", timeout=timeout, force=force, wait=wait)
    def unfreeze(self, timeout=30, force=True, wait=False):
        """Unfreeze the instance.

        :param timeout: timeout for the state change, passed to the LXD API
        :param force: whether to force the state change
        :param wait: block until the background operation completes
        """
        return self._set_state("unfreeze", timeout=timeout, force=force, wait=wait)
    def execute(
        self,
        commands,
        environment=None,
        encoding=None,
        decode=True,
        stdin_payload=None,
        stdin_encoding="utf-8",
        stdout_handler=None,
        stderr_handler=None,
        user=None,
        group=None,
        cwd=None,
    ):
        """Execute a command on the instance. stdout and stderr are buffered if
        no handler is given.
        :param commands: The command and arguments as a list of strings
        :type commands: [str]
        :param environment: The environment variables to pass with the command
        :type environment: {str: str}
        :param encoding: The encoding to use for stdout/stderr if the param
            decode is True. If encoding is None, then no override is
            performed and whatever the existing encoding from LXD is used.
        :type encoding: str
        :param decode: Whether to decode the stdout/stderr or just return the
            raw buffers.
        :type decode: bool
        :param stdin_payload: Payload to pass via stdin
        :type stdin_payload: Can be a file, string, bytearray, generator or
            ws4py Message object
        :param stdin_encoding: Encoding to pass text to stdin (default utf-8)
        :param stdout_handler: Callable that receives as first parameter each
            message received via stdout
        :type stdout_handler: Callable[[str], None]
        :param stderr_handler: Callable that receives as first parameter each
            message received via stderr
        :type stderr_handler: Callable[[str], None]
        :param user: User to run the command as
        :type user: int
        :param group: Group to run the command as
        :type group: int
        :param cwd: Current working directory
        :type cwd: str
        :returns: A tuple of `(exit_code, stdout, stderr)`
        :rtype: _InstanceExecuteResult() namedtuple
        """
        if isinstance(commands, str):
            raise TypeError("First argument must be a list.")
        if environment is None:
            environment = {}
        response = self.api["exec"].post(
            json={
                "command": commands,
                "environment": environment,
                "wait-for-websocket": True,
                "interactive": False,
                "user": user,
                "group": group,
                "cwd": cwd,
            }
        )
        # Secrets for attaching to the operation's stdin ("0"), stdout ("1")
        # and stderr ("2") websockets.
        fds = response.json()["metadata"]["metadata"]["fds"]
        operation_id = Operation.extract_operation_id(response.json()["operation"])
        parsed = parse.urlparse(
            self.client.api.operations[operation_id].websocket._api_endpoint
        )
        with managers.web_socket_manager(WebSocketManager()) as manager:
            # One websocket per stream, each authenticated by its fd secret.
            stdin = _StdinWebsocket(
                self.client.websocket_url,
                payload=stdin_payload,
                encoding=stdin_encoding,
            )
            stdin.resource = "{}?secret={}".format(parsed.path, fds["0"])
            stdin.connect()
            stdout = _CommandWebsocketClient(
                manager,
                self.client.websocket_url,
                encoding=encoding,
                decode=decode,
                handler=stdout_handler,
            )
            stdout.resource = "{}?secret={}".format(parsed.path, fds["1"])
            stdout.connect()
            stderr = _CommandWebsocketClient(
                manager,
                self.client.websocket_url,
                encoding=encoding,
                decode=decode,
                handler=stderr_handler,
            )
            stderr.resource = "{}?secret={}".format(parsed.path, fds["2"])
            stderr.connect()
            manager.start()
            # watch for the end of the command: poll the operation until LXD
            # publishes the exit code under metadata["return"]
            while True:
                operation = self.client.operations.get(operation_id)
                if "return" in operation.metadata:
                    break
                time.sleep(0.5)  # pragma: no cover
            # Closing a websocket can raise BrokenPipeError if the peer is
            # already gone; that is fine during shutdown.
            try:
                stdin.close()
            except BrokenPipeError:
                pass
            stdout.finish_soon()
            stderr.finish_soon()
            try:
                manager.close_all()
            except BrokenPipeError:
                pass
            # Wait for both output streams to drain before tearing down.
            while not stdout.finished or not stderr.finished:
                time.sleep(0.1)  # pragma: no cover
            manager.stop()
            manager.join()
            # NOTE(review): .data appears to hold the buffered stream
            # contents — defined in _CommandWebsocketClient; confirm there.
            return _InstanceExecuteResult(
                operation.metadata["return"], stdout.data, stderr.data
            )
    def raw_interactive_execute(
        self, commands, environment=None, user=None, group=None, cwd=None
    ):
        """Execute a command on the instance interactively and returns
        urls to websockets. The urls contain a secret uuid, and can be accessed
        without further authentication. The caller has to open and manage
        the websockets themselves.
        :param commands: The command and arguments as a list of strings
            (most likely a shell)
        :type commands: [str]
        :param environment: The environment variables to pass with the command
        :type environment: {str: str}
        :param user: User to run the command as
        :type user: int
        :param group: Group to run the command as
        :type group: int
        :param cwd: Current working directory
        :type cwd: str
        :returns: Two urls to an interactive websocket and a control socket
        :rtype: {'ws':str,'control':str}
        """
        if isinstance(commands, str):
            raise TypeError("First argument must be a list.")
        if environment is None:
            environment = {}
        response = self.api["exec"].post(
            json={
                "command": commands,
                "environment": environment,
                "wait-for-websocket": True,
                "interactive": True,
                "user": user,
                "group": group,
                "cwd": cwd,
            }
        )
        # In interactive mode there is one combined terminal socket ("0")
        # plus a "control" socket.
        fds = response.json()["metadata"]["metadata"]["fds"]
        # The operation id is the last path component, minus the query string.
        operation_id = response.json()["operation"].split("/")[-1].split("?")[0]
        parsed = parse.urlparse(
            self.client.api.operations[operation_id].websocket._api_endpoint
        )
        return {
            "ws": "{}?secret={}".format(parsed.path, fds["0"]),
            "control": "{}?secret={}".format(parsed.path, fds["control"]),
        }
    def migrate(self, new_client, live=False, wait=False):
        """Migrate an instance to another LXD host.

        Destination host information is contained in the client
        connection passed in.

        If the `live` param is True, then a live migration is attempted,
        otherwise a non-live one is run.

        If the instance is running, for live migration it either must be
        shut down first or criu must be installed on the source and
        destination machines and the `live` param should be True.

        :param new_client: the pylxd client connection to migrate the
            instance to.
        :type new_client: :class:`pylxd.client.Client`
        :param live: whether to perform a live migration
        :type live: bool
        :param wait: if True, wait for the migration to complete
        :type wait: bool
        :raises: LXDAPIException if any of the API calls fail.
        :raises: ValueError if the source is a local (unix socket)
            connection.
        :returns: the response from LXD of the new instance (the target of
            the migration and not the operation if waited on.)
        :rtype: :class:`requests.Response`
        """
        # Migration is a pull by the target, so the source must be reachable
        # over the network, not via a local unix socket.
        if self.api.scheme in ("http+unix",):
            raise ValueError("Cannot migrate from a local client connection")
        # 103 is the LXD "Running" status code. A running instance may have
        # already disappeared from the source by the time the create call
        # reports 103; in that case treat the migration as complete and
        # return the instance fetched from the target.
        if self.status_code == 103:
            try:
                res = getattr(new_client, self._endpoint).create(
                    self.generate_migration_data(live), wait=wait
                )
            except LXDAPIException as e:
                if e.response.status_code == 103:
                    self.delete()
                    return getattr(new_client, self._endpoint).get(self.name)
                else:
                    raise e
        else:
            res = getattr(new_client, self._endpoint).create(
                self.generate_migration_data(live), wait=wait
            )
        # Remove the source copy once the target has it.
        self.delete()
        return res
def generate_migration_data(self, live=False):
"""Generate the migration data.
This method can be used to handle migrations where the client
connection uses the local unix socket. For more information on
migration, see `Instance.migrate`.
:param live: Whether to include "live": "true" in the migration
:type live: bool
:raises: LXDAPIException if the request to migrate fails
:returns: dictionary of migration data suitable to send to an new
client to complete a migration.
:rtype: Dict[str, ANY]
"""
self.sync() # Make sure the object isn't stale
_json = {"migration": True}
if live:
_json["live"] = True
response = self.api.post(json=_json)
operation = self.client.operations.get(response.json()["operation"])
operation_url = self.client.api.operations[operation.id]._api_endpoint
secrets = response.json()["metadata"]["metadata"]
cert = self.client.host_info["environment"]["certificate"]
return {
"name": self.name,
"architecture": self.architecture,
"config": self.config,
"devices": self.devices,
"epehemeral": self.ephemeral,
"default": self.profiles,
"source": {
"type": "migration",
"operation": operation_url,
"mode": "pull",
"certificate": cert,
"secrets": secrets,
},
}
def publish(self, public=False, wait=False):
"""Publish a instance as an image.
The instance must be stopped in order publish it as an image. This
method does not enforce that constraint, so a LXDAPIException may be
raised if this method is called on a running instance.
If wait=True, an Image is returned.
"""
data = {
"public": public,
"source": {
"type": self.type,
"name": self.name,
},
}
response = self.client.api.images.post(json=data)
if wait:
operation = self.client.operations.wait_for_operation(
response.json()["operation"]
)
return self.client.images.get(operation.metadata["fingerprint"])
def restore_snapshot(self, snapshot_name, wait=False):
"""Restore a snapshot using its name.
Attempts to restore a instance using a snapshot previously made. The
instance should be stopped, but the method does not enforce this
constraint, so an LXDAPIException may be raised if this method fails.
:param snapshot_name: the name of the snapshot to restore from
:type snapshot_name: str
:param wait: wait until the operation is completed.
:type wait: boolean
:raises: LXDAPIException if the the operation fails.
:returns: the original response from the restore operation (not the
operation result)
:rtype: :class:`requests.Response`
"""
response = self.api.put(json={"restore": snapshot_name})
if wait:
self.client.operations.wait_for_operation(response.json()["operation"])
return response
class _CommandWebsocketClient(WebSocketBaseClient): # pragma: no cover
    """Handle a websocket for instance.execute(...) and manage decoding of the
    returned values depending on 'decode' and 'encoding' parameters.
    """
    def __init__(self, manager, *args, **kwargs):
        # Pop this class's own options out of kwargs before handing the rest
        # to the websocket base class.
        self.manager = manager
        self.decode = kwargs.pop("decode", True)
        self.encoding = kwargs.pop("encoding", None)
        self.handler = kwargs.pop("handler", None)
        # Encoding reported by the first encoded message received, used as a
        # fallback when no explicit 'encoding' was requested.
        self.message_encoding = None
        # finish_off is set by finish_soon(); finished flips once the stream
        # is known to be fully drained.
        self.finish_off = False
        self.finished = False
        self.last_message_empty = False
        self.buffer = []
        super().__init__(*args, **kwargs)
    def handshake_ok(self):
        # Register with the manager (which pumps this socket) and drop any
        # data captured before the handshake completed.
        self.manager.add(self)
        self.buffer = []
    def received_message(self, message):
        # An empty message marks the end of a stream: if finish_soon() was
        # already called, the stream is finished now.
        if message.data is None or len(message.data) == 0:
            self.last_message_empty = True
            if self.finish_off:
                self.finished = True
            return
        else:
            self.last_message_empty = False
        if message.encoding and self.message_encoding is None:
            self.message_encoding = message.encoding
        if self.handler:
            # Streaming mode: hand each chunk to the callback immediately.
            self.handler(self._maybe_decode(message.data))
        else:
            # Buffering mode: accumulate chunks for the .data property.
            self.buffer.append(message.data)
        if self.finish_off and isinstance(message, BinaryMessage):
            self.finished = True
    def closed(self, code, reason=None):
        # Socket closed by the peer: nothing more will arrive.
        self.finished = True
    def finish_soon(self):
        # Request termination after the next end-of-stream message; if the
        # last message seen was already empty, we are finished right away.
        self.finish_off = True
        if self.last_message_empty:
            self.finished = True
    def _maybe_decode(self, buffer):
        # Decode bytes to str according to the requested or discovered
        # encoding; return raw bytes when decoding is disabled.
        if self.decode and buffer is not None:
            if self.encoding:
                return buffer.decode(self.encoding)
            if self.message_encoding:
                return buffer.decode(self.message_encoding)
            # This is the backwards compatible "always decode to utf-8"
            return buffer.decode("utf-8")
        return buffer
    @property
    def data(self):
        """All buffered output, decoded per the decode/encoding options."""
        buffer = b"".join(self.buffer)
        return self._maybe_decode(buffer)
class _StdinWebsocket(WebSocketBaseClient): # pragma: no cover
    """A websocket client for handling stdin.

    Allows communicating with instance commands via stdin: the optional
    payload (a string, bytes, or a file-like object) is sent to the remote
    process as soon as the handshake completes.
    """
    def __init__(self, url, payload=None, **kwargs):
        # 'encoding' is our own option; pop it before the base class sees it.
        self.encoding = kwargs.pop("encoding", None)
        self.payload = payload
        super().__init__(url, **kwargs)
    def _smart_encode(self, msg):
        """Encode str payloads when an encoding was given; pass bytes through."""
        # Bug fix: use isinstance() instead of comparing type(msg) == str,
        # so str subclasses are encoded too.
        if isinstance(msg, str) and self.encoding:
            return msg.encode(self.encoding)
        return msg
    def handshake_ok(self):
        if self.payload:
            if hasattr(self.payload, "read"):
                # File-like payload: stream it line by line.
                self.send(
                    (self._smart_encode(line) for line in self.payload), binary=True
                )
            else:
                self.send(self._smart_encode(self.payload), binary=True)
        # An empty non-binary message signals end-of-input to the server.
        self.send(b"", binary=False)
class Snapshot(model.Model):
    """An instance snapshot."""
    # Snapshot attributes as exposed by the LXD API.
    name = model.Attribute()
    created_at = model.Attribute()
    stateful = model.Attribute()
    # The parent instance this snapshot belongs to.
    instance = model.Parent()
    @property
    def api(self):
        """The API node for this snapshot, under its parent instance."""
        return self.client.api[self.instance._endpoint][self.instance.name].snapshots[
            self.name
        ]
    @classmethod
    def get(cls, client, instance, name):
        """Fetch a single snapshot of *instance* by name."""
        response = client.api[instance._endpoint][instance.name].snapshots[name].get()
        snapshot = cls(client, instance=instance, **response.json()["metadata"])
        # Snapshot names are namespaced in LXD, as
        # instance-name/snapshot-name. We hide that implementation
        # detail.
        snapshot.name = snapshot.name.split("/")[-1]
        return snapshot
    @classmethod
    def all(cls, client, instance):
        """List all snapshots of *instance* (only the name is populated)."""
        response = client.api[instance._endpoint][instance.name].snapshots.get()
        return [
            cls(client, name=snapshot.split("/")[-1], instance=instance)
            for snapshot in response.json()["metadata"]
        ]
    @classmethod
    def create(cls, client, instance, name, stateful=False, wait=False):
        """Create a new snapshot of *instance*, optionally waiting for it."""
        response = client.api[instance._endpoint][instance.name].snapshots.post(
            json={"name": name, "stateful": stateful}
        )
        snapshot = cls(client, instance=instance, name=name)
        if wait:
            client.operations.wait_for_operation(response.json()["operation"])
        return snapshot
    def rename(self, new_name, wait=False):
        """Rename a snapshot."""
        response = self.api.post(json={"name": new_name})
        if wait:
            self.client.operations.wait_for_operation(response.json()["operation"])
        # Keep the local object in sync with the server-side rename.
        self.name = new_name
    def publish(self, public=False, wait=False):
        """Publish a snapshot as an image.

        If wait=True, an Image is returned.

        This functionality is currently broken in LXD. Please see
        https://github.com/lxc/lxd/issues/2201 - The implementation
        here is mostly a guess. Once that bug is fixed, we can verify
        that this works, or file a bug to fix it appropriately.
        """
        data = {
            "public": public,
            "source": {
                "type": "snapshot",
                "name": "{}/{}".format(self.instance.name, self.name),
            },
        }
        response = self.client.api.images.post(json=data)
        if wait:
            operation = self.client.operations.wait_for_operation(
                response.json()["operation"]
            )
            return self.client.images.get(operation.metadata["fingerprint"])
    def restore(self, wait=False):
        """Restore this snapshot.

        Attempts to restore an instance using this snapshot. The instance
        should be stopped, but the method does not enforce this constraint, so
        an LXDAPIException may be raised if this method fails.

        :param wait: wait until the operation is completed.
        :type wait: boolean
        :raises: LXDAPIException if the operation fails.
        :returns: the original response from the restore operation (not the
            operation result)
        :rtype: :class:`requests.Response`
        """
        return self.instance.restore_snapshot(self.name, wait)
| 37.91954 | 86 | 0.582904 |
ace71a12a9caeb7de4aaeaa33a6d81cc6a8f0949 | 2,620 | py | Python | users/models.py | gbleigh5/Library-backend | 3ab938a17411c06b68285a45a8b535ba05afb387 | [
"CC0-1.0"
] | null | null | null | users/models.py | gbleigh5/Library-backend | 3ab938a17411c06b68285a45a8b535ba05afb387 | [
"CC0-1.0"
] | 6 | 2021-03-19T01:06:25.000Z | 2021-09-22T18:47:10.000Z | users/models.py | gbleigh5/Library-backend | 3ab938a17411c06b68285a45a8b535ba05afb387 | [
"CC0-1.0"
] | null | null | null | from django.contrib.auth.models import (
AbstractBaseUser, BaseUserManager, PermissionsMixin
)
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from books.models import Book
class UserManager(BaseUserManager):
    """Manager for the custom :class:`User` model, keyed by email."""

    def create_user(self, email, first_name, last_name, password, phone,
                    commit=True):
        """Create and return a regular user.

        :param commit: if True, save the user to the database immediately.
        :raises ValueError: if email, first_name or last_name is missing.
        """
        if not email:
            raise ValueError(_('Users must have an email address'))
        if not first_name:
            raise ValueError(_('Users must have a first name'))
        if not last_name:
            raise ValueError(_('Users must have a last name'))
        user = self.model(
            email=self.normalize_email(email),
            first_name=first_name,
            last_name=last_name,
            phone=phone
        )
        user.set_password(password)
        if commit:
            user.save(using=self._db)
        return user

    def create_superuser(self, email, first_name, last_name, password, phone):
        """Create and return a superuser.

        Uses commit=False so the row is written only once, after the
        staff/superuser flags are set (the previous implementation saved
        the user twice).
        """
        user = self.create_user(
            email,
            password=password,
            first_name=first_name,
            last_name=last_name,
            phone=phone,
            commit=False,
        )
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self._db)
        return user
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model that authenticates by email instead of username."""
    email = models.EmailField(max_length=255, unique=True)
    first_name = models.CharField(max_length=30, blank=True)
    last_name = models.CharField(max_length=150, blank=True)
    # NOTE(review): an IntegerField cannot store leading zeros or '+' country
    # prefixes; a CharField is the usual choice for phone numbers -- confirm.
    phone = models.IntegerField()
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    date_joined = models.DateTimeField(default=timezone.now)
    # Use the custom manager so create_user/create_superuser validate fields.
    objects = UserManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['first_name', 'last_name', 'phone'] # Email & Password are required by default.
    def get_full_name(self):
        """Return "first last" with surrounding whitespace stripped."""
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()
    def __str__(self):
        return '{} <{}>'.format(self.get_full_name(), self.email)
    def has_perm(self, perm, obj=None):
        "Does the user have a specific permission?"
        # Simplest possible answer: Yes, always
        return True
class BorrowedBook(models.Model):
    """A record of a user borrowing a book for a pickup/return date range."""
    book = models.ForeignKey(Book, on_delete=models.DO_NOTHING)
    user = models.ForeignKey(User, related_name='borrowed_books', on_delete=models.CASCADE)
    # NOTE(review): field name keeps its original inconsistent casing to avoid
    # a database migration; renaming to date_of_pickup would need one.
    date_of_Pickup = models.DateTimeField()
    date_of_return = models.DateTimeField()

    def __str__(self):
        # Bug fix: the method was misspelled `_str_` (never invoked by
        # Python) and returned `self.title`, an attribute this model does
        # not define. Delegate to the related Book's own representation.
        return str(self.book)
| 32.75 | 103 | 0.661069 |
ace71a39c9e35795066b8d51e13e1e260cb8db9b | 5,527 | py | Python | karabo_data/geometry2/crystfel_fmt.py | zhujun98/karabo_data | 68ee19d52cd7f140052d029545a7b6169ec9752a | [
"BSD-3-Clause"
] | 13 | 2018-05-03T08:41:06.000Z | 2021-03-21T01:47:26.000Z | karabo_data/geometry2/crystfel_fmt.py | zhujun98/karabo_data | 68ee19d52cd7f140052d029545a7b6169ec9752a | [
"BSD-3-Clause"
] | 175 | 2018-04-27T12:48:37.000Z | 2021-11-26T10:16:14.000Z | karabo_data/geometry2/crystfel_fmt.py | zhujun98/karabo_data | 68ee19d52cd7f140052d029545a7b6169ec9752a | [
"BSD-3-Clause"
] | 7 | 2018-05-03T14:49:44.000Z | 2020-08-21T07:13:48.000Z | """Write geometry in CrystFEL format.
"""
from itertools import product
import numpy as np
# File-level header emitted once per .geom file; the placeholders are filled
# in by write_crystfel_geom().
HEADER_TEMPLATE = """\
; AGIPD-1M geometry file written by karabo_data {version}
; You may need to edit this file to add:
; - data and mask locations in the file
; - mask_good & mask_bad values to interpret the mask
; - adu_per_eV & photon_energy
; - clen (detector distance)
;
; See: http://www.desy.de/~twhite/crystfel/manual-crystfel_geometry.html
{paths}
{frame_dim}
res = {resolution} ; pixels per metre
; Beam energy in eV
{photon_energy}
; Camera length, aka detector distance
{clen}
; Analogue Digital Units per eV
{adu_per_ev}
"""
# Per-panel entry; one of these is emitted for every (module, tile) fragment
# by frag_to_crystfel().
PANEL_TEMPLATE = """
{dims}
{name}/min_fs = {min_fs}
{name}/min_ss = {min_ss}
{name}/max_fs = {max_fs}
{name}/max_ss = {max_ss}
{name}/fs = {fs_vec}
{name}/ss = {ss_vec}
{name}/corner_x = {corner_x}
{name}/corner_y = {corner_y}
{name}/coffset = {coffset}
"""
def _crystfel_format_vec(vec):
"""Convert an array of 3 numbers to CrystFEL format like "+1.0x -0.1y"
"""
s = '{:+}x {:+}y'.format(*vec[:2])
if vec[2] != 0:
s += ' {:+}z'.format(vec[2])
return s
def frag_to_crystfel(fragment, p, a, ss_slice, fs_slice, dims, pixel_size):
    """Render one tile (module p, tile a) as a CrystFEL panel entry.

    Positions and vectors are converted from metres to pixel units.
    """
    tile_name = 'p{}a{}'.format(p, a)
    corner = fragment.corner_pos / pixel_size
    dim_lines = []
    for num, value in dims.items():
        # The 'modno' dimension is bound to this tile's module number.
        key = p if value == 'modno' else value
        dim_lines.append('{}/dim{} = {}'.format(tile_name, num, key))
    return PANEL_TEMPLATE.format(
        dims='\n'.join(dim_lines),
        name=tile_name,
        min_ss=ss_slice.start,
        max_ss=ss_slice.stop - 1,
        min_fs=fs_slice.start,
        max_fs=fs_slice.stop - 1,
        ss_vec=_crystfel_format_vec(fragment.ss_vec / pixel_size),
        fs_vec=_crystfel_format_vec(fragment.fs_vec / pixel_size),
        corner_x=corner[0],
        corner_y=corner[1],
        coffset=corner[2],
    )
def write_crystfel_geom(self, filename, *,
                        data_path='/entry_1/instrument_1/detector_1/data',
                        mask_path=None, dims=('frame', 'modno', 'ss', 'fs'),
                        adu_per_ev=None, clen=None, photon_energy=None):
    """Write this geometry to a CrystFEL format (.geom) geometry file.

    :param filename: path of the .geom file to create.
    :param data_path: HDF5 path of the detector data inside the data files.
    :param mask_path: optional HDF5 path of the mask dataset.
    :param dims: names of the data dimensions; must include 'frame', and
        'ss'/'fs' for the slow- and fast-scan axes. 'modno' is optional.
    :param adu_per_ev: analogue digital units per eV; written as a
        commented-out "SET ME" placeholder when omitted.
    :param clen: camera length (detector distance); placeholder when omitted.
    :param photon_energy: beam energy in eV; placeholder when omitted.
    """
    from .. import __version__
    # Each optional header value falls back to a commented-out "SET ME"
    # line so the user can fill it in by hand.
    if adu_per_ev is None:
        adu_per_ev_str = '; adu_per_eV = SET ME'
        # TODO: adu_per_ev should be fixed for each detector, we should
        # find out the values and set them.
    else:
        adu_per_ev_str = 'adu_per_eV = {}'.format(adu_per_ev)
    if clen is None:
        clen_str = '; clen = SET ME'
    else:
        clen_str = 'clen = {}'.format(clen)
    if photon_energy is None:
        photon_energy_str = '; photon_energy = SET ME'
    else:
        photon_energy_str = 'photon_energy = {}'.format(photon_energy)
    # Get the frame dimension; the remaining named dims apply per tile.
    tile_dims = {}
    frame_dim = None
    for nn, dim_name in enumerate(dims):
        if dim_name == 'frame':
            frame_dim = 'dim{} = %'.format(nn)
        else:
            tile_dims[nn] = dim_name
    if frame_dim is None:
        raise ValueError('No frame dimension given')
    # One panel entry per (module, tile) fragment.
    panel_chunks = []
    for p, module in enumerate(self.modules):
        for a, fragment in enumerate(module):
            ss_slice, fs_slice = self._tile_slice(a)
            if 'modno' not in dims:
                # If we don't have a modno dimension, assume modules are
                # concatenated along the slow-scan dim, e.g. AGIPD (8192, 128)
                module_offset = p * self.expected_data_shape[1]
                ss_slice = slice(
                    ss_slice.start + module_offset,
                    ss_slice.stop + module_offset
                )
            panel_chunks.append(frag_to_crystfel(
                fragment, p, a, ss_slice, fs_slice, tile_dims, self.pixel_size
            ))
    resolution = 1.0 / self.pixel_size # Pixels per metre
    paths = dict(data=data_path)
    if mask_path:
        paths['mask'] = mask_path
    path_str = '\n'.join('{} = {} ;'.format(i, j) for i, j in paths.items())
    with open(filename, 'w') as f:
        f.write(HEADER_TEMPLATE.format(
            version=__version__,
            paths=path_str,
            frame_dim=frame_dim,
            resolution=resolution,
            adu_per_ev=adu_per_ev_str,
            clen=clen_str,
            photon_energy=photon_energy_str
        ))
        # Rigid-group definitions go between the header and the panels.
        rigid_groups = get_rigid_groups(self)
        f.write(rigid_groups)
        for chunk in panel_chunks:
            f.write(chunk)
def get_rigid_groups(geom, nquads=4):
    """Create string for rigid groups definition.

    Tiles are grouped per quadrant (q0..q{nquads-1}) and per module
    (p0..pN), plus two group collections combining them.
    """
    quads = ','.join('q{}'.format(q) for q in range(nquads))
    modules = ','.join('p{}'.format(p) for p in range(geom.n_modules))
    tiles = [
        'p{}a{}'.format(p, a)
        for p, a in product(range(geom.n_modules), range(geom.n_tiles_per_module))
    ]
    lines = ['']
    for nn, chunk in enumerate(np.array_split(tiles, nquads)):
        lines.append('rigid_group_q{} = {}'.format(nn, ','.join(chunk)))
    lines.append('')
    for nn, chunk in enumerate(np.array_split(tiles, geom.n_modules)):
        lines.append('rigid_group_p{} = {}'.format(nn, ','.join(chunk)))
    lines.append('')
    lines.append('rigid_group_collection_quadrants = {}'.format(quads))
    lines.append('rigid_group_collection_asics = {}\n\n'.format(modules))
    return '\n'.join(lines)
| 31.403409 | 84 | 0.606839 |
ace71a5ac697d8764d469274b5e5a150ed8c580a | 6,403 | py | Python | keras/distribute/keras_premade_models_test.py | ahmedopolis/keras | 4c87dc9685eea2ed20111f9604b10d627b17f032 | [
"Apache-2.0"
] | null | null | null | keras/distribute/keras_premade_models_test.py | ahmedopolis/keras | 4c87dc9685eea2ed20111f9604b10d627b17f032 | [
"Apache-2.0"
] | null | null | null | keras/distribute/keras_premade_models_test.py | ahmedopolis/keras | 4c87dc9685eea2ed20111f9604b10d627b17f032 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for keras premade models using tf.distribute.Strategy."""
from absl.testing import parameterized
from keras.engine import sequential
from keras.layers import core
from keras.optimizers.optimizer_v2 import adagrad
from keras.optimizers.optimizer_v2 import gradient_descent
from keras.premade_models import linear
from keras.premade_models import wide_deep
from keras.utils import dataset_creator
import numpy as np
import tensorflow.compat.v2 as tf
def strategy_combinations_eager_data_fn():
  """Return eager-mode test combinations of strategy x input mode x data."""
  # Cross product of distribution strategies with both input styles
  # (DatasetCreator vs direct data) and both data formats.
  return tf.__internal__.test.combinations.combine(
      distribution=[
          tf.__internal__.distribute.combinations.default_strategy,
          tf.__internal__.distribute.combinations.one_device_strategy,
          tf.__internal__.distribute.combinations.one_device_strategy_gpu,
          tf.__internal__.distribute.combinations
          .mirrored_strategy_with_gpu_and_cpu,
          tf.__internal__.distribute.combinations
          .mirrored_strategy_with_two_gpus,
          tf.__internal__.distribute.combinations
          .mirrored_strategy_with_two_gpus_no_merge_call,
          tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_cpu,
          tf.__internal__.distribute.combinations.multi_worker_mirrored_2x1_gpu,
          tf.__internal__.distribute.combinations.multi_worker_mirrored_2x2_gpu,
          tf.__internal__.distribute.combinations
          .parameter_server_strategy_1worker_2ps_cpu,
          tf.__internal__.distribute.combinations
          .parameter_server_strategy_1worker_2ps_1gpu,
          # NOTE: TPUStrategy not tested because the models in this test are
          # sparse and do not work with TPUs.
      ],
      use_dataset_creator=[True, False],
      mode=['eager'],
      data_fn=['numpy', 'dataset'])
INPUT_SIZE = 64
BATCH_SIZE = 10


def get_numpy():
  """Return (inputs, targets) for the toy regression 0.3*x0 + 0.2*x1."""
  features = np.random.uniform(
      low=-5., high=5., size=(INPUT_SIZE, 2)).astype(np.float32)
  targets = features[:, 0] * .3 + features[:, 1] * .2
  return features, targets
def get_dataset(input_context=None, batch_size=None):
  """Build a batched, repeated tf.data pipeline over the toy data."""
  features, targets = get_numpy()
  ds = tf.data.Dataset.from_tensor_slices((features, targets))
  if input_context:
    # Shard so that each input pipeline sees a disjoint slice of the data.
    ds = ds.shard(input_context.num_input_pipelines,
                  input_context.input_pipeline_id)
  effective_batch = BATCH_SIZE if batch_size is None else batch_size
  return ds.batch(effective_batch).repeat(200)
# A `dataset_fn` is required for `Model.fit` to work across all strategies.
def dataset_fn(input_context):
  """Per-worker dataset factory: shards data, per-replica batch size."""
  per_replica = input_context.get_per_replica_batch_size(
      global_batch_size=BATCH_SIZE)
  return get_dataset(input_context, per_replica)
class KerasPremadeModelsTest(tf.test.TestCase, parameterized.TestCase):
  """Trains the premade Linear and WideDeep models under each strategy."""
  @tf.__internal__.distribute.combinations.generate(
      strategy_combinations_eager_data_fn())
  def test_linear_model(self, distribution, use_dataset_creator, data_fn):
    # ParameterServerStrategy only supports Model.fit with a DatasetCreator,
    # and only under eager execution.
    if ((not use_dataset_creator) and isinstance(
        distribution, tf.distribute.experimental.ParameterServerStrategy)):
      self.skipTest(
          'Parameter Server strategy requires dataset creator to be used in '
          'model.fit.')
    if (not tf.__internal__.tf2.enabled() and use_dataset_creator
        and isinstance(distribution,
                       tf.distribute.experimental.ParameterServerStrategy)):
      self.skipTest(
          'Parameter Server strategy with dataset creator needs to be run when '
          'eager execution is enabled.')
    with distribution.scope():
      model = linear.LinearModel()
      opt = gradient_descent.SGD(learning_rate=0.1)
      model.compile(opt, 'mse')
      if use_dataset_creator:
        x = dataset_creator.DatasetCreator(dataset_fn)
        hist = model.fit(x, epochs=3, steps_per_epoch=INPUT_SIZE)
      else:
        if data_fn == 'numpy':
          inputs, output = get_numpy()
          hist = model.fit(inputs, output, epochs=3)
        else:
          hist = model.fit(get_dataset(), epochs=3)
      # The linear target is exactly learnable, so loss should drop quickly.
      self.assertLess(hist.history['loss'][2], 0.2)
  @tf.__internal__.distribute.combinations.generate(
      strategy_combinations_eager_data_fn())
  def test_wide_deep_model(self, distribution, use_dataset_creator, data_fn):
    # Same ParameterServerStrategy restrictions as test_linear_model above.
    if ((not use_dataset_creator) and isinstance(
        distribution, tf.distribute.experimental.ParameterServerStrategy)):
      self.skipTest(
          'Parameter Server strategy requires dataset creator to be used in '
          'model.fit.')
    if (not tf.__internal__.tf2.enabled() and use_dataset_creator
        and isinstance(distribution,
                       tf.distribute.experimental.ParameterServerStrategy)):
      self.skipTest(
          'Parameter Server strategy with dataset creator needs to be run when '
          'eager execution is enabled.')
    with distribution.scope():
      linear_model = linear.LinearModel(units=1)
      dnn_model = sequential.Sequential([core.Dense(units=1)])
      wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
      # WideDeepModel takes one optimizer per sub-model: [linear, dnn].
      linear_opt = gradient_descent.SGD(learning_rate=0.05)
      dnn_opt = adagrad.Adagrad(learning_rate=0.1)
      wide_deep_model.compile(optimizer=[linear_opt, dnn_opt], loss='mse')
      if use_dataset_creator:
        x = dataset_creator.DatasetCreator(dataset_fn)
        hist = wide_deep_model.fit(x, epochs=3, steps_per_epoch=INPUT_SIZE)
      else:
        if data_fn == 'numpy':
          inputs, output = get_numpy()
          hist = wide_deep_model.fit(inputs, output, epochs=3)
        else:
          hist = wide_deep_model.fit(get_dataset(), epochs=3)
      self.assertLess(hist.history['loss'][2], 0.2)
if __name__ == '__main__':
  # Multi-worker strategy cases must run under TF's multi-process runner.
  tf.__internal__.distribute.multi_process_runner.test_main()
| 41.309677 | 80 | 0.710448 |
ace71b262d108165cf1f0202424a0705d71bf563 | 4,819 | py | Python | research/nlp/senta/src/data/data_set_reader/ernie_onesentclassification_dataset_reader_en.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | research/nlp/senta/src/data/data_set_reader/ernie_onesentclassification_dataset_reader_en.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | research/nlp/senta/src/data/data_set_reader/ernie_onesentclassification_dataset_reader_en.py | leelige/mindspore | 5199e05ba3888963473f2b07da3f7bca5b9ef6dc | [
"Apache-2.0"
] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
:py:class:`BasicDataSetReader`
"""
import csv
import os
import logging
from collections import namedtuple
import numpy as np
from src.common.register import RegisterSet
from src.data.data_set_reader.basic_dataset_reader import BasicDataSetReader
@RegisterSet.data_set_reader.register
class OneSentClassifyReaderEn(BasicDataSetReader):
"""BasicDataSetReader:一个基础的data_set_reader,实现了文件读取,id序列化,token embedding化等基本操作
"""
def __init__(self, name, fields, config):
BasicDataSetReader.__init__(self, name, fields, config)
self.trainer_id = 0
self.trainer_nums = 1
if "train" in self.name or "predict" in self.name:
self.dev_count = self.trainer_nums
elif "dev" in self.name or "test" in self.name:
self.dev_count = 1
use_multi_gpu_test = True
if use_multi_gpu_test:
self.dev_count = min(self.trainer_nums, 8)
else:
logging.error("the phase must be train, eval or test !")
def read_files(self, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
try:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
headers = next(reader)
text_indices = [
index for index, h in enumerate(headers) if h != "label"
]
Example = namedtuple('Example', headers)
examples = []
# i = 0
for line in reader:
for index, text in enumerate(line):
if index in text_indices:
line[index] = text # .replace(' ', '')
example = Example(*line)
examples.append(example)
return examples
except IOError:
logging.error("error in read tsv")
def data_generator(self):
"""
:return:
"""
assert os.path.isdir(self.config.data_path), "%s must be a directory that stores data files" \
% self.config.data_path
data_files = os.listdir(self.config.data_path)
def wrapper():
"""
:return:
"""
all_dev_batches = []
for epoch_index in range(self.config.epoch):
self.current_example = 0
self.current_epoch = epoch_index
self.global_rng = np.random.RandomState(epoch_index)
for input_file in data_files:
examples = self.read_files(os.path.join(
self.config.data_path, input_file))
if self.config.shuffle:
self.global_rng.shuffle(examples)
for batch_data in self.prepare_batch_data(
examples, self.config.batch_size):
if len(all_dev_batches) < self.dev_count:
all_dev_batches.append(batch_data)
if len(all_dev_batches) == self.dev_count:
# trick: handle batch inconsistency caused by data
# sharding for each trainer
yield all_dev_batches[self.trainer_id]
all_dev_batches = []
if "train" not in self.name:
if self.trainer_id < len(all_dev_batches):
yield all_dev_batches[self.trainer_id]
return wrapper
def serialize_batch_records(self, batch_records):
"""
:param batch_records:
:return:
"""
return_list = []
example = batch_records[0]
for index in range(len(example._fields)):
text_batch = []
for record in batch_records:
text_batch.append(record[index])
id_list = self.fields[index].field_reader.convert_texts_to_ids(
text_batch)
return_list.extend(id_list)
return return_list
| 37.069231 | 102 | 0.559037 |
ace71b972ad5d81a2a0a30460d47a1db1d2ae2eb | 45 | py | Python | conftest.py | StanczakDominik/PIC3 | 583262cff0edfaee48b9540505bcd68983ec53ec | [
"BSD-3-Clause"
] | 19 | 2016-03-29T09:07:07.000Z | 2021-09-27T07:59:17.000Z | conftest.py | StanczakDominik/PIC3 | 583262cff0edfaee48b9540505bcd68983ec53ec | [
"BSD-3-Clause"
] | 16 | 2017-02-14T13:27:24.000Z | 2017-03-10T19:53:03.000Z | conftest.py | StanczakDominik/PythonPIC | 583262cff0edfaee48b9540505bcd68983ec53ec | [
"BSD-3-Clause"
] | 8 | 2016-09-11T19:31:20.000Z | 2021-01-11T03:26:02.000Z | # coding=utf-8
collect_ignore = ["setup.py"]
| 15 | 29 | 0.688889 |
ace71e4af81c2830d9dcd00b25abf9ba4592a194 | 15,163 | py | Python | homeassistant/components/ozw/__init__.py | edofullin/core | 106dc4d28ad59cb192c60fc7a354cafa86899ea4 | [
"Apache-2.0"
] | 1 | 2021-03-24T13:28:02.000Z | 2021-03-24T13:28:02.000Z | homeassistant/components/ozw/__init__.py | edofullin/core | 106dc4d28ad59cb192c60fc7a354cafa86899ea4 | [
"Apache-2.0"
] | 60 | 2020-08-03T07:32:56.000Z | 2022-03-31T06:02:07.000Z | homeassistant/components/ozw/__init__.py | edofullin/core | 106dc4d28ad59cb192c60fc7a354cafa86899ea4 | [
"Apache-2.0"
] | 4 | 2017-01-10T04:17:33.000Z | 2021-09-02T16:37:24.000Z | """The ozw integration."""
import asyncio
from contextlib import suppress
import json
import logging
from openzwavemqtt import OZWManager, OZWOptions
from openzwavemqtt.const import (
EVENT_INSTANCE_EVENT,
EVENT_NODE_ADDED,
EVENT_NODE_CHANGED,
EVENT_NODE_REMOVED,
EVENT_VALUE_ADDED,
EVENT_VALUE_CHANGED,
EVENT_VALUE_REMOVED,
CommandClass,
ValueType,
)
from openzwavemqtt.models.node import OZWNode
from openzwavemqtt.models.value import OZWValue
from openzwavemqtt.util.mqtt_client import MQTTClient
from homeassistant.components import mqtt
from homeassistant.components.hassio.handler import HassioAPIError
from homeassistant.config_entries import ENTRY_STATE_LOADED, ConfigEntry
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.device_registry import async_get_registry as get_dev_reg
from homeassistant.helpers.dispatcher import async_dispatcher_send
from . import const
from .const import (
CONF_INTEGRATION_CREATED_ADDON,
CONF_USE_ADDON,
DATA_UNSUBSCRIBE,
DOMAIN,
MANAGER,
NODES_VALUES,
PLATFORMS,
TOPIC_OPENZWAVE,
)
from .discovery import DISCOVERY_SCHEMAS, check_node_schema, check_value_schema
from .entity import (
ZWaveDeviceEntityValues,
create_device_id,
create_device_name,
create_value_id,
)
from .services import ZWaveServices
from .websocket_api import async_register_api
_LOGGER = logging.getLogger(__name__)
# hass.data key for tracking Z-Wave devices known to this integration.
DATA_DEVICES = "zwave-mqtt-devices"
# hass.data key -- presumably stores the callback that stops the
# integration-owned MQTT client; usage is outside this view, confirm.
DATA_STOP_MQTT_CLIENT = "ozw_stop_mqtt_client"
async def async_setup(hass: HomeAssistant, config: dict):
    """Initialize basic config of ozw component.

    Only creates the shared hass.data storage; the real work happens in
    async_setup_entry.
    """
    hass.data[DOMAIN] = {}
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up ozw from a config entry."""
    # Per-entry storage; DATA_UNSUBSCRIBE collects listener-removal callbacks.
    ozw_data = hass.data[DOMAIN][entry.entry_id] = {}
    ozw_data[DATA_UNSUBSCRIBE] = []
    # data_nodes: node_id -> OZWNode; data_values: node_id -> list of entity value groups.
    data_nodes = {}
    hass.data[DOMAIN][NODES_VALUES] = data_values = {}
    # Node ids for which a permanent removal was requested (see instance event below).
    removed_nodes = []
    manager_options = {"topic_prefix": f"{TOPIC_OPENZWAVE}/"}
    if entry.unique_id is None:
        hass.config_entries.async_update_entry(entry, unique_id=DOMAIN)
    if entry.data.get(CONF_USE_ADDON):
        # Do not use MQTT integration. Use own MQTT client.
        # Retrieve discovery info from the OpenZWave add-on.
        discovery_info = await hass.components.hassio.async_get_addon_discovery_info(
            "core_zwave"
        )
        if not discovery_info:
            _LOGGER.error("Failed to get add-on discovery info")
            raise ConfigEntryNotReady
        discovery_info_config = discovery_info["config"]
        host = discovery_info_config["host"]
        port = discovery_info_config["port"]
        username = discovery_info_config["username"]
        password = discovery_info_config["password"]
        mqtt_client = MQTTClient(host, port, username=username, password=password)
        manager_options["send_message"] = mqtt_client.send_message
    else:
        mqtt_entries = hass.config_entries.async_entries("mqtt")
        if not mqtt_entries or mqtt_entries[0].state != ENTRY_STATE_LOADED:
            _LOGGER.error("MQTT integration is not set up")
            return False
        mqtt_entry = mqtt_entries[0]  # MQTT integration only has one entry.

        @callback
        def send_message(topic, payload):
            # Guard against publishing after the MQTT entry was unloaded.
            if mqtt_entry.state != ENTRY_STATE_LOADED:
                _LOGGER.error("MQTT integration is not set up")
                return
            mqtt.async_publish(hass, topic, json.dumps(payload))

        manager_options["send_message"] = send_message
    options = OZWOptions(**manager_options)
    manager = OZWManager(options)
    hass.data[DOMAIN][MANAGER] = manager

    @callback
    def async_node_added(node):
        # Caution: This is also called on (re)start.
        _LOGGER.debug("[NODE ADDED] node_id: %s", node.id)
        data_nodes[node.id] = node
        if node.id not in data_values:
            data_values[node.id] = []

    @callback
    def async_node_changed(node):
        _LOGGER.debug("[NODE CHANGED] node_id: %s", node.id)
        data_nodes[node.id] = node
        # notify devices about the node change
        if node.id not in removed_nodes:
            hass.async_create_task(async_handle_node_update(hass, node))

    @callback
    def async_node_removed(node):
        _LOGGER.debug("[NODE REMOVED] node_id: %s", node.id)
        data_nodes.pop(node.id)
        # node added/removed events also happen on (re)starts of hass/mqtt/ozw
        # cleanup device/entity registry if we know this node is permanently deleted
        # entities itself are removed by the values logic
        if node.id in removed_nodes:
            hass.async_create_task(async_handle_remove_node(hass, node))
            removed_nodes.remove(node.id)

    @callback
    def async_instance_event(message):
        event = message["event"]
        event_data = message["data"]
        _LOGGER.debug("[INSTANCE EVENT]: %s - data: %s", event, event_data)
        # The actual removal action of a Z-Wave node is reported as instance event
        # Only when this event is detected we cleanup the device and entities from hass
        # Note: Find a more elegant way of doing this, e.g. a notification of this event from OZW
        if event in ["removenode", "removefailednode"] and "Node" in event_data:
            removed_nodes.append(event_data["Node"])

    @callback
    def async_value_added(value):
        node = value.node
        # Clean up node.node_id and node.id use. They are the same.
        node_id = value.node.node_id
        # Filter out CommandClasses we're definitely not interested in.
        if value.command_class in [
            CommandClass.MANUFACTURER_SPECIFIC,
        ]:
            return
        _LOGGER.debug(
            "[VALUE ADDED] node_id: %s - label: %s - value: %s - value_id: %s - CC: %s",
            value.node.id,
            value.label,
            value.value,
            value.value_id_key,
            value.command_class,
        )
        node_data_values = data_values[node_id]
        # Check if this value should be tracked by an existing entity
        value_unique_id = create_value_id(value)
        for values in node_data_values:
            values.async_check_value(value)
            if values.values_id == value_unique_id:
                return  # this value already has an entity
        # Run discovery on it and see if any entities need created
        for schema in DISCOVERY_SCHEMAS:
            if not check_node_schema(node, schema):
                continue
            if not check_value_schema(
                value, schema[const.DISC_VALUES][const.DISC_PRIMARY]
            ):
                continue
            values = ZWaveDeviceEntityValues(hass, options, schema, value)
            values.async_setup()
            # This is legacy and can be cleaned up since we are in the main thread:
            # We create a new list and update the reference here so that
            # the list can be safely iterated over in the main thread
            data_values[node_id] = node_data_values + [values]

    @callback
    def async_value_changed(value):
        # if an entity belonging to this value needs updating,
        # it's handled within the entity logic
        _LOGGER.debug(
            "[VALUE CHANGED] node_id: %s - label: %s - value: %s - value_id: %s - CC: %s",
            value.node.id,
            value.label,
            value.value,
            value.value_id_key,
            value.command_class,
        )
        # Handle a scene activation message
        if value.command_class in [
            CommandClass.SCENE_ACTIVATION,
            CommandClass.CENTRAL_SCENE,
        ]:
            async_handle_scene_activated(hass, value)
            return

    @callback
    def async_value_removed(value):
        _LOGGER.debug(
            "[VALUE REMOVED] node_id: %s - label: %s - value: %s - value_id: %s - CC: %s",
            value.node.id,
            value.label,
            value.value,
            value.value_id_key,
            value.command_class,
        )
        # signal all entities using this value for removal
        value_unique_id = create_value_id(value)
        async_dispatcher_send(hass, const.SIGNAL_DELETE_ENTITY, value_unique_id)
        # remove value from our local list
        node_data_values = data_values[value.node.id]
        node_data_values[:] = [
            item for item in node_data_values if item.values_id != value_unique_id
        ]

    # Listen to events for node and value changes
    for event, event_callback in (
        (EVENT_NODE_ADDED, async_node_added),
        (EVENT_NODE_CHANGED, async_node_changed),
        (EVENT_NODE_REMOVED, async_node_removed),
        (EVENT_VALUE_ADDED, async_value_added),
        (EVENT_VALUE_CHANGED, async_value_changed),
        (EVENT_VALUE_REMOVED, async_value_removed),
        (EVENT_INSTANCE_EVENT, async_instance_event),
    ):
        ozw_data[DATA_UNSUBSCRIBE].append(options.listen(event, event_callback))
    # Register Services
    services = ZWaveServices(hass, manager)
    services.async_register()
    # Register WebSocket API
    async_register_api(hass)

    @callback
    def async_receive_message(msg):
        manager.receive_message(msg.topic, msg.payload)

    async def start_platforms():
        # Forward the entry to every supported platform in parallel.
        await asyncio.gather(
            *[
                hass.config_entries.async_forward_entry_setup(entry, platform)
                for platform in PLATFORMS
            ]
        )
        if entry.data.get(CONF_USE_ADDON):
            # Add-on mode: run our own MQTT client and make sure it is stopped
            # both on unload and on Home Assistant shutdown.
            mqtt_client_task = asyncio.create_task(mqtt_client.start_client(manager))

            async def async_stop_mqtt_client(event=None):
                """Stop the mqtt client.

                Do not unsubscribe the manager topic.
                """
                mqtt_client_task.cancel()
                with suppress(asyncio.CancelledError):
                    await mqtt_client_task

            ozw_data[DATA_UNSUBSCRIBE].append(
                hass.bus.async_listen_once(
                    EVENT_HOMEASSISTANT_STOP, async_stop_mqtt_client
                )
            )
            ozw_data[DATA_STOP_MQTT_CLIENT] = async_stop_mqtt_client
        else:
            # MQTT-integration mode: subscribe to the manager topic tree.
            ozw_data[DATA_UNSUBSCRIBE].append(
                await mqtt.async_subscribe(
                    hass, f"{manager.options.topic_prefix}#", async_receive_message
                )
            )

    hass.async_create_task(start_platforms())
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a config entry."""
    # Tear down every platform that was set up for this entry, in parallel.
    unload_results = await asyncio.gather(
        *(
            hass.config_entries.async_forward_entry_unload(entry, platform)
            for platform in PLATFORMS
        )
    )
    if not all(unload_results):
        return False
    entry_data = hass.data[DOMAIN][entry.entry_id]
    # Detach every listener that was registered during setup.
    for unsubscribe in entry_data[DATA_UNSUBSCRIBE]:
        unsubscribe()
    # In add-on mode we own the MQTT client, so shut it down as well.
    if entry.data.get(CONF_USE_ADDON):
        await entry_data[DATA_STOP_MQTT_CLIENT]()
    hass.data[DOMAIN].pop(entry.entry_id)
    return True
async def async_remove_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
    """Remove a config entry."""
    # Only touch the add-on if this integration installed it in the first place.
    if not entry.data.get(CONF_INTEGRATION_CREATED_ADDON):
        return
    try:
        await hass.components.hassio.async_stop_addon("core_zwave")
    except HassioAPIError as err:
        # If the add-on cannot even be stopped, don't attempt the uninstall.
        _LOGGER.error("Failed to stop the OpenZWave add-on: %s", err)
        return
    try:
        await hass.components.hassio.async_uninstall_addon("core_zwave")
    except HassioAPIError as err:
        # Best effort: log and give up; nothing sensible left to do on failure.
        _LOGGER.error("Failed to uninstall the OpenZWave add-on: %s", err)
async def async_handle_remove_node(hass: HomeAssistant, node: OZWNode):
    """Purge every device-registry trace of a permanently removed Z-Wave node."""
    registry = await get_dev_reg(hass)
    # Look up the primary registry device that represents this node.
    primary = registry.async_get_device({(DOMAIN, create_device_id(node))})
    if not primary:
        return
    # Collect the primary device plus any slave devices (node instances)
    # that hang off it via ``via_device_id``.
    doomed = [primary.id] + [
        entry.id
        for entry in registry.devices.values()
        if entry.via_device_id == primary.id
    ]
    # Remove them all; entity-registry cleanup is handled by core.
    for device_id in doomed:
        registry.async_remove_device(device_id)
async def async_handle_node_update(hass: HomeAssistant, node: OZWNode):
    """Push refreshed node info (name/model/manufacturer) to the device registry.

    Called when OZW reports a node-changed event so the registry entries stay
    in sync with the basic node metadata.
    """
    registry = await get_dev_reg(hass)
    primary = registry.async_get_device({(DOMAIN, create_device_id(node))})
    if not primary:
        return
    # Update the primary device and every instance (slave) device tied to it.
    for entry in registry.devices.values():
        is_primary = entry.id == primary.id
        is_slave = entry.via_device_id == primary.id
        if not (is_primary or is_slave):
            continue
        registry.async_update_device(
            entry.id,
            manufacturer=node.node_manufacturer_name,
            model=node.node_product_name,
            name=create_device_name(node),
        )
@callback
def async_handle_scene_activated(hass: HomeAssistant, scene_value: OZWValue):
    """Handle a (central) scene activation message."""
    if scene_value.command_class == CommandClass.SCENE_ACTIVATION:
        # Legacy/network scene: the raw value identifies the scene directly.
        scene_value_id = scene_value.value
        scene_value_label = scene_value.label
    elif scene_value.type != ValueType.LIST:
        # Central scene payloads we understand are list-typed; ignore others.
        return
    else:
        # Central scene command: scene info lives in the selected list item.
        scene_value_label = scene_value.value["Selected"]
        scene_value_id = scene_value.value["Selected_id"]
    node_id = scene_value.node.id
    ozw_instance_id = scene_value.ozw_instance.id
    scene_id = scene_value.index
    scene_label = scene_value.label
    _LOGGER.debug(
        "[SCENE_ACTIVATED] ozw_instance: %s - node_id: %s - scene_id: %s - scene_value_id: %s",
        ozw_instance_id,
        node_id,
        scene_id,
        scene_value_id,
    )
    # Simply forward it to the hass event bus
    hass.bus.async_fire(
        const.EVENT_SCENE_ACTIVATED,
        {
            const.ATTR_INSTANCE_ID: ozw_instance_id,
            const.ATTR_NODE_ID: node_id,
            const.ATTR_SCENE_ID: scene_id,
            const.ATTR_SCENE_LABEL: scene_label,
            const.ATTR_SCENE_VALUE_ID: scene_value_id,
            const.ATTR_SCENE_VALUE_LABEL: scene_value_label,
        },
    )
| 35.018476 | 97 | 0.660621 |
ace71ea22f2753b6ac793cd1f45edf7ae0258552 | 664 | py | Python | env/Lib/site-packages/plotly/validators/funnelarea/domain/_x.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | packages/python/plotly/plotly/validators/funnelarea/domain/_x.py | jiangrongbo/plotly.py | df19fc702b309586cc24e25373b87e8bdbb3ff60 | [
"MIT"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | packages/python/plotly/plotly/validators/funnelarea/domain/_x.py | jiangrongbo/plotly.py | df19fc702b309586cc24e25373b87e8bdbb3ff60 | [
"MIT"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for the ``funnelarea.domain.x`` property (a 2-item info array)."""

    def __init__(self, plotly_name="x", parent_name="funnelarea.domain", **kwargs):
        # Each endpoint of the x-domain is a number clamped to [0, 1].
        unit_interval = {"editType": "calc", "max": 1, "min": 0, "valType": "number"}
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("items", [dict(unit_interval), dict(unit_interval)])
        super(XValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
| 34.947368 | 83 | 0.521084 |
ace71f72f6fec69bd10419f331bc90a189c5af9e | 7,879 | py | Python | tests/gbe/test_create_vendor.py | bethlakshmi/gbe-divio-djangocms-python2.7 | 6e9b2c894162524bbbaaf73dcbe927988707231d | [
"Apache-2.0"
] | 1 | 2021-03-14T11:56:47.000Z | 2021-03-14T11:56:47.000Z | tests/gbe/test_create_vendor.py | bethlakshmi/gbe-divio-djangocms-python2.7 | 6e9b2c894162524bbbaaf73dcbe927988707231d | [
"Apache-2.0"
] | 180 | 2019-09-15T19:52:46.000Z | 2021-11-06T23:48:01.000Z | tests/gbe/test_create_vendor.py | bethlakshmi/gbe-divio-djangocms-python2.7 | 6e9b2c894162524bbbaaf73dcbe927988707231d | [
"Apache-2.0"
] | null | null | null | from django.urls import reverse
from django.test import TestCase
from django.test import Client
from tests.factories.gbe_factories import (
BusinessFactory,
ConferenceFactory,
ProfileFactory,
UserMessageFactory,
VendorFactory
)
from tests.functions.ticketing_functions import setup_fees
from tests.functions.gbe_functions import (
current_conference,
login_as,
assert_alert_exists,
make_vendor_app_purchase,
make_vendor_app_ticket
)
from gbetext import (
default_vendor_submit_msg,
default_vendor_draft_msg
)
from gbe.models import (
Conference,
UserMessage
)
class TestCreateVendor(TestCase):
    '''Tests for create_vendor view'''
    view_name = 'vendor_create'

    def setUp(self):
        # Start from a clean slate: conferences and user messages are shared state.
        Conference.objects.all().delete()
        self.client = Client()
        self.profile = ProfileFactory()
        self.conference = current_conference()
        UserMessage.objects.all().delete()
        self.business = BusinessFactory(owners=[self.profile])

    def get_form(self, submit=False, invalid=False):
        """Build the POST payload; ``invalid`` points at a nonexistent business pk."""
        form = {'thebiz-business': self.business.pk}
        if submit:
            form['submit'] = True
        if invalid:
            form['thebiz-business'] = self.business.pk + 10
        return form

    def post_paid_vendor_submission(self):
        """Helper: submit the vendor form after paying the application fee."""
        url = reverse(self.view_name,
                      urlconf='gbe.urls')
        username = self.profile.user_object.username
        make_vendor_app_purchase(self.conference, self.profile.user_object)
        login_as(self.profile, self)
        data = self.get_form(submit=True)
        response = self.client.post(url,
                                    data,
                                    follow=True)
        return response, data

    def post_unpaid_vendor_draft(self):
        """Helper: save the vendor form as a draft without paying."""
        url = reverse(self.view_name,
                      urlconf='gbe.urls')
        login_as(self.profile, self)
        data = self.get_form()
        data['draft'] = True
        response = self.client.post(url,
                                    data,
                                    follow=True)
        return response, data

    def test_create_vendor_post_form_valid(self):
        url = reverse(self.view_name,
                      urlconf='gbe.urls')
        event_id = make_vendor_app_ticket(self.conference)
        response, data = self.post_unpaid_vendor_draft()
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Welcome to GBE')
        # Drafts render as an editable entry on the landing page.
        draft_string = (
            '<i class="fas fa-arrow-alt-circle-right"></i> <b>%s - %s</b>'
        ) % (self.business.name, self.conference.conference_slug)
        self.assertContains(response, "(Click to edit)")
        self.assertContains(response, draft_string)

    def test_create_vendor_post_form_valid_submit(self):
        # Submitting without a completed purchase should flag the unpaid fee.
        url = reverse(self.view_name, urlconf='gbe.urls')
        login_as(self.profile, self)
        tickets = setup_fees(self.conference, is_vendor=True)
        data = self.get_form(submit=True)
        data['main_ticket'] = tickets[0].pk
        response = self.client.post(url,
                                    data,
                                    follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Fee has not been Paid')

    def test_create_vendor_post_form_invalid(self):
        url = reverse(self.view_name,
                      urlconf='gbe.urls')
        login_as(self.profile, self)
        data = self.get_form(invalid=True)
        response = self.client.post(
            url, data=data)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Select a valid choice.")

    def test_create_vendor_post_form_not_my_biz(self):
        # A business owned by someone else must not be selectable.
        url = reverse(self.view_name,
                      urlconf='gbe.urls')
        data = self.get_form()
        other_biz = BusinessFactory()
        data['thebiz-business'] = other_biz.pk
        login_as(self.profile, self)
        response = self.client.post(
            url, data=data)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Select a valid choice.")

    def test_create_vendor_with_get_request(self):
        url = reverse(self.view_name,
                      urlconf='gbe.urls')
        login_as(self.profile, self)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'Vendor Application')

    def test_create_vendor_with_no_business(self):
        # A profile with no business is redirected to create one first.
        url = reverse(self.view_name,
                      urlconf='gbe.urls')
        login_as(ProfileFactory(), self)
        response = self.client.get(url, follow=True)
        self.assertContains(response, 'Tell Us About Your Business')

    def test_create_vendor_post_with_vendor_app_paid(self):
        response, data = self.post_paid_vendor_submission()
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Welcome to GBE")
        self.assertContains(response, "(Click to view)")
        self.assertContains(response, self.business.name)

    def test_create_paid_vendor_w_other_vendor_paid(self):
        # Another vendor's submission must not interfere with this one.
        VendorFactory(b_conference=self.conference, submitted=True)
        response, data = self.post_paid_vendor_submission()
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Welcome to GBE")
        self.assertContains(response, "(Click to view)")
        self.assertContains(response, self.business.name)

    def test_create_vendor_post_with_vendor_old_comp(self):
        # A comped vendor app from a past conference should not block a new one.
        comped_vendor = VendorFactory(
            submitted=True,
            business=self.business,
            b_conference=ConferenceFactory(status='completed')
        )
        response, data = self.post_paid_vendor_submission()
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Welcome to GBE")
        self.assertContains(response, "(Click to view)")
        self.assertContains(response, self.business.name)

    def test_create_vendor_post_with_second_vendor_app_paid(self):
        # A second paid application for the same conference is accepted.
        prev_vendor = VendorFactory(
            submitted=True,
            business=self.business,
            b_conference=self.conference
        )
        make_vendor_app_purchase(self.conference, self.profile.user_object)
        response, data = self.post_paid_vendor_submission()
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "Welcome to GBE")
        self.assertContains(response, "(Click to view)")
        self.assertContains(response, self.business.name)

    def test_vendor_submit_make_message(self):
        # With no UserMessage configured, the default submit text is shown.
        response, data = self.post_paid_vendor_submission()
        self.assertEqual(response.status_code, 200)
        assert_alert_exists(
            response, 'success', 'Success', default_vendor_submit_msg)

    def test_vendor_draft_make_message(self):
        response, data = self.post_unpaid_vendor_draft()
        self.assertEqual(200, response.status_code)
        assert_alert_exists(
            response, 'success', 'Success', default_vendor_draft_msg)

    def test_vendor_submit_has_message(self):
        # A configured UserMessage overrides the default submit text.
        msg = UserMessageFactory(
            view='MakeVendorView',
            code='SUBMIT_SUCCESS')
        response, data = self.post_paid_vendor_submission()
        self.assertEqual(response.status_code, 200)
        assert_alert_exists(
            response, 'success', 'Success', msg.description)

    def test_vendor_draft_has_message(self):
        msg = UserMessageFactory(
            view='MakeVendorView',
            code='DRAFT_SUCCESS')
        response, data = self.post_unpaid_vendor_draft()
        self.assertEqual(200, response.status_code)
        assert_alert_exists(
            response, 'success', 'Success', msg.description)
| 38.434146 | 75 | 0.647544 |
ace720126ca3789dc8c54714445a03e192f01643 | 85,809 | py | Python | scanpy/tools/aga.py | gioelelm/scanpy | 97391a0e7908b9644b2d6640c8e26d37bdc7811e | [
"BSD-3-Clause"
] | null | null | null | scanpy/tools/aga.py | gioelelm/scanpy | 97391a0e7908b9644b2d6640c8e26d37bdc7811e | [
"BSD-3-Clause"
] | null | null | null | scanpy/tools/aga.py | gioelelm/scanpy | 97391a0e7908b9644b2d6640c8e26d37bdc7811e | [
"BSD-3-Clause"
] | 1 | 2019-02-18T07:39:59.000Z | 2019-02-18T07:39:59.000Z | # Author: Alex Wolf (http://falexwolf.de)
from collections import namedtuple
import numpy as np
import scipy as sp
import networkx as nx
import scipy.sparse
from textwrap import indent, dedent
from .. import logging as logg
from ..data_structs import data_graph
from .. import utils
from .. import settings
from ..plotting import utils as pl_utils
# Lower bound on tree attachedness — NOTE(review): not referenced within this
# chunk; presumably consumed by the AGA tree computation. TODO confirm.
MINIMAL_TREE_ATTACHEDNESS = 0.05
doc_string_base = dedent("""\
Generate cellular maps of differentiation manifolds with complex
topologies [Wolf17i]_.
Approximate graph abstraction (AGA) quantifies the connectivity of partitions of a
neighborhood graph of single cells, thereby generating a much simpler
abstracted graph whose nodes label the partitions. Together with a random
walk-based distance measure, this generates a topology preserving map of
single cells --- a partial coordinatization of data useful for exploring and
explaining its variation. We use the abstracted graph to assess which
subsets of data are better explained by discrete clusters than by a
continuous variable, to trace gene expression changes along aggregated
single-cell paths through data and to infer abstracted trees that best
explain the global topology of data.
Most of the following parameters appear similarly in other tools.
Parameters
----------
adata : AnnData
Annotated data matrix, optionally with `adata.add['iroot']`, the index
of root cell for computing a pseudotime.
n_neighbors : int or None, optional (default: 30)
Number of nearest neighbors on the knn graph. Often this can be reduced
down to a value of 4.
n_pcs : int, optional (default: 50)
Use n_pcs PCs to compute the euclidean distance matrix, which is the
basis for generating the graph. Set to 0 if you don't want preprocessing
with PCA.
n_dcs : int, optional (default: 10)
Number of diffusion components (very similar to eigen vectors of
adjacency matrix) to use for distance computations.
node_groups : any categorical sample annotation or {{'louvain', 'segments'}}, optional (default: 'louvain')
Criterion to determine the resoluting partitions of the
graph/data. 'louvain' uses the louvain algorithm and optimizes
modularity of the graph, 'segments' uses a bipartioning
criterium that is loosely inspired by hierarchical clustering. You can
also pass your predefined groups by choosing any sample annotation.
resolution : float, optional (default: 1.0)
See tool `louvain`.
random_state : int, optional (default: 0)
See tool `louvain`.
tree_detection : {{'iterative_matching', 'min_span_tree'}}, optional (default: 'min_span_tree')
How to detect a tree structure in the abstracted graph. If choosing
'min_span_tree', a minimum spanning tree is fitted for the abstracted
graph, weighted by inverse attachedness. If choosing 'iterative_matching',
a recursive algorithm that greedily attaches partitions (groups) that
maximize the random-walk based distance measure is run.
attachedness_measure : {{'connectedness', 'random_walk'}}, optional (default: 'connectedness')
How to measure connectedness between groups.
n_nodes : int or None, optional (default: None)
Number of nodes in the abstracted graph. Except when choosing
'segments' for `node_groups`, for which `n_nodes` defaults to
`n_nodes=1`, `n_nodes` defaults to the number of groups implied by the
choice of `node_groups`.
recompute_graph : bool, optional (default: False)
Recompute single-cell graph. Only then `n_neighbors` has an effect if
there is already a cached `distance` or `X_diffmap` in adata.
recompute_pca : bool, optional (default: False)
Recompute PCA.
recompute_louvain : bool, optional (default: False)
When changing the `resolution` parameter, you should set this to True.
n_jobs : int or None (default: settings.n_jobs)
Number of cpus to use for parallel processing.
copy : bool, optional (default: False)
Copy instance before computation and return a copy. Otherwise, perform
computation inplace and return None.
Returns
-------
Returns or updates adata depending on `copy` with
{returns}
Reference
---------
Wolf et al., bioRxiv (2017)
""")
doc_string_returns = dedent("""\
aga_adjacency_full_attachedness : np.ndarray in adata.add
The full adjacency matrix of the abstracted graph, weights
correspond to connectedness.
aga_adjacency_full_confidence : np.ndarray in adata.add
The full adjacency matrix of the abstracted graph, weights
correspond to confidence in the presence of an edge.
aga_adjacency_tree_confidence : sparse csr matrix in adata.add
The adjacency matrix of the tree-like subgraph that best explains
the topology
aga_groups : np.ndarray of dtype string in adata.smp
Group labels for each sample.
aga_pseudotime : np.ndarray of dtype float in adata.smp
Pseudotime labels, that is, distance a long the manifold for each
cell.
""")
def aga(adata,
        n_neighbors=30,
        n_pcs=50,
        n_dcs=10,
        node_groups='louvain',
        resolution=1,
        random_state=0,
        attachedness_measure='connectedness',
        tree_detection='min_span_tree',
        tree_based_confidence=True,
        n_nodes=None,
        recompute_pca=False,
        recompute_distances=False,
        recompute_graph=False,
        recompute_louvain=False,
        n_jobs=None,
        copy=False):
    # NOTE: the public docstring is attached below via ``aga.__doc__`` so it
    # can share the templated "returns" section.
    adata = adata.copy() if copy else adata
    if tree_detection not in {'iterative_matching', 'min_span_tree'}:
        raise ValueError('`tree_detection` needs to be one of {}'
                         .format({'iterative_matching', 'min_span_tree'}))
    fresh_compute_louvain = False
    # (Re)run louvain clustering when its result is missing, was computed with
    # a different resolution, or the underlying graph needs recomputation.
    if (node_groups == 'louvain'
            and ('louvain_groups' not in adata.smp_keys()
                 or ('louvain_params' in adata.add and adata.add['louvain_params']['resolution'] != resolution)
                 or recompute_louvain
                 or not data_graph.no_recompute_of_graph_necessary(
                     adata,
                     recompute_pca=recompute_pca,
                     recompute_distances=recompute_distances,
                     recompute_graph=recompute_graph,
                     n_neighbors=n_neighbors,
                     n_dcs=n_dcs))):
        from .louvain import louvain
        louvain(adata,
                resolution=resolution,
                n_neighbors=n_neighbors,
                recompute_pca=recompute_pca,
                recompute_graph=recompute_graph,
                n_pcs=n_pcs,
                n_dcs=n_dcs,
                random_state=random_state)
        fresh_compute_louvain = True
    clusters = node_groups
    if node_groups == 'louvain': clusters = 'louvain_groups'
    logg.info('running Approximate Graph Abstraction (AGA)', reset=True)
    # Without a root cell, pseudotime cannot be computed; tell the user how to set one.
    if ('iroot' not in adata.add
            and 'xroot' not in adata.add
            and 'xroot' not in adata.var):
        logg.info(' no root cell found, no computation of pseudotime')
        msg = \
"""To enable computation of pseudotime, pass the index or expression vector
of a root cell. Either add
    adata.add['iroot'] = root_cell_index
or (robust to subsampling)
    adata.var['xroot'] = adata.X[root_cell_index, :]
where "root_cell_index" is the integer index of the root cell, or
    adata.var['xroot'] = adata[root_cell_name, :].X
where "root_cell_name" is the name (a string) of the root cell."""
        logg.hint(msg)
    aga = AGA(adata,
              clusters=clusters,
              n_neighbors=n_neighbors,
              n_pcs=n_pcs,
              n_dcs=n_dcs,
              n_jobs=n_jobs,
              tree_based_confidence=tree_based_confidence,
              # we do not need to recompute things both in the louvain
              # call above and here
              recompute_graph=recompute_graph and not fresh_compute_louvain,
              recompute_distances=recompute_distances and not fresh_compute_louvain,
              recompute_pca=recompute_pca and not fresh_compute_louvain,
              n_nodes=n_nodes,
              attachedness_measure=attachedness_measure)
    updated_diffmap = aga.update_diffmap()
    # Write diffusion-map representation and graph data back into adata.
    adata.smp['X_diffmap'] = aga.rbasis[:, 1:]
    adata.smp['X_diffmap0'] = aga.rbasis[:, 0]
    adata.add['diffmap_evals'] = aga.evals[1:]
    adata.add['data_graph_distance_local'] = aga.Dsq
    adata.add['data_graph_norm_weights'] = aga.Ktilde
    if aga.iroot is not None:
        aga.set_pseudotime()  # pseudotimes are random walk distances from root point
        adata.add['iroot'] = aga.iroot  # update iroot, might have changed when subsampling, for example
        adata.smp['aga_pseudotime'] = aga.pseudotime
    # detect splits and partition the data into segments
    aga.splits_segments()
    # vector of length n_samples of group names
    adata.smp['aga_groups'] = aga.segs_names.astype('U')
    # vectors of length n_groups
    adata.add['aga_groups_order'] = np.array([str(n) for n in aga.segs_names_unique])
    adata.add['aga_groups_sizes'] = aga.segs_sizes
    if tree_detection == 'min_span_tree':
        # Fit a minimum spanning tree on inverse attachedness, then invert back.
        min_span_tree = utils.compute_minimum_spanning_tree(
            1./aga.segs_adjacency_full_attachedness)
        min_span_tree.data = 1./min_span_tree.data
        full_confidence, tree_confidence = aga.compute_adjacency_confidence(
            aga.segs_adjacency_full_attachedness, min_span_tree, tree_based_confidence)
    else:
        full_confidence, tree_confidence = aga.segs_adjacency_full_confidence, aga.segs_adjacency_tree_confidence
    adata.add['aga_adjacency_full_attachedness'] = aga.segs_adjacency_full_attachedness
    adata.add['aga_adjacency_full_confidence'] = full_confidence
    adata.add['aga_adjacency_tree_confidence'] = tree_confidence
    # manage cluster names and colors
    if (clusters not in {'segments', 'unconstrained_segments'}):
        adata.add['aga_groups_original'] = clusters
        adata.add['aga_groups_order_original'] = np.array(aga.segs_names_original)
        if (clusters + '_colors' not in adata.add
                or len(adata.add[clusters + '_colors']) != len(adata.add['aga_groups_order'])):
            pl_utils.add_colors_for_categorical_sample_annotation(adata, clusters)
        colors_original = []
        if clusters + '_order' not in adata.add:
            from natsort import natsorted
            adata.add[clusters + '_order'] = natsorted(np.unique(adata.smp[clusters]))
        # Map each original segment name to the color of the source clustering.
        name_list = list(adata.add[clusters + '_order'])
        for name in aga.segs_names_original:
            idx = name_list.index(name)
            colors_original.append(adata.add[clusters + '_colors'][idx])
        adata.add['aga_groups_colors_original'] = np.array(colors_original)
    logg.info('... finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
    logg.hint('added\n' + indent(doc_string_returns, ' '))
    return adata if copy else None
# Attach the shared, templated docstring (see module-level constants above).
aga.__doc__ = doc_string_base.format(returns=doc_string_returns)
def aga_degrees(adata):
    """Compute the degree of each node in the abstracted graph.

    Degrees are weighted by the 'weight' edge attribute, i.e. each node's
    degree is the sum of the confidences of its incident edges.

    Parameters
    ----------
    adata : AnnData
        Annotated data matrix.

    Returns
    -------
    degrees : list
        List of degrees for each node.
    """
    import networkx as nx
    g = nx.Graph(adata.add['aga_adjacency_full_confidence'])
    # ``Graph.degree_iter`` was removed in networkx 2.0; there, ``Graph.degree``
    # returns an iterable view of (node, degree) pairs. Support both APIs.
    if hasattr(g, 'degree_iter'):
        degree_pairs = g.degree_iter(weight='weight')  # networkx < 2.0
    else:
        degree_pairs = g.degree(weight='weight')  # networkx >= 2.0
    degrees = [d for _, d in degree_pairs]
    return degrees
def aga_expression_entropies(adata):
    """Compute the median expression entropy for each node-group.

    Parameters
    ----------
    adata : AnnData
        Annotated data matrix.

    Returns
    -------
    entropies : list
        Entropies of median expressions for each node.
    """
    from scipy.stats import entropy
    _, group_masks = utils.select_groups(adata, smp='aga_groups')
    entropies = []
    for group_mask in group_masks:
        median_expression = np.median(adata.X[group_mask], axis=0)
        lowest = np.min(median_expression)
        highest = np.max(median_expression)
        # Rescale the medians to [0, 1] so they can be fed to scipy's entropy
        # (which normalizes them into a probability distribution).
        scaled = (median_expression - lowest) / (highest - lowest)
        entropies.append(entropy(scaled))
    return entropies
def aga_compare_paths(adata1, adata2,
                      adjacency_key='aga_adjacency_full_confidence'):
    """Compare paths in abstracted graphs in two datasets.

    Compute the fraction of consistent paths between leafs, a measure for the
    topological similarity between graphs.

    By increasing the verbosity to level 4 and 5, the paths that do not agree
    and the paths that agree are written to the output, respectively.

    Parameters
    ----------
    adata1, adata2 : AnnData
        Annotated data matrices to compare.
    adjacency_key : str
        Key for indexing the adjacency matrices to be used in adata1 and adata2.

    Returns
    -------
    OrderedTuple with attributes ``n_steps`` (total number of steps in paths)
    and ``frac_steps`` (fraction of consistent steps), ``n_paths`` and
    ``frac_paths``.
    """
    import networkx as nx
    g1 = nx.Graph(adata1.add[adjacency_key])
    g2 = nx.Graph(adata2.add[adjacency_key])
    # Leafs of the reference graph: nodes with exactly one edge.
    leaf_nodes1 = [str(x) for x in g1.nodes() if g1.degree(x) == 1]
    logg.msg('leaf nodes in graph 1: {}'.format(leaf_nodes1), v=5, no_indent=True)
    # Bidirectional association between group labels of the two datasets.
    asso_groups1 = utils.identify_groups(adata1.smp['aga_groups'], adata2.smp['aga_groups'])
    asso_groups2 = utils.identify_groups(adata2.smp['aga_groups'], adata1.smp['aga_groups'])
    orig_names1 = adata1.add['aga_groups_order_original']
    orig_names2 = adata2.add['aga_groups_order_original']
    import itertools
    n_steps = 0
    n_agreeing_steps = 0
    n_paths = 0
    n_agreeing_paths = 0
    # loop over all pairs of leaf nodes in the reference adata1
    for (r, s) in itertools.combinations(leaf_nodes1, r=2):
        r2, s2 = asso_groups1[r][0], asso_groups1[s][0]
        orig_names = [orig_names1[int(i)] for i in [r, s]]
        orig_names += [orig_names2[int(i)] for i in [r2, s2]]
        logg.msg('compare shortest paths between leafs ({}, {}) in graph1 and ({}, {}) in graph2:'
                 .format(*orig_names), v=4, no_indent=True)
        no_path1 = False
        try:
            path1 = [str(x) for x in nx.shortest_path(g1, int(r), int(s))]
        except nx.NetworkXNoPath:
            no_path1 = True
        no_path2 = False
        try:
            path2 = [str(x) for x in nx.shortest_path(g2, int(r2), int(s2))]
        except nx.NetworkXNoPath:
            no_path2 = True
        if no_path1 and no_path2:
            # consistent behavior
            n_paths += 1
            n_agreeing_paths += 1
            n_steps += 1
            n_agreeing_steps += 1
            continue
        elif no_path1 or no_path2:
            # non-consistent result
            n_paths += 1
            n_steps += 1
            continue
        # Compare the shorter path against the mapped version of the longer one.
        if len(path1) >= len(path2):
            path_mapped = [asso_groups1[l] for l in path1]
            path_compare = path2
            path_compare_id = 2
            path_compare_orig_names = [[orig_names2[int(s)] for s in l] for l in path_compare]
            path_mapped_orig_names = [[orig_names2[int(s)] for s in l] for l in path_mapped]
        else:
            path_mapped = [asso_groups2[l] for l in path2]
            path_compare = path1
            path_compare_id = 1
            path_compare_orig_names = [[orig_names1[int(s)] for s in l] for l in path_compare]
            path_mapped_orig_names = [[orig_names1[int(s)] for s in l] for l in path_mapped]
        n_agreeing_steps_path = 0
        ip_progress = 0
        for il, l in enumerate(path_compare[:-1]):
            for ip, p in enumerate(path_mapped):
                if ip >= ip_progress and l in p:
                    # check whether we can find the step forward of path_compare in path_mapped
                    if (ip + 1 < len(path_mapped)
                            and
                            path_compare[il + 1] in path_mapped[ip + 1]):
                        # make sure that a step backward leads us to the same value of l
                        # in case we "jumped"
                        logg.msg('found matching step ({} -> {}) at position {} in path{} and position {} in path_mapped'
                                 .format(l, path_compare_orig_names[il + 1], il, path_compare_id, ip), v=6)
                        consistent_history = True
                        for iip in range(ip, ip_progress, -1):
                            if l not in path_mapped[iip - 1]:
                                consistent_history = False
                        if consistent_history:
                            # here, we take one step further back (ip_progress - 1); it's implied that this
                            # was ok in the previous step
                            logg.msg(' step(s) backward to position(s) {} in path_mapped are fine, too: valid step'
                                     .format(list(range(ip - 1, ip_progress - 2, -1))), v=6)
                            n_agreeing_steps_path += 1
                            ip_progress = ip + 1
                            break
        n_steps_path = len(path_compare) - 1
        n_agreeing_steps += n_agreeing_steps_path
        n_steps += n_steps_path
        n_paths += 1
        if n_agreeing_steps_path == n_steps_path: n_agreeing_paths += 1
        # only for the output, use original names
        path1_orig_names = [orig_names1[int(s)] for s in path1]
        path2_orig_names = [orig_names2[int(s)] for s in path2]
        logg.msg(' path1 = {},\n'
                 'path_mapped = {},\n'
                 ' path2 = {},\n'
                 '-> n_agreeing_steps = {} / n_steps = {}.'
                 .format(path1_orig_names,
                         [list(p) for p in path_mapped_orig_names],
                         path2_orig_names,
                         n_agreeing_steps_path, n_steps_path), v=5, no_indent=True)
    # Guard against division by zero when no paths were examined.
    Result = namedtuple('aga_compare_paths_result',
                        ['frac_steps', 'n_steps', 'frac_paths', 'n_paths'])
    return Result(frac_steps=n_agreeing_steps/n_steps if n_steps > 0 else np.nan,
                  n_steps=n_steps if n_steps > 0 else np.nan,
                  frac_paths=n_agreeing_paths/n_paths if n_steps > 0 else np.nan,
                  n_paths=n_paths if n_steps > 0 else np.nan)
def aga_contract_graph(adata, min_group_size=0.01, max_n_contractions=1000, copy=False):
    """Contract the abstracted graph.

    Iteratively merges nodes of the abstracted tree stored in
    ``adata.add['aga_adjacency_tree_confidence']``: first nodes with exactly
    two edges, then node groups with fewer than `min_group_size` members,
    which are merged into their largest neighbor.

    Parameters
    ----------
    adata : AnnData
        Annotated data matrix; ``aga_adjacency_tree_confidence`` must be
        present (run the aga tool first).
    min_group_size : float or int, optional (default: 0.01)
        If smaller than 1, interpreted as a fraction of ``adata.n_smps``,
        otherwise as an absolute group size.
    max_n_contractions : int, optional (default: 1000)
        Upper bound on the number of contraction steps.
    copy : bool, optional (default: False)
        Operate on a copy of `adata` instead of modifying it in place.

    Returns
    -------
    The modified copy of `adata` if `copy` is True, else None. In both cases
    the contracted adjacency replaces the stored one, the group labels in
    ``adata.smp['aga_groups']`` are relabeled accordingly, and stale
    full-confidence/original-group annotations are removed.
    """
    adata = adata.copy() if copy else adata
    if 'aga_adjacency_tree_confidence' not in adata.add: raise ValueError('run tool aga first!')
    min_group_size = min_group_size if min_group_size >= 1 else int(min_group_size * adata.n_smps)
    logg.info('contract graph using `min_group_size={}`'.format(min_group_size))
    def propose_nodes_to_contract(adjacency_tree_confidence, node_groups):
        # Returns a pair (i, n) meaning "merge node i into node n";
        # (0, 0) is the sentinel for "nothing left to contract".
        # nodes with two edges
        n_edges_per_seg = np.sum(adjacency_tree_confidence > 0, axis=1).A1
        for i in range(adjacency_tree_confidence.shape[0]):
            if n_edges_per_seg[i] == 2:
                neighbors = adjacency_tree_confidence[i].nonzero()[1]
                # prefer the neighbor with the fewest edges (scan edge counts 1..19)
                for neighbors_edges in range(1, 20):
                    for n_cnt, n in enumerate(neighbors):
                        if n_edges_per_seg[n] == neighbors_edges:
                            logg.msg('merging node {} into {} (two edges)'
                                   .format(i, n), v=4)
                            return i, n
        # node groups with a very small cell number
        for i in range(adjacency_tree_confidence.shape[0]):
            if node_groups[str(i) == node_groups].size < min_group_size:
                neighbors = adjacency_tree_confidence[i].nonzero()[1]
                neighbor_sizes = [node_groups[str(n) == node_groups].size for n in neighbors]
                n = neighbors[np.argmax(neighbor_sizes)]
                logg.msg('merging node {} into {} '
                       '(smaller than `min_group_size` = {})'
                       .format(i, n, min_group_size), v=4)
                return i, n
        return 0, 0
    def contract_nodes(adjacency_tree_confidence, node_groups):
        # Repeatedly apply proposed contractions; after each contraction the
        # string labels are shifted so they stay contiguous ('0', '1', ...).
        for count in range(max_n_contractions):
            i, n = propose_nodes_to_contract(adjacency_tree_confidence, node_groups)
            if i != 0 or n != 0:
                G = nx.Graph(adjacency_tree_confidence)
                G_contracted = nx.contracted_nodes(G, n, i, self_loops=False)
                adjacency_tree_confidence = nx.to_scipy_sparse_matrix(G_contracted)
                node_groups[str(i) == node_groups] = str(n)
                # close the gap left by the removed label i
                for j in range(i+1, G.size()+1):
                    node_groups[str(j) == node_groups] = str(j-1)
            else:
                break
        return adjacency_tree_confidence, node_groups
    size_before = adata.add['aga_adjacency_tree_confidence'].shape[0]
    adata.add['aga_adjacency_tree_confidence'], adata.smp['aga_groups'] = contract_nodes(
        adata.add['aga_adjacency_tree_confidence'], adata.smp['aga_groups'])
    adata.add['aga_groups_order'] = np.unique(adata.smp['aga_groups'])
    # these annotations refer to the uncontracted graph and are now stale
    for key in ['aga_adjacency_full_confidence', 'aga_groups_original',
                'aga_groups_order_original', 'aga_groups_colors_original']:
        if key in adata.add: del adata.add[key]
    logg.info('    contracted graph from {} to {} nodes'
              .format(size_before, adata.add['aga_adjacency_tree_confidence'].shape[0]))
    logg.msg('removed adata.add["aga_adjacency_full_confidence"]', v=4)
    return adata if copy else None
class AGA(data_graph.DataGraph):
"""Approximate Graph Abstraction
"""
    def __init__(self,
                 adata,
                 n_nodes=None,
                 n_neighbors=30,
                 n_pcs=50,
                 n_dcs=10,
                 min_group_size=1,
                 tree_based_confidence=True,
                 minimal_distance_evidence=0.95,
                 recompute_pca=False,
                 recompute_distances=False,
                 recompute_graph=False,
                 attachedness_measure='connectedness',
                 clusters=None,
                 n_jobs=1):
        """Set up graph abstraction on top of the single-cell data graph.

        Parameters
        ----------
        adata : AnnData
            Annotated data matrix.
        n_nodes : int or None
            Number of nodes of the abstracted graph; when `clusters` names a
            precomputed clustering it is inferred from the number of clusters.
        n_neighbors : int
            Number of neighbors of the single-cell graph.
        n_pcs, n_dcs : int
            Number of principal / diffusion components passed to the base graph.
        min_group_size : float or int
            If < 1, interpreted as a fraction of the number of data points,
            otherwise as an absolute size.
        tree_based_confidence : bool
            Use the tree-based instead of the statistics-based confidence measure.
        minimal_distance_evidence : float
            Ratio threshold for considering distance evidence convincing.
        recompute_pca, recompute_distances, recompute_graph : bool
            Force recomputation in the base class.
        attachedness_measure : str
            How to measure attachedness between segments
            (default 'connectedness').
        clusters : str or None
            Either 'segments'/'unconstrained_segments' for data-driven
            splitting, or the key of a precomputed clustering in `adata.smp`.
        n_jobs : int
            Number of jobs.
        """
        super(AGA, self).__init__(adata,
                                  k=n_neighbors,
                                  n_pcs=n_pcs,
                                  n_dcs=n_dcs,
                                  n_jobs=n_jobs,
                                  recompute_pca=recompute_pca,
                                  recompute_distances=recompute_distances,
                                  recompute_graph=recompute_graph)
        self.n_neighbors = n_neighbors
        self.minimal_distance_evidence = minimal_distance_evidence
        # the ratio of max(minimal_distances)/min(minimal_distances) has to be smaller than minimal_distance_evidence
        # in order to be considered convincing evidence, otherwise, consider median_distances
        self.min_group_size = min_group_size if min_group_size >= 1 else int(min_group_size * self.X.shape[0])
        self.passed_adata = adata  # just for debugging purposes
        self.choose_largest_segment = True
        self.attachedness_measure = attachedness_measure
        self.tree_based_confidence = tree_based_confidence
        self.clusters = clusters
        self.clusters_precomputed = None
        self.clusters_precomputed_names = None
        self.flavor_develop = 'bi'  # bipartitioning
        if clusters not in {'segments', 'unconstrained_segments'}:
            # a precomputed clustering was requested; translate it into a
            # list of index arrays, one per cluster
            if clusters not in adata.smp_keys():
                raise ValueError('Did not find {} in adata.smp_keys()! '
                                 'If you do not have any precomputed clusters, pass "segments" for "node_groups" instead'
                                 .format(clusters))
            clusters_array = adata.smp[clusters]
            # transform to a list of index arrays
            self.clusters_precomputed = []
            # TODO: this is not a good solution
            if clusters + '_order' in adata.add:
                self.clusters_precomputed_names = list(adata.add[clusters + '_order'])
            else:
                self.clusters_precomputed_names = []
            from natsort import natsorted
            for cluster_name in natsorted(np.unique(clusters_array)):
                self.clusters_precomputed.append(np.where(cluster_name == clusters_array)[0])
                if clusters + '_order' not in adata.add:
                    self.clusters_precomputed_names.append(cluster_name)
            n_nodes = len(self.clusters_precomputed)
        else:
            if n_nodes is None:
                n_nodes = 1
                logg.hint(
                    'by passing the parameter `n_nodes`, '
                    'choose the number of subgroups to detect')
        # number of binary splits needed to reach n_nodes segments
        self.n_splits = n_nodes - 1
def splits_segments(self):
"""Detect splits and partition the data into corresponding segments.
Detect all splits up to `n_nodes`.
Writes
------
segs : np.ndarray
Array of dimension (number of segments) × (number of data
points). Each row stores a mask array that defines a segment.
segs_tips : np.ndarray
Array of dimension (number of segments) × 2. Each row stores the
indices of the two tip points of each segment.
segs_names : np.ndarray
Array of dimension (number of data points). Stores an integer label
for each segment.
"""
self.detect_splits()
self.postprocess_segments()
self.set_segs_names()
self.order_pseudotime()
    def detect_splits(self):
        """Detect all splits up to `n_nodes`.

        Iteratively bipartitions segments, maintaining several parallel
        bookkeeping structures (segments, tips, adjacency, distances) that
        are updated in lockstep by the split helpers.

        Writes Attributes
        -----------------
        segs : np.ndarray
            List of integer index arrays.
        segs_tips : np.ndarray
            List of indices of the tips of segments.
        """
        logg.info('    abstracted graph will have {} nodes'.format(self.n_splits+1))
        indices_all = np.arange(self.X.shape[0], dtype=int)
        # start from a single segment containing all points
        segs = [indices_all]
        if False:  # this is safe, but not compatible with on-the-fly computation
            tips_all = np.array(np.unravel_index(np.argmax(self.Dchosen), self.Dchosen.shape))
        else:
            # pick a first tip as the point farthest from the root (or from
            # point 0 if no root is set), then the point farthest from it
            if self.iroot is not None:
                tip_0 = np.argmax(self.Dchosen[self.iroot])
            else:
                tip_0 = np.argmax(self.Dchosen[0])  # just a random index, here fixed to "0"
            tips_all = np.array([tip_0, np.argmax(self.Dchosen[tip_0])])
        # we keep a list of the tips of each segment
        segs_tips = [tips_all]
        if self.clusters_precomputed_names:
            self.segs_names_original = [', '.join(self.clusters_precomputed_names)]
        segs_undecided = [True]
        segs_adjacency = [[]]
        segs_distances = np.zeros((1, 1))
        segs_adjacency_nodes = [{}]
        # logg.info('    do not consider groups with less than {} points for splitting'
        #           .format(self.min_group_size))
        for ibranch in range(self.n_splits):
            if self.clusters == 'unconstrained_segments':
                # unconstrained: pick the best segment by score, then split it
                iseg, new_tips = self.select_segment(segs, segs_tips, segs_undecided)
                if iseg == -1:
                    logg.info('... partitioning converged')
                    break
                logg.info('... branching {}:'.format(ibranch + 1),
                          'split group', iseg)
                segs_distances = self.do_split(segs, segs_tips,
                                               segs_undecided,
                                               segs_adjacency,
                                               segs_distances,
                                               iseg, new_tips)
            else:
                # constrained splitting; mutates the bookkeeping lists in place
                logg.msg('    split', ibranch + 1, v=4)
                stop, segs_distances = self.do_split_constrained(segs, segs_tips,
                                                                 segs_adjacency,
                                                                 segs_adjacency_nodes,
                                                                 segs_distances)
                if stop: break
        # segments
        self.segs = segs
        self.segs_tips = segs_tips
        self.segs_sizes = []
        for iseg, seg in enumerate(self.segs): self.segs_sizes.append(len(seg))
        # the full, unscaled adjacency matrix
        self.segs_adjacency_full_attachedness = 1/segs_distances
        # if self.attachedness_measure == 'connectedness':
        #     norm = np.sqrt(np.multiply.outer(self.segs_sizes, self.segs_sizes))
        #     self.segs_adjacency_full_attachedness /= norm
        self.segs_adjacency_full_confidence, self.segs_adjacency_tree_confidence \
            = self.compute_adjacency_confidence(
                self.segs_adjacency_full_attachedness,
                segs_adjacency,
                self.tree_based_confidence)
        # the diagonal of 1/segs_distances is inf (distances are 0); zero it
        np.fill_diagonal(self.segs_adjacency_full_attachedness, 0)
    def compute_adjacency_confidence(self, full_attachedness, tree_adjacency, tree_based_confidence):
        """Translates the attachedness measure into a confidence measure.

        Parameters
        ----------
        full_attachedness : np.ndarray
            Dense attachedness matrix between segments.
        tree_adjacency : list of index arrays or sparse matrix
            Tree edges; a sparse matrix is converted to neighbor lists.
        tree_based_confidence : bool
            If False, use a statistics-based confidence (normal approximation
            of the inter-cluster connection count); if True, scale distances
            relative to the median tree distance.

        Returns
        -------
        full_confidence : np.ndarray
            Dense confidence matrix.
        tree_confidence : sp.sparse.csr_matrix
            Confidence restricted to the tree edges.
        """
        if sp.sparse.issparse(tree_adjacency):
            tree_adjacency = [tree_adjacency[i].nonzero()[1] for i in range(tree_adjacency.shape[0])]
        segs_distances = 1/full_attachedness
        if not tree_based_confidence:  # inter- and intra-cluster based confidence
            from scipy.stats import norm
            # intra-cluster connections
            total_n = self.k * np.array(self.segs_sizes)  # total number of connections
            a = full_attachedness
            confidence = np.zeros_like(full_attachedness)
            for i in range(a.shape[0]):
                for j in range(i+1, a.shape[1]):
                    # expected fraction of connections between i and j under
                    # a random model; compare with the observed fraction
                    expected = total_n[i] * total_n[j] / np.sum(total_n)**2
                    actual = a[i, j] / np.sum(total_n)
                    variance = expected * (1 - expected) / np.sum(total_n)
                    if actual > expected:
                        confidence[i, j] = 1
                    elif actual < 1e-12:
                        confidence[i, j] = 0
                    else:
                        # two-sided tail probability of observing `actual`
                        confidence[i, j] = 2 * norm.cdf(actual, expected, np.sqrt(variance))
                    # i_name = self.segs_names_original[i]
                    # j_name = self.segs_names_original[j]
                    # print(i_name, j_name, expected, actual, variance, confidence[i, j])
            full_confidence = confidence + confidence.T
            tree_confidence = self.compute_tree_confidence(full_confidence, tree_adjacency)
        else:
            # compute the average tree distances
            tree_distances = []
            for i, neighbors in enumerate(tree_adjacency):
                tree_distances += segs_distances[i][neighbors].tolist()
            median_tree_distances = np.median(tree_distances)
            # distances at or below the median get full confidence;
            # larger distances decay exponentially
            full_confidence = np.zeros_like(segs_distances)
            full_confidence[segs_distances <= median_tree_distances] = 1
            full_confidence[segs_distances > median_tree_distances] = (
                np.exp(-(segs_distances-median_tree_distances)/median_tree_distances)
                [segs_distances > median_tree_distances])
            np.fill_diagonal(full_confidence, 0)
            tree_confidence = self.compute_tree_confidence(full_confidence, tree_adjacency, minimal_tree_attachedness=MINIMAL_TREE_ATTACHEDNESS)
        return full_confidence, tree_confidence
def compute_tree_confidence(self, full_confidence, tree_adjacency, minimal_tree_attachedness=1e-14):
n = full_confidence.shape[0]
tree_confidence = sp.sparse.lil_matrix((n, n), dtype=float)
for i, neighbors in enumerate(tree_adjacency):
clipped_attachedness = full_confidence[i][neighbors]
clipped_attachedness[clipped_attachedness < minimal_tree_attachedness] = minimal_tree_attachedness
tree_confidence[i, neighbors] = clipped_attachedness
full_confidence[i, neighbors] = clipped_attachedness
tree_confidence = tree_confidence.tocsr()
return tree_confidence
    def do_split_constrained(self, segs, segs_tips,
                             segs_adjacency,
                             segs_adjacency_nodes,
                             segs_distances):
        """Perform one (constrained) split and update all bookkeeping in place.

        Chooses one of several split strategies depending on whether
        precomputed clusters are available, replaces the chosen segment by a
        "trunk" plus the new segment(s), and enlarges/updates the distance
        matrix via `adjust_adjacency`.

        Returns
        -------
        stop : bool
            True if no segment is large enough to split further.
        segs_distances : np.ndarray
            The (possibly enlarged) distance matrix between segments.
        """
        # nothing left to split once all segments are below the minimal size
        if max([len(seg) for seg in segs]) < self.min_group_size:
            return True, segs_distances
        def binary_split_largest():
            # split the largest splittable segment at a new tip chosen to be
            # maximally distant from the existing tips
            isegs = np.argsort([len(seg) for seg in segs])[::-1]
            for iseg in isegs:
                seg = segs[iseg]
                logg.msg('    splitting group {} with size {}'.format(iseg, len(seg)), v=4)
                jsegs = [jseg for jseg in range(len(segs)) if jseg != iseg]
                dtip = np.zeros(len(seg))
                for jseg in jsegs:
                    if len(segs_tips[jseg]) > 0:
                        jtip = segs_tips[jseg][0]
                        dtip += self.Dchosen[jtip, seg]
                if len(jsegs) > 0: dtip /= len(jsegs)
                itip = segs_tips[iseg][0]
                dtip += self.Dchosen[itip, seg]
                imax = np.argmax(dtip)
                dist_new_itip = dtip[imax]
                new_itip = seg[imax]
                # assign each point to whichever tip is closer
                new_seg = self.Dchosen[new_itip, seg] < self.Dchosen[itip, seg]
                ssegs = [seg[new_seg], seg[~new_seg]]
                ssegs_tips = [[new_itip], []]
                sizes = [len(ssegs[0]), len(ssegs[1])]
                if sizes[0] != 0 and sizes[1] != 0: break
            logg.msg('    new tip {} with distance {:.6}, constraint was {}'
                   .format(new_itip, dist_new_itip, itip), v=4)
            logg.msg('    new sizes {} and {}'
                   .format(sizes[0], sizes[1]), v=4)
            if len(segs_tips[iseg]) > 0: ssegs_tips[1] = [segs_tips[iseg][0]]
            return iseg, seg, ssegs, ssegs_tips, sizes
        def new_split(segs_tips):
            # score every segment and split the best one (currently scored by
            # size; see the `score = len(seg)` line below)
            # upon initialization, start with no tips
            if len(segs) == 1:
                segs_tips.pop(0)
                segs_tips.append([])
            scores = []
            new_tips = []
            second_tips = []
            third_tips = []
            for iseg, seg in enumerate(segs):
                seg = segs[iseg]
                if len(seg) <= self.min_group_size:
                    # too small: mark with a sentinel score
                    scores.append(-1)
                    new_tips.append(0)
                    second_tips.append(0)
                    third_tips.append(0)
                    continue
                jsegs = [jseg for jseg in range(len(segs)) if jseg != iseg]
                dtip_others = np.zeros(len(seg))
                for jseg in jsegs:
                    if len(segs_tips[jseg]) > 0:
                        jtip = segs_tips[jseg][0]
                        dtip_others += self.Dchosen[jtip, seg]
                if len(jsegs) > 0: dtip_others /= len(jsegs)
                dtip = dtip_others
                need_to_compute_another_tip = False
                if len(segs_tips[iseg]) > 0:
                    itip = segs_tips[iseg][0]
                    dtip += self.Dchosen[itip, seg]
                elif len(jsegs) == 0:
                    # just take a random point and the extremum with respect to that
                    # point, the point is fixed to be the first in the segment
                    itip = seg[np.argmax(self.Dchosen[seg[0], seg])]
                    dtip += self.Dchosen[itip, seg]
                else:
                    need_to_compute_another_tip = True
                new_itip = seg[np.argmax(dtip)]
                if need_to_compute_another_tip:
                    itip = seg[np.argmax(self.Dchosen[new_itip, seg])]
                dtip = self.Dchosen[itip, seg] + self.Dchosen[new_itip, seg]
                itip_third = np.argmax(dtip)
                # score = dtip[itip_third] / self.Dchosen[itip, new_itip]
                score = len(seg)
                scores.append(score)
                new_tips.append(new_itip)
                second_tips.append(itip)
                third_tips.append(seg[itip_third])
            iseg = np.argmax(scores)
            new_itip = new_tips[iseg]
            itip = second_tips[iseg]
            third_itip = third_tips[iseg]
            seg = segs[iseg]
            logg.msg('... splitting group {} with size {}'.format(iseg, len(seg)), v=4)
            new_seg = self.Dchosen[new_itip, seg] < self.Dchosen[itip, seg]
            size_0 = np.sum(new_seg)
            if False:
                # disabled alternative: swap tips if the new segment would be
                # the larger half on the first split
                if size_0 > len(seg) - size_0 and len(segs) == 1:
                    new_itip = itip
                    new_seg = ~new_seg
                    size_0 = len(seg) - size_0
                idcs = np.argsort(self.Dchosen[new_itip, seg])
                sorted_dists_from_new_tip = self.Dchosen[new_itip, seg][idcs]
                i = np.argmax(np.diff(sorted_dists_from_new_tip))
                if i <= size_0: new_seg[idcs[i+1:]] = False  # idx starts at zero and this works
            ssegs = [seg[new_seg], seg[~new_seg]]
            ssegs_tips = [[new_itip], []]
            sizes = [len(ssegs[0]), len(ssegs[1])]
            logg.msg('    new tip {} with distance {:.6}, constraint was {}'
                   .format(new_itip, 0.0, itip), v=4)
            logg.msg('    new sizes {} and {}'
                   .format(sizes[0], sizes[1]), v=4)
            logg.msg('    the scores where', scores, v=4)
            return iseg, seg, ssegs, ssegs_tips, sizes
        def star_split(segs_tips):
            # split the largest segment using several reference tips at once
            if len(segs) == 1:
                segs_tips.pop(0)
                segs_tips.append([])
            isegs = np.argsort([len(seg) for seg in segs])[::-1]
            iseg = isegs[0]
            seg = segs[iseg]
            new_tips = [seg[np.argmax(self.Dchosen[seg[0], seg])]]
            dtip_others = self.Dchosen[new_tips[0], seg]
            dists = [np.max(dtip_others)]
            # accumulate up to 10 mutually distant tips; stop on repetition
            for j in range(10):
                new_tip = seg[np.argmax(dtip_others)]
                if new_tip in new_tips: break
                new_tips.append(new_tip)
                dtip_j = self.Dchosen[new_tips[-1], seg]
                dists.append(np.max(dtip_j))
                dtip_others += dtip_j
            tip_idx_max = np.argmax(dists)
            new_tip = new_tips.pop(tip_idx_max)
            dist_max = dists.pop(tip_idx_max)
            # points closer to the chosen tip than to any constraint tip form
            # the new segment
            new_seg = np.ones(len(seg), dtype=bool)
            for constraint_tip in new_tips:
                new_seg[self.Dchosen[new_tip, seg] > self.Dchosen[constraint_tip, seg]] = False
            ssegs = [seg[new_seg], seg[~new_seg]]
            ssegs_tips = [[new_tip], new_tips]
            sizes = [len(ssegs[0]), len(ssegs[1])]
            np.set_printoptions(precision=4)
            logg.msg('    new tip', new_tip, 'with distance', dist_max,
                   'using constraints {} with distances'
                   .format(new_tips), v=4)
            logg.msg('   ', dists, v=4)
            logg.msg('    new sizes {} and {}'
                   .format(sizes[0], sizes[1]), v=4)
            return iseg, seg, ssegs, ssegs_tips, sizes
        def select_precomputed(segs_tips):
            # peel off the precomputed cluster that contains the most distant
            # tip of segment 0 (the trunk always stays at position 0)
            if len(segs) == 1:
                segs_tips.pop(0)
                segs_tips.append([])
            iseg = 0
            seg = segs[iseg]
            logg.msg('    splitting group {} with size {}'.format(iseg, len(seg)), v=4)
            new_tips = [seg[np.argmax(self.Dchosen[seg[0], seg])]]
            dtip_others = self.Dchosen[new_tips[0], seg]
            dists = [np.max(dtip_others)]
            # it would be equivalent to just consider one pair of points
            for j in range(10):
                new_tip = seg[np.argmax(dtip_others)]
                if new_tip in new_tips: break
                new_tips.append(new_tip)
                dtip_j = self.Dchosen[new_tips[-1], seg]
                dists.append(np.max(dtip_j))
                dtip_others += dtip_j
            tip_idx_max = np.argmax(dists)
            new_tip = new_tips.pop(tip_idx_max)
            dist_max = dists.pop(tip_idx_max)
            for iclus, clus in enumerate(self.clusters_precomputed):
                if new_tip in set(clus):
                    new_seg = clus
                    clus_name = self.clusters_precomputed_names[iclus]
                    break
            pos_new_seg = np.in1d(seg, new_seg, assume_unique=True)
            ssegs = [new_seg, seg[~pos_new_seg]]
            ssegs_tips = [[new_tip], new_tips]
            sizes = [len(ssegs[0]), len(ssegs[1])]
            np.set_printoptions(precision=4)
            logg.msg('    new tip', new_tip, 'with distance', dist_max,
                   'using constraints {} with distances'
                   .format(new_tips), v=4)
            logg.msg('   ', dists, v=4)
            logg.msg('    new sizes {} and {}'
                   .format(sizes[0], sizes[1]), v=4)
            return iseg, seg, ssegs, ssegs_tips, sizes, clus_name
        if self.clusters_precomputed is None:
            iseg, seg, ssegs, ssegs_tips, sizes = binary_split_largest()
            # iseg, seg, ssegs, ssegs_tips, sizes = new_split(segs_tips)
            # iseg, seg, ssegs, ssegs_tips, sizes = star_split(segs_tips)
        else:
            iseg, seg, ssegs, ssegs_tips, sizes, clus_name = select_precomputed(segs_tips)
        trunk = 1
        # replace the split segment by the trunk at the same position so that
        # indices of the other segments stay valid
        segs.pop(iseg)
        segs_tips.pop(iseg)
        # insert trunk at same position
        segs.insert(iseg, ssegs[trunk])
        segs_tips.insert(iseg, ssegs_tips[trunk])
        if self.clusters_precomputed_names:
            # there is one partition that corresponds to all other partitions...
            iseg_name = ' '.join(np.setdiff1d(self.clusters_precomputed_names,
                                              [n for n in self.segs_names_original]
                                              + [clus_name]))
            self.segs_names_original[iseg] = iseg_name
        # append other segments
        segs += [seg for iseg, seg in enumerate(ssegs) if iseg != trunk]
        segs_tips += [seg_tips for iseg, seg_tips in enumerate(ssegs_tips) if iseg != trunk]
        if self.clusters_precomputed_names: self.segs_names_original += [clus_name]
        # correct edges in adjacency matrix
        n_add = len(ssegs) - 1
        new_shape = (segs_distances.shape[0] + n_add, segs_distances.shape[1] + n_add)
        # segs_distances.resize() throws an error!
        segs_distances_help = segs_distances.copy()
        segs_distances = np.zeros((new_shape))
        segs_distances[np.ix_(range(segs_distances_help.shape[0]),
                              range(segs_distances_help.shape[1]))] = segs_distances_help
        segs_distances = self.adjust_adjacency(iseg,
                                               n_add,
                                               segs,
                                               segs_tips,
                                               segs_adjacency,
                                               segs_adjacency_nodes,
                                               segs_distances, iseg)
        return False, segs_distances
    def select_segment(self, segs, segs_tips, segs_undecided):
        """Out of a list of line segments, choose segment that has the most
        distant second data point.

        Assume the distance matrix Ddiff is sorted according to seg_idcs.
        Compute all the distances.

        Returns
        -------
        iseg : int
            Index identifying the position within the list of line segments;
            -1 if no segment has a positive score (partitioning converged).
        new_tips : np.ndarray or None
            Positions of the three tips within the chosen segment; None when
            iseg is -1.
        """
        scores_tips = np.zeros((len(segs), 4))
        allindices = np.arange(self.X.shape[0], dtype=int)
        for iseg, seg in enumerate(segs):
            # do not consider too small segments
            if segs_tips[iseg][0] == -1: continue
            # restrict distance matrix to points in segment
            if not isinstance(self.Dchosen, data_graph.OnFlySymMatrix):
                Dseg = self.Dchosen[np.ix_(seg, seg)]
            else:
                Dseg = self.Dchosen.restrict(seg)
            # map the global position to the position within the segment
            tips = [np.where(allindices[seg] == tip)[0][0]
                    for tip in segs_tips[iseg]]
            # find the third point on the segment that has maximal
            # added distance from the two tip points
            dseg = Dseg[tips[0]] + Dseg[tips[1]]
            third_tip = np.argmax(dseg)
            new_tips = np.append(tips, third_tip)
            # compute the score as ratio of the added distance to the third tip,
            # to what it would be if it were on the straight line between the
            # two first tips, given by Dseg[tips[:2]]
            # if we did not normalize, there would be a danger of simply
            # assigning the highest score to the longest segment
            if 'bi' == self.flavor_develop:
                score = Dseg[new_tips[0], new_tips[1]]
            elif 'tri' == self.flavor_develop:
                score = dseg[new_tips[2]] / Dseg[new_tips[0], new_tips[1]] * len(seg)
            else:
                raise ValueError('unknown `self.flavor_develop`')
            # NOTE: when choose_largest_segment is set, the distance-based
            # score above is overridden by the segment size
            score = len(seg) if self.choose_largest_segment else score  # simply the number of points
            # self.choose_largest_segment = False
            logg.msg('... group', iseg, 'score', score, 'n_points', len(seg),
                   '(too small)' if len(seg) < self.min_group_size else '', v=4)
            if len(seg) <= self.min_group_size: score = 0
            # write result
            scores_tips[iseg, 0] = score
            scores_tips[iseg, 1:] = new_tips
        iseg = np.argmax(scores_tips[:, 0])
        if scores_tips[iseg, 0] == 0: return -1, None
        new_tips = scores_tips[iseg, 1:].astype(int)
        return iseg, new_tips
def postprocess_segments(self):
"""Convert the format of the segment class members."""
# make segs a list of mask arrays, it's easier to store
# as there is a hdf5 equivalent
for iseg, seg in enumerate(self.segs):
mask = np.zeros(self.X.shape[0], dtype=bool)
mask[seg] = True
self.segs[iseg] = mask
# convert to arrays
self.segs = np.array(self.segs)
self.segs_tips = np.array(self.segs_tips)
def set_segs_names(self):
"""Return a single array that stores integer segment labels."""
segs_names = np.zeros(self.X.shape[0], dtype=np.int8)
self.segs_names_unique = []
for iseg, seg in enumerate(self.segs):
segs_names[seg] = iseg
self.segs_names_unique.append(iseg)
self.segs_names = segs_names
    def order_pseudotime(self):
        """Define indices that reflect segment and pseudotime order.

        Writes
        ------
        indices : np.ndarray
            Index array of shape n, which stores an ordering of the data points
            with respect to increasing segment index and increasing pseudotime.
        changepoints : np.ndarray
            Index array of shape len(ssegs)-1, which stores the indices of
            points where the segment index changes, with respect to the ordering
            of indices.
        """
        # sort indices according to segments
        indices = np.argsort(self.segs_names)
        segs_names = self.segs_names[indices]
        # find changepoints of segments
        changepoints = np.arange(indices.size-1)[np.diff(segs_names) == 1] + 1
        if self.iroot is not None:
            # refine the order within each segment by pseudotime
            pseudotime = self.pseudotime[indices]
            for iseg, seg in enumerate(self.segs):
                # only consider one segment, it's already ordered by segment
                seg_sorted = seg[indices]
                # consider the pseudotime on this segment and sort them
                seg_indices = np.argsort(pseudotime[seg_sorted])
                # within the segment, order indices according to increasing pseudotime
                indices[seg_sorted] = indices[seg_sorted][seg_indices]
        # define class members
        self.indices = indices
        self.changepoints = changepoints
    def do_split(self, segs, segs_tips, segs_undecided, segs_adjacency,
                 segs_distances, iseg, new_tips):
        """Detect branching on given segment.

        Updates all list parameters inplace.

        Call function _do_split and perform bookkeeping on segs and
        segs_tips.

        Parameters
        ----------
        segs : list of np.ndarray
            Dchosen distance matrix restricted to segment.
        segs_tips : list of np.ndarray
            Stores all tip points for the segments in segs.
        iseg : int
            Position of segment under study in segs.
        new_tips : np.ndarray
            The three tip points. They form a 'triangle' that contains the data.

        Returns
        -------
        segs_distances : np.ndarray
            The enlarged and updated distance matrix between segments
            (returned because the in-place resize does not work).
        """
        seg = segs[iseg]
        # restrict distance matrix to points in segment
        if not isinstance(self.Dchosen, data_graph.OnFlySymMatrix):
            Dseg = self.Dchosen[np.ix_(seg, seg)]
        else:
            Dseg = self.Dchosen.restrict(seg)
        # given the three tip points and the distance matrix detect the
        # branching on the segment, return the list ssegs of segments that
        # are defined by splitting this segment
        result = self._do_split(Dseg, new_tips, seg, segs_tips)
        ssegs, ssegs_tips, ssegs_adjacency, trunk = result
        # map back to global indices
        for iseg_new, seg_new in enumerate(ssegs):
            ssegs[iseg_new] = seg[seg_new]
            ssegs_tips[iseg_new] = seg[ssegs_tips[iseg_new]]
        # remove previous segment
        segs.pop(iseg)
        segs_tips.pop(iseg)
        # insert trunk at same position
        segs.insert(iseg, ssegs[trunk])
        segs_tips.insert(iseg, ssegs_tips[trunk])
        # append other segments
        segs += [seg for iseg, seg in enumerate(ssegs) if iseg != trunk]
        segs_tips += [seg_tips for iseg, seg_tips in enumerate(ssegs_tips) if iseg != trunk]
        if len(ssegs) == 4:
            # insert undecided cells at same position
            segs_undecided.pop(iseg)
            segs_undecided.insert(iseg, True)
        # correct edges in adjacency matrix
        n_add = len(ssegs) - 1
        new_shape = (segs_distances.shape[0] + n_add, segs_distances.shape[1] + n_add)
        # segs_distances.resize() throws an error!
        segs_distances_help = segs_distances.copy()
        segs_distances = np.zeros((new_shape))
        segs_distances[np.ix_(range(segs_distances_help.shape[0]),
                              range(segs_distances_help.shape[1]))] = segs_distances_help
        segs_distances = self.adjust_adjacency(iseg, n_add,
                                               segs,
                                               segs_tips,
                                               segs_adjacency,
                                               segs_distances)
        segs_undecided += [False for i in range(n_add)]
        # need to return segs_distances as inplace formulation doesn't work
        return segs_distances
def compute_attachedness(self, jseg, kseg_list, segs, segs_tips,
segs_adjacency_nodes):
distances = []
median_distances = []
measure_points_in_jseg = []
measure_points_in_kseg = []
if self.attachedness_measure == 'random_walk_approx':
for kseg in kseg_list:
reference_point_in_kseg = segs_tips[kseg][0]
measure_points_in_jseg.append(segs[jseg][np.argmin(self.Dchosen[reference_point_in_kseg, segs[jseg]])])
reference_point_in_jseg = measure_points_in_jseg[-1]
measure_points_in_kseg.append(segs[kseg][np.argmin(self.Dchosen[reference_point_in_jseg, segs[kseg]])])
distances.append(self.Dchosen[measure_points_in_jseg[-1], measure_points_in_kseg[-1]])
logg.msg(' ',
jseg, '(tip: {}, clos: {})'.format(segs_tips[jseg][0], measure_points_in_jseg[-1]),
kseg, '(tip: {}, clos: {})'.format(segs_tips[kseg][0], measure_points_in_kseg[-1]),
'->', distances[-1], v=4)
elif self.attachedness_measure == 'random_walk':
for kseg in kseg_list:
closest_distance = 1e12
measure_point_in_jseg = 0
measure_point_in_kseg = 0
distances_pairs = []
robust_quantile_jseg = int(0.0*len(segs[jseg]))
robust_quantile_kseg = int(0.0*len(segs[kseg]))
for reference_point_in_kseg in segs[kseg]:
position_in_jseg = np.argpartition(self.Dchosen[reference_point_in_kseg, segs[jseg]], robust_quantile_jseg)[robust_quantile_jseg]
measure_point_in_jseg_test = segs[jseg][position_in_jseg]
distances_pairs.append(self.Dchosen[reference_point_in_kseg, measure_point_in_jseg_test])
if distances_pairs[-1] < closest_distance:
measure_point_in_jseg = measure_point_in_jseg_test
measure_point_in_kseg = reference_point_in_kseg
closest_distance = distances_pairs[-1]
measure_points_in_kseg.append(measure_point_in_kseg)
measure_points_in_jseg.append(measure_point_in_jseg)
closest_distance = np.partition(distances_pairs, robust_quantile_kseg)[robust_quantile_kseg]
distances.append(closest_distance)
median_distance = np.median(self.Dchosen[measure_point_in_kseg, segs[jseg]])
median_distances.append(median_distance)
logg.msg(' ',
jseg, '({})'.format(measure_points_in_jseg[-1]),
kseg, '({})'.format(measure_points_in_kseg[-1]),
'->', distances[-1], median_distance, v=4)
elif self.attachedness_measure == 'euclidian_distance_full_pairwise':
for kseg in kseg_list:
closest_similarity = 1e12
measure_point_in_jseg = 0
measure_point_in_kseg = 0
for reference_point_in_kseg in segs[kseg]:
measure_point_in_jseg_test = segs[jseg][np.argmax(self.Ktilde[reference_point_in_kseg, segs[jseg]])]
if self.Ktilde[reference_point_in_kseg, measure_point_in_jseg_test] > closest_similarity:
measure_point_in_jseg = measure_point_in_jseg_test
measure_point_in_kseg = reference_point_in_kseg
closest_similarity = self.Ktilde[reference_point_in_kseg, measure_point_in_jseg_test]
measure_points_in_kseg.append(measure_point_in_kseg)
measure_points_in_jseg.append(measure_point_in_jseg)
closest_distance = 1/closest_similarity
distances.append(closest_distance)
logg.msg(' ',
jseg, '(tip: {}, clos: {})'.format(segs_tips[jseg][0], measure_points_in_jseg[-1]),
kseg, '(tip: {}, clos: {})'.format(segs_tips[kseg][0], measure_points_in_kseg[-1]),
'->', distances[-1], v=4)
elif self.attachedness_measure == 'connectedness_brute_force':
segs_jseg = set(segs[jseg])
for kseg in kseg_list:
connectedness = 0
for reference_point_in_kseg in segs[kseg]:
for j in self.Ktilde[reference_point_in_kseg].nonzero()[1]:
if j in segs_jseg:
connectedness += 1
# distances.append(1./(connectedness+1))
distances.append(1./connectedness if connectedness != 0 else np.inf)
logg.msg(' ', jseg, '-', kseg_list, '->', distances, v=4)
else:
raise ValueError('unknown attachedness measure')
return distances, median_distances, measure_points_in_jseg, measure_points_in_kseg
    def trace_existing_connections(self, jseg, kseg_list, segs, segs_tips, segs_adjacency_nodes, trunk):
        """Redistribute the recorded connections of `jseg` after a split.

        The old segment was split into the trunk (``kseg_list[trunk]``) and a
        new segment; connections of `jseg` to the old segment are re-assigned
        to whichever of the two now contains the connecting point, and the
        bookkeeping dicts in `segs_adjacency_nodes` are updated in place.

        Returns
        -------
        distances : list of float
            Distances (1/connection count, or inf if unconnected) of `jseg`
            to the trunk and to the new segment, ordered as `kseg_list`.
        """
        j_connects = segs_adjacency_nodes[jseg].copy()
        connectedness = [0, 0]
        not_trunk = 1 if trunk == 0 else 0
        kseg_trunk = set(segs[kseg_list[trunk]])
        kseg_not_trunk = set(segs[kseg_list[not_trunk]])
        for j_connect, connects in j_connects.items():
            for point_connect, seg_connect in connects:
                if seg_connect == kseg_list[trunk]:
                    # score = 0
                    # if self.Dsq[point_connect, j_connect] > 0:
                    #     score += 1. / (1 + self.Dsq[point_connect, j_connect])  # / (1 + len(segs_adjacency_nodes[jseg]))  # len(segs[jseg])
                    # if self.Dsq[j_connect, point_connect] > 0:
                    #     score += 1. / (1 + self.Dsq[j_connect, point_connect])  # / (1 + len(segs_adjacency_nodes[kseg_list[trunk if in_kseg_trunk else not_trunk]]))  # len(kseg_trunk if in_kseg_trunk else kseg_not_trunk)
                    score = 1
                    in_kseg_trunk = True if point_connect in kseg_trunk else False
                    if in_kseg_trunk:
                        connectedness[trunk] += score
                    else:
                        # the connecting point moved to the new segment:
                        # relabel the entry on jseg's side and move the
                        # reverse entry from the trunk to the new segment
                        # elif point_connect in kseg_not_trunk:
                        if j_connect not in segs_adjacency_nodes[jseg]:
                            segs_adjacency_nodes[jseg][j_connect] = []
                        idx = segs_adjacency_nodes[jseg][j_connect].index((point_connect, kseg_list[trunk]))
                        segs_adjacency_nodes[jseg][j_connect][idx] = (point_connect, kseg_list[not_trunk])
                        if point_connect not in segs_adjacency_nodes[kseg_list[not_trunk]]:
                            segs_adjacency_nodes[kseg_list[not_trunk]][point_connect] = []
                        segs_adjacency_nodes[kseg_list[not_trunk]][point_connect].append((j_connect, jseg))
                        # clean up the dictionary for trunk
                        idx = segs_adjacency_nodes[kseg_list[trunk]][point_connect].index((j_connect, jseg))
                        segs_adjacency_nodes[kseg_list[trunk]][point_connect].pop(idx)
                        if len(segs_adjacency_nodes[kseg_list[trunk]][point_connect]) == 0:
                            del segs_adjacency_nodes[kseg_list[trunk]][point_connect]
                        connectedness[not_trunk] += score
        distances = [1/c if c > 0 else np.inf for c in connectedness]
        # distances = [1/(1+c) for c in connectedness]
        logg.msg('    ', jseg, '-', kseg_list, '->', distances, v=5)
        return distances
def establish_new_connections(self, kseg_list, segs, segs_adjacency_nodes):
kseg_loop_idx = 0 if len(segs[kseg_list[0]]) < len(segs[kseg_list[1]]) else 1
kseg_loop = kseg_list[kseg_loop_idx]
kseg_test = kseg_list[0 if kseg_loop_idx == 1 else 1]
seg_loop = segs[kseg_loop]
seg_test = set(segs[kseg_test])
connections = 0
for p in seg_loop:
p_neighbors = set(self.Ktilde[p].nonzero()[1])
for q in p_neighbors:
if q in seg_test:
if p not in segs_adjacency_nodes[kseg_loop]:
segs_adjacency_nodes[kseg_loop][p] = []
segs_adjacency_nodes[kseg_loop][p].append((q, kseg_test))
if q not in segs_adjacency_nodes[kseg_test]:
segs_adjacency_nodes[kseg_test][q] = []
segs_adjacency_nodes[kseg_test][q].append((p, kseg_loop))
# treat this in a different loop so we can normalize with surface of segment
for p, q_list in segs_adjacency_nodes[kseg_loop].items():
q_list = [q for q, jseg in q_list if jseg == kseg_test]
for q in q_list:
# score = 0
# if self.Dsq[p, q] > 0: score += 1. / (1 + self.Dsq[p, q]) # / (1 + len(segs_adjacency_nodes[kseg_test])) # len(seg_test)
# if self.Dsq[q, p] > 0: score += 1. / (1 + self.Dsq[q, p]) # / (1 + len(segs_adjacency_nodes[kseg_loop])) # len(seg_loop)
score = 1
connections += score
# distance = 1/(1+connections)
distance = 1/connections if connections > 0 else np.inf
logg.msg(' ', kseg_list[0], '-', kseg_list[1], '->', distance, v=5)
return distance
def adjust_adjacency(self, iseg, n_add, segs, segs_tips, segs_adjacency,
                     segs_adjacency_nodes, segs_distances, trunk):
    """Update segment adjacency and distances after splitting segment `iseg`.

    After `iseg` has been split into `n_add` new segments (already appended
    at the end of `segs`) plus the remainder of `iseg` itself, this
    (1) recomputes distances between the affected segments and all others,
    (2) reattaches segments previously connected to `iseg` to the closest
    affected segment, and (3) attaches each new segment either to an old
    segment or to another new segment, avoiding cycles in the adjacency
    graph.

    Parameters
    ----------
    iseg : int
        Index of the segment that was split.
    n_add : int
        Number of segments added by the split.
    segs, segs_tips, segs_adjacency, segs_adjacency_nodes
        Per-segment bookkeeping; `segs_adjacency` and
        `segs_adjacency_nodes` are modified in place.
    segs_distances : np.ndarray
        Matrix of inter-segment distances; updated in place and returned.
    trunk : int
        NOTE(review): the passed value is immediately overwritten below
        and therefore ignored.

    Returns
    -------
    segs_distances : np.ndarray
        The updated distance matrix.
    """
    prev_connecting_segments = segs_adjacency[iseg].copy()
    # make room for the new segments in the adjacency bookkeeping
    segs_adjacency += [[] for i in range(n_add)]
    segs_adjacency_nodes += [{} for i in range(n_add)]
    # affected segments: the n_add newly appended ones plus iseg, which by
    # convention is the trunk (last entry of kseg_list)
    kseg_list = list(range(len(segs) - n_add, len(segs))) + [iseg]
    trunk = len(kseg_list) - 1
    if self.attachedness_measure == 'connectedness':
        jseg_list = [jseg for jseg in range(len(segs)) if jseg not in kseg_list]
        for jseg in jseg_list:
            distances = self.trace_existing_connections(jseg, kseg_list, segs, segs_tips, segs_adjacency_nodes, trunk=trunk)
            segs_distances[jseg, kseg_list] = distances
            segs_distances[kseg_list, jseg] = distances
        distance = self.establish_new_connections(kseg_list, segs, segs_adjacency_nodes)
        segs_distances[kseg_list[0], kseg_list[1]] = distance
        segs_distances[kseg_list[1], kseg_list[0]] = distance
    # treat existing connections: reattach each old neighbor of iseg to the
    # closest of the affected segments
    for jseg in prev_connecting_segments:
        median_distances = []
        if self.attachedness_measure != 'connectedness':
            result = self.compute_attachedness(jseg, kseg_list, segs, segs_tips, segs_adjacency_nodes)
            distances, median_distances, measure_points_in_jseg, measure_points_in_kseg = result
            segs_distances[jseg, kseg_list] = distances
            segs_distances[kseg_list, jseg] = distances
        distances = segs_distances[jseg, kseg_list]
        # in case we do not have convincing evidence for a connection based
        # on the minimal distances, fall back to the median distances
        if (median_distances
            and ((max(distances) < 0.1 and min(distances) / max(distances) >= 0.4)
                 # all distances are very small, we require significant statistical evidence here
                 or (min(distances) >= 0.1 and min(distances) / max(distances) >= self.minimal_distance_evidence))
                 # distances are larger
            and min(median_distances) / max(median_distances) < self.minimal_distance_evidence):
            # require median_distances to actually provide better evidence
            logg.msg('    no convincing evidence in minimal distances, consider median distance', v=4)
            idx = np.argmin(median_distances)
        else:
            idx = np.argmin(distances)
        kseg_min = kseg_list[idx]
        # replace the link jseg <-> iseg by jseg <-> kseg_min
        pos = segs_adjacency[jseg].index(iseg)
        segs_adjacency[jseg][pos] = kseg_min
        pos_2 = segs_adjacency[iseg].index(jseg)
        segs_adjacency[iseg].pop(pos_2)
        segs_adjacency[kseg_min].append(jseg)
        logg.msg('    group {} is now attached to {}'.format(jseg, kseg_min), v=4)
    # in case the segment we split should correspond to two "clusters", we
    # need to check whether the new segments connect to any of the other old
    # segments
    # if not, we add a link between the new segments, if yes, we add two
    # links to connect them at the correct old segments
    do_not_attach_ksegs_with_each_other = False
    continue_after_distance_compute = False
    for kseg in kseg_list:
        jseg_list = [jseg for jseg in range(len(segs))
                     if jseg != kseg and jseg not in segs_adjacency[kseg]]
        if self.attachedness_measure != 'connectedness':
            result = self.compute_attachedness(kseg, jseg_list, segs, segs_tips, segs_adjacency_nodes)
            distances, median_distances, measure_points_in_kseg, measure_points_in_jseg = result
            segs_distances[kseg, jseg_list] = distances
            segs_distances[jseg_list, kseg] = distances
        # once a connection is settled we only keep refreshing distances
        if continue_after_distance_compute: continue
        idx = np.argmin(segs_distances[kseg, jseg_list])
        # candidate for the segment to which we would attach the new segment
        jseg_min = jseg_list[idx]
        logg.msg('    consider connecting', kseg, 'to', jseg_min, v=4)
        # if the closest segment is not among the affected segments
        if jseg_min not in kseg_list:
            segs_adjacency_sparse = sp.sparse.lil_matrix(
                (len(segs), len(segs)), dtype=float)
            for i, neighbors in enumerate(segs_adjacency):
                segs_adjacency_sparse[i, neighbors] = 1
            G = nx.Graph(segs_adjacency_sparse)
            paths_all = nx.single_source_dijkstra_path(G, source=kseg)
            # we can attach the new segment to an old segment only when no
            # path exists yet (otherwise we would close a cycle)
            if jseg_min not in paths_all:
                segs_adjacency[jseg_min].append(kseg)
                segs_adjacency[kseg].append(jseg_min)
                logg.msg('    attaching new segment',
                         kseg, 'at', jseg_min, v=4)
                # if we establish the new connection with an old segment
                # we should not add a new connection to the second new segment
                do_not_attach_ksegs_with_each_other = True
            else:
                # we cannot attach it to an old segment as this
                # would produce a cycle
                logg.msg('    cannot attach new segment',
                         kseg, 'at', jseg_min,
                         '(would produce cycle)', v=4)
                if kseg != kseg_list[-1]:
                    # we still have the other new segment to inspect so it's
                    # not a drama that we couldn't establish a new connection
                    logg.msg('    continue', v=4)
                    continue
                else:
                    # we do not add a new link
                    logg.msg('    do not add another link', v=4)
                    continue_after_distance_compute = True
        if jseg_min in kseg_list and not do_not_attach_ksegs_with_each_other:
            segs_adjacency[jseg_min].append(kseg)
            segs_adjacency[kseg].append(jseg_min)
            # we're already done as we found the new connection
            continue_after_distance_compute = True
            logg.msg('    attaching new segment',
                     kseg, 'with new segment', jseg_min, v=4)
    return segs_distances
def _do_split(self, Dseg, tips, seg_reference, old_tips):
    """Detect branching on given segment.

    Dispatches to the 'tri' or 'bi' split strategy depending on
    ``self.flavor_develop``. Points that end up in more than one candidate
    segment are collected in a separate group — by Haghverdi et al. (2016)
    referred to as 'undecided cells'. Afterwards tips and adjacency of the
    resulting segments are determined.

    Parameters
    ----------
    Dseg : np.ndarray
        Dchosen distance matrix restricted to segment.
    tips : np.ndarray
        The three tip points. They form a 'triangle' that contains the data.
    seg_reference
        Segment indices in the full data set; only referenced by the
        (removed) debug plotting code.
    old_tips
        Unused in this body; presumably kept for interface compatibility —
        TODO confirm against callers.

    Returns
    -------
    ssegs : list of np.ndarray
        List of segments obtained from splitting the single segment defined
        via the first two tip cells.
    ssegs_tips : list of np.ndarray
        List of tips of segments in ssegs.
    ssegs_adjacency : list of list of int
        For each new segment, the new segments it is attached to.
    trunk : int
        Index of the new segment that acts as attachment point ('trunk').
    """
    if 'tri' == self.flavor_develop:
        ssegs = self._do_split_single_wolf17_tri(Dseg, tips)
    elif 'bi' == self.flavor_develop:
        ssegs = self._do_split_single_wolf17_bi(Dseg, tips)
    else:
        raise ValueError('unknown `self.flavor_develop`')
    # make sure that each data point has a unique association with a segment
    masks = np.zeros((len(ssegs), Dseg.shape[0]), dtype=bool)
    for iseg, seg in enumerate(ssegs):
        masks[iseg][seg] = True
    nonunique = np.sum(masks, axis=0) > 1
    ssegs = []
    for iseg, mask in enumerate(masks):
        # points claimed by several segments are removed from all of them
        mask[nonunique] = False
        ssegs.append(np.arange(Dseg.shape[0], dtype=int)[mask])
    # compute new tips within new segments
    ssegs_tips = []
    for inewseg, newseg in enumerate(ssegs):
        # second tip: the point of the segment farthest from its first tip
        secondtip = newseg[np.argmax(Dseg[tips[inewseg]][newseg])]
        ssegs_tips.append([tips[inewseg], secondtip])
    undecided_cells = np.arange(Dseg.shape[0], dtype=int)[nonunique]
    if len(undecided_cells) > 0:
        ssegs.append(undecided_cells)
        # establish the connecting points with the other segments
        for inewseg, newseg_tips in enumerate(ssegs_tips):
            reference_point = newseg_tips[0]
            # closest cell to the new segment within undecided cells
            closest_cell = undecided_cells[np.argmin(Dseg[reference_point][undecided_cells])]
            # closest cell to the undecided cells within new segment
            # NOTE(review): `closest_cell` is overwritten each iteration and
            # never read afterwards — this loop currently has no effect
            closest_cell = ssegs[inewseg][np.argmin(Dseg[closest_cell][ssegs[inewseg]])]
        # also compute tips for the undecided cells
        tip_0 = undecided_cells[np.argmax(Dseg[undecided_cells[0]][undecided_cells])]
        tip_1 = undecided_cells[np.argmax(Dseg[tip_0][undecided_cells])]
        ssegs_tips.append([tip_0, tip_1])
        # the undecided cells (segment 3) act as the trunk that all three
        # proper segments attach to
        ssegs_adjacency = [[3], [3], [3], [0, 1, 2]]
        trunk = 3
        # (commented-out debug plotting code removed; see version control history)
    elif len(ssegs) == 3:
        reference_point = np.zeros(3, dtype=int)
        reference_point[0] = ssegs_tips[0][0]
        reference_point[1] = ssegs_tips[1][0]
        reference_point[2] = ssegs_tips[2][0]
        measure_points = np.zeros((3, 3), dtype=int)
        # this is another strategy than for the undecided_cells
        # here it's possible to use the more symmetric procedure
        # shouldn't make much of a difference
        measure_points[0, 1] = ssegs[1][np.argmin(Dseg[reference_point[0]][ssegs[1]])]
        measure_points[1, 0] = ssegs[0][np.argmin(Dseg[reference_point[1]][ssegs[0]])]
        measure_points[0, 2] = ssegs[2][np.argmin(Dseg[reference_point[0]][ssegs[2]])]
        measure_points[2, 0] = ssegs[0][np.argmin(Dseg[reference_point[2]][ssegs[0]])]
        measure_points[1, 2] = ssegs[2][np.argmin(Dseg[reference_point[1]][ssegs[2]])]
        measure_points[2, 1] = ssegs[1][np.argmin(Dseg[reference_point[2]][ssegs[1]])]
        # the segment whose two inter-segment connections are shortest in
        # total becomes the trunk
        added_dist = np.zeros(3)
        added_dist[0] = Dseg[measure_points[1, 0], measure_points[0, 1]] + Dseg[measure_points[2, 0], measure_points[0, 2]]
        added_dist[1] = Dseg[measure_points[0, 1], measure_points[1, 0]] + Dseg[measure_points[2, 1], measure_points[1, 2]]
        added_dist[2] = Dseg[measure_points[1, 2], measure_points[2, 1]] + Dseg[measure_points[0, 2], measure_points[2, 0]]
        trunk = np.argmin(added_dist)
        ssegs_adjacency = [[trunk] if i != trunk else
                           [j for j in range(3) if j != trunk]
                           for i in range(3)]
        # (commented-out debug plotting code removed; see version control history)
    else:
        # plain binary split: the two segments attach to each other
        trunk = 0
        ssegs_adjacency = [[1], [0]]
        reference_point_in_0 = ssegs_tips[0][0]
        measure_point_in_1 = ssegs[1][np.argmin(Dseg[reference_point_in_0][ssegs[1]])]
        reference_point_in_1 = measure_point_in_1  # ssegs_tips[1][0]
        measure_point_in_0 = ssegs[0][np.argmin(Dseg[reference_point_in_1][ssegs[0]])]
    return ssegs, ssegs_tips, ssegs_adjacency, trunk
def _do_split_single_haghverdi16(self, Dseg, tips):
    """Detect branching on given segment.

    Compute one candidate split per tip cell by 'moving away' from that
    tip; the first index of each permutation is the starting tip, the
    order of the other two does not matter.

    Parameters
    ----------
    Dseg : np.ndarray
        Distance matrix restricted to the segment.
    tips : np.ndarray
        Indices of the three tip points within the segment.

    Returns
    -------
    ssegs : list of np.ndarray
        One candidate split per permutation of the tip cells.
    """
    # permutations of tip cells: each starts from a different tip
    ps = [[0, 1, 2],  # start by computing distances from the first tip
          [1, 2, 0],  # -"- second tip
          [2, 0, 1]]  # -"- third tip
    # the original kept an unused enumerate() index; a comprehension is clearer
    return [self.__do_split_haghverdi16(Dseg, tips[p]) for p in ps]
def _do_split_single_wolf17_tri(self, Dseg, tips):
    """Partition a segment into three groups by proximity to three tips.

    A point belongs to tip i's group when it is strictly closer to tip i
    than to both other tips; with distance ties a point may end up in no
    group at all.

    Parameters
    ----------
    Dseg : np.ndarray
        Distance matrix restricted to the segment.
    tips : np.ndarray
        Indices of the three tip points within the segment.

    Returns
    -------
    ssegs : list of np.ndarray
        Three boolean masks over the segment, one per tip.
    """
    # pairwise "closer to tip i than to tip j" relations
    nearer_0_than_1 = Dseg[tips[0]] < Dseg[tips[1]]
    nearer_0_than_2 = Dseg[tips[0]] < Dseg[tips[2]]
    nearer_1_than_2 = Dseg[tips[1]] < Dseg[tips[2]]
    # elementwise conjunction of the two relevant relations per tip
    group_0 = nearer_0_than_1 & nearer_0_than_2
    group_1 = ~nearer_0_than_1 & nearer_1_than_2
    group_2 = ~nearer_0_than_2 & ~nearer_1_than_2
    return [group_0, group_1, group_2]
def _do_split_single_wolf17_bi(self, Dseg, tips):
    """Split a segment into two groups by proximity to the two tips.

    Parameters
    ----------
    Dseg : np.ndarray
        Distance matrix restricted to the segment.
    tips : np.ndarray
        Indices of the two tip points within the segment.

    Returns
    -------
    ssegs : list of np.ndarray
        Two boolean masks: points strictly closer to the first tip, and
        the rest (ties count as 'rest').
    """
    # the original carried an unreachable alternative behind `if True/else`
    # (splitting at the largest jump of dist_from_0 - dist_from_1);
    # only this branch was ever executed, so the dead code was removed
    closer_to_0_than_to_1 = Dseg[tips[0]] < Dseg[tips[1]]
    return [closer_to_0_than_to_1, ~closer_to_0_than_to_1]
def __do_split_haghverdi16(self, Dseg, tips):
    """Detect branching on given segment.

    Compute the point that maximizes the Kendall tau correlation of the
    sequences of distances to the second and the third tip, respectively,
    when 'moving away' from the first tip (tips[0]), i.e. in the direction
    of increasing distance from the first tip.

    Parameters
    ----------
    Dseg : np.ndarray
        Dchosen distance matrix restricted to segment.
    tips : np.ndarray
        The three tip points. They form a 'triangle' that contains the data.

    Returns
    -------
    np.ndarray
        Indices (within the segment) of the points obtained from
        "splitting away the first tip cell".
    """
    # sort by distance from the first tip point so that the sequence
    # Dseg[tips[0]][idcs] increases
    idcs = np.argsort(Dseg[tips[0]])
    # the distances from the other two tip points only increase jointly when
    # close to tips[0], where they become correlated; the point where this
    # happens defines the branching point
    # (a dead Euclidean-space alternative behind `if False:` was removed —
    # it did not work here because the scales in Dseg differ strongly)
    imax = self.kendall_tau_split(Dseg[tips[1]][idcs],
                                  Dseg[tips[2]][idcs])
    # all points up to, but excluding, the branching point; increasing the
    # cut slightly beyond imax is the more conservative choice
    ibranch = imax + 2  # this used to be imax + 1!
    return idcs[:ibranch]
def kendall_tau_split(self, a, b):
    """Return splitting index that maximizes correlation in the sequences.

    For each splitting index i, compute the difference of the two
    correlation measures kendalltau(a[:i], b[:i]) and
    kendalltau(a[i:], b[i:]) and return the index that maximizes

        kendalltau(a[:i], b[:i]) - kendalltau(a[i:], b[i:])

    The correlations are updated incrementally via ``_kendall_tau_diff`` /
    ``_kendall_tau_add`` / ``_kendall_tau_subtract`` instead of being
    recomputed from scratch at every split (which would be far slower).
    The incremental update corresponds to tau-a, i.e. it matches
    ``sp.stats.kendalltau`` exactly in the absence of ties.

    Parameters
    ----------
    a, b : np.ndarray
        One dimensional sequences.

    Returns
    -------
    i : int
        Splitting index according to above description.

    Raises
    ------
    ValueError
        If the inputs differ in size or are not one-dimensional.
    """
    if a.size != b.size:
        raise ValueError('a and b need to have the same size')
    # BUGFIX: the original check `a.ndim != b.ndim != 1` chains to
    # (a.ndim != b.ndim) and (b.ndim != 1) and therefore silently accepted
    # two arrays of equal ndim > 1; check each array explicitly instead
    if a.ndim != 1 or b.ndim != 1:
        raise ValueError('a and b need to be one-dimensional arrays')
    import scipy as sp
    import scipy.stats  # ensure the `stats` submodule is actually loaded
    min_length = 5
    n = a.size
    idx_range = np.arange(min_length, a.size - min_length - 1, dtype=int)
    corr_coeff = np.zeros(idx_range.size)
    pos_old = sp.stats.kendalltau(a[:min_length], b[:min_length])[0]
    neg_old = sp.stats.kendalltau(a[min_length:], b[min_length:])[0]
    for ii, i in enumerate(idx_range):
        # compute differences in concordance when adding a[i] and b[i]
        # to the first subsequence, and removing these elements from
        # the second subsequence
        # (dead `if False:` debug branches that recomputed the correlations
        # with sp.stats.kendalltau / mstats.kendalltau were removed)
        diff_pos, diff_neg = self._kendall_tau_diff(a, b, i)
        pos = pos_old + self._kendall_tau_add(i, diff_pos, pos_old)
        neg = neg_old + self._kendall_tau_subtract(n - i, diff_neg, neg_old)
        pos_old = pos
        neg_old = neg
        corr_coeff[ii] = pos - neg
    iimax = np.argmax(corr_coeff)
    imax = min_length + iimax
    corr_coeff_max = corr_coeff[iimax]
    if corr_coeff_max < 0.3:
        logg.msg('... is root itself, never obtain significant correlation', v=4)
    return imax
def _kendall_tau_add(self, len_old, diff_pos, tau_old):
    """Compute Kendall tau delta when appending one element.

    The new sequence has length ``len_old + 1``.

    Parameters
    ----------
    len_old : int
        The length of the old sequence, used to compute tau_old.
    diff_pos : int
        Difference between concordant and non-concordant pairs.
    tau_old : float
        Kendall rank correlation of the old sequence.

    Returns
    -------
    float
        The increment to add to ``tau_old``.
    """
    # how much the new element's pair concordance exceeds the old correlation
    excess_concordance = float(diff_pos) / len_old - tau_old
    return 2. / (len_old + 1) * excess_concordance
def _kendall_tau_subtract(self, len_old, diff_neg, tau_old):
    """Compute Kendall tau delta when removing one element.

    The new sequence has length ``len_old - 1``.

    Parameters
    ----------
    len_old : int
        The length of the old sequence, used to compute tau_old.
    diff_neg : int
        Difference between concordant and non-concordant pairs.
    tau_old : float
        Kendall rank correlation of the old sequence.

    Returns
    -------
    float
        The increment to add to ``tau_old``.
    """
    # how much the old correlation exceeds the removed element's concordance
    deficit_concordance = -float(diff_neg) / (len_old - 1) + tau_old
    return 2. / (len_old - 2) * deficit_concordance
def _kendall_tau_diff(self, a, b, i):
    """Compute difference in concordance of pairs in split sequences.

    Consider splitting a and b at index i: relate the single pair
    (a[i], b[i]) to all earlier and to all later points of the sequences.

    Parameters
    ----------
    a, b : np.ndarray
        One-dimensional sequences.
    i : int
        Split index.

    Returns
    -------
    diff_pos, diff_neg : float, int
        Difference between concordant and non-concordant pairs for the
        first and second subsequence, respectively.
    """
    def _order_relative_to_pivot(seq, pivot):
        # +1 where seq > pivot, -1 where seq < pivot, 0 on ties
        return np.where(seq > pivot, 1, np.where(seq < pivot, -1, 0))

    # ordering relation of a[i] and b[i] with all previous points
    head_a = _order_relative_to_pivot(a[:i], a[i])
    head_b = _order_relative_to_pivot(b[:i], b[i])
    diff_pos = np.dot(head_a, head_b).astype(float)
    # ordering relation of a[i] and b[i] with all later points
    tail_a = _order_relative_to_pivot(a[i:], a[i])
    tail_b = _order_relative_to_pivot(b[i:], b[i])
    diff_neg = np.dot(tail_a, tail_b)
    return diff_pos, diff_neg
| 50.446208 | 223 | 0.588213 |
ace721fb1769f6c7c3defc71c509827c05393e34 | 14,447 | py | Python | ucloud/services/usms/client.py | yangyimincn/ucloud-sdk-python3 | 9732d67f32ec5f46467458ba655c44c193a6bbff | [
"Apache-2.0"
] | 1 | 2020-01-20T02:49:43.000Z | 2020-01-20T02:49:43.000Z | ucloud/services/usms/client.py | yangyimincn/ucloud-sdk-python3 | 9732d67f32ec5f46467458ba655c44c193a6bbff | [
"Apache-2.0"
] | null | null | null | ucloud/services/usms/client.py | yangyimincn/ucloud-sdk-python3 | 9732d67f32ec5f46467458ba655c44c193a6bbff | [
"Apache-2.0"
] | null | null | null | """ Code is generated by ucloud-model, DO NOT EDIT IT. """
import typing
from ucloud.core.client import Client
from ucloud.services.usms.schemas import apis
class USMSClient(Client):
def __init__(
self, config: dict, transport=None, middleware=None, logger=None
):
super(USMSClient, self).__init__(config, transport, middleware, logger)
def create_usms_signature(
self, req: typing.Optional[dict] = None, **kwargs
) -> dict:
""" CreateUSMSSignature - 调用接口CreateUSMSSignature申请短信签名
**Request**
- **ProjectId** (str) - (Config) 项目ID,不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_
- **CertificateType** (int) - (Required) 签名的资质证明文件类型,需与签名类型保持一致,说明如下:0-三证合一/企业营业执照/组织机构代码证书/社会信用代码证书;1-应用商店后台开发者管理截图;2-备案服务商的备案成功截图(含域名,网站名称,备案号);3-公众号或小程序的管理界面截图;4-商标注册证书;5-组织机构代码证书、社会信用代码证书;
- **Description** (str) - (Required) 短信签名申请原因
- **File** (str) - (Required) 短信签名的资质证明文件,需先进行base64编码格式转换,此处填写转换后的字符串。文件大小不超过4 MB
- **SigContent** (str) - (Required) 短信签名名称;长度为2-12个字符, 可包含中文、数字和符号;无需填写【】或[],系统会自动添加
- **SigPurpose** (int) - (Required) 签名用途,0-自用,1-他用;
- **SigType** (int) - (Required) 签名类型,说明如下:0-公司或企业的全称或简称;1-App应用的全称或简称;2-工信部备案网站的全称或简称;3-公众号或小程序的全称或简称;4-商标名的全称或简称;5-政府/机关事业单位/其他单位的全称或简称;
- **ProxyFile** (str) - 短信签名授权委托文件,需先进行base64编码格式转换,此处填写转换后的字符串。文件大小不超过4 MB;当您是代理并使用第三方的签名时(也即SigPurpose为1-他用),该项为必填项;
**Response**
- **Message** (str) - 返回状态码描述,如果操作成功,默认返回为空
- **SigContent** (str) - 短信签名名称
- **SigId** (str) - 短信签名ID(短信签名申请时的工单ID)
"""
# build request
d = {"ProjectId": self.config.project_id}
req and d.update(req)
d = apis.CreateUSMSSignatureRequestSchema().dumps(d)
# build options
kwargs["max_retries"] = 0 # ignore retry when api is not idempotent
resp = self.invoke("CreateUSMSSignature", d, **kwargs)
return apis.CreateUSMSSignatureResponseSchema().loads(resp)
def create_usms_template(
self, req: typing.Optional[dict] = None, **kwargs
) -> dict:
""" CreateUSMSTemplate - 调用接口CreateUSMSTemplate申请短信模板
**Request**
- **ProjectId** (str) - (Config) 项目ID,不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_
- **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_
- **Purpose** (int) - (Required) 短信模板用途类型:1-验证码类短信模板;2-系统通知类短信模板;3-会员推广类短信模板;
- **Template** (str) - (Required) 短信模板内容,说明如下:字数不超过500,每个中文、符号、英文、数组等都计为一个字;模板中的变量填写格式:{N},其中N为大于1的整数,有多个参数时,建议N从1开始顺次,例如:{1}、{2}等;短信模板禁止仅包括变量的情况;
- **TemplateName** (str) - (Required) 短信模板名称,不超过32个字符,每个中文、符号、英文、数字等都计为1个字。
- **Remark** (str) - 短信模板申请原因说明,字数不超过128,每个中文、符号、英文、数字等都计为1个字。
- **UnsubscribeInfo** (str) - 当Purpose为3时,也即会员推广类短信模板,该项必填。枚举值:TD退订、回T退订、回N退订、回TD退订、退订回T、退订回D、退订回TD、退订回复T、退订回复D、退订回复N、退订回复TD、拒收回T
- **Zone** (str) - 可用区。参见 `可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_
**Response**
- **Message** (str) - 返回状态码描述,如果操作成功,默认返回为空
- **TemplateId** (str) - 短信模板ID(短信模板申请时的工单ID)
"""
# build request
d = {"ProjectId": self.config.project_id, "Region": self.config.region}
req and d.update(req)
d = apis.CreateUSMSTemplateRequestSchema().dumps(d)
# build options
kwargs["max_retries"] = 0 # ignore retry when api is not idempotent
resp = self.invoke("CreateUSMSTemplate", d, **kwargs)
return apis.CreateUSMSTemplateResponseSchema().loads(resp)
def delete_usms_signature(
self, req: typing.Optional[dict] = None, **kwargs
) -> dict:
""" DeleteUSMSSignature - 调用接口DeleteUSMSSignature删除短信签名
**Request**
- **ProjectId** (str) - (Config) 项目ID,不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_
- **SigIds** (list) - (Required) 签名ID(也即短信签名申请时的工单ID),支持以数组的方式,举例,以SigIds.0、SigIds.1...SigIds.N方式传入
**Response**
- **Message** (str) - 返回状态码描述,如果操作成功,默认返回为空
"""
# build request
d = {"ProjectId": self.config.project_id}
req and d.update(req)
d = apis.DeleteUSMSSignatureRequestSchema().dumps(d)
resp = self.invoke("DeleteUSMSSignature", d, **kwargs)
return apis.DeleteUSMSSignatureResponseSchema().loads(resp)
def delete_usms_template(
self, req: typing.Optional[dict] = None, **kwargs
) -> dict:
""" DeleteUSMSTemplate - 调用接口DeleteUSMSTemplate删除短信模板
**Request**
- **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_
- **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_
- **TemplateIds** (list) - (Required) 模板ID(也即短信模板申请时的工单ID),支持以数组的方式,举例,以TemplateIds.0、TemplateIds.1...TemplateIds.N方式传入
- **Zone** (str) - 可用区。参见 `可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_
**Response**
- **Message** (str) - 返回状态码描述,如果操作成功,默认返回为空
"""
# build request
d = {"ProjectId": self.config.project_id, "Region": self.config.region}
req and d.update(req)
d = apis.DeleteUSMSTemplateRequestSchema().dumps(d)
resp = self.invoke("DeleteUSMSTemplate", d, **kwargs)
return apis.DeleteUSMSTemplateResponseSchema().loads(resp)
def get_usms_send_receipt(
self, req: typing.Optional[dict] = None, **kwargs
) -> dict:
""" GetUSMSSendReceipt - 获取短信发送回执信息。下游服务提供商回执信息返回会有一定延时,建议发送完短信以后,5-10分钟后再调用该接口拉取回执信息。若超过12小时未返回,则请联系技术支持确认原因
**Request**
- **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_
- **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_
- **SessionNoSet** (list) - (Required) 发送短信时返回的SessionNo集合,SessionNoSet.0,SessionNoSet.1....格式
- **Zone** (str) - 可用区。参见 `可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_
**Response**
- **Data** (list) - 见 **ReceiptPerSession** 模型定义
- **Message** (str) - 错误描述
**Response Model**
**ReceiptPerPhone**
- **CostCount** (int) - 消耗短信条数
- **Phone** (str) - 手机号码
- **ReceiptDesc** (str) - 回执结果描述
- **ReceiptResult** (str) - 回执结果
- **ReceiptTime** (int) - 回执返回时间
**ReceiptPerSession**
- **ReceiptSet** (list) - 见 **ReceiptPerPhone** 模型定义
- **SessionNo** (str) - 发送短信时返回的SessionNo
"""
# build request
d = {"ProjectId": self.config.project_id, "Region": self.config.region}
req and d.update(req)
d = apis.GetUSMSSendReceiptRequestSchema().dumps(d)
resp = self.invoke("GetUSMSSendReceipt", d, **kwargs)
return apis.GetUSMSSendReceiptResponseSchema().loads(resp)
def query_usms_signature(
self, req: typing.Optional[dict] = None, **kwargs
) -> dict:
""" QueryUSMSSignature - 调用接口QueryUSMSSignature查询短信签名申请状态
**Request**
- **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_
- **SigContent** (str) - 签名内容;签名ID和签名至少需填写1项;
- **SigId** (str) - 已申请的短信签名ID(短信签名申请时的工单ID);签名ID和签名至少需填写1项;
**Response**
- **Data** (dict) - 见 **OutSignature** 模型定义
- **Message** (str) - 发生错误时,表示具体错误描述
**Response Model**
**OutSignature**
- **ErrDesc** (str) - 签名审核失败原因
- **SigContent** (str) - 签名内容
- **SigId** (str) - 签名ID
- **Status** (int) - 签名状态。0-待审核 1-审核中 2-审核通过 3-审核未通过 4-被禁用
"""
# build request
d = {"ProjectId": self.config.project_id}
req and d.update(req)
d = apis.QueryUSMSSignatureRequestSchema().dumps(d)
resp = self.invoke("QueryUSMSSignature", d, **kwargs)
return apis.QueryUSMSSignatureResponseSchema().loads(resp)
def query_usms_template(
self, req: typing.Optional[dict] = None, **kwargs
) -> dict:
""" QueryUSMSTemplate - 调用接口QueryUSMSTemplate查询短信模板申请状态
**Request**
- **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_
- **TemplateId** (str) - (Required) 模板ID
**Response**
- **Data** (dict) - 见 **OutTemplate** 模型定义
- **Message** (str) - 当RetCode不为0时,Message中显示具体错误描述
**Response Model**
**OutTemplate**
- **CreateTime** (int) - 创建时间
- **ErrDesc** (str) - 审核失败原因
- **Purpose** (int) - 模板类型,选项:1-验证码类 2-通知类 3-会员推广类
- **Remark** (str) - 模板说明
- **Status** (int) - 短信模板状态;状态说明:0-待审核,1-审核中,2-审核通过,3-审核未通过,4-被禁用
- **Template** (str) - 短信模板内容
- **TemplateId** (str) - 短信模板ID
- **TemplateName** (str) - 短信模板名称
- **UnsubscribeInfo** (str) - 退订信息;一般填写方式“回T退订”,当purpose为3(也即会员推广类)时,为必填项
"""
# build request
d = {"ProjectId": self.config.project_id}
req and d.update(req)
d = apis.QueryUSMSTemplateRequestSchema().dumps(d)
resp = self.invoke("QueryUSMSTemplate", d, **kwargs)
return apis.QueryUSMSTemplateResponseSchema().loads(resp)
def send_usms_message(
self, req: typing.Optional[dict] = None, **kwargs
) -> dict:
""" SendUSMSMessage - 发送短信息。短信字数超过70个后,按照每66个进行切割(因为要加上1/3), 2/3)等字样,占用4个字长)。短信最大长度不能超过600个字。每个汉字、数字、字母、字符都按一个字计
**Request**
- **ProjectId** (str) - (Config) 项目ID。不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_
- **Region** (str) - (Config) 地域。 参见 `地域和可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_
- **PhoneNumbers** (list) - (Required) 电话号码数组,电话号码格式为(60)1xxxxxxxx,()中为国际长途区号(如中国为86或0086,两种格式都支持),后面为电话号码.若不传入国际区号,如1851623xxxx,则默认为国内手机号
- **TemplateId** (str) - (Required) 模板ID。若指定的模板ID审核未通过(status不等于2)则不允许发送
- **TemplateParams** (list) - (Required) 模板参数数组,以TempalteParams.0,TempalteParams.1.。。格式。若模板ID指定的模板无可变参数,则不传入该参数。模板参数个数与模板不匹配,则不允许发送
- **SigContent** (str) - 使用的签名,如果不输入则使用默认签名,若没有申请默认签名不允许发送;若输入的签名没有申请,则无法发送
- **Zone** (str) - 可用区。参见 `可用区列表 <https://docs.ucloud.cn/api/summary/regionlist.html>`_
**Response**
- **Action** (str) - 操作名称
- **Message** (str) - 发生错误时表示错误描述
- **RetCode** (int) - 返回码
- **SessionNo** (str) - 本次提交发送的短信的唯一ID,可根据该值查询本次发送的短信列表
"""
# build request
d = {"ProjectId": self.config.project_id, "Region": self.config.region}
req and d.update(req)
d = apis.SendUSMSMessageRequestSchema().dumps(d)
# build options
kwargs["max_retries"] = 0 # ignore retry when api is not idempotent
resp = self.invoke("SendUSMSMessage", d, **kwargs)
return apis.SendUSMSMessageResponseSchema().loads(resp)
def update_usms_signature(
self, req: typing.Optional[dict] = None, **kwargs
) -> dict:
""" UpdateUSMSSignature - 调用接口UpdateUSMSSignature修改未通过审核的短信签名,并重新提交审核
**Request**
- **ProjectId** (str) - (Config) 项目ID,不填写为默认项目,子帐号必须填写。 请参考 `GetProjectList接口 <https://docs.ucloud.cn/api/summary/get_project_list.html>`_
- **File** (str) - (Required) 短信签名的资质证明文件,需先进行base64编码格式转换,此处填写转换后的字符串。文件大小不超过4 MB
- **SigContent** (str) - (Required) 新的短信签名名称;长度为2-12个字符, 可包含中文、数字和符号;无需填写【】或[],系统会自动添加
- **SigId** (str) - (Required) 签名ID(也即短信签名申请时的工单ID),支持以数组的方式,举例,以SigIds.0、SigIds.1...SigIds.N方式传入
- **SigPurpose** (int) - (Required) 签名用途,0-自用,1-他用;
- **SigType** (int) - (Required) 签名类型,说明如下:0-公司或企业的全称或简称;1-App应用的全称或简称;2-工信部备案网站的全称或简称;3-公众号或小程序的全称或简称;4-商标名的全称或简称;5-政府/机关事业单位/其他单位的全称或简称;
- **CertificateType** (int) - 签名的资质证明文件类型,需与签名类型保持一致,说明如下:0-三证合一/企业营业执照/组织机构代码证书/社会信用代码证书;1-应用商店后台开发者管理截图;2-备案服务商的备案成功截图(含域名,网站名称,备案号);3-公众号或小程序的管理界面截图;4-商标注册证书;5-组织机构代码证书、社会信用代码证书;
- **ProxyFile** (str) - 短信签名授权委托文件,需先进行base64编码格式转换,此处填写转换后的字符串。文件大小不超过4 MB;当您是代理并使用第三方的签名时(也即SigPurpose为1-他用),该项为必填项;
**Response**
- **Message** (str) - 返回状态码描述,如果操作成功,默认返回为空
"""
# build request
d = {"ProjectId": self.config.project_id}
req and d.update(req)
d = apis.UpdateUSMSSignatureRequestSchema().dumps(d)
resp = self.invoke("UpdateUSMSSignature", d, **kwargs)
return apis.UpdateUSMSSignatureResponseSchema().loads(resp)
def update_usms_template(
    self, req: typing.Optional[dict] = None, **kwargs
) -> dict:
    """UpdateUSMSTemplate - modify an SMS template that failed review
    and resubmit it for approval.

    **Request**

    - **ProjectId** (str) - (Config) Project ID. Defaults to the default
      project; sub-accounts must supply it. See the GetProjectList API.
    - **Region** (str) - (Config) Region. See the region list
      documentation.
    - **Template** (str) - (Required) New template content. At least one
      of template name and template content must be provided, otherwise
      an error is reported. At most 600 characters.
    - **TemplateId** (str) - (Required) SMS template ID.
    - **Remark** (str) - Reason for the template application, at most
      128 characters; every Chinese character, symbol, letter or digit
      counts as one character.
    - **TemplateName** (str) - New template name, at most 32 characters;
      every Chinese character, letter, digit or symbol counts as one
      character.
    - **UnsubscribeInfo** (str) - Required when Purpose is 3 (member
      marketing templates). One of the documented unsubscribe phrases,
      e.g. "TD退订", "回T退订", "回N退订", etc.
    - **Zone** (str) - Availability zone. See the zone list
      documentation.

    **Response**

    - **Message** (str) - Error description when a failure occurs.
    """
    # Config defaults first, caller-supplied fields override them.
    payload = {"ProjectId": self.config.project_id, "Region": self.config.region}
    if req:
        payload.update(req)
    body = apis.UpdateUSMSTemplateRequestSchema().dumps(payload)
    resp = self.invoke("UpdateUSMSTemplate", body, **kwargs)
    return apis.UpdateUSMSTemplateResponseSchema().loads(resp)
| 43.125373 | 200 | 0.619921 |
ace722c382261530604e53401f9417dcdbf0cf15 | 53,452 | py | Python | isi_sdk/api/network_groupnets_subnets_api.py | erik-hansen/isilon_sdk_python | 19958108ec550865ebeb1f2a4d250322cf4681c2 | [
"MIT"
] | null | null | null | isi_sdk/api/network_groupnets_subnets_api.py | erik-hansen/isilon_sdk_python | 19958108ec550865ebeb1f2a4d250322cf4681c2 | [
"MIT"
] | null | null | null | isi_sdk/api/network_groupnets_subnets_api.py | erik-hansen/isilon_sdk_python | 19958108ec550865ebeb1f2a4d250322cf4681c2 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 3
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from isi_sdk_8_0.api_client import ApiClient
class NetworkGroupnetsSubnetsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Bind this API wrapper to an ``ApiClient``.

    :param api_client: client used for all HTTP calls; when omitted a
        default ``ApiClient`` is constructed.
    """
    self.api_client = api_client if api_client is not None else ApiClient()
def create_pools_pool_rebalance_ip(self, pools_pool_rebalance_ip, groupnet, subnet, pool, **kwargs):  # noqa: E501
    """create_pools_pool_rebalance_ip  # noqa: E501

    Rebalance IP addresses in specified pool.  # noqa: E501
    Synchronous by default; pass ``async=True`` to receive a request
    thread instead (call ``.get()`` on it for the result).

    :param async bool
    :param Empty pools_pool_rebalance_ip: (required)
    :param str groupnet: (required)
    :param str subnet: (required)
    :param str pool: (required)
    :return: Empty, or the request thread when called asynchronously.
    """
    # With _return_http_data_only set, the *_with_http_info variant
    # already yields either the plain response data or (when async) the
    # request thread, so both execution modes reduce to one call.
    kwargs['_return_http_data_only'] = True
    return self.create_pools_pool_rebalance_ip_with_http_info(
        pools_pool_rebalance_ip, groupnet, subnet, pool, **kwargs)
def create_pools_pool_rebalance_ip_with_http_info(self, pools_pool_rebalance_ip, groupnet, subnet, pool, **kwargs):  # noqa: E501
    """create_pools_pool_rebalance_ip  # noqa: E501

    Rebalance IP addresses in specified pool.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.create_pools_pool_rebalance_ip_with_http_info(pools_pool_rebalance_ip, groupnet, subnet, pool, async=True)
    >>> result = thread.get()

    :param async bool
    :param Empty pools_pool_rebalance_ip: (required)
    :param str groupnet: (required)
    :param str subnet: (required)
    :param str pool: (required)
    :return: Empty
             If the method is called asynchronously,
             returns the request thread.
    """

    all_params = ['pools_pool_rebalance_ip', 'groupnet', 'subnet', 'pool']  # noqa: E501
    # Internal knobs accepted by every generated endpoint wrapper.
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Fold **kwargs into params, rejecting anything unrecognized.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_pools_pool_rebalance_ip" % key
            )
        params[key] = val
    del params['kwargs']

    # Every positional argument is mandatory; an explicit None is
    # treated the same as a missing value.
    for required in ('pools_pool_rebalance_ip', 'groupnet', 'subnet', 'pool'):
        if params.get(required) is None:
            raise ValueError("Missing the required parameter `%s` when calling `create_pools_pool_rebalance_ip`" % required)  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'groupnet' in params:
        path_params['Groupnet'] = params['groupnet']  # noqa: E501
    if 'subnet' in params:
        path_params['Subnet'] = params['subnet']  # noqa: E501
    if 'pool' in params:
        path_params['Pool'] = params['pool']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'pools_pool_rebalance_ip' in params:
        body_params = params['pools_pool_rebalance_ip']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth']  # noqa: E501

    return self.api_client.call_api(
        '/platform/3/network/groupnets/{Groupnet}/subnets/{Subnet}/pools/{Pool}/rebalance-ips', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Empty',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # BUG FIX: `async` became a reserved keyword in Python 3.7, so a
        # literal `async=...` keyword argument is a SyntaxError there.
        # Unpacking it from a dict passes the same argument while
        # keeping this module importable on modern interpreters.
        **{'async': params.get('async')})
def create_pools_pool_rule(self, pools_pool_rule, groupnet, subnet, pool, **kwargs):  # noqa: E501
    """create_pools_pool_rule  # noqa: E501

    Create a new rule.  # noqa: E501
    Synchronous by default; pass ``async=True`` to receive a request
    thread instead (call ``.get()`` on it for the result).

    :param async bool
    :param PoolsPoolRuleCreateParams pools_pool_rule: (required)
    :param str groupnet: (required)
    :param str subnet: (required)
    :param str pool: (required)
    :return: CreateResponse, or the request thread when called
             asynchronously.
    """
    # With _return_http_data_only set, the *_with_http_info variant
    # already yields either the plain response data or (when async) the
    # request thread, so both execution modes reduce to one call.
    kwargs['_return_http_data_only'] = True
    return self.create_pools_pool_rule_with_http_info(
        pools_pool_rule, groupnet, subnet, pool, **kwargs)
def create_pools_pool_rule_with_http_info(self, pools_pool_rule, groupnet, subnet, pool, **kwargs):  # noqa: E501
    """create_pools_pool_rule  # noqa: E501

    Create a new rule.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.create_pools_pool_rule_with_http_info(pools_pool_rule, groupnet, subnet, pool, async=True)
    >>> result = thread.get()

    :param async bool
    :param PoolsPoolRuleCreateParams pools_pool_rule: (required)
    :param str groupnet: (required)
    :param str subnet: (required)
    :param str pool: (required)
    :return: CreateResponse
             If the method is called asynchronously,
             returns the request thread.
    """

    all_params = ['pools_pool_rule', 'groupnet', 'subnet', 'pool']  # noqa: E501
    # Internal knobs accepted by every generated endpoint wrapper.
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Fold **kwargs into params, rejecting anything unrecognized.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_pools_pool_rule" % key
            )
        params[key] = val
    del params['kwargs']

    # Every positional argument is mandatory; an explicit None is
    # treated the same as a missing value.
    for required in ('pools_pool_rule', 'groupnet', 'subnet', 'pool'):
        if params.get(required) is None:
            raise ValueError("Missing the required parameter `%s` when calling `create_pools_pool_rule`" % required)  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'groupnet' in params:
        path_params['Groupnet'] = params['groupnet']  # noqa: E501
    if 'subnet' in params:
        path_params['Subnet'] = params['subnet']  # noqa: E501
    if 'pool' in params:
        path_params['Pool'] = params['pool']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'pools_pool_rule' in params:
        body_params = params['pools_pool_rule']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth']  # noqa: E501

    return self.api_client.call_api(
        '/platform/3/network/groupnets/{Groupnet}/subnets/{Subnet}/pools/{Pool}/rules', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='CreateResponse',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # BUG FIX: `async` became a reserved keyword in Python 3.7, so a
        # literal `async=...` keyword argument is a SyntaxError there.
        # Unpacking it from a dict passes the same argument while
        # keeping this module importable on modern interpreters.
        **{'async': params.get('async')})
def create_pools_pool_sc_resume_node(self, pools_pool_sc_resume_node, groupnet, subnet, pool, **kwargs):  # noqa: E501
    """create_pools_pool_sc_resume_node  # noqa: E501

    Resume suspended nodes.  # noqa: E501
    Synchronous by default; pass ``async=True`` to receive a request
    thread instead (call ``.get()`` on it for the result).

    :param async bool
    :param PoolsPoolScResumeNode pools_pool_sc_resume_node: (required)
    :param str groupnet: (required)
    :param str subnet: (required)
    :param str pool: (required)
    :return: Empty, or the request thread when called asynchronously.
    """
    # With _return_http_data_only set, the *_with_http_info variant
    # already yields either the plain response data or (when async) the
    # request thread, so both execution modes reduce to one call.
    kwargs['_return_http_data_only'] = True
    return self.create_pools_pool_sc_resume_node_with_http_info(
        pools_pool_sc_resume_node, groupnet, subnet, pool, **kwargs)
def create_pools_pool_sc_resume_node_with_http_info(self, pools_pool_sc_resume_node, groupnet, subnet, pool, **kwargs):  # noqa: E501
    """create_pools_pool_sc_resume_node  # noqa: E501

    Resume suspended nodes.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.create_pools_pool_sc_resume_node_with_http_info(pools_pool_sc_resume_node, groupnet, subnet, pool, async=True)
    >>> result = thread.get()

    :param async bool
    :param PoolsPoolScResumeNode pools_pool_sc_resume_node: (required)
    :param str groupnet: (required)
    :param str subnet: (required)
    :param str pool: (required)
    :return: Empty
             If the method is called asynchronously,
             returns the request thread.
    """

    all_params = ['pools_pool_sc_resume_node', 'groupnet', 'subnet', 'pool']  # noqa: E501
    # Internal knobs accepted by every generated endpoint wrapper.
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Fold **kwargs into params, rejecting anything unrecognized.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_pools_pool_sc_resume_node" % key
            )
        params[key] = val
    del params['kwargs']

    # Every positional argument is mandatory; an explicit None is
    # treated the same as a missing value.
    for required in ('pools_pool_sc_resume_node', 'groupnet', 'subnet', 'pool'):
        if params.get(required) is None:
            raise ValueError("Missing the required parameter `%s` when calling `create_pools_pool_sc_resume_node`" % required)  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'groupnet' in params:
        path_params['Groupnet'] = params['groupnet']  # noqa: E501
    if 'subnet' in params:
        path_params['Subnet'] = params['subnet']  # noqa: E501
    if 'pool' in params:
        path_params['Pool'] = params['pool']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'pools_pool_sc_resume_node' in params:
        body_params = params['pools_pool_sc_resume_node']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth']  # noqa: E501

    return self.api_client.call_api(
        '/platform/3/network/groupnets/{Groupnet}/subnets/{Subnet}/pools/{Pool}/sc-resume-nodes', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Empty',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # BUG FIX: `async` became a reserved keyword in Python 3.7, so a
        # literal `async=...` keyword argument is a SyntaxError there.
        # Unpacking it from a dict passes the same argument while
        # keeping this module importable on modern interpreters.
        **{'async': params.get('async')})
def create_pools_pool_sc_suspend_node(self, pools_pool_sc_suspend_node, groupnet, subnet, pool, **kwargs):  # noqa: E501
    """create_pools_pool_sc_suspend_node  # noqa: E501

    Suspend nodes.  # noqa: E501
    Synchronous by default; pass ``async=True`` to receive a request
    thread instead (call ``.get()`` on it for the result).

    :param async bool
    :param PoolsPoolScResumeNode pools_pool_sc_suspend_node: (required)
    :param str groupnet: (required)
    :param str subnet: (required)
    :param str pool: (required)
    :return: Empty, or the request thread when called asynchronously.
    """
    # With _return_http_data_only set, the *_with_http_info variant
    # already yields either the plain response data or (when async) the
    # request thread, so both execution modes reduce to one call.
    kwargs['_return_http_data_only'] = True
    return self.create_pools_pool_sc_suspend_node_with_http_info(
        pools_pool_sc_suspend_node, groupnet, subnet, pool, **kwargs)
def create_pools_pool_sc_suspend_node_with_http_info(self, pools_pool_sc_suspend_node, groupnet, subnet, pool, **kwargs):  # noqa: E501
    """create_pools_pool_sc_suspend_node  # noqa: E501

    Suspend nodes.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.create_pools_pool_sc_suspend_node_with_http_info(pools_pool_sc_suspend_node, groupnet, subnet, pool, async=True)
    >>> result = thread.get()

    :param async bool
    :param PoolsPoolScResumeNode pools_pool_sc_suspend_node: (required)
    :param str groupnet: (required)
    :param str subnet: (required)
    :param str pool: (required)
    :return: Empty
             If the method is called asynchronously,
             returns the request thread.
    """

    all_params = ['pools_pool_sc_suspend_node', 'groupnet', 'subnet', 'pool']  # noqa: E501
    # Internal knobs accepted by every generated endpoint wrapper.
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Fold **kwargs into params, rejecting anything unrecognized.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_pools_pool_sc_suspend_node" % key
            )
        params[key] = val
    del params['kwargs']

    # Every positional argument is mandatory; an explicit None is
    # treated the same as a missing value.
    for required in ('pools_pool_sc_suspend_node', 'groupnet', 'subnet', 'pool'):
        if params.get(required) is None:
            raise ValueError("Missing the required parameter `%s` when calling `create_pools_pool_sc_suspend_node`" % required)  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'groupnet' in params:
        path_params['Groupnet'] = params['groupnet']  # noqa: E501
    if 'subnet' in params:
        path_params['Subnet'] = params['subnet']  # noqa: E501
    if 'pool' in params:
        path_params['Pool'] = params['pool']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    if 'pools_pool_sc_suspend_node' in params:
        body_params = params['pools_pool_sc_suspend_node']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth']  # noqa: E501

    return self.api_client.call_api(
        '/platform/3/network/groupnets/{Groupnet}/subnets/{Subnet}/pools/{Pool}/sc-suspend-nodes', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Empty',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # BUG FIX: `async` became a reserved keyword in Python 3.7, so a
        # literal `async=...` keyword argument is a SyntaxError there.
        # Unpacking it from a dict passes the same argument while
        # keeping this module importable on modern interpreters.
        **{'async': params.get('async')})
def delete_pools_pool_rule(self, pools_pool_rule_id, groupnet, subnet, pool, **kwargs):  # noqa: E501
    """delete_pools_pool_rule  # noqa: E501

    Delete a network rule.  # noqa: E501
    Synchronous by default; pass ``async=True`` to receive a request
    thread instead (call ``.get()`` on it for the result).

    :param async bool
    :param str pools_pool_rule_id: Delete a network rule. (required)
    :param str groupnet: (required)
    :param str subnet: (required)
    :param str pool: (required)
    :return: None, or the request thread when called asynchronously.
    """
    # With _return_http_data_only set, the *_with_http_info variant
    # already yields either the plain response data or (when async) the
    # request thread, so both execution modes reduce to one call.
    kwargs['_return_http_data_only'] = True
    return self.delete_pools_pool_rule_with_http_info(
        pools_pool_rule_id, groupnet, subnet, pool, **kwargs)
def delete_pools_pool_rule_with_http_info(self, pools_pool_rule_id, groupnet, subnet, pool, **kwargs):  # noqa: E501
    """delete_pools_pool_rule  # noqa: E501

    Delete a network rule.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.delete_pools_pool_rule_with_http_info(pools_pool_rule_id, groupnet, subnet, pool, async=True)
    >>> result = thread.get()

    :param async bool
    :param str pools_pool_rule_id: Delete a network rule. (required)
    :param str groupnet: (required)
    :param str subnet: (required)
    :param str pool: (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """

    all_params = ['pools_pool_rule_id', 'groupnet', 'subnet', 'pool']  # noqa: E501
    # Internal knobs accepted by every generated endpoint wrapper.
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Fold **kwargs into params, rejecting anything unrecognized.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_pools_pool_rule" % key
            )
        params[key] = val
    del params['kwargs']

    # Every positional argument is mandatory; an explicit None is
    # treated the same as a missing value.
    for required in ('pools_pool_rule_id', 'groupnet', 'subnet', 'pool'):
        if params.get(required) is None:
            raise ValueError("Missing the required parameter `%s` when calling `delete_pools_pool_rule`" % required)  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'pools_pool_rule_id' in params:
        path_params['PoolsPoolRuleId'] = params['pools_pool_rule_id']  # noqa: E501
    if 'groupnet' in params:
        path_params['Groupnet'] = params['groupnet']  # noqa: E501
    if 'subnet' in params:
        path_params['Subnet'] = params['subnet']  # noqa: E501
    if 'pool' in params:
        path_params['Pool'] = params['pool']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # DELETE carries no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth']  # noqa: E501

    return self.api_client.call_api(
        '/platform/3/network/groupnets/{Groupnet}/subnets/{Subnet}/pools/{Pool}/rules/{PoolsPoolRuleId}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # BUG FIX: `async` became a reserved keyword in Python 3.7, so a
        # literal `async=...` keyword argument is a SyntaxError there.
        # Unpacking it from a dict passes the same argument while
        # keeping this module importable on modern interpreters.
        **{'async': params.get('async')})
def get_pools_pool_interfaces(self, groupnet, subnet, pool, **kwargs):  # noqa: E501
    """get_pools_pool_interfaces  # noqa: E501

    Get a list of interfaces.  # noqa: E501
    Synchronous by default; pass ``async=True`` to receive a request
    thread instead (call ``.get()`` on it for the result).

    :param async bool
    :param str groupnet: (required)
    :param str subnet: (required)
    :param str pool: (required)
    :param str sort: The field that will be used for sorting.
    :param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
    :param int limit: Return no more than this many results at once (see resume).
    :param str dir: The direction of the sort.
    :param str lnns: Get a list of interfaces for the specified lnn.
    :return: PoolsPoolInterfaces, or the request thread when called
             asynchronously.
    """
    # With _return_http_data_only set, the *_with_http_info variant
    # already yields either the plain response data or (when async) the
    # request thread, so both execution modes reduce to one call.
    kwargs['_return_http_data_only'] = True
    return self.get_pools_pool_interfaces_with_http_info(
        groupnet, subnet, pool, **kwargs)
def get_pools_pool_interfaces_with_http_info(self, groupnet, subnet, pool, **kwargs):  # noqa: E501
    """get_pools_pool_interfaces  # noqa: E501

    Get a list of interfaces.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_pools_pool_interfaces_with_http_info(groupnet, subnet, pool, async=True)
    >>> result = thread.get()

    :param async bool
    :param str groupnet: (required)
    :param str subnet: (required)
    :param str pool: (required)
    :param str sort: The field that will be used for sorting.
    :param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
    :param int limit: Return no more than this many results at once (see resume).
    :param str dir: The direction of the sort.
    :param str lnns: Get a list of interfaces for the specified lnn.
    :return: PoolsPoolInterfaces
             If the method is called asynchronously,
             returns the request thread.
    """

    all_params = ['groupnet', 'subnet', 'pool', 'sort', 'resume', 'limit', 'dir', 'lnns']  # noqa: E501
    # Internal knobs accepted by every generated endpoint wrapper.
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Fold **kwargs into params, rejecting anything unrecognized.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_pools_pool_interfaces" % key
            )
        params[key] = val
    del params['kwargs']

    # Every positional argument is mandatory; an explicit None is
    # treated the same as a missing value.
    for required in ('groupnet', 'subnet', 'pool'):
        if params.get(required) is None:
            raise ValueError("Missing the required parameter `%s` when calling `get_pools_pool_interfaces`" % required)  # noqa: E501

    if 'limit' in params and params['limit'] < 1:  # noqa: E501
        raise ValueError("Invalid value for parameter `limit` when calling `get_pools_pool_interfaces`, must be a value greater than or equal to `1`")  # noqa: E501
    collection_formats = {}

    path_params = {}
    if 'groupnet' in params:
        path_params['Groupnet'] = params['groupnet']  # noqa: E501
    if 'subnet' in params:
        path_params['Subnet'] = params['subnet']  # noqa: E501
    if 'pool' in params:
        path_params['Pool'] = params['pool']  # noqa: E501

    query_params = []
    # Only forward the optional query arguments the caller supplied,
    # preserving the documented ordering.
    for name in ('sort', 'resume', 'limit', 'dir', 'lnns'):
        if name in params:
            query_params.append((name, params[name]))

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth']  # noqa: E501

    return self.api_client.call_api(
        '/platform/3/network/groupnets/{Groupnet}/subnets/{Subnet}/pools/{Pool}/interfaces', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='PoolsPoolInterfaces',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        # BUG FIX: `async` became a reserved keyword in Python 3.7, so a
        # literal `async=...` keyword argument is a SyntaxError there.
        # Unpacking it from a dict passes the same argument while
        # keeping this module importable on modern interpreters.
        **{'async': params.get('async')})
def get_pools_pool_rule(self, pools_pool_rule_id, groupnet, subnet, pool, **kwargs):  # noqa: E501
    """get_pools_pool_rule  # noqa: E501

    View a single network rule.  # noqa: E501
    Synchronous by default; pass ``async=True`` to receive a request
    thread instead (call ``.get()`` on it for the result).

    :param async bool
    :param str pools_pool_rule_id: View a single network rule. (required)
    :param str groupnet: (required)
    :param str subnet: (required)
    :param str pool: (required)
    :return: PoolsPoolRules, or the request thread when called
             asynchronously.
    """
    # With _return_http_data_only set, the *_with_http_info variant
    # already yields either the plain response data or (when async) the
    # request thread, so both execution modes reduce to one call.
    kwargs['_return_http_data_only'] = True
    return self.get_pools_pool_rule_with_http_info(
        pools_pool_rule_id, groupnet, subnet, pool, **kwargs)
def get_pools_pool_rule_with_http_info(self, pools_pool_rule_id, groupnet, subnet, pool, **kwargs): # noqa: E501
"""get_pools_pool_rule # noqa: E501
View a single network rule. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_pools_pool_rule_with_http_info(pools_pool_rule_id, groupnet, subnet, pool, async=True)
>>> result = thread.get()
:param async bool
:param str pools_pool_rule_id: View a single network rule. (required)
:param str groupnet: (required)
:param str subnet: (required)
:param str pool: (required)
:return: PoolsPoolRules
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['pools_pool_rule_id', 'groupnet', 'subnet', 'pool'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_pools_pool_rule" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'pools_pool_rule_id' is set
if ('pools_pool_rule_id' not in params or
params['pools_pool_rule_id'] is None):
raise ValueError("Missing the required parameter `pools_pool_rule_id` when calling `get_pools_pool_rule`") # noqa: E501
# verify the required parameter 'groupnet' is set
if ('groupnet' not in params or
params['groupnet'] is None):
raise ValueError("Missing the required parameter `groupnet` when calling `get_pools_pool_rule`") # noqa: E501
# verify the required parameter 'subnet' is set
if ('subnet' not in params or
params['subnet'] is None):
raise ValueError("Missing the required parameter `subnet` when calling `get_pools_pool_rule`") # noqa: E501
# verify the required parameter 'pool' is set
if ('pool' not in params or
params['pool'] is None):
raise ValueError("Missing the required parameter `pool` when calling `get_pools_pool_rule`") # noqa: E501
collection_formats = {}
path_params = {}
if 'pools_pool_rule_id' in params:
path_params['PoolsPoolRuleId'] = params['pools_pool_rule_id'] # noqa: E501
if 'groupnet' in params:
path_params['Groupnet'] = params['groupnet'] # noqa: E501
if 'subnet' in params:
path_params['Subnet'] = params['subnet'] # noqa: E501
if 'pool' in params:
path_params['Pool'] = params['pool'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/3/network/groupnets/{Groupnet}/subnets/{Subnet}/pools/{Pool}/rules/{PoolsPoolRuleId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PoolsPoolRules', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_pools_pool_rules(self, groupnet, subnet, pool, **kwargs): # noqa: E501
"""list_pools_pool_rules # noqa: E501
Get a list of network rules. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_pools_pool_rules(groupnet, subnet, pool, async=True)
>>> result = thread.get()
:param async bool
:param str groupnet: (required)
:param str subnet: (required)
:param str pool: (required)
:param str sort: The field that will be used for sorting.
:param int limit: Return no more than this many results at once (see resume).
:param str dir: The direction of the sort.
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: PoolsPoolRulesExtended
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.list_pools_pool_rules_with_http_info(groupnet, subnet, pool, **kwargs) # noqa: E501
else:
(data) = self.list_pools_pool_rules_with_http_info(groupnet, subnet, pool, **kwargs) # noqa: E501
return data
def list_pools_pool_rules_with_http_info(self, groupnet, subnet, pool, **kwargs): # noqa: E501
"""list_pools_pool_rules # noqa: E501
Get a list of network rules. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_pools_pool_rules_with_http_info(groupnet, subnet, pool, async=True)
>>> result = thread.get()
:param async bool
:param str groupnet: (required)
:param str subnet: (required)
:param str pool: (required)
:param str sort: The field that will be used for sorting.
:param int limit: Return no more than this many results at once (see resume).
:param str dir: The direction of the sort.
:param str resume: Continue returning results from previous call using this token (token should come from the previous call, resume cannot be used with other options).
:return: PoolsPoolRulesExtended
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['groupnet', 'subnet', 'pool', 'sort', 'limit', 'dir', 'resume'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_pools_pool_rules" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'groupnet' is set
if ('groupnet' not in params or
params['groupnet'] is None):
raise ValueError("Missing the required parameter `groupnet` when calling `list_pools_pool_rules`") # noqa: E501
# verify the required parameter 'subnet' is set
if ('subnet' not in params or
params['subnet'] is None):
raise ValueError("Missing the required parameter `subnet` when calling `list_pools_pool_rules`") # noqa: E501
# verify the required parameter 'pool' is set
if ('pool' not in params or
params['pool'] is None):
raise ValueError("Missing the required parameter `pool` when calling `list_pools_pool_rules`") # noqa: E501
if 'limit' in params and params['limit'] < 1: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `list_pools_pool_rules`, must be a value greater than or equal to `1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'groupnet' in params:
path_params['Groupnet'] = params['groupnet'] # noqa: E501
if 'subnet' in params:
path_params['Subnet'] = params['subnet'] # noqa: E501
if 'pool' in params:
path_params['Pool'] = params['pool'] # noqa: E501
query_params = []
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'dir' in params:
query_params.append(('dir', params['dir'])) # noqa: E501
if 'resume' in params:
query_params.append(('resume', params['resume'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/3/network/groupnets/{Groupnet}/subnets/{Subnet}/pools/{Pool}/rules', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PoolsPoolRulesExtended', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_pools_pool_rule(self, pools_pool_rule, pools_pool_rule_id, groupnet, subnet, pool, **kwargs): # noqa: E501
"""update_pools_pool_rule # noqa: E501
Modify a network rule. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_pools_pool_rule(pools_pool_rule, pools_pool_rule_id, groupnet, subnet, pool, async=True)
>>> result = thread.get()
:param async bool
:param PoolsPoolRule pools_pool_rule: (required)
:param str pools_pool_rule_id: Modify a network rule. (required)
:param str groupnet: (required)
:param str subnet: (required)
:param str pool: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.update_pools_pool_rule_with_http_info(pools_pool_rule, pools_pool_rule_id, groupnet, subnet, pool, **kwargs) # noqa: E501
else:
(data) = self.update_pools_pool_rule_with_http_info(pools_pool_rule, pools_pool_rule_id, groupnet, subnet, pool, **kwargs) # noqa: E501
return data
def update_pools_pool_rule_with_http_info(self, pools_pool_rule, pools_pool_rule_id, groupnet, subnet, pool, **kwargs): # noqa: E501
"""update_pools_pool_rule # noqa: E501
Modify a network rule. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_pools_pool_rule_with_http_info(pools_pool_rule, pools_pool_rule_id, groupnet, subnet, pool, async=True)
>>> result = thread.get()
:param async bool
:param PoolsPoolRule pools_pool_rule: (required)
:param str pools_pool_rule_id: Modify a network rule. (required)
:param str groupnet: (required)
:param str subnet: (required)
:param str pool: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['pools_pool_rule', 'pools_pool_rule_id', 'groupnet', 'subnet', 'pool'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_pools_pool_rule" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'pools_pool_rule' is set
if ('pools_pool_rule' not in params or
params['pools_pool_rule'] is None):
raise ValueError("Missing the required parameter `pools_pool_rule` when calling `update_pools_pool_rule`") # noqa: E501
# verify the required parameter 'pools_pool_rule_id' is set
if ('pools_pool_rule_id' not in params or
params['pools_pool_rule_id'] is None):
raise ValueError("Missing the required parameter `pools_pool_rule_id` when calling `update_pools_pool_rule`") # noqa: E501
# verify the required parameter 'groupnet' is set
if ('groupnet' not in params or
params['groupnet'] is None):
raise ValueError("Missing the required parameter `groupnet` when calling `update_pools_pool_rule`") # noqa: E501
# verify the required parameter 'subnet' is set
if ('subnet' not in params or
params['subnet'] is None):
raise ValueError("Missing the required parameter `subnet` when calling `update_pools_pool_rule`") # noqa: E501
# verify the required parameter 'pool' is set
if ('pool' not in params or
params['pool'] is None):
raise ValueError("Missing the required parameter `pool` when calling `update_pools_pool_rule`") # noqa: E501
collection_formats = {}
path_params = {}
if 'pools_pool_rule_id' in params:
path_params['PoolsPoolRuleId'] = params['pools_pool_rule_id'] # noqa: E501
if 'groupnet' in params:
path_params['Groupnet'] = params['groupnet'] # noqa: E501
if 'subnet' in params:
path_params['Subnet'] = params['subnet'] # noqa: E501
if 'pool' in params:
path_params['Pool'] = params['pool'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'pools_pool_rule' in params:
body_params = params['pools_pool_rule']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/platform/3/network/groupnets/{Groupnet}/subnets/{Subnet}/pools/{Pool}/rules/{PoolsPoolRuleId}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 45.529813 | 175 | 0.626057 |
ace723abeb45110fbf4cf7dfbf504a54281ae94f | 250 | py | Python | data-structures/simple-problems/character-count.py | AkashSDas/DataStructures-and-Algorithms | 200b54e69f7932047d16dfcadc595245e548ca91 | [
"MIT"
] | null | null | null | data-structures/simple-problems/character-count.py | AkashSDas/DataStructures-and-Algorithms | 200b54e69f7932047d16dfcadc595245e548ca91 | [
"MIT"
] | null | null | null | data-structures/simple-problems/character-count.py | AkashSDas/DataStructures-and-Algorithms | 200b54e69f7932047d16dfcadc595245e548ca91 | [
"MIT"
] | null | null | null | def char_counter(string):
    """Return a dict mapping each character in *string* to its count."""
    char_count = {}
    for char in string:
        # Seen before: bump the count; otherwise start it at 1.
        # (collections.Counter(string) is the stdlib equivalent.)
        if char in char_count.keys():
            char_count[char] += 1
        else:
            char_count[char] = 1
    return char_count
# Demo run on a sample string.
print(char_counter('hello world'))
| 17.857143 | 37 | 0.58 |
ace7242ef312879754a0524956a227f698fc39ed | 1,135 | py | Python | ipyregulus/filters/trigger.py | yarden-livnat/ipyregulus | 971ab02cd3676b9ea8c712fd3940d42d974c445d | [
"BSD-3-Clause"
] | 1 | 2018-09-06T17:07:41.000Z | 2018-09-06T17:07:41.000Z | ipyregulus/filters/trigger.py | yarden-livnat/ipyregulus | 971ab02cd3676b9ea8c712fd3940d42d974c445d | [
"BSD-3-Clause"
] | 3 | 2021-03-10T09:24:25.000Z | 2022-01-22T10:49:25.000Z | ipyregulus/filters/trigger.py | yarden-livnat/ipyregulus | 971ab02cd3676b9ea8c712fd3940d42d974c445d | [
"BSD-3-Clause"
] | 2 | 2018-08-30T19:11:05.000Z | 2020-01-07T16:29:01.000Z | from .filters import Filter
class Trigger(Filter):
    """A Filter that re-evaluates itself whenever a monitored object's
    ``version`` trait changes.

    NOTE(review): relies on ``disabled`` and ``func`` provided by the
    Filter base class (defined elsewhere) -- confirm their semantics there.
    """
    def __init__(self, monitor=None, **kwargs):
        super().__init__(**kwargs)
        self._monitored = []
        # Goes through the `monitor` setter so observers get attached.
        # NOTE(review): a default of None is wrapped into [None] by the
        # setter and then observed -- verify callers always pass a real
        # object or a list.
        self.monitor = monitor
    def __call__(self):
        # Short-circuit: a disabled trigger never invokes func().
        return self.disabled or self.func()
    def _exec(self, change):
        """Observer callback: re-evaluate the trigger on any change."""
        self()
    @property
    def monitor(self):
        """List of objects whose ``version`` trait is being observed."""
        return self._monitored
    @monitor.setter
    def monitor(self, obj):
        # Detach from the previous set before observing the new one.
        for m in self._monitored:
            m.unobserve(self._exec, names='version')
        self._monitored = [obj] if not isinstance(obj, list) else obj
        for m in self._monitored:
            m.observe(self._exec, names='version')
    def add(self, item):
        """Start monitoring *item* (no-op if already monitored)."""
        if item not in self._monitored:
            self._monitored.append(item)
            item.observe(self._exec, names='version')
    def remove(self, item):
        """Stop monitoring *item* (no-op if not monitored)."""
        if item in self._monitored:
            self._monitored.remove(item)
            # NOTE(review): 'vesion' looks like a typo for 'version'; as
            # written this unobserve targets a different trait name than
            # the one observed in add()/monitor.
            item.unobserve(self._exec, names='vesion')
    def clear(self):
        """Detach all observers and forget every monitored object."""
        for item in self._monitored:
            item.unobserve(self._exec, names='version')
        self._monitored = [] | 27.682927 | 69 | 0.6 |
ace724644c955bcf5cdd58c3a8e62add15441819 | 2,577 | py | Python | models/mnist.py | jeromerony/augmented_lagrangian_adversarial_attacks | 6d2f96deb8fcdf87bbd6d428a0549c935c0e6388 | [
"BSD-3-Clause"
] | 12 | 2020-11-25T19:08:18.000Z | 2022-03-17T04:50:05.000Z | models/mnist.py | jeromerony/augmented_lagrangian_adversarial_attacks | 6d2f96deb8fcdf87bbd6d428a0549c935c0e6388 | [
"BSD-3-Clause"
] | 1 | 2022-03-15T09:19:58.000Z | 2022-03-15T14:09:01.000Z | models/mnist.py | jeromerony/augmented_lagrangian_adversarial_attacks | 6d2f96deb8fcdf87bbd6d428a0549c935c0e6388 | [
"BSD-3-Clause"
] | 1 | 2022-01-13T02:55:32.000Z | 2022-01-13T02:55:32.000Z | from collections import OrderedDict
from torch import nn
class SmallCNN(nn.Module):
def __init__(self, drop=0.5):
super(SmallCNN, self).__init__()
self.num_channels = 1
self.num_labels = 10
activ = nn.ReLU(True)
self.feature_extractor = nn.Sequential(OrderedDict([
('conv1', nn.Conv2d(self.num_channels, 32, 3)),
('relu1', activ),
('conv2', nn.Conv2d(32, 32, 3)),
('relu2', activ),
('maxpool1', nn.MaxPool2d(2, 2)),
('conv3', nn.Conv2d(32, 64, 3)),
('relu3', activ),
('conv4', nn.Conv2d(64, 64, 3)),
('relu4', activ),
('maxpool2', nn.MaxPool2d(2, 2)),
]))
self.classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(64 * 4 * 4, 200)),
('relu1', activ),
('drop', nn.Dropout(drop)),
('fc2', nn.Linear(200, 200)),
('relu2', activ),
('fc3', nn.Linear(200, self.num_labels)),
]))
for m in self.modules():
if isinstance(m, (nn.Conv2d)):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
nn.init.constant_(self.classifier.fc3.weight, 0)
nn.init.constant_(self.classifier.fc3.bias, 0)
def forward(self, input):
features = self.feature_extractor(input)
logits = self.classifier(features.view(-1, 64 * 4 * 4))
return logits
def IBP_large(in_ch, in_dim, linear_size=512):
    """Build the 'large' CROWN-IBP convolutional model.

    Architecture from Zhang, H., Chen, H., Xiao, C., Gowal, S.,
    Stanforth, R., Li, B., Boning, D. and Hsieh, C.J., 2019.
    "Towards stable and efficient training of verifiably robust neural
    networks" (arXiv:1906.06316); https://github.com/huanzhang12/CROWN-IBP

    Args:
        in_ch: number of input channels.
        in_dim: spatial size of the (square) input; halved once by the
            stride-2 convolution before flattening.
        linear_size: width of the hidden fully-connected layer.

    Returns:
        An nn.Sequential mapping (N, in_ch, in_dim, in_dim) to (N, 10) logits.
    """
    layers = [
        nn.Conv2d(in_ch, 64, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.Conv2d(64, 64, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.Conv2d(64, 128, 3, stride=2, padding=1),
        nn.ReLU(),
        nn.Conv2d(128, 128, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.Conv2d(128, 128, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.Flatten(1),
        nn.Linear((in_dim // 2) * (in_dim // 2) * 128, linear_size),
        nn.ReLU(),
        nn.Linear(linear_size, 10),
    ]
    return nn.Sequential(*layers)
| 33.467532 | 113 | 0.53473 |
ace724853628dad0c4a11884965b06b2751526d1 | 1,952 | py | Python | setup.py | fafhrd91/pyramid_amdjs | 90f878f456f6019f965c939123d330ee6b8a0ae0 | [
"MIT"
] | 1 | 2015-01-01T16:45:56.000Z | 2015-01-01T16:45:56.000Z | setup.py | fafhrd91/pyramid_amdjs | 90f878f456f6019f965c939123d330ee6b8a0ae0 | [
"MIT"
] | null | null | null | setup.py | fafhrd91/pyramid_amdjs | 90f878f456f6019f965c939123d330ee6b8a0ae0 | [
"MIT"
] | null | null | null | import os
import sys
from setuptools import setup, find_packages
version = '0.6.0dev1'
# Core runtime dependencies; the package targets Pyramid >= 1.4.
install_requires = ['setuptools',
                    'pyramid >= 1.4']
# Python 2.6 is missing argparse/ordereddict/unittest2 from the stdlib.
if sys.version_info[:2] == (2, 6):
    install_requires.extend((
        'argparse',
        'ordereddict',
        'unittest2'))
# These interpreter versions additionally need the simplejson package.
if sys.version_info[:2] in ((2, 6), (2, 7), (3, 3)):
    install_requires.extend(('simplejson', ))
# Extra packages needed only to run the test suite.
tests_require = install_requires + ['nose', 'mock']
def read(f):
    """Return the stripped contents of *f*, resolved relative to this file.

    Used to assemble the long_description from README.rst/CHANGES.txt.
    The file handle is closed deterministically via a context manager
    (the original left it to the garbage collector).
    """
    path = os.path.join(os.path.dirname(__file__), f)
    with open(path) as fh:
        return fh.read().strip()
# Package metadata and entry points for pyramid_amdjs.
setup(name='pyramid_amdjs',
      version=version,
      description=('Pyramid JS/CSS resource management with curl.js'),
      # README + CHANGES become the PyPI long description.
      long_description='\n\n'.join((read('README.rst'), read('CHANGES.txt'))),
      classifiers=[
          "License :: OSI Approved :: MIT License",
          "Intended Audience :: Developers",
          "Programming Language :: Python",
          "Programming Language :: Python :: 2.6",
          "Programming Language :: Python :: 2.7",
          "Programming Language :: Python :: 3.2",
          "Programming Language :: Python :: 3.3",
          "Programming Language :: Python :: Implementation :: CPython",
          "Framework :: Pyramid",
          "Topic :: Internet :: WWW/HTTP",
          'Topic :: Internet :: WWW/HTTP :: WSGI'],
      author='Nikolay Kim',
      author_email='fafhrd91@gmail.com',
      url='https://github.com/fafhrd91/pyramid_amdjs/',
      license='MIT',
      packages=find_packages(),
      install_requires = install_requires,
      tests_require = tests_require,
      test_suite = 'nose.collector',
      include_package_data = True,
      zip_safe = False,
      # Console scripts plus a Babel i18n extractor for .hbs templates.
      entry_points = {
          'console_scripts': [
              'amdjs = pyramid_amdjs.script:main',
              'pstatic = pyramid_amdjs.pstatic:main',
          ],
          'babel.extractors': [
              'handlebars = pyramid_amdjs.handlebars:extract_i18n',
          ]}
      )
| 32 | 78 | 0.586578 |
ace724ac8711d2bdbca22fe61fa62c2239d358c4 | 12,550 | py | Python | tensorflow/python/ops/ragged/ragged_map_fn_op_test.py | kalosisz/tensorflow | b7ecd75b24f577b73500024fe91d2ea0c806d05a | [
"Apache-2.0"
] | 74 | 2020-07-06T17:11:39.000Z | 2022-01-28T06:31:28.000Z | tensorflow/python/ops/ragged/ragged_map_fn_op_test.py | kalosisz/tensorflow | b7ecd75b24f577b73500024fe91d2ea0c806d05a | [
"Apache-2.0"
] | 17 | 2021-08-12T19:38:42.000Z | 2022-01-27T14:39:35.000Z | tensorflow/python/ops/ragged/ragged_map_fn_op_test.py | kalosisz/tensorflow | b7ecd75b24f577b73500024fe91d2ea0c806d05a | [
"Apache-2.0"
] | 12 | 2020-07-08T07:27:17.000Z | 2021-12-27T08:54:27.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_map_ops.map_fn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import map_fn as map_fn_lib
from tensorflow.python.ops import math_ops as mo
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_map_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class RaggedMapOpTest(test_util.TensorFlowTestCase,
                      parameterized.TestCase):
  """Tests for mapping functions over RaggedTensors via map_fn.

  Covers shape transformations between ragged/dense ranks, mapping over
  structures (dicts/tuples) of RaggedTensors, SparseTensor inputs, and the
  error paths for mismatched declared ragged ranks.
  """
  @parameterized.parameters([
      # The following test sets map over a RaggedTensor and apply a
      # transformation that returns with shape:
      # [d1, (d2)] -> [d1]
      dict(
          fn=mo.reduce_mean,
          elems=[[1, 2, 3], [4, 5], [6, 7]],
          elems_dtype=dtypes.int32,
          expected_output=[2, 4, 6],
          result_dtype=dtypes.int32,
      ),
      dict(
          fn=string_ops.reduce_join,
          elems=[['foo', 'bar', 'baz'], ['a'], ['b', 'c']],
          expected_output=[b'foobarbaz', b'a', b'bc'],
          elems_dtype=dtypes.string,
          result_dtype=dtypes.string,
      ),
      # [d1, (d2)] -> [d1, 2]
      dict(
          fn=lambda x: array_ops.stack([mo.reduce_mean(x), mo.reduce_sum(x)]),
          # fn=self.stack_mean_and_sum,
          elems=[[1, 2, 3], [4, 5], [6, 7]],
          expected_output=[[2, 6], [4.5, 9], [6.5, 13]],
          elems_dtype=dtypes.float32,
          result_dtype=dtypes.float32,
          expected_ragged_rank=0,
      ),
      # [d1, (d2)] -> [d1, (d2)]
      dict(
          fn=lambda x: x + np.int64(1),
          elems=[[1, 2, 3], [4, 5], [6, 7]],
          expected_output=[[2, 3, 4], [5, 6], [7, 8]],
          elems_dtype=dtypes.int64,
          result_dtype=ragged_tensor.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=1),
      ),
      # [d1, (d2), d3] -> [d1, (d2), d3]
      dict(
          fn=lambda x: x + np.int64(1),
          elems=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
          elems_ragged_rank=1,
          expected_ragged_rank=1,
          result_dtype=ragged_tensor.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=1),
          expected_output=[[[2, 3], [4, 5]], [], [[6, 7], [8, 9], [10, 1]]],
      ),
      # [d1, (d2)] -> [d1, (d2), (d3)]
      dict(
          fn=lambda x: ragged_tensor.RaggedTensor.from_row_starts(x, [0]),
          elems=[[1, 2, 3], [4, 5], [6, 7]],
          expected_output=[[[1, 2, 3]], [[4, 5]], [[6, 7]]],
          result_dtype=ragged_tensor.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=2),
      ),
      # [d1, (d2), (d3)] -> [d1, (d2), (d3)]
      dict(
          fn=lambda x: ragged_functional_ops.map_flat_values(mo.add, x, 1),
          elems=[[[1, 2, 3]], [[4, 5], [6, 7]]],
          expected_output=[[[2, 3, 4]], [[5, 6], [7, 8]]],
          result_dtype=ragged_tensor.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=2),
      ),
      # [d1, (d2), (d3)] -> [d1, (d2)]
      dict(
          fn=lambda x: ragged_math_ops.reduce_sum(x, axis=1),
          elems=[[[1, 2, 3]], [[4, 5], [6, 7]]],
          expected_output=[[6], [9, 13]],
          result_dtype=ragged_tensor.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=1),
      ),
      # [d1, (d2), (d3)] -> [d1, (d3)]
      dict(
          fn=lambda x: ragged_math_ops.reduce_sum(x, axis=0),
          elems=[[[1, 2, 3]], [[4, 5], [6, 7]]],
          expected_output=[[1, 2, 3], [10, 12]],
          result_dtype=ragged_tensor.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=1),
      ),
      # [d1, (d2), (d3)] -> [d1]
      dict(
          fn=ragged_math_ops.reduce_sum,
          elems=[[[1, 2, 3]], [[4, 5], [6, 7]]],
          expected_output=[6, 22],
          result_dtype=dtypes.int64,
      ),
      # [d1] -> [d1, (d2)]
      dict(
          fn=mo.range,
          elems=[4, 0, 2],
          expected_output=[[0, 1, 2, 3], [], [0, 1]],
          result_dtype=ragged_tensor.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=1),
      ),
      # [d1] -> [d1, (d2), (d3)]
      dict(
          fn=lambda x: ragged_math_ops.range(mo.range(x)),
          elems=[5, 0, 3],
          expected_output=[[[], [0], [0, 1], [0, 1, 2], [0, 1, 2, 3]], [],
                           [[], [0], [0, 1]]],
          result_dtype=ragged_tensor.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=2),
      ),
      # [d1, (d2), (d3), (d4a), (d5)] -> [d1, (d2), (d3), (d4b), (d5)]
      dict(
          fn=lambda x: x + np.int64(1),
          elems=[[[[[1, 2, 3]], [[4], [5]]]], [[[[6, 7]]], [[[8], []]]]],
          expected_output=[[[[[2, 3, 4]], [[5], [6]]]], [[[[7, 8]]], [[[9],
                                                                       []]]]],
          result_dtype=ragged_tensor.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=4),
      ),
      # [d1] -> [d1, (d2), (d3)]
      dict(
          fn=ragged_math_ops.range,
          elems=np.array([1, 2, 3], np.int64),
          expected_output=[[[0]], [[0, 1]], [[0, 1, 2]]],
          result_dtype=ragged_tensor.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=2)),
      # [0] -> [0, (d2), (d3)] (github issue #36232)
      dict(
          fn=ragged_math_ops.range,
          elems=np.zeros([0], np.int64),
          expected_output=[],
          expected_ragged_rank=2,
          result_dtype=ragged_tensor.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=2)),
  ])
  def testRaggedMap(
      self,
      fn,
      elems,
      expected_output,
      expected_ragged_rank=None,
      result_ragged_rank=None,
      elems_ragged_rank=None,
      elems_dtype=dtypes.int64,
      result_dtype=None,
      infer_shape=True,
  ):
    """Maps `fn` over ragged `elems` and compares with `expected_output`."""
    elems = ragged_factory_ops.constant(elems, elems_dtype, elems_ragged_rank)
    output = ragged_map_ops.map_fn(
        fn=fn, elems=elems, dtype=result_dtype, infer_shape=infer_shape)
    expected_rt = ragged_factory_ops.constant(
        expected_output, ragged_rank=expected_ragged_rank)
    self.assertAllEqual(expected_rt, output)
  def testRaggedMapOnStructure(self):
    """Maps over a dict of RaggedTensors, reducing to a dense vector."""
    batman = ragged_factory_ops.constant([[1, 2, 3], [4], [5, 6, 7]])
    # [[10, 20, 30], [40], [50, 60, 70]]
    robin = ragged_functional_ops.map_flat_values(mo.multiply, batman, 10)
    features = {'batman': batman, 'robin': robin}
    def _reduce_sum_from_all(f):
      return mo.reduce_sum(f['batman']) + mo.reduce_sum(f['robin'])
    output = ragged_map_ops.map_fn(
        fn=_reduce_sum_from_all,
        elems=features,
        dtype=dtypes.int32,
    )
    self.assertAllEqual(output, [66, 44, 198])
  # Test mapping over a dict of RTs can produce a dict of RTs.
  def testRaggedMapOnStructure_RaggedOutputs(self):
    batman = ragged_factory_ops.constant([[1, 2, 3], [4], [5, 6, 7]])
    # [[10, 20, 30], [40], [50, 60, 70]]
    robin = ragged_functional_ops.map_flat_values(mo.multiply, batman, 10)
    features = {'batman': batman, 'robin': robin}
    def _increment(f):
      return {
          'batman': f['batman'] + 1,
          'robin': f['robin'] + 1,
      }
    output = ragged_map_ops.map_fn(
        fn=_increment,
        elems=features,
        infer_shape=False,
        dtype={
            'batman':
                ragged_tensor.RaggedTensorType(
                    dtype=dtypes.int32, ragged_rank=1),
            'robin':
                ragged_tensor.RaggedTensorType(
                    dtype=dtypes.int32, ragged_rank=1)
        },
    )
    self.assertAllEqual(output['batman'], [[2, 3, 4], [5], [6, 7, 8]])
    self.assertAllEqual(output['robin'], [[11, 21, 31], [41], [51, 61, 71]])
  def testZip(self):
    """Maps over a (dense, ragged) tuple, pairing row ids with values."""
    x = ragged_factory_ops.constant(
        [[10, 20], [30, 40], [50, 60], [70], [80, 90, 100]], dtypes.int64)
    y = array_ops.expand_dims(mo.range(x.nrows(out_type=dtypes.int64)), axis=1)
    def _zip(foo):
      y_val, x_val = foo
      bar = array_ops.tile(y_val, array_ops.shape(x_val))
      return array_ops.stack([bar, x_val], axis=1)
    output = ragged_map_ops.map_fn(
        _zip, (y, x),
        dtype=ragged_tensor.RaggedTensorType(dtype=dtypes.int64, ragged_rank=1),
        infer_shape=False)
    self.assertAllEqual(
        output, [[[0, 10], [0, 20]], [[1, 30], [1, 40]], [[2, 50], [2, 60]],
                 [[3, 70]], [[4, 80], [4, 90], [4, 100]]])
  def testBatchGather(self):
    """Per-row gather driven by a parallel RaggedTensor of indices."""
    tokens = ragged_factory_ops.constant([['hello', '.', 'there'], ['merhaba'],
                                          ['bonjour', '.', 'ca va', '?']])
    indices = ragged_factory_ops.constant([[0, 2], [0], [0, 2]])
    def gather(x):
      tokens_val, indices_val = x
      return array_ops.gather(tokens_val, indices_val)
    data = tokens, indices
    out = ragged_map_ops.map_fn(
        gather,
        data,
        dtype=ragged_tensor.RaggedTensorType(
            dtype=dtypes.string, ragged_rank=1),
        infer_shape=False)
    self.assertAllEqual(
        out, [[b'hello', b'there'], [b'merhaba'], [b'bonjour', b'ca va']])
  def testMismatchRaggedRank(self):
    """Declared ragged_rank far larger than fn's output must raise."""
    elems = ragged_factory_ops.constant([[[1, 2, 3]], [[4, 5], [6, 7]]])
    fn = lambda x: ragged_math_ops.reduce_sum(x, axis=0)
    with self.assertRaisesRegex(
        ValueError, r'(?s)Expected `fn` to return.*But it returned.*'):
      _ = ragged_map_ops.map_fn(
          fn,
          elems,
          dtype=ragged_tensor.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=23))
  def testMismatchRaggedRank2(self):
    """Same error path when the declared rank overshoots a nested result."""
    elems = ragged_factory_ops.constant([[1, 2, 3], [4, 5], [6, 7]])
    fn = lambda x: ragged_tensor.RaggedTensor.from_row_starts(x, [0])
    with self.assertRaisesRegex(
        ValueError, r'(?s)Expected `fn` to return.*But it returned.*'):
      _ = ragged_map_ops.map_fn(
          fn,
          elems,
          dtype=ragged_tensor.RaggedTensorType(
              dtype=dtypes.int64, ragged_rank=10))
  def testMapOnSparseTensor(self):
    """SparseTensor input is converted and mapped as a RaggedTensor."""
    s = sparse_tensor.SparseTensor(
        indices=[[0, 0], [0, 1], [1, 0], [1, 1]],
        values=[0, 5, 0, 4],
        dense_shape=[2, 2],
    )
    t2 = ragged_tensor.RaggedTensor.from_sparse(s)
    id_t2 = ragged_map_ops.map_fn(
        lambda x: x, t2,
    )
    self.assertAllEqual(id_t2, [[0, 5], [0, 4]])
  def testRaggedMapWithIncorrectFnOutputSignature(self):
    x = ragged_factory_ops.constant([[1, 2, 3, 4], [1]])
    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                'All flat_values must have compatible shapes'):
      y = map_fn_lib.map_fn(lambda r: map_fn_lib.map_fn(lambda y: r, r), x)
      self.evaluate(y)
  def testNestedRaggedMapWithFnOutputSignature(self):
    ragged1d = ragged_tensor.RaggedTensorSpec([None], dtypes.int32)
    ragged2d = ragged_tensor.RaggedTensorSpec([None, None], dtypes.int32)
    x = ragged_factory_ops.constant([[1, 2, 3, 4], [1]])
    # pylint: disable=g-long-lambda
    y = map_fn_lib.map_fn(
        lambda r: map_fn_lib.map_fn(
            lambda y: r, r, fn_output_signature=ragged1d),
        x,
        fn_output_signature=ragged2d)
    expected = [[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]], [[1]]]
    self.assertAllEqual(y, expected)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
  googletest.main()
| 37.130178 | 80 | 0.572908 |
ace724bd383469ea0eee5b5e72032fea15c08e88 | 479 | py | Python | admin/consumer.py | nucrime/py | 799c7e34ebfd1e492b5bf0a8370f5304afb02c17 | [
"Apache-2.0"
] | null | null | null | admin/consumer.py | nucrime/py | 799c7e34ebfd1e492b5bf0a8370f5304afb02c17 | [
"Apache-2.0"
] | null | null | null | admin/consumer.py | nucrime/py | 799c7e34ebfd1e492b5bf0a8370f5304afb02c17 | [
"Apache-2.0"
] | null | null | null | import pika
# NOTE(review): broker credentials are hard-coded in the AMQPS URL; move
# them to configuration/environment before publishing this code.
params = pika.URLParameters('amqps://cmubfdvq:1LEw5bR9lnpkdBp5Sw3Q8j8efv_K3lhZ@kangaroo.rmq.cloudamqp.com/cmubfdvq')
# One blocking connection and channel; declare the 'admin' queue so it
# exists before we try to consume from it.
connection = pika.BlockingConnection(params)
channel = connection.channel()
channel.queue_declare(queue='admin')
def callback(ch, method, properties, body):
print('Received in admin')
print(body)
channel.basic_consume(queue='admin', on_message_callback=callback)
print('Started consuming')
channel.start_consuming()
channel.close() | 20.826087 | 116 | 0.782881 |
ace7278bc659373e46a16ba63cdd954508e826a6 | 1,391 | py | Python | fixture/session.py | Valerie2807/python_training | 8b3169853501c124ce7e051292ff13b70c495cdb | [
"Apache-2.0"
] | null | null | null | fixture/session.py | Valerie2807/python_training | 8b3169853501c124ce7e051292ff13b70c495cdb | [
"Apache-2.0"
] | null | null | null | fixture/session.py | Valerie2807/python_training | 8b3169853501c124ce7e051292ff13b70c495cdb | [
"Apache-2.0"
] | null | null | null | from selenium.webdriver.common.by import By
class SessionHelper:
    """Login/logout helpers for the application under test.

    Wraps the Selenium WebDriver owned by the fixture object ``app``.
    """

    def __init__(self, app):
        self.app = app

    def login(self, username, password):
        """Open the home page and submit the login form."""
        wd = self.app.wd
        self.app.open_home_page()
        wd.find_element_by_name("user").clear()
        wd.find_element_by_name("user").send_keys(username)
        wd.find_element_by_name("pass").clear()
        wd.find_element_by_name("pass").send_keys(password)
        wd.find_element_by_xpath("//input[@value='Login']").click()

    def logout(self):
        """Click the Logout link and confirm the login form reappears."""
        wd = self.app.wd
        wd.find_element_by_link_text("Logout").click()
        # Locating the username field acts as a check that the login page is
        # shown again (raises NoSuchElementException otherwise).
        wd.find_element_by_name("user")

    def ensure_logout(self):
        """Log out only when a session is currently active."""
        # Fixed: dropped an unused `wd = self.app.wd` local.
        if self.is_logged_in():
            self.logout()

    def is_logged_in(self):
        """Return True when a 'Logout' link is present on the page."""
        wd = self.app.wd
        return len(wd.find_elements(By.LINK_TEXT, "Logout")) > 0

    def is_logged_in_as(self, username):
        """Return True when the active session belongs to *username*."""
        # Fixed: dropped an unused `wd = self.app.wd` local.
        return self.get_logged_user() == username

    def get_logged_user(self):
        """Return the username shown in the page header.

        Strips the first and last characters of the header text —
        presumably surrounding parentheses; TODO confirm in the app markup.
        """
        wd = self.app.wd
        return wd.find_element_by_xpath("//div/div[1]/form/b").text[1:-1]

    def ensure_login(self, username, password):
        """Guarantee we are logged in as *username*, re-logging if needed."""
        # Fixed: dropped an unused `wd = self.app.wd` local.
        if self.is_logged_in():
            if self.is_logged_in_as(username):
                return
            else:
                self.logout()
        self.login(username, password)
| 28.979167 | 73 | 0.607477 |
ace727a883fb309e714e24082121c2e8820a1277 | 639 | py | Python | src/hamcrest/core/helpers/wrap_matcher.py | rbalint/PyHamcrest | 713aa08e313dba997fd8e4b7e0d3d599a72bdd72 | [
"BSD-3-Clause"
] | null | null | null | src/hamcrest/core/helpers/wrap_matcher.py | rbalint/PyHamcrest | 713aa08e313dba997fd8e4b7e0d3d599a72bdd72 | [
"BSD-3-Clause"
] | null | null | null | src/hamcrest/core/helpers/wrap_matcher.py | rbalint/PyHamcrest | 713aa08e313dba997fd8e4b7e0d3d599a72bdd72 | [
"BSD-3-Clause"
] | null | null | null | from hamcrest.core.base_matcher import Matcher
from hamcrest.core.core.isequal import equal_to
__author__ = "Jon Reid"
__copyright__ = "Copyright 2011 hamcrest.org"
__license__ = "BSD, see License.txt"
def wrap_matcher(x):
    """Wrap *x* in a matcher when it is not one already.

    :returns: *x* unchanged if it is a :py:class:`Matcher`, otherwise an
        :py:func:`~hamcrest.core.core.isequal.equal_to` matcher for *x*.
    """
    return x if isinstance(x, Matcher) else equal_to(x)
def is_matchable_type(expected_type):
    """Return True when *expected_type* is a class/type object."""
    # isinstance already yields the boolean we want; the original
    # if/return True/return False ladder was redundant.
    return isinstance(expected_type, type)
| 23.666667 | 78 | 0.70266 |
ace727d8983e460ec141c97b7e4887dcd0406956 | 1,376 | py | Python | artista/artistArt/migrations/0006_artcomment_artlikedislike.py | Rafat97/Artista | 40a824f97dcc8f97632a1864a12329c3172c7c66 | [
"MIT"
] | 17 | 2020-09-21T19:59:23.000Z | 2021-05-16T15:28:41.000Z | artista/artistArt/migrations/0006_artcomment_artlikedislike.py | Rafat97/Artista | 40a824f97dcc8f97632a1864a12329c3172c7c66 | [
"MIT"
] | null | null | null | artista/artistArt/migrations/0006_artcomment_artlikedislike.py | Rafat97/Artista | 40a824f97dcc8f97632a1864a12329c3172c7c66 | [
"MIT"
] | 2 | 2021-03-13T09:31:30.000Z | 2022-03-19T09:43:15.000Z | # Generated by Django 3.0.4 on 2020-06-20 15:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds two tables, each linking a user (register.User) to an artwork
    # (artistArt.ArtistArt): ArtLikeDislike and ArtComment.

    dependencies = [
        ('register', '0019_user_refresh_token'),
        ('artistArt', '0005_auto_20200620_1932'),
    ]

    operations = [
        migrations.CreateModel(
            name='ArtLikeDislike',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Presumably True = like, False = dislike — confirm in app code.
                ('like_dislike', models.BooleanField(default=False)),
                ('artist_art', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='artistArt.ArtistArt')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='register.User')),
            ],
        ),
        migrations.CreateModel(
            name='ArtComment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment_message', models.CharField(max_length=255)),
                ('artist_art', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='artistArt.ArtistArt')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='register.User')),
            ],
        ),
    ]
| 40.470588 | 121 | 0.617006 |
ace727e39188977b9e917d0c060f264cae281678 | 15,380 | py | Python | shadowsocks/shell.py | pigTom/shadowsocks_python | d5f2f574eb98cc2c1bd8b35593308527577f0be4 | [
"Apache-2.0"
] | 1 | 2020-02-25T14:16:42.000Z | 2020-02-25T14:16:42.000Z | shadowsocks/shell.py | pigTom/shadowsocks_python | d5f2f574eb98cc2c1bd8b35593308527577f0be4 | [
"Apache-2.0"
] | null | null | null | shadowsocks/shell.py | pigTom/shadowsocks_python | d5f2f574eb98cc2c1bd8b35593308527577f0be4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import json
import sys
import getopt
import logging
from shadowsocks.common import to_bytes, to_str, IPNetwork, PortRange
from shadowsocks import encrypt
# Custom logging level (numerically below DEBUG); registered via
# logging.addLevelName() and selected by get_config() when -vv is given.
VERBOSE_LEVEL = 5

# Module-wide verbosity counter, updated by get_config() from -v/-q flags
# and read by print_exception().
verbose = 0
def check_python():
    """Abort with exit code 1 unless running on Python 2.6+ or 3.3+."""
    major, minor = sys.version_info[:2]
    if major == 2:
        if minor < 6:
            print('Python 2.6+ required')
            sys.exit(1)
    elif major == 3:
        if minor < 3:
            print('Python 3.3+ required')
            sys.exit(1)
    else:
        print('Python version not supported')
        sys.exit(1)
def print_exception(e):
    """Log *e* as an error; when module verbosity > 0, also dump the traceback."""
    global verbose
    logging.error(e)
    if verbose > 0:
        import traceback
        traceback.print_exc()
def __version():
    """Best-effort lookup of the installed shadowsocks version string.

    Returns '' when neither the installed distribution metadata nor the
    in-tree version module is available.
    """
    # Prefer the installed distribution's metadata.
    try:
        import pkg_resources
        return pkg_resources.get_distribution('shadowsocks').version
    except Exception:
        pass
    # Fall back to the bundled version module.
    try:
        from shadowsocks import version
        return version.version()
    except Exception:
        return ''
def print_shadowsocks():
    """Print the program name and version to stdout."""
    print('ShadowsocksR %s' % __version())
def log_shadowsocks_version():
    """Log the program name and version at INFO level."""
    logging.info('ShadowsocksR %s' % __version())
def find_config():
    """Locate a config file, preferring 'user-config.json' over 'config.json'.

    Each candidate is looked up first in the current directory, then in the
    parent directory.  Returns the first existing path, or None.
    """
    for name in ('user-config.json', 'config.json'):
        for candidate in (name, os.path.join(os.path.abspath('..'), name)):
            if os.path.exists(candidate):
                return candidate
    return None
def check_config(config, is_local):
    """Validate and normalize *config* in place; exit the process on fatal errors.

    Warns (but continues) on risky-yet-legal settings — 0.0.0.0 local bind,
    loopback server address, unusual timeouts — and finally verifies that the
    configured password/method pair yields a usable cipher.
    """
    if config.get('daemon', None) == 'stop':
        # no need to specify configuration for daemon stop
        return

    # A password is mandatory; servers may alternatively use port_password.
    if is_local and not config.get('password', None):
        logging.error('password not specified')
        print_help(is_local)
        sys.exit(2)

    if not is_local and not config.get('password', None) \
            and not config.get('port_password', None):
        logging.error('password or port_password not specified')
        print_help(is_local)
        sys.exit(2)

    # Coerce ports to int (server_port may legitimately be a list).
    if 'local_port' in config:
        config['local_port'] = int(config['local_port'])

    if 'server_port' in config and type(config['server_port']) != list:
        config['server_port'] = int(config['server_port'])

    if config.get('local_address', '') in [b'0.0.0.0']:
        logging.warning('warning: local set to listen on 0.0.0.0, it\'s not safe')
    if config.get('server', '') in ['127.0.0.1', 'localhost']:
        logging.warning('warning: server set to listen on %s:%s, are you sure?' %
                        (to_str(config['server']), config['server_port']))
    if config.get('timeout', 300) < 100:
        logging.warning('warning: your timeout %d seems too short' %
                        int(config.get('timeout')))
    if config.get('timeout', 300) > 600:
        logging.warning('warning: your timeout %d seems too long' %
                        int(config.get('timeout')))
    if config.get('password') in [b'mypassword']:
        logging.error('DON\'T USE DEFAULT PASSWORD! Please change it in your '
                      'config.json!')
        sys.exit(1)
    if config.get('user', None) is not None:
        if os.name != 'posix':
            logging.error('user can be used only on Unix')
            sys.exit(1)

    # Raises / exits if the method+password combination is not usable.
    encrypt.try_cipher(config['password'], config['method'])
def get_config(is_local):
    """Build the runtime configuration dict.

    Merges, in increasing precedence: built-in defaults, the JSON config
    file (from -c or find_config()), and command-line flags.  Then sets up
    logging according to the verbosity level and validates the result via
    check_config().  Exits the process on usage or parse errors.
    """
    global verbose

    config = {}
    config_path = None
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)-s: %(message)s')
    # Local client and server accept slightly different option sets.
    if is_local:
        shortopts = 'hd:s:b:p:k:l:m:O:o:G:g:c:t:vq'
        longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'user=',
                    'version']
    else:
        shortopts = 'hd:s:p:k:m:O:o:G:g:c:t:vq'
        longopts = ['help', 'fast-open', 'pid-file=', 'log-file=', 'workers=',
                    'forbidden-ip=', 'user=', 'manager-address=', 'version']
    try:
        optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
        # First pass: only options that decide where the config comes from
        # or short-circuit execution (-c / --help / --version).
        for key, value in optlist:
            if key == '-c':
                config_path = value
            elif key in ('-h', '--help'):
                print_help(is_local)
                sys.exit(0)
            elif key == '--version':
                print_shadowsocks()
                sys.exit(0)
            else:
                continue

        if config_path is None:
            config_path = find_config()

        if config_path:
            logging.debug('loading config from %s' % config_path)
            with open(config_path, 'rb') as f:
                try:
                    config = parse_json_in_str(remove_comment(f.read().decode('utf8')))
                except ValueError as e:
                    logging.error('found an error in config.json: %s', str(e))
                    sys.exit(1)

        # Second pass: command-line flags override file values.
        v_count = 0
        for key, value in optlist:
            if key == '-p':
                config['server_port'] = int(value)
            elif key == '-k':
                config['password'] = to_bytes(value)
            elif key == '-l':
                config['local_port'] = int(value)
            elif key == '-s':
                config['server'] = to_str(value)
            elif key == '-m':
                config['method'] = to_str(value)
            elif key == '-O':
                config['protocol'] = to_str(value)
            elif key == '-o':
                config['obfs'] = to_str(value)
            elif key == '-G':
                config['protocol_param'] = to_str(value)
            elif key == '-g':
                config['obfs_param'] = to_str(value)
            elif key == '-b':
                config['local_address'] = to_str(value)
            elif key == '-v':
                v_count += 1
                # '-vv' turns on more verbose mode
                config['verbose'] = v_count
            elif key == '-t':
                config['timeout'] = int(value)
            elif key == '--fast-open':
                config['fast_open'] = True
            elif key == '--workers':
                config['workers'] = int(value)
            elif key == '--manager-address':
                config['manager_address'] = value
            elif key == '--user':
                config['user'] = to_str(value)
            elif key == '--forbidden-ip':
                config['forbidden_ip'] = to_str(value)
            elif key == '-d':
                config['daemon'] = to_str(value)
            elif key == '--pid-file':
                config['pid-file'] = to_str(value)
            elif key == '--log-file':
                config['log-file'] = to_str(value)
            elif key == '-q':
                v_count -= 1
                config['verbose'] = v_count
            else:
                continue
    except getopt.GetoptError as e:
        print(e, file=sys.stderr)
        print_help(is_local)
        sys.exit(2)

    if not config:
        logging.error('config not specified')
        print_help(is_local)
        sys.exit(2)

    # Fill defaults for anything not supplied by file or CLI.
    config['password'] = to_bytes(config.get('password', b''))
    config['method'] = to_str(config.get('method', 'aes-256-cfb'))
    config['protocol'] = to_str(config.get('protocol', 'origin'))
    config['protocol_param'] = to_str(config.get('protocol_param', ''))
    config['obfs'] = to_str(config.get('obfs', 'plain'))
    config['obfs_param'] = to_str(config.get('obfs_param', ''))
    config['port_password'] = config.get('port_password', None)
    config['additional_ports'] = config.get('additional_ports', {})
    config['additional_ports_only'] = config.get('additional_ports_only', False)
    config['timeout'] = int(config.get('timeout', 300))
    config['udp_timeout'] = int(config.get('udp_timeout', 120))
    config['udp_cache'] = int(config.get('udp_cache', 64))
    config['fast_open'] = config.get('fast_open', False)
    config['workers'] = config.get('workers', 1)
    config['pid-file'] = config.get('pid-file', '/var/run/shadowsocksr.pid')
    config['log-file'] = config.get('log-file', '/var/log/shadowsocksr.log')
    config['verbose'] = config.get('verbose', False)
    config['connect_verbose_info'] = config.get('connect_verbose_info', 0)
    config['local_address'] = to_str(config.get('local_address', '127.0.0.1'))
    config['local_port'] = config.get('local_port', 1080)
    if is_local:
        if config.get('server', None) is None:
            logging.error('server addr not specified')
            print_local_help()
            sys.exit(2)
        else:
            config['server'] = to_str(config['server'])
    else:
        # Server side: parse the IP/port filter lists; any parse error is fatal.
        config['server'] = to_str(config.get('server', '0.0.0.0'))
        try:
            config['forbidden_ip'] = \
                IPNetwork(config.get('forbidden_ip', '127.0.0.0/8,::1/128'))
        except Exception as e:
            logging.error(e)
            sys.exit(2)
        try:
            config['forbidden_port'] = PortRange(config.get('forbidden_port', ''))
        except Exception as e:
            logging.error(e)
            sys.exit(2)
        try:
            config['ignore_bind'] = \
                IPNetwork(config.get('ignore_bind', '127.0.0.0/8,::1/128,10.0.0.0/8,192.168.0.0/16'))
        except Exception as e:
            logging.error(e)
            sys.exit(2)
    config['server_port'] = config.get('server_port', 8388)

    # Reconfigure logging to match the requested verbosity.
    logging.getLogger('').handlers = []
    logging.addLevelName(VERBOSE_LEVEL, 'VERBOSE')
    if config['verbose'] >= 2:
        level = VERBOSE_LEVEL
    elif config['verbose'] == 1:
        level = logging.DEBUG
    elif config['verbose'] == -1:
        level = logging.WARN
    elif config['verbose'] <= -2:
        level = logging.ERROR
    else:
        level = logging.INFO
    verbose = config['verbose']
    logging.basicConfig(level=level,
                        format='%(asctime)s %(levelname)-8s %(filename)s:%(lineno)s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')

    check_config(config, is_local)

    return config
def print_help(is_local):
    """Dispatch to the local- or server-side usage printer."""
    if is_local:
        print_local_help()
    else:
        print_server_help()
def print_local_help():
    """Print CLI usage for the client-side (sslocal) entry point."""
    print('''usage: sslocal [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.

You can supply configurations via either config file or command line arguments.

Proxy options:
  -c CONFIG              path to config file
  -s SERVER_ADDR         server address
  -p SERVER_PORT         server port, default: 8388
  -b LOCAL_ADDR          local binding address, default: 127.0.0.1
  -l LOCAL_PORT          local port, default: 1080
  -k PASSWORD            password
  -m METHOD              encryption method, default: aes-256-cfb
  -o OBFS                obfsplugin, default: http_simple
  -t TIMEOUT             timeout in seconds, default: 300
  --fast-open            use TCP_FASTOPEN, requires Linux 3.7+

General options:
  -h, --help             show this help message and exit
  -d start/stop/restart  daemon mode
  --pid-file PID_FILE    pid file for daemon mode
  --log-file LOG_FILE    log file for daemon mode
  --user USER            username to run as
  -v, -vv                verbose mode
  -q, -qq                quiet mode, only show warnings/errors
  --version              show version information

Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def print_server_help():
    """Print CLI usage for the server-side (ssserver) entry point."""
    print('''usage: ssserver [OPTION]...
A fast tunnel proxy that helps you bypass firewalls.

You can supply configurations via either config file or command line arguments.

Proxy options:
  -c CONFIG              path to config file
  -s SERVER_ADDR         server address, default: 0.0.0.0
  -p SERVER_PORT         server port, default: 8388
  -k PASSWORD            password
  -m METHOD              encryption method, default: aes-256-cfb
  -o OBFS                obfsplugin, default: http_simple
  -t TIMEOUT             timeout in seconds, default: 300
  --fast-open            use TCP_FASTOPEN, requires Linux 3.7+
  --workers WORKERS      number of workers, available on Unix/Linux
  --forbidden-ip IPLIST  comma seperated IP list forbidden to connect
  --manager-address ADDR optional server manager UDP address, see wiki

General options:
  -h, --help             show this help message and exit
  -d start/stop/restart  daemon mode
  --pid-file PID_FILE    pid file for daemon mode
  --log-file LOG_FILE    log file for daemon mode
  --user USER            username to run as
  -v, -vv                verbose mode
  -q, -qq                quiet mode, only show warnings/errors
  --version              show version information

Online help: <https://github.com/shadowsocks/shadowsocks>
''')
def _decode_list(data):
    """Return a copy of *data* with every unicode string UTF-8 encoded.

    Nested lists and dicts are converted recursively; other values are
    passed through untouched.
    """
    def _convert(item):
        if hasattr(item, 'encode'):
            return item.encode('utf-8')
        if isinstance(item, list):
            return _decode_list(item)
        if isinstance(item, dict):
            return _decode_dict(item)
        return item

    return [_convert(item) for item in data]
def _decode_dict(data):
    """Return a copy of *data* with every unicode string value UTF-8 encoded.

    Nested lists and dicts are converted recursively; keys are left as-is.
    """
    def _convert(value):
        if hasattr(value, 'encode'):
            return value.encode('utf-8')
        if isinstance(value, list):
            return _decode_list(value)
        if isinstance(value, dict):
            return _decode_dict(value)
        return value

    converted = {}
    for key, value in data.items():
        converted[key] = _convert(value)
    return converted
class JSFormat:
    """Streaming state machine used by remove_comment() to strip '//' comments.

    States: 0 = normal text, 1 = inside a double-quoted string,
    2 = just after a backslash inside a string, 3 = after a single '/',
    4 = inside a '//' comment (dropped until end of line).
    """

    def __init__(self):
        self.state = 0

    def push(self, ch):
        """Feed one character; return the text to emit for it ('' drops it)."""
        ch = ord(ch)
        if self.state == 0:
            if ch == ord('"'):
                self.state = 1
                return to_str(chr(ch))
            elif ch == ord('/'):
                # Might start a '//' comment; emit nothing yet.
                self.state = 3
            else:
                return to_str(chr(ch))
        elif self.state == 1:
            if ch == ord('"'):
                self.state = 0
                return to_str(chr(ch))
            elif ch == ord('\\'):
                self.state = 2
            # Fall-through: every in-string character (including the
            # backslash that just switched us to state 2) is emitted.
            return to_str(chr(ch))
        elif self.state == 2:
            self.state = 1
            if ch == ord('"'):
                return to_str(chr(ch))
            # NOTE(review): this prepends another backslash even though the
            # one before it was already emitted in state 1, so escapes other
            # than \" appear to gain an extra backslash — verify upstream.
            return "\\" + to_str(chr(ch))
        elif self.state == 3:
            if ch == ord('/'):
                # Second '/': a comment starts here.
                self.state = 4
            else:
                # Lone '/': emit it together with the current character.
                return "/" + to_str(chr(ch))
        elif self.state == 4:
            if ch == ord('\n'):
                # Comment ends at the newline, which is preserved.
                self.state = 0
                return "\n"
        return ""
def remove_comment(json):
    """Strip '//' line comments from a JSON-like string via JSFormat."""
    fmt = JSFormat()
    return "".join([fmt.push(c) for c in json])
def parse_json_in_str(data):
    """Parse *data* as JSON; string values are UTF-8 encoded via _decode_dict."""
    # parse json and convert everything from unicode to str
    return json.loads(data, object_hook=_decode_dict)
| 34.561798 | 101 | 0.565085 |
ace72902f4daf99585965f366817f65a9640bd28 | 1,093 | py | Python | tests/test_multicloud.py | krasm/python-onapsdk | 87cd3017fc542a8afd3be51fbd89934ed87ed3a7 | [
"Apache-2.0"
] | 4 | 2020-06-13T04:51:27.000Z | 2021-01-06T15:00:51.000Z | tests/test_multicloud.py | krasm/python-onapsdk | 87cd3017fc542a8afd3be51fbd89934ed87ed3a7 | [
"Apache-2.0"
] | 10 | 2021-09-20T15:42:47.000Z | 2021-09-23T12:49:51.000Z | tests/test_multicloud.py | krasm/python-onapsdk | 87cd3017fc542a8afd3be51fbd89934ed87ed3a7 | [
"Apache-2.0"
] | 8 | 2020-08-28T10:56:02.000Z | 2022-02-11T17:06:03.000Z | from unittest import mock
import pytest
from onapsdk.msb.multicloud import Multicloud
@mock.patch.object(Multicloud, "send_message")
def test_multicloud_register(mock_send_message):
    """register_vim issues exactly one POST to the region's /registry URL."""
    Multicloud.register_vim(cloud_owner="test_cloud_owner",
                            cloud_region_id="test_cloud_region")
    mock_send_message.assert_called_once()
    # send_message is called positionally as (method, description, url).
    method, description, url = mock_send_message.call_args[0]
    assert method == "POST"
    assert description == "Register VIM instance to ONAP"
    assert url == f"{Multicloud.base_url}/test_cloud_owner/test_cloud_region/registry"
@mock.patch.object(Multicloud, "send_message")
def test_multicloud_unregister(mock_send_message):
    """unregister_vim issues exactly one DELETE to the region's URL."""
    Multicloud.unregister_vim(cloud_owner="test_cloud_owner",
                              cloud_region_id="test_cloud_region")
    mock_send_message.assert_called_once()
    # send_message is called positionally as (method, description, url).
    method, description, url = mock_send_message.call_args[0]
    assert method == "DELETE"
    assert description == "Unregister VIM instance from ONAP"
    assert url == f"{Multicloud.base_url}/test_cloud_owner/test_cloud_region"
| 39.035714 | 86 | 0.748399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.