Dataset schema (29 columns; value ranges as reported by the dataset viewer, ⌀ = nullable):
- hexsha: string, length 40
- size: int64, 4 to 1.02M
- ext: string, 8 classes
- lang: string, 1 class
- max_stars_repo_path: string, length 4 to 209
- max_stars_repo_name: string, length 5 to 121
- max_stars_repo_head_hexsha: string, length 40
- max_stars_repo_licenses: list, length 1 to 10
- max_stars_count: int64, 1 to 191k ⌀
- max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
- max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
- max_issues_repo_path: string, length 4 to 209
- max_issues_repo_name: string, length 5 to 121
- max_issues_repo_head_hexsha: string, length 40
- max_issues_repo_licenses: list, length 1 to 10
- max_issues_count: int64, 1 to 67k ⌀
- max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
- max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
- max_forks_repo_path: string, length 4 to 209
- max_forks_repo_name: string, length 5 to 121
- max_forks_repo_head_hexsha: string, length 40
- max_forks_repo_licenses: list, length 1 to 10
- max_forks_count: int64, 1 to 105k ⌀
- max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
- max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
- content: string, length 4 to 1.02M
- avg_line_length: float64, 1.07 to 66.1k
- max_line_length: int64, 4 to 266k
- alphanum_fraction: float64, 0.01 to 1
hexsha: 38c62692fda0c5705934a7c506f825f68cfeadc1 | size: 2,109 | ext: py | lang: Python
max_stars: repo_path=lib/resnet_parameters.py | repo_name=googleinterns/loop-project | head_hexsha=28acb1c815e0a65f51e809d278eea08ffb06483e | licenses=["Apache-2.0"] | count=3 | stars_event: 2020-05-29T00:34:34.000Z to 2020-12-14T21:50:12.000Z
max_issues: repo_path=lib/resnet_parameters.py | repo_name=googleinterns/loop-project | head_hexsha=28acb1c815e0a65f51e809d278eea08ffb06483e | licenses=["Apache-2.0"] | count=2 | issues_event: 2020-07-21T00:57:15.000Z to 2020-09-04T22:09:33.000Z
max_forks: repo_path=lib/resnet_parameters.py | repo_name=googleinterns/loop-project | head_hexsha=28acb1c815e0a65f51e809d278eea08ffb06483e | licenses=["Apache-2.0"] | count=null | forks_event: null
content:
from dataclasses import dataclass, field
import tensorflow as tf
@dataclass
class ResNetParameters:
"""Stores ResNet parameters.
Arguments:
input_shape (tensor): shape of input image tensor
num_layers (int): number of residual blocks
num_classes (int): number of classes
num_templates (int): number of templates
depth: depth of resblock tensors
in_adapter (string): input adapter architecture. The options are
`original`, `space2depth` and `strided`.
out_adapter (string): output adapter architecture. The options are
`v1`, `v2`, `isometric` and `depthwise`.
kernel_regularizer: kernel regularization parameter.
dropout: dropout parameter (drop).
out_filters: a list of two integers that represent number of filters
in conv layers of isometric adapter.
with_head: if True, the model will have a head (top dense layer).
mixture_weights_as_input: if False, the mixture weights will be created
with the model, otherwise they are treated as model input.
separate_bn: if True, separate batch normalization layers will be used
for each layer.
name: model name.
"""
input_shape: tuple = (32, 32, 3)
num_layers: int = 16
num_classes: int = 10
num_templates: int = 4
depth: int = 40
tensor_size: int = 16
in_adapter: str = "strided"
out_adapter: str = "isometric"
out_filters: list = field(default_factory=list)
dropout: float = 0.1
kernel_regularizer: float = 1e-5
with_head: bool = True
name: str = "model"
activation: callable = tf.nn.relu
mixture_weights_as_input: bool = False
separate_bn: bool = False
def init_from_args(self, args):
self.input_shape = (args.reshape_to, args.reshape_to, 3)
self.num_layers = args.num_blocks
self.tensor_size = args.size
self.depth = args.depth
self.separate_bn = args.sep_bn > 0
self.num_templates = args.num_templates
self.dropout = args.dropout
self.in_adapter = args.in_adapter_type
self.out_adapter = args.out_adapter_type
self.out_filters = [args.out_filter_base, 2 * args.out_filter_base]
self.kernel_regularizer = args.kernel_reg
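# Illustrative usage sketch (not from the upstream repo), assuming an argparse-style
# namespace that carries the attribute names read by init_from_args(); values are placeholders.
if __name__ == "__main__":
    from types import SimpleNamespace

    # Override selected defaults directly.
    params = ResNetParameters(num_classes=100, out_filters=[32, 64])

    # Or populate the fields from a parsed-arguments object.
    args = SimpleNamespace(reshape_to=32, num_blocks=16, size=16, depth=40, sep_bn=0,
                           num_templates=4, dropout=0.1, in_adapter_type="strided",
                           out_adapter_type="isometric", out_filter_base=64,
                           kernel_reg=1e-5)
    params.init_from_args(args)
    print(params.input_shape, params.out_filters)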
avg_line_length: 35.745763 | max_line_length: 73 | alphanum_fraction: 0.733049

hexsha: faf4c4d8f0b4b6194a1c551fb92679f8fc7673df | size: 2,224 | ext: py | lang: Python
max_stars: repo_path=suzieq/genhosts.py | repo_name=foobug/suzieq | head_hexsha=c5927616a0e1a1fd9283f2a3eeb120d24ff0f2b5 | licenses=["Apache-2.0"] | count=487 | stars_event: 2020-04-29T13:34:34.000Z to 2022-03-31T06:13:41.000Z
max_issues: repo_path=suzieq/genhosts.py | repo_name=foobug/suzieq | head_hexsha=c5927616a0e1a1fd9283f2a3eeb120d24ff0f2b5 | licenses=["Apache-2.0"] | count=410 | issues_event: 2020-04-24T20:57:52.000Z to 2022-03-31T18:07:48.000Z
max_forks: repo_path=suzieq/genhosts.py | repo_name=foobug/suzieq | head_hexsha=c5927616a0e1a1fd9283f2a3eeb120d24ff0f2b5 | licenses=["Apache-2.0"] | count=75 | forks_event: 2020-04-29T22:13:34.000Z to 2022-03-31T17:00:17.000Z
content:
#!/usr/bin/env python3
import sys
import shlex
def process_ansible_inventory(filename, namespace='default'):
"""Read an Ansible inventory file and produce device list for suzieq
:param filename: Ansible inventory filename
:type filename: string
:param namespace: The namespace associated with the data from these devices
:type namespace: string
:rtype: list:
:return: List of devices as extracted from the Ansible file
"""
with open(filename, 'r') as f:
lines = f.readlines()
hostsdata = []
for line in lines:
host = None
hostline = ''
out = shlex.split(line)
if 'ansible_network_os=eos' in out:
transport = 'https'
addnl_info = 'devtype=eos'
else:
transport = 'ssh'
addnl_info = ''
for elem in out:
if elem.startswith('ansible_host='):
k, v = elem.split('=')
if v == '127.0.0.1':
host = v
port = 0
continue
else:
hostline = (' - url: {}://vagrant@{} {}'
.format(transport, v, addnl_info))
if elem.startswith('ansible_port='):
k, v = elem.split('=')
if host:
hostline = (
' - url: ssh://vagrant@{}:{}'.format(host, v))
if elem.startswith('ansible_ssh_private_key_file') and transport == 'ssh':
k, v = elem.split('=')
if hostline:
hostline += ' keyfile={}'.format(v.strip())
else:
addnl_info += ' keyfile={}'.format(v.strip())
hostsdata.append(hostline)
hostsdata.insert(0, '- namespace: {}'.format(namespace))
hostsdata.insert(1, ' hosts:')
return hostsdata
if __name__ == '__main__':
if len(sys.argv) < 4:
print('Usage: genhosts <Ansible inventory file> <output file> <DC name>')
sys.exit(1)
hostsdata = process_ansible_inventory(sys.argv[1], sys.argv[3])
out = '\n'.join(hostsdata)
with open(sys.argv[2], 'w') as f:
f.write(out)
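# Illustrative run (not from the upstream repo); the inventory filename, addresses and
# key path are placeholders. Invocation follows the usage string printed by __main__ above:
#
#   python3 genhosts.py vagrant_ansible_inventory devices.yml mydc
#
# An inventory line such as
#   leaf01 ansible_host=192.168.121.10 ansible_port=22 ansible_ssh_private_key_file=/path/key
# is turned, roughly, into
#   - namespace: mydc
#     hosts:
#       - url: ssh://vagrant@192.168.121.10  keyfile=/path/key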
avg_line_length: 28.883117 | max_line_length: 86 | alphanum_fraction: 0.515737

hexsha: 9d39035a81627868f41bf9748875f830cfb318d6 | size: 640 | ext: py | lang: Python
max_stars: repo_path=Sort/04.04_quicksort_one_recursion.py | repo_name=Szymon-Budziak/ASD_exercises_solutions | head_hexsha=36ccbdae03a6c7e4ad141a2b7b01bef9353574ee | licenses=["MIT"] | count=7 | stars_event: 2021-12-28T23:38:42.000Z to 2022-03-29T16:36:16.000Z
max_issues: repo_path=Sort/04.04_quicksort_one_recursion.py | repo_name=Szymon-Budziak/ASD_exercises_solutions | head_hexsha=36ccbdae03a6c7e4ad141a2b7b01bef9353574ee | licenses=["MIT"] | count=null | issues_event: null
max_forks: repo_path=Sort/04.04_quicksort_one_recursion.py | repo_name=Szymon-Budziak/ASD_exercises_solutions | head_hexsha=36ccbdae03a6c7e4ad141a2b7b01bef9353574ee | licenses=["MIT"] | count=4 | forks_event: 2021-06-29T20:21:52.000Z to 2022-03-12T10:04:17.000Z
content:
from time import perf_counter
from random import randint, seed
seed(100)
def partition(T, p, r):
pivot = T[r]
i = p-1
for j in range(p, r):
if T[j] <= pivot:
i += 1
T[i], T[j] = T[j], T[i]
T[i+1], T[r] = T[r], T[i+1]
return i+1
def quicksort(T, p, r):
while p < r:
q = partition(T, p, r)
if q-p <= r-q:
quicksort(T, p, q-1)
p = q+1
else:
quicksort(T, q, r)
r = q-1
T = [randint(1, 1000) for _ in range(10000)]
start = perf_counter()
quicksort(T, 0, len(T)-1)
end = perf_counter()
print(T)
print(end-start)
avg_line_length: 18.823529 | max_line_length: 44 | alphanum_fraction: 0.479688

hexsha: 130123773f93b8fba7426540ddca75e27c97e11b | size: 1,659 | ext: py | lang: Python
max_stars: repo_path=iRobotCreateLib/ConfigManager.py | repo_name=ramtinkermani/iRobotCreateAPI-Python | head_hexsha=2eaf9d40b5e2af57fae2db222252556fc95ab92c | licenses=["MIT"] | count=null | stars_event: null
max_issues: repo_path=iRobotCreateLib/ConfigManager.py | repo_name=ramtinkermani/iRobotCreateAPI-Python | head_hexsha=2eaf9d40b5e2af57fae2db222252556fc95ab92c | licenses=["MIT"] | count=null | issues_event: null
max_forks: repo_path=iRobotCreateLib/ConfigManager.py | repo_name=ramtinkermani/iRobotCreateAPI-Python | head_hexsha=2eaf9d40b5e2af57fae2db222252556fc95ab92c | licenses=["MIT"] | count=null | forks_event: null
content:
import json
from Errors import IRobotCreateError, ErrorCode
class ConfigManager:
configuration = {}
@staticmethod
def __readConfig():
try:
# TODO: Change the following to a relative path. Temporary fix.
fp = open("/home/pi/workspace/PyCharmRemote/iRobotCreateLib/configuration/SerialConfig.json", "r")
jsonString = fp.read()
serialConfig = json.loads(jsonString)
ConfigManager.configuration["SerialPortAddress"] = serialConfig["SerialPortAddress"]
ConfigManager.configuration["BaudRate"] = serialConfig["BaudRate"]
fp.close()
# TEMPORARILY DISABLING THIS TILL WE UPGRADE TO PYTHON 3
# except FileNotFoundError as ex:
# raise IRobotCreateError(ErrorCode.ConfigurationFileMissing, ex.strerror)
# except IOError as ex:
# if ex.errno == IOError.errno.EACCESS:
# raise IRobotCreateError(ErrorCode.ConfigFileError, "Configuration file not found: " + ex.strerror)
# if ex.errno == 13:
# raise IRobotCreateError(ErrorCode.ConfigFileError, "You don't have permission to access the configuration file" + ex.strerror)
except ValueError as ex:
raise IRobotCreateError(ErrorCode.ConfigFileCorrupted, "Configuration file may be corrupted: " + str(ex))
@staticmethod
def getConfig(configName):
ConfigManager.__readConfig()
return ConfigManager.configuration[configName]
def main():
print(ConfigManager.getConfig("SerialPortAddress"))
print(ConfigManager.getConfig("BaudRate"))
if __name__ == "__main__":
main()
avg_line_length: 40.463415 | max_line_length: 144 | alphanum_fraction: 0.675708

hexsha: 02f017f0b6c8ea56efe3e24ccbee4d2bb5eb1799 | size: 3,872 | ext: py | lang: Python
max_stars: repo_path=facebook_business/adobjects/playablecontent.py | repo_name=mschmo/facebook-python-ads-sdk | head_hexsha=bf273b4627c99dc261e4483ae0a4c5de648b37c8 | licenses=["CNRI-Python"] | count=null | stars_event: null
max_issues: repo_path=facebook_business/adobjects/playablecontent.py | repo_name=mschmo/facebook-python-ads-sdk | head_hexsha=bf273b4627c99dc261e4483ae0a4c5de648b37c8 | licenses=["CNRI-Python"] | count=null | issues_event: null
max_forks: repo_path=facebook_business/adobjects/playablecontent.py | repo_name=mschmo/facebook-python-ads-sdk | head_hexsha=bf273b4627c99dc261e4483ae0a4c5de648b37c8 | licenses=["CNRI-Python"] | count=null | forks_event: null
content:
# Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class PlayableContent(
AbstractCrudObject,
):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isPlayableContent = True
super(PlayableContent, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
id = 'id'
name = 'name'
owner = 'owner'
source = 'source'
source_url = 'source_url'
source_zip = 'source_zip'
app_id = 'app_id'
# @deprecated get_endpoint function is deprecated
@classmethod
def get_endpoint(cls):
return 'adplayables'
def api_create(self, parent_id, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.adobjects.adaccount import AdAccount
return AdAccount(api=self._api, fbid=parent_id).create_ad_playable(fields, params, batch, success, failure, pending)
def api_get(self, fields=None, params=None, batch=None, success=None, failure=None, pending=False):
from facebook_business.utils import api_utils
if batch is None and (success is not None or failure is not None):
api_utils.warning('`success` and `failure` callback only work for batch call.')
param_types = {
}
enums = {
}
request = FacebookRequest(
node_id=self['id'],
method='GET',
endpoint='/',
api=self._api,
param_checker=TypeChecker(param_types, enums),
target_class=PlayableContent,
api_type='NODE',
response_parser=ObjectParser(reuse_object=self),
)
request.add_params(params)
request.add_fields(fields)
if batch is not None:
request.add_to_batch(batch, success=success, failure=failure)
return request
elif pending:
return request
else:
self.assure_call()
return request.execute()
_field_types = {
'id': 'string',
'name': 'string',
'owner': 'Profile',
'source': 'file',
'source_url': 'string',
'source_zip': 'file',
'app_id': 'string',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
return field_enum_info
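# Illustrative usage sketch (not from the generated sources); the access token and node id
# are placeholders, and FacebookAdsApi.init is the SDK's standard session bootstrap.
if __name__ == "__main__":
    from facebook_business.api import FacebookAdsApi

    FacebookAdsApi.init(access_token="<ACCESS_TOKEN>")
    playable = PlayableContent("<PLAYABLE_CONTENT_ID>")
    # Without a batch, api_get() executes the GET request and returns the node.
    node = playable.api_get(fields=[PlayableContent.Field.name,
                                    PlayableContent.Field.source_url])
    print(node[PlayableContent.Field.name])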
avg_line_length: 36.528302 | max_line_length: 124 | alphanum_fraction: 0.686725

hexsha: 70d3c9496ec73087098d268296c4315738ea29f7 | size: 2,916 | ext: py | lang: Python
max_stars: repo_path=junior_class/chapter-3-Computer_Vision/code/nets/AlexNet.py | repo_name=wwhio/awesome-DeepLearning | head_hexsha=2cc92edcf0c22bdfc670c537cc819c8fadf33fac | licenses=["Apache-2.0"] | count=1,150 | stars_event: 2021-06-01T03:44:21.000Z to 2022-03-31T13:43:42.000Z
max_issues: repo_path=junior_class/chapter-3-Computer_Vision/code/nets/AlexNet.py | repo_name=wwhio/awesome-DeepLearning | head_hexsha=2cc92edcf0c22bdfc670c537cc819c8fadf33fac | licenses=["Apache-2.0"] | count=358 | issues_event: 2021-06-01T03:58:47.000Z to 2022-03-28T02:55:00.000Z
max_forks: repo_path=junior_class/chapter-3-Computer_Vision/code/nets/AlexNet.py | repo_name=wwhio/awesome-DeepLearning | head_hexsha=2cc92edcf0c22bdfc670c537cc819c8fadf33fac | licenses=["Apache-2.0"] | count=502 | forks_event: 2021-05-31T12:52:14.000Z to 2022-03-31T02:51:41.000Z
content:
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import the required packages
import paddle
from paddle.nn import Conv2D, MaxPool2D, Linear, Dropout
import paddle.nn.functional as F
# Define the AlexNet network structure
class AlexNet(paddle.nn.Layer):
def __init__(self, num_classes=1):
super(AlexNet, self).__init__()
# Like LeNet, AlexNet uses convolution and pooling layers to extract image features
# Unlike LeNet, the activation function is changed to 'relu'
self.conv1 = Conv2D(
in_channels=3,
out_channels=96,
kernel_size=11,
stride=4,
padding=5)
self.max_pool1 = MaxPool2D(kernel_size=2, stride=2)
self.conv2 = Conv2D(
in_channels=96,
out_channels=256,
kernel_size=5,
stride=1,
padding=2)
self.max_pool2 = MaxPool2D(kernel_size=2, stride=2)
self.conv3 = Conv2D(
in_channels=256,
out_channels=384,
kernel_size=3,
stride=1,
padding=1)
self.conv4 = Conv2D(
in_channels=384,
out_channels=384,
kernel_size=3,
stride=1,
padding=1)
self.conv5 = Conv2D(
in_channels=384,
out_channels=256,
kernel_size=3,
stride=1,
padding=1)
self.max_pool5 = MaxPool2D(kernel_size=2, stride=2)
self.fc1 = Linear(in_features=12544, out_features=4096)
self.drop_ratio1 = 0.5
self.drop1 = Dropout(self.drop_ratio1)
self.fc2 = Linear(in_features=4096, out_features=4096)
self.drop_ratio2 = 0.5
self.drop2 = Dropout(self.drop_ratio2)
self.fc3 = Linear(in_features=4096, out_features=num_classes)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.max_pool1(x)
x = self.conv2(x)
x = F.relu(x)
x = self.max_pool2(x)
x = self.conv3(x)
x = F.relu(x)
x = self.conv4(x)
x = F.relu(x)
x = self.conv5(x)
x = F.relu(x)
x = self.max_pool5(x)
x = paddle.reshape(x, [x.shape[0], -1])
x = self.fc1(x)
x = F.relu(x)
# Apply dropout after the fully-connected layer to suppress overfitting
x = self.drop1(x)
x = self.fc2(x)
x = F.relu(x)
# Apply dropout after the fully-connected layer to suppress overfitting
x = self.drop2(x)
x = self.fc3(x)
return x
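# Quick sanity-check sketch (not from the upstream repo). With the layer sizes above,
# fc1 expects 256 * 7 * 7 = 12544 features, i.e. the network is sized for 224x224 RGB inputs.
if __name__ == "__main__":
    model = AlexNet(num_classes=10)
    x = paddle.randn([1, 3, 224, 224], dtype='float32')
    y = model(x)
    print(y.shape)  # expected: [1, 10]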
avg_line_length: 31.021277 | max_line_length: 74 | alphanum_fraction: 0.580933

hexsha: 4cb58ae571f6f42f544ec36dd319d5bc742bf493 | size: 17,043 | ext: py | lang: Python
max_stars: repo_path=sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/operations/_log_analytics_operations.py | repo_name=dubiety/azure-sdk-for-python | head_hexsha=62ffa839f5d753594cf0fe63668f454a9d87a346 | licenses=["MIT"] | count=1 | stars_event: 2022-02-01T18:50:12.000Z to 2022-02-01T18:50:12.000Z
max_issues: repo_path=sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/operations/_log_analytics_operations.py | repo_name=ellhe-blaster/azure-sdk-for-python | head_hexsha=82193ba5e81cc5e5e5a5239bba58abe62e86f469 | licenses=["MIT"] | count=null | issues_event: null
max_forks: repo_path=sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_06_01/operations/_log_analytics_operations.py | repo_name=ellhe-blaster/azure-sdk-for-python | head_hexsha=82193ba5e81cc5e5e5a5239bba58abe62e86f469 | licenses=["MIT"] | count=null | forks_event: null
content:
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar, Union
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_export_request_rate_by_interval_request_initial(
location: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-06-01") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getRequestRateByInterval") # pylint: disable=line-too-long
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
_header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_query_parameters,
headers=_header_parameters,
json=json,
content=content,
**kwargs
)
def build_export_throttled_requests_request_initial(
location: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2020-06-01") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getThrottledRequests") # pylint: disable=line-too-long
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
_header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=_url,
params=_query_parameters,
headers=_header_parameters,
json=json,
content=content,
**kwargs
)
class LogAnalyticsOperations(object):
"""LogAnalyticsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _export_request_rate_by_interval_initial(
self,
location: str,
parameters: "_models.RequestRateByIntervalInput",
**kwargs: Any
) -> Optional["_models.LogAnalyticsOperationResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.LogAnalyticsOperationResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-06-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'RequestRateByIntervalInput')
request = build_export_request_rate_by_interval_request_initial(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._export_request_rate_by_interval_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LogAnalyticsOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_export_request_rate_by_interval_initial.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getRequestRateByInterval"} # type: ignore
@distributed_trace
def begin_export_request_rate_by_interval(
self,
location: str,
parameters: "_models.RequestRateByIntervalInput",
**kwargs: Any
) -> LROPoller["_models.LogAnalyticsOperationResult"]:
"""Export logs that show Api requests made by this subscription in the given time window to show
throttling activities.
:param location: The location upon which virtual-machine-sizes is queried.
:type location: str
:param parameters: Parameters supplied to the LogAnalytics getRequestRateByInterval Api.
:type parameters: ~azure.mgmt.compute.v2020_06_01.models.RequestRateByIntervalInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either LogAnalyticsOperationResult or the result
of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2020_06_01.models.LogAnalyticsOperationResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-06-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.LogAnalyticsOperationResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._export_request_rate_by_interval_initial(
location=location,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('LogAnalyticsOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_export_request_rate_by_interval.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getRequestRateByInterval"} # type: ignore
def _export_throttled_requests_initial(
self,
location: str,
parameters: "_models.ThrottledRequestsInput",
**kwargs: Any
) -> Optional["_models.LogAnalyticsOperationResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.LogAnalyticsOperationResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-06-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ThrottledRequestsInput')
request = build_export_throttled_requests_request_initial(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._export_throttled_requests_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LogAnalyticsOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_export_throttled_requests_initial.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getThrottledRequests"} # type: ignore
@distributed_trace
def begin_export_throttled_requests(
self,
location: str,
parameters: "_models.ThrottledRequestsInput",
**kwargs: Any
) -> LROPoller["_models.LogAnalyticsOperationResult"]:
"""Export logs that show total throttled Api requests for this subscription in the given time
window.
:param location: The location upon which virtual-machine-sizes is queried.
:type location: str
:param parameters: Parameters supplied to the LogAnalytics getThrottledRequests Api.
:type parameters: ~azure.mgmt.compute.v2020_06_01.models.ThrottledRequestsInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either LogAnalyticsOperationResult or the result
of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2020_06_01.models.LogAnalyticsOperationResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-06-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.LogAnalyticsOperationResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._export_throttled_requests_initial(
location=location,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('LogAnalyticsOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_export_throttled_requests.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getThrottledRequests"} # type: ignore
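# Illustrative usage sketch (not from the upstream SDK sources). It assumes the
# azure-identity package for credentials and a ComputeManagementClient pinned to this API
# version; the RequestRateByIntervalInput field names below are assumptions and should be
# verified against the v2020_06_01 models module.
if __name__ == "__main__":
    import datetime
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.compute import ComputeManagementClient

    client = ComputeManagementClient(DefaultAzureCredential(), "<SUBSCRIPTION_ID>")
    parameters = _models.RequestRateByIntervalInput(
        blob_container_sas_uri="<CONTAINER_SAS_URI>",
        from_time=datetime.datetime(2020, 6, 1),
        to_time=datetime.datetime(2020, 6, 2),
        interval_length="FiveMins",
    )
    # The begin_* method returns an LROPoller; result() blocks until the export finishes.
    poller = client.log_analytics.begin_export_request_rate_by_interval("<LOCATION>", parameters)
    print(poller.result())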
avg_line_length: 45.206897 | max_line_length: 211 | alphanum_fraction: 0.681394

hexsha: 4d0f748e4bbbe187b19ecaa48ff425cdaa0a4e3b | size: 2,301 | ext: py | lang: Python
max_stars: repo_path=misc/haptics/game.py | repo_name=mmolnar0/sgillen_research | head_hexsha=752e09fdf7a996c832e71b0a8296322fe77e9ae3 | licenses=["MIT"] | count=null | stars_event: null
max_issues: repo_path=misc/haptics/game.py | repo_name=mmolnar0/sgillen_research | head_hexsha=752e09fdf7a996c832e71b0a8296322fe77e9ae3 | licenses=["MIT"] | count=null | issues_event: null
max_forks: repo_path=misc/haptics/game.py | repo_name=mmolnar0/sgillen_research | head_hexsha=752e09fdf7a996c832e71b0a8296322fe77e9ae3 | licenses=["MIT"] | count=null | forks_event: null
content:
import cv2
import cv2.aruco as aruco
import numpy as np
from scipy.signal import sawtooth
from numpy import sin, pi
import sounddevice as sd
# Open the default camera (should be your built in webcam if you have one)
cap = cv2.VideoCapture(0)
if not cap.isOpened():
print("something went wrong! video not open")
raise SystemExit
# Define parameters that the marker detection library needs
DICTIONARY = aruco.DICT_6X6_1000
aruco_dict = aruco.Dictionary_get(DICTIONARY)
aruco_parameters = aruco.DetectorParameters_create()
# make a window that will display all the found markers
#re, img = cap.read()
#cv2.namedWindow('Markers')
#cv2.imshow('Markers', img)
# Parameters for the wave we send to the motor
fs = 44100 # sampling rate, Hz
duration = .5 # in seconds
f = 15 # sine frequency, Hz
wave = abs((sin(2*pi*np.arange(fs*duration)*f/fs)).astype(np.float32))
#wave = sawtooth(2*pi*np.arange(fs*duration)*f/fs).astype(np.float32)
wave_play = wave.copy() # Make a copy of the wave for the modified version that actually gets sent to the motor
baseline = 70
static_gain = .01
# Start the game loop
while (True):
# run the marker detection
re, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=aruco_parameters)
#found = aruco.drawDetectedMarkers(img, corners, ids)
#cv2.imshow('Markers', found)
# as long as we found at least one marker, go ahead and change the amplitude
if corners:
tracked_marker = corners[0].squeeze()
# This is the x position of opposing corners of the marker (in pixels). Subtracting the two is a lazy way to
# estimate the distance of the marker from the screen
x_track1 = tracked_marker[0, 0]
x_track2 = tracked_marker[2, 0]
gain = (abs(x_track1 - x_track2) - baseline)*static_gain
# Modify the played waveform we play (just change the amplitude for now)
wave_play = gain*wave
print(abs(x_track1 - x_track2), )
# Go ahead and play the wave regardless if we updated the position of the marker
sd.play(wave_play, fs, blocking=True)
# give us a way to actually quit
if cv2.waitKey(1) == ord('q'):
break
sd.stop()
cap.release()
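# Illustrative helper (not part of the original script). Testing the loop requires a
# printed 6x6 ArUco marker; with the same legacy cv2.aruco API used above, one can be
# generated along these lines:
#
#   marker = aruco.drawMarker(aruco_dict, 23, 400)  # marker id 23, 400x400 pixels
#   cv2.imwrite('marker_23.png', marker)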
avg_line_length: 32.871429 | max_line_length: 116 | alphanum_fraction: 0.706215

hexsha: 1332867b1a2c8a2ed09c95a527615a2e6313426b | size: 210 | ext: py | lang: Python
max_stars: repo_path=John/deploy_john/pack_to_bento.py | repo_name=handertolium/lil-NLP-app | head_hexsha=10342a58034af2b343d3e993910fb98571b509d4 | licenses=["Apache-2.0"] | count=null | stars_event: null
max_issues: repo_path=John/deploy_john/pack_to_bento.py | repo_name=handertolium/lil-NLP-app | head_hexsha=10342a58034af2b343d3e993910fb98571b509d4 | licenses=["Apache-2.0"] | count=null | issues_event: null
max_forks: repo_path=John/deploy_john/pack_to_bento.py | repo_name=handertolium/lil-NLP-app | head_hexsha=10342a58034af2b343d3e993910fb98571b509d4 | licenses=["Apache-2.0"] | count=null | forks_event: null
content:
import torch
from main import QandA
def saveBento():
bento_svc = QandA()
bento_svc.pack('onnx_model', './john_model_fp16.onnx')
bento_svc.save()
if __name__ == '__main__':
saveBento()
avg_line_length: 14 | max_line_length: 58 | alphanum_fraction: 0.657143

hexsha: 722c93d91dceb88cc76f522b12b2edbc725294eb | size: 5,446 | ext: py | lang: Python
max_stars: repo_path=src/sentinel/azext_sentinel/vendored_sdks/logic_app/mgmt/logic/operations/workflow_version_triggers_operations.py | repo_name=amirjalali65/azure-cli-extensions | head_hexsha=dcc7607afafea1815d70bc5c765b7a0be07544a7 | licenses=["MIT"] | count=null | stars_event: null
max_issues: repo_path=src/sentinel/azext_sentinel/vendored_sdks/logic_app/mgmt/logic/operations/workflow_version_triggers_operations.py | repo_name=amirjalali65/azure-cli-extensions | head_hexsha=dcc7607afafea1815d70bc5c765b7a0be07544a7 | licenses=["MIT"] | count=null | issues_event: null
max_forks: repo_path=src/sentinel/azext_sentinel/vendored_sdks/logic_app/mgmt/logic/operations/workflow_version_triggers_operations.py | repo_name=amirjalali65/azure-cli-extensions | head_hexsha=dcc7607afafea1815d70bc5c765b7a0be07544a7 | licenses=["MIT"] | count=1 | forks_event: 2021-02-17T21:35:31.000Z to 2021-02-17T21:35:31.000Z
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class WorkflowVersionTriggersOperations(object):
"""WorkflowVersionTriggersOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: The API version. Constant value: "2018-07-01-preview".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-07-01-preview"
self.config = config
def list_callback_url(
self, resource_group_name, workflow_name, version_id, trigger_name, not_after=None, key_type=None, custom_headers=None, raw=False, **operation_config):
"""Get the callback url for a trigger of a workflow version.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param workflow_name: The workflow name.
:type workflow_name: str
:param version_id: The workflow versionId.
:type version_id: str
:param trigger_name: The workflow trigger name.
:type trigger_name: str
:param not_after: The expiry time.
:type not_after: datetime
:param key_type: The key type. Possible values include:
'NotSpecified', 'Primary', 'Secondary'
:type key_type: str or ~azure.mgmt.logic.models.KeyType
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: WorkflowTriggerCallbackUrl or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.logic.models.WorkflowTriggerCallbackUrl or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
parameters = None
if not_after is not None or key_type is not None:
parameters = models.GetCallbackUrlParameters(not_after=not_after, key_type=key_type)
# Construct URL
url = self.list_callback_url.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workflowName': self._serialize.url("workflow_name", workflow_name, 'str'),
'versionId': self._serialize.url("version_id", version_id, 'str'),
'triggerName': self._serialize.url("trigger_name", trigger_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
if parameters is not None:
body_content = self._serialize.body(parameters, 'GetCallbackUrlParameters')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('WorkflowTriggerCallbackUrl', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
list_callback_url.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/workflows/{workflowName}/versions/{versionId}/triggers/{triggerName}/listCallbackUrl'}
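# Illustrative usage sketch (not from the generated sources). The operations group is
# normally reached through a Logic management client; the client name below is an
# assumption, while the positional arguments mirror list_callback_url() above:
#
#   client = LogicManagementClient(credentials, "<SUBSCRIPTION_ID>")
#   callback = client.workflow_version_triggers.list_callback_url(
#       "<RESOURCE_GROUP>", "<WORKFLOW_NAME>", "<VERSION_ID>", "<TRIGGER_NAME>")
#   print(callback.value)  # the trigger callback URL returned by the service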
avg_line_length: 44.639344 | max_line_length: 221 | alphanum_fraction: 0.672971

hexsha: df9036b197570a80a809da1c56061dc6f1f4a3ca | size: 6,691 | ext: py | lang: Python
max_stars: repo_path=tests/ci/build_check.py | repo_name=newly12/ClickHouse | head_hexsha=e1c2e629d8c077193f951cdb02fac9c0b1631c65 | licenses=["Apache-2.0"] | count=1 | stars_event: 2021-11-25T08:42:06.000Z to 2021-11-25T08:42:06.000Z
max_issues: repo_path=tests/ci/build_check.py | repo_name=zhaoqiang75/ClickHouse | head_hexsha=c86b43c7efc1c0cc9c66eb3c7d1274326378ce1b | licenses=["Apache-2.0"] | count=null | issues_event: null
max_forks: repo_path=tests/ci/build_check.py | repo_name=zhaoqiang75/ClickHouse | head_hexsha=c86b43c7efc1c0cc9c66eb3c7d1274326378ce1b | licenses=["Apache-2.0"] | count=null | forks_event: null
content:
#!/usr/bin/env python3
#
import subprocess
import logging
import json
import os
import sys
import time
from github import Github
from s3_helper import S3Helper
from pr_info import PRInfo
from get_robot_token import get_best_robot_token
from version_helper import get_version_from_repo, update_version_local
from ccache_utils import get_ccache_if_not_exists, upload_ccache
from ci_config import build_config_to_string, CI_CONFIG
from docker_pull_helper import get_image_with_version
def get_build_config(build_check_name, build_number):
if build_check_name == 'ClickHouse build check (actions)':
build_config_name = 'build_config'
elif build_check_name == 'ClickHouse special build check (actions)':
build_config_name = 'special_build_config'
else:
raise Exception(f"Unknown build check name {build_check_name}")
return CI_CONFIG[build_config_name][build_number]
def _can_export_binaries(build_config):
if build_config['package_type'] != 'deb':
return False
if build_config['bundled'] != "bundled":
return False
if build_config['splitted'] == 'splitted':
return False
if build_config['sanitizer'] != '':
return True
if build_config['build_type'] != '':
return True
return False
def get_packager_cmd(build_config, packager_path, output_path, build_version, image_version, ccache_path, pr_info):
package_type = build_config['package_type']
comp = build_config['compiler']
cmd = f"cd {packager_path} && ./packager --output-dir={output_path} --package-type={package_type} --compiler={comp}"
if build_config['build_type']:
cmd += ' --build-type={}'.format(build_config['build_type'])
if build_config['sanitizer']:
cmd += ' --sanitizer={}'.format(build_config['sanitizer'])
if build_config['splitted'] == 'splitted':
cmd += ' --split-binary'
if build_config['tidy'] == 'enable':
cmd += ' --clang-tidy'
cmd += ' --cache=ccache'
cmd += ' --ccache_dir={}'.format(ccache_path)
if 'alien_pkgs' in build_config and build_config['alien_pkgs']:
if pr_info.number == 0 or 'release' in pr_info.labels:
cmd += ' --alien-pkgs rpm tgz'
cmd += ' --docker-image-version={}'.format(image_version)
cmd += ' --version={}'.format(build_version)
if _can_export_binaries(build_config):
cmd += ' --with-binaries=tests'
return cmd
def get_image_name(build_config):
if build_config['package_type'] != 'deb':
return 'clickhouse/binary-builder'
else:
return 'clickhouse/deb-builder'
def build_clickhouse(packager_cmd, logs_path):
build_log_path = os.path.join(logs_path, 'build_log.log')
with open(build_log_path, 'w') as log_file:
retcode = subprocess.Popen(packager_cmd, shell=True, stderr=log_file, stdout=log_file).wait()
if retcode == 0:
logging.info("Built successfully")
else:
logging.info("Build failed")
return build_log_path, retcode == 0
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
repo_path = os.getenv("REPO_COPY", os.path.abspath("../../"))
temp_path = os.getenv("TEMP_PATH", os.path.abspath("."))
caches_path = os.getenv("CACHES_PATH", temp_path)
build_check_name = sys.argv[1]
build_number = int(sys.argv[2])
build_config = get_build_config(build_check_name, build_number)
if not os.path.exists(temp_path):
os.makedirs(temp_path)
with open(os.getenv('GITHUB_EVENT_PATH'), 'r') as event_file:
event = json.load(event_file)
pr_info = PRInfo(event)
logging.info("Repo copy path %s", repo_path)
gh = Github(get_best_robot_token())
image_name = get_image_name(build_config)
docker_image = get_image_with_version(os.getenv("IMAGES_PATH"), image_name)
image_version = docker_image.version
version = get_version_from_repo(repo_path)
version.tweak_update()
update_version_local(repo_path, pr_info.sha, version)
build_name = build_config_to_string(build_config)
logging.info("Build short name %s", build_name)
subprocess.check_call(f"echo 'BUILD_NAME=build_urls_{build_name}' >> $GITHUB_ENV", shell=True)
build_output_path = os.path.join(temp_path, build_name)
if not os.path.exists(build_output_path):
os.makedirs(build_output_path)
ccache_path = os.path.join(caches_path, build_name + '_ccache')
s3_helper = S3Helper('https://s3.amazonaws.com')
logging.info("Will try to fetch cache for our build")
get_ccache_if_not_exists(ccache_path, s3_helper, pr_info.number, temp_path)
if not os.path.exists(ccache_path):
logging.info("cache was not fetched, will create empty dir")
os.makedirs(ccache_path)
packager_cmd = get_packager_cmd(build_config, os.path.join(repo_path, "docker/packager"), build_output_path, version.get_version_string(), image_version, ccache_path, pr_info)
logging.info("Going to run packager with %s", packager_cmd)
build_clickhouse_log = os.path.join(temp_path, "build_log")
if not os.path.exists(build_clickhouse_log):
os.makedirs(build_clickhouse_log)
start = time.time()
log_path, success = build_clickhouse(packager_cmd, build_clickhouse_log)
elapsed = int(time.time() - start)
subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {build_output_path}", shell=True)
subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {ccache_path}", shell=True)
logging.info("Build finished with %s, log path %s", success, log_path)
logging.info("Will upload cache")
upload_ccache(ccache_path, s3_helper, pr_info.number, temp_path)
s3_path_prefix = str(pr_info.number) + "/" + pr_info.sha + "/" + build_check_name.lower().replace(' ', '_') + "/" + build_name
if os.path.exists(log_path):
log_url = s3_helper.upload_build_file_to_s3(log_path, s3_path_prefix + "/" + os.path.basename(log_path))
logging.info("Log url %s", log_url)
else:
log_url = ''
logging.info("Build log doesn't exist")
build_urls = s3_helper.upload_build_folder_to_s3(build_output_path, s3_path_prefix, keep_dirs_in_s3_path=False, upload_symlinks=False)
logging.info("Got build URLs %s", build_urls)
print("::notice ::Build URLs: {}".format('\n'.join(build_urls)))
result = {
"log_url": log_url,
"build_urls": build_urls,
"build_config": build_config,
"elapsed_seconds": elapsed,
"status": success,
}
print("::notice ::Log URL: {}".format(log_url))
with open(os.path.join(temp_path, "build_urls_" + build_name + '.json'), 'w') as build_links:
json.dump(result, build_links)
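# Illustrative invocation (not from the upstream repo). The script is driven by CI
# environment variables plus two positional arguments, the check name and the index into
# the corresponding CI_CONFIG build list; the paths below are placeholders:
#
#   REPO_COPY=/path/to/ClickHouse TEMP_PATH=/tmp/build CACHES_PATH=/tmp/caches \
#   IMAGES_PATH=/tmp/images GITHUB_EVENT_PATH=/tmp/event.json \
#   python3 build_check.py 'ClickHouse build check (actions)' 0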
avg_line_length: 36.966851 | max_line_length: 179 | alphanum_fraction: 0.698401

hexsha: a92071e10b429d8d42cc08aec09974708a8635c5 | size: 4,350 | ext: py | lang: Python
max_stars: repo_path=backend/dataset/publish/repository/tts.py | repo_name=agupta54/ulca | head_hexsha=c1f570ac254ce2ac73f40c49716458f4f7cbaee2 | licenses=["MIT"] | count=3 | stars_event: 2022-01-12T06:51:51.000Z to 2022-02-23T18:54:33.000Z
max_issues: repo_path=backend/dataset/publish/repository/tts.py | repo_name=agupta54/ulca | head_hexsha=c1f570ac254ce2ac73f40c49716458f4f7cbaee2 | licenses=["MIT"] | count=6 | issues_event: 2021-08-31T19:21:26.000Z to 2022-01-03T05:53:42.000Z
max_forks: repo_path=backend/dataset/publish/repository/tts.py | repo_name=agupta54/ulca | head_hexsha=c1f570ac254ce2ac73f40c49716458f4f7cbaee2 | licenses=["MIT"] | count=8 | forks_event: 2021-08-12T08:07:49.000Z to 2022-01-25T04:40:51.000Z
content:
import logging
from collections import OrderedDict
from datetime import datetime
from logging.config import dictConfig
from bson import ObjectId
from configs.configs import db_cluster, db, tts_collection
import pymongo
log = logging.getLogger('file')
mongo_instance_tts = None
class TTSRepo:
def __init__(self):
pass
# Method to set TTS Mongo DB collection
def set_tts_collection(self):
if "localhost" not in db_cluster:
log.info(f'Setting the Mongo TTS DS Shard Cluster up..... | {datetime.now()}')
client = pymongo.MongoClient(db_cluster)
ulca_db = client[db]
ulca_db.drop_collection(tts_collection)
ulca_col = ulca_db[tts_collection]
ulca_col.create_index([("tags", -1)])
db_cli = client.admin
key = OrderedDict([("_id", "hashed")])
db_cli.command({'shardCollection': f'{db}.{tts_collection}', 'key': key})
log.info(f'Done! | {datetime.now()}')
else:
log.info(f'Setting the Mongo DB Local for TTS DS.... | {datetime.now()}')
client = pymongo.MongoClient(db_cluster)
ulca_db = client[db]
ulca_db.drop_collection(tts_collection)
ulca_col = ulca_db[tts_collection]
ulca_col.create_index([("tags", -1)])
log.info(f'Done! | {datetime.now()}')
# Initialises and fetches mongo db client
def instantiate(self):
global mongo_instance_tts
client = pymongo.MongoClient(db_cluster)
mongo_instance_tts = client[db][tts_collection]
return mongo_instance_tts
def get_mongo_instance(self):
global mongo_instance_tts
if not mongo_instance_tts:
return self.instantiate()
else:
return mongo_instance_tts
def insert(self, data):
col = self.get_mongo_instance()
col.insert_many(data)
return len(data)
def update(self, object_in):
col = self.get_mongo_instance()
try:
object_in["_id"] = ObjectId(object_in["_id"])
col.replace_one({"_id": object_in["_id"]}, object_in, False)
except Exception as e:
log.exception(f"Exception while updating: {e}", e)
def delete(self, rec_id):
col = self.get_mongo_instance()
col.delete_one({"id": rec_id})
def search(self, query, exclude, offset, res_limit):
try:
seconds, hours = 0, 0
col = self.get_mongo_instance()
if offset is None and res_limit is None:
if exclude:
res = col.find(query, exclude).sort([('_id', 1)])
else:
res = col.find(query).sort([('_id', 1)])
else:
if exclude:
res = col.find(query, exclude).sort([('_id', -1)]).skip(offset).limit(res_limit)
else:
res = col.find(query).sort([('_id', -1)]).skip(offset).limit(res_limit)
result = []
for record in res:
if "_id" in record.keys():
record["_id"] = str(record["_id"])
if 'durationInSeconds' in record.keys():
seconds += record["durationInSeconds"]
result.append(record)
if seconds != 0:
hours = seconds/3600
return result, round(hours, 3)
except Exception as e:
log.exception(e)
return [], 0
# Log config
dictConfig({
'version': 1,
'formatters': {'default': {
'format': '[%(asctime)s] {%(filename)s:%(lineno)d} %(threadName)s %(levelname)s in %(module)s: %(message)s',
}},
'handlers': {
'info': {
'class': 'logging.FileHandler',
'level': 'DEBUG',
'formatter': 'default',
'filename': 'info.log'
},
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'default',
'stream': 'ext://sys.stdout',
}
},
'loggers': {
'file': {
'level': 'DEBUG',
'handlers': ['info', 'console'],
'propagate': ''
}
},
'root': {
'level': 'DEBUG',
'handlers': ['info', 'console']
}
})
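# Illustrative usage sketch (not from the upstream repo); it assumes the MongoDB settings
# in configs.configs point at a reachable instance, and the inserted record is a placeholder.
if __name__ == "__main__":
    repo = TTSRepo()
    repo.set_tts_collection()  # drops, recreates and indexes the collection
    inserted = repo.insert([{"id": "rec-1", "tags": ["hi", "female"], "durationInSeconds": 4.2}])
    records, hours = repo.search({"tags": "hi"}, None, 0, 10)
    print(inserted, len(records), hours)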
avg_line_length: 32.706767 | max_line_length: 116 | alphanum_fraction: 0.54

hexsha: 6309e90ee4144a8109840fbeeb1b9e5cd77209c4 | size: 2,948 | ext: py | lang: Python
max_stars: repo_path=yolox/utils/allreduce_norm.py | repo_name=mrzhuzhe/YOLOX | head_hexsha=ffb5e3f70a181bc75c045faa7e7f4668d79d629a | licenses=["Apache-2.0"] | count=1 | stars_event: 2022-01-29T15:47:50.000Z to 2022-01-29T15:47:50.000Z
max_issues: repo_path=yolox/utils/allreduce_norm.py | repo_name=mrzhuzhe/YOLOX | head_hexsha=ffb5e3f70a181bc75c045faa7e7f4668d79d629a | licenses=["Apache-2.0"] | count=null | issues_event: null
max_forks: repo_path=yolox/utils/allreduce_norm.py | repo_name=mrzhuzhe/YOLOX | head_hexsha=ffb5e3f70a181bc75c045faa7e7f4668d79d629a | licenses=["Apache-2.0"] | count=null | forks_event: null
content:
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import pickle
from collections import OrderedDict
import torch
from torch import distributed as dist
from torch import nn
from .dist import _get_global_gloo_group, get_world_size
ASYNC_NORM = (
nn.BatchNorm1d,
nn.BatchNorm2d,
nn.BatchNorm3d,
nn.InstanceNorm1d,
nn.InstanceNorm2d,
nn.InstanceNorm3d,
)
__all__ = [
"get_async_norm_states",
"pyobj2tensor",
"tensor2pyobj",
"all_reduce",
"all_reduce_norm",
]
def get_async_norm_states(module):
async_norm_states = OrderedDict()
for name, child in module.named_modules():
if isinstance(child, ASYNC_NORM):
for k, v in child.state_dict().items():
async_norm_states[".".join([name, k])] = v
return async_norm_states
def pyobj2tensor(pyobj, device="cuda"):
"""serialize picklable python object to tensor"""
storage = torch.ByteStorage.from_buffer(pickle.dumps(pyobj))
return torch.ByteTensor(storage).to(device=device)
def tensor2pyobj(tensor):
"""deserialize tensor to picklable python object"""
return pickle.loads(tensor.cpu().numpy().tobytes())
def _get_reduce_op(op_name):
return {
"sum": dist.ReduceOp.SUM,
"mean": dist.ReduceOp.SUM,
}[op_name.lower()]
def all_reduce(py_dict, op="sum", group=None):
"""
Apply all reduce function for python dict object.
NOTE: make sure that every py_dict has the same keys and values are in the same shape.
Args:
py_dict (dict): dict to apply all reduce op.
op (str): operator, could be "sum" or "mean".
"""
world_size = get_world_size()
if world_size == 1:
return py_dict
if group is None:
group = _get_global_gloo_group()
if dist.get_world_size(group) == 1:
return py_dict
# all reduce logic across different devices.
py_key = list(py_dict.keys())
py_key_tensor = pyobj2tensor(py_key)
dist.broadcast(py_key_tensor, src=0)
py_key = tensor2pyobj(py_key_tensor)
tensor_shapes = [py_dict[k].shape for k in py_key]
tensor_numels = [py_dict[k].numel() for k in py_key]
flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key])
dist.all_reduce(flatten_tensor, op=_get_reduce_op(op))
if op == "mean":
flatten_tensor /= world_size
split_tensors = [
x.reshape(shape)
for x, shape in zip(torch.split(flatten_tensor, tensor_numels), tensor_shapes)
]
return OrderedDict({k: v for k, v in zip(py_key, split_tensors)})
def all_reduce_norm(module):
"""
All reduce norm statistics in different devices.
"""
states = get_async_norm_states(module)
states = all_reduce(states, op="mean")
module.load_state_dict(states, strict=False)
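# Illustrative usage sketch (not from the upstream repo). all_reduce_norm() only makes
# sense inside an initialized torch.distributed job (e.g. one launched with torchrun);
# the model below is a stand-in.
if __name__ == "__main__":
    dist.init_process_group(backend="nccl")
    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8)).cuda()
    # ... training would run here, so each rank accumulates different BN statistics ...
    # Average the BatchNorm/InstanceNorm buffers across ranks before evaluation.
    all_reduce_norm(model)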
avg_line_length: 28.346154 | max_line_length: 91 | alphanum_fraction: 0.652307

hexsha: 11ebf3ce01d0c64ed7827ee2f83fd98d5b9fcc75 | size: 273 | ext: py | lang: Python
max_stars: repo_path=setup.py | repo_name=DucTranVan/grasp-detection-pytorch | head_hexsha=720b395b7d01f058de35a8420773b0a5905fe110 | licenses=["MIT"] | count=5 | stars_event: 2021-08-03T07:18:25.000Z to 2022-02-05T18:15:39.000Z
max_issues: repo_path=setup.py | repo_name=DucTranVan/grasp-detection-pytorch | head_hexsha=720b395b7d01f058de35a8420773b0a5905fe110 | licenses=["MIT"] | count=1 | issues_event: 2021-08-04T10:51:18.000Z to 2021-08-04T18:03:02.000Z
max_forks: repo_path=setup.py | repo_name=DucTranVan/grasp-detection-pytorch | head_hexsha=720b395b7d01f058de35a8420773b0a5905fe110 | licenses=["MIT"] | count=null | forks_event: null
content:
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Build a deep learning architecture to predict graspable locations for robotic manipulation.',
author='DucTran',
license='MIT',
)
avg_line_length: 24.818182 | max_line_length: 110 | alphanum_fraction: 0.710623

hexsha: 4746f3b60c0e0688895c7e09ef7540b73cec27be | size: 8,383 | ext: py | lang: Python
max_stars: repo_path=models/gan_normal.py | repo_name=chengyu0910/DeepFusion_IQA_V1.1 | head_hexsha=7c55f7629b24df00a8c37f82e6142c3a636a667b | licenses=["BSD-3-Clause"] | count=1 | stars_event: 2020-02-04T10:30:58.000Z to 2020-02-04T10:30:58.000Z
max_issues: repo_path=models/gan_normal.py | repo_name=chengyu0910/DeepFusion_IQA_V1.1 | head_hexsha=7c55f7629b24df00a8c37f82e6142c3a636a667b | licenses=["BSD-3-Clause"] | count=null | issues_event: null
max_forks: repo_path=models/gan_normal.py | repo_name=chengyu0910/DeepFusion_IQA_V1.1 | head_hexsha=7c55f7629b24df00a8c37f82e6142c3a636a667b | licenses=["BSD-3-Clause"] | count=2 | forks_event: 2020-02-03T11:01:28.000Z to 2022-03-18T06:46:36.000Z
content:
|
import numpy as np
import torch
import os
import itertools
from collections import OrderedDict
from torch.autograd import Variable
import util.util as util
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
from torchvision import transforms
from PIL import Image
import pdb
"""
Final version
netG: resnet (InstanceNorm, 9 blocks)
netDepth: Unet (InstanceNorm, 256, no LB)
netD: MultiD (46 + 256)
"""
class gannoromalModel(BaseModel):
def name(self):
return 'gannormalnet'
def initialize(self, opt):
BaseModel.initialize(self, opt)
self.isTrain = opt.isTrain
# define tensors
self.input_A = self.Tensor(opt.batchSize, opt.input_nc,
opt.fineSize, opt.fineSize)
self.input_B = self.Tensor(opt.batchSize, opt.output_nc,
opt.fineSize, opt.fineSize)
# load/define networks
print(self.gpu_ids)
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,
opt.which_model_netG, opt.norm, not opt.no_dropout, self.gpu_ids,
non_linearity=opt.non_linearity, pooling=opt.pooling)
if self.isTrain:
use_sigmoid = opt.no_lsgan
self.netD = networks.define_D(opt.output_nc, opt.ndf,
opt.which_model_netD,
opt.n_layers_D, opt.norm, use_sigmoid, self.gpu_ids)
if not self.isTrain or opt.continue_train:
self.load_network(self.netG, 'G', opt.which_epoch)
if self.isTrain:
self.load_network(self.netD, 'D', opt.which_epoch)
if self.isTrain:
self.fake_B_pool = ImagePool(opt.pool_size)
self.old_lr = opt.lr
# define loss functions
self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan, tensor=self.Tensor)
self.criterionL1 = torch.nn.L1Loss()
self.criterionL2 = torch.nn.MSELoss()
self.criterionTV = networks.TVLoss()
# initialize optimizers
self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
lr=opt.lr, betas=(opt.beta1, 0.999))
print('---------- Networks initialized -------------')
networks.print_network(self.netG)
if self.isTrain:
networks.print_network(self.netD)
print('-----------------------------------------------')
def set_input(self, input):
AtoB = self.opt.which_direction == 'AtoB'
input_A = input['A' if AtoB else 'B']
input_B = input['B' if AtoB else 'A']
self.input_A.resize_(input_A.size()).copy_(input_A)
self.input_B.resize_(input_B.size()).copy_(input_B)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
# print(type(input_A))
# print(type(self.input_A))
# print(input_A.size)
# print(self.input_A.size)
def forward(self):
self.real_A = Variable(self.input_A)
self.fake_B = self.netG.forward(self.real_A)
self.real_B = Variable(self.input_B)
print('image sizes')
print(self.input_A.size())
print(self.fake_B.size())
#self.pre_filter, self.depth = self.netDepth.forward(self.real_A)
# recover B according to depth
#self.fake_B2 = util.reverse_matting(self.real_A, self.depth)
# reconstruct A based on optical model
#self.fake_A = util.synthesize_matting(self.fake_B, self.depth)
# no backprop gradients
def test(self):
self.real_A = Variable(self.input_A, volatile=True)
self.fake_B = self.netG.forward(self.real_A)
self.real_B = Variable(self.input_B, volatile=True)
#self.pre_filter, self.depth = self.netDepth.forward(self.real_A)
# recover B according to depth
#self.fake_B2 = util.reverse_matting(self.real_A, self.depth)
# reconstruct A based on optical model
#self.fake_A = util.synthesize_matting(self.fake_B, self.depth)
# get image paths
def get_image_paths(self):
return self.image_paths
def backward_D_basic(self, netD, real, fake):
# Fake
# stop backprop to the generator by detaching fake_B
# pred1_fake, pred2_fake = netD.forward(fake.detach())
# loss_D_fake = 0.5*(self.criterionGAN(pred1_fake, False) + self.criterionGAN(pred2_fake, False))
pred1_fake = netD.forward(fake.detach())  # detach() cuts the fake tensor off from the computation graph, so computing D's gradients does not backpropagate into G
loss_D_fake = self.criterionGAN(pred1_fake, False)
# Real
# pred1_real, pred2_real = netD.forward(real)
# loss_D_real = 0.5*(self.criterionGAN(pred1_real, True) + self.criterionGAN(pred2_real, True))
pred1_real = netD.forward(real)
loss_D_real = self.criterionGAN(pred1_real, True)
# Combined loss
loss_D = loss_D_fake + loss_D_real
loss_D.backward()
return loss_D
def backward_D(self):
fake_B = self.fake_B_pool.query(self.fake_B)
self.loss_D = self.backward_D_basic(self.netD, self.real_B, fake_B)  # use the pooled fake so the history buffer takes effect
def backward_G(self):
# First, G(A) should fake the discriminator
# pred1_fake_B, pred2_fake_B = self.netD.forward(self.fake_B)
# self.loss_G_B = 0.5*(self.criterionGAN(pred1_fake_B, True) + self.criterionGAN(pred2_fake_B, True))
pred1_fake_B= self.netD.forward(self.fake_B)
self.loss_G_B = self.criterionGAN(pred1_fake_B, True)
self.loss_G_L2 = self.criterionL2(self.fake_B,self.real_B)
self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B)
# Second, L1 loss for reconstruction
#self.loss_G_L1 = self.criterionL1(self.fake_A, self.real_A) * self.opt.lambda_A
# Third, total variance loss
#self.loss_TV = self.criterionTV(self.depth) * self.opt.lambda_TV
#self.loss_G = self.loss_G_L1 + self.loss_G_B + self.loss_TV
self.loss_G = self.loss_G_B + self.loss_G_L1 + self.loss_G_L2
self.loss_G.backward()
def optimize_parameters(self):
self.forward()
print('update D')
self.optimizer_D.zero_grad()
self.backward_D()
self.optimizer_D.step()
print('update G')
self.optimizer_G.zero_grad()
self.backward_G()
self.optimizer_G.step()
def get_current_errors(self):
return OrderedDict([('G_B', self.loss_G_B.data[0]),
('G_L1', self.loss_G_L1.data[0]),
('D', self.loss_D.data[0])
])
def get_current_visuals(self):
real_A = util.tensor2im(self.real_A.data)
fake_B = util.tensor2im(self.fake_B.data)
# if self.pre_filter is None:
# pre_filter = util.tensor2im(self.depth.data)
# else:
# pre_filter = util.tensor2im(self.pre_filter.data)
#fake_depth = util.tensor2im(self.depth.data)
real_B = util.tensor2im(self.real_B.data)
#real_depth = util.tensor2im(self.input_C)
#fake_A = util.tensor2im(self.fake_A.data)
#fake_B2 = util.tensor2im(self.fake_B2.data)
# return OrderedDict([('Hazy', real_A), ('Haze-free', fake_B), ('Haze-free-depth', fake_B2), ('pre_filter', pre_filter), ('Estimate_depth', fake_depth),
# ('recover', fake_A), ('real_depth', real_depth), ('real_B', real_B)])
return OrderedDict([('Hazy', real_A), ('Haze-free', fake_B), ('real_B', real_B)])
def save(self, label):
self.save_network(self.netG, 'G', label, self.gpu_ids)
self.save_network(self.netD, 'D', label, self.gpu_ids)
def update_learning_rate(self):
lrd = self.opt.lr / self.opt.niter_decay
lr = self.old_lr - lrd
for param_group in self.optimizer_D.param_groups:
param_group['lr'] = lr
for param_group in self.optimizer_G.param_groups:
param_group['lr'] = lr
print('update learning rate: %f -> %f' % (self.old_lr, lr))
self.old_lr = lr
| 39.356808
| 160
| 0.613146
|
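backward_D_basic() in the record above follows the standard pix2pix-style discriminator update: the fake image is detached so no gradient reaches the generator, and the real and fake GAN losses are summed before backward(). A self-contained sketch of the same pattern, assuming a toy discriminator and an LSGAN-style MSE criterion:

import torch
import torch.nn as nn

netD = nn.Sequential(nn.Conv2d(3, 1, 4, 2, 1))                 # stand-in discriminator
criterion = nn.MSELoss()                                       # LSGAN-style criterion
real = torch.randn(2, 3, 64, 64)
fake = torch.randn(2, 3, 64, 64, requires_grad=True)           # pretend generator output

pred_fake = netD(fake.detach())                                 # detach: no grad into G
loss_fake = criterion(pred_fake, torch.zeros_like(pred_fake))
pred_real = netD(real)
loss_real = criterion(pred_real, torch.ones_like(pred_real))
(loss_fake + loss_real).backward()                              # gradients land only on netD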
ead3fc0309a5e3fcacf8f4e9ec0702a9dbbef34d
| 1,560
|
py
|
Python
|
LeetCode/weekly-contest-126-2019.10.17/commonChars_1002.py
|
Max-PJB/python-learning2
|
e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd
|
[
"MIT"
] | null | null | null |
LeetCode/weekly-contest-126-2019.10.17/commonChars_1002.py
|
Max-PJB/python-learning2
|
e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd
|
[
"MIT"
] | null | null | null |
LeetCode/weekly-contest-126-2019.10.17/commonChars_1002.py
|
Max-PJB/python-learning2
|
e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : pengj
@ date : 2019/10/17 10:26
@ IDE : PyCharm
@ GitHub : https://github.com/JackyPJB
@ Contact : pengjianbiao@hotmail.com
-------------------------------------------------
Description : 1002. Find Common Characters
Users accepted: 301
Users attempted: 324
Accepted submissions: 303
Total submissions: 480
Difficulty: Easy
Given an array A of strings made only of lowercase letters, return a list of all characters that appear in every string in the list (including duplicates). For example, if a character occurs 3 times in each string but not 4 times, include that character 3 times in the final answer.
You may return the answer in any order.
Example 1:
Input: ["bella","label","roller"]
Output: ["e","l","l"]
Example 2:
Input: ["cool","lock","cook"]
Output: ["c","o"]
Constraints:
1 <= A.length <= 100
1 <= A[i].length <= 100
A[i][j] is a lowercase letter
-------------------------------------------------
"""
import time
from functools import reduce
from typing import List
import collections
__author__ = 'Max_Pengjb'
start_time = time.time()
# the code block goes below
class Solution:
def commonChars(self, A: List[str]) -> List[str]:
res = collections.Counter(A[0])
for i in range(1, len(A)):
tmp = {}
for word in A[i]:
if tmp.setdefault(word, 0) < res.setdefault(word, 0):
tmp[word] = tmp.setdefault(word, 0) + 1
res = tmp
return sorted(reduce(lambda x, y: x + y[0] * y[1], res.items(), ""))
print("a" * 0)
aa = ["cool","lock","cook"]
print("cool".count("o"))
r = Solution().commonChars(aa)
print(r)
# the code block goes above
end_time = time.time()
print('Running time: %s Seconds' % (end_time - start_time))
| 21.971831
| 103
| 0.526923
|
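The loop in commonChars() above rebuilds, word by word, the per-character minimum count against the running result. collections.Counter expresses the same idea directly through its & (intersection) operator; a short equivalent sketch:

from collections import Counter
from functools import reduce
from typing import List

def common_chars(words: List[str]) -> List[str]:
    # Counter intersection keeps, per character, the minimum count across words.
    common = reduce(lambda acc, w: acc & Counter(w), words[1:], Counter(words[0]))
    return sorted(common.elements())

print(common_chars(["bella", "label", "roller"]))   # ['e', 'l', 'l']
print(common_chars(["cool", "lock", "cook"]))       # ['c', 'o']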
34fbe3a8a7190b8b9a805d732bd92428e4338ced
| 1,838
|
py
|
Python
|
dizoo/mujoco/config/walker2d_sac_default_config.py
|
Hcnaeg/DI-engine
|
aba0c629f87649854091e9e59d948f83962e3e1e
|
[
"Apache-2.0"
] | 464
|
2021-07-08T07:26:33.000Z
|
2022-03-31T12:35:16.000Z
|
dizoo/mujoco/config/walker2d_sac_default_config.py
|
Hcnaeg/DI-engine
|
aba0c629f87649854091e9e59d948f83962e3e1e
|
[
"Apache-2.0"
] | 177
|
2021-07-09T08:22:55.000Z
|
2022-03-31T07:35:22.000Z
|
dizoo/mujoco/config/walker2d_sac_default_config.py
|
Hcnaeg/DI-engine
|
aba0c629f87649854091e9e59d948f83962e3e1e
|
[
"Apache-2.0"
] | 92
|
2021-07-08T12:16:37.000Z
|
2022-03-31T09:24:41.000Z
|
from easydict import EasyDict
walker2d_sac_default_config = dict(
exp_name='walker2d_sac',
env=dict(
env_id='Walker2d-v3',
norm_obs=dict(use_norm=False, ),
norm_reward=dict(use_norm=False, ),
collector_env_num=1,
evaluator_env_num=8,
use_act_scale=True,
n_evaluator_episode=8,
stop_value=6000,
),
policy=dict(
cuda=True,
random_collect_size=10000,
model=dict(
obs_shape=17,
action_shape=6,
twin_critic=True,
action_space='reparameterization',
actor_head_hidden_size=256,
critic_head_hidden_size=256,
),
learn=dict(
update_per_collect=1,
batch_size=256,
learning_rate_q=1e-3,
learning_rate_policy=1e-3,
learning_rate_alpha=3e-4,
ignore_done=False,
target_theta=0.005,
discount_factor=0.99,
alpha=0.2,
reparameterization=True,
auto_alpha=False,
),
collect=dict(
n_sample=1,
unroll_len=1,
),
command=dict(),
eval=dict(),
other=dict(replay_buffer=dict(replay_buffer_size=1000000, ), ),
),
)
walker2d_sac_default_config = EasyDict(walker2d_sac_default_config)
main_config = walker2d_sac_default_config
walker2d_sac_default_create_config = dict(
env=dict(
type='mujoco',
import_names=['dizoo.mujoco.envs.mujoco_env'],
),
env_manager=dict(type='base'),
policy=dict(
type='sac',
import_names=['ding.policy.sac'],
),
replay_buffer=dict(type='naive', ),
)
walker2d_sac_default_create_config = EasyDict(walker2d_sac_default_create_config)
create_config = walker2d_sac_default_create_config
| 27.848485
| 81
| 0.607182
|
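The config above is a plain nested dict wrapped in EasyDict, so individual fields can be read or overridden with attribute access before the training entry point consumes main_config and create_config. A small sketch of that access pattern (the override values below are arbitrary examples):

from copy import deepcopy

cfg = deepcopy(main_config)                 # main_config from the record above
cfg.policy.learn.batch_size = 128           # attribute-style override
cfg.env.collector_env_num = 4
print(cfg.policy.model.obs_shape)           # -> 17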
b6974f70a4274038fd3ebb9b582106bcf0534bf5
| 700
|
py
|
Python
|
scripts/tags/generate_mapstyle.py
|
rinigus/geocoder-nlp
|
9fa6fd16121caf70bbbe597c4dcc46e40d5655a2
|
[
"MIT"
] | 16
|
2017-01-21T08:01:00.000Z
|
2022-02-15T20:32:45.000Z
|
scripts/tags/generate_mapstyle.py
|
rinigus/geocoder-nlp
|
9fa6fd16121caf70bbbe597c4dcc46e40d5655a2
|
[
"MIT"
] | 58
|
2017-01-08T22:05:00.000Z
|
2022-03-23T17:07:38.000Z
|
scripts/tags/generate_mapstyle.py
|
rinigus/geocoder-nlp
|
9fa6fd16121caf70bbbe597c4dcc46e40d5655a2
|
[
"MIT"
] | 2
|
2017-09-01T20:51:44.000Z
|
2018-08-01T16:20:17.000Z
|
#!/usr/bin/env python3
import sqlite3
db = sqlite3.connect('taginfo-db.db')
c = db.cursor()
mapost = ""
whitelist = ""
keyvals = []
for r in c.execute("select key,value from tags where key='shop' order by count_all desc limit 50"):
key, value = r
# no need for these
if value in ['yes', 'no']:
continue
keyvals.append([key, value])
keyvals.sort()
for r in keyvals:
key, value = r
mapost += ' TYPE ' + key + '_' + value + '\n'
mapost += ' = NODE AREA ("%s"=="%s")\n' % (key, value)
mapost += ' {Name, NameAlt}\n ADDRESS POI\n GROUP ' + key + '\n\n'
whitelist += key + '_' + value + '\n'
print(mapost)
print(whitelist)
| 20
| 99
| 0.555714
|
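Each (key, value) row the script keeps contributes one TYPE block to mapost and one line to the whitelist. For the hypothetical pair ('shop', 'bakery') the generated block would look roughly like this (spacing as in the string literals above):

 TYPE shop_bakery
 = NODE AREA ("shop"=="bakery")
 {Name, NameAlt}
 ADDRESS POI
 GROUP shop

and the line shop_bakery would be appended to the whitelist.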
fa7289fc9c4392cfa5e4bf6d8792bbb750f4a933
| 267
|
py
|
Python
|
pyramid_stocks/pyramid_stocks/views/default.py
|
keitheck/pyramid-stocks
|
11ebed189c48ed9966bf95cbade59d793f04d9ca
|
[
"MIT"
] | null | null | null |
pyramid_stocks/pyramid_stocks/views/default.py
|
keitheck/pyramid-stocks
|
11ebed189c48ed9966bf95cbade59d793f04d9ca
|
[
"MIT"
] | 3
|
2019-12-26T16:42:45.000Z
|
2021-06-01T22:22:44.000Z
|
pyramid_stocks/pyramid_stocks/views/default.py
|
keitheck/pyramid-stocks
|
11ebed189c48ed9966bf95cbade59d793f04d9ca
|
[
"MIT"
] | null | null | null |
from pyramid.view import view_config
from pyramid.security import NO_PERMISSION_REQUIRED
@view_config(
route_name='home',
renderer='../templates/index.jinja2',
permission=NO_PERMISSION_REQUIRED)
def home_view(request):
"""index page"""
return {}
| 24.272727
| 51
| 0.741573
|
2a3aa996770ffe5f626157e59d33fdc1337bb465
| 1,154
|
py
|
Python
|
docs/source/notebooks/make-nblinks.py
|
cweniger/swyft
|
2c0ed514622a37e8ec4e406b99a8327ecafb7ab4
|
[
"MIT"
] | 2
|
2020-06-27T21:30:10.000Z
|
2020-08-07T07:25:53.000Z
|
docs/source/notebooks/make-nblinks.py
|
cweniger/swyft
|
2c0ed514622a37e8ec4e406b99a8327ecafb7ab4
|
[
"MIT"
] | null | null | null |
docs/source/notebooks/make-nblinks.py
|
cweniger/swyft
|
2c0ed514622a37e8ec4e406b99a8327ecafb7ab4
|
[
"MIT"
] | 1
|
2020-11-02T10:04:29.000Z
|
2020-11-02T10:04:29.000Z
|
import json
from pathlib import Path
IGNORE = [
".ipynb_checkpoints",
"Video",
]
def create_nblink(notebook_path):
d = {}
d["path"] = str(notebook_path)
d["extra-media"] = []
return d
def main():
root = Path(__file__).parent
# clean directory first
for old_nblink in root.glob("*.nblink"):
Path(old_nblink).unlink()
# add the relevant notebooks and names
relative_source = Path("../../../notebooks/")
source = Path(root, relative_source)
for nbpath in source.glob("*/"):
if any([ign in str(nbpath) for ign in IGNORE]):
continue
elif nbpath.is_dir():
continue
elif nbpath.suffix != ".ipynb":
continue
else:
target_path = Path(relative_source, f"{nbpath.stem}.ipynb")
nblink = create_nblink(target_path)
linkpath = Path(root, f"{nbpath.stem}.nblink")
with open(linkpath, "w") as f:
print("Creating path for", target_path, "Located at", linkpath)
json.dump(nblink, f)
f.writelines("\n")
if __name__ == "__main__":
main()
| 25.644444
| 79
| 0.570191
|
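Each .nblink file written above contains only the small JSON document returned by create_nblink(). For a hypothetical notebook named quickstart.ipynb, the resulting quickstart.nblink would hold roughly:

{"path": "../../../notebooks/quickstart.ipynb", "extra-media": []}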
64fccf49c988f6778c740aa54d540c67b5062957
| 6,968
|
py
|
Python
|
conda/api.py
|
dfroger/conda
|
c0f99ff46b217d081501e66f4dcd7bcdb5d9c6aa
|
[
"BSD-3-Clause"
] | null | null | null |
conda/api.py
|
dfroger/conda
|
c0f99ff46b217d081501e66f4dcd7bcdb5d9c6aa
|
[
"BSD-3-Clause"
] | null | null | null |
conda/api.py
|
dfroger/conda
|
c0f99ff46b217d081501e66f4dcd7bcdb5d9c6aa
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import print_function, division, absolute_import
import os
from collections import defaultdict
from os.path import isdir, join
from operator import itemgetter
from conda import config
from conda import install
from conda.fetch import fetch_index
from conda.compat import iteritems, itervalues
from conda.resolve import Package, Resolve
def _name_fn(fn):
assert fn.endswith('.tar.bz2')
return install.name_dist(fn[:-8])
def _fn2spec(fn):
assert fn.endswith('.tar.bz2')
return ' '.join(fn[:-8].rsplit('-', 2)[:2])
def _fn2fullspec(fn):
assert fn.endswith('.tar.bz2')
return ' '.join(fn[:-8].rsplit('-', 2))
def get_index(channel_urls=(), prepend=True, platform=None,
use_cache=False, unknown=False, offline=False,
prefix=None):
"""
Return the index of packages available on the channels
If prepend=False, only the channels passed in as arguments are used.
If platform=None, then the current platform is used.
If prefix is supplied, then the packages installed in that prefix are added.
"""
channel_urls = config.normalize_urls(channel_urls, platform, offline)
if prepend:
pri0 = max(itervalues(channel_urls), key=itemgetter(1))[1] if channel_urls else 0
for url, rec in iteritems(config.get_channel_urls(platform, offline)):
channel_urls[url] = (rec[0], rec[1] + pri0)
index = fetch_index(channel_urls, use_cache=use_cache, unknown=unknown)
if prefix:
for dist, info in iteritems(install.linked_data(prefix)):
fn = dist + '.tar.bz2'
channel = info.get('channel', '')
if channel not in channel_urls:
channel_urls[channel] = (config.canonical_channel_name(channel, True, True), 0)
url_s, priority = channel_urls[channel]
key = url_s + '::' + fn if url_s else fn
if key not in index:
# only if the package is not in the repodata, use local
# conda-meta (with 'depends' defaulting to [])
info.setdefault('depends', [])
info['fn'] = fn
info['schannel'] = url_s
info['channel'] = channel
info['url'] = channel + fn
info['priority'] = priority
index[key] = info
return index
##########################################################################
# NOTE: All functions starting with 'app_' in this module are deprecated
# and should no longer be used.
##########################################################################
def app_get_index(all_version=False):
"""
return the index of available applications on the channels
By default only the latest version of each app is included in the result,
unless all_version is set to True.
"""
import sys
pyxx = 'py%d%d' % sys.version_info[:2]
def filter_build(build):
return bool(pyxx in build) if 'py' in build else True
index = {fn: info for fn, info in iteritems(get_index())
if info.get('type') == 'app' and filter_build(info['build'])}
if all_version:
return index
d = defaultdict(list) # name -> list of Package objects
for fn, info in iteritems(index):
d[_name_fn(fn)].append(Package(fn, info))
res = {}
for pkgs in itervalues(d):
pkg = max(pkgs)
res[pkg.fn] = index[pkg.fn]
return res
def app_get_icon_url(fn):
"""
return the URL belonging to the icon for application `fn`.
"""
from conda.misc import make_icon_url
index = get_index()
info = index[fn]
return make_icon_url(info)
def app_info_packages(fn, prefix=config.root_dir):
"""
given the filename of a package, return which packages (and their sizes)
still need to be downloaded, in order to install the package. That is,
the package itself and its dependencies.
Returns a list of tuples (pkg_name, pkg_version, size,
fetched? True or False).
"""
from conda.resolve import Resolve
index = get_index(prefix=prefix)
r = Resolve(index)
res = []
for fn2 in r.solve([_fn2fullspec(fn)], installed=install.linked(prefix)):
info = index[fn2]
if 'link' not in info:
res.append((info['name'], info['version'], info['size'],
any(install.is_fetched(pkgs_dir, fn2[:-8])
for pkgs_dir in config.pkgs_dirs)))
return res
def app_is_installed(fn, prefixes=None):
"""
Return the list of prefix directories in which `fn` is installed,
which might be an empty list.
"""
if prefixes is None:
prefixes = [config.root_dir]
for envs_dir in config.envs_dirs:
for fn2 in os.listdir(envs_dir):
prefix = join(envs_dir, fn2)
if isdir(prefix):
prefixes.append(prefix)
dist = fn[:-8]
return [p for p in prefixes if install.is_linked(p, dist)]
# It seems to me that we need different types of apps, i.e. apps which
# are preferably installed (or already exist) in existing environments,
# and apps which are more "standalone" (such as firefox).
def app_install(fn, prefix=config.root_dir):
"""
Install the application `fn` into prefix (which defaults to the root
environment).
"""
import conda.plan as plan
index = get_index(prefix=prefix)
actions = plan.install_actions(prefix, index, [_fn2spec(fn)])
plan.execute_actions(actions, index)
def app_launch(fn, prefix=config.root_dir, additional_args=None):
"""
Launch the application `fn` (with optional additional command line
arguments), in the prefix (which defaults to the root environment).
Returned is the process object (the one returned by subprocess.Popen),
or None if the application `fn` is not installed in the prefix.
"""
from conda.misc import launch
return launch(fn, prefix, additional_args)
def app_uninstall(fn, prefix=config.root_dir):
"""
Uninstall application `fn` (but not its dependencies).
Like `conda remove fn`.
"""
import conda.cli.common as common
import conda.plan as plan
index = get_index(prefix=prefix)
specs = [_fn2spec(fn)]
if (plan.is_root_prefix(prefix) and
common.names_in_specs(common.root_no_rm, specs)):
raise ValueError("Cannot remove %s from the root environment" %
', '.join(common.root_no_rm))
actions = plan.remove_actions(prefix, specs, index=index)
if plan.nothing_to_do(actions):
raise ValueError("Nothing to do")
plan.execute_actions(actions, index)
def get_package_versions(package, offline=False):
index = get_index(offline=offline)
r = Resolve(index)
return r.get_pkgs(package, emptyok=True)
if __name__ == '__main__':
for fn in app_get_index():
print('%s: %s' % (fn, app_is_installed(fn)))
| 33.661836
| 95
| 0.631745
|
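get_index() in the record above merges remote repodata with locally linked packages and tracks a per-channel priority; get_package_versions() then resolves a package name against that index. A hedged usage sketch of this internal, version-specific conda API, with a placeholder channel name:

from conda.api import get_index, get_package_versions

index = get_index(channel_urls=('conda-forge',), prepend=True)   # defaults plus one extra channel
for pkg in get_package_versions('numpy'):
    print(pkg.fn)                                                 # available package filenames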
159a31bdc066af1c8c4fc48dc709dccca7340277
| 20,638
|
py
|
Python
|
speedcom/utilities.py
|
emissible/emissilbe
|
5537e787ccb883a101d2d40b38d480e257ac9755
|
[
"MIT"
] | 1
|
2019-02-20T05:11:16.000Z
|
2019-02-20T05:11:16.000Z
|
speedcom/utilities.py
|
emissible/emissilbe
|
5537e787ccb883a101d2d40b38d480e257ac9755
|
[
"MIT"
] | null | null | null |
speedcom/utilities.py
|
emissible/emissilbe
|
5537e787ccb883a101d2d40b38d480e257ac9755
|
[
"MIT"
] | null | null | null |
import json
import math
import matplotlib
#matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
try:
import NNModels
except:
import speedcom.NNModels as NNModels
import numpy as np
import os
import pandas as pd
from rdkit import Chem
import rdkit.Chem.Draw as draw
#import speedcom
#import core
#import rdkit
#from rdkit import Chem
#from rdkit.Chem import AllChem
# from rdkit.ForceField.rdForceField import MMFFMolProperties as properties
def remove_deliminators(my_strings):
"""
Remove deliminators from numbers (i.e. commas) so as to be able
to process numbers as int or float types in place of strings.
Args:
-----
my_strings (list or np.ndarray) -- list of string
representations of numbers (i.e. ['1,306', '5,765']).
Returns:
--------
my_array (np.ndarray) -- array of floats.
"""
# Assertions
assert isinstance(my_strings, (list, np.ndarray)), \
'my_strings must be a list or a numpy array.'
my_array = []
for i in my_strings:
number = i
if ',' in i:
tmp = i.split(",")
number = ''.join(tmp)
try:
my_array.append(float(number))
except:
print('String ' + i + ' not able to be cast to float, characters'
+ " other than ',' or '.'?")
my_array = np.asarray(my_array)
return my_array
def remove_cations(SMILES):
"""
Removes periodic table group 1 and 7 counterions from the SMILES
strings.
Args:
-----
SMILES (str) -- the SMILES string representation of the
molecule.
Returns:
--------
SMILES (str) -- the string representation of the molecule with
the counterions omitted.
"""
# Assertions
assert isinstance(SMILES, str), 'the SMILES must be a string'
# Functionality
split_SMILES = SMILES.split(".")
ion_list = ['[Li+]', '[Na+]', '[K+]', '[Rb+]', '[Cs+]', '[Fr+]', '[F-]',
'[Cl-]', '[Br-]', '[I-]', '[At-]']
SMILES = [i for i in split_SMILES if i not in ion_list]
SMILES = '.'.join(SMILES)
return SMILES
def draw_molecule(SMILES, filename):
"""
Draws the 2D skeletal structure of a molecule using the rdkit
package, returning the output to a file.
Args:
-----
SMILES (str) -- a string representation of the molecule.
filename (str) -- the name of the desired output file
containing the .png file extension. This file type is
required for implementation in the front end.
"""
# Assertions
assert isinstance(SMILES, str), 'the SMILES must be a string'
assert isinstance(filename, str), 'the filename must be a string'
assert filename.endswith('.png'), 'filename must include .png extension'
# Functionality
mol = Chem.MolFromSmiles(SMILES)
Chem.Draw.MolToFile(mol, filename, kekulize=False, size=(200,200), fitimage=True)
return
def get_l_max(wavelength_intensity):
"""
Identifies and returns the wavelength of maximum intensity in a
2D array of wavelengths and intensities.
Args:
-----
wavelength_intensity (np.ndarray(2D), dtypes float)
-- a 2D array where the first coloumn contains the
wavelengths and the second column contains the intensities.
Returns:
--------
lambda_max (float) -- the wavelength of maximum intensity
"""
# Assertions
assert isinstance(wavelength_intensity, np.ndarray)
# Functionality
wavelength_intensity.view('f8,f8').sort(order=['f1'], axis = 0)
lambda_max = wavelength_intensity[-1][0]
return lambda_max
def get_em_max(clean_df, em_file_colname, prefix_dir):
"""
Retrieves the lambda max values from existing files in a list of
emission file names and appends them to a list, filling with
None if the file doesn't exist.
Args:
-----
clean_df (pandas.DataFrame) -- a df containing the cleaned data
em_file_colname (str) -- the name of column that contains the
emission filenames.
prefix_dir (str) -- string representing the directory in which
the original PhotoChemCAD data is contained.
Returns:
--------
emission (list) -- a list containing the lambda max values
"""
# Assertions
assert isinstance(clean_df, pd.DataFrame), 'Input must be a pandas df'
assert isinstance(em_file_colname, str), 'column name must be a string'
assert isinstance(prefix_dir, str), 'directory name must be a string'
# Functionality
from data_extract import get_spectra, get_peaks
emission=[]
for x in clean_df[em_file_colname].astype(str): #cast dtype to string
if x != 'nan':
em_max = get_l_max(get_peaks(get_spectra(os.path.join(prefix_dir,x))))
emission.append(em_max)
else:
emission.append(None)
return emission
def pad_ndarrays(input_dict):
"""
Pads out all arrays in the given input dictionary of arrays
with zeros so that they are all the same size as the
largest input array.
Args:
-----
input_dict (dict) -- input dictionary of arrays
Returns:
--------
input_dict (dict) -- the modified input dictionary,
where all arrays have been padded out.
"""
# Assertions
assert isinstance(input_dict, dict), \
'Wrong Type: input must be a dictionary'
# Functionality
lens_of_arrays = []
for array_i in input_dict.values():
lens_of_arrays.append(len(array_i))
max_len = max(lens_of_arrays)
for array_i_key in input_dict.keys():
array_i_len = len(input_dict[array_i_key])
if(array_i_len < max_len):
input_dict[array_i_key] = \
np.pad(input_dict[array_i_key], (0, max_len-array_i_len),
'constant').tolist()
return input_dict
def compute_fingerprints(df,SMILES_column='SMILES',key_name=None,radius=2, \
nBits=2048, use_features=False, padding=True, \
output_file=None):
"""
Compute the fingerprints for an input dataframe with all the SMILES, and
return the results as a dictionary or as a json txt file.
Args:
-----
df (pandas.DataFrame) -- an input dataframe with SMILES info
SMILES_column (str) -- the column name of SMILES
key_name (str) -- the column name for output dict key
radius (int) -- passed to the rdkit function, this
represents the number of atomic neighbours the finger-
print is evaluated for.
nBits (int) -- maximum number of bits for fingerprints
computation
use_features (bool) -- If True (default as False), use features to
compute fingerprints
padding (bool) -- If True (default), pad all the
fingerprints to the maximum length in the dictionary
with zeros
output_file (str) -- If None, return a dict. Otherwise
returns a json .txt file of filename given by the string.
Returns:
--------
fps_dict (dict) -- a dictionary containing the fingerprints
key -- name or index of the molecules
values -- a list of int
"""
#Assertions
assert isinstance(df, pd.DataFrame), \
'Wrong Type: input df must be a pandas dataframe'
assert isinstance(SMILES_column, str), \
'Wrong Type: column names must be a strings'
assert isinstance(key_name, (str, type(None))), \
'Wrong Type: key name must be a string or NoneType'
assert isinstance(radius, int), \
'Wrong Type: radius must be an integer'
assert isinstance(nBits, int), \
'Wrong Type: number of bits must be an integer'
assert nBits > 0, 'nBits must be a positive integer'
assert isinstance(use_features, bool), \
'Wrong Type: padding must be a bool'
assert isinstance(padding, bool), \
'Wrong Type: padding must be a bool'
assert isinstance(output_file, (str, type(None))), \
'Wrong Type: output_file must be a string or NoneType'
# Functionality
spD_engine = NNModels.Descriptors() # Initializing the Descriptors class
fps_dict = {}
for rowi_idx, rowi in df.iterrows():
spD_engine.set_molecule(rowi[SMILES_column])
rowi_fp = spD_engine.get_Morgan_fingerprint(radius,nBits,use_features)
if(key_name is not None):
rowi_idx = rowi[key_name]
fps_dict[rowi_idx] = rowi_fp
if(padding):
pad_ndarrays(fps_dict)
if(output_file is not None):
with open(output_file, 'w') as f:
f.write(json.dumps(fps_dict))
else:
return fps_dict
def compute_coulumb_matrixes(df,SMILES_column='SMILES', key_name=None, \
use_eigval=False, eig_sort=True, padding=True, \
output_file=None):
"""
Compute the Coulomb matrices for an input dataframe with all the SMILES,
and output the results as a dictionary or as a json txt file.
Sources:
Using Coulomb matrix for machine learning on chemical context:
Grégoire Montavon et al 2013 New J. Phys. 15 095003
Args:
-----
df (pandas.DataFrame) -- an input dataframe with SMILES info
SMILES_column (str) -- the column name of SMILES
key_name (str) -- the column name for output dict key
use_eigval (bool) -- (Default False) If True, will return
just the eigenvalues of the coulomb matrix for each
molecule. If False, returns the Coulomb matrix itself.
eig_sort (bool) -- If True (default), sort the coulomb
matrices with their eigenvalues.
padding (bool) -- If True (default), pad all the coulomb
matrices to the maximum length in the dictionary with zeros.
output_file (str) -- If None, return a dict. Otherwise,
return a json txt file of the name given by the string.
Returns:
--------
CMs_dict (dict) -- a dictionary whose values are the
Coulomb matrices (or their eigenvalues) of the molecules,
and the keys are the index or names of the molecules.
"""
#Assertions
assert isinstance(df, pd.DataFrame), \
'Wrong Type: input df must be a pandas dataframe'
assert isinstance(SMILES_column, str), \
'Wrong Type: column names must be a strings'
assert isinstance(key_name, (str, type(None))), \
'Wrong Type: key name must be a string or NoneType'
assert isinstance(use_eigval, bool), \
'Wrong Type: use_eigval must be a bool'
assert isinstance(eig_sort, bool), \
'Wrong Type: eig_sort must be a bool'
assert isinstance(padding, bool), \
'Wrong Type: padding must be a bool'
assert isinstance(output_file, (str, type(None))), \
'Wrong Type: output_file must be a string or NoneType'
# Functionality
spD_engine = NNModels.Descriptors() # Initializing the Descriptors class
CMs_dict = {}
for rowi_idx, rowi in df.iterrows():
spD_engine.set_molecule(rowi[SMILES_column])
# print(rowi_idx)
# print(rowi[key_name])
rowi_CM = spD_engine.get_coulomb_matrix(output_eigval=use_eigval)
if(key_name is not None):
rowi_idx = rowi[key_name]
else:
pass
CMs_dict[rowi_idx] = rowi_CM
if(padding):
pad_ndarrays(CMs_dict)
if(output_file is not None):
with open(output_file, 'w') as f:
f.write(json.dumps(CMs_dict))
else:
return CMs_dict
def compute_properties(df,SMILES_column='SMILES',index_name=None,
output_file=None):
"""
Compute the molecular properties for an input dataframe with all the SMILES, and
output the results as a csv txt file (exported by pandas)
Args:
-----
df (pandas.DataFrame) -- an input dataframe with SMILES info
SMILES_column (str) -- the column name of SMILES
index_name (str) -- the index name for output DataFrame
index.
output_file (str) -- string representing the desired name
of the output file. If None, return a dataframe. Otherwise,
outputs as a txt file (must include .txt extension in file
name), where values are comma separated.
Returns:
--------
prop_df (pandas.DataFrame) -- data frame containing the
molecule's properties.
"""
# Assertions
assert isinstance(df, pd.DataFrame), \
'Wrong Type: input df must be a pandas dataframe'
assert isinstance(SMILES_column, str), \
'Wrong Type: column names must be a strings'
assert isinstance(index_name, (str, type(None))), \
'Wrong Type: index name must be a string or NoneType'
assert isinstance(output_file, (str, type(None))), \
'Wrong Type: desired output file name must be a string'
if type(output_file) is str:
assert output_file.endswith('.txt'),\
'output_file string must include the .txt extension'
else:
pass
# Functionality
spD_engine = NNModels.Descriptors() # Initializing the Descriptor class
prop_df = pd.DataFrame()
for rowi_idx, rowi in df.iterrows():
spD_engine.set_molecule(rowi[SMILES_column])
rowi_prop = spD_engine.get_properties()
if(index_name is not None):
rowi_idx = rowi[index_name]
rowi_prop = pd.DataFrame.from_dict(rowi_prop, orient='index',
columns=[rowi_idx]).T
prop_df = prop_df.append(rowi_prop)
if(output_file is not None):
prop_df.to_csv(output_file)
else:
return prop_df
def compute_features(df,SMILES_column='SMILES',key_name=None, output_file=None):
"""
Compute the features for an input dataframe with all the
SMILES, and output the results as a json txt file (exported
with json.dumps).
Args:
-----
df (pandas.DataFrame) -- an input dataframe with SMILES info
SMILES_column (str) -- the column name of SMILES
key_name (str) -- the column name for output dict key
output_file (str) -- the string representation of the
desired file output name. If None, return a dictionary.
Otherwise, output an json txt file.
Returns (if output_file not specified):
--------
feats_dict (dict) -- a dictionary containing all the molecule's
features.
"""
# Assertions
assert isinstance(df, pd.DataFrame), \
'Wrong Type: input df must be a pandas dataframe'
assert isinstance(SMILES_column, str), \
'Wrong Type: column names must be a strings'
assert isinstance(key_name, (str, type(None))), \
'Wrong Type: key name must be a string or NoneType'
assert isinstance(output_file, (str, type(None))), \
'Wrong Type: desired output file name must be a string'
if type(output_file) is str:
assert output_file.endswith('.txt'),\
'output_file string must include the .txt extension'
else:
pass
# Functionality
spD_engine = NNModels.Descriptors() # Initializing the Descriptor class
feats_dict = {}
for rowi_idx, rowi in df.iterrows():
spD_engine.set_molecule(rowi[SMILES_column])
rowi_feat = spD_engine.get_features()
if(key_name is not None):
rowi_idx = rowi[key_name]
feats_dict[rowi_idx] = rowi_feat
if(output_file is not None):
with open(output_file, 'w') as f:
f.write(json.dumps(feats_dict))
else:
return feats_dict
def broaden_spectrum(spect, sigma):
"""
Broadens a peak defined in spect by the sigma factor and
returns the x and y data to plot.
Args:
----
spect (np.ndarray) -- input array containing the peak info for
the individual peak to be broadened.
sigma (float) -- gaussian broadening term for the peaks given.
Returns:
--------
plot_vals (list) -- a 2D array containing the x and y
values for plotting.
"""
# Assertions
# assert isinstance(spect, (np.ndarray, list)), \
# 'Input must be a list or a numpy array.'
# assert isinstance(sigma, float), \
# 'sigma value must be a float'
#min of the spectrum **FUTURE FEATURE**
#min_x = min(spect[0]) - 50.
min_x = spect[0] - 50
#max of the spectrum **FUTURE FEATURE**
#max_x = max(spect[0]) + 50.
max_x = spect[0] + 50
x = np.linspace(start=min_x, stop=max_x, num=10000)
y = [0. for k in range(len(x))]
for i in range(len(x)):
#**FUTURE FEATURE**
#for j in range(len(spect[0])):
# y[j] += spect[1][j] * math.exp(-0.5 * (((x[i] - spect[0][j]) ** 2\
# ) / sigma ** 2))
y[i] += spect[1] * math.exp(-0.5 * (((x[i] - spect[0]) ** 2) / \
sigma ** 2))
plot_vals = [x, y]
return plot_vals
def visualize(data, sigma=5.0, save_dir='../data/'):
"""
Generate the displayed emission and absorbance plot from the
information predicted by the model. Saves two files called
by the frontend, by the name of the input smiles string which
contain the absorption/emission spectrum as well as the
absorption and emission peaks. **NOTE** This takes in only one
row of a data frame at a time (only one molecule can be plotted
at once, hence the declared numpy array as input will work
with any list object as well as long as it follows the
structure detailed in the Args section below).
Args:
-----
data (list, np.ndarray) -- input array containing the info
for the molecule: [SMILES, absorbance wavelength,
absorbance intensity, emission wavelength, emission
intensity]
sigma (float) -- (Default 5.0 nm) float designating
the gaussian broadening term applied to the peaks given
to the input peak.
Returns:
--------
"""
#Assertions
assert isinstance(data, (list, np.ndarray)), \
'Input data must be a list or a numpy array.'
assert isinstance(sigma, float), \
'sigma value for broadening must be a float'
# Functionality
#For pretty plotting purposes
min_x = None
max_x = None
max_y = None
#Defining the figure for plotting
#fig, spect = plt.subplots()
#do the absorbance (blue) if present:
if data[1]:
x, y = broaden_spectrum([np.float64(data[1]), np.float64(data[2])], sigma)
plt.plot(x, y, 'b', label='absorption')
#For pretty plotting purposes
min_x = min(x)
max_x = max(x)
max_y = max(y)
#do the emission (orange color) if present:
if data[3]:
x, y = broaden_spectrum([np.float64(data[3]), np.float64(data[4])], sigma)
plt.plot(x, y, color='#FF8C00', label='emission')
#For pretty plotting purposes
tmp_xmax = max(x)
tmp_min = min(x)
tmp_ymax = max(y)
if not min_x or min_x > tmp_min:
min_x = tmp_min
if not max_x or max_x < tmp_xmax:
max_x = tmp_xmax
if not max_y or max_y < tmp_ymax:
max_y = tmp_ymax
#Formatting for the returned figure:
#plt.margins(x=12., y=1.)
plt.xlim(left=min_x, right=max_x)
plt.ylim(bottom=0., top=max_y*1.2)
plt.legend()
plt.xlabel("Wavelength (nm)")
plt.ylabel("Response (arb. units)")
#Saves the generated figure to a file in the data folder (given the same
#name as the input SMILES string (data[0]). Figure is in a *.png file
#format.
plt.savefig(save_dir + str(data[0]) + ".png", dpi=300)
plt.close()
# #Save the data to file
# fo = open(save_dir + str(data[0]) + '_peaks.txt', 'w')
# fo.write("Absorption\tIntensitiy\n")
# #**FUTURE FEATURE**
# #for i in range(len(data[1]):
# # fo.write(str(data[1][i]) + '\t' + str(data[2][i]))
# fo.write(str(data[1]) + '\t' + str(data[2]))
# fo.write("\nEmission\tIntensity\n")
# #**FUTURE FEATURE**
# #for i in range(len(data[3]):
# # fo.write(str(data[3][i]) + '\t' + str(data[4][i]))
# fo.write(str(data[3]) + '\t' + str(data[4]))
# fo.close
return 1
| 35.460481
| 85
| 0.614982
|
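broaden_spectrum() above applies a Gaussian of width sigma to a single peak, i.e. y(x) = I * exp(-0.5 * (x - lambda)^2 / sigma^2), sampled on a window of +/- 50 nm around the peak position. A compact vectorised sketch of the same broadening, using NumPy in place of the explicit loop (the peak position and intensity below are illustrative):

import numpy as np

def broaden_peak(position, intensity, sigma=5.0, num=10000):
    # Gaussian profile over position +/- 50 nm, matching the window used above.
    x = np.linspace(position - 50.0, position + 50.0, num)
    y = intensity * np.exp(-0.5 * ((x - position) ** 2) / sigma ** 2)
    return x, y

x, y = broaden_peak(450.0, 1.0)    # hypothetical absorption peak at 450 nm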
bde381a5e18969891e14d48fc5036768ccbfe56d
| 3,419
|
py
|
Python
|
Projects/STM8S_Discovery_Examples/echo_UART_with_ISR/build_upload.py
|
gicking/STM8_templates
|
c077e23f8cc5f6eb4b301eecb91d2d121f905719
|
[
"Apache-2.0"
] | 29
|
2015-10-30T08:05:14.000Z
|
2021-08-09T10:29:43.000Z
|
Projects/STM8S_Discovery_Examples/echo_UART_with_ISR/build_upload.py
|
gicking/STM8_templates
|
c077e23f8cc5f6eb4b301eecb91d2d121f905719
|
[
"Apache-2.0"
] | 6
|
2017-12-22T15:44:54.000Z
|
2018-12-17T09:55:13.000Z
|
Projects/STM8S_Discovery_Examples/echo_UART_with_ISR/build_upload.py
|
gicking/STM8_templates
|
c077e23f8cc5f6eb4b301eecb91d2d121f905719
|
[
"Apache-2.0"
] | 10
|
2016-04-19T07:30:26.000Z
|
2020-05-11T21:44:29.000Z
|
#!/usr/bin/python
'''
Script for building and uploading a STM8 project with dependency auto-detection
'''
# set general options
UPLOAD = 'SWIM' # select 'BSL' or 'SWIM'
TERMINAL = True # set True to open terminal after upload
RESET = 1 # STM8 reset: 0=skip, 1=manual, 2=DTR line (RS232), 3=send 'Re5eT!' @ 115.2kBaud, 4=Arduino pin 8, 5=Raspi pin 12
OPTIONS = '' # e.g. device for SPL ('-DSTM8S105', see stm8s.h)
# set path to root of STM8 templates
ROOT_DIR = '../../../'
LIB_ROOT = ROOT_DIR + 'Library/'
TOOL_DIR = ROOT_DIR + 'Tools/'
OBJDIR = 'output'
TARGET = 'main.ihx'
# set OS specific
import platform
if platform.system() == 'Windows':
PORT = 'COM10'
SWIM_PATH = 'C:/Programme/STMicroelectronics/st_toolset/stvp/'
SWIM_TOOL = 'ST-LINK'
SWIM_NAME = 'STM8S105x6' # STM8 Discovery
#SWIM_NAME = 'STM8S208xB' # muBoard
MAKE_TOOL = 'mingw32-make.exe'
else:
PORT = '/dev/ttyUSB0'
SWIM_TOOL = 'stlink'
SWIM_NAME = 'stm8s105c6' # STM8 Discovery
#SWIM_NAME = 'stm8s208?b' # muBoard
MAKE_TOOL = 'make'
# import required modules
import sys
import os
import platform
import argparse
sys.path.insert(0,TOOL_DIR) # assert that TOOL_DIR is searched first
import misc
from buildProject import createMakefile, buildProject
from uploadHex import stm8gal, stm8flash, STVP
##################
# main program
##################
# commandline parameters with defaults
parser = argparse.ArgumentParser(description="compile and upload STM8 project")
parser.add_argument("--skipmakefile", default=False, action="store_true" , help="skip creating Makefile")
parser.add_argument("--skipbuild", default=False, action="store_true" , help="skip building project")
parser.add_argument("--skipupload", default=False, action="store_true" , help="skip uploading hexfile")
parser.add_argument("--skipterminal", default=False, action="store_true" , help="skip opening terminal")
parser.add_argument("--skippause", default=False, action="store_true" , help="skip pause before exit")
args = parser.parse_args()
# create Makefile
if args.skipmakefile == False:
createMakefile(workdir='.', libroot=LIB_ROOT, outdir=OBJDIR, target=TARGET, options=OPTIONS)
# build target
if args.skipbuild == False:
buildProject(workdir='.', make=MAKE_TOOL)
# upload code via UART bootloader
if args.skipupload == False:
if UPLOAD == 'BSL':
stm8gal(tooldir=TOOL_DIR, port=PORT, outdir=OBJDIR, target=TARGET, reset=RESET)
# upload code via SWIM. Use stm8flash on Linux, STVP on Windows (due to libusb issues)
if UPLOAD == 'SWIM':
if platform.system() == 'Windows':
STVP(tooldir=SWIM_PATH, device=SWIM_NAME, hardware=SWIM_TOOL, outdir=OBJDIR, target=TARGET)
else:
stm8flash(tooldir=TOOL_DIR, device=SWIM_NAME, hardware=SWIM_TOOL, outdir=OBJDIR, target=TARGET)
# if specified open serial console after upload
if args.skipterminal == False:
if TERMINAL == True:
cmd = 'python '+TOOL_DIR+'terminal.py -p '+PORT
exitcode = os.system(cmd)
if (exitcode != 0):
sys.stderr.write('error '+str(exitcode)+'\n\n')
misc.Exit(exitcode)
# wait for return, then close window
if args.skippause == False:
if (sys.version_info.major == 3):
input("\npress return to exit ... ")
else:
raw_input("\npress return to exit ... ")
sys.stdout.write('\n\n')
# END OF MODULE
| 33.851485
| 137
| 0.685581
|
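All stages of the script above can be toggled from the command line through the argparse flags it defines; for instance, rebuilding the project without flashing, opening a terminal, or pausing might look like:

python build_upload.py --skipupload --skipterminal --skippause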
6b13f0da9f2c6cf1e231102b0b38fc940fc0fb01
| 1,582
|
py
|
Python
|
setup.py
|
chenzq/PaddleX
|
089b06af02b6cf4ae1afd537d439cf9c30b7750e
|
[
"Apache-2.0"
] | 1
|
2020-11-04T02:51:42.000Z
|
2020-11-04T02:51:42.000Z
|
setup.py
|
chenzq/PaddleX
|
089b06af02b6cf4ae1afd537d439cf9c30b7750e
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
chenzq/PaddleX
|
089b06af02b6cf4ae1afd537d439cf9c30b7750e
|
[
"Apache-2.0"
] | null | null | null |
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
import sys
long_description = "PaddlePaddle Entire Process Development Toolkit"
setuptools.setup(
name="paddlex",
version='1.2.2',
author="paddlex",
author_email="paddlex@baidu.com",
description=long_description,
long_description=long_description,
long_description_content_type="text/plain",
url="https://github.com/PaddlePaddle/PaddleX",
packages=setuptools.find_packages(),
setup_requires=['cython', 'numpy'],
install_requires=[
"pycocotools;platform_system!='Windows'", 'pyyaml', 'colorama', 'tqdm',
'paddleslim==1.1.1', 'visualdl>=2.0.0', 'paddlehub>=1.8.2',
'shapely>=1.7.0', "opencv-python"
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
license='Apache 2.0',
entry_points={'console_scripts': ['paddlex=paddlex.command:main', ]})
| 36.790698
| 79
| 0.702276
|
5a8dcb6f1f6b848b08e284a0b5f2a9fc83e06857
| 14,376
|
py
|
Python
|
eZmaxApi/model/ezsignfolder_reorder_v1_response.py
|
ezmaxinc/eZmax-SDK-python
|
6794b8001abfb7d9ae18a3b87aba164839b925a0
|
[
"MIT"
] | null | null | null |
eZmaxApi/model/ezsignfolder_reorder_v1_response.py
|
ezmaxinc/eZmax-SDK-python
|
6794b8001abfb7d9ae18a3b87aba164839b925a0
|
[
"MIT"
] | null | null | null |
eZmaxApi/model/ezsignfolder_reorder_v1_response.py
|
ezmaxinc/eZmax-SDK-python
|
6794b8001abfb7d9ae18a3b87aba164839b925a0
|
[
"MIT"
] | null | null | null |
"""
eZmax API Definition (Full)
This API exposes all the functionalities for the eZmax and eZsign applications. # noqa: E501
The version of the OpenAPI document: 1.1.7
Contact: support-api@ezmax.ca
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from eZmaxApi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from eZmaxApi.exceptions import ApiAttributeError
def lazy_import():
from eZmaxApi.model.common_response import CommonResponse
from eZmaxApi.model.common_response_obj_debug import CommonResponseObjDebug
from eZmaxApi.model.common_response_obj_debug_payload import CommonResponseObjDebugPayload
globals()['CommonResponse'] = CommonResponse
globals()['CommonResponseObjDebug'] = CommonResponseObjDebug
globals()['CommonResponseObjDebugPayload'] = CommonResponseObjDebugPayload
class EzsignfolderReorderV1Response(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'obj_debug_payload': (CommonResponseObjDebugPayload,), # noqa: E501
'obj_debug': (CommonResponseObjDebug,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'obj_debug_payload': 'objDebugPayload', # noqa: E501
'obj_debug': 'objDebug', # noqa: E501
}
read_only_vars = {
}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""EzsignfolderReorderV1Response - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
obj_debug_payload (CommonResponseObjDebugPayload): [optional] # noqa: E501
obj_debug (CommonResponseObjDebug): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""EzsignfolderReorderV1Response - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
obj_debug_payload (CommonResponseObjDebugPayload): [optional] # noqa: E501
obj_debug (CommonResponseObjDebug): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
CommonResponse,
],
'oneOf': [
],
}
| 43.696049
| 121
| 0.584238
|
7d8ab4d551b651445a9b1e7e6123a4acd2c0a63e
| 2,130
|
py
|
Python
|
mimiciii/steps/movements.py
|
AdamBanham/ICPM2021
|
9b50ba0090dd52ea88820d1edf8859a7207a0d56
|
[
"MIT"
] | 1
|
2021-09-01T09:39:43.000Z
|
2021-09-01T09:39:43.000Z
|
mimiciii/steps/movements.py
|
AdamBanham/ICPM2021
|
9b50ba0090dd52ea88820d1edf8859a7207a0d56
|
[
"MIT"
] | null | null | null |
mimiciii/steps/movements.py
|
AdamBanham/ICPM2021
|
9b50ba0090dd52ea88820d1edf8859a7207a0d56
|
[
"MIT"
] | null | null | null |
from tqdm import tqdm
from os import remove as remove_file
import pandas as pd
from ults import get_query_s3_location,update_query_dbname,BATCH_SIZE,run_query
from more_itertools import chunked
CONTROLFLOW_SQL_SWAP = "##SUBJECTS##"
MOVEMENTS_PATIENT_UNIVERSE_SQL = "mimiciii/in/movements_patient_universe.SQL"
MOVEMENTS_CONTROFLOW_SQL = "mimiciii/in/movements_controlflow.SQL"
MOVEMENTS_LOG_CONTROLFLOW_OUT_DIR = "mimiciii/out/movements/"
MOVEMENTS_LOG_PATIENTS_OUT = "patient_universe.csv"
MOVEMENTS_LOG_CONTROLFLOW_OUT_CSV = "controlflow_events.csv"
if __name__ == "__main__":
# find bucket and temp folder
res_bucket, res_out = get_query_s3_location()
tqdm.write(f"Save location for athena queries on s3 will be : s3://{res_bucket}/{res_out}")
#load in athena statements
tqdm.write(f"loading query statements...")
subset_patients_sql = update_query_dbname(open(MOVEMENTS_PATIENT_UNIVERSE_SQL).read())
controlflow_patients_sql = update_query_dbname(open(MOVEMENTS_CONTROFLOW_SQL).read())
# get patients
tqdm.write(f"getting patient universe...")
patients = run_query(subset_patients_sql,MOVEMENTS_LOG_CONTROLFLOW_OUT_DIR+MOVEMENTS_LOG_PATIENTS_OUT,res_bucket, res_out)
subject_ids = tuple(patients.subject_id.values)
controflow_events_df = pd.DataFrame()
# find controlflow events in batches
tqdm.write(f"finding event universe...")
for subset in tqdm(chunked(subject_ids,BATCH_SIZE),desc="collecting controlflow events", total= len(list(chunked(subject_ids,BATCH_SIZE)))):
temp_df = run_query(
controlflow_patients_sql.replace(CONTROLFLOW_SQL_SWAP,str(tuple(set(subset)))),
MOVEMENTS_LOG_CONTROLFLOW_OUT_DIR+"temp.csv",
res_bucket,
res_out
)
controflow_events_df = pd.concat([controflow_events_df,temp_df])
#save out controlflow events
remove_file(MOVEMENTS_LOG_CONTROLFLOW_OUT_DIR+"temp.csv")
tqdm.write(f"saving event universe...")
controflow_events_df.to_csv(MOVEMENTS_LOG_CONTROLFLOW_OUT_DIR+MOVEMENTS_LOG_CONTROLFLOW_OUT_CSV,index=False)
tqdm.write(f"finished...")
| 49.534884
| 144
| 0.77277
|
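The script above keeps the Athena IN-clause bounded by querying subject_ids in chunks of BATCH_SIZE and concatenating the partial result frames. The batching pattern in isolation, with stand-ins for the imported helpers (the BATCH_SIZE value, the ids and the fake result frame are illustrative only):

import pandas as pd
from more_itertools import chunked

BATCH_SIZE = 500                       # assumed; the real value comes from ults
subject_ids = tuple(range(1, 2001))    # placeholder ids

frames = []
for subset in chunked(subject_ids, BATCH_SIZE):
    ids_clause = str(tuple(set(subset)))                       # e.g. "(1, 2, 3, ...)"
    # frames.append(run_query(sql.replace("##SUBJECTS##", ids_clause), ...))
    frames.append(pd.DataFrame({"subject_id": list(subset)}))  # stand-in for the query result
events = pd.concat(frames, ignore_index=True)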
0f2519cf30fd1ad6b2fb6a9ca5a1ac0a0e66de12
| 6,112
|
py
|
Python
|
feed/views.py
|
ThusharaX/mumbleapi
|
8435fe9d86869cce81961f42c9860fa3810c171b
|
[
"Apache-2.0"
] | 187
|
2021-04-24T14:49:44.000Z
|
2022-03-31T14:25:22.000Z
|
feed/views.py
|
shukl08vk/mumbleapi
|
101825d8aecba7eac4e31046e7b4b15b36c55f77
|
[
"Apache-2.0"
] | 119
|
2021-04-24T18:08:43.000Z
|
2022-01-09T00:57:19.000Z
|
feed/views.py
|
shukl08vk/mumbleapi
|
101825d8aecba7eac4e31046e7b4b15b36c55f77
|
[
"Apache-2.0"
] | 174
|
2021-04-24T15:57:23.000Z
|
2022-03-11T02:09:04.000Z
|
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.pagination import PageNumberPagination
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from .models import Mumble, MumbleVote
from .serializers import MumbleSerializer
# Create your views here.
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def mumbles(request):
query = request.query_params.get('q')
    if query is None:
        query = ''
user = request.user
    following = user.following.select_related('user').all()
ids = []
ids = [i.user.id for i in following]
ids.append(user.id)
print('IDS:', ids)
#Make sure parent==None is always on
    #Query 5 mumbles from users you follow | TOP PRIORITY
mumbles = list(Mumble.objects.filter(parent=None, user__id__in=ids).order_by("-created"))[0:5]
#mumbles = list(mumbles.filter(Q(user__userprofile__name__icontains=query) | Q(content__icontains=query)))
recentMumbles = Mumble.objects.filter(Q(parent=None) & Q(vote_rank__gte=0) & Q(remumble=None)).order_by("-created")[0:5]
#Query top ranked mumbles and attach to end of original queryset
topMumbles = Mumble.objects.filter(Q(parent=None)).order_by("-vote_rank", "-created")
    #Insert recent mumbles at the front of the feed (de-duplicated)
index = 0
for mumble in recentMumbles:
if mumble not in mumbles:
mumbles.insert(index, mumble)
index += 1
#Add top ranked mumbles to feed after prioritizing follow list
for mumble in topMumbles:
if mumble not in mumbles:
mumbles.append(mumble)
paginator = PageNumberPagination()
paginator.page_size = 10
result_page = paginator.paginate_queryset(mumbles, request)
serializer = MumbleSerializer(result_page, many=True)
return paginator.get_paginated_response(serializer.data)
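# Editor's note: a minimal, self-contained sketch (hypothetical helper, not part of
# the original app) of the merge order used in the view above: the five latest
# mumbles from followed users are collected first, site-wide recent mumbles are then
# inserted at the front, and top-ranked mumbles are appended, all de-duplicated.
def _merge_feed_example(followed, recent, top):
    # Start from the followed-users' mumbles.
    feed = list(followed)
    # Recent mumbles are pushed to the front, preserving their order.
    index = 0
    for item in recent:
        if item not in feed:
            feed.insert(index, item)
            index += 1
    # Top-ranked mumbles fill out the tail of the feed.
    for item in top:
        if item not in feed:
            feed.append(item)
    return feed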
@api_view(['GET'])
@permission_classes((IsAuthenticated,))
def mumble_details(request,pk):
try:
mumble = Mumble.objects.get(id=pk)
serializer = MumbleSerializer(mumble, many=False)
return Response(serializer.data)
except:
message = {
'detail':'Mumble doesn\'t exist'
}
return Response(message, status=status.HTTP_404_NOT_FOUND)
@api_view(['POST'])
@permission_classes((IsAuthenticated,))
def create_mumble(request):
user = request.user
data = request.data
is_comment = data.get('isComment')
if is_comment:
parent = Mumble.objects.get(id=data['postId'])
mumble = Mumble.objects.create(
parent=parent,
user=user,
content=data['content'],
)
else:
mumble = Mumble.objects.create(
user=user,
content=data['content']
)
serializer = MumbleSerializer(mumble, many=False)
return Response(serializer.data)
@api_view(['PATCH'])
@permission_classes((IsAuthenticated,))
def edit_mumble(request,pk):
user = request.user
data = request.data
try:
mumble = Mumble.objects.get(id=pk)
if user != mumble.user:
return Response(status=status.HTTP_401_UNAUTHORIZED)
else:
serializer = MumbleSerializer(mumble,data = data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data,status=status.HTTP_200_OK)
else:
return Response(status=status.HTTP_406_NOT_ACCEPTABLE)
except Exception as e:
return Response(status=status.HTTP_204_NO_CONTENT)
@api_view(['DELETE'])
@permission_classes((IsAuthenticated,))
def delete_mumble(request, pk):
user = request.user
try:
mumble = Mumble.objects.get(id=pk)
if user != mumble.user:
return Response(status=status.HTTP_401_UNAUTHORIZED)
else:
mumble.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
except Exception as e:
return Response({'details': f"{e}"},status=status.HTTP_204_NO_CONTENT)
@api_view(['GET'])
def mumble_comments(request, pk):
mumble = Mumble.objects.get(id=pk)
comments = mumble.mumble_set.all()
serializer = MumbleSerializer(comments, many=True)
return Response(serializer.data)
@api_view(['POST'])
@permission_classes((IsAuthenticated,))
def remumble(request):
user = request.user
data = request.data
original_mumble = Mumble.objects.get(id=data['id'])
if original_mumble.user == user:
return Response({'detail':'You can not remumble your own mumble.'},status=status.HTTP_403_FORBIDDEN)
try:
mumble = Mumble.objects.filter(
remumble=original_mumble,
user=user,
)
if mumble.exists():
return Response({'detail':'Already Mumbled'},status=status.HTTP_406_NOT_ACCEPTABLE)
else:
mumble = Mumble.objects.create(
remumble=original_mumble,
user=user,
)
serializer = MumbleSerializer(mumble, many=False)
return Response(serializer.data)
except Exception as e:
return Response({'detail':f'{e}'},status=status.HTTP_403_FORBIDDEN)
@api_view(['POST'])
@permission_classes((IsAuthenticated,))
def update_vote(request):
user = request.user
data = request.data
mumble = Mumble.objects.get(id=data['post_id'])
#What if user is trying to remove their vote?
vote, created = MumbleVote.objects.get_or_create(mumble=mumble, user=user)
if vote.value == data.get('value'):
#If same value is sent, user is clicking on vote to remove it
vote.delete()
else:
vote.value=data['value']
vote.save()
    #We re-query the mumble to get the latest vote rank value
mumble = Mumble.objects.get(id=data['post_id'])
serializer = MumbleSerializer(mumble, many=False)
return Response(serializer.data)
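# Editor's note (behavioural sketch, not part of the original view): because the vote
# is fetched with get_or_create and deleted when the same value arrives again, posting
# {'post_id': 1, 'value': 1} twice toggles the up-vote off, while posting value 1 and
# then value -1 flips the existing vote from an up-vote to a down-vote.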
| 32.168421
| 124
| 0.669666
|
53256df41aa8aa17cbd674d3082d87d7400e7bd4
| 55
|
py
|
Python
|
test/test.py
|
Graham42/yaml_git_web_api
|
d8701695c70905efb6f8afc7a34e5b927978cde3
|
[
"MIT"
] | null | null | null |
test/test.py
|
Graham42/yaml_git_web_api
|
d8701695c70905efb6f8afc7a34e5b927978cde3
|
[
"MIT"
] | null | null | null |
test/test.py
|
Graham42/yaml_git_web_api
|
d8701695c70905efb6f8afc7a34e5b927978cde3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# some tests should go here
pass
| 13.75
| 27
| 0.727273
|
c383e0949adb898071bc41c66355af501729334b
| 35,372
|
py
|
Python
|
zolo/adapters/huobi_restful_adapters.py
|
firefirer1983/zolo
|
889409b491363eb54c2997e01333b77bc81e0c89
|
[
"MIT"
] | 2
|
2021-05-06T12:10:02.000Z
|
2021-08-15T09:25:31.000Z
|
zolo/adapters/huobi_restful_adapters.py
|
firefirer1983/zolo
|
889409b491363eb54c2997e01333b77bc81e0c89
|
[
"MIT"
] | null | null | null |
zolo/adapters/huobi_restful_adapters.py
|
firefirer1983/zolo
|
889409b491363eb54c2997e01333b77bc81e0c89
|
[
"MIT"
] | null | null | null |
import abc
import math
from decimal import Decimal
from functools import partial
from pprint import pprint
from typing import Dict, Tuple, Union, List, Optional
from zolo.dtypes import Lot, CREDENTIAL_EMPTY, POSITION_EMPTY, MARGIN_EMPTY
from zolo.posts import OrderPostType, MarketOrder, LimitOrder, LimitIocOrder, \
LimitFokOrder, OpponentIocOrder, OptimalFokOrder, OptimalIocOrder, \
OptimalOrder, OpponentOrder, OpponentFokOrder
from zolo.utils import round_down
from ..dtypes import Order, Bar, Trade, Fill, Margin, Position, Tick, \
Credential, InstrumentInfo, OrderType, OrderStatus, OrderBook
from zolo.consts import INVALID, MAX_INT
from datetime import datetime, timedelta
import time
from . import Adapter
from ..consts import RESTFUL, MAX_FLOAT, UNIX_EPOCH, BUY, SELL, LONG, SHORT, \
OPEN, CLOSE, DEFAULT_LEVERAGE
from huobi_restful.clients import HuobiCoinMarginSwap, HuobiUsdtMarginSwap, \
HuobiCoinMarginFuture, HuobiSpot
import logging
from ..exceptions import TickGetError, OrderBookGetError, OrderGetError, \
PositionGetError, MarginGetError, BalanceGetError, OrderPostError, \
AssetTransferError
log = logging.getLogger(__name__)
# 1. On Huobi, "equity" is the same thing as margin balance.
# 2. "margin frozen" is the margin locked while an order is pending, i.e. the init margin.
# 3. available margin = margin balance - frozen margin - position margin
# 4. margin balance = deposit + realised pnl + unrealised pnl
# 5. wallet balance = margin balance - unrealised pnl = deposit + realised pnl
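# Editor's note (illustrative arithmetic, not from the original source): with an
# assumed deposit of 1.00, realised pnl +0.05, unrealised pnl -0.02, frozen margin
# 0.10 and position margin 0.30, the relations above give
#   margin balance (equity) = 1.00 + 0.05 - 0.02 = 1.03
#   wallet balance          = 1.03 - (-0.02)     = 1.05
#   available margin        = 1.03 - 0.10 - 0.30 = 0.63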
def str_to_datetime(s):
return datetime.strptime(s, "%Y-%m-%dT%H:%M:%S.%fZ")
def create_id_by_timestamp():
return f"{int(time.time() * 10000000)}"
def timestamp_to_utc(ts: Union[str, int]) -> datetime:
if isinstance(ts, str):
ts = int(ts)
return datetime.utcfromtimestamp(ts / 1000)
MAX_OPTIMAL_DEPTH = 150
class HuobiRestfulAdapter(Adapter, mode=RESTFUL, exchange="huobi"):
def __init__(self, *args):
super().__init__(*args)
def get_position(self, instrument_id) -> Position:
raise NotImplementedError
def get_margin(self, instrument_id) -> Margin:
raise NotImplementedError
def get_available_balance(self, symbol: str) -> float:
raise NotImplementedError
def get_order_by_client_oid(self, instrument_id, client_order_id) -> Order:
pass
def get_contract_value(self, instrument_id):
pass
def get_latest_bar(self, instrument_id: str, granularity: int) -> Bar:
granularity_sym = f"{granularity}min"
prev = datetime.utcnow() - timedelta(minutes=granularity)
try:
res = self._client.get_market_history_kline(
instrument_id, granularity_sym, size="2", from_ts=prev
)
except Exception as e:
log.exception(e)
raise e
res = sorted(res["data"], key=lambda r: r["id"])[0]
return Bar(
self.exchange, self.market, instrument_id,
datetime.utcfromtimestamp(int(res["id"])),
float(res["open"]), float(res["close"]), float(res["high"]),
float(res["low"]), volume=int(res["vol"]),
currency_volume=float(res["amount"]),
granularity=granularity
)
def get_max_order_size(self, instrument_id: str):
pass
def get_bars(
self, instrument_id: str, granularity: int, start: datetime,
end: datetime
):
pass
def get_last_n_bars(self, cnt: int, instrument_id: str, granularity: int):
pass
def get_fill(
self, instrument_id: str, before: datetime, after: datetime, limit=100
):
pass
class HuobiRestfulCoinMarginSwap(HuobiRestfulAdapter, market="swap@coin"):
def __init__(self, *args):
super().__init__(*args)
self._client = HuobiCoinMarginSwap(
self.credential.api_key, self.credential.secret_key
)
@property
def max_optimal_depth(self) -> int:
return MAX_OPTIMAL_DEPTH
def get_instrument_info(self, instrument_id: str):
raise NotImplementedError
class HuobiRestfulUsdtMarginSwap(HuobiRestfulAdapter, market="swap@usdt"):
def __init__(self, *args):
super().__init__(*args)
self._client = HuobiUsdtMarginSwap(
self.credential.api_key, self.credential.secret_key
)
@property
def max_optimal_depth(self) -> int:
return MAX_OPTIMAL_DEPTH
def get_instrument_info(self, instrument_id: str):
raise NotImplementedError
class HuobiRestfulCoinMarginFuture(HuobiRestfulAdapter, market="future@coin"):
def estimate_lot(
self, instrument_id: str, size: float, price: float = 0
) -> Lot:
instrument_id = instrument_id.upper()
contract_value = float(
self._instrument_registry[instrument_id].contract_value)
return Lot(int(size * price / contract_value))
def __init__(self, *args):
super().__init__(*args)
self._client = HuobiCoinMarginFuture(
self.credential.api_key, self.credential.secret_key)
self._instrument_registry = self.get_all_instruments()
self._leverage = DEFAULT_LEVERAGE
assert self._instrument_registry
@property
def max_optimal_depth(self) -> int:
return MAX_OPTIMAL_DEPTH
def get_tick(self, instrument_id) -> Tick:
try:
res = self._client.get_ticker(instrument_id)
if res["status"] != "ok":
raise ValueError(f"get tick failed!")
data = res["tick"]["data"][0]
price = data["price"]
ts = data["ts"]
except Exception as e:
log.exception(e)
raise TickGetError(str(e))
return Tick(
exchange=self.exchange,
market=self.market,
timestamp=timestamp_to_utc(ts),
instrument_id=instrument_id,
price=float(price),
)
def get_ticks(self, *instruments, pricing: str = "avg") -> List[Tick]:
try:
res = self._client.get_market_detail_merged()
if res["status"] != "ok":
raise ValueError(f"get ticks failed!")
ts = timestamp_to_utc(res["ts"])
res = res["ticks"]
except Exception as e:
log.exception(e)
raise TickGetError
ret = []
for r in res:
if pricing == "avg":
price = (float(r["bid"][0]) + float(r["ask"][0])) / 2
elif pricing == "ask":
price = float(r["ask"][0])
elif pricing == "bid":
price = float(r["bid"][0])
else:
raise ValueError
ret.append(Tick(
self.exchange, self.market, r["symbol"], ts, price
))
return ret
def transfer_margin_to_asset(self, symbol: str, amount: float) -> float:
amount = str(round_down(8, amount))
try:
res = self._client.transfer_margin_to_asset(symbol, amount)
if res["status"] != "ok":
log.error(f"{res}")
raise AssetTransferError
except Exception as e:
log.exception(e)
raise AssetTransferError
return float(amount)
def transfer_asset_to_future_margin(self, symbol: str, amount: float):
raise NotImplementedError(f"asset to margin is not avail in future "
f"adapter")
def transfer_asset_to_swap_margin(self, symbol: str, amount: float):
raise NotImplementedError(f"asset to margin is not avail in future "
f"adapter")
@staticmethod
def _get_underlying(symbol: str):
return f"{symbol}-USD"
@staticmethod
def _format_delivery_date(s: str) -> datetime:
assert s
return datetime.strptime(s, "%Y%m%d")
@staticmethod
def _get_sys_delivery_type(s: str):
if s == "this_week":
return "CW"
elif s == "next_week":
return "NW"
elif s == "quarter":
return "CQ"
elif s == "next_quarter":
return "NQ"
raise ValueError
def get_all_instruments(self) -> Dict[str, InstrumentInfo]:
ret = dict()
res = self._client.get_contract_contract_info()
assert res["status"] == "ok"
res = res["data"]
for r in res:
base = r["symbol"]
delivery_type = self._get_sys_delivery_type(r['contract_type'])
alias = f"{base}_{delivery_type}"
ret[r["contract_code"]] = InstrumentInfo(
instrument_type="futures",
instrument_id=r["contract_code"],
underlying=self._get_underlying(r["symbol"]),
commission="",
base_currency=base,
quote_currency="USD",
settle_currency=r["symbol"],
contract_value=r["contract_size"],
contract_value_currency="USD",
option_type="",
strike_price="",
list_time=self._format_delivery_date(r["create_date"]),
expire_time=timestamp_to_utc(int(r["delivery_time"])),
leverage="",
tick_size=r["price_tick"],
lot_size="1",
min_size="1",
contract_type="linear",
alias=alias,
state=bool(r["contract_status"])
)
return ret
def get_instrument_info(self, instrument_id: str) -> InstrumentInfo:
assert instrument_id
instrument_id = instrument_id.upper()
return self._instrument_registry[instrument_id]
def get_book(
self, instrument_id: str, depth: int
) -> OrderBook:
if depth not in (20, 150):
raise ValueError
if depth == 150:
depth_type = "step0"
else:
depth_type = "step6"
try:
res = self._client.get_market_depth(instrument_id, depth_type)
assert res["status"] == "ok"
asks, bids = res["tick"]["asks"], res["tick"]["bids"]
except Exception as e:
log.exception(e)
raise OrderBookGetError
else:
res = OrderBook(
exchange=self.exchange, market=self.market,
instrument_id=instrument_id,
timestamp=timestamp_to_utc(res["ts"]),
asks=[tuple(d) for d in asks],
bids=[tuple(d) for d in bids])
return res
@staticmethod
def get_sys_order_type(r: Union[str, OrderPostType]) -> str:
if isinstance(r, str):
if r == "limit":
return OrderType.LIMIT_GTC
elif r == "opponent":
return OrderType.OPPONENT_GTC
elif r == "optimal_5":
return OrderType.OPTIMAL_5_GTC
elif r == "optimal_10":
return OrderType.OPTIMAL_10_GTC
elif r == "optimal_20":
return OrderType.OPTIMAL_20_GTC
elif r == "optimal_5_fok":
return OrderType.OPTIMAL_5_FOK
elif r == "optimal_10_fok":
return OrderType.OPTIMAL_10_FOK
elif r == "optimal_20_fok":
return OrderType.OPTIMAL_20_FOK
else:
raise NotImplementedError
else:
if isinstance(r, LimitOrder):
return OrderType.LIMIT_GTC
elif isinstance(r, OpponentOrder):
return OrderType.OPPONENT_GTC
elif isinstance(r, OpponentIocOrder):
return OrderType.OPPONENT_IOC
elif isinstance(r, OptimalFokOrder):
return f"OPTIMAL_{r.depth}_FOK"
elif isinstance(r, OptimalIocOrder):
return f"OPTIMAL_{r.depth}_IOC"
elif isinstance(r, OptimalOrder):
return f"OPTIMAL_{r.depth}_GTC"
else:
raise NotImplementedError
@staticmethod
def get_order_type(post: OrderPostType) -> str:
if isinstance(post, LimitOrder):
return f"limit"
elif isinstance(post, OpponentOrder):
return f"opponent"
elif isinstance(post, OptimalOrder):
return f"optimal_{post.depth}"
elif isinstance(post, OpponentFokOrder):
return f"opponent_{post.depth}_fok"
elif isinstance(post, OpponentIocOrder):
return f"opponent_{post.depth}_ioc"
elif isinstance(post, OptimalFokOrder):
return f"optimal_{post.depth}_fok"
elif isinstance(post, OptimalIocOrder):
return f"optimal_{post.depth}_ioc"
else:
raise NotImplementedError
@staticmethod
def _get_direction_and_offset(post: OrderPostType) -> Tuple[str, str]:
post: LimitOrder = post
if post.pos_side == LONG and post.side == BUY:
return "buy", "open"
elif post.pos_side == LONG and post.side == SELL:
return "sell", "close"
elif post.pos_side == SHORT and post.side == SELL:
return "buy", "close"
elif post.pos_side == SHORT and post.side == BUY:
return "sell", "open"
else:
raise NotImplementedError
@staticmethod
def _get_sys_side_and_pos_side(
direction: str, offset: str
) -> Tuple[str, str]:
if direction == "buy" and offset == "open":
return BUY, LONG
elif direction == "sell" and offset == "close":
return SELL, LONG
elif direction == "sell" and offset == "open":
return SELL, SHORT
elif direction == "buy" and offset == "close":
return BUY, SHORT
else:
raise NotImplementedError
@staticmethod
def _get_sys_order_status(status: int):
if status == 1:
return OrderStatus.PREPARING
elif status == 2:
return OrderStatus.PREPARING
elif status == 3:
return OrderStatus.ONGOING
elif status == 4:
return OrderStatus.PARTIAL
elif status == 5:
return OrderStatus.PARTIAL_FILED_OTHER_CANCELED
elif status == 6:
return OrderStatus.FULFILLED
elif status == 7:
return OrderStatus.CANCELED
elif status == 11:
return OrderStatus.CANCELING
else:
raise NotImplementedError
def get_leverage(self, instrument_id: str) -> float:
log.warning(f"all the future@coin use one leverage")
assert instrument_id
instrument_id = instrument_id.upper()
mrg = self.get_margin(instrument_id)
if mrg != MARGIN_EMPTY:
self._leverage = int(mrg.leverage)
return self._leverage
def set_leverage(self, instrument_id: str, lv: float):
log.warning(f"all the future@coin use one leverage")
assert instrument_id
instrument_id = instrument_id.upper()
base_sym = self._instrument_registry[instrument_id].base_currency
self._leverage = int(lv)
self._client.post_contract_switch_lever_rate(base_sym, self._leverage)
def create_order(self, post: OrderPostType) -> str:
assert self._leverage != DEFAULT_LEVERAGE
assert self.exchange == post.exchange and self.market == post.market
instrument_id = post.instrument_id.upper()
instrument = self._instrument_registry[instrument_id]
price = round_down(str(instrument.tick_size), getattr(post, "price", 0))
qty = post.qty
direction, offset = self._get_direction_and_offset(post)
if qty < Decimal(instrument.min_size):
raise ValueError
client_oid = create_id_by_timestamp()
order_type = self.get_order_type(post)
try:
res = self._client.post_order(
contract_code=instrument_id, price=str(price), volume=str(qty),
order_price_type=order_type, client_order_id=client_oid,
direction=direction, offset=offset, lever_rate=self._leverage
)
if res["status"] != "ok":
log.error(f"{res}")
raise OrderPostError
except OrderPostError:
raise
except Exception as e:
log.exception(e)
return client_oid
def get_order_by_client_oid(self, instrument_id, client_order_id) -> Order:
assert instrument_id
instrument_id = instrument_id.upper()
symbol = self._instrument_registry[instrument_id].base_currency
try:
res = self._client.get_contract_order_info(
symbol=symbol, client_order_id=client_order_id)
assert res["status"] == "ok"
ts = timestamp_to_utc(res["ts"])
res = res["data"][0]
except Exception as e:
log.exception(e)
raise OrderGetError
else:
qty = float(res["volume"])
side, pos_side = self._get_sys_side_and_pos_side(
res["direction"], res["offset"])
order_type = self.get_sys_order_type(res["order_price_type"])
price = res.get("price", None) or 0
avg_entry_price = res.get("trade_avg_price", None) or 0
fee = float(res["fee"])
status = self._get_sys_order_status(int(res["status"]))
if status in (OrderStatus.FULFILLED, OrderStatus.CANCELED,
OrderStatus.PARTIAL_FILED_OTHER_CANCELED):
finished_at = ts
else:
finished_at = UNIX_EPOCH
if res["lever_rate"] != self._leverage:
raise RuntimeError()
return Order(
exchange=self.exchange, market=self.market, side=side,
pos_side=LONG, price=float(price), qty=qty,
avg_entry_price=float(avg_entry_price),
leverage=float(res["lever_rate"]),
instrument_id=instrument_id, client_oid=client_order_id,
order_type=order_type, fee=fee, fee_asset=res["fee_asset"],
pnl=0, order_id=res["order_id"], created_at=ts,
finished_at=finished_at, contract_size=0,
state=status, filled=qty, slippage=0
)
def get_position(
self, instrument_id: str = ""
) -> Union[List[Position], Position]:
if instrument_id:
instrument_id = instrument_id.upper()
base = self._instrument_registry[instrument_id].base_currency
else:
base = ""
try:
res = self._client.get_contract_position_info(base)
assert res["status"] == "ok"
res = res["data"]
except Exception as e:
log.exception(e)
raise PositionGetError
pos = list()
for r in res:
direction = r["direction"]
volume = int(r["volume"])
size = - volume if direction == "sell" else volume
avg_entry_price = float(r["cost_hold"])
unrealised_pnl = float(r["profit_unreal"])
realised_pnl = float(r["profit"]) - unrealised_pnl
last_price = float(r["last_price"])
contract_code = r["contract_code"].upper()
contract_value = \
float(self._instrument_registry[contract_code].contract_value)
leverage = int(r["lever_rate"])
home_notional = volume * contract_value / last_price
if instrument_id:
if contract_code != instrument_id:
continue
return Position(
exchange=self.exchange,
market=self.market,
instrument_id=instrument_id,
size=size,
avg_entry_price=avg_entry_price,
realised_pnl=realised_pnl,
unrealised_pnl=unrealised_pnl,
home_notional=home_notional,
leverage=leverage,
)
pos.append(Position(
exchange=self.exchange,
market=self.market,
instrument_id=contract_code,
size=size,
avg_entry_price=avg_entry_price,
realised_pnl=realised_pnl,
unrealised_pnl=unrealised_pnl,
home_notional=home_notional,
leverage=leverage,
))
if pos:
return pos
return POSITION_EMPTY if instrument_id else []
def get_margin(self, instrument_id: str = "") -> Union[List[Margin], Margin]:
if instrument_id:
instrument_id = instrument_id.upper()
symbol = self._instrument_registry[instrument_id].base_currency
else:
symbol = ""
try:
res = self._client.get_contract_account_info(symbol)
assert res["status"] == "ok"
except Exception as e:
log.exception(e)
raise MarginGetError
ret = list()
for r in res["data"]:
margin_balance = float(r["margin_balance"])
unrealised_pnl = float(r["profit_unreal"])
wallet_balance = margin_balance - unrealised_pnl
if symbol:
if symbol.upper() != r["symbol"].upper():
continue
return Margin(
exchange=self.exchange,
market=self.market,
symbol=r["symbol"].upper(),
wallet_balance=wallet_balance,
unrealised_pnl=unrealised_pnl,
realised_pnl=r["profit_real"],
init_margin=r["margin_frozen"],
maint_margin=r["margin_position"],
margin_balance=margin_balance,
leverage=float(r["lever_rate"]),
liquidation_price=float(r["liquidation_price"] or float(
"inf"))
)
ret.append(Margin(
exchange=self.exchange,
market=self.market,
symbol=r["symbol"].upper(),
wallet_balance=wallet_balance,
unrealised_pnl=unrealised_pnl,
realised_pnl=r["profit_real"],
init_margin=r["margin_frozen"],
maint_margin=r["margin_position"],
margin_balance=margin_balance,
leverage=float(r["lever_rate"]),
liquidation_price=float(r["liquidation_price"] or float("inf"))
))
if ret:
return ret
return MARGIN_EMPTY if instrument_id else []
def cancel_order(self, instrument_id: str, client_oid: str):
assert instrument_id
instrument_id = instrument_id.upper()
symbol = self._instrument_registry[instrument_id].base_currency
return self._client.cancel_order(symbol, client_oid)
def cancel_all_orders(self, instrument_id: str):
symbol = self._instrument_registry[instrument_id].base_currency
return self._client.cancel_all_orders(symbol)
class HuobiRestfulSpot(HuobiRestfulAdapter, market="spot"):
def __init__(self, *args):
super().__init__(*args)
self._client = HuobiSpot(
self.credential.api_key, self.credential.secret_key
)
self._instrument_registry: Dict[str, InstrumentInfo] = dict()
self._account_id: str = ""
res = self._client.get_symbols()
assert res["status"] == "ok"
for r in res["data"]:
self._instrument_registry[r["symbol"]] = InstrumentInfo(
instrument_type="spot",
instrument_id=r["symbol"],
underlying=INVALID,
commission=INVALID,
base_currency=r["base-currency"],
quote_currency=r["quote-currency"],
settle_currency=INVALID,
contract_value=INVALID,
contract_value_currency=INVALID,
option_type=INVALID,
strike_price=MAX_FLOAT,
list_time=UNIX_EPOCH,
expire_time=UNIX_EPOCH,
leverage=MAX_FLOAT,
tick_size=r["price-precision"],
lot_size=r["amount-precision"],
min_size=r["min-order-amt"],
contract_type=INVALID,
alias=INVALID,
state=bool(r["state"] == "online"))
self._account_id = INVALID
if self.credential != CREDENTIAL_EMPTY:
res = self._client.get_accounts()
assert res["status"] == "ok"
for r in res["data"]:
if r["type"] == "spot" and r["state"] == "working":
self._account_id = r["id"]
assert self._account_id
@property
def max_optimal_depth(self) -> int:
return MAX_INT
def get_all_instruments(self) -> Dict[str, InstrumentInfo]:
return self._instrument_registry.copy()
def get_instrument_info(self, instrument_id: str) -> InstrumentInfo:
return self._instrument_registry[instrument_id]
def estimate_lot(self, instrument_id: str, size: float, price: float = 0):
raise NotImplementedError("Lot is for delivery or swap")
def get_tick(self, instrument_id: str) -> Tick:
try:
res = self._client.get_market_trade(instrument_id)
assert res["status"] == "ok"
ts = timestamp_to_utc(res["ts"])
res = res["tick"]["data"][0]
except Exception as e:
log.exception(e)
raise TickGetError
price = float(res["price"])
return Tick(self.exchange, self.market, instrument_id, timestamp=ts,
price=price)
def get_ticks(self, *instruments, pricing: str = "avg"):
instruments = [s.lower() for s in instruments]
try:
res = self._client.get_tickers()
assert res["status"] == "ok"
ts = timestamp_to_utc(res["ts"])
res = res["data"]
except Exception as e:
log.exception(e)
raise TickGetError
ret = []
for r in res:
if instruments and r["symbol"] not in instruments:
continue
if pricing == "avg":
price = (float(r["ask"]) + float(r["bid"])) / 2
elif pricing == "ask":
price = float(r["ask"])
elif pricing == "bid":
price = float(r["bid"])
else:
raise ValueError
ret.append(Tick(self.exchange, self.market, r["symbol"], ts, price))
return ret
def get_book(self, instrument_id: str, depth: int) -> OrderBook:
if depth not in (5, 10, 20, 150):
raise ValueError
if depth == 150:
depth_type = "step0"
depth = 0
else:
depth_type = "step1"
try:
res = self._client.get_market_depth(
instrument_id, _type=depth_type, depth=depth)
assert res["status"] == "ok"
ts = timestamp_to_utc(res["ts"])
asks, bids = res["tick"]["asks"], res["tick"]["bids"]
except Exception as e:
log.exception(e)
raise OrderBookGetError
else:
return OrderBook(
exchange=self.exchange, market=self.market,
instrument_id=instrument_id, timestamp=ts,
asks=[tuple(d) for d in asks],
bids=[tuple(d) for d in bids]
)
def create_order(self, post: OrderPostType) -> str:
assert self.exchange == post.exchange and self.market == post.market
instrument_id = post.instrument_id
instrument = self._instrument_registry[instrument_id]
price = round_down(instrument.tick_size, getattr(post, "price", 0))
qty = round_down(instrument.lot_size, float(post.qty))
if qty < Decimal(instrument.min_size):
raise ValueError
client_oid = create_id_by_timestamp()
order_type = self.get_order_type(post)
try:
res = self._client.post_spot_order(
account_id=self._account_id, symbol=post.instrument_id,
order_type=order_type, amount=str(qty), price=str(price),
source="spot-api", client_order_id=client_oid,
stop_price="", operator=""
)
if res["status"] != "ok":
log.error(f"{res}")
raise OrderPostError
except OrderPostError:
raise
except Exception as e:
log.exception(e)
return client_oid
def get_order_by_client_oid(self, instrument_id, client_order_id) -> Order:
try:
res = self._client.get_order_by_client_oid(client_order_id)
if res.get("status") != "ok":
log.error(f"{res}")
raise OrderGetError
res = res["data"]
except Exception as e:
log.exception(e)
raise OrderGetError
else:
price = float(res["price"])
if price == 0.0:
price = float(res["field-cash-amount"]) \
/ float(res["field-amount"])
qty = float(res["field-amount"])
side = self._get_order_side(res["type"])
order_type = self.get_sys_order_type(res["type"])
fee = float(res["field-fees"])
created_at, finished_at = timestamp_to_utc(
res["created-at"]), timestamp_to_utc(res["finished-at"])
status = self._get_sys_order_status(res["state"])
return Order(
exchange=self.exchange, market=self.market, side=side,
pos_side=LONG, price=price, qty=qty, avg_entry_price=price,
leverage=float(res.get("lever_rate", DEFAULT_LEVERAGE)),
instrument_id=instrument_id, client_oid=client_order_id,
order_type=order_type, fee=fee, pnl=0, order_id=str(res["id"]),
created_at=created_at, finished_at=finished_at, contract_size=0,
state=status, filled=qty, slippage=0
)
@staticmethod
def get_order_type(post: OrderPostType) -> str:
side = "buy" if post.side == BUY else "sell"
if isinstance(post, MarketOrder):
return f"{side}-market"
elif isinstance(post, LimitOrder):
return f"{side}-limit"
elif isinstance(post, LimitIocOrder):
return f"{side}-ioc"
elif isinstance(post, LimitFokOrder):
return f"{side}-limit_fok"
else:
raise NotImplementedError
@staticmethod
def get_sys_order_type(order_type: str) -> str:
if order_type in ("buy-market", "sell-market"):
return OrderType.MARKET
elif order_type in ("buy-limit", "sell-limit"):
return OrderType.LIMIT_GTC
elif order_type in ("buy-ioc", "sell-ioc"):
return OrderType.LIMIT_IOC
elif order_type in ("buy-limit-fok", "sell-limit-fok"):
return OrderType.LIMIT_FOK
else:
raise NotImplementedError
@staticmethod
def _get_order_side(order_type: str) -> str:
if order_type in (
"buy-market", "buy-limit", "buy-ioc", "buy-limit-fok"
):
return BUY
elif order_type in (
"sell-market", "sell-limit", "sell-ioc", "sell-limit-fok"
):
return SELL
else:
raise NotImplementedError
@staticmethod
def _get_sys_order_status(status: str):
if status == "created":
return OrderStatus.PREPARING
elif status == "filled":
return OrderStatus.FULFILLED
elif status == "submitted":
return OrderStatus.ONGOING
elif status == "partial-filled":
return OrderStatus.PARTIAL
elif status == "partial-canceled":
return OrderStatus.PARTIAL_FILED_OTHER_CANCELED
elif status == "canceling":
return OrderStatus.CANCELING
elif status == "canceled":
return OrderStatus.CANCELED
else:
raise NotImplementedError
def transfer_margin_to_asset(self, symbol: str, amount: float):
raise NotImplementedError(f"margin to asset not avail in spot adapter")
def transfer_asset_to_future_margin(
self, symbol: str, amount: float) -> float:
amount = round_down(8, amount)
try:
res = self._client.transfer_asset_to_future_margin(symbol, amount)
if res["status"] != "ok":
log.error(f"{res}")
raise AssetTransferError
except Exception as e:
log.exception(e)
raise e
return float(amount)
def transfer_asset_to_swap_margin(
self, symbol: str, amount: float) -> float:
amount = round_down(8, amount)
try:
res = self._client.transfer_asset_to_swap_margin(symbol, amount)
if res["status"] != "ok":
log.error(f"{res}")
raise AssetTransferError
except Exception as e:
log.exception(e)
raise e
return float(amount)
def get_leverage(self, instrument_id: str):
raise NotImplementedError
def set_leverage(self, instrument_id: str, lv: float):
raise NotImplementedError
def get_available_balance(
            self, symbol: str = ""
) -> Union[Dict[str, float], float]:
if symbol:
symbol = symbol.lower()
try:
res = self._client.get_accounts_balance(self._account_id)
assert res["status"] == "ok"
res = res["data"]["list"]
except Exception as e:
log.exception(e)
raise BalanceGetError
ret = dict()
for r in res:
currency, _type, balance = r["currency"], r["type"], r["balance"]
balance = float(balance)
if balance < 0.00000001:
balance = float(0)
if symbol:
if not (currency == symbol and _type == "trade"):
continue
return balance
if _type == "trade":
ret[currency] = balance
return ret or float(0)
def cancel_order(self, instrument_id: str, client_oid: str):
return self._client.post_cancel_order_by_client_oid(client_oid)
def cancel_all_orders(self, instrument_id: str):
return self._client.cancel_all_orders(instrument_id)
def set_leverage(self, instrument_id: str, leverage: float):
raise NotImplementedError
def get_leverage(self, instrument_id: str) -> float:
return DEFAULT_LEVERAGE
| 37.430688
| 81
| 0.571695
|
69a9479746c38cbdb29231f88d2cf6f7613a18d9
| 707
|
py
|
Python
|
books/python-testing-with-pytest/ch5_plugins/pytest-nice/pytest_nice.py
|
phiratio/lpthw
|
a32240d4355fb331805d515f96e1d009914e5c47
|
[
"MIT"
] | 1
|
2021-04-21T09:38:38.000Z
|
2021-04-21T09:38:38.000Z
|
books/python-testing-with-pytest/ch5_plugins/pytest-nice/pytest_nice.py
|
phiratio/lpthw
|
a32240d4355fb331805d515f96e1d009914e5c47
|
[
"MIT"
] | 34
|
2019-12-16T16:53:24.000Z
|
2022-01-13T02:29:30.000Z
|
books/python-testing-with-pytest/ch5_plugins/pytest-nice/pytest_nice.py
|
phiratio/lpthw
|
a32240d4355fb331805d515f96e1d009914e5c47
|
[
"MIT"
] | null | null | null |
"""Code for pytest-nice plugin."""
import pytest
def pytest_addoption(parser):
"""Turn nice features on with --nice option."""
group = parser.getgroup('nice')
group.addoption("--nice", action="store_true",
help="nice: turn FAILED into OPPORTUNITY for improvement")
def pytest_report_header():
"""Thank tester for running tests."""
if pytest.config.getoption('nice'):
return "Thanks for running the tests."
def pytest_report_teststatus(report):
"""Turn failures into opportunities."""
if report.when == 'call':
if report.failed and pytest.config.getoption('nice'):
return (report.outcome, 'O', 'OPPORTUNITY for improvement')
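# Editor's note (hedged usage sketch): with this plugin installed, running
#   pytest --nice
# should print the thank-you header and report failing tests with the short letter 'O'
# and the verbose word 'OPPORTUNITY for improvement'. The global pytest.config object
# used here was removed in later pytest releases, where these hooks would take a
# `config` argument instead.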
| 29.458333
| 78
| 0.660537
|
80eb8268de5d62a9f83e52a8569b65dbbc61d815
| 4,164
|
py
|
Python
|
tkmagicgrid/__main__.py
|
bmjcode/tkMagicGrid
|
bc92b0a11d40cced3a653888a1376f7da66bc0e6
|
[
"MIT"
] | 3
|
2018-12-14T16:42:33.000Z
|
2020-09-13T21:20:50.000Z
|
tkmagicgrid/__main__.py
|
bmjcode/tkMagicGrid
|
bc92b0a11d40cced3a653888a1376f7da66bc0e6
|
[
"MIT"
] | null | null | null |
tkmagicgrid/__main__.py
|
bmjcode/tkMagicGrid
|
bc92b0a11d40cced3a653888a1376f7da66bc0e6
|
[
"MIT"
] | null | null | null |
"""Demonstration of the MagicGrid widget."""
try:
# Python 3
from tkinter import *
from tkinter.messagebox import showinfo, showwarning
except (ImportError):
# Python 2
from Tkinter import *
from tkMessageBox import showinfo, showwarning
from .widget import MagicGrid
__all__ = ["demo"]
BEATLES = [
# Name Founding Fab Four 5th Beatle Solo Albums
("John Lennon", True, True, False, 11 ),
("Paul McCartney", True, True, False, 18 ),
("George Harrison", True, True, False, 12 ),
("Ringo Starr", False, True, False, 19 ),
("Stuart Sutcliffe", True, False, False, 0 ),
("Pete Best", True, False, False, 11 ),
("George Martin", False, False, True, 15 ),
("Billy Preston", False, False, True, 22 ),
("Eric Clapton", False, False, True, 22 ),
]
class MagicGridDemo(Frame):
"""A demonstration of the MagicGrid widget."""
def __init__(self, master=None):
Frame.__init__(self, master)
self.master = master
mg = self.mg = MagicGrid(self)
mg.pack(side="top", expand=1, fill="both")
# Variable for Radiobuttons
selection = self.selection = StringVar()
selection.set("Paul McCartney")
# Add a header row
header_cells = mg.add_header("", "Beatle",
"Founding?", "Fab Four?", "5th Beatle?",
"Solo Albums", "Notes", "Action")
# Set reasonable column widths
mg.configure_column(0, width=4)
for col in range(1, 5):
mg.configure_column(col, width=12)
for col in range(2, 5):
header_cells[col].configure(anchor="center", justify="center")
# The Notes column is stretchy
mg.configure_column(6, weight=1)
row_num = 1
for beatle, founding, fab_four, fifth_beatle, solo_albums in BEATLES:
# Row Number
mg.add_cell(row_num)
# Beatle
mg.add_widget_radiobutton(beatle,
value=beatle, variable=selection)
# Founding
c = mg.add_widget_checkbutton()
if founding: c.select()
# Fab Four
c = mg.add_widget_checkbutton()
if fab_four: c.select()
# 5th Beatle
c = mg.add_widget_checkbutton()
if fifth_beatle: c.select()
# Solo Albums
mg.add_widget_spinbox(solo_albums,
width=12, from_=0, to=100, increment=1)
# Notes
mg.add_widget_entry(width=48)
# Action
button_command = lambda beatle=beatle: self.spam(beatle)
mg.add_widget_button("Spam", command=button_command)
# End the current row
mg.end_row()
row_num += 1
# Provide plenty of ways to close the demo window
for seq in "<Escape>", "<Control-w>", "<Control-q>":
self.master.bind(seq, self.close)
def close(self, event=None):
"""Close the window."""
# Destroy the window
self.master.destroy()
def spam(self, beatle):
"""Display a silly pop-up message."""
if beatle == self.selection.get():
showinfo("Spam",
"You spammed {0}!".format(beatle),
parent=self)
else:
showwarning("Spam",
"You want to spam {0}, but {1} is selected!"
.format(beatle, self.selection.get()),
parent=self)
def demo():
"""Display a demonstration of the MagicGrid widget."""
root = Tk()
root.title("tkMagicGrid")
m = MagicGridDemo(root)
m.pack(side="top", expand=1, fill="both")
root.mainloop()
if __name__ == "__main__":
demo()
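# Editor's note (hedged usage): since this module is tkmagicgrid/__main__.py, the demo
# can presumably also be started with `python -m tkmagicgrid` wherever the package is
# importable.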
| 30.617647
| 78
| 0.506004
|
3c07e9184b4f6a3e431df4e526493f418c21890d
| 3,561
|
py
|
Python
|
lib/helpers/kodi_constants.py
|
MediaBrasil/script.module.metadatautils
|
4d55cb4591a703cd54d3b790f063fdaa5b9e95f0
|
[
"Apache-2.0"
] | null | null | null |
lib/helpers/kodi_constants.py
|
MediaBrasil/script.module.metadatautils
|
4d55cb4591a703cd54d3b790f063fdaa5b9e95f0
|
[
"Apache-2.0"
] | null | null | null |
lib/helpers/kodi_constants.py
|
MediaBrasil/script.module.metadatautils
|
4d55cb4591a703cd54d3b790f063fdaa5b9e95f0
|
[
"Apache-2.0"
] | 2
|
2017-07-09T15:03:15.000Z
|
2020-02-05T19:08:05.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
script.module.metadatautils
kodi_constants.py
Several common constants for use with Kodi json api
"""
import os, sys
if sys.version_info.major == 3:
from .utils import KODI_VERSION
else:
from utils import KODI_VERSION
FIELDS_BASE = ["dateadded", "file", "lastplayed", "plot", "title", "art", "playcount"]
FIELDS_FILE = FIELDS_BASE + ["streamdetails", "director", "resume", "runtime"]
FIELDS_MOVIES = FIELDS_FILE + ["plotoutline", "sorttitle", "cast", "votes", "showlink", "top250", "trailer", "year",
"country", "studio", "set", "genre", "mpaa", "setid", "rating", "tag", "tagline",
"writer", "originaltitle",
"imdbnumber"]
if KODI_VERSION > 16:
FIELDS_MOVIES.append("uniqueid")
FIELDS_TVSHOWS = FIELDS_BASE + ["sorttitle", "mpaa", "premiered", "year", "episode", "watchedepisodes", "votes",
"rating", "studio", "season", "genre", "cast", "episodeguide", "tag", "originaltitle",
"imdbnumber"]
FIELDS_EPISODES = FIELDS_FILE + ["cast", "productioncode", "rating", "votes", "episode", "showtitle", "tvshowid",
"season", "firstaired", "writer", "originaltitle"]
FIELDS_MUSICVIDEOS = FIELDS_FILE + ["genre", "artist", "tag", "album", "track", "studio", "year"]
FIELDS_FILES = FIELDS_FILE + ["plotoutline", "sorttitle", "cast", "votes", "trailer", "year", "country", "studio",
"genre", "mpaa", "rating", "tagline", "writer", "originaltitle", "imdbnumber",
"premiered", "episode", "showtitle",
"firstaired", "watchedepisodes", "duration", "season"]
FIELDS_SONGS = ["artist", "displayartist", "title", "rating", "fanart", "thumbnail", "duration", "disc",
"playcount", "comment", "file", "album", "lastplayed", "genre", "musicbrainzartistid", "track",
"dateadded"]
FIELDS_ALBUMS = ["title", "fanart", "thumbnail", "genre", "displayartist", "artist", "genreid",
"musicbrainzalbumartistid", "year", "rating", "artistid", "musicbrainzalbumid", "theme", "description",
"type", "style", "playcount", "albumlabel", "mood", "dateadded"]
FIELDS_ARTISTS = ["born", "formed", "died", "style", "yearsactive", "mood", "fanart", "thumbnail",
"musicbrainzartistid", "disbanded", "description", "instrument"]
FIELDS_RECORDINGS = ["art", "channel", "directory", "endtime", "file", "genre", "icon", "playcount", "plot",
"plotoutline", "resume", "runtime", "starttime", "streamurl", "title"]
FIELDS_CHANNELS = ["broadcastnow", "channeltype", "hidden", "locked", "lastplayed", "thumbnail", "channel"]
FILTER_UNWATCHED = {"operator": "lessthan", "field": "playcount", "value": "1"}
FILTER_WATCHED = {"operator": "isnot", "field": "playcount", "value": "0"}
FILTER_RATING = {"operator": "greaterthan", "field": "rating", "value": "7"}
FILTER_RATING_MUSIC = {"operator": "greaterthan", "field": "rating", "value": "3"}
FILTER_INPROGRESS = {"operator": "true", "field": "inprogress", "value": ""}
SORT_RATING = {"method": "rating", "order": "descending"}
SORT_RANDOM = {"method": "random", "order": "descending"}
SORT_TITLE = {"method": "title", "order": "ascending"}
SORT_DATEADDED = {"method": "dateadded", "order": "descending"}
SORT_LASTPLAYED = {"method": "lastplayed", "order": "descending"}
SORT_EPISODE = {"method": "episode"}
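# Editor's note: a hedged usage sketch (not part of the original module) showing how
# these constants are commonly combined into the params of a Kodi JSON-RPC call such
# as VideoLibrary.GetMovies; the exact request envelope depends on the caller.
EXAMPLE_MOVIE_QUERY_PARAMS = {
    "properties": FIELDS_MOVIES,
    "filter": FILTER_UNWATCHED,
    "sort": SORT_RANDOM,
    "limits": {"start": 0, "end": 25},
}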
| 63.589286
| 120
| 0.595619
|
a9cd777b079fa09a7305004dd2d04785a5b25f48
| 4,872
|
py
|
Python
|
test/devices_tests/fan_test.py
|
M-o-a-T/xknx
|
0cf07a01a44c76478d916884437f3f4769a4ddd5
|
[
"MIT"
] | null | null | null |
test/devices_tests/fan_test.py
|
M-o-a-T/xknx
|
0cf07a01a44c76478d916884437f3f4769a4ddd5
|
[
"MIT"
] | null | null | null |
test/devices_tests/fan_test.py
|
M-o-a-T/xknx
|
0cf07a01a44c76478d916884437f3f4769a4ddd5
|
[
"MIT"
] | null | null | null |
"""Unit test for Fan objects."""
import asyncio
import unittest
from unittest.mock import patch
import pytest
pytestmark = pytest.mark.asyncio
from xknx import XKNX
from xknx.devices import Fan
from xknx.dpt import DPTArray, DPTBinary
from xknx.exceptions import CouldNotParseTelegram
from xknx.telegram import GroupAddress, Telegram, TelegramType
class TestFan(unittest.TestCase):
"""Class for testing Fan objects."""
# pylint: disable=too-many-public-methods
#
# SYNC
#
async def test_sync(self):
"""Test sync function / sending group reads to KNX bus."""
xknx = XKNX()
fan = Fan(xknx,
name="TestFan",
group_address_speed_state='1/2/3')
await fan.sync(False)
self.assertEqual(xknx.telegrams.qsize(), 1)
telegram1 = xknx.telegrams.get_nowait()
self.assertEqual(telegram1,
Telegram(GroupAddress('1/2/3'), TelegramType.GROUP_READ))
#
# SYNC WITH STATE ADDRESS
#
async def test_sync_state_address(self):
"""Test sync function / sending group reads to KNX bus."""
xknx = XKNX()
fan = Fan(xknx,
name="TestFan",
group_address_speed='1/2/3',
group_address_speed_state='1/2/4')
await fan.sync(False)
self.assertEqual(xknx.telegrams.qsize(), 1)
telegram1 = xknx.telegrams.get_nowait()
self.assertEqual(telegram1,
Telegram(GroupAddress('1/2/4'), TelegramType.GROUP_READ))
#
#
# TEST SET SPEED
#
async def test_set_speed(self):
"""Test setting the speed of a Fan."""
xknx = XKNX()
fan = Fan(xknx,
name="TestFan",
group_address_speed='1/2/3')
await fan.set_speed(55)
self.assertEqual(xknx.telegrams.qsize(), 1)
telegram = xknx.telegrams.get_nowait()
# 140 is 55% as byte (0...255)
self.assertEqual(telegram,
Telegram(GroupAddress('1/2/3'), payload=DPTArray(140)))
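        # Editor's note (illustrative arithmetic): a 0-100 % speed is scaled onto one
        # byte as percent * 255 / 100, so 55 % -> 140.25, which the DPT scaling maps
        # to the byte 140 asserted above.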
#
# TEST PROCESS
#
async def test_process_speed(self):
"""Test process / reading telegrams from telegram queue. Test if speed is processed."""
xknx = XKNX()
fan = Fan(xknx,
name="TestFan",
group_address_speed='1/2/3')
self.assertEqual(fan.current_speed, None)
# 140 is 55% as byte (0...255)
telegram = Telegram(GroupAddress('1/2/3'), payload=DPTArray(140))
await fan.process(telegram)
self.assertEqual(fan.current_speed, 55)
async def test_process_speed_wrong_payload(self): # pylint: disable=invalid-name
"""Test process wrong telegrams. (wrong payload type)."""
xknx = XKNX()
fan = Fan(xknx,
name="TestFan",
group_address_speed='1/2/3')
telegram = Telegram(GroupAddress('1/2/3'), payload=DPTBinary(1))
with self.assertRaises(CouldNotParseTelegram):
await fan.process(telegram)
async def test_process_fan_payload_invalid_length(self):
"""Test process wrong telegrams. (wrong payload length)."""
# pylint: disable=invalid-name
xknx = XKNX()
fan = Fan(xknx,
name="TestFan",
group_address_speed='1/2/3')
telegram = Telegram(GroupAddress('1/2/3'), payload=DPTArray((23, 24)))
with self.assertRaises(CouldNotParseTelegram):
await fan.process(telegram)
#
# TEST DO
#
async def test_do(self):
"""Test 'do' functionality."""
xknx = XKNX()
fan = Fan(xknx,
name="TestFan",
group_address_speed='1/2/3')
await fan.do("speed:50")
self.assertEqual(fan.current_speed, 50)
await fan.do("speed:25")
self.assertEqual(fan.current_speed, 25)
async def test_wrong_do(self):
"""Test wrong do command."""
xknx = XKNX()
fan = Fan(xknx,
name="TestFan",
group_address_speed='1/2/3')
with patch('logging.Logger.warning') as mock_warn:
await fan.do("execute")
self.assertEqual(xknx.telegrams.qsize(), 0)
mock_warn.assert_called_with('Could not understand action %s for device %s', 'execute', 'TestFan')
def test_has_group_address(self):
"""Test has_group_address."""
xknx = XKNX()
fan = Fan(xknx,
'TestFan',
group_address_speed='1/7/1',
group_address_speed_state='1/7/2')
self.assertTrue(fan.has_group_address(GroupAddress('1/7/1')))
self.assertTrue(fan.has_group_address(GroupAddress('1/7/2')))
self.assertFalse(fan.has_group_address(GroupAddress('1/7/3')))
| 33.6
| 110
| 0.586002
|
5a45cce640e0a2957c5cccf7b899a9c95a621116
| 36,922
|
py
|
Python
|
Visual Tracking/SiamR-CNN-master/data.py
|
shikivi/-
|
e83cc9342115801e1464e9907a971801dbd68335
|
[
"MIT"
] | 195
|
2020-04-04T13:50:08.000Z
|
2022-03-24T03:29:38.000Z
|
Visual Tracking/SiamR-CNN-master/data.py
|
shikivi/-
|
e83cc9342115801e1464e9907a971801dbd68335
|
[
"MIT"
] | 30
|
2020-04-12T08:14:34.000Z
|
2021-09-20T13:36:12.000Z
|
Visual Tracking/SiamR-CNN-master/data.py
|
shikivi/-
|
e83cc9342115801e1464e9907a971801dbd68335
|
[
"MIT"
] | 40
|
2020-04-05T00:10:49.000Z
|
2022-03-09T08:34:10.000Z
|
# -*- coding: utf-8 -*-
# File: data.py
import copy
import platform
import numpy as np
import bisect
import cv2
import glob
import random
import os
import PIL
from tabulate import tabulate
from termcolor import colored
import xmltodict
from tensorpack.dataflow import (
DataFromList, MapDataComponent, MultiProcessMapDataZMQ, MultiThreadMapData, MapData, TestDataSpeed, imgaug)
from tensorpack.utils import logger
from tensorpack.utils.argtools import log_once, memoized
from common import (
CustomResize, DataFromListOfDict, box_to_point8,
filter_boxes_inside_shape, point8_to_box, segmentation_to_mask, np_iou)
from config import config as cfg
from dataset import DetectionDataset
from hard_example_utils import subsample_nns
from utils.generate_anchors import generate_anchors
from utils.np_box_ops import area as np_area, ioa as np_ioa
# import tensorpack.utils.viz as tpviz
class MalformedData(BaseException):
pass
def print_class_histogram(roidbs):
"""
Args:
roidbs (list[dict]): the same format as the output of `load_training_roidbs`.
"""
dataset = DetectionDataset()
hist_bins = np.arange(dataset.num_classes + 1)
# Histogram of ground-truth objects
gt_hist = np.zeros((dataset.num_classes,), dtype=np.int)
for entry in roidbs:
# filter crowd?
gt_inds = np.where(
(entry['class'] > 0) & (entry['is_crowd'] == 0))[0]
gt_classes = entry['class'][gt_inds]
gt_hist += np.histogram(gt_classes, bins=hist_bins)[0]
data = [[dataset.class_names[i], v] for i, v in enumerate(gt_hist)]
data.append(['total', sum([x[1] for x in data])])
table = tabulate(data, headers=['class', '#box'], tablefmt='pipe')
logger.info("Ground-Truth Boxes:\n" + colored(table, 'cyan'))
@memoized
def get_all_anchors(stride=None, sizes=None):
"""
Get all anchors in the largest possible image, shifted, floatbox
Args:
stride (int): the stride of anchors.
sizes (tuple[int]): the sizes (sqrt area) of anchors
Returns:
anchors: SxSxNUM_ANCHORx4, where S == ceil(MAX_SIZE/STRIDE), floatbox
The layout in the NUM_ANCHOR dim is NUM_RATIO x NUM_SIZE.
"""
if stride is None:
stride = cfg.RPN.ANCHOR_STRIDE
if sizes is None:
sizes = cfg.RPN.ANCHOR_SIZES
# Generates a NAx4 matrix of anchor boxes in (x1, y1, x2, y2) format. Anchors
# are centered on stride / 2, have (approximate) sqrt areas of the specified
# sizes, and aspect ratios as given.
cell_anchors = generate_anchors(
stride,
scales=np.array(sizes, dtype=np.float) / stride,
ratios=np.array(cfg.RPN.ANCHOR_RATIOS, dtype=np.float))
# anchors are intbox here.
# anchors at featuremap [0,0] are centered at fpcoor (8,8) (half of stride)
max_size = cfg.PREPROC.MAX_SIZE
field_size = int(np.ceil(max_size / stride))
shifts = np.arange(0, field_size) * stride
shift_x, shift_y = np.meshgrid(shifts, shifts)
shift_x = shift_x.flatten()
shift_y = shift_y.flatten()
shifts = np.vstack((shift_x, shift_y, shift_x, shift_y)).transpose()
# Kx4, K = field_size * field_size
K = shifts.shape[0]
A = cell_anchors.shape[0]
field_of_anchors = (
cell_anchors.reshape((1, A, 4)) +
shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
field_of_anchors = field_of_anchors.reshape((field_size, field_size, A, 4))
# FSxFSxAx4
# Many rounding happens inside the anchor code anyway
# assert np.all(field_of_anchors == field_of_anchors.astype('int32'))
field_of_anchors = field_of_anchors.astype('float32')
field_of_anchors[:, :, :, [2, 3]] += 1
return field_of_anchors
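# Editor's note (illustrative, not from the original file): each base cell anchor is
# replicated across the feature grid by adding the same shift to both corners, roughly
#   field_of_anchors[y, x, a] = cell_anchors[a] + (stride*x, stride*y, stride*x, stride*y)
# so with stride 16 the anchor at grid cell (0, 0) reappears 16 px to the right at
# cell (0, 1), and so on for ceil(MAX_SIZE / stride) cells in each direction.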
@memoized
def get_all_anchors_fpn(strides=None, sizes=None):
"""
Returns:
[anchors]: each anchors is a SxSx NUM_ANCHOR_RATIOS x4 array.
"""
if strides is None:
strides = cfg.FPN.ANCHOR_STRIDES
if sizes is None:
sizes = cfg.RPN.ANCHOR_SIZES
assert len(strides) == len(sizes)
foas = []
for stride, size in zip(strides, sizes):
foa = get_all_anchors(stride=stride, sizes=(size,))
foas.append(foa)
return foas
def get_anchor_labels(anchors, gt_boxes, crowd_boxes):
"""
Label each anchor as fg/bg/ignore.
Args:
anchors: Ax4 float
gt_boxes: Bx4 float, non-crowd
crowd_boxes: Cx4 float
Returns:
anchor_labels: (A,) int. Each element is {-1, 0, 1}
anchor_boxes: Ax4. Contains the target gt_box for each anchor when the anchor is fg.
"""
# This function will modify labels and return the filtered inds
def filter_box_label(labels, value, max_num):
curr_inds = np.where(labels == value)[0]
if len(curr_inds) > max_num:
disable_inds = np.random.choice(
curr_inds, size=(len(curr_inds) - max_num),
replace=False)
labels[disable_inds] = -1 # ignore them
curr_inds = np.where(labels == value)[0]
return curr_inds
NA, NB = len(anchors), len(gt_boxes)
assert NB > 0 # empty images should have been filtered already
box_ious = np_iou(anchors, gt_boxes) # NA x NB
ious_argmax_per_anchor = box_ious.argmax(axis=1) # NA,
ious_max_per_anchor = box_ious.max(axis=1)
ious_max_per_gt = np.amax(box_ious, axis=0, keepdims=True) # 1xNB
# for each gt, find all those anchors (including ties) that has the max ious with it
anchors_with_max_iou_per_gt = np.where(box_ious == ious_max_per_gt)[0]
# Setting NA labels: 1--fg 0--bg -1--ignore
anchor_labels = -np.ones((NA,), dtype='int32') # NA,
# the order of setting neg/pos labels matter
anchor_labels[anchors_with_max_iou_per_gt] = 1
anchor_labels[ious_max_per_anchor >= cfg.RPN.POSITIVE_ANCHOR_THRESH] = 1
anchor_labels[ious_max_per_anchor < cfg.RPN.NEGATIVE_ANCHOR_THRESH] = 0
# label all non-ignore candidate boxes which overlap crowd as ignore
if crowd_boxes.size > 0:
cand_inds = np.where(anchor_labels >= 0)[0]
cand_anchors = anchors[cand_inds]
ioas = np_ioa(crowd_boxes, cand_anchors)
overlap_with_crowd = cand_inds[ioas.max(axis=0) > cfg.RPN.CROWD_OVERLAP_THRESH]
anchor_labels[overlap_with_crowd] = -1
# Subsample fg labels: ignore some fg if fg is too many
target_num_fg = int(cfg.RPN.BATCH_PER_IM * cfg.RPN.FG_RATIO)
fg_inds = filter_box_label(anchor_labels, 1, target_num_fg)
# Keep an image even if there is no foreground anchors
# if len(fg_inds) == 0:
# raise MalformedData("No valid foreground for RPN!")
# Subsample bg labels. num_bg is not allowed to be too many
old_num_bg = np.sum(anchor_labels == 0)
if old_num_bg == 0:
# No valid bg in this image, skip.
raise MalformedData("No valid background for RPN!")
target_num_bg = cfg.RPN.BATCH_PER_IM - len(fg_inds)
filter_box_label(anchor_labels, 0, target_num_bg) # ignore return values
# Set anchor boxes: the best gt_box for each fg anchor
anchor_boxes = np.zeros((NA, 4), dtype='float32')
fg_boxes = gt_boxes[ious_argmax_per_anchor[fg_inds], :]
anchor_boxes[fg_inds, :] = fg_boxes
# assert len(fg_inds) + np.sum(anchor_labels == 0) == cfg.RPN.BATCH_PER_IM
return anchor_labels, anchor_boxes
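# Editor's note (worked example, hedged): with typical RPN thresholds of about 0.7
# (positive) and 0.3 (negative), an anchor whose best IoU with any gt box is 0.75 is
# labelled 1 (fg), one at 0.1 becomes 0 (bg), and one at 0.5 stays -1 (ignored). The
# anchor(s) achieving each gt box's maximum IoU are pre-marked fg even below 0.7,
# though the later negative-threshold assignment can still demote them when that IoU
# is very small. Finally fg is subsampled to at most BATCH_PER_IM * FG_RATIO anchors
# and bg fills the remainder of BATCH_PER_IM.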
def get_rpn_anchor_input(im, boxes, is_crowd):
"""
Args:
im: an image
        boxes: nx4, floatbox, gt. shouldn't be changed
is_crowd: n,
Returns:
The anchor labels and target boxes for each pixel in the featuremap.
fm_labels: fHxfWxNA
fm_boxes: fHxfWxNAx4
NA will be NUM_ANCHOR_SIZES x NUM_ANCHOR_RATIOS
"""
boxes = boxes.copy()
all_anchors = np.copy(get_all_anchors())
# fHxfWxAx4 -> (-1, 4)
featuremap_anchors_flatten = all_anchors.reshape((-1, 4))
# only use anchors inside the image
inside_ind, inside_anchors = filter_boxes_inside_shape(featuremap_anchors_flatten, im.shape[:2])
# obtain anchor labels and their corresponding gt boxes
anchor_labels, anchor_gt_boxes = get_anchor_labels(inside_anchors, boxes[is_crowd == 0], boxes[is_crowd == 1])
# Fill them back to original size: fHxfWx1, fHxfWx4
anchorH, anchorW = all_anchors.shape[:2]
featuremap_labels = -np.ones((anchorH * anchorW * cfg.RPN.NUM_ANCHOR, ), dtype='int32')
featuremap_labels[inside_ind] = anchor_labels
featuremap_labels = featuremap_labels.reshape((anchorH, anchorW, cfg.RPN.NUM_ANCHOR))
featuremap_boxes = np.zeros((anchorH * anchorW * cfg.RPN.NUM_ANCHOR, 4), dtype='float32')
featuremap_boxes[inside_ind, :] = anchor_gt_boxes
featuremap_boxes = featuremap_boxes.reshape((anchorH, anchorW, cfg.RPN.NUM_ANCHOR, 4))
return featuremap_labels, featuremap_boxes
def get_multilevel_rpn_anchor_input(im, boxes, is_crowd):
"""
Args:
im: an image
        boxes: nx4, floatbox, gt. shouldn't be changed
is_crowd: n,
Returns:
[(fm_labels, fm_boxes)]: Returns a tuple for each FPN level.
Each tuple contains the anchor labels and target boxes for each pixel in the featuremap.
fm_labels: fHxfWx NUM_ANCHOR_RATIOS
fm_boxes: fHxfWx NUM_ANCHOR_RATIOS x4
"""
boxes = boxes.copy()
anchors_per_level = get_all_anchors_fpn()
flatten_anchors_per_level = [k.reshape((-1, 4)) for k in anchors_per_level]
all_anchors_flatten = np.concatenate(flatten_anchors_per_level, axis=0)
inside_ind, inside_anchors = filter_boxes_inside_shape(all_anchors_flatten, im.shape[:2])
anchor_labels, anchor_gt_boxes = get_anchor_labels(inside_anchors, boxes[is_crowd == 0], boxes[is_crowd == 1])
# map back to all_anchors, then split to each level
num_all_anchors = all_anchors_flatten.shape[0]
all_labels = -np.ones((num_all_anchors, ), dtype='int32')
all_labels[inside_ind] = anchor_labels
all_boxes = np.zeros((num_all_anchors, 4), dtype='float32')
all_boxes[inside_ind] = anchor_gt_boxes
start = 0
multilevel_inputs = []
for level_anchor in anchors_per_level:
assert level_anchor.shape[2] == len(cfg.RPN.ANCHOR_RATIOS)
anchor_shape = level_anchor.shape[:3] # fHxfWxNUM_ANCHOR_RATIOS
num_anchor_this_level = np.prod(anchor_shape)
end = start + num_anchor_this_level
multilevel_inputs.append(
(all_labels[start: end].reshape(anchor_shape),
all_boxes[start: end, :].reshape(anchor_shape + (4,))
))
start = end
assert end == num_all_anchors, "{} != {}".format(end, num_all_anchors)
return multilevel_inputs
def get_bbox_from_segmentation_mask_np(mask):
object_locations = (np.stack(np.where(np.equal(mask, 1))).T[:, :2]).astype(np.int32)
y0 = np.min(object_locations[:, 0])
x0 = np.min(object_locations[:, 1])
y1 = np.max(object_locations[:, 0]) + 1
x1 = np.max(object_locations[:, 1]) + 1
bbox = np.stack([x0, y0, x1, y1])
return bbox
def _augment_boxes(boxes, aug, params):
points = box_to_point8(boxes)
points = aug.augment_coords(points, params)
boxes = point8_to_box(points)
#assert np.min(np_area(boxes)) > 0, "Some boxes have zero area!"
if np.min(np_area(boxes)) <= 0:
return None
return boxes
def _preprocess_common(ref_box, target_box, ref_im, target_im, aug):
ref_boxes = np.array([ref_box], dtype=np.float32)
target_boxes = np.array([target_box], dtype=np.float32)
klass = np.array([1], dtype=np.int32)
# augmentation:
target_im, target_params = aug.augment_return_params(target_im)
ref_im, ref_params = aug.augment_return_params(ref_im)
ref_boxes = _augment_boxes(ref_boxes, aug, ref_params)
target_boxes = _augment_boxes(target_boxes, aug, target_params)
if ref_boxes is None or target_boxes is None:
return None
# additional augmentations:
# motion blur
if cfg.DATA.MOTION_BLUR_AUGMENTATIONS:
do_motion_blur_ref = np.random.rand() < 0.25
if do_motion_blur_ref:
# generating the kernel
kernel_size = np.random.randint(5, 15)
kernel_motion_blur = np.zeros((kernel_size, kernel_size))
kernel_motion_blur[int((kernel_size - 1) / 2), :] = np.ones(kernel_size)
kernel_motion_blur = kernel_motion_blur / kernel_size
# applying the kernel
ref_im = cv2.filter2D(ref_im, -1, kernel_motion_blur)
do_motion_blur_target = np.random.rand() < 0.25
if do_motion_blur_target:
# generating the kernel
kernel_size = np.random.randint(5, 15)
kernel_motion_blur = np.zeros((kernel_size, kernel_size))
kernel_motion_blur[int((kernel_size - 1) / 2), :] = np.ones(kernel_size)
kernel_motion_blur = kernel_motion_blur / kernel_size
# applying the kernel
target_im = cv2.filter2D(target_im, -1, kernel_motion_blur)
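        # Editor's note (illustration, not from the original file): for kernel_size 5
        # the kernel built above is a 5x5 matrix whose middle row is
        # [0.2, 0.2, 0.2, 0.2, 0.2] and whose other entries are 0, i.e. a horizontal
        # box blur that averages each pixel with its four horizontal neighbours when
        # applied via cv2.filter2D.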
# grayscale
if cfg.DATA.GRAYSCALE_AUGMENTATIONS:
do_grayscale = np.random.rand() < 0.25
if do_grayscale:
grayscale_aug = imgaug.Grayscale()
ref_im = np.tile(grayscale_aug.augment(ref_im), [1, 1, 3])
target_im = np.tile(grayscale_aug.augment(target_im), [1, 1, 3])
if cfg.DATA.DEBUG_VIS:
import matplotlib.pyplot as plt
ref_im_vis = ref_im.copy()
#ref_im_vis[int(ref_boxes[0][1]):int(ref_boxes[0][3]), int(ref_boxes[0][0]):int(ref_boxes[0][2]), 0] = 255
ref_im_vis[int(ref_boxes[0][1]):int(ref_boxes[0][3]), int(ref_boxes[0][0]):int(ref_boxes[0][2]), 2] = \
(0.5 * ref_im_vis[int(ref_boxes[0][1]):int(ref_boxes[0][3]), int(ref_boxes[0][0]):int(ref_boxes[0][2]), 2] + 120).astype(np.uint8)
plt.imshow(ref_im_vis[..., ::-1])
plt.show()
target_im_vis = target_im.copy()
target_im_vis[int(target_boxes[0][1]):int(target_boxes[0][3]), int(target_boxes[0][0]):int(target_boxes[0][2]), 2] = \
(0.5 * target_im_vis[int(target_boxes[0][1]):int(target_boxes[0][3]), int(target_boxes[0][0]):int(target_boxes[0][2]), 2] + 120).astype(np.uint8)
plt.imshow(target_im_vis[..., ::-1])
plt.show()
is_crowd = np.array([0], dtype=np.int32)
ret = {'ref_image': ref_im, 'ref_box': ref_boxes[0], 'image': target_im}
if cfg.DATA.DEBUG_VIS:
return ret
# rpn anchor:
try:
if cfg.MODE_FPN:
multilevel_anchor_inputs = get_multilevel_rpn_anchor_input(target_im, target_boxes, is_crowd)
for i, (anchor_labels, anchor_boxes) in enumerate(multilevel_anchor_inputs):
ret['anchor_labels_lvl{}'.format(i + 2)] = anchor_labels
ret['anchor_boxes_lvl{}'.format(i + 2)] = anchor_boxes
else:
# anchor_labels, anchor_boxes
ret['anchor_labels'], ret['anchor_boxes'] = get_rpn_anchor_input(target_im, target_boxes, is_crowd)
ret['gt_boxes'] = target_boxes
ret['gt_labels'] = klass
if not len(target_boxes):
raise MalformedData("No valid gt_boxes!")
except MalformedData as e:
log_once("Input is filtered for training: {}".format(str(e)), 'warn')
return None
return ret
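
# --- Illustrative sketch (not part of the original pipeline) ----------------
# The motion-blur augmentation above fills the middle row of a square kernel
# with ones and normalises it, i.e. a horizontal averaging (motion) kernel.
# The fixed kernel_size below is only for demonstration.
def _demo_motion_blur_kernel(kernel_size=7):
    import numpy as np
    kernel = np.zeros((kernel_size, kernel_size))
    kernel[int((kernel_size - 1) / 2), :] = np.ones(kernel_size)
    kernel = kernel / kernel_size
    assert abs(kernel.sum() - 1.0) < 1e-9       # weights of an averaging kernel sum to 1
    return kernel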
def _preprocess_imagenet_vid(roidb, aug, hard_example_index, hard_example_names):
vid_name = roidb
ann_path = os.path.join(cfg.DATA.IMAGENET_VID_ROOT, "Annotations/VID/train/", vid_name)
ann_files = sorted(glob.glob(ann_path + "/*.xml"))
# randomly select two files
ref_idx = np.random.randint(len(ann_files))
target_idx = np.random.randint(1, len(ann_files))
ref_ann_file = ann_files[ref_idx]
target_ann_file = ann_files[target_idx]
def get_id_to_data(ann):
id_to_data = {}
if "object" in ann:
obj_anns = ann["object"]
if not isinstance(obj_anns, list):
obj_anns = [obj_anns]
for obj_ann in obj_anns:
id_ = obj_ann["trackid"]
id_to_data[id_] = obj_ann
return id_to_data
ref_ann = xmltodict.parse(open(ref_ann_file).read())["annotation"]
target_ann = xmltodict.parse(open(target_ann_file).read())["annotation"]
ref_id_to_data = get_id_to_data(ref_ann)
target_id_to_data = get_id_to_data(target_ann)
ref_obj_ids = set(ref_id_to_data.keys())
target_obj_ids = set(target_id_to_data.keys())
obj_ids = ref_obj_ids & target_obj_ids
obj_ids = list(obj_ids)
if len(obj_ids) == 0:
# this happens quite often, do not print it for now
#log_once("Inputs {},{} filtered for training because of no common objects".format(ref_fname, target_fname),
# 'warn')
return None
random.shuffle(obj_ids)
obj_id = obj_ids[0]
def obj_data_to_bbox(obj_ann):
bbox = obj_ann['bndbox']
x1 = bbox['xmin']
y1 = bbox['ymin']
x2 = bbox['xmax']
y2 = bbox['ymax']
box = [x1, y1, x2, y2]
return box
ref_ann = ref_id_to_data[obj_id]
target_ann = target_id_to_data[obj_id]
ref_box = obj_data_to_bbox(ref_ann)
target_box = obj_data_to_bbox(target_ann)
ref_fname = ref_ann_file.replace("/Annotations/", "/Data/").replace(".xml", ".JPEG")
ref_im = cv2.imread(ref_fname, cv2.IMREAD_COLOR)
target_fname = target_ann_file.replace("/Annotations/", "/Data/").replace(".xml", ".JPEG")
target_im = cv2.imread(target_fname, cv2.IMREAD_COLOR)
data = _preprocess_common(ref_box, target_box, ref_im, target_im, aug)
vid_name = roidb.replace("/", "_") + "_" + str(obj_id)
return _maybe_add_hard_example_data(data, ref_fname, vid_name, hard_example_index,
hard_example_names, dataset_name="ImageNetVID")
def _preprocess_davis_like(roidb, aug, ann_folder, dataset_name="YouTubeVOS", hard_example_index=None,
hard_example_names=None):
vid_name = roidb
ann_path = os.path.join(ann_folder, vid_name)
ann_files = sorted(glob.glob(ann_path + "/*.png"))
if len(ann_files) == 0:
logger.info("no annotations found, skipping {}...".format(ann_path))
return None
# randomly select two files
ref_idx = np.random.randint(len(ann_files))
target_idx = np.random.randint(1, len(ann_files))
ref_ann_file = ann_files[ref_idx]
target_ann_file = ann_files[target_idx]
ref_masks = np.array(PIL.Image.open(ref_ann_file))
target_masks = np.array(PIL.Image.open(target_ann_file))
ref_obj_ids = set(np.setdiff1d(np.unique(ref_masks), [0]))
target_obj_ids = set(np.setdiff1d(np.unique(target_masks), [0]))
obj_ids = ref_obj_ids & target_obj_ids
obj_ids = list(obj_ids)
if len(obj_ids) == 0:
# this happens quite often, do not print it for now
# log_once("Inputs {},{} filtered for training because of no common objects".format(ref_fname, target_fname),
# 'warn')
return None
random.shuffle(obj_ids)
obj_id = obj_ids[0]
ref_mask = ref_masks == obj_id
target_mask = target_masks == obj_id
# convert mask to bbox!
ref_box = get_bbox_from_segmentation_mask_np(ref_mask)
target_box = get_bbox_from_segmentation_mask_np(target_mask)
    ref_fname = ref_ann_file.replace("/Annotations/", "/JPEGImages/").replace(".png", ".jpg")
ref_im = cv2.imread(ref_fname, cv2.IMREAD_COLOR)
    target_fname = target_ann_file.replace("/Annotations/", "/JPEGImages/").replace(".png", ".jpg")
target_im = cv2.imread(target_fname, cv2.IMREAD_COLOR)
data = _preprocess_common(ref_box, target_box, ref_im, target_im, aug)
vid_name = roidb + "_" + str(obj_id)
return _maybe_add_hard_example_data(data, ref_fname, vid_name, hard_example_index,
hard_example_names, dataset_name=dataset_name)
def _preprocess_lasot(roidb, aug, hard_example_index, hard_example_names):
category = roidb.split("-")[0]
data_path = os.path.join(cfg.DATA.LASOT_ROOT, category, roidb)
gt_file = os.path.join(data_path, "groundtruth.txt")
oov_file = os.path.join(data_path, "out_of_view.txt")
full_occ_file = os.path.join(data_path, "full_occlusion.txt")
boxes = []
with open(gt_file) as f:
for l in f:
sp = l.strip().split(",")
box = [float(x) for x in sp]
box[2] += box[0]
box[3] += box[1]
boxes.append(box)
with open(oov_file) as f:
sp = f.read().strip().split(",")
oovs = [int(x) for x in sp]
with open(full_occ_file) as f:
sp = f.read().strip().split(",")
full_occs = [int(x) for x in sp]
n_frames = len(boxes)
assert len(boxes) == len(oovs) == len(full_occs)
data = list(zip(range(n_frames), boxes, oovs, full_occs))
data = [x for x in data if x[2] == 0 and x[3] == 0]
assert len(data) > 0
n_frames = len(data)
ref_idx = np.random.randint(n_frames)
target_idx = np.random.randint(n_frames)
ref_box = data[ref_idx][1]
target_box = data[target_idx][1]
ref_time_idx = data[ref_idx][0]
target_time_idx = data[target_idx][0]
ref_fname = os.path.join(data_path, "img", "%08d.jpg" % (ref_time_idx + 1))
ref_im = cv2.imread(ref_fname, cv2.IMREAD_COLOR)
target_fname = os.path.join(data_path, "img", "%08d.jpg" % (target_time_idx + 1))
target_im = cv2.imread(target_fname, cv2.IMREAD_COLOR)
data = _preprocess_common(ref_box, target_box, ref_im, target_im, aug)
return _maybe_add_hard_example_data(data, ref_fname, roidb, hard_example_index,
hard_example_names, dataset_name="LaSOT")
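
# --- Illustrative note (not part of the original pipeline) ------------------
# The LaSOT groundtruth rows parsed above are [x, y, w, h]; adding the width
# and height onto the top-left corner turns them into [x1, y1, x2, y2].
def _demo_xywh_to_xyxy():
    box = [10.0, 20.0, 30.0, 40.0]              # x, y, w, h
    box[2] += box[0]
    box[3] += box[1]
    assert box == [10.0, 20.0, 40.0, 60.0]      # x1, y1, x2, y2
    return box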
def _preprocess_got10k(roidb, aug, hard_example_index, hard_example_names):
vid_name = roidb
data_path = os.path.join(cfg.DATA.GOT10K_ROOT, "train", vid_name)
gt_file = os.path.join(data_path, "groundtruth.txt")
absent_file = os.path.join(data_path, "absence.label")
boxes = []
absence = []
with open(gt_file) as f:
for l in f:
sp = l.strip().split(",")
box = [float(x) for x in sp]
box[2] += box[0]
box[3] += box[1]
boxes.append(box)
with open(absent_file) as f:
for l in f:
absent = int(l.strip())
absence.append(absent)
n_frames = len(boxes)
assert len(boxes) == len(absence)
data = list(zip(range(n_frames), boxes, absence))
data = [x for x in data if x[2] == 0]
assert len(data) > 0
n_frames = len(data)
ref_idx = np.random.randint(n_frames)
target_idx = np.random.randint(n_frames)
ref_box = data[ref_idx][1]
target_box = data[target_idx][1]
ref_time_idx = data[ref_idx][0]
target_time_idx = data[target_idx][0]
ref_fname = os.path.join(data_path, "%08d.jpg" % (ref_time_idx + 1))
ref_im = cv2.imread(ref_fname, cv2.IMREAD_COLOR)
target_fname = os.path.join(data_path, "%08d.jpg" % (target_time_idx + 1))
target_im = cv2.imread(target_fname, cv2.IMREAD_COLOR)
data = _preprocess_common(ref_box, target_box, ref_im, target_im, aug)
return _maybe_add_hard_example_data(data, ref_fname, vid_name, hard_example_index, hard_example_names,
dataset_name="GOT10k")
def _maybe_add_hard_example_data(data, ref_fname, vid_name, hard_example_index, hard_example_names, dataset_name):
if not cfg.MODE_HARD_MINING:
return data
data = data.copy()
name_for_idx = dataset_name + "/" + vid_name + "/"
if dataset_name == "GOT10k":
name_for_idx += ref_fname.split("/")[-1].replace(".jpg", "")
this_fmt = "%08d"
elif dataset_name == "ImageNetVID":
name_for_idx += str(int(ref_fname.split("/")[-1].replace(".JPEG", "")))
this_fmt = "%06d"
elif dataset_name == "LaSOT":
name_for_idx += str(int(ref_fname.split("/")[-1].replace(".jpg", "")))
this_fmt = "%08d"
elif dataset_name == "YouTubeVOS":
name_for_idx += str(int(ref_fname.split("/")[-1].replace(".jpg", "")))
this_fmt = "%05d"
else:
assert False, ("unknown dataset", dataset_name)
try:
idx = hard_example_names["all"].index(name_for_idx)
except ValueError:
log_once("Not found in index: {}".format(name_for_idx), 'warn')
return None
if dataset_name == "LaSOT":
nns = hard_example_index.get_nns_by_item(idx, cfg.HARD_MINING_KNN_LASOT)
else:
nns = hard_example_index.get_nns_by_item(idx, cfg.HARD_MINING_KNN)
if cfg.MODE_HARD_NEGATIVES_ONLY_CROSSOVER or \
(cfg.MODE_HARD_NEGATIVES_ONLY_CROSSOVER_YOUTUBEVOS and dataset_name == "YouTubeVOS"):
nn_names = [hard_example_names["all"][nn] for nn in nns]
nn_datasets = [x.split("/")[0] for x in nn_names]
nns = [nn for nn, ds_ in zip(nns, nn_datasets) if ds_ != dataset_name]
remove_query = False
else:
remove_query = True
nns = subsample_nns(vid_name, nns, hard_example_names["all"], cfg.N_HARD_NEGATIVES_TO_SAMPLE,
remove_query=remove_query)
feats = []
for nn in nns:
sp = hard_example_names["all"][nn].split("/")
if sp[0] == "GOT10k":
fmt = "%08d"
elif sp[0] == "ImageNetVID":
fmt = "%06d"
elif sp[0] == "LaSOT":
fmt = "%08d"
elif sp[0] == "YouTubeVOS":
fmt = "%05d"
else:
assert False, ("unknown dataset", sp[0])
feat_fn = os.path.join(cfg.HARD_MINING_DATA_PATH, sp[0], "det_feats_compressed", sp[1],
fmt % int(sp[2]) + ".npz")
feat = np.load(feat_fn)
feat = feat["f"]
feats.append(feat)
feats = np.stack(feats, axis=0)
data['hard_negative_features'] = feats
if cfg.MODE_IF_HARD_MINING_THEN_ALSO_POSITIVES:
hard_example_names_dataset = hard_example_names[dataset_name]
#hpens_oldversion = [x for x in hard_example_names_dataset if x.startswith(vid_name)]
left = right = bisect.bisect_left(hard_example_names_dataset, vid_name)
while left > 0:
if hard_example_names_dataset[left - 1].startswith(vid_name):
left -= 1
else:
break
while right < len(hard_example_names_dataset):
if hard_example_names_dataset[right].startswith(vid_name):
right += 1
else:
break
hpens = hard_example_names_dataset[left:right]
assert len(hpens) > 0, vid_name
random.shuffle(hpens)
hpens = hpens[:cfg.N_HARD_POS_TO_SAMPLE]
feats = []
ious = []
gt_boxes = []
jitter_boxes = []
for hpen in hpens:
sp = hpen.split("/")
feat_fn = os.path.join(cfg.HARD_MINING_DATA_PATH, dataset_name, "det_feats_compressed", sp[0],
this_fmt % int(sp[1]) + ".npz")
npz_data = np.load(feat_fn)
feat = npz_data["f"]
iou_data = npz_data["i"]
feats.append(feat)
iou = [float(x) for x in iou_data[-3:]]
ious.append(iou)
box_xyxy = [float(x) for x in iou_data[:4]]
gt_boxes.append(box_xyxy)
jitter_box_xyxy = np.array([float(x) for x in iou_data[4:16]]).reshape(3, 4)
jitter_boxes.append(jitter_box_xyxy)
feats = np.stack(feats, axis=0)
# atm just sample from same sequence, does not need to be hard
data['hard_positive_features'] = feats
data['hard_positive_ious'] = np.stack(ious, axis=0)
data['hard_positive_gt_boxes'] = np.stack(gt_boxes, axis=0)
data['hard_positive_jitter_boxes'] = np.stack(jitter_boxes, axis=0)
return data
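
# --- Illustrative sketch (not part of the original pipeline) ----------------
# The hard-positive lookup above uses bisect to find the contiguous block of
# sorted names that share the video-name prefix; the toy names below show the
# same prefix-window search on made-up data.
def _demo_prefix_window():
    import bisect
    names = sorted(["vidA/1", "vidA/2", "vidB/1", "vidB/2", "vidB/3", "vidC/9"])
    prefix = "vidB"
    left = right = bisect.bisect_left(names, prefix)
    while right < len(names) and names[right].startswith(prefix):
        right += 1
    assert names[left:right] == ["vidB/1", "vidB/2", "vidB/3"]
    return names[left:right]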
def _preprocess_youtube_bb(roidb, aug):
ann_path = os.path.join(cfg.DATA.YOUTUBE_BB_ROOT, "annotations", roidb)
ann_files = glob.glob(os.path.join(ann_path, "*.xml"))
random.shuffle(ann_files)
def ann_to_bbox(ann):
if 'object' not in ann:
return None
if 'bndbox' not in ann['object']:
return None
bbox = ann['object']['bndbox']
width = float(ann["size"]["width"])
height = float(ann["size"]["height"])
x1 = int(round(float(bbox['xmin']) * width))
y1 = int(round(float(bbox['ymin']) * height))
x2 = int(round(float(bbox['xmax']) * width))
y2 = int(round(float(bbox['ymax']) * height))
box = [x1, y1, x2, y2]
return box
# randomly select two files with bounding box
sampled = []
idx = 0
while len(sampled) < 2 and idx < len(ann_files):
ann = xmltodict.parse(open(ann_files[idx]).read())["annotation"]
bbox = ann_to_bbox(ann)
if bbox is not None:
sampled.append((ann_files[idx], bbox))
idx += 1
if len(sampled) < 2:
#print("did not find 2 bounding boxes in", roidb)
return None
ref_ann_file = sampled[0][0]
target_ann_file = sampled[1][0]
ref_box = sampled[0][1]
target_box = sampled[1][1]
ref_fname = ref_ann_file.replace("/annotations/", "/frames/").replace(".xml", ".jpg")
ref_im = cv2.imread(ref_fname, cv2.IMREAD_COLOR)
target_fname = target_ann_file.replace("/annotations/", "/frames/").replace(".xml", ".jpg")
target_im = cv2.imread(target_fname, cv2.IMREAD_COLOR)
return _preprocess_common(ref_box, target_box, ref_im, target_im, aug)
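
# --- Illustrative note (not part of the original pipeline) ------------------
# YouTube-BB annotations store the box corners as fractions of the image size,
# so ann_to_bbox above scales them back to pixels before rounding. The sizes
# and fractions below are made up for demonstration.
def _demo_normalised_to_pixels(width=640, height=360):
    xmin, ymin, xmax, ymax = 0.25, 0.5, 0.75, 1.0
    box = [int(round(xmin * width)), int(round(ymin * height)),
           int(round(xmax * width)), int(round(ymax * height))]
    assert box == [160, 180, 480, 360]
    return box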
def _preprocess_trackingnet(roidb, aug):
part, vid_name = roidb.split("____")
data_path = os.path.join(cfg.DATA.TRACKINGNET_ROOT, part)
gt_file = os.path.join(data_path, "anno", vid_name + ".txt")
boxes = []
with open(gt_file) as f:
for l in f:
sp = l.strip().split(",")
box = [float(x) for x in sp]
box[2] += box[0]
box[3] += box[1]
# there are negative coordinates in the data... should we compensate for that?
if box[0] < 0:
box[0] = 0.0
if box[1] < 0:
box[1] = 0.0
boxes.append(box)
n_frames = len(boxes)
ref_idx = np.random.randint(n_frames)
target_idx = np.random.randint(n_frames)
ref_box = boxes[ref_idx]
target_box = boxes[target_idx]
ref_fname = os.path.join(data_path, "frames", vid_name, str(ref_idx) + ".jpg")
ref_im = cv2.imread(ref_fname, cv2.IMREAD_COLOR)
target_fname = os.path.join(data_path, "frames", vid_name, str(target_idx) + ".jpg")
target_im = cv2.imread(target_fname, cv2.IMREAD_COLOR)
return _preprocess_common(ref_box, target_box, ref_im, target_im, aug)
def get_train_dataflow():
roidbs = DetectionDataset().load_training_roidbs(cfg.DATA.TRAIN)
ds = DataFromList(roidbs, shuffle=True)
# for now let's not do flipping to keep things simple
aug = imgaug.AugmentorList(
[CustomResize(cfg.PREPROC.TRAIN_SHORT_EDGE_SIZE, cfg.PREPROC.MAX_SIZE)])#,
#imgaug.Flip(horiz=True)])
if cfg.MODE_HARD_MINING:
from annoy import AnnoyIndex
hard_mining_index = AnnoyIndex(128, 'euclidean')
hard_mining_index.load(cfg.HARD_MINING_DATA_PATH + "/index_all/index.ann")
names_path = cfg.HARD_MINING_DATA_PATH + "index_all/names.txt"
hard_mining_names_all = []
with open(names_path) as f:
for l in f:
hard_mining_names_all.append(l.strip())
hard_example_names_got = [x[7:] for x in hard_mining_names_all if x.startswith("GOT10k/")]
hard_example_names_vid = [x[12:] for x in hard_mining_names_all if x.startswith("ImageNetVID/")]
hard_example_names_ytbvos = [x[11:] for x in hard_mining_names_all if x.startswith("YouTubeVOS/")]
hard_example_names_lasot = [x[6:] for x in hard_mining_names_all if x.startswith("LaSOT/")]
assert len(hard_example_names_got) > 0
assert len(hard_example_names_vid) > 0
assert len(hard_example_names_ytbvos) > 0
assert len(hard_example_names_lasot) > 0
hard_example_names_got.sort()
hard_example_names_vid.sort()
hard_example_names_ytbvos.sort()
hard_example_names_lasot.sort()
hard_mining_names = {"all": hard_mining_names_all, "GOT10k": hard_example_names_got,
"ImageNetVID": hard_example_names_vid, "YouTubeVOS": hard_example_names_ytbvos,
"LaSOT": hard_example_names_lasot}
else:
hard_mining_index = None
hard_mining_names = None
def preprocess(roidb):
if roidb.startswith("VID/"):
return _preprocess_imagenet_vid(roidb[4:], aug, hard_mining_index, hard_mining_names)
elif roidb.startswith("DAVIS/"):
return _preprocess_davis_like(roidb[6:], aug, os.path.join(cfg.DATA.DAVIS2017_ROOT, "Annotations",
"480p"))
elif roidb.startswith("YouTubeVOS/"):
return _preprocess_davis_like(roidb[11:], aug, os.path.join(cfg.DATA.YOUTUBE_VOS_ROOT, "train",
"Annotations"),
"YouTubeVOS", hard_mining_index, hard_mining_names)
elif roidb.startswith("GOT10K/"):
return _preprocess_got10k(roidb[7:], aug, hard_mining_index, hard_mining_names)
elif roidb.startswith("LaSOT/"):
return _preprocess_lasot(roidb[6:], aug, hard_mining_index, hard_mining_names)
elif roidb.startswith("YouTube-BB/"):
return _preprocess_youtube_bb(roidb[11:], aug)
elif roidb.startswith("TrackingNet/"):
return _preprocess_trackingnet(roidb[12:], aug)
else:
assert False
#ds = MultiProcessMapDataZMQ(ds, 10, preprocess)
#ds = MapData(ds, preprocess)
if cfg.DATA.DEBUG_VIS or not cfg.DATA.MULTITHREAD:
ds = MapData(ds, preprocess)
else:
#ds = MultiThreadMapData(ds, 6, preprocess)
ds = MultiThreadMapData(ds, 8, preprocess, buffer_size=80)
return ds
def get_eval_dataflow(name, shard=0, num_shards=1):
seqs = []
with open("davis2017_fast_val_ids.txt") as f:
for l in f:
seqs.append(l.strip())
seqs_timesteps = []
for seq in seqs:
files = sorted(glob.glob(cfg.DATA.DAVIS2017_ROOT + "/JPEGImages/480p/" + seq.split("__")[0] + "/*.jpg"))[1:-1]
timesteps = [f.split('/')[-1].replace(".jpg", "") for f in files]
for timestep in timesteps:
ann_fn = cfg.DATA.DAVIS2017_ROOT + "/Annotations/480p/" + seq.split("__")[0] + '/' + timestep + ".png"
ann = np.array(PIL.Image.open(ann_fn))
ann_mask = ann == int(seq.split("__")[1])
if ann_mask.any():
seqs_timesteps.append((seq.split('__')[0], seq.split('__')[1], timestep))
# seqs_timesteps += [(seq.split('__')[0], seq.split('__')[1], timestep) for timestep in timesteps]
num_seqs_timesteps = len(seqs_timesteps)
seqs_timesteps_per_shard = num_seqs_timesteps // num_shards
seqs_timesteps_range = (shard * seqs_timesteps_per_shard, (shard + 1) * seqs_timesteps_per_shard if shard + 1 < num_shards else num_seqs_timesteps)
ds = DataFromList(seqs_timesteps[seqs_timesteps_range[0]: seqs_timesteps_range[1]])
def preprocess(seq_timestep):
seq, obj_id, timestep = seq_timestep
ann_fn = cfg.DATA.DAVIS2017_ROOT + "/Annotations/480p/" + seq + '/' + timestep + ".png"
ann = np.array(PIL.Image.open(ann_fn))
ann_mask = ann == int(obj_id)
if not ann_mask.any():
return None, None, None, None, None
# ann_box = np.array([-1000000, -1000000, 100000, 100000])
else:
ann_box = get_bbox_from_segmentation_mask_np(ann_mask)
ff_fn = cfg.DATA.DAVIS2017_ROOT + "/Annotations/480p/" + seq + '/' + str(0).zfill(5) + ".png"
ff = np.array(PIL.Image.open(ff_fn))
ff_mask = ff == int(obj_id)
ff_box = get_bbox_from_segmentation_mask_np(ff_mask)
x1, y1, x2, y2 = [float(x) for x in ann_box]
target_bbox = np.array([x1, y1, x2, y2], dtype=np.float32)
x1, y1, x2, y2 = [float(x) for x in ff_box]
ref_bbox = np.array([x1, y1, x2, y2], dtype=np.float32)
target_img_fn = cfg.DATA.DAVIS2017_ROOT + "/JPEGImages/480p/" + seq + "/" + timestep + ".jpg"
ref_img_fn = cfg.DATA.DAVIS2017_ROOT + "/JPEGImages/480p/" + seq + "/" + str(0).zfill(5) + ".jpg"
target_img = cv2.imread(target_img_fn, cv2.IMREAD_COLOR)
ref_img = cv2.imread(ref_img_fn, cv2.IMREAD_COLOR)
return ref_img, ref_bbox, target_img, target_bbox, "__".join(seq_timestep)
ds = MapData(ds, preprocess)
return ds
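
# --- Illustrative sketch (not part of the original pipeline) ----------------
# get_eval_dataflow above shards the (sequence, timestep) list so that every
# shard gets floor(n / num_shards) items and the last shard absorbs the
# remainder; the toy check below confirms the shards cover the list exactly.
def _demo_shard_ranges(num_items=10, num_shards=3):
    items = list(range(num_items))
    per_shard = num_items // num_shards
    covered = []
    for shard in range(num_shards):
        lo = shard * per_shard
        hi = (shard + 1) * per_shard if shard + 1 < num_shards else num_items
        covered.extend(items[lo:hi])
    assert covered == items
    return per_shard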
| 42.536866
| 157
| 0.642246
|
498048860d059500a9b8e2d9734607a4b382d1cf
| 414
|
py
|
Python
|
nested_q_8.py
|
Samridhi-88/nested_list
|
1087cf46db5520fd569ea67caef77c0af77a95b5
|
[
"MIT"
] | null | null | null |
nested_q_8.py
|
Samridhi-88/nested_list
|
1087cf46db5520fd569ea67caef77c0af77a95b5
|
[
"MIT"
] | null | null | null |
nested_q_8.py
|
Samridhi-88/nested_list
|
1087cf46db5520fd569ea67caef77c0af77a95b5
|
[
"MIT"
] | null | null | null |
element=[23,14,56,12,19,9,15,25,31,42,43]
i=0
sum1=0
sum2=0
list1=[]
list2=[]
while i<len(element):
if element[i]%2==0:
sum1+=element[i]
list1.append(element[i])
everaj=sum1/len(list1)
else:
sum2+=element[i]
list2.append(element[i])
everaj1=sum2/len(list2)
i+=1
print(sum1,"even")
print(sum2,"odd")
print(list1)
print(list2)
print(everaj)
print(everaj1)
| 18.818182
| 41
| 0.608696
|
782ff0f8a18cb523d3b951681bb0b46e685bce36
| 1,176
|
py
|
Python
|
flaked/plugins/flaked_shire.py
|
ischaojie/flaked
|
3b5b6156ad52c78c95b963d61572ce91b84ff956
|
[
"MIT"
] | 1
|
2022-03-18T10:40:15.000Z
|
2022-03-18T10:40:15.000Z
|
flaked/plugins/flaked_shire.py
|
ischaojie/flaked
|
3b5b6156ad52c78c95b963d61572ce91b84ff956
|
[
"MIT"
] | 2
|
2022-03-20T14:46:18.000Z
|
2022-03-21T13:38:59.000Z
|
flaked/plugins/flaked_shire.py
|
ischaojie/flaked
|
3b5b6156ad52c78c95b963d61572ce91b84ff956
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from ast import NodeVisitor
__version__ = "0.1.4"
SHIRE_MODULES = ("luz", "luzong", "corelib")
MESSAGE = "SH00 shire old style import found"
class ShireImportVisitor(NodeVisitor):
def __init__(self):
super(ShireImportVisitor, self).__init__()
self.imports = []
def visit_ImportFrom(self, node):
n_modules = node.module.split(".") if node.module else []
if len(set(n_modules).intersection(SHIRE_MODULES)) > 0:
self.imports.append(node)
def visit_Import(self, node):
for n_name in node.names:
if n_name.name in SHIRE_MODULES:
self.imports.append(node)
class ShireChecker:
name = "flaked-shire"
version = __version__
def __init__(self, tree, filename):
self.tree = tree
self.filename = filename
def run(self):
visitor = ShireImportVisitor()
visitor.visit(self.tree)
if not visitor.imports:
return
for import_node in visitor.imports:
yield (
import_node.lineno,
0,
MESSAGE,
type(self),
)
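
# --- Hedged usage sketch (not part of the plugin) ---------------------------
# flake8 normally instantiates ShireChecker itself; running it by hand on a
# small source string shows what a reported violation tuple looks like.
if __name__ == "__main__":
    import ast

    source = "import luz\nfrom corelib.db import connect\nimport os\n"
    checker = ShireChecker(ast.parse(source), "example.py")
    for lineno, col, message, _ in checker.run():
        print("example.py:{}:{}: {}".format(lineno, col + 1, message))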
| 24.5
| 65
| 0.583333
|
54ad505442430bc86da03ef3e1674f4da382e732
| 621
|
py
|
Python
|
necrobot/match/matchglobals.py
|
Khold6458/necrobot
|
0bf9397eba43c6f7e8df77451f772b8de835cd1b
|
[
"MIT"
] | null | null | null |
necrobot/match/matchglobals.py
|
Khold6458/necrobot
|
0bf9397eba43c6f7e8df77451f772b8de835cd1b
|
[
"MIT"
] | 1
|
2018-10-08T22:05:30.000Z
|
2018-10-08T22:05:30.000Z
|
necrobot/match/matchglobals.py
|
Khold6458/necrobot
|
0bf9397eba43c6f7e8df77451f772b8de835cd1b
|
[
"MIT"
] | null | null | null |
import discord
from necrobot.util.singleton import Singleton
class MatchGlobals(metaclass=Singleton):
def __init__(self):
self._deadline_fn = MatchGlobals._default_deadline
self._channel_category = None
def set_deadline_fn(self, f):
self._deadline_fn = f
def set_channel_category(self, channel: discord.Channel):
self._channel_category = channel
@property
def deadline(self):
return self._deadline_fn()
@property
def channel_category(self):
return self._channel_category
@staticmethod
def _default_deadline():
return None
| 23
| 61
| 0.698873
|
6d1cb49382a5c76f3db877497eadf9d22a9e4f98
| 956
|
py
|
Python
|
server/views.py
|
Decision2016/IndigoRobot
|
da427cc6a960eb211c31fa029f724d83dc5b0531
|
[
"MIT"
] | null | null | null |
server/views.py
|
Decision2016/IndigoRobot
|
da427cc6a960eb211c31fa029f724d83dc5b0531
|
[
"MIT"
] | 5
|
2021-03-30T12:41:35.000Z
|
2021-06-10T18:15:43.000Z
|
server/views.py
|
Decision2016/IndigoRobot
|
da427cc6a960eb211c31fa029f724d83dc5b0531
|
[
"MIT"
] | null | null | null |
from utils.baseclasses import BaseAPIView
from account.decorators import cq_permission_required
from account.models import User
from .statistics import Statistics
from mcserver.views import MinecraftServerControl
class ServerAPI(BaseAPIView):
@cq_permission_required
def post(self, request):
data = request.data
user = User.objects.get(username=request.query_params['username'])
statistic = Statistics(user)
if data['post_type'] == 'message' and data['message_type'] == 'group':
statistic.handle_message(data)
MinecraftServerControl(user).command_analyse(data['message'], user_id=data['sender']['user_id'], group_id=data['group_id'])
elif data['post_type'] == 'message' and data['message_type'] == 'private':
statistic.handle_private(data, user)
elif data['post_type'] == 'notice':
statistic.handle_group(data)
return self.success("Success")
| 43.454545
| 135
| 0.696653
|
6f2059b6abffb9ae6b59a1755c3528ccc462e6c4
| 244
|
py
|
Python
|
leetcode/344. ReverseString/soln.py
|
saisankargochhayat/algo_quest
|
a24f9a22c019ab31d56bd5a7ca5ba790d54ce5dc
|
[
"Apache-2.0"
] | 3
|
2017-02-15T20:55:04.000Z
|
2018-09-26T18:48:24.000Z
|
leetcode/344. ReverseString/soln.py
|
saisankargochhayat/algo_quest
|
a24f9a22c019ab31d56bd5a7ca5ba790d54ce5dc
|
[
"Apache-2.0"
] | 4
|
2017-10-07T18:59:20.000Z
|
2019-10-08T05:43:25.000Z
|
leetcode/344. ReverseString/soln.py
|
saisankargochhayat/algo_quest
|
a24f9a22c019ab31d56bd5a7ca5ba790d54ce5dc
|
[
"Apache-2.0"
] | 1
|
2017-10-08T06:52:21.000Z
|
2017-10-08T06:52:21.000Z
|
from typing import List


class Solution:
def reverseString(self, s: List[str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
l = len(s)
for i in range(l//2):
s[i], s[l-i-1] = s[l-i-1], s[i]
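
# --- Hedged usage sketch -----------------------------------------------------
# Quick in-place check of the two-pointer swap outside the LeetCode harness.
if __name__ == "__main__":
    chars = list("hello")
    Solution().reverseString(chars)
    assert chars == list("olleh")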
| 27.111111
| 58
| 0.471311
|
c3bc847307be0962712f086d54beab846faec110
| 1,188
|
py
|
Python
|
daemon/daemon_controller.py
|
naren-m/home_assistant
|
a20d64b79d97cbb2c9bae197c0900515ed8bfbfc
|
[
"MIT"
] | null | null | null |
daemon/daemon_controller.py
|
naren-m/home_assistant
|
a20d64b79d97cbb2c9bae197c0900515ed8bfbfc
|
[
"MIT"
] | null | null | null |
daemon/daemon_controller.py
|
naren-m/home_assistant
|
a20d64b79d97cbb2c9bae197c0900515ed8bfbfc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys, time
import action_part as ap
import logging
import pyttsx, time
import schedule
import app_logger
from daemon import Daemon
logger = app_logger.logger
engine = pyttsx.init()
# https://github.com/dbader/schedule
try:
actions = ap.MyActions()
except Exception, e:
print "Exception, ", e
raise
class MyDaemon(Daemon):
def run(self):
try:
scheduled_time = "07:00"
schedule.every().day.at(scheduled_time).do(actions.play_specified_song)
logger.info("Playing at %s", time)
except Exception, e:
logger.error("Exception in MyDaemon run", e)
raise
while True:
schedule.run_pending()
time.sleep(1)
# engine.say('Naren')
# engine.runAndWait()
if __name__ == "__main__":
file_name = '/tmp/daemon-music.pid'
daemon = MyDaemon(file_name)
logger.info("Started daemon process, pid stored in %s", file_name)
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
| 19.47541
| 74
| 0.68266
|
c17b1a8a2f8f5f5c9cadd53c6d2c17ab31e5df9e
| 75,522
|
py
|
Python
|
build/scripts-3.10/find_primes.py
|
git4robot/pypi_find_primes
|
8426950789045863c85dae855a87d71e611115bf
|
[
"MIT"
] | null | null | null |
build/scripts-3.10/find_primes.py
|
git4robot/pypi_find_primes
|
8426950789045863c85dae855a87d71e611115bf
|
[
"MIT"
] | null | null | null |
build/scripts-3.10/find_primes.py
|
git4robot/pypi_find_primes
|
8426950789045863c85dae855a87d71e611115bf
|
[
"MIT"
] | null | null | null |
#!python3
'''
The CLI Tool.
'''
from math import log, sqrt, log2, ceil, floor, gcd
from random import randint, randrange
from functools import reduce
from operator import mul
from argparse import ArgumentParser
NUMPY_ENABLED = True
try:
from numpy import ones, nonzero, __version__
print('Detected numpy version {__version__}'.format(**locals()))
except ImportError:
print('Numpy is not found! Finding primes will be slower!')
NUMPY_ENABLED = False
print()
def _check_num(n):
'''
Internel function to check the input.
'''
if not isinstance(n, int):
        raise TypeError('Type of argument n should be int, got {}'.format(type(n).__name__))
if n <= 0:
raise ValueError('The number of argument n should be greater than 0, got {n}'.format(**locals()))
def _check_factors(ans, n, retry = 1, max_retries = 3):
'''
Internel function to check the output.
'''
if reduce(mul, ans) == n:
return 0
if retry == max_retries + 1:
print('Factor Error. The multiplication of {ans} is not {n}.'.format(**locals()))
raise FactorError('Factor Error. The multiplication of {ans} is not {n}.'.format(**locals()))
print('Factor Error. The multiplication of {ans} is not {n}. Retry {retry}.'.format(**locals()))
return retry + 1
def is_prime(n):
'''
If n is prime, return True.
'''
_check_num(n)
if n in [2, 3, 5, 7]:
return True
if not (n % 10 % 2) or n % 10 not in [1, 3, 7, 9] or n == 1 or not isinstance(n, int):
return False
for i in range(2, int(n ** 0.5 + 1)):
if n % i == 0:
return False
return True
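
# --- Illustrative example (not part of the CLI) ------------------------------
# is_prime above is plain trial division up to sqrt(n), so it is exact (if slow
# for very large n); two quick checks:
def _demo_is_prime():
    assert is_prime(97)
    assert not is_prime(91)      # 91 = 7 * 13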
def all_primes(n, output = 'array'):
'''
    Return a list of all primes up to and including n.
Arguments:
output ----- 'array' or 'list' ----- The output type of the function.
'''
_check_num(n)
if NUMPY_ENABLED:
sieve = ones(n + 1, dtype = bool)
else:
sieve = [True] * (n + 1)
for i in range(2, int(n ** 0.5) + 1):
if sieve[i]:
for j in range(i ** 2, n + 1, i):
sieve[j] = False
if NUMPY_ENABLED:
s = nonzero(sieve)[0]
if output == 'list':
return s.tolist()[2:]
return s[2:]
else:
return [x for x in range(2, n + 1) if sieve[x]]
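
# --- Illustrative example (not part of the CLI) ------------------------------
# all_primes sieves every prime up to and including n; the 'list' output is
# handy when numpy is enabled and an ndarray is not wanted.
def _demo_all_primes():
    assert all_primes(30, output='list') == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]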
class FactorError(Exception):
pass
def factor_siqs(n):
'''
    Return the list of prime factors of n, with multiplicity.
'''
MAX_DIGITS_POLLARD = 30
POLLARD_QUICK_ITERATIONS = 20
MIN_DIGITS_POLLARD_QUICK2 = 45
POLLARD_QUICK2_ITERATIONS = 25
SIQS_TRIAL_DIVISION_EPS = 25
SIQS_MIN_PRIME_POLYNOMIAL = 400
SIQS_MAX_PRIME_POLYNOMIAL = 4000
class Polynomial:
def __init__(self, coeff = [], a = None, b = None):
self.coeff = coeff
self.a = a
self.b = b
def eval(self, x):
res = 0
for a in self.coeff[::-1]:
res *= x
res += a
return res
class FactorBasePrime:
def __init__(self, p, tmem, lp):
self.p = p
self.soln1 = None
self.soln2 = None
self.tmem = tmem
self.lp = lp
self.ainv = None
def lowest_set_bit(a):
b = (a & -a)
low_bit = -1
while (b):
b >>= 1
low_bit += 1
return low_bit
def to_bits(k):
k_binary = bin(k)[2:]
return (bit == '1' for bit in k_binary[::-1])
def pow_mod(a, k, m):
r = 1
b = a
for bit in to_bits(k):
if bit:
r = (r * b) % m
b = (b * b) % m
return r
def is_quadratic_residue(a, p):
return legendre(a, (p - 1) // 2, 1, p) == 1
def legendre(a, q, l, n):
x = q ** l
if x == 0:
return 1
z = 1
a %= n
while x != 0:
if x % 2 == 0:
a = (a ** 2) % n
x //= 2
else:
x -= 1
z = (z * a) % n
return z
def sqrt_mod_prime(a, p):
if a == 0:
return 0
if p == 2:
return a
if p % 2 == 0:
return None
p_mod_8 = p % 8
if p_mod_8 == 1:
q = p // 8
e = 3
while q % 2 == 0:
q //= 2
e += 1
while True:
x = randint(2, p - 1)
z = pow_mod(x, q, p)
if pow_mod(z, 2 ** (e - 1), p) != 1:
break
y = z
r = e
x = pow_mod(a, (q - 1) // 2, p)
v = (a * x) % p
w = (v * x) % p
while True:
if w == 1:
return v
k = 1
while pow_mod(w, 2 ** k, p) != 1:
k += 1
d = pow_mod(y, 2 ** (r - k - 1), p)
y = (d ** 2) % p
r = k
v = (d * v) % p
w = (w * y) % p
elif p_mod_8 == 5:
v = pow_mod(2 * a, (p - 5) // 8, p)
i = (2 * a * v * v) % p
return (a * v * (i - 1)) % p
else:
return pow_mod(a, (p + 1) // 4, p)
def inv_mod(a, m):
return eea(a, m)[0] % m
def eea(a, b):
if a == 0:
return (0, 1, b)
x = eea(b % a, a)
return (x[1] - b // a * x[0], x[0], x[2])
def is_prime(n):
if n in [2, 3, 5, 7]:
return True
if not (n % 10 % 2) or n % 10 not in [1, 3, 7, 9] or n == 1 or not isinstance(n, int):
return False
for i in range(2, int(n ** 0.5 + 1)):
if n % i == 0:
return False
return True
def siqs_factor_base_primes(n, nf):
global small_primes
factor_base = []
for p in small_primes:
if is_quadratic_residue(n, p):
t = sqrt_mod_prime(n % p, p)
lp = round(log2(p))
factor_base.append(FactorBasePrime(p, t, lp))
if len(factor_base) >= nf:
break
return factor_base
def siqs_find_first_poly(n, m, factor_base):
p_min_i = None
p_max_i = None
for i, fb in enumerate(factor_base):
if p_min_i is None and fb.p >= SIQS_MIN_PRIME_POLYNOMIAL:
p_min_i = i
if p_max_i is None and fb.p > SIQS_MAX_PRIME_POLYNOMIAL:
p_max_i = i - 1
break
if p_max_i is None:
p_max_i = len(factor_base) - 1
if p_min_i is None or p_max_i - p_min_i < 20:
p_min_i = min(p_min_i, 5)
target = sqrt(2 * float(n)) / m
target1 = target / ((factor_base[p_min_i].p + factor_base[p_max_i].p) / 2) ** 0.5
best_q, best_a, best_ratio = None, None, None
for _ in range(30):
a = 1
q = []
while a < target1:
p_i = 0
while p_i == 0 or p_i in q:
p_i = randint(p_min_i, p_max_i)
p = factor_base[p_i].p
a *= p
q.append(p_i)
ratio = a / target
if (best_ratio is None or (ratio >= 0.9 and ratio < best_ratio) or best_ratio < 0.9 and ratio > best_ratio):
best_q = q
best_a = a
best_ratio = ratio
a = best_a
q = best_q
s = len(q)
B = []
for l in range(s):
fb_l = factor_base[q[l]]
q_l = fb_l.p
gamma = (fb_l.tmem * inv_mod(a // q_l, q_l)) % q_l
if gamma > q_l // 2:
gamma = q_l - gamma
B.append(a // q_l * gamma)
b = sum(B) % a
b_orig = b
if (2 * b > a):
b = a - b
g = Polynomial([b * b - n, 2 * a * b, a * a], a, b_orig)
h = Polynomial([b, a])
for fb in factor_base:
if a % fb.p != 0:
fb.ainv = inv_mod(a, fb.p)
fb.soln1 = (fb.ainv * (fb.tmem - b)) % fb.p
fb.soln2 = (fb.ainv * (-fb.tmem - b)) % fb.p
return g, h, B
def siqs_find_next_poly(n, factor_base, i, g, B):
v = lowest_set_bit(i) + 1
z = -1 if ceil(i / (2 ** v)) % 2 == 1 else 1
b = (g.b + 2 * z * B[v - 1]) % g.a
a = g.a
b_orig = b
if (2 * b > a):
b = a - b
g = Polynomial([b * b - n, 2 * a * b, a * a], a, b_orig)
h = Polynomial([b, a])
for fb in factor_base:
if a % fb.p != 0:
fb.soln1 = (fb.ainv * (fb.tmem - b)) % fb.p
fb.soln2 = (fb.ainv * (-fb.tmem - b)) % fb.p
return g, h
def siqs_sieve(factor_base, m):
sieve_array = [0] * (2 * m + 1)
for fb in factor_base:
if fb.soln1 is None:
continue
p = fb.p
i_start_1 = -((m + fb.soln1) // p)
a_start_1 = fb.soln1 + i_start_1 * p
lp = fb.lp
if p > 20:
for a in range(a_start_1 + m, 2 * m + 1, p):
sieve_array[a] += lp
i_start_2 = -((m + fb.soln2) // p)
a_start_2 = fb.soln2 + i_start_2 * p
for a in range(a_start_2 + m, 2 * m + 1, p):
sieve_array[a] += lp
return sieve_array
def siqs_trial_divide(a, factor_base):
divisors_idx = []
for i, fb in enumerate(factor_base):
if a % fb.p == 0:
exp = 0
while a % fb.p == 0:
a //= fb.p
exp += 1
divisors_idx.append((i, exp))
if a == 1:
return divisors_idx
return None
def siqs_trial_division(n, sieve_array, factor_base, smooth_relations, g, h, m, req_relations):
sqrt_n = sqrt(float(n))
limit = log2(m * sqrt_n) - SIQS_TRIAL_DIVISION_EPS
for (i, sa) in enumerate(sieve_array):
if sa >= limit:
x = i - m
gx = g.eval(x)
divisors_idx = siqs_trial_divide(gx, factor_base)
if divisors_idx is not None:
u = h.eval(x)
v = gx
smooth_relations.append((u, v, divisors_idx))
if (len(smooth_relations) >= req_relations):
return True
return False
def siqs_build_matrix(factor_base, smooth_relations):
fb = len(factor_base)
M = []
for sr in smooth_relations:
mi = [0] * fb
for j, exp in sr[2]:
mi[j] = exp % 2
M.append(mi)
return M
def siqs_build_matrix_opt(M):
m = len(M[0])
cols_binary = [''] * m
for mi in M:
for j, mij in enumerate(mi):
cols_binary[j] += '1' if mij else '0'
return [int(cols_bin[::-1], 2) for cols_bin in cols_binary], len(M), m
def add_column_opt(M_opt, tgt, src):
M_opt[tgt] ^= M_opt[src]
def find_pivot_column_opt(M_opt, j):
if M_opt[j] == 0:
return None
return lowest_set_bit(M_opt[j])
def siqs_solve_matrix_opt(M_opt, n, m):
row_is_marked = [False] * n
pivots = [-1] * m
for j in range(m):
i = find_pivot_column_opt(M_opt, j)
if i is not None:
pivots[j] = i
row_is_marked[i] = True
for k in range(m):
if k != j and (M_opt[k] >> i) & 1:
add_column_opt(M_opt, k, j)
perf_squares = []
for i in range(n):
if not row_is_marked[i]:
perfect_sq_indices = [i]
for j in range(m):
if (M_opt[j] >> i) & 1:
perfect_sq_indices.append(pivots[j])
perf_squares.append(perfect_sq_indices)
return perf_squares
def siqs_calc_sqrts(square_indices, smooth_relations):
res = [1, 1]
for idx in square_indices:
res[0] *= smooth_relations[idx][0]
res[1] *= smooth_relations[idx][1]
res[1] = sqrt_int(res[1])
return res
def sqrt_int(n):
a = n
s = 0
o = 1 << (floor(log2(n)) & ~1)
while o != 0:
t = s + o
if a >= t:
a -= t
s = (s >> 1) + o
else:
s >>= 1
o >>= 2
return s
def kth_root_int(n, k):
u = n
s = n + 1
while u < s:
s = u
t = (k - 1) * s + n // pow(s, k - 1)
u = t // k
return s
def siqs_factor_from_square(n, square_indices, smooth_relations):
sqrt1, sqrt2 = siqs_calc_sqrts(square_indices, smooth_relations)
return gcd(abs(sqrt1 - sqrt2), n)
def siqs_find_factors(n, perfect_squares, smooth_relations):
factors = []
rem = n
non_prime_factors = set()
prime_factors = set()
for square_indices in perfect_squares:
fact = siqs_factor_from_square(n, square_indices, smooth_relations)
if fact != 1 and fact != rem:
if is_prime(fact):
if fact not in prime_factors:
prime_factors.add(fact)
while rem % fact == 0:
factors.append(fact)
rem //= fact
if rem == 1:
break
if is_prime(rem):
factors.append(rem)
rem = 1
break
else:
if fact not in non_prime_factors:
non_prime_factors.add(fact)
if rem != 1 and non_prime_factors:
non_prime_factors.add(rem)
for fact in sorted(siqs_find_more_factors_gcd(non_prime_factors)):
while fact != 1 and rem % fact == 0:
factors.append(fact)
rem //= fact
if rem == 1 or is_prime(rem):
break
if rem != 1:
factors.append(rem)
return factors
def siqs_find_more_factors_gcd(numbers):
res = set()
for n in numbers:
res.add(n)
for m in numbers:
if n != m:
fact = gcd(n, m)
if fact != 1 and fact != n and fact != m:
if fact not in res:
res.add(fact)
res.add(n // fact)
res.add(m // fact)
return res
def siqs_choose_nf_m(d):
if d <= 34:
return 200, 65536
if d <= 36:
return 300, 65536
if d <= 38:
return 400, 65536
if d <= 40:
return 500, 65536
if d <= 42:
return 600, 65536
if d <= 44:
return 700, 65536
if d <= 48:
return 1000, 65536
if d <= 52:
return 1200, 65536
if d <= 56:
return 2000, 65536 * 3
if d <= 60:
return 4000, 65536 * 3
if d <= 66:
return 6000, 65536 * 3
if d <= 74:
return 10000, 65536 * 3
if d <= 80:
return 30000, 65536 * 3
if d <= 88:
return 50000, 65536 * 3
if d <= 94:
return 60000, 65536 * 9
return 100000, 65536 * 9
def siqs_factorise(n):
dig = len(str(n))
nf, m = siqs_choose_nf_m(dig)
factor_base = siqs_factor_base_primes(n, nf)
required_relations_ratio = 1.05
success = False
smooth_relations = []
prev_cnt = 0
i_poly = 0
while not success:
required_relations = round(len(factor_base) * required_relations_ratio)
enough_relations = False
while not enough_relations:
if i_poly == 0:
g, h, B = siqs_find_first_poly(n, m, factor_base)
else:
g, h = siqs_find_next_poly(n, factor_base, i_poly, g, B)
i_poly += 1
if i_poly >= 2 ** (len(B) - 1):
i_poly = 0
sieve_array = siqs_sieve(factor_base, m)
enough_relations = siqs_trial_division(n, sieve_array, factor_base, smooth_relations, g, h, m, required_relations)
if (len(smooth_relations) >= required_relations or i_poly % 8 == 0 and len(smooth_relations) > prev_cnt):
prev_cnt = len(smooth_relations)
M = siqs_build_matrix(factor_base, smooth_relations)
M_opt, M_n, M_m = siqs_build_matrix_opt(M)
perfect_squares = siqs_solve_matrix_opt(M_opt, M_n, M_m)
factors = siqs_find_factors(n, perfect_squares, smooth_relations)
if len(factors) > 1:
success = True
else:
required_relations_ratio += 0.05
return factors
def check_factor(n, i, factors):
while n % i == 0:
n //= i
factors.append(i)
if is_prime(n):
factors.append(n)
n = 1
return n
def trial_div_init_primes(n, upper_bound):
global small_primes
is_prime = [True] * (upper_bound + 1)
is_prime[0:2] = [False] * 2
factors = []
small_primes = []
max_i = sqrt_int(upper_bound)
rem = n
for i in range(2, max_i + 1):
if is_prime[i]:
small_primes.append(i)
rem = check_factor(rem, i, factors)
if rem == 1:
return factors, 1
for j in (range(i ** 2, upper_bound + 1, i)):
is_prime[j] = False
for i in range(max_i + 1, upper_bound + 1):
if is_prime[i]:
small_primes.append(i)
rem = check_factor(rem, i, factors)
if rem == 1:
return factors, 1
return factors, rem
def pollard_brent_f(c, n, x):
x1 = (x * x) % n + c
if x1 >= n:
x1 -= n
return x1
def pollard_brent_find_factor(n, max_iter = None):
y, c, m = (randint(1, n - 1) for _ in range(3))
r, q, g = 1, 1, 1
i = 0
while g == 1:
x = y
for _ in range(r):
y = pollard_brent_f(c, n, y)
k = 0
while k < r and g == 1:
ys = y
for _ in range(min(m, r - k)):
y = pollard_brent_f(c, n, y)
q = (q * abs(x - y)) % n
g = gcd(q, n)
k += m
r *= 2
if max_iter:
i += 1
if (i == max_iter):
return None
if g == n:
while True:
ys = pollard_brent_f(c, n, ys)
g = gcd(abs(x - ys), n)
if g > 1:
break
return g
def pollard_brent_quick(n, factors):
rem = n
while True:
if is_prime(rem):
factors.append(rem)
rem = 1
break
digits = len(str(n))
if digits < MIN_DIGITS_POLLARD_QUICK2:
max_iter = POLLARD_QUICK_ITERATIONS
else:
max_iter = POLLARD_QUICK2_ITERATIONS
f = pollard_brent_find_factor(rem, max_iter)
if f and f < rem:
if is_prime(f):
factors.append(f)
rem //= f
else:
rem_f = pollard_brent_quick(f, factors)
rem = (rem // f) * rem_f
else:
break
return rem
def check_perfect_power(n):
largest_checked_prime = small_primes[-1]
for b in small_primes:
bth_root = kth_root_int(n, b)
if bth_root < largest_checked_prime:
break
if (bth_root ** b == n):
return (bth_root, b)
return None
def find_prime_factors(n):
perfect_power = check_perfect_power(n)
if perfect_power:
factors = [perfect_power[0]]
else:
digits = len(str(n))
if digits <= MAX_DIGITS_POLLARD:
factors = [pollard_brent_find_factor(n)]
else:
factors = siqs_factorise(n)
prime_factors = []
for f in set(factors):
for pf in find_all_prime_factors(f):
prime_factors.append(pf)
return prime_factors
def find_all_prime_factors(n):
rem = n
factors = []
while rem > 1:
if is_prime(rem):
factors.append(rem)
break
for f in find_prime_factors(rem):
while rem % f == 0:
rem //= f
factors.append(f)
return factors
def factor(n):
if type(n) != int or n < 1:
return
if n == 1:
return []
if is_prime(n):
return [n]
factors, rem = trial_div_init_primes(n, 1000000)
if rem != 1:
digits = len(str(rem))
if digits > MAX_DIGITS_POLLARD:
rem = pollard_brent_quick(rem, factors)
if rem > 1:
for fr in find_all_prime_factors(rem):
factors.append(fr)
factors.sort()
return factors
return factor(n)
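
# --- Illustrative example (not part of the CLI) ------------------------------
# factor_siqs returns the prime factorisation with multiplicity; on small
# inputs the trial-division stage already finishes the job, so the check below
# stays cheap even though the SIQS machinery is available for large n.
def _demo_factor_siqs():
    assert factor_siqs(2 ** 4 * 3 * 17) == [2, 2, 2, 2, 3, 17]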
def factor_mpqs(n):
'''
    Return the list of prime factors of n, with multiplicity.
'''
PRIMES_31 = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31)
PRIMONIAL_31 = reduce(mul, PRIMES_31)
def lcm(a, b):
return a // gcd(a, b) * b
class FactoredInteger():
def __init__(self, integer, factors = None):
self.integer = int(integer)
if factors is None:
self.factors = dict(_factor(self.integer)[0])
else:
self.factors = dict(factors)
@classmethod
def from_partial_factorization(cls, integer, partial):
partial_factor = 1
            for p, e in partial.items():
partial_factor *= p ** e
return cls(integer // partial_factor) * cls(partial_factor, partial)
def __iter__(self):
            return iter(self.factors.items())
def __mul__(self, other):
if isinstance(other, FactoredInteger):
integer = self.integer * other.integer
new_factors = self.factors.copy()
for p in other.factors:
new_factors[p] = new_factors.get(p, 0) + other.factors[p]
return self.__class__(integer, new_factors)
else:
return self * FactoredInteger(other)
__rmul__ = __mul__
def __pow__(self, other):
new_integer = self.integer ** other
new_factors = {}
for p in self.factors:
new_factors[p] = self.factors[p] * other
return self.__class__(new_integer, new_factors)
def __mod__(self, other):
try:
if other.integer in self.factors:
return 0
return self.integer % other.integer
except AttributeError:
if int(other) in self.factors:
return 0
return self.integer % int(other)
def copy(self):
return self.__class__(self.integer, self.factors.copy())
def is_divisible_by(self, other):
if int(other) in self.factors:
return True
return not self.integer % int(other)
def exact_division(self, other):
divisor = int(other)
quotient = self.copy()
if divisor in quotient.factors:
if quotient.factors[divisor] == 1:
del quotient.factors[divisor]
else:
quotient.factors[divisor] -= 1
elif not isinstance(other, FactoredInteger):
dividing = divisor
                for p, e in self.factors.items():
while not dividing % p:
dividing //= p
if quotient.factors[p] == 1:
del quotient.factors[p]
assert dividing % p, dividing
else:
quotient.factors[p] -= 1
if dividing == 1:
break
assert dividing == 1
else:
                for p, e in other.factors.items():
assert p in quotient.factors and quotient.factors[p] >= e
if quotient.factors[p] == e:
del quotient.factors[p]
else:
quotient.factors[p] -= e
quotient.integer //= divisor
return quotient
__floordiv__ = exact_division
def divisors(self):
divs = [FactoredInteger(1)]
            for p, e in self.factors.items():
q = FactoredInteger(1)
pcoprimes = list(divs)
for j in range(1, e + 1):
q *= FactoredInteger(p, {p:1})
divs += [k * q for k in pcoprimes]
return divs
def proper_divisors(self):
return self.divisors()[1:-1]
def prime_divisors(self):
return self.factors.keys()
class TestPrime():
primes = PRIMES_31
primecache = set(primes)
def __init__(self, t = 12):
if isinstance(t, int):
self.t = FactoredInteger(t)
else:
assert isinstance(t, FactoredInteger)
self.t = t
powerof2 = self.t.factors[2] + 2
self.et = FactoredInteger(2 ** powerof2, {2:powerof2})
for d in self.t.divisors():
p = d.integer + 1
if p & 1 and (p in self.primecache or is_prime(p, d.factors)):
self.et = self.et * FactoredInteger(p, {p:1})
if p in self.t.factors:
e = self.t.factors[p]
self.et = self.et * FactoredInteger(p ** e, {p:e})
self.primecache.add(p)
def next(self):
eu = []
for p in self.primes:
if p in self.t.factors:
eu.append((p - 1) * p ** (self.t.factors[p] - 1))
else:
eu.append(p - 1)
break
p = self.primes[eu.index(min(eu))]
return self.__class__(self.t * FactoredInteger(p, {p:1}))
def primitive_root(p):
pd = FactoredInteger(p - 1).proper_divisors()
for i in range(2, p):
for d in pd:
if pow(i, (p - 1) // d, p) == 1:
break
else:
return i
class Zeta():
def __init__(self, size, pos = None, val = 1):
self.size = size
self.z = [0] * self.size
if pos is not None:
self.z[pos % self.size] = val
def __add__(self, other):
if self.size == other.size:
m = self.size
zr_a = Zeta(m)
for i in range(m):
zr_a.z[i] = self.z[i] + other.z[i]
return zr_a
else:
m = lcm(self.size, other.size)
return self.promote(m) + other.promote(m)
def __mul__(self, other):
if not isinstance(other, Zeta):
zr_m = Zeta(self.size)
zr_m.z = [x * other for x in self.z]
return zr_m
elif self.size == other.size:
zr_m = Zeta(self.size)
other = abs(other)
for k in range(other.size):
if not other.z[k]:
continue
elif other.z[k] == 1:
zr_m = zr_m + (self << k)
else:
zr_m = zr_m + (self << k) * other.z[k]
return zr_m
else:
m = lcm(self.size, other.size)
return self.promote(m) * other.promote(m)
__rmul__ = __mul__
def promote(self, size):
if size == self.size:
return abs(self)
new = Zeta(size)
r = size // self.size
for i in range(self.size):
new.z[i * r] = self.z[i]
return new
def weight(self):
            return len([zi for zi in self.z if zi])
def mass(self):
return sum(self.z)
def is_prime(n):
if n in [2, 3, 5, 7]:
return True
if not (n % 10 % 2) or n % 10 not in [1, 3, 7, 9] or n <= 1 or not isinstance(n, int):
return False
if gcd(n, PRIMONIAL_31) > 1:
return (n in PRIMES_31)
if n < 999999999999999:
for i in range(2, int(n ** 0.5 + 1)):
if n % i == 0:
return False
return True
if not smallSpsp(n):
return False
if n < 10 ** 12:
return True
return apr(n)
class Status():
def __init__(self):
self.d = {}
def yet(self, key):
self.d[key] = 0
def done(self, key):
self.d[key] = 1
def yet_keys(self):
return [k for k in self.d if not self.d[k]]
def isDone(self, key):
return self.d[key]
def subodd(self, p, q, n, J):
s = J.get(1, p, q)
Jpq = J.get(1, p, q)
m = s.size
for x in range(2, m):
if x % p == 0:
continue
sx = Zeta(m)
i = x
j = 1
while i > 0:
sx[j] = Jpq[i]
i = (i + x) % m
j += 1
sx[0] = Jpq[0]
sx = pow(sx, x, n)
s = s * sx % n
s = pow(s, n // m, n)
r = n % m
t = 1
for x in range(1, m):
if x % p == 0:
continue
c = (r * x) // m
if c:
tx = Zeta(m)
i = x
j = 1
while i > 0:
tx[j] = Jpq[i]
i = (i + x) % m
j += 1
tx[0] = Jpq[0]
tx = pow(tx, c, n)
t = t * tx % n
s = abs(t * s % n)
if s.weight() == 1 and s.mass() == 1:
for i in range(1, m):
if gcd(m, s.z.index(1)) == 1:
self.done(p)
return True
return False
def sub8(self, q, k, n, J):
s = J.get(3, q)
J3 = J.get(3, q)
m = len(s)
sx_z = {1:s}
x = 3
step = 2
while m > x:
z_4b = Zeta(m)
i = x
j = 1
while i != 0:
z_4b[j] = J3[i]
i = (i + x) % m
j += 1
z_4b[0] = J3[0]
sx_z[x] = z_4b
s = pow(sx_z[x], x, n) * s
step = 8 - step
x += step
s = pow(s, n // m, n)
r = n % m
step = 2
x = 3
while m > x:
c = r*x
if c > m:
s = pow(sx_z[x], c // m, n) * s
step = 8 - step
x += step
r = r & 7
if r == 5 or r == 7:
s = J.get(2, q).promote(m) * s
s = abs(s % n)
if s.weight() == 1 and s.mass() == 1:
if gcd(m, s.z.index(1)) == 1 and pow(q, (n-1) >> 1, n) == n-1:
self.done(2)
return True
elif s.weight() == 1 and s.mass() == n-1:
if gcd(m, s.z.index(n-1)) == 1 and pow(q, (n-1) >> 1, n) == n-1:
self.done(2)
return True
return False
def sub4(self, q, n, J):
j2 = J.get(1, 2, q) ** 2
s = q * j2 % n
s = pow(s, n >> 2, n)
if n & 3 == 3:
s = s * j2 % n
s = abs(s % n)
if s.weight() == 1 and s.mass() == 1:
i = s.z.index(1)
if (i == 1 or i == 3) and pow(q, (n-1) >> 1, n) == n-1:
self.done(2)
return True
return False
def sub2(self, q, n):
s = pow(n - q, (n - 1) >> 1, n)
if s == n-1:
if n & 3 == 1:
self.done(2)
elif s != 1:
return False
return True
def subrest(self, p, n, et, J, ub = 200):
if p == 2:
q = 5
while q < 2 * ub + 5:
q += 2
if not is_prime(q) or et % q == 0:
continue
if n % q == 0:
return False
k = vp(q - 1, 2)[0]
if k == 1:
if n & 3 == 1 and not self.sub2(q, n):
return False
elif k == 2:
if not self.sub4(q, n, J):
return False
else:
if not self.sub8(q, k, n, J):
return False
if self.isDone(p):
return True
else:
return
else:
step = p * 2
q = 1
while q < step * ub + 1:
q += step
if not is_prime(q) or et % q == 0:
continue
if n % q == 0:
return False
if not self.subodd(p, q, n, J):
return False
if self.isDone(p):
return True
else:
return
def _factor(n):
def factor(n):
if n % 2 == 0:
return 2
a = 2
i = 2
while True:
a = pow(a, i, n)
d = gcd(a - 1, n)
if d > 1:
return d
i += 1
num = n
ans = []
if is_prime(n):
ans.append(n)
return ans
while True:
d = factor(num)
ans.append(d)
r = num // d
if is_prime(r):
ans.append(r)
break
else:
num = r
ans.sort()
result = list(set([(x, ans.count(x)) for x in ans]))
return result, ans
class JacobiSum():
def __init__(self):
self.shelve = {}
def get(self, group, p, q = None):
if q:
assert group == 1
if (group, p, q) not in self.shelve:
self.make(q)
return self.shelve[group, p, q]
else:
assert group == 2 or group == 3
if (group, p) not in self.shelve:
self.make(p)
return self.shelve[group, p]
def make(self, q):
fx = self.makefx(q)
qpred = q - 1
qt = _factor(qpred)[0]
qt2 = [k for (p, k) in qt if p == 2][0]
k, pk = qt2, 2 ** qt2
if k >= 2:
J2q = Zeta(pk, 1 + fx[1])
for j in range(2, qpred):
J2q[j + fx[j]] = J2q[j + fx[j]] + 1
self.shelve[1, 2, q] = +J2q
if k >= 3:
J2 = Zeta(8, 3 + fx[1])
J3 = Zeta(pk, 2 + fx[1])
for j in range(2, qpred):
J2[j * 3 + fx[j]] = J2[j * 3 + fx[j]] + 1
J3[j * 2 + fx[j]] = J3[j * 2 + fx[j]] + 1
self.shelve[3, q] = abs(self.shelve[1, 2, q] * J3)
self.shelve[2, q] = abs(J2 ** 2)
else:
self.shelve[1, 2, q] = 1
for (p, k) in qt:
if p == 2:
continue
pk = p ** k
Jpq = Zeta(pk, 1 + fx[1])
for j in range(2, qpred):
Jpq[j + fx[j]] = Jpq[j + fx[j]] + 1
self.shelve[1, p, q] = +Jpq
@staticmethod
def makefx(q):
g = primitive_root(q)
qpred = q - 1
qd2 = qpred >> 1
g_mf = [0, g]
for _ in range(2, qpred):
g_mf.append((g_mf[-1] * g) % q)
fx = {}
for i in range(1, qd2):
if i in fx:
continue
j = g_mf.index(q + 1 - g_mf[i])
fx[i] = j
fx[j] = i
fx[qpred - i] = (j - i + qd2) % qpred
fx[fx[qpred - i]] = qpred - i
fx[qpred - j] = (i - j + qd2) % qpred
fx[fx[qpred - j]] = qpred - j
return fx
def apr(n):
L = Status()
rb = floorsqrt(n) + 1
el = TestPrime()
while el.et <= rb:
el = el.next()
        plist = list(el.t.factors.keys())
        plist.remove(2)
L.yet(2)
for p in plist:
if pow(n, p - 1, p ** 2) != 1:
L.done(p)
else:
L.yet(p)
        qlist = list(el.et.factors.keys())
        qlist.remove(2)
J = JacobiSum()
for q in qlist:
for p in plist:
if (q - 1) % p != 0:
continue
if not L.subodd(p, q, n, J):
return False
k = vp(q - 1, 2)[0]
if k == 1:
if not L.sub2(q, n):
return False
elif k == 2:
if not L.sub4(q, n, J):
return False
else:
if not L.sub8(q, k, n, J):
return False
for p in L.yet_keys():
if not L.subrest(p, n, el.et, J):
return False
r = int(n)
for _ in range(1, el.t.integer):
r = (r * n) % el.et.integer
if n % r == 0 and r != 1 and r != n:
return False
return True
def spsp(n, base, s = None, t = None):
if s is None or t is None:
s, t = vp(n - 1, 2)
z = pow(base, t, n)
if z != 1 and z != n-1:
j = 0
while j < s:
j += 1
z = pow(z, 2, n)
if z == n - 1:
break
else:
return False
return True
def smallSpsp(n, s = None, t = None):
if s is None or t is None:
s, t = vp(n - 1, 2)
for p in (2, 13, 23, 1662803):
if not spsp(n, p, s, t):
return False
return True
def extgcd(x, y):
a, b, g, u, v, w = 1, 0, x, 0, 1, y
while w:
q, t = divmod(g, w)
a, b, g, u, v, w = u, v, w, a - q * u, b - q * v, t
if g >= 0:
return (a, b, g)
else:
return (-a, -b, -g)
def legendre(a, m):
a %= m
symbol = 1
while a != 0:
while a & 1 == 0:
a >>= 1
if m & 7 == 3 or m & 7 == 5:
symbol = -symbol
a, m = m, a
if a & 3 == 3 and m & 3 == 3:
symbol = -symbol
a %= m
if m == 1:
return symbol
return 0
def inverse(x, n):
x %= n
y = extgcd(n, x)
if y[2] == 1:
if y[1] < 0:
r = n + y[1]
return r
else:
return y[1]
def vp(n, p, k = 0):
q = p
while not (n % q):
k += 1
q *= p
return (k, n // (q // p))
def modsqrt(n, p, e = 1):
if 1 < e:
x = modsqrt(n, p)
if 0 == x:
return
ppower = p
z = inverse(x << 1, p)
for i in range(e - 1):
x += (n - x ** 2) // ppower * z % p * ppower
ppower *= p
return x
symbol = legendre(n, p)
if symbol == 1:
pmod8 = p & 7
if pmod8 != 1:
n %= p
if pmod8 == 3 or pmod8 == 7:
x = pow(n, (p >> 2) + 1, p)
else:
x = pow(n, (p >> 3) + 1, p)
c = pow(x, 2, p)
if c != n:
x = (x * pow(2, p >> 2, p)) % p
else:
d = 2
while legendre(d, p) != -1:
d = randrange(3, p)
s, t = vp(p-1, 2)
A = pow(n, t, p)
D = pow(d, t, p)
m = 0
for i in range(1, s):
if pow(A*(D**m), 1 << (s-1-i), p) == (p-1):
m += 1 << i
x = (pow(n, (t+1) >> 1, p) * pow(D, m >> 1, p)) % p
return x
elif symbol == 0:
return 0
else:
return
def floorsqrt(a):
if a < (1 << 59):
return int(sqrt(a))
else:
x = pow(10, (int(log(a, 10)) >> 1) + 1)
while True:
x_new = (x + a // x) >> 1
if x <= x_new:
return x
x = x_new
class QS(object):
def __init__(self, n, sieverange, factorbase):
self.number = n
self.sqrt_n = int(sqrt(n))
for i in PRIMES_31:
if n % i == 0:
return n % 1
self.digit = log(self.number, 10) + 1
self.Srange = sieverange
self.FBN = factorbase
self.move_range = range(self.sqrt_n - self.Srange, self.sqrt_n + self.Srange + 1)
i = 0
k = 0
factor_base = [-1]
FB_log = [0]
while True:
ii = primes_table[i]
if legendre(self.number, ii) == 1:
factor_base.append(ii)
FB_log.append(primes_log_table[i])
k += 1
i += 1
if k == self.FBN:
break
else:
i += 1
self.FB = factor_base
self.FB_log = FB_log
self.maxFB = factor_base[-1]
N_sqrt_list = []
for i in self.FB:
if i != 2 and i != -1:
e = int(log(2*self.Srange, i))
N_sqrt_modp = sqroot_power(self.number, i, e)
N_sqrt_list.append(N_sqrt_modp)
self.solution = N_sqrt_list
poly_table = []
log_poly = []
minus_val = []
for j in self.move_range:
jj = (j ** 2) - self.number
if jj < 0:
jj = -jj
minus_val.append(j - self.sqrt_n + self.Srange)
elif jj == 0:
jj = 1
lj = int((log(jj) * 30) * 0.97)
poly_table.append(jj)
log_poly.append(lj)
self.poly_table = poly_table
self.log_poly = log_poly
self.minus_check = minus_val
def run_sieve(self):
M = self.Srange
start_location = []
logp = [0] * (2 * M + 1)
j = 2
for i in self.solution:
k = 0
start_p = []
while k < len(i):
q = int((self.sqrt_n) / (self.FB[j] ** (k + 1)))
s_1 = q * (self.FB[j] ** (k + 1)) + i[k][0]
s_2 = q * (self.FB[j] ** (k + 1)) + i[k][1]
while True:
if s_1 < self.sqrt_n-M:
s_1 += (self.FB[j] ** (k + 1))
break
else:
s_1 -= (self.FB[j] ** (k + 1))
while True:
if s_2 < self.sqrt_n-M:
s_2 += (self.FB[j] ** (k + 1))
break
else:
s_2 -= (self.FB[j] ** (k + 1))
start_p.append([s_1 - self.sqrt_n + M, s_2 - self.sqrt_n + M])
k += 1
start_location.append(start_p)
j += 1
self.start_location = start_location
if self.poly_table[0] & 1 == 0:
i = 0
while i <= 2 * M:
j = 1
while True:
if self.poly_table[i] % (2 ** (j + 1)) == 0:
j += 1
else:
break
logp[i] += self.FB_log[1] * j
i += 2
else:
i = 1
while i <= 2 * M:
j = 1
while True:
if self.poly_table[i] % (2 ** (j + 1)) == 0:
j += 1
else:
break
logp[i] += self.FB_log[1] * j
i += 2
L = 2
for j in self.start_location:
k = 0
while k < len(j):
s_1 = j[k][0]
s_2 = j[k][1]
h_1 = 0
h_2 = 0
while s_1 + h_1 <= 2 * M:
logp[s_1 + h_1] += self.FB_log[L]
h_1 += self.FB[L] ** (k + 1)
while s_2 + h_2 <= 2 * M:
logp[s_2 + h_2] += self.FB_log[L]
h_2 += self.FB[L] ** (k + 1)
k += 1
L += 1
self.logp = logp
smooth = []
for t in range(2 * M + 1):
if logp[t] >= self.log_poly[t]:
poly_val = self.poly_table[t]
index_vector = []
for p in self.FB:
if p == -1:
if t in self.minus_check:
index_vector.append(1)
else:
index_vector.append(0)
else:
r = 0
while poly_val % (p ** (r + 1)) == 0:
r += 1
v = r & 1
index_vector.append(v)
smooth.append([index_vector, (poly_val, t + self.sqrt_n - M)])
self.smooth = smooth
return smooth
class MPQS(object):
def __init__(self, n, sieverange = 0, factorbase = 0, multiplier = 0):
self.number = n
if is_prime(self.number):
return [n]
for i in PRIMES_31:
if n % i == 0:
return n % 1
self.sievingtime = 0
self.coefficienttime = 0
self.d_list = []
self.a_list = []
self.b_list = []
self.digit = int(log(self.number, 10) + 1)
if sieverange != 0:
self.Srange = sieverange
if factorbase != 0:
self.FBN = factorbase
elif self.digit < 9:
self.FBN = parameters_for_mpqs[0][1]
else:
self.FBN = parameters_for_mpqs[self.digit - 9][1]
elif factorbase != 0:
self.FBN = factorbase
if self.digit < 9:
self.Srange = parameters_for_mpqs[0][0]
else:
self.Srange = parameters_for_mpqs[self.digit - 9][0]
elif self.digit < 9:
self.Srange = parameters_for_mpqs[0][0]
self.FBN = parameters_for_mpqs[0][1]
elif self.digit > 53:
self.Srange = parameters_for_mpqs[44][0]
self.FBN = parameters_for_mpqs[44][1]
else:
self.Srange = parameters_for_mpqs[self.digit - 9][0]
self.FBN = parameters_for_mpqs[self.digit - 9][1]
self.move_range = range(-self.Srange, self.Srange + 1)
if multiplier == 0:
self.sqrt_state = []
for i in [3, 5, 7, 11, 13]:
s = legendre(self.number, i)
self.sqrt_state.append(s)
if self.number % 8 == 1 and self.sqrt_state == [1, 1, 1, 1, 1]:
k = 1
else:
index8 = (self.number & 7) >> 1
j = 0
while self.sqrt_state != prime_8[index8][j][1]:
j += 1
k = prime_8[index8][j][0]
else:
if n & 3 == 1:
k = 1
else:
if multiplier == 1:
return n
else:
k = multiplier
self.number = k * self.number
self.multiplier = k
i = 0
k = 0
factor_base = [-1]
FB_log = [0]
while k < self.FBN:
ii = primes_table[i]
if legendre(self.number,ii) == 1:
factor_base.append(ii)
FB_log.append(primes_log_table[i])
k += 1
i += 1
self.FB = factor_base
self.FB_log = FB_log
self.maxFB = factor_base[-1]
N_sqrt_list = []
for i in self.FB:
if i != 2 and i != -1:
e = int(log(2 * self.Srange, i))
N_sqrt_modp = sqroot_power(self.number, i, e)
N_sqrt_list.append(N_sqrt_modp)
self.Nsqrt = N_sqrt_list
def make_poly(self):
if self.d_list == []:
d = int(sqrt((sqrt(self.number) / (sqrt(2) * self.Srange))))
if d & 1 == 0:
if (d + 1)& 3 == 1:
d += 3
else:
d += 1
elif d & 3 == 1:
d += 2
else:
d = self.d_list[-1]
while d in self.d_list or not is_prime(d) or legendre(self.number, d) != 1 or d in self.FB:
d += 4
a = d ** 2
h_0 = pow(self.number, (d - 3) >> 2, d)
h_1 = (h_0*self.number) % d
h_2 = ((inverse(2, d) * h_0 * (self.number - h_1 ** 2)) // d) % d
b = (h_1 + h_2 * d) % a
if b & 1 == 0:
b -= a
self.d_list.append(d)
self.a_list.append(a)
self.b_list.append(b)
solution = []
i = 0
for s in self.Nsqrt:
k = 0
p_solution = []
ppow = 1
while k < len(s):
ppow *= self.FB[i+2]
a_inverse = inverse(2 * self.a_list[-1], ppow)
x_1 = ((-b + s[k][0]) * a_inverse) % ppow
x_2 = ((-b + s[k][1]) * a_inverse) % ppow
p_solution.append([x_1, x_2])
k += 1
i += 1
solution.append(p_solution)
self.solution = solution
def run_sieve(self):
self.make_poly()
M = self.Srange
a = self.a_list[-1]
b = self.b_list[-1]
c = (b ** 2 - self.number) // (4 * a)
d = self.d_list[-1]
self.poly_table = []
self.log_poly = []
self.minus_check = []
for j in self.move_range:
jj = (a * j + b) * j + c
if jj < 0:
jj = -jj
self.minus_check.append(j + M)
elif jj == 0:
jj = 1
lj = int((log(jj) * 30) * 0.95)
self.poly_table.append(jj)
self.log_poly.append(lj)
y = inverse(2 * d, self.number)
start_location = []
logp = [0] * (2 * M + 1)
j = 2
for i in self.solution:
start_p = []
ppow = 1
for k in range(len(i)):
ppow *= self.FB[j]
q = -M // ppow
s_1 = (q + 1) * ppow + i[k][0]
s_2 = (q + 1) * ppow + i[k][1]
while s_1 + M >= ppow:
s_1 -=ppow
while s_2 + M >= ppow:
s_2 -= ppow
start_p.append([s_1 + M, s_2 + M])
start_location.append(start_p)
j += 1
self.start_location = start_location
i = self.poly_table[0] & 1
while i <= 2 * M:
j = 1
while self.poly_table[i] % (2 ** (j + 1)) == 0:
j += 1
logp[i] += self.FB_log[1] * j
i += 2
L = 2
for plocation in self.start_location:
for k in range(len(plocation)):
s_1 = plocation[k][0]
s_2 = plocation[k][1]
ppow = self.FB[L] ** (k + 1)
while s_1 <= 2 * M:
logp[s_1] += self.FB_log[L]
s_1 += ppow
while s_2 <= 2 * M:
logp[s_2] += self.FB_log[L]
s_2 += ppow
L += 1
self.logp = logp
smooth = []
for t in range(2 * M + 1):
if logp[t] >= self.log_poly[t]:
poly_val = self.poly_table[t]
index_vector = []
H = (y * (2 * a * (t-self.Srange) + b)) % self.number
for p in self.FB:
if p == -1:
if t in self.minus_check:
index_vector.append(1)
else:
index_vector.append(0)
else:
r = 0
while poly_val % (p ** (r + 1)) == 0:
r += 1
v = r & 1
index_vector.append(v)
smooth.append([index_vector, (poly_val, H)])
return smooth
def get_vector(self):
P = len(self.FB)
if P < 100:
V = -5
else:
V = 0
smooth = []
i = 0
while P * 1 > V:
n = self.run_sieve()
V += len(n)
smooth += n
i += 1
if P < 100:
V += 5
self.smooth = smooth
return smooth
class Elimination():
def __init__(self, smooth):
self.vector = []
self.history = []
i = 0
for vec in smooth:
self.vector.append(vec[0])
self.history.append({i:1})
i += 1
self.FB_number = len(self.vector[0])
self.row_size = len(self.vector)
self.historytime = 0
def vector_add(self, i, j):
V_i = self.vector[i]
V_j = self.vector[j]
k = 0
while k < len(V_i):
if V_i[k] == 1:
if V_j[k] == 1:
V_j[k] = 0
else:
V_j[k] = 1
k += 1
def transpose(self):
Transe_vector = []
i = 0
while i < self.FB_number:
j = 0
vector = []
while j < self.row_size:
vector.append(self.vector[j][i])
j += 1
Transe_vector.append(vector)
i += 1
self.Transe = Transe_vector
def history_add(self, i, j):
H_i = self.history[i].keys()
H_j = self.history[j].keys()
for k in H_i:
if k in H_j:
del self.history[j][k]
else:
self.history[j][k] = 1
def gaussian(self):
pivot = []
FBnum = self.FB_number
Smooth = len(self.vector)
for j in range(self.FB_number):
for k in range(Smooth):
if k in pivot or not self.vector[k][j]:
continue
pivot.append(k)
V_k = self.vector[k]
for h in range(Smooth):
if h in pivot or not self.vector[h][j]:
continue
self.history_add(k, h)
V_h = self.vector[h]
for q in range(j, FBnum):
if V_k[q]:
V_h[q] = not V_h[q]
break
self.pivot = pivot
zero_vector = []
for check in range(Smooth):
if check not in pivot:
g = 0
while g < FBnum:
if self.vector[check][g] == 1:
break
g += 1
if g == FBnum:
zero_vector.append(check)
return zero_vector
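# Elimination.gaussian() above performs Gaussian elimination over GF(2) on the
# exponent-parity vectors of the smooth relations; the returned row indices are
# the relations that reduce to the zero vector, i.e. combinations whose product
# is a perfect square (the actual combination is recovered from self.history).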
def qs(n, s, f):
Q = QS(n, s, f)
Q.run_sieve()
V = Elimination(Q.smooth)
A = V.gaussian()
answerX_Y = []
N_factors = []
for i in A:
B = V.history[i].keys()
X = 1
Y = 1
for j in B:
X *= Q.smooth[j][1][0]
Y *= Q.smooth[j][1][1]
Y = Y % Q.number
X = sqrt_modn(X, Q.number)
answerX_Y.append(X - Y)
for k in answerX_Y:
if k != 0:
factor = gcd(k, Q.number)
if factor not in N_factors and factor != 1 and factor != Q.number and is_prime(factor) == 1:
N_factors.append(factor)
    N_factors.sort()
    return N_factors
def mpqs(n, s = 0, f = 0, m = 0):
M = MPQS(n, s, f, m)
M.get_vector()
N = M.number // M.multiplier
V = Elimination(M.smooth)
A = V.gaussian()
answerX_Y = []
N_prime_factors = []
N_factors = []
output = []
for i in A:
B = V.history[i].keys()
X = 1
Y = 1
for j in B:
X *= M.smooth[j][1][0]
Y *= M.smooth[j][1][1]
Y %= M.number
X = sqrt_modn(X, M.number)
if X != Y:
answerX_Y.append(X-Y)
NN = 1
for k in answerX_Y:
factor = gcd(k, N)
if factor not in N_factors and factor != 1 and factor != N and factor not in N_prime_factors:
if is_prime(factor):
NN *= factor
N_prime_factors.append(factor)
else:
N_factors.append(factor)
if NN == N:
N_prime_factors.sort()
for p in N_prime_factors:
N = N // p
i = vp(N, p, 1)[0]
output.append((p, i))
return output
elif NN != 1:
f = N // NN
if is_prime(f):
N_prime_factors.append(f)
N_prime_factors.sort()
for p in N_prime_factors:
N = N // p
i = vp(N, p, 1)[0]
output.append((p, i))
return output
for F in N_factors:
for FF in N_factors:
if F != FF:
Q = gcd(F, FF)
if is_prime(Q) and Q not in N_prime_factors:
N_prime_factors.append(Q)
NN *= Q
N_prime_factors.sort()
for P in N_prime_factors:
i, N = vp(N, P)
output.append((P, i))
if N == 1:
return output
for F in N_factors:
g = gcd(N, F)
if is_prime(g):
N_prime_factors.append(g)
N = N // g
i = vp(N, g, 1)[0]
output.append((g, i))
if N == 1:
return output
elif is_prime(N):
output.append((N, 1))
return output
else:
N_factors.sort()
return output, N_factors
def eratosthenes(n):
sieve = [True] * (n + 1)
for i in range(2, int(n ** 0.5) + 1):
if sieve[i]:
for j in range(i ** 2, n + 1, i):
sieve[j] = False
return [x for x in range(2, n + 1) if sieve[x]]
def prime_mod8(n):
primes = eratosthenes(n)
PrimeList = {1:[], 3:[], 5:[], 7:[]}
LegendreList = {1:[], 3:[], 5:[], 7:[]}
sp = [2, 3, 5, 7, 11, 13]
for p in primes:
if p not in sp:
leg = [legendre(p, q) for q in sp[1:]]
if leg not in PrimeList[p & 7]:
LegendreList[p & 7].append(leg)
PrimeList[p & 7].append([p, leg])
return [PrimeList[1], PrimeList[3], PrimeList[5], PrimeList[7]]
def eratosthenes_log(n):
primes = eratosthenes(n)
primes_log = []
for i in primes:
l = int(log(i) * 30)
primes_log.append(l)
return primes_log
def sqrt_modn(n, modulo):
factorOfN = _factor(n)[0]
prod = 1
for p, e in factorOfN:
prod = (prod * pow(p, e >> 1, modulo)) % modulo
return prod
def sqroot_power(a, p, n):
x = modsqrt(a, p)
answer = [[x, p - x]]
ppower = p
    # Hensel lifting: keep the inverse of 2*x (mod p) in its own name so that the
    # loop index does not clobber it.
    inv = inverse(x << 1, p)
    for _ in range(n - 1):
        x += (a - x ** 2) // ppower * inv % p * ppower
ppower *= p
answer.append([x, ppower - x])
return answer
primes_table = eratosthenes(10 ** 5)
primes_log_table = eratosthenes_log(10 ** 5)
prime_8 = prime_mod8(8090)
mpqs_p_100 = [[100, x] for x in [20, 21, 22, 24, 26, 29, 32]]
mpqs_p_300 = [[300, x] for x in [40, 60, 80, 100, 120, 140]]
mpqs_p_2000 = [[2000, x] for x in [240, 260, 280, 325, 355, 375, 400, 425, 550]]
mpqs_p_15000 = [[15000, x] for x in [1300, 1600, 1900, 2200]]
parameters_for_mpqs = mpqs_p_100 + [[200, 35]] + mpqs_p_300 + [[600, 160]] + [[900, 180]] + [[1200, 200]] + [[1000,220]] + mpqs_p_2000 + [[3000, 650]] + [[5000, 750]] + [[4000, 850]] + [[4000, 950]] + [[5000, 1000]] + [[14000, 1150]] + mpqs_p_15000 + [[20000,2500]]
def mpqsfind(n, s = 0, f = 0, m = 0):
M = MPQS(n, s, f, m)
M.get_vector()
N = M.number // M.multiplier
V = Elimination(M.smooth)
A = V.gaussian()
differences = []
for i in A:
B = V.history[i].keys()
X = 1
Y = 1
for j in B:
X *= M.smooth[j][1][0]
Y *= M.smooth[j][1][1]
Y %= M.number
X = floorsqrt(X) % M.number
if X != Y:
differences.append(X - Y)
for diff in differences:
divisor = gcd(diff, N)
if 1 < divisor < N:
return divisor
def mpqs(n, retry = 1, min_ = 20):
num = n
ans = []
if is_prime(n):
ans.append(n)
return ans
while True:
r = num
try:
if len(str(r)) >= min_:
d = mpqsfind(num)
ans.append(d)
r = num // d
if is_prime(r):
ans.append(r)
break
else:
num = r
else:
ans = [x for x in _factor(num)[1]]
break
except TypeError:
ans = [x for x in _factor(num)[1]]
break
checked = _check_factors(ans, n, retry)
if checked == 0:
ans.sort()
return ans
return mpqs(n, checked)
return mpqs(n)
def factor_lenstra(n):
'''
Return a list that has all factors of n.
'''
class Point():
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
class Curve():
def __init__(self, a, b, m):
self.a = a
self.b = b
self.m = m
def double_point(P, curve):
X = P.x
Y = P.y
Z = P.z
a = curve.a
m = curve.m
if Y == 0:
return Point(0, 1, 0)
if Z == 0:
return P
W = a * pow(Z, 2, m) + 3 * pow(X, 2, m)
S = Y * Z
B = X * Y * S
H = pow(W, 2, m) - 8 * B
X2 = (2 * H * S) % m
Y2 = (W * (4 * B - H) - 8 * pow(Y, 2, m) * pow(S, 2, m)) % m
Z2 = pow(2 * S, 3, m)
return Point(X2, Y2, Z2)
def add_points(P1, P2, curve):
if P1.z == 0:
return P2
if P2.z == 0:
return P1
X1 = P1.x
Y1 = P1.y
Z1 = P1.z
X2 = P2.x
Y2 = P2.y
Z2 = P2.z
m = curve.m
U1 = Y2 * Z1 % m
U2 = Y1 * Z2 % m
V1 = X2 * Z1 % m
V2 = X1 * Z2 % m
if V1 == V2:
if U1 == U2:
return double_point(P1, curve)
else:
return Point(0, 1, 0)
V = (V1 - V2) % m
U = (U1 - U2) % m
W = (Z1 * Z2) % m
A = pow(U, 2, m) * W - pow(V, 3, m) - 2 * pow(V, 2, m) * V2
X3 = (V * A) % m
Y3 = (U * (pow(V, 2, m) * V2 - A) - pow(V, 3, m) * U2) % m
Z3 = (pow(V, 3, m) * W) % m
return Point(X3, Y3, Z3)
def multiply_point(P, k, curve):
if k == 1:
return P
P2 = Point(0, 1, 0)
k2 = 0
bit = 1 << (len(bin(k)) - 3)
while k != k2:
k2 <<= 1
if k2:
P2 = double_point(P2, curve)
if k & bit:
k2 += 1
P2 = add_points(P, P2, curve)
bit >>= 1
return P2
def factor(n, mode = 1, tries = 10, retry = 1):
factors = []
for i in (2, 3):
while n % i == 0:
factors.append(i)
n //= i
if n == 1:
return factors
if is_prime(n):
factors.append(n)
factors.sort()
return factors
max_points = int(2 * n ** 0.25 + n ** 0.5 + 1)
for current_try in range(1, tries + 1):
a = 0
b = 0
while (4 * pow(a, 3, n) + 27 * pow(b, 2, n)) % n == 0:
x = 1
y = current_try
a = randint(1, n - 1)
b = (pow(y, 2, n) - a * x - pow(x, 3, n)) % n
P = Point(x, y, 1)
curve = Curve(a, b, n)
P2 = P
i = 1
while True:
i += 1
if mode == 1:
P2 = multiply_point(P2, i, curve)
elif mode == 2:
if i == 2:
k = 2
k_plus = 4
elif i <= 5:
k = 2 * i - 3
else:
k += k_plus
k_plus = 6 - k_plus
k2 = k
while k2 <= max_points:
P2 = multiply_point(P2, k, curve)
k2 *= k
if P2.z == 0:
break
divisor = gcd(n, P2.z)
if divisor != 1:
divisor2 = n // divisor
f2 = factor(divisor, mode, tries)
for f in f2:
factors.append(f)
f2 = factor(divisor2, mode, tries)
for f in f2:
factors.append(f)
factors.sort()
return factors
if i >= max_points:
factors.append(n)
factors.sort()
return factors
factors.append(n)
checked = _check_factors(factors, n, retry)
if checked == 0:
factors.sort()
return factors
return factor(n, retry = checked)
return factor(n)
def factor_pollardpm1(n, retry = 1):
'''
Return a list that has all factors of n.
'''
def factor(n):
if n % 2 == 0:
return 2
a = 2
i = 2
while True:
a = pow(a, i, n)
d = gcd(a - 1, n)
if d > 1:
return d
i += 1
num = n
ans = []
if is_prime(n):
ans.append(n)
return ans
while True:
d = factor(num)
ans.append(d)
r = num // d
if is_prime(r):
ans.append(r)
break
else:
num = r
checked = _check_factors(ans, n, retry)
if checked == 0:
ans.sort()
return ans
return factor_pollardpm1(n, checked)
def factor_williamspp1(n, retry = 1):
'''
Return a list that has all factors of n.
'''
def v_lucas(P, r, n = 1):
bstr = bin(r).lstrip('0b')[1:]
vkm1, vk = 2, P
if r == 0:
return vkm1
if r == 1:
return vk
for b in bstr:
if b == '0':
vkm1 = (vk * vkm1 - P) % n
vk = (vk * vk - 2) % n
else:
tmp = vkm1
vkm1 = (vk ** 2 - 2) % n
vk = (P * (vk ** 2) - vk * tmp - P) % n
return vk
def factor(n, B = 10 ** 6):
if n % 2 == 0:
return 2
v = 3
for q in all_primes(B, 'list'):
m = int(log(n, q))
v = v_lucas(v, pow(q, m), n)
g = gcd(v - 2, n)
if 1 < g < n:
return g
num = n
ans = []
if is_prime(n):
ans.append(n)
return ans
while True:
d = factor(num)
ans.append(d)
r = num // d
if is_prime(r):
ans.append(r)
break
else:
num = r
checked = _check_factors(ans, n, retry)
if checked == 0:
ans.sort()
return ans
return factor_williamspp1(n, checked)
def add_args():
'''
Add args.
'''
global args
parser = ArgumentParser(description = 'A module to find all kinds of primes and factors of big numbers.')
parser.add_argument('-n', metavar = 'num', type = int, help = 'The number')
parser.add_argument('-method', metavar = 'method', type = str, help = 'The method of the factor function. (siqs, mpqs, lenstra, pollardpm1, williamspp1)')
parser.add_argument('--is_prime', metavar = 'is_prime', const = is_prime, nargs = '?', help = is_prime.__doc__)
parser.add_argument('--all_primes', metavar = 'all_primes', const = all_primes, nargs = '?', help = all_primes.__doc__)
parser.add_argument('--factor', metavar = 'factor', const = factor_mpqs, nargs = '?', help = factor_mpqs.__doc__)
args = parser.parse_args()
print_help_is_prime = False
print_help_all_primes = False
print_help_factor = False
if args.is_prime:
print(is_prime(args.n))
else:
print_help_is_prime = True
if args.all_primes:
print(all_primes(args.n))
else:
print_help_all_primes = True
if args.factor:
if args.method == 'siqs':
print(factor_siqs(args.n))
elif args.method == 'mpqs':
print(factor_mpqs(args.n))
elif args.method == 'lenstra':
print(factor_lenstra(args.n))
elif args.method == 'pollardpm1':
print(factor_pollardpm1(args.n))
elif args.method == 'williamspp1':
print(factor_williamspp1(args.n))
else:
print_help_factor = True
if print_help_is_prime and print_help_all_primes and print_help_factor:
print('''usage: find_primes.py [-h] [-n num] [-method method] [--is_prime [is_prime]] [--all_primes [all_primes]]
[--factor [factor]]
A module to find all kinds of primes and factors of big numbers.
options:
-h, --help show this help message and exit
-n num The number
-method method The method of the factor function. (siqs, mpqs, lenstra, pollardpm1, williamspp1)
--is_prime [is_prime]
If n is prime, return True.
--all_primes [all_primes]
Return a prime list below n. Arguments: output ----- 'array' or 'list' ----- The output type
of the function.
--factor [factor] Return a list that has all factors of n.''')
add_args()
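# Illustrative command-line usage (a sketch only; it assumes this module is saved
# as find_primes.py, as the help text above suggests -- adjust the filename to
# match your setup):
#
#   python find_primes.py -n <number> --is_prime
#   python find_primes.py -n <number> --all_primes
#   python find_primes.py -n <number> -method mpqs --factor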
| 27.878184
| 269
| 0.393051
|
f1b37d98392f02e33bfcd121708d0a19bae665c3
| 5,541
|
py
|
Python
|
torch_attack_mask_03.py
|
beibuwandeluori/Attack-ImageNet-tianchi
|
85294952ac1a190c26bba5e8f141b1c68e72668a
|
[
"MIT"
] | null | null | null |
torch_attack_mask_03.py
|
beibuwandeluori/Attack-ImageNet-tianchi
|
85294952ac1a190c26bba5e8f141b1c68e72668a
|
[
"MIT"
] | null | null | null |
torch_attack_mask_03.py
|
beibuwandeluori/Attack-ImageNet-tianchi
|
85294952ac1a190c26bba5e8f141b1c68e72668a
|
[
"MIT"
] | null | null | null |
# Ensemble MI-FGSM adversarial attack on ImageNet images, constrained by CAM-based masks
import sys, os
import argparse
import torch
import torch.nn as nn
import cv2
import numpy as np
import glob
from attacker import Attacker, AttackerPGD, AttackerTPGD, AttackerMIFGSM
import torchattacks
from loader import ImageNet_A, input_diversity
from models.models import model_selection
from utils.Resnet import resnet152_denoise, resnet101_denoise, resnet152
from utils.Normalize import Normalize, Permute, Resize
class Ensemble(nn.Module):
def __init__(self, model1, model2=None, model3=None):
super(Ensemble, self).__init__()
self.model1 = model1
self.model2 = model2
self.model3 = model3
def forward(self, x):
logits1 = self.model1(x)
logits2 = self.model2(x)
if self.model3 is not None:
logits3 = self.model3(x)
logits = (logits1 + logits2 + logits3) / 3
else:
logits = (logits1 + logits2) / 2
return logits
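# The Ensemble above simply averages the raw logits of two or three classifiers;
# each wrapped model is expected to take the same [0, 1] RGB tensor and to do its
# own resizing/normalization (see the nn.Sequential wrappers built below).
# A minimal standalone sketch (m1 and m2 are hypothetical pre-built classifiers,
# not part of this script):
#
#   ens = Ensemble(m1, m2)   # model3 defaults to None -> mean of two logit sets
#   logits = ens(images)     # images: float tensor in [0, 1], shape NCHW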
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# /raid/chenby/tianchi/imagenet/
# abg: attack_background
parser.add_argument('--input_dir', default='/raid/chenby/tianchi/imagenet//', type=str, help='path to data')
parser.add_argument('--output_dir', default='./results/10_ensemble3_MIM_div_mask_step100_16_step100_8/', type=str, help='path to results')
parser.add_argument('--batch_size', default=8, type=int, help='mini-batch size')
parser.add_argument('--steps', default=100, type=int, help='iteration steps')
parser.add_argument('--max_norm', default=16, type=float, help='Linf limit')
parser.add_argument('--div_prob', default=0.9, type=float, help='probability of diversity')
args = parser.parse_args()
output_dir = os.path.join(args.output_dir, 'images')
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
os.environ["CUDA_VISIBLE_DEVICES"] = "6"
# ensemble model
model1 = model_selection(model_name='efficientnet-b5', advprop=False) # efficientnet-b5
model1 = nn.Sequential(
Resize(input_size=[456, 456]),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
model1
)
    model2 = model_selection(model_name='resnet50')  # resnet50
model2 = nn.Sequential(
Resize(input_size=[224, 224]),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
model2
)
    model3 = model_selection(model_name='inceptionv4')  # inceptionv4
model3 = nn.Sequential(
Resize(input_size=[299, 299]),
Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
model3
)
model = Ensemble(model1, model2, model3)
# print(model)
model.cuda()
model.eval()
# set dataset
mask_root_dir = '/raid/chenby/tianchi/imagenet/cam_mask_res101'
# mask_root_dir = '/raid/chenby/tianchi/imagenet/cam_mask_se_res101_4d_03'
dataset = ImageNet_A(args.input_dir, mask_root_dir=mask_root_dir,
attack_background=False)
loader = torch.utils.data.DataLoader(dataset,
batch_size=args.batch_size,
shuffle=False)
# set attacker
# attacker = Attacker(steps=args.steps,
# max_norm=args.max_norm/255.0,
# div_prob=args.div_prob,
# device=torch.device('cuda'),
# low=200,
# high=500)
# attacker = torchattacks.PGD(model, eps=args.max_norm/255.0, alpha=2/255.0, steps=args.steps)
# attacker = AttackerPGD(model, eps=args.max_norm/255.0, alpha=2/255.0, steps=args.steps, low=200, high=500,
# div_prob=args.div_prob)
# attacker = AttackerTPGD(model, eps=args.max_norm/255.0, alpha=2/255.0, steps=args.steps, low=200, high=500,
# div_prob=args.div_prob)
attacker = AttackerMIFGSM(model, eps=args.max_norm / 255.0, decay=1.0, steps=args.steps, low=200, high=500,
div_prob=args.div_prob)
attacker_02 = AttackerMIFGSM(model, eps=args.max_norm / 2.0 / 255.0, decay=1.0, steps=args.steps, low=200, high=500,
div_prob=args.div_prob)
for ind, (img, mask, label_true, label_target, filenames) in enumerate(loader):
# if os.path.exists(os.path.join(output_dir, os.path.split(filenames[-1])[-1])) and not flag:
# continue
# flag = False
# for filename in filenames:
# if '184.jpg' in filename:
# flag = True
# break
# if not flag:
# continue
# run attack
adv = attacker(img.cuda(), label_true.cuda(), mask.cuda())
adv = attacker_02(adv.cuda(), label_true.cuda(), mask.cuda())
# save results
for bind, filename in enumerate(filenames):
out_img = adv[bind].detach().cpu().numpy()
delta_img = np.abs(out_img - img[bind].numpy()) * 255.0
print('Attack on {}:'.format(os.path.split(filename)[-1]))
print('Max: {0:.0f}, Mean: {1:.2f}'.format(np.max(delta_img), np.mean(delta_img)))
out_img = np.transpose(out_img, axes=[1, 2, 0]) * 255.0
out_img = out_img[:, :, ::-1]
out_filename = os.path.join(output_dir, os.path.split(filename)[-1])
# print(out_filename, out_img.shape, type(out_img))
cv2.imwrite(out_filename, out_img)
| 41.977273
| 142
| 0.616495
|
92fb33038ea2443006f63e05c4d073f4ff596192
| 3,228
|
py
|
Python
|
app/user/handlers.py
|
dogukangungordi/cinetify-Movie
|
85946010f4471cef0fb42873d50d59493372d060
|
[
"MIT"
] | null | null | null |
app/user/handlers.py
|
dogukangungordi/cinetify-Movie
|
85946010f4471cef0fb42873d50d59493372d060
|
[
"MIT"
] | null | null | null |
app/user/handlers.py
|
dogukangungordi/cinetify-Movie
|
85946010f4471cef0fb42873d50d59493372d060
|
[
"MIT"
] | null | null | null |
import json
import falcon
from psycopg2 import IntegrityError
from app.utils.auth import hash_password, verify_password, generate_token
from app.utils.hooks import open_cursor_hook, close_cursor_hook, auth_required
from app.utils.misc import make_code
from app.user.validation import (
validate_user_create, validate_user_auth, validate_request_password_reset,
validate_confirm_password_reset)
@falcon.before(open_cursor_hook)
@falcon.after(close_cursor_hook)
class UserResource(object):
@falcon.before(validate_user_create)
def on_post(self, req, res):
email = req.context['data']['email']
password = hash_password(req.context['data']['password'])
try:
self.cursor.callproc('sp_user_insert', [email, password])
except IntegrityError:
title = 'Conflict'
description = 'Email in use'
raise falcon.HTTPConflict(title, description)
result = self.cursor.fetchone()[0]
res.status = falcon.HTTP_201
res.body = json.dumps({
'token': generate_token(result)
})
@falcon.before(open_cursor_hook)
@falcon.after(close_cursor_hook)
class AuthenticationResource(object):
@falcon.before(validate_user_auth)
def on_post(self, req, res):
unauthorized_title = 'Unauthorized'
unauthorized_description = 'Invalid credentials'
email = req.context['data']['email']
password = req.context['data']['password']
self.cursor.callproc('sp_lookup_user_by_email', [email, ])
result = self.cursor.fetchone()
if result is None:
raise falcon.HTTPUnauthorized(unauthorized_title, unauthorized_description)
result = result[0]
valid_password = verify_password(password, result.pop('password'))
if not valid_password:
raise falcon.HTTPUnauthorized(unauthorized_title, unauthorized_description)
res.status = falcon.HTTP_200
res.body = json.dumps({
'token': generate_token(result)
})
@falcon.before(open_cursor_hook)
@falcon.after(close_cursor_hook)
class PasswordResetRequestResource(object):
@falcon.before(validate_request_password_reset)
def on_post(self, req, res):
email = req.context['data']['email']
self.cursor.callproc('sp_reset_password_request', [email, make_code(), ])
res.status = falcon.HTTP_201
res.body = json.dumps({})
@falcon.before(open_cursor_hook)
@falcon.after(close_cursor_hook)
class PasswordResetConfirmResource(object):
@falcon.before(validate_confirm_password_reset)
def on_post(self, req, res):
code = req.context['data']['code']
password = hash_password(req.context['data']['password'])
self.cursor.callproc('sp_reset_password', [code, password, ])
result = self.cursor.fetchone()
res.status = falcon.HTTP_200 if result[0] else falcon.HTTP_401
res.body = json.dumps({})
# Handlers for test routes
class AuthTestResource(object):
@falcon.before(auth_required)
def on_get(self, req, res):
res.status = falcon.HTTP_200
res.body = json.dumps({
'email': req.context['auth_user']['email']
})
| 31.960396
| 87
| 0.684015
|
1bb8709f22e0d043ecaf77e27536b2f14b168852
| 102,207
|
py
|
Python
|
tensorflow_estimator/python/estimator/estimator.py
|
Suraj-Upadhyay/estimator
|
781c0d30c6bf100aa174591dd97cb70fc39d294d
|
[
"Apache-2.0"
] | 1
|
2020-09-29T15:21:14.000Z
|
2020-09-29T15:21:14.000Z
|
tensorflow_estimator/python/estimator/estimator.py
|
Suraj-Upadhyay/estimator
|
781c0d30c6bf100aa174591dd97cb70fc39d294d
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_estimator/python/estimator/estimator.py
|
Suraj-Upadhyay/estimator
|
781c0d30c6bf100aa174591dd97cb70fc39d294d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import os
import tempfile
import numpy as np
import six
import tensorflow as tf
from google.protobuf import message
from tensorflow.core.framework import summary_pb2
from tensorflow.python.distribute import estimator_training as distribute_coordinator_training
from tensorflow.python.eager import context
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import utils_impl as saved_model_utils
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import device_setter
from tensorflow.python.training import evaluation
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.training.tracking import graph_view
from tensorflow.python.training.tracking import util as trackable_util
from tensorflow.python.util import compat_internal
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import estimator_export
from tensorflow_estimator.python.estimator import model_fn as model_fn_lib
from tensorflow_estimator.python.estimator import run_config
from tensorflow_estimator.python.estimator import util as estimator_util
from tensorflow_estimator.python.estimator.export import export_lib
from tensorflow_estimator.python.estimator.mode_keys import ModeKeys
_VALID_MODEL_FN_ARGS = set(
['features', 'labels', 'mode', 'params', 'self', 'config'])
_estimator_api_gauge = monitoring.BoolGauge('/tensorflow/api/estimator',
'estimator api usage', 'method')
_canned_estimator_api_gauge = monitoring.StringGauge(
'/tensorflow/api/estimator/canned_estimator',
'Gauge to track the type of canned estimator used', 'ClassType')
@estimator_export(v1=['estimator.Estimator'])
class Estimator(object):
"""Estimator class to train and evaluate TensorFlow models.
The `Estimator` object wraps a model which is specified by a `model_fn`,
which, given inputs and a number of other parameters, returns the ops
necessary to perform training, evaluation, or predictions.
All outputs (checkpoints, event files, etc.) are written to `model_dir`, or a
subdirectory thereof. If `model_dir` is not set, a temporary directory is
used.
  The `config` argument can be passed a `tf.estimator.RunConfig` object containing
information about the execution environment. It is passed on to the
`model_fn`, if the `model_fn` has a parameter named "config" (and input
functions in the same manner). If the `config` parameter is not passed, it is
instantiated by the `Estimator`. Not passing config means that defaults useful
for local execution are used. `Estimator` makes config available to the model
(for instance, to allow specialization based on the number of workers
available), and also uses some of its fields to control internals, especially
regarding checkpointing.
The `params` argument contains hyperparameters. It is passed to the
`model_fn`, if the `model_fn` has a parameter named "params", and to the input
functions in the same manner. `Estimator` only passes params along, it does
not inspect it. The structure of `params` is therefore entirely up to the
developer.
None of `Estimator`'s methods can be overridden in subclasses (its
constructor enforces this). Subclasses should use `model_fn` to configure
the base class, and may add methods implementing specialized functionality.
See [estimators](https://tensorflow.org/guide/estimators) for more
information.
To warm-start an `Estimator`:
```python
estimator = tf.estimator.DNNClassifier(
feature_columns=[categorical_feature_a_emb, categorical_feature_b_emb],
hidden_units=[1024, 512, 256],
warm_start_from="/path/to/checkpoint/dir")
```
For more details on warm-start configuration, see
`tf.estimator.WarmStartSettings`.
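  As an illustration only (a minimal sketch, not the canned estimator above), a
  custom `model_fn` and `Estimator` might be wired together like this; the
  feature key `'x'` and the `n_classes` hyperparameter are placeholders chosen
  for this example:
  ```python
  def my_model_fn(features, labels, mode, params):
    # TRAIN-only sketch for brevity; a real model_fn also handles EVAL/PREDICT.
    logits = tf.compat.v1.layers.dense(features['x'], params['n_classes'])
    loss = tf.compat.v1.losses.sparse_softmax_cross_entropy(labels, logits)
    train_op = tf.compat.v1.train.GradientDescentOptimizer(0.1).minimize(
        loss, global_step=tf.compat.v1.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
  estimator = tf.estimator.Estimator(model_fn=my_model_fn,
                                     params={'n_classes': 10})
  ```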
@compatibility(eager)
Calling methods of `Estimator` will work while eager execution is enabled.
  However, the `model_fn` and `input_fn` are not executed eagerly; `Estimator`
will switch to graph mode before calling all user-provided functions (incl.
hooks), so their code has to be compatible with graph mode execution. Note
that `input_fn` code using `tf.data` generally works in both graph and eager
modes.
@end_compatibility
"""
def __init__(self,
model_fn,
model_dir=None,
config=None,
params=None,
warm_start_from=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* `features` -- This is the first item returned from the `input_fn`
passed to `train`, `evaluate`, and `predict`. This should be a
single `tf.Tensor` or `dict` of same.
* `labels` -- This is the second item returned from the `input_fn`
passed to `train`, `evaluate`, and `predict`. This should be a
single `tf.Tensor` or `dict` of same (for multi-head models). If
mode is `tf.estimator.ModeKeys.PREDICT`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept `mode`, the
`model_fn` must still be able to handle `labels=None`.
* `mode` -- Optional. Specifies if this is training, evaluation or
prediction. See `tf.estimator.ModeKeys`.
        * `params` -- Optional `dict` of hyperparameters. Will receive what is
          passed to Estimator in the `params` parameter. This allows
          configuring Estimators from hyperparameter tuning.
* `config` -- Optional `estimator.RunConfig` object. Will receive what
is passed to Estimator as its `config` parameter, or a default
value. Allows setting up things in your `model_fn` based on
configuration such as `num_ps_replicas`, or `model_dir`.
* Returns -- `tf.estimator.EstimatorSpec`
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `PathLike` object, the
path will be resolved. If `None`, the model_dir in `config` will be used
if set. If both are set, they must be same. If both are `None`, a
temporary directory will be used.
config: `estimator.RunConfig` configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
warm_start_from: Optional string filepath to a checkpoint or SavedModel to
warm-start from, or a `tf.estimator.WarmStartSettings` object to fully
configure warm-starting. If None, only TRAINABLE variables are
warm-started. If the string filepath is provided instead of a
`tf.estimator.WarmStartSettings`, then all variables are warm-started,
and it is assumed that vocabularies and `tf.Tensor` names are unchanged.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
ValueError: if this is called via a subclass and if that class overrides
a member of `Estimator`.
"""
_estimator_api_gauge.get_cell('init').set(True)
# We do not endorse Estimator child classes to override methods in
# Estimator, other than a select few. You're on your own if you cleverly
# override the method "_assert_members_are_not_overridden".
self.__class__._assert_members_are_not_overridden(self) # pylint: disable=protected-access
self._config = maybe_overwrite_model_dir_and_session_config(
config, model_dir)
# The distribute field contains an instance of tf.distribute.Strategy.
self._train_distribution = self._config.train_distribute
self._eval_distribution = self._config.eval_distribute
# Model directory.
self._model_dir = self._config.model_dir
self._session_config = self._config.session_config
tf.compat.v1.logging.info('Using config: %s', str(vars(self._config)))
self._device_fn = (
self._config.device_fn or _get_replica_device_setter(self._config))
if model_fn is None:
raise ValueError('model_fn must be provided to Estimator.')
model_fn_lib.verify_model_fn_args(model_fn, params)
self._model_fn = model_fn
self._params = copy.deepcopy(params or {})
# pylint: disable=protected-access
self._warm_start_settings = _get_default_warm_start_settings(
warm_start_from)
# pylint: enable=protected-access
@property
def model_dir(self):
return self._model_dir
@property
def config(self):
return copy.deepcopy(self._config)
@property
def params(self):
return copy.deepcopy(self._params)
@property
def model_fn(self):
"""Returns the `model_fn` which is bound to `self.params`.
Returns:
The `model_fn` with following signature:
`def model_fn(features, labels, mode, config)`
"""
def public_model_fn(features, labels, mode, config):
return self._call_model_fn(features, labels, mode, config)
return public_model_fn
# TODO(ispir): support a list of names
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string or a list of string, name of the tensor.
Returns:
Numpy array - value of the tensor.
Raises:
ValueError: If the `Estimator` has not produced a checkpoint yet.
"""
_check_checkpoint_available(self.model_dir)
with context.graph_mode():
return tf.train.load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
Raises:
ValueError: If the `Estimator` has not produced a checkpoint yet.
"""
_check_checkpoint_available(self.model_dir)
with context.graph_mode():
return [name for name, _ in tf.train.list_variables(self.model_dir)]
def latest_checkpoint(self):
"""Finds the filename of the latest saved checkpoint file in `model_dir`.
Returns:
The full path to the latest checkpoint or `None` if no checkpoint was
found.
"""
with context.graph_mode():
return checkpoint_management.latest_checkpoint(self.model_dir)
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
"""Trains a model given training data `input_fn`.
Args:
input_fn: A function that provides input data for training as minibatches.
See [Premade Estimators](
https://tensorflow.org/guide/premade_estimators#create_input_functions)
for more information. The function should construct and return one of
the following:
* A `tf.data.Dataset` object: Outputs of `Dataset` object must be a
tuple `(features, labels)` with same constraints as below.
* A tuple `(features, labels)`: Where `features` is a `tf.Tensor` or a
dictionary of string feature name to `Tensor` and `labels` is a
`Tensor` or a dictionary of string label name to `Tensor`. Both
`features` and `labels` are consumed by `model_fn`. They should
satisfy the expectation of `model_fn` from inputs.
hooks: List of `tf.train.SessionRunHook` subclass instances. Used for
callbacks inside the training loop.
steps: Number of steps for which to train the model. If `None`, train
forever or train until `input_fn` generates the `tf.errors.OutOfRange`
        error or `StopIteration` exception. `steps` works incrementally: if you
        call `train(steps=10)` twice, training runs for 20 steps in total.
If `OutOfRange` or `StopIteration` occurs in the middle, training stops
before 20 steps. If you don't want to have incremental behavior please
set `max_steps` instead. If set, `max_steps` must be `None`.
max_steps: Number of total steps for which to train model. If `None`,
train forever or train until `input_fn` generates the
`tf.errors.OutOfRange` error or `StopIteration` exception. If set,
`steps` must be `None`. If `OutOfRange` or `StopIteration` occurs in the
middle, training stops before `max_steps` steps. Two calls to
`train(steps=100)` means 200 training iterations. On the other hand, two
calls to `train(max_steps=100)` means that the second call will not do
any iteration since first call did all 100 steps.
saving_listeners: list of `CheckpointSaverListener` objects. Used for
callbacks that run immediately before or after checkpoint savings.
Returns:
`self`, for chaining.
Raises:
ValueError: If both `steps` and `max_steps` are not `None`.
ValueError: If either `steps` or `max_steps <= 0`.
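    For illustration of the incremental semantics (a schematic sketch; assumes
    `estimator` is a constructed `Estimator` and `my_input_fn` is defined by the
    caller):
    ```python
    estimator.train(input_fn=my_input_fn, steps=100)
    estimator.train(input_fn=my_input_fn, steps=100)      # 200 steps in total
    estimator.train(input_fn=my_input_fn, max_steps=100)  # no-op: step >= 100
    ```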
"""
_estimator_api_gauge.get_cell('train').set(True)
if self.config.task_type in (run_config.TaskType.EVALUATOR,
run_config.TaskType.PS):
raise ValueError(
          'Train has been called with a wrong configuration. Please use '
          'tf.estimator.train_and_evaluate, which calls the proper API '
          'according to the given configuration. Current configuration: '
          '{}.'.format(
self.config))
with context.graph_mode():
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
if steps is not None and steps <= 0:
raise ValueError('Must specify steps > 0, given: {}'.format(steps))
if max_steps is not None and max_steps <= 0:
raise ValueError(
'Must specify max_steps > 0, given: {}'.format(max_steps))
if max_steps is not None:
start_step = _load_global_step_from_checkpoint_dir(self._model_dir)
if max_steps <= start_step:
          logging.info('Skipping training since max_steps has already been reached.')
return self
hooks = _check_hooks_type(hooks)
hooks.extend(self._convert_train_steps_to_hooks(steps, max_steps))
saving_listeners = _check_listeners_type(saving_listeners)
loss = self._train_model(input_fn, hooks, saving_listeners)
logging.info('Loss for final step: %s.', loss)
return self
def _convert_train_steps_to_hooks(self, steps, max_steps):
"""Create hooks to run correct number of steps in training.
Args:
steps: number of steps to run during training.
max_steps: maximum number of steps to be run during training. It'll be the
maximum number of steps the model will train to after restoring from
checkpoint even across multiple estimator.train calls.
Returns:
List of hooks to be passed to the estimator.
"""
if steps is not None or max_steps is not None:
if self._train_distribution:
steps_per_run = getattr(self._train_distribution.extended,
'steps_per_run', 1)
if steps_per_run > 1:
return [
basic_session_run_hooks._MultiStepStopAtStepHook( # pylint: disable=protected-access
steps, max_steps, steps_per_run)
]
return [tf.compat.v1.train.StopAtStepHook(steps, max_steps)]
else:
return []
def eval_dir(self, name=None):
"""Shows the directory name where evaluation metrics are dumped.
Args:
name: Name of the evaluation if user needs to run multiple evaluations on
different data sets, such as on training data vs test data. Metrics for
different evaluations are saved in separate folders, and appear
separately in tensorboard.
Returns:
      A string which is the path of the directory containing the evaluation metrics.
"""
return os.path.join(self._model_dir, 'eval' if not name else 'eval_' + name)
def evaluate(self,
input_fn,
steps=None,
hooks=None,
checkpoint_path=None,
name=None):
"""Evaluates the model given evaluation data `input_fn`.
For each step, calls `input_fn`, which returns one batch of data.
Evaluates until:
- `steps` batches are processed, or
- `input_fn` raises an end-of-input exception (`tf.errors.OutOfRangeError`
or `StopIteration`).
Args:
input_fn: A function that constructs the input data for evaluation. See
[Premade Estimators](
https://tensorflow.org/guide/premade_estimators#create_input_functions)
for more information. The function should construct and return one of
the following:
* A `tf.data.Dataset` object: Outputs of `Dataset` object must be a
tuple `(features, labels)` with same constraints as below.
* A tuple `(features, labels)`: Where `features` is a `tf.Tensor` or a
dictionary of string feature name to `Tensor` and `labels` is a
`Tensor` or a dictionary of string label name to `Tensor`. Both
`features` and `labels` are consumed by `model_fn`. They should
satisfy the expectation of `model_fn` from inputs.
steps: Number of steps for which to evaluate model. If `None`, evaluates
until `input_fn` raises an end-of-input exception.
hooks: List of `tf.train.SessionRunHook` subclass instances. Used for
callbacks inside the evaluation call.
checkpoint_path: Path of a specific checkpoint to evaluate. If `None`, the
latest checkpoint in `model_dir` is used. If there are no checkpoints
in `model_dir`, evaluation is run with newly initialized `Variables`
instead of ones restored from checkpoint.
name: Name of the evaluation if user needs to run multiple evaluations on
different data sets, such as on training data vs test data. Metrics for
different evaluations are saved in separate folders, and appear
separately in tensorboard.
Returns:
A dict containing the evaluation metrics specified in `model_fn` keyed by
name, as well as an entry `global_step` which contains the value of the
global step for which this evaluation was performed. For canned
estimators, the dict contains the `loss` (mean loss per mini-batch) and
the `average_loss` (mean loss per sample). Canned classifiers also return
the `accuracy`. Canned regressors also return the `label/mean` and the
`prediction/mean`.
Raises:
ValueError: If `steps <= 0`.
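    For illustration (a schematic sketch; assumes `estimator` is a constructed
    `Estimator` and `my_eval_input_fn` is defined by the caller):
    ```python
    metrics = estimator.evaluate(input_fn=my_eval_input_fn, steps=100)
    print(metrics['loss'], metrics['global_step'])
    ```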
"""
_estimator_api_gauge.get_cell('evaluate').set(True)
# pylint: disable=protected-access
if (self._eval_distribution and
hasattr(self._config, '_distribute_coordinator_mode') and
self._config._distribute_coordinator_mode):
return distribute_coordinator_training.estimator_evaluate(
self,
lambda est, s, eval_hooks: est._actual_eval( # pylint: disable=g-long-lambda
input_fn,
strategy=s,
steps=steps,
hooks=eval_hooks,
checkpoint_path=checkpoint_path,
name=name),
hooks)
# pylint: enable=protected-access
else:
return self._actual_eval(
input_fn,
strategy=self._eval_distribution,
steps=steps,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name)
def _actual_eval(self,
input_fn,
strategy=None,
steps=None,
hooks=None,
checkpoint_path=None,
name=None):
"""The method that does evaluation actually."""
with context.graph_mode():
hooks = _check_hooks_type(hooks)
hooks.extend(self._convert_eval_steps_to_hooks(steps))
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = checkpoint_management.latest_checkpoint(self._model_dir)
if not latest_path:
tf.compat.v1.logging.info(
'Could not find trained model in model_dir: {}, running '
'initialization to evaluate.'.format(self._model_dir))
checkpoint_path = latest_path
def _evaluate():
(scaffold, update_op, eval_dict, all_hooks) = (
self._evaluate_build_graph(input_fn, hooks, checkpoint_path))
return self._evaluate_run(
checkpoint_path=checkpoint_path,
scaffold=scaffold,
update_op=update_op,
eval_dict=eval_dict,
all_hooks=all_hooks,
output_dir=self.eval_dir(name))
with tf.Graph().as_default():
if strategy:
# We want to create the iterations variable outside the distribution
# scope as that is just stored on the host and mainly used to drive
# the loop and doesn't need to be a Mirrored/Device variable.
training.get_or_create_steps_per_run_variable()
with strategy.scope():
return _evaluate()
else:
return _evaluate()
def _convert_eval_steps_to_hooks(self, steps):
"""Create hooks to run correct number of steps in evaluation.
Args:
steps: number of steps to run during evaluation.
Raises:
ValueError: if steps is less than or equal to zero.
Returns:
List of hooks to be passed to the estimator.
"""
if steps is None:
return []
if steps <= 0:
raise ValueError('Must specify steps > 0, given: {}'.format(steps))
    # The hooks are declared as private in evaluation.py to discourage their use
# by other libraries or open source users. This should be the only usage
# of the estimator evaluation hooks.
if self._eval_distribution:
steps_per_run = getattr(self._eval_distribution.extended, 'steps_per_run',
1)
if steps_per_run > 1:
return [
evaluation._MultiStepStopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps,
steps_per_run=steps_per_run)
]
return [evaluation._StopAfterNEvalsHook(num_evals=steps)] # pylint: disable=protected-access
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
"""Yields predictions for given features.
Please note that interleaving two predict outputs does not work. See:
[issue/20506](
https://github.com/tensorflow/tensorflow/issues/20506#issuecomment-422208517)
Args:
input_fn: A function that constructs the features. Prediction continues
until `input_fn` raises an end-of-input exception
(`tf.errors.OutOfRangeError` or `StopIteration`). See [Premade
Estimators](
https://tensorflow.org/guide/premade_estimators#create_input_functions)
for more information. The function should construct and return one of
the following:
* `tf.data.Dataset` object -- Outputs of `Dataset` object must have
same constraints as below.
* features -- A `tf.Tensor` or a dictionary of string feature name to
`Tensor`. features are consumed by `model_fn`. They should satisfy
the expectation of `model_fn` from inputs.
* A tuple, in which case
the first item is extracted as features.
predict_keys: list of `str`, name of the keys to predict. It is used if
the `tf.estimator.EstimatorSpec.predictions` is a `dict`. If
`predict_keys` is used then rest of the predictions will be filtered
from the dictionary. If `None`, returns all.
hooks: List of `tf.train.SessionRunHook` subclass instances. Used for
callbacks inside the prediction call.
checkpoint_path: Path of a specific checkpoint to predict. If `None`, the
latest checkpoint in `model_dir` is used. If there are no checkpoints
in `model_dir`, prediction is run with newly initialized `Variables`
instead of ones restored from checkpoint.
yield_single_examples: If `False`, yields the whole batch as returned by
the `model_fn` instead of decomposing the batch into individual
elements. This is useful if `model_fn` returns some tensors whose first
dimension is not equal to the batch size.
Yields:
Evaluated values of `predictions` tensors.
Raises:
      ValueError: If the batch lengths of the prediction outputs differ and
`yield_single_examples` is `True`.
ValueError: If there is a conflict between `predict_keys` and
`predictions`. For example if `predict_keys` is not `None` but
`tf.estimator.EstimatorSpec.predictions` is not a `dict`.
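    For illustration (a schematic sketch; assumes `estimator` is a constructed
    `Estimator` and `my_predict_input_fn` is defined by the caller):
    ```python
    for pred in estimator.predict(input_fn=my_predict_input_fn):
      print(pred)  # one prediction dict (or value) per input example
    ```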
"""
_estimator_api_gauge.get_cell('predict').set(True)
with context.graph_mode():
hooks = _check_hooks_type(hooks)
# Check that model has been trained.
if not checkpoint_path:
checkpoint_path = checkpoint_management.latest_checkpoint(
self._model_dir)
if not checkpoint_path:
tf.compat.v1.logging.info(
'Could not find trained model in model_dir: {}, running '
'initialization to predict.'.format(self._model_dir))
with tf.Graph().as_default() as g:
tf.compat.v1.random.set_random_seed(self._config.tf_random_seed)
self._create_and_assert_global_step(g)
features, input_hooks = self._get_features_from_input_fn(
input_fn, ModeKeys.PREDICT)
estimator_spec = self._call_model_fn(features, None, ModeKeys.PREDICT,
self.config)
# Call to warm_start has to be after model_fn is called.
self._maybe_warm_start(checkpoint_path)
predictions = self._extract_keys(estimator_spec.predictions,
predict_keys)
all_hooks = list(input_hooks)
all_hooks.extend(hooks)
all_hooks.extend(list(estimator_spec.prediction_hooks or []))
with tf.compat.v1.train.MonitoredSession(
session_creator=tf.compat.v1.train.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
master=self._config.master,
scaffold=estimator_spec.scaffold,
config=self._session_config),
hooks=all_hooks) as mon_sess:
while not mon_sess.should_stop():
preds_evaluated = mon_sess.run(predictions)
if not yield_single_examples:
yield preds_evaluated
elif not isinstance(predictions, dict):
for pred in preds_evaluated:
yield pred
else:
for i in range(self._extract_batch_length(preds_evaluated)):
yield {
key: value[i]
for key, value in six.iteritems(preds_evaluated)
}
def _assert_members_are_not_overridden(self):
"""Asserts members of `Estimator` are not overridden."""
_assert_members_are_not_overridden(Estimator, self)
def export_saved_model(self,
export_dir_base,
serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None,
experimental_mode=ModeKeys.PREDICT):
# pylint: disable=line-too-long
"""Exports inference graph as a `SavedModel` into the given dir.
For a detailed guide, see
[SavedModel from
Estimators](https://tensorflow.org/guide/saved_model#savedmodels_from_estimators).
This method builds a new graph by first calling the
`serving_input_receiver_fn` to obtain feature `Tensor`s, and then calling
this `Estimator`'s `model_fn` to generate the model graph based on those
features. It restores the given checkpoint (or, lacking that, the most
recent checkpoint) into this graph in a fresh session. Finally it creates
a timestamped export directory below the given `export_dir_base`, and writes
a `SavedModel` into it containing a single `tf.MetaGraphDef` saved from this
session.
The exported `MetaGraphDef` will provide one `SignatureDef` for each
element of the `export_outputs` dict returned from the `model_fn`, named
using the same keys. One of these keys is always
`tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY`,
indicating which signature will be served when a serving request does not
specify one. For each signature, the outputs are provided by the
corresponding `tf.estimator.export.ExportOutput`s, and the inputs are always
the input receivers provided by the `serving_input_receiver_fn`.
Extra assets may be written into the `SavedModel` via the `assets_extra`
argument. This should be a dict, where each key gives a destination path
(including the filename) relative to the assets.extra directory. The
corresponding value gives the full path of the source file to be copied.
For example, the simple case of copying a single file without renaming it
is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
The experimental_mode parameter can be used to export a single
train/eval/predict graph as a `SavedModel`.
See `experimental_export_all_saved_models` for full docs.
Args:
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported `SavedModel`s.
serving_input_receiver_fn: A function that takes no argument and returns a
`tf.estimator.export.ServingInputReceiver` or
`tf.estimator.export.TensorServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported `SavedModel`, or `None` if no extra assets are
needed.
as_text: whether to write the `SavedModel` proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
      experimental_mode: `tf.estimator.ModeKeys` value indicating which mode will
be exported. Note that this feature is experimental.
Returns:
The path to the exported directory as a bytes object.
Raises:
ValueError: if no `serving_input_receiver_fn` is provided, no
`export_outputs` are provided, or no checkpoint can be found.
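    For illustration (a schematic sketch; the feature name `'x'` and its shape
    are placeholders chosen for this example, and `estimator` is assumed to be a
    constructed `Estimator`):
    ```python
    def serving_input_receiver_fn():
      features = {'x': tf.compat.v1.placeholder(tf.float32, [None, 4])}
      return tf.estimator.export.ServingInputReceiver(features, features)
    export_dir = estimator.export_saved_model('/tmp/export',
                                               serving_input_receiver_fn)
    ```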
"""
# pylint: enable=line-too-long
if not serving_input_receiver_fn:
raise ValueError('An input_receiver_fn must be defined.')
input_receiver_fn_map = {experimental_mode: serving_input_receiver_fn}
return self._export_all_saved_models(
export_dir_base,
input_receiver_fn_map,
assets_extra=assets_extra,
as_text=as_text,
checkpoint_path=checkpoint_path,
strip_default_attrs=True)
def experimental_export_all_saved_models(self,
export_dir_base,
input_receiver_fn_map,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Exports a `SavedModel` with `tf.MetaGraphDefs` for each requested mode.
For each mode passed in via the `input_receiver_fn_map`,
this method builds a new graph by calling the `input_receiver_fn` to obtain
feature and label `Tensor`s. Next, this method calls the `Estimator`'s
`model_fn` in the passed mode to generate the model graph based on
those features and labels, and restores the given checkpoint
(or, lacking that, the most recent checkpoint) into the graph.
Only one of the modes is used for saving variables to the `SavedModel`
(order of preference: `tf.estimator.ModeKeys.TRAIN`,
`tf.estimator.ModeKeys.EVAL`, then
`tf.estimator.ModeKeys.PREDICT`), such that up to three
`tf.MetaGraphDefs` are saved with a single set of variables in a single
`SavedModel` directory.
    For the variables and `tf.MetaGraphDefs`, this method creates a timestamped
    export directory below `export_dir_base` and writes a `SavedModel` into it
    containing the `tf.MetaGraphDef` for the given mode and its associated
    signatures.
For prediction, the exported `MetaGraphDef` will provide one `SignatureDef`
for each element of the `export_outputs` dict returned from the `model_fn`,
named using the same keys. One of these keys is always
`tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY`,
indicating which signature will be served when a serving request does not
specify one. For each signature, the outputs are provided by the
corresponding `tf.estimator.export.ExportOutput`s, and the inputs are always
the input receivers provided by the `serving_input_receiver_fn`.
For training and evaluation, the `train_op` is stored in an extra
collection, and loss, metrics, and predictions are included in a
`SignatureDef` for the mode in question.
Extra assets may be written into the `SavedModel` via the `assets_extra`
argument. This should be a dict, where each key gives a destination path
(including the filename) relative to the assets.extra directory. The
corresponding value gives the full path of the source file to be copied.
For example, the simple case of copying a single file without renaming it
is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
Args:
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported `SavedModel`s.
input_receiver_fn_map: dict of `tf.estimator.ModeKeys` to
`input_receiver_fn` mappings, where the `input_receiver_fn` is a
function that takes no arguments and returns the appropriate subclass of
`InputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported `SavedModel`, or `None` if no extra assets are
needed.
as_text: whether to write the `SavedModel` proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The path to the exported directory as a bytes object.
Raises:
ValueError: if any `input_receiver_fn` is `None`, no `export_outputs`
are provided, or no checkpoint can be found.
"""
return self._export_all_saved_models(
export_dir_base,
input_receiver_fn_map,
assets_extra=assets_extra,
as_text=as_text,
checkpoint_path=checkpoint_path,
strip_default_attrs=True)
def _export_all_saved_models(self,
export_dir_base,
input_receiver_fn_map,
assets_extra=None,
as_text=False,
checkpoint_path=None,
strip_default_attrs=True):
"""Exports multiple modes in the model function to a SavedModel."""
# TODO(b/65561022): Consider allowing multiple input_receiver_fns per mode.
with context.graph_mode():
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = self.latest_checkpoint()
if not checkpoint_path:
if self._warm_start_settings:
checkpoint_path = self._warm_start_settings.ckpt_to_initialize_from
if tf.compat.v1.gfile.IsDirectory(checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(checkpoint_path)
else:
raise ValueError("Couldn't find trained model at {}.".format(
self._model_dir))
export_dir = export_lib.get_timestamped_export_dir(export_dir_base)
temp_export_dir = export_lib.get_temp_export_dir(export_dir)
builder = tf.compat.v1.saved_model.Builder(temp_export_dir)
save_variables = True
# Note that the order in which we run here matters, as the first
# mode we pass through will be used to save the variables. We run TRAIN
# first, as that is also the mode used for checkpoints, and therefore
# we are not likely to have vars in PREDICT that are not in the checkpoint
# created by TRAIN.
if input_receiver_fn_map.get(ModeKeys.TRAIN):
self._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables,
mode=ModeKeys.TRAIN,
strip_default_attrs=strip_default_attrs)
save_variables = False
if input_receiver_fn_map.get(ModeKeys.EVAL):
self._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables,
mode=ModeKeys.EVAL,
strip_default_attrs=strip_default_attrs)
save_variables = False
if input_receiver_fn_map.get(ModeKeys.PREDICT):
self._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables,
mode=ModeKeys.PREDICT,
strip_default_attrs=strip_default_attrs)
save_variables = False
if save_variables:
raise ValueError('No valid modes for exporting found. Got {}.'.format(
input_receiver_fn_map.keys()))
builder.save(as_text)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(
tf.compat.as_bytes(temp_export_dir),
tf.compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(
tf.compat.as_bytes(assets_extra_path),
tf.compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
tf.compat.v1.gfile.MakeDirs(dest_path)
tf.compat.v1.gfile.Copy(source, dest_absolute)
tf.compat.v1.gfile.Rename(temp_export_dir, export_dir)
return export_dir
def _add_meta_graph_for_mode(self,
builder,
input_receiver_fn_map,
checkpoint_path,
save_variables=True,
mode=ModeKeys.PREDICT,
export_tags=None,
check_variables=True,
strip_default_attrs=True):
"""Loads variables and adds them along with a `tf.MetaGraphDef` for saving.
Args:
builder: instance of `tf.saved_model.builder.SavedModelBuilder` that will
be used for saving.
input_receiver_fn_map: dict of `tf.estimator.ModeKeys` to
`input_receiver_fn` mappings, where the `input_receiver_fn` is a
function that takes no argument and returns the appropriate subclass of
`InputReceiver`.
checkpoint_path: The checkpoint path to export.
save_variables: bool, whether variables should be saved. If `False`, just
the `tf.MetaGraphDef` will be saved. Note that `save_variables` should
only be `True` for the first call to this function, and the
`SavedModelBuilder` will raise an error if that is not the case.
mode: `tf.estimator.ModeKeys` value indicating which mode will be
exported.
export_tags: The set of tags with which to save `tf.MetaGraphDef`. If
`None`, a default set will be selected to match the passed mode.
check_variables: bool, whether to check the checkpoint has all variables.
strip_default_attrs: bool, whether to strip default attributes. This may
only be True when called from the deprecated V1
Estimator.export_savedmodel.
Raises:
ValueError: if `save_variables` is `True` and `check_variables` is `False`.
"""
if export_tags is None:
export_tags = export_lib.EXPORT_TAG_MAP[mode]
input_receiver_fn = input_receiver_fn_map[mode]
with tf.Graph().as_default() as g:
self._create_and_assert_global_step(g)
tf.compat.v1.random.set_random_seed(self._config.tf_random_seed)
input_receiver = input_receiver_fn()
# Call the model_fn and collect the export_outputs.
estimator_spec = self._call_model_fn(
features=input_receiver.features,
labels=getattr(input_receiver, 'labels', None),
mode=mode,
config=self.config)
export_outputs = export_lib.export_outputs_for_mode(
mode=estimator_spec.mode,
serving_export_outputs=estimator_spec.export_outputs,
predictions=estimator_spec.predictions,
loss=estimator_spec.loss,
metrics=estimator_spec.eval_metric_ops)
# Build the SignatureDefs from receivers and all outputs
signature_def_map = export_lib.build_all_signature_defs(
input_receiver.receiver_tensors,
export_outputs,
getattr(input_receiver, 'receiver_tensors_alternatives', None),
serving_only=(mode == ModeKeys.PREDICT))
with tf.compat.v1.Session(config=self._session_config) as session:
if estimator_spec.scaffold.local_init_op is not None:
local_init_op = estimator_spec.scaffold.local_init_op
else:
local_init_op = tf.compat.v1.train.Scaffold.default_local_init_op()
# This saver will be used both for restoring variables now,
# and in saving out the metagraph below. This ensures that any
# Custom Savers stored with the Scaffold are passed through to the
# SavedModel for restore later.
if isinstance(estimator_spec.scaffold.saver, trackable_util.Checkpoint):
graph_saver = tf.compat.v1.train.Saver(
var_list=graph_view.ObjectGraphView(
estimator_spec.scaffold.saver).frozen_saveable_objects(),
sharded=True)
else:
graph_saver = (
estimator_spec.scaffold.saver or
tf.compat.v1.train.Saver(sharded=True))
if save_variables and not check_variables:
raise ValueError('If `save_variables` is `True`, `check_variables` '
'must not be `False`.')
if check_variables:
try:
graph_saver.restore(session, checkpoint_path)
except tf.errors.NotFoundError as e:
msg = ('Could not load all requested variables from checkpoint. '
'Please make sure your model_fn does not expect variables '
'that were not saved in the checkpoint.\n\n'
'Encountered error with mode `{}` while restoring '
'checkpoint from: `{}`. Full Traceback:\n\n{}').format(
mode, checkpoint_path, e)
raise ValueError(msg)
# We add the train op explicitly for now, so that we don't have to
# change the Builder public interface. Note that this is a no-op
# for prediction, where train_op is None.
builder._add_train_op(estimator_spec.train_op) # pylint: disable=protected-access
meta_graph_kwargs = dict(
tags=export_tags,
signature_def_map=signature_def_map,
assets_collection=tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.ASSET_FILEPATHS),
main_op=local_init_op,
saver=graph_saver,
strip_default_attrs=strip_default_attrs)
if save_variables:
builder.add_meta_graph_and_variables(session, **meta_graph_kwargs)
else:
builder.add_meta_graph(**meta_graph_kwargs)
def _get_features_from_input_fn(self, input_fn, mode):
"""Extracts the `features` from return values of `input_fn`."""
result = self._call_input_fn(input_fn, mode)
result, _, hooks = estimator_util.parse_input_fn_result(result)
self._validate_features_in_predict_input(result)
return result, hooks
def _validate_features_in_predict_input(self, result):
if not _has_dataset_or_queue_runner(result):
logging.warning('Input graph does not use tf.data.Dataset or contain a '
'QueueRunner. That means predict yields forever. '
'This is probably a mistake.')
def _get_iterator_from_input_fn(self, input_fn, mode, distribution=None):
"""Calls `input_fn` and returns an iterator."""
if distribution is not None:
# pylint: disable=g-long-lambda
iterator = distribution.make_input_fn_iterator(
lambda input_context: self._call_input_fn(input_fn, mode,
input_context))
input_hooks = [
estimator_util.DistributedIteratorInitializerHook(iterator)
]
else:
result = self._call_input_fn(input_fn, mode)
iterator = result.make_initializable_iterator()
input_hooks = [estimator_util._DatasetInitializerHook(iterator)] # pylint: disable=protected-access
return iterator, input_hooks
def _get_features_and_labels_from_input_fn(self, input_fn, mode):
"""Extracts the `features` and labels from return values of `input_fn`."""
return estimator_util.parse_input_fn_result(
self._call_input_fn(input_fn, mode))
def _extract_batch_length(self, preds_evaluated):
"""Extracts batch length of predictions."""
batch_length = None
for key, value in six.iteritems(preds_evaluated):
batch_length = batch_length or value.shape[0]
if value.shape[0] != batch_length:
raise ValueError('Batch length of predictions should be the same. %s has '
'a different batch length than others.' % key)
return batch_length
def _extract_keys(self, predictions, predict_keys):
"""Extracts `predict_keys` from `predictions`."""
if not predict_keys:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'predict_keys argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions)
if key in predict_keys
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, predict_keys))
return predictions
def _create_global_step(self, graph):
"""Creates the global step tensor in graph.
The global step tensor must be an integer type with name 'global_step' and
be added to the collection `tf.GraphKeys.GLOBAL_STEP`.
Args:
graph: The graph in which to create the global step tensor.
Returns:
The global step `tf.Tensor`.
"""
return tf.compat.v1.train.create_global_step(graph)
def _create_and_assert_global_step(self, graph):
"""Creates and asserts properties of the global step.
Args:
graph: The graph in which to create the global step tensor.
Returns:
The global step `tf.Tensor`.
"""
step = self._create_global_step(graph)
assert step is tf.compat.v1.train.get_global_step()
assert step.dtype.is_integer
return step
def _call_input_fn(self, input_fn, mode, input_context=None):
"""Calls the input function.
Args:
input_fn: The input function.
mode: `tf.estimator.ModeKeys`
Returns:
The return value of the passed `input_fn`, which should be one of:
* A 'tf.data.Dataset' object: Outputs of `Dataset` object must be a
tuple `(features, labels)` with same constraints as below.
* A tuple `(features, labels)`: Where `features` is a `Tensor` or a
dictionary of string feature name to `Tensor` and `labels` is a
`Tensor` or a dictionary of string label name to `Tensor`. Both
`features` and `labels` are consumed by `model_fn`. They should
satisfy the expectation of `model_fn` from inputs.
Raises:
ValueError: if `input_fn` takes invalid arguments.
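Example of a valid `input_fn` (a minimal sketch; the feature key 'x' and the
toy data are illustrative only):
```
def train_input_fn():
  # Returns a `tf.data.Dataset` of `(features, labels)` tuples.
  dataset = tf.data.Dataset.from_tensor_slices(
      ({'x': [[1.0], [2.0]]}, [0, 1]))
  return dataset.repeat().batch(2)
```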
"""
input_fn_args = function_utils.fn_args(input_fn)
kwargs = {}
if 'mode' in input_fn_args:
kwargs['mode'] = mode
if 'params' in input_fn_args:
kwargs['params'] = self.params
if 'config' in input_fn_args:
kwargs['config'] = self.config
if input_context and 'input_context' in input_fn_args:
tf.compat.v1.logging.info(
'The `input_fn` accepts an `input_context` which will '
'be given by DistributionStrategy')
kwargs['input_context'] = input_context
with tf.compat.v1.device('/cpu:0'):
return input_fn(**kwargs)
def _call_model_fn(self, features, labels, mode, config):
"""Calls model function.
Args:
features: features dict.
labels: labels dict.
mode: `tf.estimator.ModeKeys`
config: `tf.estimator.RunConfig`
Returns:
An `tf.estimator.EstimatorSpec` object.
Raises:
ValueError: if `model_fn` returns invalid objects.
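Example of a TRAIN-mode `model_fn` sketch compatible with this call (the layer
size and the feature key 'x' are illustrative only):
```
def my_model_fn(features, labels, mode):
  # Minimal TRAIN-mode sketch; not a full multi-mode model_fn.
  logits = tf.compat.v1.layers.dense(features['x'], 2)
  loss = tf.compat.v1.losses.sparse_softmax_cross_entropy(labels, logits)
  optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.1)
  train_op = optimizer.minimize(
      loss, global_step=tf.compat.v1.train.get_global_step())
  return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
```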
"""
model_fn_args = function_utils.fn_args(self._model_fn)
kwargs = {}
if 'labels' in model_fn_args:
kwargs['labels'] = labels
else:
if labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = config
logging.info('Calling model_fn.')
model_fn_results = self._model_fn(features=features, **kwargs)
logging.info('Done calling model_fn.')
if not isinstance(model_fn_results, model_fn_lib.EstimatorSpec):
raise ValueError('model_fn should return an EstimatorSpec.')
return model_fn_results
def _train_model(self, input_fn, hooks, saving_listeners):
if self._train_distribution:
return self._train_model_distributed(input_fn, hooks, saving_listeners)
else:
return self._train_model_default(input_fn, hooks, saving_listeners)
def _train_model_default(self, input_fn, hooks, saving_listeners):
"""Initiate training with `input_fn`, without `DistributionStrategies`.
Args:
input_fn: A function that provides input data for training as minibatches.
hooks: List of `tf.train.SessionRunHook` subclass instances. Used for
callbacks inside the training loop.
saving_listeners: list of `tf.train.CheckpointSaverListener` objects. Used
for callbacks that run immediately before or after checkpoint savings.
Returns:
Loss from training
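Example of a public-API call that reaches this code path (a sketch;
`train_input_fn` is a placeholder and the hook is optional):
```
estimator.train(
    input_fn=train_input_fn,  # placeholder input_fn defined by the caller
    hooks=[tf.compat.v1.train.StepCounterHook(every_n_steps=100)],
    steps=1000)
```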
"""
worker_hooks = []
with tf.Graph().as_default() as g, g.device(self._device_fn):
tf.compat.v1.random.set_random_seed(self._config.tf_random_seed)
global_step_tensor = self._create_and_assert_global_step(g)
# Skip creating a read variable if _create_and_assert_global_step
# returns None (e.g. tf.contrib.estimator.SavedModelEstimator).
if global_step_tensor is not None:
training_util._get_or_create_global_step_read(g) # pylint: disable=protected-access
features, labels, input_hooks = (
self._get_features_and_labels_from_input_fn(input_fn, ModeKeys.TRAIN))
worker_hooks.extend(input_hooks)
estimator_spec = self._call_model_fn(features, labels, ModeKeys.TRAIN,
self.config)
global_step_tensor = tf.compat.v1.train.get_global_step(g)
return self._train_with_estimator_spec(estimator_spec, worker_hooks,
hooks, global_step_tensor,
saving_listeners)
def _train_model_distributed(self, input_fn, hooks, saving_listeners):
"""Initiate training with `input_fn`, using `DistributionStrategies`.
Args:
input_fn: A function that provides input data for training as minibatches.
hooks: List of `tf.train.SessionRunHook` subclass instances. Used for
callbacks inside the training loop.
saving_listeners: list of `tf.train.CheckpointSaverListener` objects. Used
for callbacks that run immediately before or after checkpoint savings.
Returns:
Loss from training
"""
# pylint: disable=protected-access
if (hasattr(self._config, '_distribute_coordinator_mode') and
self._config._distribute_coordinator_mode): # pylint: disable=protected-access
distribute_coordinator_training.estimator_train(
self,
lambda est, s, train_hooks: est._actual_train_model_distributed( # pylint: disable=g-long-lambda
s, input_fn, train_hooks, saving_listeners),
hooks)
return self
else:
self._config._train_distribute.configure(self._config.session_config)
return self._actual_train_model_distributed(
self._config._train_distribute, input_fn, hooks, saving_listeners)
# pylint: enable=protected-access
def _actual_train_model_distributed(self, strategy, input_fn, hooks,
saving_listeners):
"""That method that does actual training with distribution strategy."""
# TODO(sourabhbajaj): Remove this hack once we migrate the other strategies
# to use the new API
is_tpu_strategy = strategy.__class__.__name__.startswith('TPUStrategy')
worker_hooks = []
with tf.Graph().as_default() as g:
# We want to create the iterations variable outside the distribution scope
# as that is just stored on the host and mainly used to drive the loop
# and doesn't need to be a Mirrored/Device variable.
if is_tpu_strategy:
steps_per_run_variable = training.get_or_create_steps_per_run_variable()
# Set flag on the distribution strategy so that optimizer v1 is
# distribution aware and scales the losses by number of replicas.
# This is required only for backward compatibility with estimator and
# V1 optimizer. TF2 will not do this scaling.
if hasattr(strategy, '_scale_loss_for_estimator_enabled'):
scale_ctx = strategy._scale_loss_for_estimator_enabled() # pylint: disable=protected-access
else:
# TODO(psv): Remove this clause after estimator repo gets the
# distribute library changes related to loss scaling.
@tf_contextlib.contextmanager
def nullcontextmanager():
yield
scale_ctx = nullcontextmanager()
with strategy.scope(), scale_ctx:
tf.compat.v1.random.set_random_seed(self._config.tf_random_seed)
iterator, input_hooks = self._get_iterator_from_input_fn(
input_fn, ModeKeys.TRAIN, strategy)
worker_hooks.extend(input_hooks)
global_step_tensor = self._create_and_assert_global_step(g)
# we want to add to the global collection in the main thread not the
# replica threads.
tf.compat.v1.add_to_collection(
training_util.GLOBAL_STEP_READ_KEY,
strategy.extended.read_var(global_step_tensor))
if is_tpu_strategy:
# Create a step_fn from the train_op of grouped_estimator_spec
def step_fn(ctx, inputs):
"""A single step that is passed to run_on_dataset."""
if isinstance(inputs, tuple):
features, labels = inputs
else:
features = inputs
labels = None
estimator_spec = strategy.extended.call_for_each_replica(
self._call_model_fn,
args=(features, labels, ModeKeys.TRAIN, self.config))
ctx.set_last_step_output(
name='loss',
output=estimator_spec.loss,
reduce_op=_get_loss_reduce_op_for_reporting())
ctx.set_non_tensor_output(
name='estimator_spec', output=estimator_spec)
return estimator_spec.train_op
# Create new train_op post graph rewrites
initial_training_loss = tf.constant(1e7)
ctx = strategy.extended.experimental_run_steps_on_iterator(
step_fn,
iterator,
iterations=steps_per_run_variable,
initial_loop_values={'loss': initial_training_loss})
distributed_train_op = ctx.run_op
loss = ctx.last_step_outputs['loss']
grouped_estimator_spec = ctx.non_tensor_outputs['estimator_spec']
else:
features, labels = estimator_util.parse_iterator_result(
iterator.get_next())
grouped_estimator_spec = strategy.extended.call_for_each_replica(
self._call_model_fn,
args=(
features,
labels, # although this will be None it seems
ModeKeys.TRAIN,
self.config))
loss = strategy.reduce(
_get_loss_reduce_op_for_reporting(),
grouped_estimator_spec.loss,
axis=None)
distributed_train_op = grouped_estimator_spec.train_op
scaffold = _combine_distributed_scaffold(
grouped_estimator_spec.scaffold, strategy)
# TODO(yuefengz): add a test for unwrapping per_device_hooks.
def get_hooks_from_the_first_device(per_device_hooks):
return [
self._train_distribution.experimental_local_results(
per_device_hook)[0] for per_device_hook in per_device_hooks
]
training_hooks = get_hooks_from_the_first_device(
grouped_estimator_spec.training_hooks)
training_chief_hooks = get_hooks_from_the_first_device(
grouped_estimator_spec.training_chief_hooks)
estimator_spec = model_fn_lib.EstimatorSpec(
mode=grouped_estimator_spec.mode,
loss=loss,
train_op=strategy.group(distributed_train_op),
training_hooks=training_hooks,
training_chief_hooks=training_chief_hooks,
scaffold=scaffold)
return self._train_with_estimator_spec(estimator_spec, worker_hooks,
hooks, global_step_tensor,
saving_listeners)
def _train_with_estimator_spec_distributed(self, estimator_spec, worker_hooks,
saving_listener):
"""Train a model with the given Estimator Spec and Distribution Strategy."""
if saving_listener:
raise ValueError('Saving listeners are not supported by the current '
'Distribution Strategies.')
with training.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=estimator_spec.scaffold,
hooks=worker_hooks,
chief_only_hooks=tuple(estimator_spec.training_chief_hooks),
save_checkpoint_secs=self._config.save_checkpoints_secs,
save_checkpoint_steps=self._config.save_checkpoints_steps,
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config,
max_wait_secs=self._config.session_creation_timeout_secs,
log_step_count_steps=self._config.log_step_count_steps,
save_graph_def=self._config.checkpoint_save_graph_def) as mon_sess:
loss = None
any_step_done = False
while not mon_sess.should_stop():
_, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])
any_step_done = True
if not any_step_done:
tf.compat.v1.logging.warn('Training with estimator made no steps. '
'Perhaps input is empty or misspecified.')
return loss
def _train_with_estimator_spec(self, estimator_spec, worker_hooks, hooks,
global_step_tensor, saving_listeners):
"""Train a model with the given Estimator Spec."""
if (self._warm_start_settings and
not tf.train.latest_checkpoint(self._model_dir)):
tf.compat.v1.logging.info('Warm-starting with WarmStartSettings: %s' %
(self._warm_start_settings,))
tf.compat.v1.train.warm_start(*self._warm_start_settings)
# Check if the user created a loss summary, and add one if they didn't.
# We assume here that the summary is called 'loss'. If it is not, we will
# make another one with the name 'loss' to ensure it shows up in the right
# graph in TensorBoard.
if not any([
x.op.name == 'loss' for x in ops.get_collection(ops.GraphKeys.SUMMARIES)
]):
summary.scalar('loss', estimator_spec.loss)
ops.add_to_collection(ops.GraphKeys.LOSSES, estimator_spec.loss)
worker_hooks.extend(hooks)
worker_hooks.append(tf.compat.v1.train.NanTensorHook(estimator_spec.loss))
if self._config.log_step_count_steps is not None:
worker_hooks.append(
tf.compat.v1.train.LoggingTensorHook(
{
'loss': estimator_spec.loss,
'step': global_step_tensor
},
every_n_iter=self._config.log_step_count_steps))
worker_hooks.extend(estimator_spec.training_hooks)
if not (estimator_spec.scaffold.saver or
tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.SAVERS)):
tf.compat.v1.add_to_collection(
tf.compat.v1.GraphKeys.SAVERS,
tf.compat.v1.train.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
keep_checkpoint_every_n_hours=(
self._config.keep_checkpoint_every_n_hours),
defer_build=True,
save_relative_paths=True))
if (self._config.cluster_spec and type(
self._train_distribution).__name__ in ('CollectiveAllReduceStrategy',
'CollectiveAllReduceStrategyV1',
'MultiWorkerMirroredStrategy')):
return self._train_with_estimator_spec_distributed(
estimator_spec, worker_hooks, saving_listeners)
chief_hooks = []
all_hooks = worker_hooks + list(estimator_spec.training_chief_hooks)
saver_hooks = [
h for h in all_hooks
if isinstance(h, tf.compat.v1.train.CheckpointSaverHook)
]
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
if not saver_hooks:
chief_hooks = [
tf.compat.v1.train.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=estimator_spec.scaffold,
save_graph_def=self._config.checkpoint_save_graph_def)
]
saver_hooks = [chief_hooks[0]]
if saving_listeners:
if not saver_hooks:
raise ValueError(
'There should be a CheckpointSaverHook to use saving_listeners. '
'Please set one of the RunConfig.save_checkpoints_steps or '
'RunConfig.save_checkpoints_secs.')
else:
# It is expected to have one CheckpointSaverHook. If multiple, we pick
# up the first one to add listener.
for listener in saving_listeners:
# pylint: disable=protected-access
if listener not in saver_hooks[0]._listeners:
saver_hooks[0]._listeners.append(listener)
# pylint: enable=protected-access
# Add summary hooks to worker 0 if we are running with a master, to ensure
# that summaries are written at correct intervals even with long-running
# evaluations.
save_summary_steps = self._config.save_summary_steps
log_step_count_steps = self._config.log_step_count_steps
# Check existence of appropriate cluster spec fields, as well as master and
# worker nodes. As master also performs evaluation, summary writing must
# occur on a different node. The presence of a worker is also checked to
# prevent reassigning hooks for single-replica jobs with just a master node.
if (self._config.cluster_spec and self._config.cluster_spec.jobs and
(run_config.TaskType.WORKER in self._config.cluster_spec.jobs) and
(run_config.TaskType.MASTER in self._config.cluster_spec.jobs)):
# Update config values to prevent the default hooks from being created on
# the master or other workers.
save_summary_steps = 0
log_step_count_steps = None
if (self._config.task_type == run_config.TaskType.WORKER and
self._config.task_id == 0):
if (self._config.save_summary_steps and
self._config.save_summary_steps > 0):
worker_hooks.append(
tf.compat.v1.train.SummarySaverHook(
save_steps=self._config.save_summary_steps,
output_dir=self._config.model_dir,
scaffold=estimator_spec.scaffold))
if (self._config.log_step_count_steps and
self._config.log_step_count_steps > 0):
worker_hooks.append(
tf.compat.v1.train.StepCounterHook(
every_n_steps=self._config.log_step_count_steps,
output_dir=self._config.model_dir))
with training.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=estimator_spec.scaffold,
hooks=worker_hooks,
chief_only_hooks=(tuple(chief_hooks) +
tuple(estimator_spec.training_chief_hooks)),
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=save_summary_steps,
config=self._session_config,
max_wait_secs=self._config.session_creation_timeout_secs,
log_step_count_steps=log_step_count_steps,
save_graph_def=self._config.checkpoint_save_graph_def) as mon_sess:
loss = None
any_step_done = False
while not mon_sess.should_stop():
_, loss = mon_sess.run([estimator_spec.train_op, estimator_spec.loss])
any_step_done = True
if not any_step_done:
tf.compat.v1.logging.warn('Training with estimator made no steps. '
'Perhaps input is empty or misspecified.')
return loss
def _evaluate_build_graph(self, input_fn, hooks=None, checkpoint_path=None):
"""Builds the graph and related hooks to run evaluation."""
tf.compat.v1.random.set_random_seed(self._config.tf_random_seed)
self._create_and_assert_global_step(tf.compat.v1.get_default_graph())
if self._eval_distribution:
(scaffold, evaluation_hooks, input_hooks, update_op, eval_dict) = (
self._call_model_fn_eval_distributed(input_fn, self.config))
else:
(scaffold, evaluation_hooks, input_hooks, update_op, eval_dict) = (
self._call_model_fn_eval(input_fn, self.config))
global_step_tensor = tf.compat.v1.train.get_global_step(
tf.compat.v1.get_default_graph())
# Call to warm_start has to be after model_fn is called.
self._maybe_warm_start(checkpoint_path)
if tf.compat.v1.GraphKeys.GLOBAL_STEP in eval_dict:
raise ValueError(
'Metric with name `global_step` is not allowed, because Estimator '
'already defines a default metric with the same name.')
eval_dict[tf.compat.v1.GraphKeys.GLOBAL_STEP] = global_step_tensor
all_hooks = list(input_hooks)
all_hooks.extend(hooks)
all_hooks.extend(list(evaluation_hooks or []))
# New local variables have been added, so update the estimator spec's
# local init op if it was defined.
if scaffold and scaffold.local_init_op:
# Ensure that eval step has been created before updating local init op.
evaluation._get_or_create_eval_step() # pylint: disable=protected-access
scaffold = tf.compat.v1.train.Scaffold(
local_init_op=tf.group(
scaffold.local_init_op,
tf.compat.v1.train.Scaffold.default_local_init_op()),
copy_from_scaffold=scaffold)
return scaffold, update_op, eval_dict, all_hooks
def _call_model_fn_eval(self, input_fn, config):
"""Call model_fn for evaluation and handle return values."""
features, labels, input_hooks = self._get_features_and_labels_from_input_fn(
input_fn, ModeKeys.EVAL)
estimator_spec = self._call_model_fn(features, labels, ModeKeys.EVAL,
config)
eval_metric_ops = _verify_and_create_loss_metric(
estimator_spec.eval_metric_ops, estimator_spec.loss)
update_op, eval_dict = _extract_metric_update_ops(eval_metric_ops)
return (estimator_spec.scaffold, estimator_spec.evaluation_hooks,
input_hooks, update_op, eval_dict)
def _call_model_fn_eval_distributed(self, input_fn, config):
"""Call model_fn in distribution mode and handle return values."""
iterator, input_hooks = self._get_iterator_from_input_fn(
input_fn, ModeKeys.EVAL, self._eval_distribution)
is_tpu_strategy = (
self._eval_distribution.__class__.__name__.startswith('TPUStrategy'))
if is_tpu_strategy:
steps_per_run_variable = training.get_or_create_steps_per_run_variable()
def step_fn(ctx, inputs):
"""Runs one step of the eval computation and captures outputs."""
if isinstance(inputs, tuple):
features, labels = inputs
else:
features = inputs
labels = None
estimator_spec = self._eval_distribution.extended.call_for_each_replica(
self._call_model_fn, args=(features, labels, ModeKeys.EVAL, config))
eval_metric_ops = _verify_and_create_loss_metric(
estimator_spec.eval_metric_ops, estimator_spec.loss,
self._eval_distribution)
update_op, eval_dict = _extract_metric_update_ops(
eval_metric_ops, self._eval_distribution)
ctx.set_non_tensor_output(name='estimator_spec', output=estimator_spec)
ctx.set_non_tensor_output(name='eval_dict', output=eval_dict)
return update_op
# TODO(priyag): Fix eval step hook to account for steps_per_run.
ctx = self._eval_distribution.extended.experimental_run_steps_on_iterator(
step_fn, iterator, iterations=steps_per_run_variable)
update_op = ctx.run_op
eval_dict = ctx.non_tensor_outputs['eval_dict']
grouped_estimator_spec = ctx.non_tensor_outputs['estimator_spec']
else:
features, labels = estimator_util.parse_iterator_result(
iterator.get_next())
grouped_estimator_spec = (
self._eval_distribution.extended.call_for_each_replica(
self._call_model_fn,
args=(features, labels, ModeKeys.EVAL, config)))
eval_metric_ops = _verify_and_create_loss_metric(
grouped_estimator_spec.eval_metric_ops, grouped_estimator_spec.loss,
self._eval_distribution)
update_op, eval_dict = _extract_metric_update_ops(eval_metric_ops,
self._eval_distribution)
scaffold = _combine_distributed_scaffold(grouped_estimator_spec.scaffold,
self._eval_distribution)
def get_hooks_from_the_first_device(per_device_hooks):
return [
self._eval_distribution.experimental_local_results(per_device_hook)[0]
for per_device_hook in per_device_hooks
]
evaluation_hooks = get_hooks_from_the_first_device(
grouped_estimator_spec.evaluation_hooks)
return (scaffold, evaluation_hooks, input_hooks, update_op, eval_dict)
def _evaluate_run(self, checkpoint_path, scaffold, update_op, eval_dict,
all_hooks, output_dir):
"""Run evaluation."""
eval_results = evaluation._evaluate_once( # pylint: disable=protected-access
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=all_hooks,
config=self._session_config)
current_global_step = eval_results[tf.compat.v1.GraphKeys.GLOBAL_STEP]
_write_dict_to_summary(
output_dir=output_dir,
dictionary=eval_results,
current_global_step=current_global_step)
if checkpoint_path:
_write_checkpoint_path_to_summary(
output_dir=output_dir,
checkpoint_path=checkpoint_path,
current_global_step=current_global_step)
return eval_results
def _maybe_warm_start(self, checkpoint_path):
if not checkpoint_path and self._warm_start_settings:
tf.compat.v1.logging.info('Warm-starting with WarmStartSettings: %s' %
(self._warm_start_settings,))
tf.compat.v1.train.warm_start(*self._warm_start_settings)
@deprecation.deprecated(
None, 'This function has been renamed, use `export_saved_model` instead.')
def export_savedmodel(self,
export_dir_base,
serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None,
strip_default_attrs=False):
# pylint: disable=line-too-long
"""Exports inference graph as a `SavedModel` into the given dir.
For a detailed guide, see
[SavedModel from
Estimators](https://tensorflow.org/guide/saved_model#savedmodels_from_estimators).
This method builds a new graph by first calling the
`serving_input_receiver_fn` to obtain feature `Tensor`s, and then calling
this `Estimator`'s `model_fn` to generate the model graph based on those
features. It restores the given checkpoint (or, lacking that, the most
recent checkpoint) into this graph in a fresh session. Finally it creates
a timestamped export directory below the given `export_dir_base`, and writes
a `SavedModel` into it containing a single `tf.MetaGraphDef` saved from this
session.
The exported `MetaGraphDef` will provide one `SignatureDef` for each
element of the `export_outputs` dict returned from the `model_fn`, named
using the same keys. One of these keys is always
`tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY`,
indicating which signature will be served when a serving request does not
specify one. For each signature, the outputs are provided by the
corresponding `tf.estimator.export.ExportOutput`s, and the inputs are always
the input receivers provided by the `serving_input_receiver_fn`.
Extra assets may be written into the `SavedModel` via the `assets_extra`
argument. This should be a dict, where each key gives a destination path
(including the filename) relative to the assets.extra directory. The
corresponding value gives the full path of the source file to be copied.
For example, the simple case of copying a single file without renaming it
is specified as `{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
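Example (a minimal sketch; the feature spec is illustrative and `my_model_fn`
is a placeholder for a caller-defined model function):
```
feature_spec = {'x': tf.io.FixedLenFeature([1], tf.float32)}
serving_input_fn = (
    tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec))
estimator = tf.estimator.Estimator(model_fn=my_model_fn, model_dir='/tmp/model')
export_path = estimator.export_savedmodel('/tmp/exports', serving_input_fn)
```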
Args:
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported `SavedModel`s.
serving_input_receiver_fn: A function that takes no argument and returns a
`tf.estimator.export.ServingInputReceiver` or
`tf.estimator.export.TensorServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported `SavedModel`, or `None` if no extra assets are
needed.
as_text: whether to write the `SavedModel` proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the `NodeDef`s. For a detailed guide, see [Stripping
Default-Valued Attributes](
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
Returns:
The path to the exported directory as a bytes object.
Raises:
ValueError: if no `serving_input_receiver_fn` is provided, no
`export_outputs` are provided, or no checkpoint can be found.
"""
# pylint: enable=line-too-long
if not serving_input_receiver_fn:
raise ValueError('An input_receiver_fn must be defined.')
return self._export_all_saved_models(
export_dir_base, {ModeKeys.PREDICT: serving_input_receiver_fn},
assets_extra=assets_extra,
as_text=as_text,
checkpoint_path=checkpoint_path,
strip_default_attrs=strip_default_attrs)
@estimator_export('estimator.Estimator', v1=[]) # pylint: disable=missing-docstring
class EstimatorV2(Estimator):
__doc__ = Estimator.__doc__
export_savedmodel = deprecation.hide_attribute_from_api(
'`Estimator.export_savedmodel` has been deprecated. Please use '
'`export_saved_model` instead.')
def _assert_members_are_not_overridden(self):
"""Asserts members of `Estimator` are not overridden."""
_assert_members_are_not_overridden(EstimatorV2, self)
def _get_loss_reduce_op_for_reporting():
graph = tf.compat.v1.get_default_graph()
if getattr(graph, '_is_loss_scaled_by_optimizer', False): # pylint: disable=protected-access
return tf.compat.v1.distribute.get_loss_reduction()
return tf.distribute.ReduceOp.SUM
def _assert_members_are_not_overridden(cls, obj):
"""Assert Estimator methods are not overwritten."""
# TPUEstimator is special cased (owned by TF).
if obj.__class__.__name__ == 'TPUEstimator':
return
allowed_overrides = set([
'model_fn', '_create_and_assert_global_step', '_export_all_saved_models',
'_tf_api_names', '_tf_api_names_v1', '_estimator_api_names',
'_estimator_api_names_v1', '_estimator_api_constants',
'_estimator_api_constants_v1', 'latest_checkpoint'
])
estimator_members = set([m for m in dir(cls) if not m.startswith('__')])
subclass_members = set(obj.__class__.__dict__.keys())
common_members = estimator_members & subclass_members - allowed_overrides
overridden_members = [
m for m in common_members if getattr(cls, m) != getattr(obj.__class__, m)
]
if overridden_members:
raise ValueError(
'Subclasses of Estimator cannot override members of Estimator. '
'{} does override {}'.format(obj.__class__, overridden_members))
def _verify_and_create_loss_metric(eval_metric_ops, loss, distribution=None):
"""Creates a metric for loss and throws an error if one already exists."""
if model_fn_lib.LOSS_METRIC_KEY in eval_metric_ops:
raise ValueError(
'Metric with name "%s" is not allowed, because Estimator ' %
(model_fn_lib.LOSS_METRIC_KEY) +
'already defines a default metric with the same name.')
if distribution is None:
loss_metric = tf.compat.v1.metrics.mean(loss)
else:
loss_metric = distribution.extended.call_for_each_replica(
tf.compat.v1.metrics.mean, args=(loss,))
eval_metric_ops[model_fn_lib.LOSS_METRIC_KEY] = loss_metric
return eval_metric_ops
def maybe_overwrite_model_dir_and_session_config(config, model_dir):
"""Overwrite estimator config by `model_dir` and `session_config` if needed.
Args:
config: Original estimator config.
model_dir: Estimator model checkpoint directory.
Returns:
Overwritten estimator config.
Raises:
ValueError: Model directory inconsistent between `model_dir` and `config`.
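Example (an illustrative sketch assuming the config carries no conflicting
`model_dir`):
```
config = maybe_overwrite_model_dir_and_session_config(
    tf.estimator.RunConfig(), '/tmp/model')
# config.model_dir is now '/tmp/model' and config.session_config holds the
# default session config.
```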
"""
if config is None:
config = run_config.RunConfig()
tf.compat.v1.logging.info('Using default config.')
if not isinstance(config, run_config.RunConfig):
raise ValueError(
'config must be an instance of `RunConfig`, but provided %s.' % config)
if config.session_config is None:
session_config = run_config.get_default_session_config()
config = run_config.RunConfig.replace(config, session_config=session_config)
model_dir = compat_internal.path_to_str(model_dir)
if model_dir is not None:
if (getattr(config, 'model_dir', None) is not None and
config.model_dir != model_dir):
raise ValueError(
'`model_dir` is set both in the constructor and `RunConfig`, but with '
"different values. In constructor: '{}', in `RunConfig`: "
"'{}' ".format(model_dir, config.model_dir))
if model_dir:
config = run_config.RunConfig.replace(config, model_dir=model_dir)
elif getattr(config, 'model_dir', None) is None:
model_dir = tempfile.mkdtemp()
tf.compat.v1.logging.warn('Using temporary folder as model directory: %s',
model_dir)
config = run_config.RunConfig.replace(config, model_dir=model_dir)
return config
def create_per_replica_ready_for_local_init_op(scaffold):
"""Create a `tf.train.Scaffold.ready_for_local_init_op` inside a replica."""
if scaffold.ready_for_local_init_op:
return scaffold.ready_for_local_init_op
def default_ready_for_local_init_op():
return tf.compat.v1.report_uninitialized_variables(
tf.compat.v1.global_variables())
return tf.compat.v1.train.Scaffold.get_or_default(
'ready_for_local_init_op', tf.compat.v1.GraphKeys.READY_FOR_LOCAL_INIT_OP,
default_ready_for_local_init_op)
def _combine_distributed_scaffold(grouped_scaffold, distribution):
"""Combines scaffold(s) returned from `call_for_each_replica`."""
# TODO(anjalisridhar): Figure out how to resolve the following scaffold
# parameters: init_feed_dict, init_fn.
scaffold_list = distribution.experimental_local_results(grouped_scaffold)
init_feed_dict = [
s.init_feed_dict for s in scaffold_list if s.init_feed_dict is not None
]
if init_feed_dict:
init_feed_dict = distribution.group(init_feed_dict)
else:
init_feed_dict = None
init_fn = [
s._user_init_fn for s in scaffold_list if s._user_init_fn is not None # pylint: disable=protected-access
]
if init_fn:
init_fn = init_fn[0]
else:
init_fn = None
init_op = [s.init_op for s in scaffold_list if s.init_op is not None]
if init_op:
init_op = distribution.group(init_op)
else:
init_op = None
def _unwrap_and_concat(value):
value = tf.nest.flatten(distribution.experimental_local_results(value))
if len(value) != 1:
return tf.concat(value, 0)
return value[0]
ready_op = distribution.extended.call_for_each_replica(
lambda scaffold: scaffold.ready_op, args=(grouped_scaffold,))
if ready_op is not None:
ready_op = _unwrap_and_concat(ready_op)
ready_for_local_init_op = distribution.extended.call_for_each_replica(
create_per_replica_ready_for_local_init_op, args=(grouped_scaffold,))
if ready_for_local_init_op is not None:
ready_for_local_init_op = _unwrap_and_concat(ready_for_local_init_op)
else:
ready_for_local_init_op = None
local_init_op = [
s.local_init_op for s in scaffold_list if s.local_init_op is not None
]
if local_init_op:
local_init_op = distribution.group(local_init_op)
else:
local_init_op = None
summary_op = [s.summary_op for s in scaffold_list if s.summary_op is not None]
if summary_op:
summary_op = distribution.group(summary_op)
else:
summary_op = None
savers = [s.saver for s in scaffold_list if s.saver is not None]
if savers:
saver = savers[0]
else:
saver = None
scaffold = tf.compat.v1.train.Scaffold(
init_op=init_op,
ready_op=ready_op,
ready_for_local_init_op=ready_for_local_init_op,
local_init_op=local_init_op,
summary_op=summary_op,
saver=saver,
init_feed_dict=init_feed_dict,
init_fn=init_fn)
return scaffold
def _check_checkpoint_available(model_dir):
latest_path = tf.train.latest_checkpoint(model_dir)
if not latest_path:
raise ValueError(
'Could not find trained model in model_dir: {}.'.format(model_dir))
def _check_hooks_type(hooks):
"""Returns hooks if all are `SessionRunHook`, raises TypeError otherwise."""
hooks = list(hooks or [])
for h in hooks:
if not isinstance(h, tf.compat.v1.train.SessionRunHook):
raise TypeError('Hooks must be a SessionRunHook, given: {}'.format(h))
return hooks
def _check_listeners_type(saving_listeners):
"""Check listeners type."""
listeners = list(saving_listeners or [])
for l in listeners:
if not isinstance(l, tf.compat.v1.train.CheckpointSaverListener):
raise TypeError(
'saving_listeners must be a list of CheckpointSaverListener, '
'given: {}'.format(l))
return listeners
def _get_replica_device_setter(config):
"""Creates a replica device setter if required as a default `device_fn`.
`Estimator` uses `tf.train.ReplicaDeviceSetter` as a default device placer. It
sets the distribution-related arguments, such as the number of `ps_replicas`, based
on given `config`.
Args:
config: A `tf.estimator.RunConfig` instance.
Returns:
A replica device setter, or `None`.
"""
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return tf.compat.v1.train.replica_device_setter(
ps_tasks=config.num_ps_replicas,
worker_device=worker_device,
merge_devices=True,
ps_ops=list(device_setter.STANDARD_PS_OPS),
cluster=config.cluster_spec)
else:
return None
def _verify_model_fn_args(model_fn, params):
"""Verifies `model_fn` arguments."""
args = set(function_utils.fn_args(model_fn))
if 'features' not in args:
raise ValueError('model_fn (%s) must include features argument.' % model_fn)
if params is not None and 'params' not in args:
raise ValueError('model_fn (%s) does not include params argument, '
'but params (%s) is passed to Estimator.' %
(model_fn, params))
if params is None and 'params' in args:
tf.compat.v1.logging.warn(
'Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.', model_fn)
non_valid_args = list(args - _VALID_MODEL_FN_ARGS)
if non_valid_args:
raise ValueError('model_fn (%s) has the following unexpected args: %s' %
(model_fn, non_valid_args))
def _load_global_step_from_checkpoint_dir(checkpoint_dir):
try:
checkpoint_reader = tf.compat.v1.train.NewCheckpointReader(
tf.train.latest_checkpoint(checkpoint_dir))
return checkpoint_reader.get_tensor(tf.compat.v1.GraphKeys.GLOBAL_STEP)
except: # pylint: disable=bare-except
return 0
def _extract_metric_update_ops(eval_dict, distribution=None):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
# Sort metrics lexicographically so graph is identical every time.
for name, value in sorted(six.iteritems(eval_dict)):
value_ops[name] = value[0]
update_ops.append(
distribution.group(value[1]) if distribution else value[1])
update_op = tf.group(*update_ops) if update_ops else None
return update_op, value_ops
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
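Example (values are illustrative):
```
_dict_to_str({'accuracy': 0.9, 'loss': 0.5})
# -> 'accuracy = 0.9, loss = 0.5'
```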
"""
return ', '.join('%s = %s' % (k, v)
for k, v in sorted(six.iteritems(dictionary))
if not isinstance(v, six.binary_type))
def _write_dict_to_summary(output_dir, dictionary, current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
tf.compat.v1.logging.info('Saving dict for global step %d: %s',
current_global_step, _dict_to_str(dictionary))
summary_writer = tf.compat.v1.summary.FileWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
if key == 'global_step':
continue
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
summary_proto.value.add(tag=key, simple_value=float(dictionary[key]))
elif (isinstance(dictionary[key], np.int64) or
isinstance(dictionary[key], np.int32) or
isinstance(dictionary[key], int)):
summary_proto.value.add(tag=key, simple_value=int(dictionary[key]))
elif isinstance(dictionary[key], six.binary_type):
try:
summ = summary_pb2.Summary.FromString(dictionary[key])
for i, _ in enumerate(summ.value):
summ.value[i].tag = '%s/%d' % (key, i)
summary_proto.value.extend(summ.value)
except message.DecodeError:
tf.compat.v1.logging.warn(
'Skipping summary for %s, cannot parse string to Summary.', key)
continue
elif isinstance(dictionary[key], np.ndarray):
value = summary_proto.value.add()
value.tag = key
value.node_name = key
tensor_proto = tf.make_tensor_proto(dictionary[key])
value.tensor.CopyFrom(tensor_proto)
# pylint: disable=line-too-long
tf.compat.v1.logging.info(
'Summary for np.ndarray is not visible in Tensorboard by default. '
'Consider using a Tensorboard plugin for visualization (see '
'https://github.com/tensorflow/tensorboard-plugin-example/blob/master/README.md'
' for more information).')
# pylint: enable=line-too-long
else:
tf.compat.v1.logging.warn(
'Skipping summary for %s, must be a float, np.float32, np.int64, '
'np.int32 or int or np.ndarray or a serialized string of Summary.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
def _write_checkpoint_path_to_summary(output_dir, checkpoint_path,
current_global_step):
"""Writes `checkpoint_path` into summary file in the given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
checkpoint_path: `str`, checkpoint file path to be written to summary file.
current_global_step: `int`, the current global step.
"""
checkpoint_path_tag = 'checkpoint_path'
tf.compat.v1.logging.info('Saving \'%s\' summary for global step %d: %s',
checkpoint_path_tag, current_global_step,
checkpoint_path)
summary_proto = summary_pb2.Summary()
summary_proto.value.add(
tag=checkpoint_path_tag,
tensor=tf.make_tensor_proto(checkpoint_path, dtype=tf.dtypes.string))
summary_writer = tf.compat.v1.summary.FileWriterCache.get(output_dir)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
def _has_dataset_or_queue_runner(maybe_tensor):
"""Returns `True` if `Dataset` or `QueueRunner` has been used."""
# Check TF dataset first. Here, we use a simple algorithm to check the top
# level Tensors only, which should be sufficient for most users.
tensors = [
x for x in tf.nest.flatten(maybe_tensor) if isinstance(x, tf.Tensor)
]
if any([t.op.type == 'IteratorGetNext' for t in tensors]):
return True
# Now, check queue.
return tf.compat.v1.get_default_graph().get_collection(
tf.compat.v1.GraphKeys.QUEUE_RUNNERS)
VocabInfo = tf.compat.v1.train.VocabInfo # pylint: disable=invalid-name
estimator_export('estimator.VocabInfo')(VocabInfo)
@estimator_export('estimator.WarmStartSettings')
class WarmStartSettings(
collections.namedtuple('WarmStartSettings', [
'ckpt_to_initialize_from',
'vars_to_warm_start',
'var_name_to_vocab_info',
'var_name_to_prev_var_name',
])):
"""Settings for warm-starting in `tf.estimator.Estimators`.
Example Use with canned `tf.estimator.DNNEstimator`:
```
emb_vocab_file = tf.feature_column.embedding_column(
tf.feature_column.categorical_column_with_vocabulary_file(
"sc_vocab_file", "new_vocab.txt", vocab_size=100),
dimension=8)
emb_vocab_list = tf.feature_column.embedding_column(
tf.feature_column.categorical_column_with_vocabulary_list(
"sc_vocab_list", vocabulary_list=["a", "b"]),
dimension=8)
estimator = tf.estimator.DNNClassifier(
hidden_units=[128, 64], feature_columns=[emb_vocab_file, emb_vocab_list],
warm_start_from=ws)
```
where `ws` could be defined as:
Warm-start all weights in the model (input layer and hidden weights).
Either the directory or a specific checkpoint can be provided (in the case
of the former, the latest checkpoint will be used):
```
ws = WarmStartSettings(ckpt_to_initialize_from="/tmp")
ws = WarmStartSettings(ckpt_to_initialize_from="/tmp/model-1000")
```
Warm-start only the embeddings (input layer):
```
ws = WarmStartSettings(ckpt_to_initialize_from="/tmp",
vars_to_warm_start=".*input_layer.*")
```
Warm-start all weights but the embedding parameters corresponding to
`sc_vocab_file` have a different vocab from the one used in the current
model:
```
vocab_info = tf.estimator.VocabInfo(
new_vocab=sc_vocab_file.vocabulary_file,
new_vocab_size=sc_vocab_file.vocabulary_size,
num_oov_buckets=sc_vocab_file.num_oov_buckets,
old_vocab="old_vocab.txt"
)
ws = WarmStartSettings(
ckpt_to_initialize_from="/tmp",
var_name_to_vocab_info={
"input_layer/sc_vocab_file_embedding/embedding_weights": vocab_info
})
```
Warm-start only `sc_vocab_file` embeddings (and no other variables), which
have a different vocab from the one used in the current model:
```
vocab_info = tf.estimator.VocabInfo(
new_vocab=sc_vocab_file.vocabulary_file,
new_vocab_size=sc_vocab_file.vocabulary_size,
num_oov_buckets=sc_vocab_file.num_oov_buckets,
old_vocab="old_vocab.txt"
)
ws = WarmStartSettings(
ckpt_to_initialize_from="/tmp",
vars_to_warm_start=None,
var_name_to_vocab_info={
"input_layer/sc_vocab_file_embedding/embedding_weights": vocab_info
})
```
Warm-start all weights but the parameters corresponding to `sc_vocab_file`
have a different vocab from the one used in current checkpoint, and only
100 of those entries were used:
```
vocab_info = tf.estimator.VocabInfo(
new_vocab=sc_vocab_file.vocabulary_file,
new_vocab_size=sc_vocab_file.vocabulary_size,
num_oov_buckets=sc_vocab_file.num_oov_buckets,
old_vocab="old_vocab.txt",
old_vocab_size=100
)
ws = WarmStartSettings(
ckpt_to_initialize_from="/tmp",
var_name_to_vocab_info={
"input_layer/sc_vocab_file_embedding/embedding_weights": vocab_info
})
```
Warm-start all weights but the parameters corresponding to `sc_vocab_file`
have a different vocab from the one used in current checkpoint and the
parameters corresponding to `sc_vocab_list` have a different name from the
current checkpoint:
```
vocab_info = tf.estimator.VocabInfo(
new_vocab=sc_vocab_file.vocabulary_file,
new_vocab_size=sc_vocab_file.vocabulary_size,
num_oov_buckets=sc_vocab_file.num_oov_buckets,
old_vocab="old_vocab.txt",
old_vocab_size=100
)
ws = WarmStartSettings(
ckpt_to_initialize_from="/tmp",
var_name_to_vocab_info={
"input_layer/sc_vocab_file_embedding/embedding_weights": vocab_info
},
var_name_to_prev_var_name={
"input_layer/sc_vocab_list_embedding/embedding_weights":
"old_tensor_name"
})
```
Warm-start all TRAINABLE variables:
```
ws = WarmStartSettings(ckpt_to_initialize_from="/tmp",
vars_to_warm_start=".*")
```
Warm-start all variables (including non-TRAINABLE):
```
ws = WarmStartSettings(ckpt_to_initialize_from="/tmp",
vars_to_warm_start=[".*"])
```
Warm-start non-TRAINABLE variables "v1", "v1/Momentum", and "v2" but not
"v2/momentum":
```
ws = WarmStartSettings(ckpt_to_initialize_from="/tmp",
vars_to_warm_start=["v1", "v2[^/]"])
```
Attributes:
ckpt_to_initialize_from: [Required] A string specifying the directory with
checkpoint file(s) or path to checkpoint from which to warm-start the
model parameters.
vars_to_warm_start: [Optional] One of the following:
* A regular expression (string) that captures which variables to
warm-start (see tf.compat.v1.get_collection). This expression will only
consider variables in the TRAINABLE_VARIABLES collection -- if you need
to warm-start non-TRAINABLE vars (such as optimizer accumulators or
batch norm statistics), please use the below option.
* A list of strings, each a regex scope provided to
tf.compat.v1.get_collection with GLOBAL_VARIABLES (please see
tf.compat.v1.get_collection). For backwards compatibility reasons, this
is separate from the single-string argument type.
* A list of Variables to warm-start. If you do not have access to the
`Variable` objects at the call site, please use the above option.
* `None`, in which case only TRAINABLE variables specified in
`var_name_to_vocab_info` will be warm-started.
Defaults to `'.*'`, which warm-starts all variables in the
TRAINABLE_VARIABLES collection. Note that this excludes variables such as
accumulators and moving statistics from batch norm.
var_name_to_vocab_info: [Optional] Dict of variable names (strings) to
`tf.estimator.VocabInfo`. The variable names should be "full" variables,
not the names of the partitions. If not explicitly provided, the variable
is assumed to have no (changes to) vocabulary.
var_name_to_prev_var_name: [Optional] Dict of variable names (strings) to
name of the previously-trained variable in `ckpt_to_initialize_from`. If
not explicitly provided, the name of the variable is assumed to be same
between previous checkpoint and current model. Note that this has no
effect on the set of variables that is warm-started, and only controls
name mapping (use `vars_to_warm_start` for controlling what variables to
warm-start).
"""
def __new__(cls,
ckpt_to_initialize_from,
vars_to_warm_start='.*',
var_name_to_vocab_info=None,
var_name_to_prev_var_name=None):
if not ckpt_to_initialize_from:
raise ValueError(
'`ckpt_to_initialize_from` MUST be set in WarmStartSettings')
return super(WarmStartSettings, cls).__new__(
cls,
ckpt_to_initialize_from,
vars_to_warm_start,
var_name_to_vocab_info or {},
var_name_to_prev_var_name or {},
)
def _get_default_warm_start_settings(warm_start_from):
"""Returns default `tf.estimator.WarmStartSettings`.
Args:
warm_start_from: Either a string representing the filepath of a checkpoint
or `SavedModel` to initialize from, or an instance of
`tf.estimator.WarmStartSettings`.
Returns:
Either None or an instance of `WarmStartSettings`.
Raises:
ValueError: If `warm_start_from` is not `None` but is neither a string nor
an instance of `WarmStartSettings`.
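Example (an illustrative sketch; the paths and the variable regex are
placeholders):
```
# A bare checkpoint (or SavedModel) path is wrapped into WarmStartSettings.
ws = _get_default_warm_start_settings('/tmp/prev_model')
# An explicit WarmStartSettings instance is passed through unchanged.
ws = _get_default_warm_start_settings(
    tf.estimator.WarmStartSettings(
        ckpt_to_initialize_from='/tmp/prev_model',
        vars_to_warm_start='.*input_layer.*'))
```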
"""
if warm_start_from is None:
return None
if isinstance(warm_start_from, (six.string_types, six.binary_type)):
# Infer that this is a SavedModel if export_path +
# 'variables/variables.index' exists, and if so, construct the
# WarmStartSettings pointing to the variables path
# (export_path + 'variables/variables').
if tf.compat.v1.gfile.Exists(
os.path.join(
saved_model_utils.get_variables_dir(warm_start_from),
tf.compat.as_text('variables.index'))):
tf.compat.v1.logging.info('Warm-starting from a SavedModel')
return WarmStartSettings(
ckpt_to_initialize_from=saved_model_utils.get_variables_path(
warm_start_from))
return WarmStartSettings(ckpt_to_initialize_from=warm_start_from)
elif isinstance(warm_start_from, WarmStartSettings):
return warm_start_from
else:
raise ValueError('warm_start_from must be a string or a WarmStartSettings, '
'instead got {}'.format(type(warm_start_from)))
avg_line_length: 42.764435 | max_line_length: 138 | alphanum_fraction: 0.689464

hexsha: f9c3108c4632c11bf707b81d8fb115c05a2e55ce | size: 5576 | ext: py | lang: Python
max_stars_repo_path: tightbinding_code/helpers.py | max_stars_repo_name: prz3m37/TightBinding | max_stars_repo_head_hexsha: 2172a4d369c00772181ded3cd9e8083a87c2ad47 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 4 | max_stars_repo_stars_event_min_datetime: 2019-09-01T18:05:08.000Z | max_stars_repo_stars_event_max_datetime: 2020-08-06T12:28:48.000Z
max_issues_repo_path: tightbinding_code/helpers.py | max_issues_repo_name: prz3m37/TightBinding | max_issues_repo_head_hexsha: 2172a4d369c00772181ded3cd9e8083a87c2ad47 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tightbinding_code/helpers.py | max_forks_repo_name: prz3m37/TightBinding | max_forks_repo_head_hexsha: 2172a4d369c00772181ded3cd9e8083a87c2ad47 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
import os
import datetime
import numpy as np
import config as cfg
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
class TightBindingHelpers(object):
"""
    Class with helper methods, such as plotting and saving the results of calculations. It is used by the
    ExecuteTightBindingCalculations class.
"""
    def __init__(self, parametrization) -> None:
        """
        Method loads the configuration file (config.py) for the given parametrization.
        """
self.__settings = cfg.settings
self.__configuration = cfg.configuration[parametrization]
    def create_saving_folder(self) -> None:
"""
Method creates (if necessary) folder / directory where results will be saved
Returns: None
"""
directory = self.__settings['saving_directory']
if not os.path.exists(directory):
os.makedirs(directory)
return
    def plot_DOS(self, save_file: str, num_of_atoms: str, density_of_states: np.array) -> None:
"""
Method plots density of states and saves it (as png file).
Args:
save_file: name of png file
density_of_states: array with numerical values of density of states
num_of_atoms: number of atoms in lattice.
Returns: None
"""
start = self.__settings['start']
end = self.__settings['stop']
step = self.__settings['step']
E = np.arange(start, end, step)
plt.figure(figsize=(13.66, 7.68))
plt.plot(E, density_of_states)
plt.axhline(y=0, color='r', linestyle='-')
plt.xlabel('Energy [a.u.]')
plt.ylabel('DOS')
plt.title('Density of states for ' + str(num_of_atoms) + ' atoms')
plt.savefig(self.__settings['saving_directory'] + '/__DOS__' + save_file + '.png', dpi=400)
plt.close()
return
    def plot_lattice(self, lattice: np.array, save_file: str, num_of_atoms: str, projection: str = None) -> None:
"""
Method plots hexagonal grid.
Args:
lattice: array of coordinates of each point of lattice ([[x1,y1,z1], [x2,y2,z2], [...]])
save_file: name of file
projection: argument which tells if plot has to be plotted in 3d
num_of_atoms: number of atoms in lattice.
Returns: None
"""
x = []
y = []
z = []
for i in lattice:
x.append(i[0])
y.append(i[1])
            z.append(i[2])  # append the scalar z coordinate, not a one-element list
if projection == '3d':
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
            ax.scatter(x, y, z, marker='o')
            plt.title('Lattice scheme for ' + str(num_of_atoms) + ' atoms')
            plt.axis('off')
            # set the title and hide the axes before saving so they appear in the output file
            plt.savefig(self.__settings['saving_directory'] + '/_lattice_' + save_file + '.png', dpi=200)
            plt.close()
else:
            plt.figure(figsize=(15, 10))
            plt.scatter(x, y, marker='o')
            plt.axis('off')
            plt.savefig(self.__settings['saving_directory'] + '/_lattice_' + save_file + '.png', dpi=200)
            plt.close()
return
    def save_numerical_results(self, save_file: str, eigen_energies: np.array) -> None:
"""
Method saves numerical results - eigen energies - into txt file
Args:
save_file: name of file
eigen_energies: array of eigen energies calculated by diagonalization of interaction matrix.
Returns: None
"""
with open(self.__settings['saving_directory'] + '/' + save_file + '.txt', "w") as file:
for eigen_energy in eigen_energies:
file.write(str(eigen_energy) + "\n")
return
    def get_file_name(self, num_of_atoms) -> str:
"""
Method creates name of saving file (txt and png) basing on chosen parameters.
Args:
num_of_atoms: number of atoms in lattice.
Returns: name of saving file
"""
title = self.__settings['title']
sigma = self.__configuration['fermi_level']
gauss_sigma = self.__settings['gauss_sigma']
x_num_of_steps = self.__configuration['x_num_of_steps']
calculation_type = self.__configuration['calculation_type']
        if x_num_of_steps is not None:
save_file = str(datetime.datetime.now()) + '_' + calculation_type + \
'_sigma=' + \
str(sigma) + \
'_gauss_sigma=' + \
str(gauss_sigma) + \
'_num_of_atoms=' + \
str(num_of_atoms) + '_' +\
str(title)
        else:
vertical_num_of_steps = self.__configuration['vertical_num_of_steps']
horizontal_num_of_steps = self.__configuration['horizontal_num_of_steps']
save_file = str(datetime.datetime.now()) + '_' + calculation_type + \
'_sigma=' + \
str(sigma) + \
'_gauss_sigma=' + \
str(gauss_sigma) + \
'_width=' + \
str(vertical_num_of_steps) + \
'_length=' + \
str(horizontal_num_of_steps) + \
'_num_of_atoms=' + \
str(num_of_atoms) + '_' +\
str(title)
return save_file
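# --- Illustrative usage (editor's sketch, not part of the original module). ---
# A minimal, hedged example of driving these helpers; the parametrization key
# 'default', the atom count and the eigenvalues are hypothetical, and config.py
# must define matching 'settings' and 'configuration' entries.
def _example_helpers_usage():
    helpers = TightBindingHelpers('default')
    helpers.create_saving_folder()
    save_file = helpers.get_file_name(num_of_atoms=128)
    helpers.save_numerical_results(save_file, np.array([-1.5, -0.3, 0.3, 1.5]))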
| 36.444444
| 115
| 0.553802
|
339e47df3c9bae1a46c07018f282a5a1402b9321
| 693
|
py
|
Python
|
hough.py
|
vgaurav3011/Machine_Learning_Algorithms
|
fc11f61964e17c87408ac9b4c1834333b2436e27
|
[
"MIT"
] | null | null | null |
hough.py
|
vgaurav3011/Machine_Learning_Algorithms
|
fc11f61964e17c87408ac9b4c1834333b2436e27
|
[
"MIT"
] | null | null | null |
hough.py
|
vgaurav3011/Machine_Learning_Algorithms
|
fc11f61964e17c87408ac9b4c1834333b2436e27
|
[
"MIT"
] | 1
|
2019-10-02T03:56:00.000Z
|
2019-10-02T03:56:00.000Z
|
import cv2
import numpy as np
img = cv2.imread("../1.jpg")
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Smooth with a 5x5 averaging (box) filter before edge detection
kernel = np.ones((5,5),np.float32)/25
gray = cv2.filter2D(gray,-1,kernel)
# Detect edges with Canny using a large aperture (Sobel kernel size 5)
edges = cv2.Canny(gray,400,600,apertureSize = 5)
cv2.imshow('image',edges)
cv2.waitKey(0)
# Standard Hough transform: each detected line is returned as a (rho, theta) pair
lines = cv2.HoughLines(edges,1,np.pi/180,15)
# Draw at most the 8 strongest lines, guarding against fewer detections
for i in range(min(8, 0 if lines is None else len(lines))):
    for rho,theta in lines[i]:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2)
cv2.imshow('image',img)
cv2.waitKey(0)
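# --- Editor's sketch (not part of the original script): probabilistic Hough. ---
# cv2.HoughLinesP returns finite segments (x1, y1, x2, y2) directly, avoiding the
# rho/theta to endpoint conversion above; the thresholds here are illustrative.
def _hough_segments(edge_img, original_img):
    segments = cv2.HoughLinesP(edge_img, 1, np.pi / 180, threshold=15,
                               minLineLength=30, maxLineGap=10)
    if segments is not None:
        for x1, y1, x2, y2 in segments[:, 0]:
            cv2.line(original_img, (x1, y1), (x2, y2), (0, 255, 0), 2)
    return original_img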
| 26.653846
| 48
| 0.562771
|
6e2024a2c09799a8fc9fa137574a844f5655cafd
| 1,526
|
py
|
Python
|
tests/snuba/api/endpoints/test_project_tags.py
|
pierredup/sentry
|
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
|
[
"BSD-3-Clause"
] | null | null | null |
tests/snuba/api/endpoints/test_project_tags.py
|
pierredup/sentry
|
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
|
[
"BSD-3-Clause"
] | null | null | null |
tests/snuba/api/endpoints/test_project_tags.py
|
pierredup/sentry
|
0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.testutils import APITestCase, SnubaTestCase
from sentry.testutils.helpers.datetime import before_now, iso_format
class ProjectTagsTest(APITestCase, SnubaTestCase):
def test_simple(self):
user = self.create_user()
org = self.create_organization()
team = self.create_team(organization=org)
self.create_member(organization=org, user=user, teams=[team])
project = self.create_project(organization=org, teams=[team])
self.store_event(
data={
"tags": {"foo": "oof", "bar": "rab"},
"timestamp": iso_format(before_now(minutes=1)),
},
project_id=project.id,
)
self.store_event(
data={"tags": {"bar": "rab2"}, "timestamp": iso_format(before_now(minutes=1))},
project_id=project.id,
)
self.login_as(user=user)
url = reverse(
"sentry-api-0-project-tags",
kwargs={"organization_slug": project.organization.slug, "project_slug": project.slug},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
data = {v["key"]: v for v in response.data}
assert len(data) == 3
assert data["foo"]["canDelete"]
assert data["foo"]["uniqueValues"] == 1
assert data["bar"]["canDelete"]
assert data["bar"]["uniqueValues"] == 2
| 34.681818
| 98
| 0.611402
|
b0d4e22171f8c8bbdcaf4fb7566628871356e98b
| 6,874
|
py
|
Python
|
02_TensorFlow_Way/C0205_loss_functions.py
|
zhuyuanxiang/tensorflow_cookbook
|
57d7ee719385ddd249a67c3a85bd336e884a67e5
|
[
"MIT"
] | 7
|
2019-11-30T05:42:47.000Z
|
2021-10-09T03:02:19.000Z
|
02_TensorFlow_Way/C0205_loss_functions.py
|
zhuyuanxiang/tensorflow_cookbook
|
57d7ee719385ddd249a67c3a85bd336e884a67e5
|
[
"MIT"
] | null | null | null |
02_TensorFlow_Way/C0205_loss_functions.py
|
zhuyuanxiang/tensorflow_cookbook
|
57d7ee719385ddd249a67c3a85bd336e884a67e5
|
[
"MIT"
] | 2
|
2019-12-05T06:44:48.000Z
|
2021-10-09T03:02:20.000Z
|
# -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : 526614962@qq.com
@site : https://github.com/zhuyuanxiang/tensorflow_cookbook
---------------------------
@Software : PyCharm
@Project : TensorFlow_Machine_Learning_Cookbook
@File : C0205_loss_functions.py
@Version : v0.1
@Time : 2019-10-29 14:59
@License : (C)Copyright 2018-2019, zYx.Tom
@Reference : "TensorFlow Machine Learning Cookbook", Nick McClure, Sec0205, P26
@Desc    : The TensorFlow Way -- implementing loss functions in TensorFlow
"""
# common imports
import os
import sys
import matplotlib.pyplot as plt
import numpy as np  # pip install numpy<1.17 ; versions below 1.17 do not raise the error
import sklearn
import tensorflow as tf
import winsound
from tensorflow.python.framework import ops
from tools import show_title, show_values
# Configure numpy print options (precision, no scientific notation)
np.set_printoptions(precision = 8, suppress = True, threshold = np.inf, linewidth = 200)
# Fix the random seed so that random data is reproducible across runs
np.random.seed(42)
# Reset the default computation graph
ops.reset_default_graph()
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
assert sklearn.__version__ >= "0.20"
# Suppress the warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Open graph session
sess = tf.Session()
# 2.5 Loss functions
def regression_loss_functions():
    show_title("Loss functions for regression algorithms")
###### Numerical Predictions ######
x_vals = tf.linspace(-1., 1., 500)
target = tf.constant(0.)
    # L2 loss (squared / Euclidean loss)
# L = (pred - actual)^2
l2_y_vals = tf.square(target - x_vals)
# show_values(l2_y_vals,"l2_y_vals")
l2_y_out = sess.run(l2_y_vals)
    # L1 loss (absolute-value loss)
# L = abs(pred - actual)
l1_y_vals = tf.abs(target - x_vals)
# show_values(l1_y_vals,"l1_y_vals")
l1_y_out = sess.run(l1_y_vals)
# Pseudo-Huber loss
# L = delta^2 * (sqrt(1 + ((pred - actual)/delta)^2) - 1)
delta1 = tf.constant(0.25)
phuber1_y_vals = tf.multiply(tf.square(delta1), tf.sqrt(1. + tf.square((target - x_vals) / delta1)) - 1.)
phuber1_y_vals = tf.square(delta1) * (tf.sqrt(1. + tf.square((target - x_vals) / delta1)) - 1.)
# show_values(phuber1_y_vals,"phuber1_y_vals")
phuber1_y_out = sess.run(phuber1_y_vals)
delta2 = tf.constant(5.)
phuber2_y_vals = tf.multiply(tf.square(delta2), tf.sqrt(1. + tf.square((target - x_vals) / delta2)) - 1.)
    phuber2_y_vals = tf.square(delta2) * (tf.sqrt(1. + tf.square((target - x_vals) / delta2)) - 1.)
# show_values(phuber2_y_vals,"phuber2_y_vals")
phuber2_y_out = sess.run(phuber2_y_vals)
# Plot the output:
# x_array = show_values(x_vals, "x_vals = ")
x_array = sess.run(x_vals)
plt.plot(x_array, l2_y_out, 'b-', label = 'L2 Loss')
plt.plot(x_array, l1_y_out, 'r--', label = 'L1 Loss')
plt.plot(x_array, phuber1_y_out, 'k-.', label = 'P-Huber Loss (0.25)')
plt.plot(x_array, phuber2_y_out, 'g:', label = 'P-Huber Loss (5.0)')
plt.ylim(-0.2, 0.4)
plt.legend(loc = 'lower right', prop = {'size': 11})
plt.title("图2-4:各种回归算法的损失函数")
def classfication_loss_functions():
###### Categorical Predictions ######
x_vals = tf.linspace(-3., 5., 500)
target = tf.constant(1.)
targets = tf.fill([500, ], 1.)
# Hinge loss
# Use for predicting binary (-1, 1) classes
    # Mainly used to evaluate support vector machines; can also evaluate neural networks
    # The exact formula depends on the algorithm; the one below is for reference only
# L = max(0, 1 - (pred * actual))
hinge_y_vals = tf.maximum(0., 1. - tf.multiply(target, x_vals))
hinge_y_vals = tf.maximum(0., 1. - (target * x_vals))
# hinge_y_out = show_values( hinge_y_vals,"hinge_y_vals")
hinge_y_out = sess.run(hinge_y_vals)
# Cross entropy loss
    # Cross-entropy loss
# L = -actual * (log(pred)) - (1-actual)(log(1-pred))
xentropy_y_vals = - tf.multiply(target, tf.log(x_vals)) - tf.multiply((1. - target), tf.log(1. - x_vals))
xentropy_y_vals = -(target * tf.log(x_vals)) - ((1. - target) * tf.log(1. - x_vals))
# xentropy_y_out = show_values( xentropy_y_vals,"xentropy_y_vals")
xentropy_y_out = sess.run(xentropy_y_vals)
# Sigmoid entropy loss
    # Sigmoid cross-entropy loss
# L = -actual * (log(sigmoid(pred))) - (1-actual)(log(1-sigmoid(pred)))
# or
# L = max(actual, 0) - actual * pred + log(1 + exp(-abs(actual)))
    # The labels and logits arguments are swapped in the book's code
xentropy_sigmoid_y_vals = tf.nn.sigmoid_cross_entropy_with_logits(labels = targets, logits = x_vals)
# show_values(xentropy_sigmoid_y_vals,"xentropy_sigmoid_y_vals")
xentropy_sigmoid_y_out = sess.run(xentropy_sigmoid_y_vals)
# Weighted (Sigmoid) cross entropy loss
    # Weighted sigmoid cross-entropy loss
# L = targets * -log(sigmoid(logits)) * pos_weight + (1 - targets) * -log(1 - sigmoid(logits))
# L = -actual * (log(pred)) * weights - (1-actual)(log(1-pred))
# or
# L = (1 - pred) * actual + (1 + (weights - 1) * pred) * log(1 + exp(-actual))
weight = tf.constant(0.5)
xentropy_weighted_y_vals = tf.nn.weighted_cross_entropy_with_logits(
targets = targets, logits = x_vals, pos_weight = weight)
# show_values(xentropy_weighted_y_vals,"xentropy_weighted_y_vals")
xentropy_weighted_y_out = sess.run(xentropy_weighted_y_vals)
# Plot the output
x_array = sess.run(x_vals)
    plt.plot(x_array, hinge_y_out, 'b-', label = 'Hinge loss')
    plt.plot(x_array, xentropy_y_out, 'r--', label = 'Binary cross-entropy loss')
    plt.plot(x_array, xentropy_sigmoid_y_out, 'k-.', label = 'Sigmoid cross-entropy loss')
    plt.plot(x_array, xentropy_weighted_y_out, 'g:', label = 'Weighted cross-entropy loss (x0.5)')
plt.ylim(-1.5, 3)
# plt.xlim(-1, 3)
plt.legend(loc = 'lower right', prop = {'size': 11})
plt.title("图2-5:各种分类算法的损失函数")
# Softmax entropy loss
    # Softmax cross-entropy loss
# L = -actual * (log(softmax(pred))) - (1-actual)(log(1-softmax(pred)))
unscaled_logits = tf.constant([[1., -3., 10.]])
target_dist = tf.constant([[0.1, 0.02, 0.88]])
softmax_xentropy = tf.nn.softmax_cross_entropy_with_logits(
logits = unscaled_logits, labels = target_dist)
show_values(softmax_xentropy, "softmax_xentropy")
# print(sess.run(softmax_xentropy))
# Sparse entropy loss
    # Sparse softmax cross-entropy loss
# Use when classes and targets have to be mutually exclusive
# L = sum( -actual * log(pred) )
unscaled_logits = tf.constant([[1., -3., 10.]])
sparse_target_dist = tf.constant([2])
sparse_xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits = unscaled_logits, labels = sparse_target_dist)
show_values(sparse_xentropy, "sparse_xentropy")
# print(sess.run(sparse_xentropy))
# -----------------------------------------------------------------
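# --- Editor's sketch: NumPy cross-check of the loss formulas commented above. ---
# Independent of TensorFlow; assumes target = 0 as in regression_loss_functions(),
# and the default arguments are illustrative only.
def _numpy_loss_check(pred=0.5, delta=0.25):
    l2 = (0.0 - pred) ** 2  # L2 loss
    l1 = abs(0.0 - pred)  # L1 loss
    # Pseudo-Huber: delta^2 * (sqrt(1 + ((pred - actual)/delta)^2) - 1)
    phuber = delta ** 2 * (np.sqrt(1.0 + ((0.0 - pred) / delta) ** 2) - 1.0)
    return l2, l1, phuber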
if __name__ == "__main__":
    # 2.5 Loss functions
    # regression_loss_functions()
    classification_loss_functions()
    # Beep to signal that the run has finished
winsound.Beep(600, 500)
if len(plt.get_fignums()) != 0:
plt.show()
pass
| 37.562842
| 109
| 0.650713
|
af71799c9a5b3574aba318001c5e89aacff351d1
| 4,046
|
py
|
Python
|
src/python/pants/backend/python/macros/python_requirements_caof.py
|
bastianwegge/pants
|
43f0b90d41622bee0ed22249dbaffb3ff4ad2eb2
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/python/macros/python_requirements_caof.py
|
bastianwegge/pants
|
43f0b90d41622bee0ed22249dbaffb3ff4ad2eb2
|
[
"Apache-2.0"
] | 14
|
2020-09-26T02:01:56.000Z
|
2022-03-30T10:19:28.000Z
|
src/python/pants/backend/python/macros/python_requirements_caof.py
|
bastianwegge/pants
|
43f0b90d41622bee0ed22249dbaffb3ff4ad2eb2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from itertools import groupby
from pathlib import Path
from typing import Iterable, Mapping
from packaging.utils import canonicalize_name as canonicalize_project_name
from pants.backend.python.macros.caof_utils import (
OVERRIDES_TYPE,
flatten_overrides_to_dependency_field,
)
from pants.backend.python.target_types import normalize_module_mapping, parse_requirements_file
from pants.base.build_environment import get_buildroot
from pants.core.target_types import TargetGeneratorSourcesHelperTarget
class PythonRequirementsCAOF:
"""Translates a pip requirements file into an equivalent set of `python_requirement` targets.
If the `requirements.txt` file has lines `foo>=3.14` and `bar>=2.7`,
then this will translate to:
python_requirement(
name="foo",
requirements=["foo>=3.14"],
)
python_requirement(
name="bar",
requirements=["bar>=2.7"],
)
See the requirements file spec here:
https://pip.pypa.io/en/latest/reference/pip_install.html#requirements-file-format
You may also use the parameter `module_mapping` to teach Pants what modules each of your
requirements provide. For any requirement unspecified, Pants will default to the name of the
requirement. This setting is important for Pants to know how to convert your import
statements back into your dependencies. For example:
python_requirements(
module_mapping={
"ansicolors": ["colors"],
"setuptools": ["pkg_resources"],
}
)
"""
def __init__(self, parse_context):
self._parse_context = parse_context
def __call__(
self,
*,
source: str = "requirements.txt",
module_mapping: Mapping[str, Iterable[str]] | None = None,
type_stubs_module_mapping: Mapping[str, Iterable[str]] | None = None,
overrides: OVERRIDES_TYPE = None,
) -> None:
"""
:param module_mapping: a mapping of requirement names to a list of the modules they provide.
For example, `{"ansicolors": ["colors"]}`. Any unspecified requirements will use the
requirement name as the default module, e.g. "Django" will default to
`modules=["django"]`.
"""
req_file_tgt = self._parse_context.create_object(
TargetGeneratorSourcesHelperTarget.alias,
name=source.replace(os.path.sep, "_"),
sources=[source],
)
requirements_dep = f":{req_file_tgt.name}"
normalized_module_mapping = normalize_module_mapping(module_mapping)
normalized_type_stubs_module_mapping = normalize_module_mapping(type_stubs_module_mapping)
req_file = Path(get_buildroot(), self._parse_context.rel_path, source)
requirements = parse_requirements_file(
req_file.read_text(), rel_path=str(req_file.relative_to(get_buildroot()))
)
dependencies_overrides = flatten_overrides_to_dependency_field(
overrides, macro_name="python_requirements", build_file_dir=self._parse_context.rel_path
)
grouped_requirements = groupby(requirements, lambda parsed_req: parsed_req.project_name)
for project_name, parsed_reqs_ in grouped_requirements:
normalized_proj_name = canonicalize_project_name(project_name)
self._parse_context.create_object(
"python_requirement",
name=project_name,
requirements=list(parsed_reqs_),
modules=normalized_module_mapping.get(normalized_proj_name),
type_stub_modules=normalized_type_stubs_module_mapping.get(normalized_proj_name),
dependencies=[
requirements_dep,
*dependencies_overrides.get(normalized_proj_name, []),
],
)
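# --- Editor's sketch (not part of Pants): how the grouping step behaves. ---
# A minimal illustration with `packaging` and `itertools.groupby`; note that
# groupby only merges *consecutive* entries sharing a project name, which mirrors
# how a requirements file usually keeps a project's lines together.
def _example_grouping():
    from itertools import groupby as _groupby
    from packaging.requirements import Requirement
    reqs = [Requirement("foo>=3.14"), Requirement("foo<4"), Requirement("bar>=2.7")]
    # Roughly: {'foo': ['foo>=3.14', 'foo<4'], 'bar': ['bar>=2.7']}
    return {name: [str(r) for r in group]
            for name, group in _groupby(reqs, key=lambda r: r.name)}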
| 38.903846
| 100
| 0.685615
|
ca5c08175164f9ece019b68d09ab03e6b6e3f36c
| 3,298
|
py
|
Python
|
cinder/quota_utils.py
|
whitepages/cinder
|
bd70ce6f4dd58ba904a7c941700cdce54e5a705e
|
[
"Apache-2.0"
] | null | null | null |
cinder/quota_utils.py
|
whitepages/cinder
|
bd70ce6f4dd58ba904a7c941700cdce54e5a705e
|
[
"Apache-2.0"
] | 1
|
2021-03-21T11:38:29.000Z
|
2021-03-21T11:38:29.000Z
|
cinder/quota_utils.py
|
isabella232/cinder
|
bd70ce6f4dd58ba904a7c941700cdce54e5a705e
|
[
"Apache-2.0"
] | 1
|
2021-03-21T11:37:47.000Z
|
2021-03-21T11:37:47.000Z
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from cinder import exception
from cinder.i18n import _LW
from cinder import quota
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
def get_volume_type_reservation(ctxt, volume, type_id,
reserve_vol_type_only=False):
# Reserve quotas for the given volume type
try:
reserve_opts = {'volumes': 1, 'gigabytes': volume['size']}
QUOTAS.add_volume_type_opts(ctxt,
reserve_opts,
type_id)
# If reserve_vol_type_only is True, just reserve volume_type quota,
# not volume quota.
if reserve_vol_type_only:
reserve_opts.pop('volumes')
reserve_opts.pop('gigabytes')
# Note that usually the project_id on the volume will be the same as
# the project_id in the context. But, if they are different then the
# reservations must be recorded against the project_id that owns the
# volume.
project_id = volume['project_id']
reservations = QUOTAS.reserve(ctxt,
project_id=project_id,
**reserve_opts)
except exception.OverQuota as e:
overs = e.kwargs['overs']
usages = e.kwargs['usages']
quotas = e.kwargs['quotas']
def _consumed(name):
return (usages[name]['reserved'] + usages[name]['in_use'])
for over in overs:
if 'gigabytes' in over:
s_size = volume['size']
d_quota = quotas[over]
d_consumed = _consumed(over)
LOG.warning(
_LW("Quota exceeded for %(s_pid)s, tried to create "
"%(s_size)sG volume - (%(d_consumed)dG of "
"%(d_quota)dG already consumed)"),
{'s_pid': ctxt.project_id,
's_size': s_size,
'd_consumed': d_consumed,
'd_quota': d_quota})
raise exception.VolumeSizeExceedsAvailableQuota(
requested=s_size, quota=d_quota, consumed=d_consumed)
elif 'volumes' in over:
LOG.warning(
_LW("Quota exceeded for %(s_pid)s, tried to create "
"volume (%(d_consumed)d volumes "
"already consumed)"),
{'s_pid': ctxt.project_id,
'd_consumed': _consumed(over)})
raise exception.VolumeLimitExceeded(
allowed=quotas[over])
return reservations
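# --- Editor's sketch (illustration only): the consumption arithmetic above. ---
# The "already consumed" figure reported in the warnings is reserved + in_use for
# the exceeded resource; the usage numbers below are hypothetical.
def _example_consumed():
    usages = {'gigabytes': {'reserved': 5, 'in_use': 90}}
    consumed = usages['gigabytes']['reserved'] + usages['gigabytes']['in_use']
    return consumed  # 95, compared against the project's gigabytes quota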
| 40.716049
| 78
| 0.574894
|
2dbfb178291f0c18dc8d8ce60a303706f3504927
| 3,556
|
py
|
Python
|
tests/test_js_expressions.py
|
AutomatedTester/amo-validator
|
a063002497395ce04085a3940713b4467f12e9fd
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_js_expressions.py
|
AutomatedTester/amo-validator
|
a063002497395ce04085a3940713b4467f12e9fd
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_js_expressions.py
|
AutomatedTester/amo-validator
|
a063002497395ce04085a3940713b4467f12e9fd
|
[
"BSD-3-Clause"
] | null | null | null |
from js_helper import _do_test_raw, _get_var
def test_boolean_comparison():
"""Test that true/false are properly compared."""
scope = _do_test_raw("""
var a = false < true,
b = true > false,
c = false > true,
d = true < false,
e = false < false,
f = true < true,
g = true == true,
h = false == false,
i = true > 0,
j = true == 1,
k = false < 1,
l = false == 0;
""")
assert _get_var(scope, 'a') is True
assert _get_var(scope, 'b') is True
assert _get_var(scope, 'c') is False
assert _get_var(scope, 'd') is False
assert _get_var(scope, 'e') is False
assert _get_var(scope, 'f') is False
assert _get_var(scope, 'g') is True
assert _get_var(scope, 'h') is True
assert _get_var(scope, 'i') is True
assert _get_var(scope, 'j') is True
assert _get_var(scope, 'k') is True
assert _get_var(scope, 'l') is True
def test_string_comparison():
"""Test that strings are properly compared."""
scope = _do_test_raw("""
var a = "string" < "string",
b = "astring" < "string",
c = "strings" < "stringy",
d = "strings" < "stringier",
e = "string" < "astring",
f = "string" < "strings";
""")
assert _get_var(scope, 'a') is False
assert _get_var(scope, 'b') is True
assert _get_var(scope, 'c') is True
assert _get_var(scope, 'd') is False
assert _get_var(scope, 'e') is False
assert _get_var(scope, 'f') is True
# We can assume that the converses are true; Spidermonkey makes that easy.
def test_signed_zero():
"""Test that signed zeroes are compared properly."""
scope = _do_test_raw("""
var a = 0 == 0,
b = 0 != 0,
c = 0 == -0,
d = 0 != -0,
e = -0 == 0,
f = -0 != 0;
""")
assert _get_var(scope, 'a') is True
assert _get_var(scope, 'b') is False
assert _get_var(scope, 'c') is True
assert _get_var(scope, 'd') is False
assert _get_var(scope, 'e') is True
assert _get_var(scope, 'f') is False
def test_typecasting():
"""Test that types are properly casted."""
scope = _do_test_raw("""
var a = 1 == '1',
b = 255 == '0xff',
c = 0 == '\\r';
""")
assert _get_var(scope, 'a') is True
assert _get_var(scope, 'b') is True
assert _get_var(scope, 'c') is True
def test_additive_typecasting():
"""
Test than additive and multiplicative expressions are evaluated properly.
"""
scope = _do_test_raw("""
var first = true,
second = "foo",
third = 345;
var a = first + second,
b = second + first,
c = Boolean(true) + String("foo"),
d = String("foo") + Boolean(false),
e = second + third,
f = String("foo") + Number(-100);
""")
assert _get_var(scope, 'a') == 'truefoo'
assert _get_var(scope, 'b') == 'footrue'
assert _get_var(scope, 'c') == 'truefoo'
assert _get_var(scope, 'd') == 'foofalse'
assert _get_var(scope, 'e') == 'foo345'
assert _get_var(scope, 'f') == 'foo-100'
def test_addition_expressions():
"""Test that varying types are added correctly."""
scope = _do_test_raw("""
var a = true + false,
b = Boolean(true) + Boolean(false);
var x = 100,
y = -1;
var c = x + y,
d = Number(x) + Number(y);
""")
assert _get_var(scope, 'a') == 1
assert _get_var(scope, 'b') == 1
assert _get_var(scope, 'c') == 99
assert _get_var(scope, 'd') == 99
| 28.222222
| 78
| 0.556243
|
38b6734b0070eca0f300dfc969549ba7903c629d
| 14,719
|
py
|
Python
|
test/unit/test_manager.py
|
NorthLandTeam/ncclient
|
ff6bba74c3304f0a5053087449f5a51e8eb13ed4
|
[
"Apache-2.0"
] | null | null | null |
test/unit/test_manager.py
|
NorthLandTeam/ncclient
|
ff6bba74c3304f0a5053087449f5a51e8eb13ed4
|
[
"Apache-2.0"
] | null | null | null |
test/unit/test_manager.py
|
NorthLandTeam/ncclient
|
ff6bba74c3304f0a5053087449f5a51e8eb13ed4
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from mock import patch, MagicMock
from ncclient import manager
from ncclient.devices.junos import JunosDeviceHandler
import logging
class TestManager(unittest.TestCase):
@patch('ncclient.transport.SSHSession')
def test_ssh(self, mock_ssh):
m = MagicMock()
mock_ssh.return_value = m
conn = self._mock_manager()
m.connect.assert_called_once_with(host='10.10.10.10',
port=22,
username='user',
password='password',
hostkey_verify=False, allow_agent=False,
timeout=3)
self.assertEqual(conn._session, m)
self.assertEqual(conn._timeout, 10)
@patch('ncclient.manager.connect_ssh')
def test_connect_ssh(self, mock_ssh):
manager.connect(host='host')
mock_ssh.assert_called_once_with(host='host')
@patch('ncclient.transport.SSHSession.load_known_hosts')
@patch('ncclient.transport.SSHSession.connect')
def test_connect_ssh1(self, mock_ssh, mock_load_known_hosts):
manager.connect(host='host')
mock_ssh.assert_called_once_with(host='host')
mock_load_known_hosts.assert_called_once_with()
@patch('socket.socket')
@patch('paramiko.Transport')
@patch('ncclient.transport.ssh.hexlify')
@patch('ncclient.transport.ssh.Session._post_connect')
def test_connect_ssh2(self, mock_session, mock_hex, mock_trans, mock_socket):
conn = manager.connect_ssh(host='10.10.10.10',
port=22,
username='user',
password='password',
timeout=3,
hostkey_verify=False,
allow_agent=False,
keepalive=10)
self.assertEqual(mock_trans.called, 1)
@patch('ncclient.transport.SSHSession.connect')
@patch('ncclient.transport.SSHSession.transport')
@patch('ncclient.transport.SSHSession.close')
def test_connect_exception(self, mock_close, mock_transport, mock_ssh):
mock_ssh.side_effect = Exception
try:
manager.connect(host='host')
        except Exception:
            pass  # the mocked connect is expected to raise; the call itself is what we verify
mock_ssh.assert_called_once_with(host='host')
@patch('ncclient.transport.SSHSession.connect')
@patch('ncclient.transport.SSHSession.take_notification')
def test_manager_take_notification(self, mock_take_notification, mock_ssh):
mock_take_notification.return_value = "test_take_notification"
conn = self._mock_manager()
ret = conn.take_notification()
mock_take_notification.assert_called_once_with(True, None)
self.assertEqual(ret, "test_take_notification")
@patch('ncclient.transport.SSHSession.connect')
@patch('ncclient.operations.retrieve.GetConfig._request')
def test_manager_getattr(self, mock_request, mock_ssh):
conn = self._mock_manager()
conn.get_config("running")
mock_ssh.assert_called_once_with(host='10.10.10.10',
port=22,
username='user',
password='password',
timeout=3,
hostkey_verify=False,
allow_agent=False)
@patch('ncclient.transport.SSHSession.connect')
@patch('ncclient.transport.Session.send')
@patch('ncclient.operations.rpc.RPC._request')
def test_manager_getattr2(self, mock_request, mock_send, mock_ssh):
conn = self._mock_manager()
conn.get_edit('config')
mock_ssh.assert_called_once_with(host='10.10.10.10',
port=22,
username='user',
password='password',
timeout=3,
hostkey_verify=False,
allow_agent=False)
@patch('ncclient.manager.connect_ssh')
def test_connect_ssh_with_hostkey_ed25519(self, mock_ssh):
hostkey = 'AAAAC3NzaC1lZDI1NTE5AAAAIIiHpGSf8fla6tCwLpwshvMGmUK+B/0v5CsRu+5v4uT7'
manager.connect(host='host', hostkey=hostkey)
mock_ssh.assert_called_once_with(host='host', hostkey=hostkey)
@patch('ncclient.manager.connect_ssh')
def test_connect_ssh_with_hostkey_ecdsa(self, mock_ssh):
hostkey = 'AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFJV9xLkuntH3Ry0GmK4FjYlW+01Ik4j/gbW+i3yIx+YEkF0B3iM7kiyDPqvmOPuVGfW+gq5oQzzdvHKspNkw70='
manager.connect(host='host', hostkey=hostkey)
mock_ssh.assert_called_once_with(host='host', hostkey=hostkey)
@patch('ncclient.manager.connect_ssh')
def test_connect_ssh_with_hostkey_rsa(self, mock_ssh):
hostkey = 'AAAAB3NzaC1yc2EAAAADAQABAAABAQDfEAdDrz3l8+PF510ivzWyX/pjpn3Cp6UgjJOinXz82e1LTURZhKwm8blcP8aWe8Uri65Roe6Q/H1WMaR3jFJj4UW2EZY5N+M4esPhoP/APOnDu2XNKy9AK9yD/Bu64TYgkIPQ/6FHdotcQdYTAJ+ac+YfJMp5mhVPnRIh4rlF08a0/tDHzLJVMEoXzp5nfVHcA4W3+5RRhklbct10U0jxHmG8Db9XbKiEbhWs/UMy59UpJ+zr7zLUYPRntgqqkpCyyfeHFNK1P6m3FmyT06QekOioCFmY05y65dkjAwBlaO1RKj1X1lgCirRWu4vxYBo9ewIGPZtuzeyp7jnl7kGV'
manager.connect(host='host', hostkey=hostkey)
mock_ssh.assert_called_once_with(host='host', hostkey=hostkey)
@patch('ncclient.manager.connect_ssh')
def test_connect_outbound_ssh(self, mock_ssh):
manager.connect(host=None, sock_fd=6)
mock_ssh.assert_called_once_with(host=None, sock_fd=6)
@patch('ncclient.manager.connect_ioproc')
def test_connect_ioproc(self, mock_ssh):
manager.connect(host='localhost', device_params={'name': 'junos',
'local': True})
mock_ssh.assert_called_once_with(host='localhost',
device_params={'local': True, 'name': 'junos'})
@patch('paramiko.proxy.ProxyCommand')
@patch('paramiko.Transport')
@patch('ncclient.transport.ssh.hexlify')
@patch('ncclient.transport.ssh.Session._post_connect')
def test_connect_with_ssh_config(self, mock_session, mock_hex, mock_trans, mock_proxy):
log = logging.getLogger('TestManager.test_connect_with_ssh_config')
ssh_config_path = 'test/unit/ssh_config'
conn = manager.connect(host='fake_host',
port=830,
username='user',
password='password',
hostkey_verify=False,
allow_agent=False,
ssh_config=ssh_config_path)
log.debug(mock_proxy.call_args[0][0])
self.assertEqual(mock_proxy.called, 1)
mock_proxy.assert_called_with('ssh -W 10.0.0.1:830 jumphost.domain.com')
@patch('socket.socket')
@patch('paramiko.Transport')
@patch('ncclient.transport.ssh.hexlify')
@patch('ncclient.transport.ssh.Session._post_connect')
def test_ssh2(self, mock_session, mock_hex, mock_trans, mock_socket):
conn = self._mock_manager()
self.assertEqual(mock_trans.called, 1)
self.assertEqual(conn._timeout, 10)
self.assertEqual(conn._device_handler.device_params, {'name': 'junos'})
self.assertEqual(
conn._device_handler.__class__.__name__,
"JunosDeviceHandler")
@patch('ncclient.transport.ssh.Session._post_connect')
@patch('ncclient.transport.third_party.junos.ioproc.IOProc.connect')
def test_ioproc(self, mock_connect, mock_ioproc):
conn = manager.connect(host='localhost',
port=22,
username='user',
password='password',
timeout=3,
hostkey_verify=False,
device_params={'local': True, 'name': 'junos'},
manager_params={'timeout': 10})
self.assertEqual(mock_connect.called, 1)
self.assertEqual(conn._timeout, 10)
self.assertEqual(conn._device_handler.device_params, {'local': True, 'name': 'junos'})
self.assertEqual(
conn._device_handler.__class__.__name__,
"JunosDeviceHandler")
def test_make_device_handler(self):
device_handler = manager.make_device_handler({'name': 'junos'})
self.assertEqual(
device_handler.__class__.__name__,
"JunosDeviceHandler")
def test_make_device_handler_provided_handler(self):
device_handler = manager.make_device_handler(
{'handler': JunosDeviceHandler})
self.assertEqual(
device_handler.__class__.__name__,
"JunosDeviceHandler")
@patch('ncclient.operations.LockContext')
def test_manager_locked(self, mock_lock):
conn = manager.Manager(None, None, timeout=20)
conn.locked(None)
mock_lock.assert_called_once_with(None, None, None)
@patch('socket.socket')
@patch('paramiko.Transport')
@patch('ncclient.transport.ssh.hexlify')
@patch('ncclient.transport.ssh.Session._post_connect')
def test_manager_client_capability(
self, mock_session, mock_hex, mock_trans, mock_socket):
conn = self._mock_manager()
self.assertEqual(
conn.client_capabilities,
conn._session.client_capabilities)
@patch('socket.socket')
@patch('paramiko.Transport')
@patch('ncclient.transport.ssh.hexlify')
@patch('ncclient.transport.ssh.Session._post_connect')
def test_manager_server_capability(
self, mock_session, mock_hex, mock_trans, mock_socket):
conn = self._mock_manager()
self.assertEqual(
conn.server_capabilities,
conn._session.server_capabilities)
@patch('socket.socket')
@patch('paramiko.Transport')
@patch('ncclient.transport.ssh.hexlify')
@patch('ncclient.transport.ssh.Session._post_connect')
def test_manager_channel_id(
self, mock_session, mock_hex, mock_trans, mock_socket):
conn = self._mock_manager()
self.assertEqual(conn.channel_id, conn._session._channel_id)
@patch('socket.socket')
@patch('paramiko.Transport')
@patch('ncclient.transport.ssh.hexlify')
@patch('ncclient.transport.ssh.Session._post_connect')
def test_manager_channel_name(
self, mock_session, mock_hex, mock_trans, mock_socket):
conn = self._mock_manager()
self.assertEqual(conn.channel_name, conn._session._channel_name)
@patch('socket.socket')
@patch('paramiko.Transport')
@patch('ncclient.transport.ssh.hexlify')
@patch('ncclient.transport.ssh.Session._post_connect')
def test_manager_channel_session_id(
self, mock_session, mock_hex, mock_trans, mock_socket):
conn = self._mock_manager()
self.assertEqual(conn.session_id, conn._session.id)
@patch('socket.socket')
@patch('paramiko.Transport')
@patch('ncclient.transport.ssh.hexlify')
@patch('ncclient.transport.ssh.Session._post_connect')
def test_manager_connected(
self, mock_session, mock_hex, mock_trans, mock_socket):
conn = self._mock_manager()
self.assertEqual(conn.connected, True)
@patch('ncclient.manager.Manager.HUGE_TREE_DEFAULT')
@patch('ncclient.transport.SSHSession')
@patch('ncclient.operations.rpc.RPC')
def test_manager_huge_node(self, mock_rpc, mock_session, default_value):
# Set default value to True only in this test through the default_value mock
default_value = True
# true should propagate all the way to the RPC
conn = self._mock_manager()
self.assertTrue(conn.huge_tree)
conn.execute(mock_rpc)
mock_rpc.assert_called_once()
self.assertTrue(mock_rpc.call_args[1]['huge_tree'])
# false should propagate all the way to the RPC
conn.huge_tree = False
self.assertFalse(conn.huge_tree)
mock_rpc.reset_mock()
conn.execute(mock_rpc)
mock_rpc.assert_called_once()
self.assertFalse(mock_rpc.call_args[1]['huge_tree'])
def _mock_manager(self):
conn = manager.connect(host='10.10.10.10',
port=22,
username='user',
password='password',
timeout=3,
hostkey_verify=False, allow_agent=False,
device_params={'name': 'junos'},
manager_params={'timeout': 10})
return conn
@patch('socket.fromfd')
@patch('paramiko.Transport')
@patch('ncclient.transport.ssh.hexlify')
@patch('ncclient.transport.ssh.Session._post_connect')
def test_outbound_manager_connected(
self, mock_session, mock_hex, mock_trans, mock_fromfd):
conn = self._mock_outbound_manager()
self.assertEqual(conn.connected, True)
def _mock_outbound_manager(self):
conn = manager.connect(host=None,
sock_fd=6,
username='user',
password='password',
device_params={'name': 'junos'},
hostkey_verify=False, allow_agent=False)
return conn
@patch('socket.socket')
@patch('ncclient.manager.connect_ssh')
def test_call_home(self, mock_ssh, mock_socket_open):
mock_connected_socket = MagicMock()
mock_server_socket = MagicMock()
mock_socket_open.return_value = mock_server_socket
mock_server_socket.accept.return_value = (mock_connected_socket,
'remote.host')
with manager.call_home(host='0.0.0.0', port=1234) as chm:
mock_ssh.assert_called_once_with(host='0.0.0.0',
port=1234,
sock=mock_connected_socket)
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestManager)
unittest.TextTestRunner(verbosity=2).run(suite)
| 45.150307
| 392
| 0.614308
|
7728dda6627b11f1d8611e058d8840c507db83ee
| 3,199
|
py
|
Python
|
TBWright/day17.py
|
techartorg/Advent_of_Code_2020
|
ae21164bc126352e7a2e9c9c6a0017ccb9d946cc
|
[
"MIT"
] | 3
|
2020-11-16T15:20:11.000Z
|
2020-12-11T17:01:42.000Z
|
TBWright/day17.py
|
techartorg/Advent_of_Code_2020
|
ae21164bc126352e7a2e9c9c6a0017ccb9d946cc
|
[
"MIT"
] | null | null | null |
TBWright/day17.py
|
techartorg/Advent_of_Code_2020
|
ae21164bc126352e7a2e9c9c6a0017ccb9d946cc
|
[
"MIT"
] | 1
|
2020-12-13T04:42:44.000Z
|
2020-12-13T04:42:44.000Z
|
"""
"""
from lib.helpers import timer
INACTIVE = '.'
ACTIVE = '#'
infinite_grid = {(x, y, 0, 0): state for y, line in enumerate(open("inputs/day17_input.txt", "r").read().splitlines()) for x, state in enumerate(line)}
def get_neighbor_cube_coords(coord):
xcoord, ycoord, zcoord, _ = coord
neighbor_coords = set()
for x in range(-1, 2):
for y in range(-1, 2):
for z in range(-1, 2):
neighbor_coord = (xcoord+x, ycoord+y, zcoord+z, 0)
neighbor_coords.add(neighbor_coord)
neighbor_coords.remove((xcoord, ycoord, zcoord, 0))
return neighbor_coords
def get_neighbor_cube_coords_w(coord):
xcoord, ycoord, zcoord, wcoord = coord
neighbor_coords = set()
for x in range(-1, 2):
for y in range(-1, 2):
for z in range(-1, 2):
for w in range(-1, 2):
neighbor_coord = (xcoord+x, ycoord+y, zcoord+z, wcoord+w)
neighbor_coords.add(neighbor_coord)
neighbor_coords.remove((xcoord, ycoord, zcoord, wcoord))
return neighbor_coords
def expand_grid(grid, iteration_to_expand_to):
new_grid = grid.copy()
x = y = 7 + iteration_to_expand_to
for x_range in range(-iteration_to_expand_to, x+1):
for y_range in range(-iteration_to_expand_to, y+1):
for z_range in range(-iteration_to_expand_to, iteration_to_expand_to+1):
if (x_range, y_range, z_range, 0) not in new_grid:
new_grid[(x_range, y_range, z_range, 0)] = INACTIVE
return new_grid
def expand_grid_w(grid, iteration_to_expand_to):
new_grid = grid.copy()
x = y = 7 + iteration_to_expand_to
for x_range in range(-iteration_to_expand_to, x+1):
for y_range in range(-iteration_to_expand_to, y+1):
for z_range in range(-iteration_to_expand_to, iteration_to_expand_to+1):
for w_range in range(-iteration_to_expand_to, iteration_to_expand_to+1):
if (x_range, y_range, z_range, w_range) not in new_grid:
new_grid[(x_range, y_range, z_range, w_range)] = INACTIVE
return new_grid
@timer
def solve(input_grid, cycles, use_w=False):
itr = 0
current_grid = input_grid.copy()
while itr < cycles:
current_grid = expand_grid(current_grid, itr+1) if not use_w else expand_grid_w(current_grid, itr+1)
buffer_grid = current_grid.copy()
for coord, state in current_grid.items():
neighbor_coords = get_neighbor_cube_coords(coord) if not use_w else get_neighbor_cube_coords_w(coord)
neighbor_states = [current_grid.get(coords, INACTIVE) for coords in neighbor_coords]
if state == INACTIVE and neighbor_states.count(ACTIVE) == 3:
buffer_grid[coord] = ACTIVE
elif state == ACTIVE and neighbor_states.count(ACTIVE) not in [2, 3]:
buffer_grid[coord] = INACTIVE
current_grid = buffer_grid.copy()
itr += 1
return current_grid
part1_grid = solve(infinite_grid, 6)
print(list(part1_grid.values()).count(ACTIVE))
part2_grid = solve(infinite_grid, 6, use_w=True)
print(list(part2_grid.values()).count(ACTIVE))
| 37.197674
| 151
| 0.64864
|
b7272d4fc4b0caf1e5a739349c14cac7b6b66e82
| 531
|
py
|
Python
|
src/products/migrations/0004_auto_20190704_2341.py
|
kirankotari/tryDjango
|
da931d1b6a21bb4b6c9ef8877de45f8161f1aef7
|
[
"MIT"
] | 2
|
2021-01-14T08:14:44.000Z
|
2021-07-14T11:41:09.000Z
|
src/products/migrations/0004_auto_20190704_2341.py
|
kirankotari/tryDjango
|
da931d1b6a21bb4b6c9ef8877de45f8161f1aef7
|
[
"MIT"
] | 3
|
2020-02-12T00:52:38.000Z
|
2021-06-10T21:41:20.000Z
|
src/products/migrations/0004_auto_20190704_2341.py
|
kirankotari/tryDjango
|
da931d1b6a21bb4b6c9ef8877de45f8161f1aef7
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.9 on 2019-07-04 18:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0003_auto_20190704_1053'),
]
operations = [
migrations.AlterField(
model_name='product',
name='featured',
field=models.BooleanField(null=True),
),
migrations.AlterField(
model_name='product',
name='summary',
field=models.TextField(),
),
]
| 22.125
| 49
| 0.568738
|
6d4222389a4db5c5beb3f35cad590489b1c4a2a7
| 191
|
py
|
Python
|
setup.py
|
perfah/covid-19-in-my-region
|
402159a2789a798c8d96d96dc8647b99d3b961ed
|
[
"MIT"
] | 3
|
2020-04-30T22:02:33.000Z
|
2021-02-22T13:53:23.000Z
|
setup.py
|
perfah/covid-19-in-my-region
|
402159a2789a798c8d96d96dc8647b99d3b961ed
|
[
"MIT"
] | null | null | null |
setup.py
|
perfah/covid-19-in-my-region
|
402159a2789a798c8d96d96dc8647b99d3b961ed
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(
name='covid_19_in_my_region',
packages=['covid_19_in_my_region'],
include_package_data=True,
install_requires=[
'flask',
],
)
| 17.363636
| 39
| 0.675393
|
2391f420f2e31b93fdf729ad96ea52465e190d86
| 3,281
|
py
|
Python
|
qiskit/extensions/standard/iswap.py
|
maddy-tod/qiskit-terra
|
f11740cf0375c725880fc5feea749fbb64011f11
|
[
"Apache-2.0"
] | 1
|
2020-04-16T22:03:40.000Z
|
2020-04-16T22:03:40.000Z
|
qiskit/extensions/standard/iswap.py
|
maddy-tod/qiskit-terra
|
f11740cf0375c725880fc5feea749fbb64011f11
|
[
"Apache-2.0"
] | 1
|
2020-04-08T05:17:25.000Z
|
2020-04-08T05:17:25.000Z
|
qiskit/extensions/standard/iswap.py
|
maddy-tod/qiskit-terra
|
f11740cf0375c725880fc5feea749fbb64011f11
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
iSWAP gate.
"""
import numpy as np
from qiskit.circuit import Gate
from qiskit.circuit import QuantumCircuit
from qiskit.circuit import QuantumRegister
class iSwapGate(Gate):
r"""iSWAP gate.
A 2-qubit XX+YY interaction.
This is a Clifford and symmetric gate. Its action is to swap two qubit
states and phase the :math:`|01\rangle` and :math:`|10\rangle`
amplitudes by i.
**Circuit Symbol:**
.. parsed-literal::
q_0: ─⨂─
│
q_1: ─⨂─
**Reference Implementation:**
.. parsed-literal::
┌───┐┌───┐ ┌───┐
q_0: ┤ S ├┤ H ├──■──┤ X ├─────
├───┤└───┘┌─┴─┐└─┬─┘┌───┐
q_1: ┤ S ├─────┤ X ├──■──┤ H ├
└───┘ └───┘ └───┘
**Matrix Representation:**
.. math::
iSWAP = R_{XX+YY}(-\frac{\pi}{2})
= exp(i \frac{\pi}{4} (X{\otimes}X+Y{\otimes}Y)) =
\begin{pmatrix}
1 & 0 & 0 & 0 \\
0 & 0 & i & 0 \\
0 & i & 0 & 0 \\
0 & 0 & 0 & 1
\end{pmatrix}
This gate is equivalent to a SWAP up to a diagonal.
.. math::
iSWAP =
\begin{pmatrix}
1 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & 0 & 1
\end{pmatrix}
. \begin{pmatrix}
1 & 0 & 0 & 0 \\
0 & i & 0 & 0 \\
0 & 0 & i & 0 \\
0 & 0 & 0 & 1
\end{pmatrix}
"""
def __init__(self):
"""Create new iSwap gate."""
super().__init__('iswap', 2, [])
def _define(self):
"""
gate iswap a,b {
s q[0];
s q[1];
h q[0];
cx q[0],q[1];
cx q[1],q[0];
h q[1];
}
"""
from qiskit.extensions.standard.h import HGate
from qiskit.extensions.standard.s import SGate
from qiskit.extensions.standard.x import CXGate
q = QuantumRegister(2, 'q')
self.definition = [
(SGate(), [q[0]], []),
(SGate(), [q[1]], []),
(HGate(), [q[0]], []),
(CXGate(), [q[0], q[1]], []),
(CXGate(), [q[1], q[0]], []),
(HGate(), [q[1]], [])
]
def to_matrix(self):
"""Return a numpy.array for the iSWAP gate."""
return np.array([[1, 0, 0, 0],
[0, 0, 1j, 0],
[0, 1j, 0, 0],
[0, 0, 0, 1]], dtype=complex)
def iswap(self, qubit1, qubit2):
"""Apply :class:`~qiskit.extensions.standard.iSwapGate`.
"""
return self.append(iSwapGate(), [qubit1, qubit2], [])
QuantumCircuit.iswap = iswap
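# --- Editor's sketch (not part of Qiskit): numerical check of the docstring. ---
# Verifies that exp(i*pi/4*(X(x)X + Y(x)Y)) reproduces the iSWAP matrix above.
# Requires SciPy, which the original module does not import.
def _check_iswap_matrix():
    from scipy.linalg import expm
    x = np.array([[0, 1], [1, 0]], dtype=complex)
    y = np.array([[0, -1j], [1j, 0]], dtype=complex)
    generator = np.kron(x, x) + np.kron(y, y)
    return np.allclose(expm(1j * np.pi / 4 * generator), iSwapGate().to_matrix())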
| 26.039683
| 77
| 0.455349
|
f17407d4136c2a2317e8b7809ceac85fa502ee5e
| 3,191
|
py
|
Python
|
fight_covid19/maps/views.py
|
hrithik098/Fight-Covid19
|
b61d3476c93bfd8011f970bb94eaf62f102306fe
|
[
"MIT"
] | null | null | null |
fight_covid19/maps/views.py
|
hrithik098/Fight-Covid19
|
b61d3476c93bfd8011f970bb94eaf62f102306fe
|
[
"MIT"
] | null | null | null |
fight_covid19/maps/views.py
|
hrithik098/Fight-Covid19
|
b61d3476c93bfd8011f970bb94eaf62f102306fe
|
[
"MIT"
] | null | null | null |
import requests
from django.conf import settings
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import HttpResponse
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import ListView
from django.views.generic import View
from django.views.generic.edit import FormView
from django.http import JsonResponse
from django.db.models import Q
from fight_covid19.maps import forms
from fight_covid19.maps.models import HealthEntry
class HomePage(View):
def get(self, request, *args, **kwargs):
c = {
"cases": "N/A",
"activeCases": "N/A",
"todayCases": "N/A",
"deaths": "N/A",
"todayDeaths": "N/A",
"recovered": "N/A",
"critical": "N/A",
}
# Creating health statistics
# HealthEntry.objects.filter(fever=True).count()
# HealthEntry.objects.filter(cough=True).count()
# HealthEntry.objects.filter(difficult_breathing=True).count()
sick_people = HealthEntry.objects.filter(
Q(fever=True) | Q(cough=True) | Q(difficult_breathing=True)
)
c["sickPeople"] = sick_people.count()
c["totalPeople"] = (
HealthEntry.objects.all().order_by("user").distinct("user_id").count()
)
# Fetching data from API
r = requests.get(settings.COVID19_STATS_API)
if r.status_code == 200:
data = r.json()
india_stats = list(filter(lambda x: x["country"] == "India", data))
c["cases"] = india_stats[0]["cases"]
c["todayCases"] = india_stats[0]["todayCases"]
c["deaths"] = india_stats[0]["deaths"]
c["todayDeaths"] = india_stats[0]["todayDeaths"]
c["recovered"] = india_stats[0]["recovered"]
c["active"] = india_stats[0]["active"]
c["critical"] = india_stats[0]["critical"]
return render(request, "pages/home.html", context=c)
HomePageView = HomePage.as_view()
class HealthForm(LoginRequiredMixin, FormView):
form_class = forms.HealthEntryForm
template_name = "maps/health_form.html"
success_url = reverse_lazy("maps:my_health")
def form_valid(self, form):
if form.is_valid():
entry = form.save(commit=False)
entry.user = self.request.user
entry.save()
return super().form_valid(form)
HealthFormView = HealthForm.as_view()
class MyHealth(LoginRequiredMixin, ListView):
model = HealthEntry
template_name = "maps/my_health.html"
context_object_name = "entries"
def get_queryset(self):
return self.model.objects.filter(user=self.request.user).order_by(
"-creation_timestamp"
)
MyHealthView = MyHealth.as_view()
class MapMarkers(View):
def get(self, request, *args, **kwargs):
points = (
HealthEntry.objects.all()
.order_by("user", "-creation_timestamp")
.distinct("user")
.values("user_id", "latitude", "longitude")
)
return JsonResponse(list(points), safe=False)
MapMarkersView = MapMarkers.as_view()
| 31.284314
| 82
| 0.627076
|
54813e867964648a31081fcbd26ebbb2810882fc
| 25,594
|
py
|
Python
|
autotest/ogr/ogr_ngw.py
|
jpapadakis/gdal
|
f07aa15fd65af36b04291303cc6834c87f662814
|
[
"MIT"
] | 18
|
2021-01-27T00:07:35.000Z
|
2022-03-25T22:20:13.000Z
|
autotest/ogr/ogr_ngw.py
|
jpapadakis/gdal
|
f07aa15fd65af36b04291303cc6834c87f662814
|
[
"MIT"
] | 1
|
2015-04-14T00:19:57.000Z
|
2015-04-14T00:29:29.000Z
|
autotest/ogr/ogr_ngw.py
|
jpapadakis/gdal
|
f07aa15fd65af36b04291303cc6834c87f662814
|
[
"MIT"
] | 1
|
2021-11-21T02:33:51.000Z
|
2021-11-21T02:33:51.000Z
|
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
################################################################################
# Project: OGR NextGIS Web Driver
# Purpose: Tests OGR NGW Driver capabilities
# Author: Dmitry Baryshnikov, polimax@mail.ru
# Language: Python
################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2018-2020, NextGIS <info@nextgis.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
################################################################################
import sys
sys.path.append('../pymod')
import gdaltest
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
import time
import json
import pytest
import random
from datetime import datetime
def check_availability(url):
# Sandbox cleans at 1:05 on monday (UTC)
now = datetime.utcnow()
if now.weekday() == 0:
if now.hour >= 1 and now.hour < 3:
return False
version_url = url + '/api/component/pyramid/pkg_version'
if gdaltest.gdalurlopen(version_url) is None:
return False
# Check quota
quota_url = url + '/api/resource/quota'
quota_conn = gdaltest.gdalurlopen(quota_url)
try:
quota_json = json.loads(quota_conn.read())
quota_conn.close()
if quota_json is None:
return False
limit = quota_json['limit']
count = quota_json['count']
if limit is None or count is None:
return True
return limit - count > 10
    except Exception:
return False
def get_new_name():
return 'gdaltest_group_' + str(int(time.time())) + '_' + str(random.randint(10, 99))
###############################################################################
# Check driver existence.
def test_ogr_ngw_1():
gdaltest.ngw_ds = None
gdaltest.ngw_drv = None
gdaltest.ngw_drv = gdal.GetDriverByName('NGW')
if gdaltest.ngw_drv is None:
pytest.skip()
gdaltest.ngw_test_server = 'https://sandbox.nextgis.com'
if check_availability(gdaltest.ngw_test_server) == False:
gdaltest.ngw_drv = None
pytest.skip()
###############################################################################
# Check create datasource.
def test_ogr_ngw_2():
if gdaltest.ngw_drv is None:
pytest.skip()
if check_availability(gdaltest.ngw_test_server) == False:
gdaltest.ngw_drv = None
pytest.skip()
create_url = 'NGW:' + gdaltest.ngw_test_server + '/resource/0/' + get_new_name()
gdal.PushErrorHandler()
gdaltest.ngw_ds = gdaltest.ngw_drv.Create(create_url, 0, 0, 0, gdal.GDT_Unknown, \
options=['DESCRIPTION=GDAL Test group',])
gdal.PopErrorHandler()
assert gdaltest.ngw_ds is not None, 'Create datasource failed.'
assert gdaltest.ngw_ds.GetMetadataItem('description', '') == 'GDAL Test group', \
'Did not get expected datasource description.'
assert int(gdaltest.ngw_ds.GetMetadataItem('id', '')) > 0, \
'Did not get expected datasource identifier.'
gdaltest.group_id = gdaltest.ngw_ds.GetMetadataItem('id', '')
###############################################################################
# Check rename datasource.
def test_ogr_ngw_3():
if gdaltest.ngw_drv is None:
pytest.skip()
if check_availability(gdaltest.ngw_test_server) == False:
gdaltest.ngw_drv = None
pytest.skip()
new_name = get_new_name() + '_2'
ds_resource_id = gdaltest.ngw_ds.GetMetadataItem('id', '')
rename_url = 'NGW:' + gdaltest.ngw_test_server + '/resource/' + ds_resource_id
assert gdaltest.ngw_drv.Rename(new_name, rename_url) == gdal.CE_None, \
'Rename datasource failed.'
###############################################################################
# Check datasource metadata.
def test_ogr_ngw_4():
if gdaltest.ngw_drv is None:
pytest.skip()
if check_availability(gdaltest.ngw_test_server) == False:
gdaltest.ngw_drv = None
pytest.skip()
ds_resource_id = gdaltest.ngw_ds.GetMetadataItem('id', '')
gdaltest.ngw_ds.SetMetadataItem('test_int.d', '777', 'NGW')
gdaltest.ngw_ds.SetMetadataItem('test_float.f', '777.555', 'NGW')
gdaltest.ngw_ds.SetMetadataItem('test_string', 'metadata test', 'NGW')
gdaltest.ngw_ds = None
url = 'NGW:' + gdaltest.ngw_test_server + '/resource/' + ds_resource_id
gdaltest.ngw_ds = gdal.OpenEx(url, gdal.OF_UPDATE) # gdaltest.ngw_drv.Open(url, update=1)
assert gdaltest.ngw_ds is not None, \
'Open datasource failed.'
md_item = gdaltest.ngw_ds.GetMetadataItem('test_int.d', 'NGW')
assert md_item == '777', \
'Did not get expected datasource metadata item. test_int.d is equal {}, but should {}.'.format(md_item, '777')
md_item = gdaltest.ngw_ds.GetMetadataItem('test_float.f', 'NGW')
assert float(md_item) == pytest.approx(777.555, abs=0.00001), \
'Did not get expected datasource metadata item. test_float.f is equal {}, but should {}.'.format(md_item, '777.555')
md_item = gdaltest.ngw_ds.GetMetadataItem('test_string', 'NGW')
assert md_item == 'metadata test', \
'Did not get expected datasource metadata item. test_string is equal {}, but should {}.'.format(md_item, 'metadata test')
resource_type = gdaltest.ngw_ds.GetMetadataItem('resource_type', '')
    assert resource_type is not None, 'Did not get expected datasource metadata item. Resource type should be present.'
def create_fields(lyr):
fld_defn = ogr.FieldDefn('STRFIELD', ogr.OFTString)
lyr.CreateField(fld_defn)
lyr.SetMetadataItem('FIELD_0_ALIAS', 'String field test')
fld_defn = ogr.FieldDefn('DECFIELD', ogr.OFTInteger)
lyr.CreateField(fld_defn)
lyr.SetMetadataItem('FIELD_1_ALIAS', 'Integer field test')
fld_defn = ogr.FieldDefn('BIGDECFIELD', ogr.OFTInteger64)
lyr.CreateField(fld_defn)
lyr.SetMetadataItem('FIELD_2_ALIAS', 'Integer64 field test')
fld_defn = ogr.FieldDefn('REALFIELD', ogr.OFTReal)
lyr.CreateField(fld_defn)
lyr.SetMetadataItem('FIELD_3_ALIAS', 'Real field test')
fld_defn = ogr.FieldDefn('DATEFIELD', ogr.OFTDate)
lyr.CreateField(fld_defn)
lyr.SetMetadataItem('FIELD_4_ALIAS', 'Date field test')
fld_defn = ogr.FieldDefn('TIMEFIELD', ogr.OFTTime)
lyr.CreateField(fld_defn)
lyr.SetMetadataItem('FIELD_5_ALIAS', 'Time field test')
fld_defn = ogr.FieldDefn('DATETIMEFLD', ogr.OFTDateTime)
lyr.CreateField(fld_defn)
lyr.SetMetadataItem('FIELD_6_ALIAS', 'Date & time field test')
def fill_fields(f):
f.SetField('STRFIELD', "fo_o")
f.SetField('DECFIELD', 123)
f.SetField('BIGDECFIELD', 12345678901234)
f.SetField('REALFIELD', 1.23)
f.SetField('DATETIMEFLD', '2014/12/04 12:34:56')
def fill_fields2(f):
f.SetField('STRFIELD', "русский")
f.SetField('DECFIELD', 321)
f.SetField('BIGDECFIELD', 32145678901234)
f.SetField('REALFIELD', 21.32)
f.SetField('DATETIMEFLD', '2019/12/31 21:43:56')
def add_metadata(lyr):
lyr.SetMetadataItem('test_int.d', '777', 'NGW')
    lyr.SetMetadataItem('test_float.f', '777.555', 'NGW')
lyr.SetMetadataItem('test_string', 'metadata test', 'NGW')
###############################################################################
# Check create vector layers.
def test_ogr_ngw_5():
if gdaltest.ngw_drv is None:
pytest.skip()
if check_availability(gdaltest.ngw_test_server) == False:
gdaltest.ngw_drv = None
pytest.skip()
sr = osr.SpatialReference()
sr.ImportFromEPSG(3857)
lyr = gdaltest.ngw_ds.CreateLayer('test_pt_layer', srs=sr, geom_type=ogr.wkbMultiPoint, options=['OVERWRITE=YES', 'DESCRIPTION=Test point layer'])
assert lyr is not None, 'Create layer failed.'
create_fields(lyr)
# Test duplicated names.
fld_defn = ogr.FieldDefn('STRFIELD', ogr.OFTString)
assert lyr.CreateField(fld_defn) != 0, 'Expected not to create duplicated field'
# Test forbidden field names.
gdal.ErrorReset()
gdal.PushErrorHandler('CPLQuietErrorHandler')
fld_defn = ogr.FieldDefn('id', ogr.OFTInteger)
lyr.CreateField(fld_defn)
gdal.PopErrorHandler()
assert gdal.GetLastErrorMsg() != '', 'Expecting a warning'
add_metadata(lyr)
lyr = gdaltest.ngw_ds.CreateLayer('test_ln_layer', srs=sr, geom_type=ogr.wkbMultiLineString, options=['OVERWRITE=YES', 'DESCRIPTION=Test line layer'])
assert lyr is not None, 'Create layer failed.'
create_fields(lyr)
add_metadata(lyr)
lyr = gdaltest.ngw_ds.CreateLayer('test_pl_layer', srs=sr, geom_type=ogr.wkbMultiPolygon, options=['OVERWRITE=YES', 'DESCRIPTION=Test polygon layer'])
assert lyr is not None, 'Create layer failed.'
create_fields(lyr)
add_metadata(lyr)
# Test overwrite
lyr = gdaltest.ngw_ds.CreateLayer('test_pt_layer', srs=sr, geom_type=ogr.wkbPoint, options=['OVERWRITE=YES', 'DESCRIPTION=Test point layer'])
assert lyr is not None, 'Create layer failed.'
create_fields(lyr)
add_metadata(lyr)
lyr = gdaltest.ngw_ds.CreateLayer('test_ln_layer', srs=sr, geom_type=ogr.wkbLineString, options=['OVERWRITE=YES', 'DESCRIPTION=Test line layer'])
assert lyr is not None, 'Create layer failed.'
create_fields(lyr)
add_metadata(lyr)
lyr = gdaltest.ngw_ds.CreateLayer('test_pl_layer', srs=sr, geom_type=ogr.wkbPolygon, options=['OVERWRITE=YES', 'DESCRIPTION=Test polygon layer'])
assert lyr is not None, 'Create layer failed.'
create_fields(lyr)
add_metadata(lyr)
# Test without overwrite
lyr = gdaltest.ngw_ds.CreateLayer('test_pl_layer', srs=sr, geom_type=ogr.wkbMultiPolygon, options=['OVERWRITE=NO', 'DESCRIPTION=Test polygon layer 1'])
assert lyr is None, 'Create layer without overwrite should fail.'
lyr = gdaltest.ngw_ds.CreateLayer('test_pl_layer', srs=sr, geom_type=ogr.wkbMultiPolygon, options=['DESCRIPTION=Test point layer 1'])
assert lyr is None, 'Create layer without overwrite should fail.'
# Test geometry with Z
lyr = gdaltest.ngw_ds.CreateLayer('test_plz_layer', srs=sr, geom_type=ogr.wkbMultiPolygon25D, options=['OVERWRITE=YES', 'DESCRIPTION=Test polygonz layer'])
assert lyr is not None, 'Create layer failed.'
create_fields(lyr)
add_metadata(lyr)
ds_resource_id = gdaltest.ngw_ds.GetMetadataItem('id', '')
gdaltest.ngw_ds = None
url = 'NGW:' + gdaltest.ngw_test_server + '/resource/' + ds_resource_id
gdaltest.ngw_ds = gdal.OpenEx(url, gdal.OF_UPDATE) # gdaltest.ngw_drv.Open(url, update=1)
assert gdaltest.ngw_ds is not None, 'Open datasource failed.'
for layer_name in ['test_pt_layer', 'test_ln_layer', 'test_pl_layer', 'test_plz_layer']:
lyr = gdaltest.ngw_ds.GetLayerByName(layer_name)
assert lyr is not None, 'Get layer {} failed.'.format(layer_name)
md_item = lyr.GetMetadataItem('test_int.d', 'NGW')
assert md_item == '777', \
            'Did not get expected layer metadata item. test_int.d equals {}, but should be {}.'.format(md_item, '777')
md_item = lyr.GetMetadataItem('test_float.f', 'NGW')
assert float(md_item) == pytest.approx(777.555, abs=0.00001), \
            'Did not get expected layer metadata item. test_float.f equals {}, but should be {}.'.format(md_item, '777.555')
md_item = lyr.GetMetadataItem('test_string', 'NGW')
assert md_item == 'metadata test', \
            'Did not get expected layer metadata item. test_string equals {}, but should be {}.'.format(md_item, 'metadata test')
resource_type = lyr.GetMetadataItem('resource_type', '')
        assert resource_type is not None, 'Did not get expected layer metadata item. Resource type should be present.'
assert lyr.GetGeomType() != ogr.wkbUnknown and lyr.GetGeomType() != ogr.wkbNone
###############################################################################
# Check open single vector layer.
def test_ogr_ngw_6():
if gdaltest.ngw_drv is None:
pytest.skip()
    if not check_availability(gdaltest.ngw_test_server):
gdaltest.ngw_drv = None
pytest.skip()
lyr = gdaltest.ngw_ds.GetLayerByName('test_pt_layer')
lyr_resource_id = lyr.GetMetadataItem('id', '')
url = 'NGW:' + gdaltest.ngw_test_server + '/resource/' + lyr_resource_id
ds = gdal.OpenEx(url)
assert ds is not None and ds.GetLayerCount() == 1, \
'Failed to open single vector layer.'
###############################################################################
# Check insert, update and delete features.
def test_ogr_ngw_7():
if gdaltest.ngw_drv is None:
pytest.skip()
    if not check_availability(gdaltest.ngw_test_server):
gdaltest.ngw_drv = None
pytest.skip()
lyr = gdaltest.ngw_ds.GetLayerByName('test_pt_layer')
f = ogr.Feature(lyr.GetLayerDefn())
fill_fields(f)
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT (1 2)'))
ret = lyr.CreateFeature(f)
assert ret == 0 and f.GetFID() >= 0, \
        'Create feature failed. Expected FID greater than or equal to 0, got {}.'.format(f.GetFID())
fill_fields2(f)
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT (3 4)'))
ret = lyr.SetFeature(f)
assert ret == 0, 'Failed to update feature #{}.'.format(f.GetFID())
lyr.DeleteFeature(f.GetFID())
# Expected fail to get feature
gdal.PushErrorHandler()
f = lyr.GetFeature(f.GetFID())
gdal.PopErrorHandler()
assert f is None, 'Failed to delete feature #{}.'.format(f.GetFID())
###############################################################################
# Check insert, update features in batch mode.
def test_ogr_ngw_8():
if gdaltest.ngw_drv is None:
pytest.skip()
    if not check_availability(gdaltest.ngw_test_server):
gdaltest.ngw_drv = None
pytest.skip()
ds_resource_id = gdaltest.ngw_ds.GetMetadataItem('id', '')
gdaltest.ngw_ds = None
url = 'NGW:' + gdaltest.ngw_test_server + '/resource/' + ds_resource_id
gdaltest.ngw_ds = gdal.OpenEx(url, gdal.OF_UPDATE, open_options=['BATCH_SIZE=2'])
lyr = gdaltest.ngw_ds.GetLayerByName('test_pt_layer')
f1 = ogr.Feature(lyr.GetLayerDefn())
fill_fields(f1)
f1.SetGeometry(ogr.CreateGeometryFromWkt('POINT (1 2)'))
ret = lyr.CreateFeature(f1)
assert ret == 0 and f1.GetFID() < 0
f2 = ogr.Feature(lyr.GetLayerDefn())
fill_fields2(f2)
f2.SetGeometry(ogr.CreateGeometryFromWkt('POINT (2 3)'))
ret = lyr.CreateFeature(f2)
assert ret == 0 and f2.GetFID() < 0
f3 = ogr.Feature(lyr.GetLayerDefn())
fill_fields(f3)
f3.SetGeometry(ogr.CreateGeometryFromWkt('POINT (3 4)'))
ret = lyr.CreateFeature(f3)
assert ret == 0
ret = lyr.SyncToDisk()
assert ret == 0
lyr.ResetReading()
feat = lyr.GetNextFeature()
counter = 0
while feat is not None:
counter += 1
        assert feat.GetFID() >= 0, 'Expected FID greater than or equal to 0, got {}.'.format(feat.GetFID())
feat = lyr.GetNextFeature()
assert counter >= 3, 'Expected 3 or greater feature count, got {}.'.format(counter)
###############################################################################
# Check paging while GetNextFeature.
def test_ogr_ngw_9():
if gdaltest.ngw_drv is None:
pytest.skip()
    if not check_availability(gdaltest.ngw_test_server):
gdaltest.ngw_drv = None
pytest.skip()
ds_resource_id = gdaltest.ngw_ds.GetMetadataItem('id', '')
gdaltest.ngw_ds = None
url = 'NGW:' + gdaltest.ngw_test_server + '/resource/' + ds_resource_id
gdaltest.ngw_ds = gdal.OpenEx(url, gdal.OF_UPDATE, open_options=['PAGE_SIZE=2'])
lyr = gdaltest.ngw_ds.GetLayerByName('test_pt_layer')
lyr.ResetReading()
feat = lyr.GetNextFeature()
counter = 0
while feat is not None:
counter += 1
        assert feat.GetFID() >= 0, 'Expected FID greater than or equal to 0, got {}.'.format(feat.GetFID())
feat = lyr.GetNextFeature()
assert counter >= 3, 'Expected 3 or greater feature count, got {}.'.format(counter)
###############################################################################
# Check native data.
def test_ogr_ngw_10():
if gdaltest.ngw_drv is None:
pytest.skip()
    if not check_availability(gdaltest.ngw_test_server):
gdaltest.ngw_drv = None
pytest.skip()
ds_resource_id = gdaltest.ngw_ds.GetMetadataItem('id', '')
gdaltest.ngw_ds = None
url = 'NGW:' + gdaltest.ngw_test_server + '/resource/' + ds_resource_id
gdaltest.ngw_ds = gdal.OpenEx(url, gdal.OF_UPDATE, open_options=['NATIVE_DATA=YES'])
lyr = gdaltest.ngw_ds.GetLayerByName('test_pt_layer')
lyr.ResetReading()
feat = lyr.GetNextFeature()
feature_id = feat.GetFID()
native_data = feat.GetNativeData()
assert native_data is not None, 'Feature #{} native data should not be empty'.format(feature_id)
# {"description":null,"attachment":null}
assert feat.GetNativeMediaType() == 'application/json', 'Unsupported native media type'
# Set description
feat.SetNativeData('{"description":"Test feature description"}')
ret = lyr.SetFeature(feat)
assert ret == 0, 'Failed to update feature #{}.'.format(feature_id)
feat = lyr.GetFeature(feature_id)
native_data = feat.GetNativeData()
assert native_data is not None and native_data.find('Test feature description') != -1, 'Expected feature description text, got {}'.format(native_data)
###############################################################################
# Check ignored fields works ok
def test_ogr_ngw_11():
if gdaltest.ngw_drv is None or gdaltest.ngw_ds is None:
pytest.skip()
    if not check_availability(gdaltest.ngw_test_server):
gdaltest.ngw_drv = None
pytest.skip()
lyr = gdaltest.ngw_ds.GetLayerByName('test_pt_layer')
lyr.SetIgnoredFields(['STRFIELD'])
feat = lyr.GetNextFeature()
assert not feat.IsFieldSet('STRFIELD'), 'got STRFIELD despite request to ignore it.'
assert feat.GetFieldAsInteger('DECFIELD') == 123, 'missing or wrong DECFIELD'
fd = lyr.GetLayerDefn()
fld = fd.GetFieldDefn(0) # STRFIELD
assert fld.IsIgnored(), 'STRFIELD unexpectedly not marked as ignored.'
fld = fd.GetFieldDefn(1) # DECFIELD
assert not fld.IsIgnored(), 'DECFIELD unexpectedly marked as ignored.'
assert not fd.IsGeometryIgnored(), 'geometry unexpectedly ignored.'
assert not fd.IsStyleIgnored(), 'style unexpectedly ignored.'
feat = None
lyr = None
###############################################################################
# Check attribute filter.
def test_ogr_ngw_12():
if gdaltest.ngw_drv is None:
pytest.skip()
    if not check_availability(gdaltest.ngw_test_server):
gdaltest.ngw_drv = None
pytest.skip()
lyr = gdaltest.ngw_ds.GetLayerByName('test_pt_layer')
lyr.SetAttributeFilter("STRFIELD = 'русский'")
fc = lyr.GetFeatureCount()
assert fc == 1, 'Expected feature count is 1, got {}.'.format(fc)
lyr.SetAttributeFilter("STRFIELD = 'fo_o' AND DECFIELD = 321")
fc = lyr.GetFeatureCount()
assert fc == 0, 'Expected feature count is 0, got {}.'.format(fc)
lyr.SetAttributeFilter('NGW:fld_STRFIELD=fo_o&fld_DECFIELD=123')
fc = lyr.GetFeatureCount()
assert fc == 2, 'Expected feature count is 2, got {}.'.format(fc)
lyr.SetAttributeFilter("DECFIELD < 321")
fc = lyr.GetFeatureCount()
assert fc == 2, 'Expected feature count is 2, got {}.'.format(fc)
lyr.SetAttributeFilter('NGW:fld_REALFIELD__gt=1.5')
fc = lyr.GetFeatureCount()
assert fc == 1, 'Expected feature count is 1, got {}.'.format(fc)
lyr.SetAttributeFilter("STRFIELD ILIKE '%O_O'")
fc = lyr.GetFeatureCount()
assert fc == 2, 'Expected feature count is 2, got {}.'.format(fc)
###############################################################################
# Check spatial filter.
def test_ogr_ngw_13():
if gdaltest.ngw_drv is None:
pytest.skip()
    if not check_availability(gdaltest.ngw_test_server):
gdaltest.ngw_drv = None
pytest.skip()
lyr = gdaltest.ngw_ds.GetLayerByName('test_pt_layer')
# Reset any attribute filters
lyr.SetAttributeFilter(None)
# Check intersecting POINT(3 4)
lyr.SetSpatialFilter(ogr.CreateGeometryFromWkt('POLYGON ((2.5 3.5,2.5 6,6 6,6 3.5,2.5 3.5))'))
fc = lyr.GetFeatureCount()
assert fc == 1, 'Expected feature count is 1, got {}.'.format(fc)
###############################################################################
# Check ExecuteSQL.
def test_ogr_ngw_14():
if gdaltest.ngw_drv is None:
pytest.skip()
    if not check_availability(gdaltest.ngw_test_server):
gdaltest.ngw_drv = None
pytest.skip()
gdaltest.ngw_ds.ExecuteSQL('DELLAYER:test_ln_layer')
lyr = gdaltest.ngw_ds.GetLayerByName('test_ln_layer')
assert lyr is None, 'Expected fail to get layer test_ln_layer.'
lyr = gdaltest.ngw_ds.GetLayerByName('test_pl_layer')
f = ogr.Feature(lyr.GetLayerDefn())
fill_fields(f)
f.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON((0 0,0 1,1 0,0 0))'))
ret = lyr.CreateFeature(f)
assert ret == 0, 'Failed to create feature in test_pl_layer.'
assert lyr.GetFeatureCount() == 1, 'Expected feature count is 1, got {}.'.format(lyr.GetFeatureCount())
gdaltest.ngw_ds.ExecuteSQL('DELETE FROM test_pl_layer')
assert lyr.GetFeatureCount() == 0, 'Expected feature count is 0, got {}.'.format(lyr.GetFeatureCount())
gdaltest.ngw_ds.ExecuteSQL('ALTER TABLE test_pl_layer RENAME TO test_pl_layer777')
lyr = gdaltest.ngw_ds.GetLayerByName('test_pl_layer777')
assert lyr is not None, 'Get layer test_pl_layer777 failed.'
# Create 2 new features
f = ogr.Feature(lyr.GetLayerDefn())
fill_fields(f)
f.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON((0 0,0 1,1 0,0 0))'))
ret = lyr.CreateFeature(f)
assert ret == 0, 'Failed to create feature in test_pl_layer777.'
f = ogr.Feature(lyr.GetLayerDefn())
fill_fields2(f)
f.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON((1 1,1 2,2 1,1 1))'))
ret = lyr.CreateFeature(f)
assert ret == 0, 'Failed to create feature in test_pl_layer777.'
lyr = gdaltest.ngw_ds.ExecuteSQL("SELECT STRFIELD,DECFIELD FROM test_pl_layer777 WHERE STRFIELD = 'fo_o'")
assert lyr is not None, 'ExecuteSQL: SELECT STRFIELD,DECFIELD FROM test_pl_layer777 WHERE STRFIELD = "fo_o"; failed.'
assert lyr.GetFeatureCount() == 2, 'Expected feature count is 2, got {}.'.format(lyr.GetFeatureCount())
gdaltest.ngw_ds.ReleaseResultSet(lyr)
###############################################################################
# Run test_ogrsf
def test_ogr_ngw_test_ogrsf():
if gdaltest.ngw_drv is None or gdal.GetConfigOption('SKIP_SLOW') is not None:
pytest.skip()
    if not check_availability(gdaltest.ngw_test_server):
gdaltest.ngw_drv = None
pytest.skip()
if gdaltest.skip_on_travis():
pytest.skip()
if gdaltest.ngw_ds is None:
pytest.skip()
url = 'NGW:' + gdaltest.ngw_test_server + '/resource/' + gdaltest.group_id
import test_cli_utilities
if test_cli_utilities.get_test_ogrsf_path() is None:
pytest.skip()
ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' ' + url)
assert ret.find('INFO') != -1 and ret.find('ERROR') == -1
ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' ' + url + ' -oo PAGE_SIZE=100')
assert ret.find('INFO') != -1 and ret.find('ERROR') == -1
ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' ' + url + ' -oo BATCH_SIZE=5')
assert ret.find('INFO') != -1 and ret.find('ERROR') == -1
ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' ' + url + ' -oo BATCH_SIZE=5 -oo PAGE_SIZE=100')
assert ret.find('INFO') != -1 and ret.find('ERROR') == -1
###############################################################################
# Cleanup
def test_ogr_ngw_cleanup():
if gdaltest.ngw_drv is None:
pytest.skip()
if gdaltest.group_id is not None:
delete_url = 'NGW:' + gdaltest.ngw_test_server + '/resource/' + gdaltest.group_id
gdaltest.ngw_layer = None
gdaltest.ngw_ds = None
assert gdaltest.ngw_drv.Delete(delete_url) == gdal.CE_None, \
'Failed to delete datasource ' + delete_url + '.'
gdaltest.ngw_ds = None
| 37.418129
| 159
| 0.646402
|
478c7f57f46cb8749a99bbe054b26f9eb5bdf3ce
| 8,129
|
py
|
Python
|
emsm/plugins/server.py
|
KronK0321/emsm
|
2d8882003ff6d688cd4074dcce17f3171f99a69f
|
[
"MIT"
] | 82
|
2015-02-17T19:26:51.000Z
|
2022-03-30T20:13:43.000Z
|
emsm/plugins/server.py
|
KronK0321/emsm
|
2d8882003ff6d688cd4074dcce17f3171f99a69f
|
[
"MIT"
] | 55
|
2015-01-01T19:49:25.000Z
|
2021-06-11T19:45:26.000Z
|
emsm/plugins/server.py
|
SilkAndSlug/emsm
|
2c1cdb305205942e797fdf47fd030c87080d19f9
|
[
"MIT"
] | 32
|
2015-01-15T11:47:04.000Z
|
2021-12-19T21:49:20.000Z
|
#!/usr/bin/env python3
# The MIT License (MIT)
#
# Copyright (c) 2014-2018 <see AUTHORS.txt>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
About
-----
This plugin provides a user interface for the server wrapper. It can handle
the server files and their configuration parameters easily.
Download
--------
You can find the latest version of this plugin in the **EMSM**
`GitHub repository <https://github.com/benediktschmitt/emsm>`_.
Configuration
-------------
.. code-block:: ini
[server]
update_message = The server is going down for an update.
Come back soon.
**update_message**
    Message sent to a world before stopping the world due to a server
    update.
Arguments
---------
.. note::
Make sure to select the server via ``-s, --server``.
.. option:: --usage
    Prints the names of the worlds powered by a server.
.. option:: --list
    Prints the names of all servers supported by the EMSM.
.. option:: --update
Updates the server software.
"""
# Modules
# ------------------------------------------------
# std
import os
import sys
import logging
# third party
import termcolor
# local
import emsm
from emsm.core.base_plugin import BasePlugin
# Data
# ------------------------------------------------
PLUGIN = "Server"
log = logging.getLogger(__file__)
# Classes
# ------------------------------------------------
class Server(BasePlugin):
VERSION = "6.0.0-beta"
DESCRIPTION = __doc__
def __init__(self, application, name):
"""
"""
BasePlugin.__init__(self, application, name)
self._setup_conf()
self._setup_argparser()
return None
def _setup_conf(self):
"""
Reads all configuration options.
"""
conf = self.global_conf()
self._update_message = conf.get(
"update_message",
"The server is going down for an update.\nCome back soon."
)
conf["update_message"] = self._update_message
return None
def _setup_argparser(self):
"""
Sets the argument parser of this plugin up.
"""
parser = self.argparser()
parser.description = "Manage your server executables"
# We allow only one argument (keep it simple).
me_group = parser.add_mutually_exclusive_group()
me_group.add_argument(
"--usage",
action = "count",
dest = "server_usage",
help = "Prints all worlds powered by a server."
)
me_group.add_argument(
"--list",
action = "count",
dest = "server_list",
help = "Prints the names of all server supported by the EMSM."
)
me_group.add_argument(
"--update",
action = "count",
dest = "server_update",
help = "Updates the server software."
)
return None
def run(self, args):
"""
"""
if args.server_list:
self._print_list()
else:
            # Sort the servers by their names before running.
sel_server = self.app().server().get_selected()
sel_server.sort(key = lambda s: s.name())
for server in sel_server:
if args.server_usage:
self._print_usage(server)
elif args.server_update:
self._update_server(server)
return None
def _print_usage(self, server):
"""
Prints all worlds that are powered by the server *server*.
"""
# Get all worlds powered by this server and sort them.
worlds = self.app().worlds().get_by_pred(
lambda w: w.server() is server
)
online_worlds = list(filter(lambda w: w.is_online(), worlds))
online_worlds.sort(key = lambda w: w.name())
offline_worlds = list(filter(lambda w: w.is_offline(), worlds))
offline_worlds.sort(key = lambda w: w.name())
# Print the worlds grouped by their current status (offline/online).
print(termcolor.colored("{}:".format(server.name()), "cyan"))
print("\t", "* {} worlds".format(len(worlds)))
print("\t", "* {} online worlds".format(len(online_worlds)))
if online_worlds:
for world in online_worlds:
print("\t\t", "- {}".format(world.name()))
print("\t", "* {} offline worlds".format(len(offline_worlds)))
if offline_worlds:
for world in offline_worlds:
print("\t\t", "- {}".format(world.name()))
return None
def _print_list(self):
"""
Prints a list with the names of all available server software.
"""
names = self.app().server().get_names()
names.sort()
for name in names:
print("* {}".format(name))
return None
def _update_server(self, server):
"""
Updates the server *server*.
All worlds which are currently online and powered by the *server* will
be stopped and restarted after the update.
"""
print(termcolor.colored("{}:".format(server.name()), "cyan"))
# Get all worlds, that are currently running the server.
worlds = self.app().worlds().get_by_pred(
lambda w: w.server() is server and w.is_online()
)
worlds.sort(key = lambda w: w.name())
# Stop those worlds.
try:
for world in worlds:
print("\t", "stopping the world '{}' ...".format(world.name()))
world.stop(message=self._update_message)
# Do not continue if a world could not be stopped.
except emsm.core.worlds.WorldStopFailed as err:
print("\t", termcolor.colored("error:", "red"),
"the world '{}' could not be stopped.".format(err.world.name())
)
log.exception(err)
# Continue with the server update if all worlds are offline.
            # Note that a ServerIsOnlineError cannot occur since we stopped
            # all worlds. (If a world were still online, we would not have
            # reached this line of code.)
else:
print("\t", "reinstalling the server ...")
try:
server.reinstall()
except emsm.core.server.ServerInstallationFailure as err:
print("\t", termcolor.colored("error:", "red"), err)
log.exception(err)
# Restart the worlds.
finally:
for world in worlds:
print("\t", "restarting the world '{}' ..."\
.format(world.name()))
try:
world.start()
except emsm.core.worlds.WorldStartFailed as err:
print("\t", termcolor.colored("error:", "red"),
"the world '{}' could not be restarted."\
.format(err.world.name())
)
log.exception(err)
return None
| 30.107407
| 81
| 0.575225
|
25948289db6fd050f35541c3834f5b256c638372
| 681
|
py
|
Python
|
globaltags/gravatar.py
|
xarg/snippify
|
b692a941a7a46959df9aff064b7ad056d0125484
|
[
"MIT"
] | 2
|
2016-08-19T06:24:02.000Z
|
2021-05-11T00:08:24.000Z
|
globaltags/gravatar.py
|
xarg/snippify
|
b692a941a7a46959df9aff064b7ad056d0125484
|
[
"MIT"
] | 5
|
2021-05-01T15:03:07.000Z
|
2021-05-01T15:03:08.000Z
|
globaltags/gravatar.py
|
xarg/snippify
|
b692a941a7a46959df9aff064b7ad056d0125484
|
[
"MIT"
] | null | null | null |
import hashlib
from django import template
register = template.Library()
@register.simple_tag
def gravatar(email, size=48):
"""
Simply gets the Gravatar for the commenter. There is no rating or
custom "not found" icon yet. Used with the Django comments.
If no size is given, the default is 48 pixels by 48 pixels.
Template Syntax::
{% gravatar comment.user_email [size] %}
Example usage::
{% gravatar comment.user_email 48 %}
"""
    # hashlib.md5 needs bytes, so encode the email before hashing.
    hash = hashlib.md5(email.encode('utf-8')).hexdigest()
return """<img src="http://www.gravatar.com/avatar/%s?s=%s" width="%s"
height="%s" alt="gravatar" class="gravatar" />""" % (hash, size, size, size)
| 25.222222
| 80
| 0.654919
|
3dce02fe3e9dd4a73abe9b2ff3e73bca50c46b7d
| 355
|
py
|
Python
|
noticeboard/permissions/api.py
|
NdauwaRafael/py-noticeboard
|
bb3ffcf5e82315fb1a5e58d113766406fdce4211
|
[
"Apache-2.0"
] | null | null | null |
noticeboard/permissions/api.py
|
NdauwaRafael/py-noticeboard
|
bb3ffcf5e82315fb1a5e58d113766406fdce4211
|
[
"Apache-2.0"
] | 4
|
2021-03-09T01:44:19.000Z
|
2022-02-12T07:50:22.000Z
|
noticeboard/permissions/api.py
|
NdauwaRafael/py-noticeboard
|
bb3ffcf5e82315fb1a5e58d113766406fdce4211
|
[
"Apache-2.0"
] | null | null | null |
from permissions.models import Permission
from rest_framework import viewsets, permissions
from .serializers import PermissionSerializer
# Permission View Set
class PermissionViewSet(viewsets.ModelViewSet):
queryset = Permission.objects.all()
permission_classes = [
permissions.AllowAny
]
serializer_class = PermissionSerializer
| 27.307692
| 48
| 0.791549
|
d6bb29d9bf6f1c85ce6d91a7cd3a846d9535b80d
| 4,466
|
py
|
Python
|
seaborn/algorithms.py
|
Hagai/seaborn
|
64928c910264579bf406d317242de9d56870ca97
|
[
"BSD-3-Clause"
] | 1
|
2020-04-03T22:58:36.000Z
|
2020-04-03T22:58:36.000Z
|
seaborn/algorithms.py
|
Hagai/seaborn
|
64928c910264579bf406d317242de9d56870ca97
|
[
"BSD-3-Clause"
] | 3
|
2021-03-30T13:15:12.000Z
|
2021-09-22T18:55:59.000Z
|
seaborn/algorithms.py
|
Hagai/seaborn
|
64928c910264579bf406d317242de9d56870ca97
|
[
"BSD-3-Clause"
] | 2
|
2020-08-03T13:02:06.000Z
|
2020-11-04T03:15:44.000Z
|
"""Algorithms to support fitting routines in seaborn plotting functions."""
from __future__ import division
import numbers
import numpy as np
import warnings
def bootstrap(*args, **kwargs):
"""Resample one or more arrays with replacement and store aggregate values.
Positional arguments are a sequence of arrays to bootstrap along the first
axis and pass to a summary function.
Keyword arguments:
n_boot : int, default 10000
Number of iterations
axis : int, default None
Will pass axis to ``func`` as a keyword argument.
units : array, default None
Array of sampling unit IDs. When used the bootstrap resamples units
and then observations within units instead of individual
datapoints.
func : string or callable, default np.mean
Function to call on the args that are passed in. If string, tries
to use as named method on numpy array.
seed : Generator | SeedSequence | RandomState | int | None
Seed for the random number generator; useful if you want
reproducible resamples.
Returns
-------
boot_dist: array
array of bootstrapped statistic values
"""
# Ensure list of arrays are same length
if len(np.unique(list(map(len, args)))) > 1:
raise ValueError("All input arrays must have the same length")
n = len(args[0])
# Default keyword arguments
n_boot = kwargs.get("n_boot", 10000)
func = kwargs.get("func", np.mean)
axis = kwargs.get("axis", None)
units = kwargs.get("units", None)
random_seed = kwargs.get("random_seed", None)
if random_seed is not None:
msg = "`random_seed` has been renamed to `seed` and will be removed"
warnings.warn(msg)
seed = kwargs.get("seed", random_seed)
if axis is None:
func_kwargs = dict()
else:
func_kwargs = dict(axis=axis)
# Initialize the resampler
rng = _handle_random_seed(seed)
# Coerce to arrays
args = list(map(np.asarray, args))
if units is not None:
units = np.asarray(units)
# Allow for a function that is the name of a method on an array
if isinstance(func, str):
def f(x):
return getattr(x, func)()
else:
f = func
# Handle numpy changes
try:
integers = rng.integers
except AttributeError:
integers = rng.randint
# Do the bootstrap
if units is not None:
return _structured_bootstrap(args, n_boot, units, f,
func_kwargs, integers)
boot_dist = []
for i in range(int(n_boot)):
resampler = integers(0, n, n)
sample = [a.take(resampler, axis=0) for a in args]
boot_dist.append(f(*sample, **func_kwargs))
return np.array(boot_dist)
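
# Illustrative usage sketch (not part of the original module); it exercises the documented
# keyword interface with a fixed seed so the resample is reproducible:
#
#   import numpy as np
#   data = np.random.default_rng(0).normal(size=100)
#   boot_means = bootstrap(data, func=np.mean, n_boot=1000, seed=0)
#   ci = np.percentile(boot_means, [2.5, 97.5])  # rough 95% bootstrap interval for the mean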
def _structured_bootstrap(args, n_boot, units, func, func_kwargs, integers):
"""Resample units instead of datapoints."""
unique_units = np.unique(units)
n_units = len(unique_units)
args = [[a[units == unit] for unit in unique_units] for a in args]
boot_dist = []
for i in range(int(n_boot)):
resampler = integers(0, n_units, n_units)
sample = [np.take(a, resampler, axis=0) for a in args]
lengths = map(len, sample[0])
resampler = [integers(0, n, n) for n in lengths]
sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)]
for a in sample]
sample = list(map(np.concatenate, sample))
boot_dist.append(func(*sample, **func_kwargs))
return np.array(boot_dist)
def _handle_random_seed(seed=None):
"""Given a seed in one of many formats, return a random number generator.
Generalizes across the numpy 1.17 changes, preferring newer functionality.
"""
if isinstance(seed, np.random.RandomState):
rng = seed
else:
try:
# General interface for seeding on numpy >= 1.17
rng = np.random.default_rng(seed)
except AttributeError:
# We are on numpy < 1.17, handle options ourselves
if isinstance(seed, (numbers.Integral, np.integer)):
rng = np.random.RandomState(seed)
elif seed is None:
rng = np.random.RandomState()
else:
err = "{} cannot be used to seed the randomn number generator"
raise ValueError(err.format(seed))
return rng
| 33.833333
| 79
| 0.622033
|
e42effaeb8570588c8952cdc1025cf689ffce5ae
| 18,758
|
py
|
Python
|
voicefixer/restorer/model.py
|
ZFTurbo/voicefixer
|
21b46cc0b8bea0eff7b3f2b219958e442c94b68a
|
[
"MIT"
] | 1
|
2021-11-09T18:02:43.000Z
|
2021-11-09T18:02:43.000Z
|
voicefixer/restorer/model.py
|
ZFTurbo/voicefixer
|
21b46cc0b8bea0eff7b3f2b219958e442c94b68a
|
[
"MIT"
] | null | null | null |
voicefixer/restorer/model.py
|
ZFTurbo/voicefixer
|
21b46cc0b8bea0eff7b3f2b219958e442c94b68a
|
[
"MIT"
] | null | null | null |
# import pytorch_lightning as pl
import torch.utils
from voicefixer.tools.mel_scale import MelScale
import torch.utils.data
import matplotlib.pyplot as plt
import librosa.display
from voicefixer.vocoder.base import Vocoder
from voicefixer.tools.pytorch_util import *
# from voicefixer.models.restorer.mel_denoiser.model_kqq import UNetResComplex_100Mb
from voicefixer.restorer.model_kqq_bn import UNetResComplex_100Mb
from voicefixer.tools.random_ import *
from voicefixer.tools.wav import *
from voicefixer.tools.modules.fDomainHelper import FDomainHelper
from voicefixer.tools.io import load_json, write_json
from matplotlib import cm
os.environ['KMP_DUPLICATE_LIB_OK']='True'
EPS=1e-8
class BN_GRU(torch.nn.Module):
def __init__(self,input_dim,hidden_dim,layer=1, bidirectional=False, batchnorm=True, dropout=0.0):
super(BN_GRU, self).__init__()
self.batchnorm = batchnorm
if(batchnorm):self.bn = nn.BatchNorm2d(1)
self.gru = torch.nn.GRU(input_size=input_dim,
hidden_size=hidden_dim,
num_layers=layer,
bidirectional=bidirectional,
dropout=dropout,
batch_first=True)
self.init_weights()
def init_weights(self):
for m in self.modules():
if type(m) in [nn.GRU, nn.LSTM, nn.RNN]:
for name, param in m.named_parameters():
if 'weight_ih' in name:
torch.nn.init.xavier_uniform_(param.data)
elif 'weight_hh' in name:
torch.nn.init.orthogonal_(param.data)
elif 'bias' in name:
param.data.fill_(0)
def forward(self,inputs):
# (batch, 1, seq, feature)
if(self.batchnorm):inputs = self.bn(inputs)
out,_ = self.gru(inputs.squeeze(1))
return out.unsqueeze(1)
class Generator(nn.Module):
def __init__(self,n_mel,hidden,channels):
super(Generator, self).__init__()
        # TODO: the currently running trial doesn't have dropout
self.denoiser = nn.Sequential(
nn.BatchNorm2d(1),
nn.Linear(n_mel, n_mel * 2),
nn.ReLU(inplace=True),
nn.BatchNorm2d(1),
nn.Linear(n_mel*2, n_mel * 4),
nn.Dropout(0.5),
nn.ReLU(inplace=True),
BN_GRU(input_dim=n_mel * 4, hidden_dim=n_mel * 2, bidirectional=True, layer=2, batchnorm=True),
BN_GRU(input_dim=n_mel * 4, hidden_dim=n_mel * 2, bidirectional=True, layer=2, batchnorm=True),
nn.BatchNorm2d(1),
nn.ReLU(inplace=True),
nn.Linear(n_mel * 4, n_mel * 4),
nn.Dropout(0.5),
nn.BatchNorm2d(1),
nn.ReLU(inplace=True),
nn.Linear(n_mel * 4, n_mel),
nn.Sigmoid()
)
self.unet = UNetResComplex_100Mb(channels=channels)
def forward(self,sp, mel_orig):
# Denoising
noisy = mel_orig.clone()
clean = self.denoiser(noisy) * noisy
x = to_log(clean.detach())
unet_in = torch.cat([to_log(mel_orig),x],dim=1)
# unet_in = lstm_out
unet_out = self.unet(unet_in)['mel']
# masks
mel = unet_out + x
# todo mel and addition here are in log scales
return {'mel': mel, "lstm_out":unet_out, "unet_out":unet_out, "noisy": noisy, "clean": clean}
class VoiceFixer(nn.Module):
def __init__(self, channels, type_target="vocals", nsrc=1, loss="l1",
lr=0.002, gamma=0.9,
batchsize=None, frame_length=None,
sample_rate=None,
warm_up_steps=1000, reduce_lr_steps=15000,
# datas
check_val_every_n_epoch=5,
):
super(VoiceFixer, self).__init__()
if(sample_rate == 44100):
window_size = 2048
hop_size = 441
n_mel = 128
elif(sample_rate == 24000):
window_size = 768
hop_size = 240
n_mel = 80
elif(sample_rate == 16000):
window_size = 512
hop_size = 160
n_mel = 80
else:
raise ValueError("Error: Sample rate "+str(sample_rate)+" not supported")
        center = True
pad_mode = 'reflect'
window = 'hann'
freeze_parameters = True
# self.save_hyperparameters()
self.nsrc = nsrc
self.type_target = type_target
self.channels = channels
self.lr = lr
self.generated = None
self.gamma = gamma
        self.sample_rate = sample_rate
self.batchsize = batchsize
self.frame_length = frame_length
# self.hparams['channels'] = 2
# self.am = AudioMetrics()
# self.im = ImgMetrics()
self.vocoder = Vocoder(sample_rate=44100)
self.valid = None
self.fake = None
self.train_step = 0
self.val_step = 0
self.val_result_save_dir = None
self.val_result_save_dir_step = None
        self.downsample_ratio = 2 ** 6 # This number equals 2^{#encoder_blocks}
self.check_val_every_n_epoch = check_val_every_n_epoch
self.f_helper = FDomainHelper(
window_size=window_size,
hop_size=hop_size,
center=center,
pad_mode=pad_mode,
window=window,
freeze_parameters=freeze_parameters,
)
hidden = window_size // 2 + 1
self.mel = MelScale(n_mels=n_mel, sample_rate=sample_rate, n_stft=hidden)
# masking
self.generator = Generator(n_mel,hidden,channels)
self.lr_lambda = lambda step: self.get_lr_lambda(step,
gamma = self.gamma,
warm_up_steps=warm_up_steps,
reduce_lr_steps=reduce_lr_steps)
self.lr_lambda_2 = lambda step: self.get_lr_lambda(step,
gamma = self.gamma,
warm_up_steps=10,
reduce_lr_steps=reduce_lr_steps)
self.mel_weight_44k_128 = torch.tensor([19.40951426, 19.94047336, 20.4859038, 21.04629067,
21.62194148, 22.21335214, 22.8210215, 23.44529231,
24.08660962, 24.74541882, 25.42234287, 26.11770576,
26.83212784, 27.56615283, 28.32007747, 29.0947679,
29.89060111, 30.70832636, 31.54828121, 32.41121487,
33.29780773, 34.20865341, 35.14437675, 36.1056621,
37.09332763, 38.10795802, 39.15039691, 40.22119881,
41.32154931, 42.45172373, 43.61293329, 44.80609379,
46.031602, 47.29070223, 48.58427549, 49.91327905,
51.27863232, 52.68119708, 54.1222372, 55.60274206,
57.12364703, 58.68617876, 60.29148652, 61.94081306,
63.63501986, 65.37562658, 67.16408954, 69.00109084,
70.88850318, 72.82736101, 74.81985537, 76.86654792,
78.96885475, 81.12900906, 83.34840929, 85.62810662,
87.97005418, 90.37689804, 92.84887686, 95.38872881,
97.99777002, 100.67862715, 103.43232942, 106.26140638,
109.16827015, 112.15470471, 115.22184756, 118.37439245,
121.6122689, 124.93877158, 128.35661454, 131.86761321,
135.47417938, 139.18059494, 142.98713744, 146.89771854,
150.91684347, 155.0446638, 159.28614648, 163.64270198,
168.12035831, 172.71749158, 177.44220154, 182.29556933,
187.28286676, 192.40502126, 197.6682721, 203.07516896,
208.63088733, 214.33770931, 220.19910108, 226.22363072,
232.41087124, 238.76803591, 245.30079083, 252.01064464,
258.90261676, 265.98474, 273.26010248, 280.73496362,
288.41440094, 296.30489752, 304.41180337, 312.7377183,
321.28877878, 330.07870237, 339.10812951, 348.38276173,
357.91393924, 367.70513992, 377.76413924, 388.09467408,
398.70920178, 409.61813793, 420.81980127, 432.33215467,
444.16083117, 456.30919947, 468.78589276, 481.61325588,
494.78824596, 508.31969844, 522.2238331, 536.51163441,
551.18859414, 566.26142988, 581.75006061, 597.66210737]) / 19.40951426
self.mel_weight_44k_128 = self.mel_weight_44k_128[None, None, None, ...]
self.g_loss_weight = 0.01
self.d_loss_weight = 1
def get_vocoder(self):
return self.vocoder
def get_f_helper(self):
return self.f_helper
def get_lr_lambda(self,step, gamma, warm_up_steps, reduce_lr_steps):
r"""Get lr_lambda for LambdaLR. E.g.,
        .. code-block:: python
lr_lambda = lambda step: get_lr_lambda(step, warm_up_steps=1000, reduce_lr_steps=10000)
from torch.optim.lr_scheduler import LambdaLR
LambdaLR(optimizer, lr_lambda)
"""
if step <= warm_up_steps:
return step / warm_up_steps
else:
return gamma ** (step // reduce_lr_steps)
def init_weights(self, module: nn.Module):
for m in module.modules():
if type(m) in [nn.GRU, nn.LSTM, nn.RNN]:
for name, param in m.named_parameters():
if 'weight_ih' in name:
torch.nn.init.xavier_uniform_(param.data)
elif 'weight_hh' in name:
torch.nn.init.orthogonal_(param.data)
elif 'bias' in name:
param.data.fill_(0)
def pre(self, input):
sp, _, _ = self.f_helper.wav_to_spectrogram_phase(input)
mel_orig = self.mel(sp.permute(0,1,3,2)).permute(0,1,3,2)
return sp, mel_orig
def forward(self, sp, mel_orig):
"""
Args:
input: (batch_size, channels_num, segment_samples)
Outputs:
output_dict: {
'wav': (batch_size, channels_num, segment_samples),
'sp': (batch_size, channels_num, time_steps, freq_bins)}
"""
return self.generator(sp, mel_orig)
def configure_optimizers(self):
optimizer_g = torch.optim.Adam([{'params': self.generator.parameters()}],
lr=self.lr, amsgrad=True, betas=(0.5, 0.999))
optimizer_d = torch.optim.Adam([{'params': self.discriminator.parameters()}],
lr=self.lr, amsgrad=True,
betas=(0.5, 0.999))
scheduler_g = {
'scheduler': torch.optim.lr_scheduler.LambdaLR(optimizer_g, self.lr_lambda),
'interval': 'step',
'frequency': 1,
}
scheduler_d = {
'scheduler': torch.optim.lr_scheduler.LambdaLR(optimizer_d, self.lr_lambda),
'interval': 'step',
'frequency': 1,
}
return [optimizer_g, optimizer_d ], [scheduler_g, scheduler_d]
def preprocess(self, batch, train=False, cutoff=None):
if(train):
vocal = batch[self.type_target] # final target
noise = batch['noise_LR'] # augmented low resolution audio with noise
            augLR = batch[self.type_target+'_aug_LR']  # augmented low resolution audio
LR = batch[self.type_target+'_LR']
# embed()
vocal, LR, augLR, noise = vocal.float().permute(0, 2, 1), LR.float().permute(0, 2, 1), augLR.float().permute(0, 2, 1), noise.float().permute(0, 2, 1)
# LR, noise = self.add_random_noise(LR, noise)
snr, scale = [],[]
for i in range(vocal.size()[0]):
vocal[i,...], LR[i,...], augLR[i,...], noise[i,...], _snr, _scale = add_noise_and_scale_with_HQ_with_Aug(vocal[i,...],LR[i,...], augLR[i,...], noise[i,...], snr_l=-5,snr_h=45, scale_lower=0.6, scale_upper=1.0)
snr.append(_snr), scale.append(_scale)
# vocal, LR = self.amp_to_original_f(vocal, LR)
# noise = (noise * 0.0) + 1e-8 # todo
return vocal, augLR, LR, noise + augLR
else:
if(cutoff is None):
LR_noisy = batch["noisy"]
LR = batch["vocals"]
vocals = batch["vocals"]
vocals, LR, LR_noisy = vocals.float().permute(0, 2, 1), LR.float().permute(0, 2, 1), LR_noisy.float().permute(0, 2, 1)
return vocals, LR, LR_noisy, batch['fname'][0]
else:
LR_noisy = batch["noisy"+"LR"+"_"+str(cutoff)]
LR = batch["vocals" + "LR" + "_" + str(cutoff)]
vocals = batch["vocals"]
vocals, LR, LR_noisy = vocals.float().permute(0, 2, 1), LR.float().permute(0, 2, 1), LR_noisy.float().permute(0, 2, 1)
return vocals, LR, LR_noisy, batch['fname'][0]
def training_step(self, batch, batch_nb, optimizer_idx):
# dict_keys(['vocals', 'vocals_aug', 'vocals_augLR', 'noise'])
config = load_json("temp_path.json")
if("g_loss_weight" not in config.keys()):
config['g_loss_weight'] = self.g_loss_weight
config['d_loss_weight'] = self.d_loss_weight
write_json(config,"temp_path.json")
elif(config['g_loss_weight'] != self.g_loss_weight or config['d_loss_weight'] != self.d_loss_weight):
print("Update d_loss weight, from", self.d_loss_weight, "to",config['d_loss_weight'])
print("Update g_loss weight, from", self.g_loss_weight, "to",config['g_loss_weight'])
self.g_loss_weight = config['g_loss_weight']
self.d_loss_weight = config['d_loss_weight']
if (optimizer_idx == 0):
self.vocal, self.augLR, _, self.LR_noisy = self.preprocess(batch, train=True)
for i in range(self.vocal.size()[0]):
save_wave(tensor2numpy(self.vocal[i, ...]), str(i) + "vocal" + ".wav", sample_rate=44100)
save_wave(tensor2numpy(self.LR_noisy[i, ...]), str(i) + "LR_noisy" + ".wav", sample_rate=44100)
# all_mel_e2e in non-log scale
_, self.mel_target = self.pre(self.vocal)
self.sp_LR_target, self.mel_LR_target = self.pre(self.augLR)
self.sp_LR_target_noisy, self.mel_LR_target_noisy = self.pre(self.LR_noisy)
if (self.valid is None or self.valid.size()[0] != self.mel_target.size()[0]):
self.valid = torch.ones(self.mel_target.size()[0], 1, self.mel_target.size()[2], 1)
self.valid = self.valid.type_as(self.mel_target)
if (self.fake is None or self.fake.size()[0] != self.mel_target.size()[0]):
self.fake = torch.zeros(self.mel_target.size()[0], 1, self.mel_target.size()[2], 1)
self.fake = self.fake.type_as(self.mel_target)
self.generated = self(self.sp_LR_target_noisy, self.mel_LR_target_noisy)
denoise_loss = self.l1loss(self.generated['clean'], self.mel_LR_target)
targ_loss = self.l1loss(self.generated['mel'], to_log(self.mel_target))
self.log("targ-l", targ_loss, on_step=True, on_epoch=False, logger=True, sync_dist=True, prog_bar=True)
self.log("noise-l", denoise_loss, on_step=True, on_epoch=False, logger=True, sync_dist=True, prog_bar=True)
loss = targ_loss + denoise_loss
if(self.train_step >= 18000):
g_loss = self.bce_loss(self.discriminator(self.generated['mel']), self.valid)
self.log("g_l", g_loss, on_step=True, on_epoch=False, logger=True, sync_dist=True, prog_bar=True)
# print("g_loss", g_loss)
all_loss = loss + self.g_loss_weight * g_loss
self.log("all_loss", all_loss, on_step=True, on_epoch=True, logger=True, sync_dist=True)
else:
all_loss = loss
self.train_step += 0.5
return {"loss": all_loss}
elif(optimizer_idx == 1):
if(self.train_step >= 16000):
self.generated = self(self.sp_LR_target_noisy, self.mel_LR_target_noisy)
self.train_step += 0.5
real_loss = self.bce_loss(self.discriminator(to_log(self.mel_target)),self.valid)
self.log("r_l", real_loss, on_step=True, on_epoch=False, logger=True, sync_dist=True, prog_bar=True)
fake_loss = self.bce_loss(self.discriminator(self.generated['mel'].detach()), self.fake)
self.log("d_l", fake_loss, on_step=True, on_epoch=False, logger=True, sync_dist=True, prog_bar=True)
d_loss = self.d_loss_weight * (real_loss+fake_loss) / 2
self.log("discriminator_loss", d_loss, on_step=True, on_epoch=True, logger=True, sync_dist=True)
return {"loss": d_loss}
def draw_and_save(self, mel: torch.Tensor, path, clip_max=None, clip_min=None, needlog=True):
plt.figure(figsize=(15,5))
if(clip_min is None):
clip_max,clip_min = self.clip(mel)
mel = np.transpose(tensor2numpy(mel)[0,0,...],(1,0))
# assert np.sum(mel < 0) == 0, str(np.sum(mel < 0)) + str(np.sum(mel < 0))
if(needlog):
assert np.sum(mel < 0) == 0, str(np.sum(mel < 0))+"-"+path
mel_log = np.log10(mel+EPS)
else:
mel_log = mel
# plt.imshow(mel)
librosa.display.specshow(mel_log, sr=44100,x_axis='frames',y_axis='mel',cmap=cm.jet, vmax=clip_max, vmin=clip_min)
plt.colorbar()
plt.savefig(path)
plt.close()
def clip(self,*args):
val_max, val_min = [],[]
for each in args:
val_max.append(torch.max(each))
val_min.append(torch.min(each))
return max(val_max), min(val_min)
| 46.316049
| 225
| 0.555816
|
7c2372ebaf72e4413a6240bdd0fda204f4d56361
| 8,796
|
py
|
Python
|
get_landmarks_dermofit.py
|
Innes-BG/Morphometries_detectthia
|
712081ba937a86b0a8204b7c03c4e77ff7e33a4c
|
[
"MIT"
] | null | null | null |
get_landmarks_dermofit.py
|
Innes-BG/Morphometries_detectthia
|
712081ba937a86b0a8204b7c03c4e77ff7e33a4c
|
[
"MIT"
] | null | null | null |
get_landmarks_dermofit.py
|
Innes-BG/Morphometries_detectthia
|
712081ba937a86b0a8204b7c03c4e77ff7e33a4c
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
from numpy import array, transpose, mean, stack, where
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
import math
import os
import time
from math import atan2, pi, sin, cos
def sort_points_by_angle(landmarks):
#Sort points by angle
angles=[]
for p in landmarks:
angle= atan2(p[0],p[1])*360/(2*pi)
if angle<0:
angle=angle+360
angles.append(angle)
angle_inds = np.array(angles).argsort()
landmarks = np.array(landmarks)[angle_inds]
return landmarks
def get_landmarks_from_image(image, step=30, pca=True):
'''Loads image and calculates the landmarks of skin lesion at different angles
-image: as a mask, white pixel for lesion
    -step: angle step between landmarks (degrees). E.g. step=10 -> 36 landmarks per image
-pca: whether principal components transformation is made to orientate the images
RETURNS:
-list of extracted landmarks'''
print('Start')
tic = time.perf_counter()
img = cv2.imread(image,0)
    #Fast way to go through each pixel and get coordinates of pixels above 0
limit=0
x, y = (img > limit).nonzero()
##percentage of lesion area in the image.
# per=len(x)/(len(img)*len(img[0]))
# print(per)
##Show shape
# plt.plot(x, y, 'ro')
# plt.gca().set_aspect('equal', adjustable='box')
# plt.show()
# plt.close()
#Principal components
pts1=np.transpose([x,y])
if pca:
pca = PCA(n_components=2, svd_solver='full')
pca.fit(pts1)
pts2 = pca.transform(pts1)
else:#Not pca, only center
x=transpose(pts1)[0]
y=transpose(pts1)[1]
x_c=x-mean(x)
y_c=y-mean(y)
pts2 = transpose([x_c, y_c])
# Show shape after PCA
x2=np.transpose(pts2)[0]
y2=np.transpose(pts2)[1]
plt.plot(x2, y2, 'ro')
plt.gca().set_aspect('equal', adjustable='box')
# plt.show()
    '''Landmarks are chosen at every angle step:
    1st define the line for the angle, passing through (0,0)
    2nd get the points on that line within a half-pixel margin -> p_sel
    3rd find the furthest pixel, which is the lesion limit
    '''
angle=0
landmarks=[]
while angle in range(0,180):
angle_pi=angle*pi/180
#line constants:
a=sin(angle_pi)
b=cos(angle_pi)
        #Get points whose distance to the angle line is below 0.5 pixels, plus their distance to the origin
res = array([[p, (p[0]**2+p[1]**2)**0.5] for p in pts2 if abs(a*p[0]+b*p[1])/(a**2+b**2)<0.5], dtype="object")
res_t = transpose(res)
p_sel= stack(res_t[0], axis=0)
d2zero= res_t[1] #distance to centre
#Get point with max distance to zero
max_ind = where(d2zero == np.max(d2zero))
landmark1 = p_sel[max_ind][0]
#Furthest point from landmark1 in angle line
d2other = array([[((landmark1[0]-p[0])**2+(landmark1[1]-p[1])**2)**0.5] for p in p_sel], dtype="object")
max_ind2 = where(d2other == np.max(d2other))
landmark2 = p_sel[max_ind2[0]][0]
landmarks.append(landmark1)
landmarks.append(landmark2)
angle=angle+step
#Sort points by angle
landmarks = sort_points_by_angle(landmarks)
xl=np.transpose(landmarks)[0]
yl=np.transpose(landmarks)[1]
plt.plot(xl, yl, 'bo')
n=[1,2,3,4,5,6]
for i, txt in enumerate(n):
plt.annotate(txt, (xl[i], yl[i]))
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
plt.close()
toc = time.perf_counter()
    print('Time for 1 image and ' + str(step) + 'º step angle:' + str(round(toc - tic,0)) + ' seconds')
return landmarks
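
# Illustrative usage sketch (hypothetical mask path, not part of the original script):
#
#   landmarks = get_landmarks_from_image('lesion_mask.png', step=30, pca=True)
#   # -> two landmarks per angle line, every 30 degrees over 0-180, i.e. 12 points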
def get_landmarks_batch(directory, filetype, results_file, correspondences_file, step_value, pca=True, mirror=True):
    '''Calculates landmarks for all images in a directory and generates a morphologika file'''
#tic_ini = time.perf_counter()
subfolders=(next(os.walk(directory))[1])
i=0
filenames=[]
names=[]
landmarks_list=[]
labels=[]
names_cor=[]
for sf in subfolders:
files = os.listdir(directory + '/' + sf)
label=sf
for img_f in files:#Each image has its own folder
img_folders = os.listdir(directory + '/' + sf + '/' + img_f +'/')
for img in img_folders:
if img.endswith('mask.' + filetype):
filenames.append(img)
names.append(sf+str(i))
names_cor.append(sf+str(i))#Only for correspondence file, not affected by mirroring
labels.append(sf)
landmarks=get_landmarks_from_image(directory + '/' + sf+ '/' + img_f + '/' + img, step_value, pca)
landmarks_list.append(landmarks)
if mirror:
names.append(sf+str(i)+'_mx')
names.append(sf+str(i)+'_my')
labels.append(sf)
labels.append(sf)
                        #If mirror, three sets of landmarks are created for each image: original, mirrored in x and mirrored in y
landmarks_mirror_x=np.transpose([-np.transpose(landmarks)[0],np.transpose(landmarks)[1]])
landmarks_mirror_x = sort_points_by_angle(landmarks_mirror_x)
landmarks_list.append(landmarks_mirror_x)
landmarks_mirror_y=np.transpose([np.transpose(landmarks)[0],-np.transpose(landmarks)[1]])
landmarks_mirror_y = sort_points_by_angle(landmarks_mirror_y)
landmarks_list.append(landmarks_mirror_y)
i+=1
'''Write morphologika file'''
res = open(results_file, 'w')
res.write('[individuals]'+'\n')
res.write(str(len(names))+'\n')
res.write('[landmarks]'+'\n')
res.write(str(len(landmarks_list[0]))+'\n')
res.write('[dimensions]'+'\n')
res.write(str(len(landmarks_list[0][0]))+'\n')
res.write('[names]'+'\n')
for name in names:
res.write(name + '\n')
res.write('[labels]'+'\n')
res.write('Sample'+'\n')
res.write('[labelvalues]'+'\n')
for label in labels:
res.write(label + '\n')
res.write('[rawpoints]'+'\n')
i=0
while i<len(names):
res.write("\'" + names[i]+'\n')
lm = landmarks_list[i]
for l in lm:
res.write(str(l[0])+' ')
res.write(str(l[1])+'\n')
res.write('\n')
i+=1
res.close()
'''Write matching names and filenames file'''
cor = open(correspondences_file, 'w')
i=0
while i<len(filenames):
cor.write(filenames[i] + ',' + names_cor[i]+'\n')
i+=1
cor.close()
#toc_fin = time.perf_counter()
#print('Time for ' +str(len(names)) + ' images and ' + str(step_value) + 'º step angle:' + str(round(toc_fin - tic_ini,0)) + ' seconds')
'''Same as get_landmarks but adapted for the dermofit file system.
Directory is a folder with one subfolder per label; the names of the subfolders are used as labels.
There is another subfolder per image, containing the image itself and the mask, named the same + mask'''
directory='D:/USAL/Detectthia/Morfometrias/Pruebas_morfo/Images_dermofit'
filetype='png'
correspondences_file= directory +'/correspondence.txt'
# results_file= directory +'/Landmarks_10g_mirror.txt'
# step_value=10 #Angle step between landmarks
# get_landmarks_batch(directory, filetype, results_file, correspondences_file, step_value)
# results_file= directory +'/Landmarks_12g_mirror.txt'
# step_value=12 #Angle step between landmarks
# get_landmarks_batch(directory, filetype, results_file, correspondences_file, step_value)
# results_file= directory +'/Landmarks_15g_mirror.txt'
# step_value=15 #Angle step between landmarks
# get_landmarks_batch(directory, filetype, results_file, correspondences_file, step_value)
# results_file= directory +'/Landmarks_20g_mirror.txt'
# step_value=20 #Angle step between landmarks
# get_landmarks_batch(directory, filetype, results_file, correspondences_file, step_value)
# results_file= directory +'/Landmarks_30g_mirror.txt'
# step_value=30 #Angle step between landmarks
# get_landmarks_batch(directory, filetype, results_file, correspondences_file, step_value)
results_file= directory +'/Landmarks_60g_mirror.txt'
step_value=60 #Angle step between landmarks
get_landmarks_batch(directory, filetype, results_file, correspondences_file, step_value)
| 35.467742
| 148
| 0.609936
|
4f20f72cf2659886891f3d2ca99310ca0b3177bc
| 17,925
|
py
|
Python
|
eqparse/loadsim.py
|
TfedUD/eqparse
|
ab1fba5b4995bed3f5fa2f77cdf505bb613c7e71
|
[
"MIT"
] | null | null | null |
eqparse/loadsim.py
|
TfedUD/eqparse
|
ab1fba5b4995bed3f5fa2f77cdf505bb613c7e71
|
[
"MIT"
] | null | null | null |
eqparse/loadsim.py
|
TfedUD/eqparse
|
ab1fba5b4995bed3f5fa2f77cdf505bb613c7e71
|
[
"MIT"
] | null | null | null |
'''
the 'load' module provides common access to the 'sim' and 'hsr' modules, including
automated batch routines and plotting. all parsing logic for sim files/reports should
be accomplished in 'sim.py' or 'hsr.py'; this is mainly an API for script-running.
'''
import os
import xlwings as xw
import shutil
import pandas as pd
from .sim.sim import RptHandler
from .sim import batch
from .hsr.hsr import hsr_df as hsr_df
from .hsr.unit_dict import unit_dict
#from .inp import inp
from .spaceloads.spaceloads import spaceloads
class LoadSim:
'''
Main entry-point for eqparse module.
Can pass in sim file or hourly file or just general extensionless name, e.g.
"Prop.SIM", "Prop.hsr", "Prop" will all work.
'''
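    # Illustrative usage sketch (hypothetical model path, not part of the original module):
    #
    #   ls = LoadSim('C:/models/Proposed Design', hsr=True)
    #   summaries = ls.tidy_enduses(dropzeros=True)   # dict of tidy BEPS/BEPU (and COST) frames
    #   ls.annual_summaries(writecsv=True, opencsv=False)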
def __init__(self, file, hsr=False, inpfile=None):
file = file.replace('.SIM', '').replace('.hsr', '')
self.fname = file.split("\\")[-1].split("/")[-1]
self.sim = RptHandler(file + '.SIM')
self.path = os.path.dirname(file)
if hsr:
try:
self.hsr = hsr_df(file + '.hsr')
except:
print(
"HSR validation failed. Check eQUEST component names for commas or other special characters.")
if inpfile is not None:
self.inpfile = inpfile
# doesn't work, needs further parsing.
# spcloadfile = '-'.join(file.split('-')[:-1]) + '- SpaceLoads.csv'
# try:
# self.spaceloads = spaceloads(spcloadfile)
# except:
# self.spaceloads = 'Not Loaded: {0}'.format(spcloadfile)
def tidy_enduses(self, dropzeros=True, includetotal=False, rename=True, splitmeters=False):
'''
returns tidy dataframe with concatenated enduses + value
'''
try:
beps, bepu, cost = self.annual_summaries(writecsv=False)
iscost = True
except:
beps, bepu = self.annual_summaries(writecsv=False)
iscost = False
if iscost:
costmelt = pd.melt(
cost, id_vars=['File', 'Rate', 'Meter', 'Utility'])
costmelt['Enduse'] = costmelt['variable'] + \
' - ' + costmelt['Meter']
bepsmelt = pd.melt(beps, id_vars=['File', 'Meter', 'Utility', 'Value'])
bepsmelt['Enduse'] = bepsmelt['variable'] + ' - ' + bepsmelt['Meter']
bepumelt = pd.melt(bepu, id_vars=['File', 'Meter', 'Utility', 'Value'])
bepumelt['Enduse'] = bepumelt['variable'] + ' - ' + bepumelt['Meter']
if dropzeros:
if iscost:
costmelt = costmelt[costmelt.value != 0]
bepumelt = bepumelt[bepumelt.value != 0]
bepsmelt = bepsmelt[bepsmelt.value != 0]
if not includetotal:
if iscost:
costmelt = costmelt[costmelt.variable != 'Total']
bepumelt = bepumelt[bepumelt.variable != 'Total']
bepsmelt = bepsmelt[bepsmelt.variable != 'Total']
if iscost:
costmelt = costmelt[['Enduse', 'value']
].set_index('Enduse', drop=True)
bepumelt = bepumelt[['Enduse', 'value']].set_index('Enduse', drop=True)
bepsmelt = bepsmelt[['Enduse', 'value']].set_index('Enduse', drop=True)
if rename:
if iscost:
costmelt.columns = [self.fname + ' - COST']
bepumelt.columns = [self.fname + ' - BEPU']
bepsmelt.columns = [self.fname + ' - BEPS']
if splitmeters:
bepumelt['Enduse'] = bepumelt.apply(
lambda x: x.name.split(' - ')[0], axis=1)
bepumelt['Meter'] = bepumelt.apply(
lambda x: x.name.split(' - ')[1], axis=1)
bepsmelt['Enduse'] = bepsmelt.apply(
lambda x: x.name.split(' - ')[0], axis=1)
bepsmelt['Meter'] = bepsmelt.apply(
lambda x: x.name.split(' - ')[1], axis=1)
costmelt['Enduse'] = costmelt.apply(
lambda x: x.name.split(' - ')[0], axis=1)
costmelt['Meter'] = costmelt.apply(
lambda x: x.name.split(' - ')[1], axis=1)
bepsmelt = bepsmelt[list(
bepsmelt.columns[-2:]) + list(bepsmelt.columns[:-2])]
bepumelt = bepumelt[list(
bepumelt.columns[-2:]) + list(bepumelt.columns[:-2])]
costmelt = costmelt[list(
costmelt.columns[-2:]) + list(costmelt.columns[:-2])]
if iscost:
return {
'beps': bepsmelt,
'bepu': bepumelt,
'cost': costmelt
}
else:
return {
'beps': bepsmelt,
'bepu': bepumelt,
}
def annual_summaries(self, writecsv=True, opencsv=True):
'''
Exports the following:
fname_BEPS.csv,
fname_BEPU.csv,
fname_COST.csv,
fname_UNMET.csv,
Also returns dict of Pandas Dataframes
Available Kwargs:
writecsv: Bool
opencsv: Bool
reports:
'''
beps = self.sim.beps()
bepu = self.sim.bepu()
iscost = True
try:
cost = self.annual_cost_enduse()
except:
iscost = False
print(
'Rates have not been defined for this project; cost outputs will not be created.')
unmet_df, cool_ssr, heat_ssr = self.sim.unmet()
if writecsv:
beps_file = self.path + "/" + "__BEPS_"+self.fname+".csv"
bepu_file = self.path + "/" + "__BEPU_"+self.fname+".csv"
unmet_file = self.path + "/" + "__UNMET_"+self.fname+".csv"
beps.to_csv(beps_file, index=False)
bepu.to_csv(bepu_file, index=False)
if iscost:
cost_file = self.path + "/" + "__COST_"+self.fname+".csv"
cost.to_csv(cost_file, index=False)
# UNMET CONCAT
with open(unmet_file, 'w', newline='\n') as f:
unmet_df.to_csv(f)
with open(unmet_file, 'a', newline='\n') as f:
heat_ssr.to_csv(f)
cool_ssr.to_csv(f)
if opencsv:
book = xw.Book(beps_file)
book.close()
book = xw.Book(bepu_file)
book.close()
if iscost:
book = xw.Book(cost_file)
book.close()
book = xw.Book(unmet_file)
book.close()
else:
if iscost:
return beps, bepu, cost
else:
return beps, bepu
def hourly(self):
return self.hsr.df
def hourlyreports(self):
return self.hsr.df.columns
def hourly_results(self):
self.hsr.df.to_csv(self.path + "/" + self.fname + ".csv")
def leed_enduses(self, write_csv=True, open_csv=True, group_meters=True):
leed_psf = self.sim.psf(leedpivot=True)
if group_meters:
leed_psf = leed_psf.T.reset_index().groupby(
['Object', 'level_0']).sum().T.stack()
leed_psf.index.names = ['Enduse', 'Energy or Demand']
colnames = leed_psf.columns.tolist()
leed_psf = pd.melt(leed_psf.reset_index(), id_vars=[
'Enduse', 'Energy or Demand'], value_vars=colnames)
if write_csv:
fname = self.path + "/" + "__LEED_ENDUSES_"+self.fname+".csv"
leed_psf.to_csv(fname)
if open_csv:
book = xw.Book(fname)
book.close()
return leed_psf
def sim_print(self, reportlist, directory="Report Outputs"):
'''
        for printing sim files (i.e. for code/LEED submission) to PDF, returns
new *.SIM with only the items in the reportlist (e.g. ['ES-D', 'BEPS', 'BEPU'])
'''
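        # Illustrative call (hypothetical report list, taken from the docstring above): writes
        # '_outputs_<fname>.SIM' under self.path/<directory> containing only the requested reports.
        #
        #   ls.sim_print(['BEPS', 'BEPU', 'ES-D'], directory='Report Outputs')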
simpath = self.path + '/' + self.fname + '.SIM'
fdir = self.path + '/' + directory
fname = '_outputs_' + self.fname + '.SIM'
fpath = fdir + '/' + fname
if not os.path.exists(fdir):
os.makedirs(fdir)
if os.path.isfile(fpath):
os.remove(fpath)
with open(simpath) as f:
f_list = f.readlines()
rptstart = []
for num, line in enumerate(f_list, 0):
for r in reportlist:
if r == 'PV-A':
parse_mod = True
else:
parse_mod = False
if r in line:
if parse_mod:
rptstart.append(int(num)-2)
else:
rptstart.append(int(num)-2)
for r in rptstart:
lines = 0
scan = f_list[r+3:(r+1000)]
if lines == 0:
for num, line in enumerate(scan):
rptlen = []
if "REPORT" in line:
rptlen.append(num)
lines = lines + 1
break
rpt_text_list = (f_list[r:(r+rptlen[0]+1)])
if 'PV-A' in rpt_text_list[2] or 'PS-E' in rpt_text_list[2] or 'PS-F' in rpt_text_list[2]:
rpt_text_list[-1] = rpt_text_list[-1][:-2]
with open(fpath, 'a') as output:
for l in rpt_text_list:
output.write(l)
print(
'Successfully Printed Requested Reports to {0}'.format((fpath)))
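    # Annual cost by enduse: virtual $/unit rates are derived from the ES-E report
    # (total charge / metered energy per rate), mapped onto each BEPU meter, and
    # then applied to every enduse column of the BEPU report.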
def annual_cost_enduse(self):
def get_utils(series):
utilcols = []
for s in series:
if "ELECTRICITY" in s:
utilcols.append('Electricity')
if "NATURAL-GAS" in s:
utilcols.append('Natural-Gas')
if "STEAM" in s:
utilcols.append('Steam')
if "CHILLED" in s:
utilcols.append('Chilled-Water')
return utilcols
bepu = self.sim.bepu()
ese = self.sim.ese()
mdict = {}
rate = list(ese.Object)
meters = list(ese.Meters)
for num, mtrlist in enumerate(meters):
for mtr in mtrlist:
mdict[mtr] = rate[num]
rdict = ese.groupby('Object').sum()[
['TOTAL CHARGE ($)', 'METERED ENERGY (KWH)']]
rdict['vrate'] = rdict['TOTAL CHARGE ($)'] / \
rdict['METERED ENERGY (KWH)']
vrate = rdict['vrate'].to_dict()
metervrate = {}
for key, value in mdict.items():
metervrate[key] = vrate[value]
utils = get_utils(bepu.index)
def try_rate(x):
try:
return mdict[x]
except:
return 0
def try_vrate(rate, metervrate):
try:
return metervrate[rate]
except:
if rate == 0:
return 0
else:
print(
'could not find associated vrate from meter: {0}'.format(rate))
bepu['UTILITY'] = utils
bepu.index = [x.replace(" ELECTRICITY", "").replace(" NATURAL-GAS", "").replace(
" STEAM", "").replace(" CHILLED-WATER", "").strip() for x in bepu.index]
bepu['meter'] = bepu.index
bepu['rate'] = bepu['meter'].apply(lambda x: try_rate(x))
bepu['vrate'] = bepu['rate'].apply(lambda x: try_vrate(x, vrate))
bepu['vrate'] = bepu['vrate'].fillna(0)
try:
cost = bepu[['Lights',
'Task Lights',
'Misc Equip',
'Space Heating',
'Space Cooling',
'Heat Reject',
'Pumps & Aux',
'Vent Fans',
'Refrig Display',
'Ht Pump Supplem',
'Domest Hot Wtr',
'Ext Usage',
'Total']].apply(lambda x: x * bepu['vrate'])
cost['Utility'] = utils
cost['File'] = bepu['File']
cost['Rate'] = bepu['rate']
cost['Meter'] = bepu['meter']
cost = cost[[
'File',
'Rate',
'Meter',
'Utility',
'Lights',
'Task Lights',
'Misc Equip',
'Space Heating',
'Space Cooling',
'Heat Reject',
'Pumps & Aux',
'Vent Fans',
'Refrig Display',
'Ht Pump Supplem',
'Domest Hot Wtr',
'Ext Usage',
'Total'
]]
return cost
except:
            print('COULDN\'T PARSE BEPU AND VRATE. CHECK FOR NAN OR -NAN IN RESULTS')
# print (bepu)
# print (sim)
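    # Monthly system summaries: fan electricity (kWh) is assembled from the SS-L
    # report and merged with the SS-A heating/cooling energy (MMBtu) for each system.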
def systemsummaries(self):
ssl = self.sim.ssl()
ssa = self.sim.ssa()
ssl['Month'] = ssl.index
sslsumm = ssl.copy()
sslsumm['Fan Power (kWh)'] = sslsumm['FAN ELEC DURING HEATING (KWH)'] + sslsumm['FAN ELEC DURING COOLING (KWH)'] + \
sslsumm['FAN ELEC DURING FLOATING (KWH)'] - \
sslsumm['FAN ELEC DURING HEAT & COOL KWH)']
sslsumm = sslsumm[['Month', 'Object', 'File', 'Fan Power (kWh)']]
ssasumm = ssa[[
'Month', 'Cooling Energy (MMBtu)', 'Heating Energy (MMBtu)', 'Object', 'File']]
systemsummaries = ssasumm.merge(sslsumm, how='left', left_on=[
'Month', 'Object'], right_on=['Month', 'Object'])
systemsummaries = systemsummaries[[
'Month', 'Cooling Energy (MMBtu)', 'Heating Energy (MMBtu)', 'Object', 'File_x', 'Fan Power (kWh)'
]]
systemsummaries = systemsummaries[[
'Object',
'Month',
'Cooling Energy (MMBtu)',
'Heating Energy (MMBtu)',
'Fan Power (kWh)',
'File_x'
]]
systemsummaries.columns = [
x.replace("File_x", "File") for x in systemsummaries]
return systemsummaries
def monthly_cost_enduse(self):
# only needed for rate parsing by month. otherwise use monthly_enduses
def monthly_vrate_dict(ese):
month_rate_costs = ese.groupby(['Object', 'Month'], sort=False).sum()[
'VIRTUAL RATE ($/UNIT)'].reset_index()
month_rate_cost_dict = {}
for rate in month_rate_costs['Object'].unique():
month_rate_cost_dict[rate] = {}
for month in month_rate_costs['Month'].unique():
month_rate_cost_dict[rate][month] = month_rate_costs[(month_rate_costs['Object'] == rate) & (
month_rate_costs['Month'] == month)]['VIRTUAL RATE ($/UNIT)'].tolist()[0]
return month_rate_cost_dict
ese = self.sim.ese()
psf = self.sim.psf()
vrate_dict = monthly_vrate_dict(ese)
psf = psf[(psf['Cons_Demand'] == 'Consumption')].groupby(
['Object', 'Month'], sort=False).sum().drop('Total', axis=1).reset_index()
enduses = ['Lights', 'Task Lights',
'Misc Equip', 'Space Heating', 'Space Cooling', 'Heat Reject',
'Pumps & Aux', 'Vent Fans', 'Refrig Display', 'Ht Pump Supplem',
'Domest Hot Wtr', 'Ext Usage']
mdict = {}
rate = list(ese.Object)
meters = list(ese.Meters)
for num, mtrlist in enumerate(meters):
for mtr in mtrlist:
mdict[mtr] = rate[num]
def try_rate(x):
try:
return mdict[x]
except:
try:
return mdict[x[0:4]]
except:
return 0
psf['rate'] = psf['Object'].apply(lambda x: try_rate(x))
def try_vrate(x, vrate_dict):
month = x['Month']
rate = x['rate']
try:
byrate = vrate_dict[rate]
bymonth = byrate[month]
return bymonth
except:
return 0
psf['vrate'] = psf.apply(lambda x: try_vrate(x, vrate_dict), axis=1)
cost_monthly_enduse = psf.copy()
for col in cost_monthly_enduse.columns:
try:
cost_monthly_enduse[col] = cost_monthly_enduse[col].astype(
float) * cost_monthly_enduse['vrate'].astype(float)
except:
pass
dflist = []
for meter in cost_monthly_enduse['Object'].unique():
mtrdf = cost_monthly_enduse[cost_monthly_enduse['Object'] == meter]
for use in enduses:
if mtrdf[use].sum() != 0:
series = mtrdf[use]
series.index = mtrdf.Month
series.name = series.name + '-' + meter
dflist.append(mtrdf[use])
cost_df = pd.DataFrame(dflist).T
return cost_df
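    # Convenience wrapper: concatenates report handlers for a list of SIM files.
    # Note that the `rpt` argument is not used by the expression below.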
def simrpt(self, listofsims, rpt):
return pd.concat([RptHandler(sim) for sim in listofsims], axis=0)
| 35.21611
| 125
| 0.476597
|
7850e6e0c8ce721cb49f5063e1ccb2e1bff5d82a
| 19,310
|
py
|
Python
|
pontoon/administration/views.py
|
st-l10n/pontoon
|
87404e1b17285f0e787094538c1fd16e2422cc65
|
[
"BSD-3-Clause"
] | null | null | null |
pontoon/administration/views.py
|
st-l10n/pontoon
|
87404e1b17285f0e787094538c1fd16e2422cc65
|
[
"BSD-3-Clause"
] | null | null | null |
pontoon/administration/views.py
|
st-l10n/pontoon
|
87404e1b17285f0e787094538c1fd16e2422cc65
|
[
"BSD-3-Clause"
] | 3
|
2017-10-15T13:15:47.000Z
|
2018-10-10T09:03:19.000Z
|
import csv
import logging
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.db import transaction, IntegrityError
from django.db.models import Max
from django.http import Http404, HttpResponse, HttpResponseForbidden
from django.shortcuts import render
from django.template.defaultfilters import slugify
from django.utils import timezone
from django.utils.datastructures import MultiValueDictKeyError
from pontoon.administration.forms import (
EntityFormSet,
ExternalResourceInlineFormSet,
ProjectForm,
RepositoryInlineFormSet,
SubpageInlineFormSet,
TagInlineFormSet,
)
from pontoon.base import utils
from pontoon.base.utils import require_AJAX, is_ajax
from pontoon.base.models import (
Entity,
Locale,
Project,
ProjectLocale,
Resource,
TranslatedResource,
Translation,
)
from pontoon.sync.models import SyncLog
from pontoon.sync.tasks import sync_project
from pontoon.pretranslation.tasks import pretranslate
log = logging.getLogger(__name__)
def admin(request):
"""Admin interface."""
if not request.user.has_perm("base.can_manage_project"):
raise PermissionDenied
projects = (
Project.objects.all()
.prefetch_related("latest_translation__user")
.order_by("name")
)
return render(request, "admin.html", {"admin": True, "projects": projects})
def get_slug(request):
"""Convert project name to slug."""
log.debug("Convert project name to slug.")
if not request.user.has_perm("base.can_manage_project"):
log.error("Insufficient privileges.")
return HttpResponse("error")
if not is_ajax(request):
log.error("Non-AJAX request")
return HttpResponse("error")
try:
name = request.GET["name"]
except MultiValueDictKeyError as e:
log.error(str(e))
return HttpResponse("error")
log.debug("Name: " + name)
slug = slugify(name)
log.debug("Slug: " + slug)
return HttpResponse(slug)
@transaction.atomic
def manage_project(request, slug=None, template="admin_project.html"):
"""Admin project."""
log.debug("Admin project.")
if not request.user.has_perm("base.can_manage_project"):
raise PermissionDenied
form = ProjectForm()
subpage_formset = SubpageInlineFormSet()
repo_formset = RepositoryInlineFormSet()
external_resource_formset = ExternalResourceInlineFormSet()
tag_formset = TagInlineFormSet()
locales_readonly = Locale.objects.none()
locales_selected = Locale.objects.none()
subtitle = "Add project"
pk = None
project = None
# Save project
if request.method == "POST":
locales_readonly = Locale.objects.filter(
pk__in=request.POST.getlist("locales_readonly")
)
locales_selected = Locale.objects.filter(
pk__in=request.POST.getlist("locales")
).exclude(pk__in=locales_readonly)
# Update existing project
try:
pk = request.POST["pk"]
project = Project.objects.visible_for(request.user).get(pk=pk)
form = ProjectForm(request.POST, instance=project)
# Needed if form invalid
subpage_formset = SubpageInlineFormSet(request.POST, instance=project)
repo_formset = RepositoryInlineFormSet(request.POST, instance=project)
tag_formset = (
TagInlineFormSet(request.POST, instance=project)
if project.tags_enabled
else None
)
external_resource_formset = ExternalResourceInlineFormSet(
request.POST, instance=project
)
subtitle = "Edit project"
# Add a new project
except MultiValueDictKeyError:
form = ProjectForm(request.POST)
# Needed if form invalid
subpage_formset = SubpageInlineFormSet(request.POST)
repo_formset = RepositoryInlineFormSet(request.POST)
external_resource_formset = ExternalResourceInlineFormSet(request.POST)
tag_formset = None
if form.is_valid():
project = form.save(commit=False)
subpage_formset = SubpageInlineFormSet(request.POST, instance=project)
repo_formset = RepositoryInlineFormSet(request.POST, instance=project)
external_resource_formset = ExternalResourceInlineFormSet(
request.POST, instance=project
)
if tag_formset:
tag_formset = TagInlineFormSet(request.POST, instance=project)
formsets_valid = (
subpage_formset.is_valid()
and repo_formset.is_valid()
and external_resource_formset.is_valid()
and (tag_formset.is_valid() if tag_formset else True)
)
if formsets_valid:
project.save()
# Manually save ProjectLocales due to intermediary model
locales_form = form.cleaned_data.get("locales", [])
locales_readonly_form = form.cleaned_data.get("locales_readonly", [])
locales = locales_form | locales_readonly_form
(
ProjectLocale.objects.filter(project=project)
.exclude(locale__pk__in=[l.pk for l in locales])
.delete()
)
for locale in locales:
ProjectLocale.objects.get_or_create(project=project, locale=locale)
# Update readonly flags
locales_readonly_pks = [l.pk for l in locales_readonly_form]
project_locales = ProjectLocale.objects.filter(project=project)
project_locales.exclude(locale__pk__in=locales_readonly_pks).update(
readonly=False
)
project_locales.filter(locale__pk__in=locales_readonly_pks,).update(
readonly=True
)
subpage_formset.save()
repo_formset.save()
external_resource_formset.save()
if tag_formset:
tag_formset.save()
# If the data source is database and there are new strings, save them.
if project.data_source == "database":
_save_new_strings(project, request.POST.get("new_strings", ""))
_create_or_update_translated_resources(project, locales)
# Properly displays formsets, but removes errors (if valid only)
subpage_formset = SubpageInlineFormSet(instance=project)
repo_formset = RepositoryInlineFormSet(instance=project)
external_resource_formset = ExternalResourceInlineFormSet(
instance=project
)
if project.tags_enabled:
tag_formset = TagInlineFormSet(instance=project)
subtitle += ". Saved."
pk = project.pk
else:
subtitle += ". Error."
else:
subtitle += ". Error."
# If URL specified and found, show edit, otherwise show add form
elif slug is not None:
try:
project = Project.objects.get(slug=slug)
pk = project.pk
form = ProjectForm(instance=project)
subpage_formset = SubpageInlineFormSet(instance=project)
repo_formset = RepositoryInlineFormSet(instance=project)
tag_formset = (
TagInlineFormSet(instance=project) if project.tags_enabled else None
)
external_resource_formset = ExternalResourceInlineFormSet(instance=project)
locales_readonly = Locale.objects.filter(
project_locale__readonly=True, project_locale__project=project,
)
locales_selected = project.locales.exclude(pk__in=locales_readonly)
subtitle = "Edit project"
except Project.DoesNotExist:
form = ProjectForm(initial={"slug": slug})
# Override default label suffix
form.label_suffix = ""
projects = []
for p in Project.objects.prefetch_related("locales").order_by("name"):
projects.append(
{
"name": p.name,
# Cannot use values_list() here, because it hits the DB again
"locales": [l.pk for l in p.locales.all()],
}
)
locales_available = Locale.objects.exclude(pk__in=locales_selected)
# Admins reason in terms of locale codes (see bug 1394194)
locales_readonly = locales_readonly.order_by("code")
locales_selected = locales_selected.order_by("code")
locales_available = locales_available.order_by("code")
data = {
"slug": slug,
"form": form,
"subpage_formset": subpage_formset,
"repo_formset": repo_formset,
"tag_formset": tag_formset,
"external_resource_formset": external_resource_formset,
"locales_readonly": locales_readonly,
"locales_selected": locales_selected,
"locales_available": locales_available,
"subtitle": subtitle,
"pk": pk,
"project": project,
"projects": projects,
}
# Set locale in Translate link
if Resource.objects.filter(project=project).exists() and locales_selected:
locale = (
utils.get_project_locale_from_request(request, project.locales)
or locales_selected[0].code
)
if locale:
data["translate_locale"] = locale
return render(request, template, data)
def _get_project_strings_csv(project, entities, output):
"""Return a CSV content of all strings and translations for a project and locale.
    The file format looks as follows:
    source, locale_code_1, locale_code_2
    "string A", "translation A1", "translation A2"
    "string B", "translation B1", "translation B2"
    The first column has all source strings. Then there is one column per enabled locale, each
    containing available translations for each source string (or an empty cell). The first line
    contains the code of each locale, except for the first cell, which is always "source".
:arg Project project: the project from which to take strings
:arg list entities: the list of all entities of the project
:arg buffer output: a buffer to which the CSV writer will send its data
:returns: the same output object with the CSV data
"""
locales = Locale.objects.filter(project_locale__project=project)
translations = (
Translation.objects.filter(entity__resource__project=project, approved=True,)
.prefetch_related("locale")
.prefetch_related("entity")
)
all_data = dict((x.id, {"source": x.string}) for x in entities)
for translation in translations:
all_data[translation.entity.id][translation.locale.code] = translation.string
headers = ["source"] + [x.code for x in locales]
writer = csv.DictWriter(output, fieldnames=headers)
writer.writeheader()
writer.writerows(all_data.values())
return output
def _get_resource_for_database_project(project):
"""Return the Resource object of an in database project.
If the project has no resource yet, create a new one and return it.
Otherwise, return the existing resource.
Note that a database project should always have only one resource.
:arg Project project: the in-database Project object
:returns: the unique Resource object associated with the project
"""
try:
return Resource.objects.get(project=project,)
except Resource.DoesNotExist:
# There's no resource for that project yet, create one.
resource = Resource(path="database", project=project,)
resource.save()
return resource
except Resource.MultipleObjectsReturned:
# There are several resources for this project, that should not
# be allowed. Log an error and raise.
log.error(
"There is more than 1 Resource for in_database project %s" % project.name
)
raise
def _save_new_strings(project, source):
"""Save a batch of strings into an existing project.
    This function takes a batch of new strings as a blob of text, separates individual
    strings by new lines, and then stores each one as a new source string for the project.
:arg Project project: the Project object to which new strings will be associated
:arg string source: a text of new-line-separated source strings
:returns: True if new strings have been saved, False otherwise
"""
new_strings = source.strip().split("\n")
# Remove empty strings from the list.
new_strings = [x.strip() for x in new_strings if x.strip()]
if new_strings:
# Create a new fake resource for that project.
resource = _get_resource_for_database_project(project)
resource.total_strings = len(new_strings)
resource.save()
# Insert all new strings into Entity objects, associated to the fake resource.
new_entities = []
for index, new_string in enumerate(new_strings):
string = new_string.strip()
new_entities.append(Entity(string=string, resource=resource, order=index))
Entity.objects.bulk_create(new_entities)
return True
return False
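# Ensure a TranslatedResource exists for every (locale, resource) pair of the project
# and refresh its translation statistics.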
def _create_or_update_translated_resources(
project, locales=None, resource=None,
):
if locales is None:
locales = Locale.objects.filter(project_locale__project=project)
if resource is None:
resource = _get_resource_for_database_project(project)
for locale in locales:
tr, created = TranslatedResource.objects.get_or_create(
locale_id=locale.pk, resource=resource,
)
tr.calculate_stats()
def manage_project_strings(request, slug=None):
"""View to manage the source strings of a project.
This view is only accessible for projects that do not have a "Source repo". It allows
admins to add new strings to a project in a batch, and then to edit, remove or comment on
any strings.
"""
if not request.user.has_perm("base.can_manage_project"):
raise PermissionDenied
try:
project = Project.objects.get(slug=slug)
except Project.DoesNotExist:
raise Http404
if project.data_source != "database":
return HttpResponseForbidden(
"Project %s's strings come from a repository, managing strings is forbidden."
% project.name
)
entities = Entity.objects.filter(resource__project=project, obsolete=False)
project_has_strings = entities.exists()
formset = EntityFormSet(queryset=entities)
if request.GET.get("format") == "csv":
# Return a CSV document containing all translations for this project.
response = HttpResponse(content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="%s.csv"' % project.name
return _get_project_strings_csv(project, entities, response)
if request.method == "POST":
if not project_has_strings:
# We are receiving new strings in a batch.
new_strings_source = request.POST.get("new_strings", "")
if _save_new_strings(project, new_strings_source):
project_has_strings = True # we have strings now!
_create_or_update_translated_resources(project)
else:
# Get all strings, find the ones that changed, update them in the database.
formset = EntityFormSet(request.POST, queryset=entities)
if formset.is_valid():
resource = Resource.objects.filter(project=project).first()
entity_max_order = entities.aggregate(Max("order"))["order__max"]
try:
# This line can purposefully cause an exception, and that
# causes trouble in tests, because all tests are
                    # encapsulated in a single transaction. Django thus refuses
                    # to run any other requests after one has failed, until the
                    # end of the transaction.
                    # Using transaction.atomic here is the way to tell Django
                    # that this is fine.
# See https://stackoverflow.com/questions/21458387/
with transaction.atomic():
formset.save()
except IntegrityError:
# This happens when the user creates a new string. By default,
# it has no resource, and that's a violation of the database
# constraints. So, we want to make sure all entries have a resource.
new_entities = formset.save(commit=False)
for entity in new_entities:
if not entity.resource_id:
entity.resource = resource
# We also use this opportunity to give the new entity
# an order.
entity_max_order += 1
entity.order = entity_max_order
# Note that we save all entities one by one. That shouldn't be a problem
# because we don't expect users to change thousands of strings at once.
# Also, django is smart and ``formset.save()`` only returns Entity
# objects that have changed.
entity.save()
# Update stats with the new number of strings.
resource.total_strings = Entity.objects.filter(
resource=resource, obsolete=False
).count()
resource.save()
_create_or_update_translated_resources(project, resource=resource)
# Reinitialize the formset.
formset = EntityFormSet(queryset=entities)
data = {
"project": project,
"entities": entities,
"project_has_strings": project_has_strings,
"entities_form": formset,
}
return render(request, "admin_project_strings.html", data)
@login_required(redirect_field_name="", login_url="/403")
@require_AJAX
def manually_sync_project(request, slug):
if not request.user.has_perm("base.can_manage_project") or not settings.MANUAL_SYNC:
return HttpResponseForbidden(
"Forbidden: You don't have permission for syncing projects"
)
sync_log = SyncLog.objects.create(start_time=timezone.now())
project = Project.objects.get(slug=slug)
sync_project.delay(project.pk, sync_log.pk)
return HttpResponse("ok")
@login_required(redirect_field_name="", login_url="/403")
@require_AJAX
def manually_pretranslate_project(request, slug):
if not request.user.has_perm("base.can_manage_project"):
return HttpResponseForbidden(
"Forbidden: You don't have permission for pretranslating projects"
)
project = Project.objects.get(slug=slug)
pretranslate.delay(project.pk)
return HttpResponse("ok")
| 37.568093
| 96
| 0.64174
|
7f3c03a94ea27b29b2d78944b90cabd88f60cd01
| 1,456
|
py
|
Python
|
supertokens_python/recipe/passwordless/api/phone_number_exists.py
|
SKOOTUK/supertokens-python
|
c0751dd5af911835199223efbe5a91e7c1f1f954
|
[
"Apache-2.0"
] | null | null | null |
supertokens_python/recipe/passwordless/api/phone_number_exists.py
|
SKOOTUK/supertokens-python
|
c0751dd5af911835199223efbe5a91e7c1f1f954
|
[
"Apache-2.0"
] | 3
|
2022-02-21T18:42:06.000Z
|
2022-03-04T11:54:15.000Z
|
supertokens_python/recipe/passwordless/api/phone_number_exists.py
|
SKOOTUK/supertokens-python
|
c0751dd5af911835199223efbe5a91e7c1f1f954
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021, VRAI Labs and/or its affiliates. All rights reserved.
#
# This software is licensed under the Apache License, Version 2.0 (the
# "License") as published by the Apache Software Foundation.
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from supertokens_python.exceptions import raise_bad_input_exception
from supertokens_python.recipe.passwordless.interfaces import (APIInterface,
APIOptions)
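# GET handler backing the "does this phone number already have an account?" check:
# it requires a `phoneNumber` query parameter and delegates to the recipe's API
# implementation, returning its result as JSON.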
async def phone_number_exists(api_implementation: APIInterface, api_options: APIOptions):
if api_implementation.disable_phone_number_exists_get:
return None
phone_number = api_options.request.get_query_param('phoneNumber')
if phone_number is None:
raise_bad_input_exception(
'Please provide the phoneNumber as a GET param')
result = await api_implementation.phone_number_exists_get(phone_number, api_options, {})
api_options.response.set_json_content(result.to_json())
return api_options.response
| 45.5
| 92
| 0.751374
|
e5b826213bf4c19b41b7f78b1a83bd00a90c7462
| 2,297
|
py
|
Python
|
sim2real_drone_racing/learning/deep_drone_racing_learner/src/ddr_learner/common_flags.py
|
tianqi-wang1996/DeepRobustDroneRacing
|
a6eeaea1263a71a0fae8b35f53e8938daa966bfb
|
[
"MIT"
] | 2
|
2020-11-24T11:16:48.000Z
|
2021-04-02T05:14:44.000Z
|
sim2real_drone_racing/learning/deep_drone_racing_learner/src/ddr_learner/common_flags.py
|
tianqi-wang1996/DeepRobustDroneRacing
|
a6eeaea1263a71a0fae8b35f53e8938daa966bfb
|
[
"MIT"
] | 1
|
2021-02-19T14:31:39.000Z
|
2021-12-24T02:55:39.000Z
|
sim2real_drone_racing/learning/deep_drone_racing_learner/src/ddr_learner/common_flags.py
|
tianqi-wang1996/DeepRobustDroneRacing
|
a6eeaea1263a71a0fae8b35f53e8938daa966bfb
|
[
"MIT"
] | 3
|
2021-03-24T08:34:25.000Z
|
2022-02-28T07:17:54.000Z
|
import gflags
FLAGS = gflags.FLAGS
# Train parameters
gflags.DEFINE_integer('img_width', 300, 'Target Image Width')
gflags.DEFINE_integer('img_height', 200, 'Target Image Height')
gflags.DEFINE_integer('batch_size', 32, 'Batch size in training and evaluation')
gflags.DEFINE_float("learning_rate", 0.001, "Learning rate of for adam")
gflags.DEFINE_float("beta1", 0.9, "Momentum term of adam")
gflags.DEFINE_float("f", 1.0, "Model Width, float in [0,1]")
# final version
# gflags.DEFINE_integer('output_dim', 3, "Number of output dimensionality")
gflags.DEFINE_integer('output_dim', 3, "Number of output dimensionality")
gflags.DEFINE_string('train_dir', "../../data/Training", 'Folder containing'
' training experiments')
gflags.DEFINE_string('val_dir', "../../data/Testing", 'Folder containing'
' validation experiments')
gflags.DEFINE_string('checkpoint_dir', "/tmp/debug_learning", "Directory name to"
"save checkpoints and logs.")
# Input Queues reading
gflags.DEFINE_integer('num_threads', 8, 'Number of threads reading and '
'(optionally) preprocessing input files into queues')
gflags.DEFINE_integer('capacity_queue', 100, 'Capacity of input queue. A high '
'number speeds up computation but requires more RAM')
# Log parameters
gflags.DEFINE_integer("max_epochs", 100, "Maximum number of training epochs")
gflags.DEFINE_bool('resume_train', False, 'Whether to restore a trained'
' model for training')
gflags.DEFINE_integer("summary_freq", 100, "Logging every log_freq iterations")
gflags.DEFINE_integer("save_latest_freq", 100, \
"Save the latest model every save_latest_freq iterations"
"(overwrites the previous latest model)")
# Testing parameters
gflags.DEFINE_string('test_dir', "../../data/validation_sim2real/beauty", 'Folder containing'
' testing experiments')
gflags.DEFINE_string('output_dir', "./tests/test_0", 'Folder containing'
' testing experiments')
gflags.DEFINE_string("ckpt_file", "./results/best_model/model-2", "Checkpoint file")
gflags.DEFINE_integer('test_img_width', 300, 'Target Image Width')
gflags.DEFINE_integer('test_img_height', 200, 'Target Image Height')
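# Hedged sketch (editor's assumption, not part of the original file): these flags are
# normally consumed by a training entry point that parses the command line once and
# then reads values as attributes, e.g.:
#
#   import sys
#   remaining_argv = FLAGS(sys.argv)   # parses --img_width, --train_dir, ...
#   print(FLAGS.img_width, FLAGS.checkpoint_dir)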
| 47.854167
| 93
| 0.705703
|
894865140b3c7d224a0c8df6596fb455c64ab484
| 2,974
|
py
|
Python
|
pylearn2/scripts/datasets/make_stl10_patches.py
|
ikervazquezlopez/Pylearn2
|
2971e8f64374ffde572d4cf967aad5342beaf5e0
|
[
"BSD-3-Clause"
] | 2,045
|
2015-01-01T14:07:52.000Z
|
2022-03-08T08:56:41.000Z
|
pylearn2/scripts/datasets/make_stl10_patches.py
|
ikervazquezlopez/Pylearn2
|
2971e8f64374ffde572d4cf967aad5342beaf5e0
|
[
"BSD-3-Clause"
] | 305
|
2015-01-02T13:18:24.000Z
|
2021-08-20T18:03:28.000Z
|
pylearn2/scripts/datasets/make_stl10_patches.py
|
ikervazquezlopez/Pylearn2
|
2971e8f64374ffde572d4cf967aad5342beaf5e0
|
[
"BSD-3-Clause"
] | 976
|
2015-01-01T17:08:51.000Z
|
2022-03-25T19:53:17.000Z
|
"""
This script makes a dataset of two million approximately whitened patches,
extracted at random uniformly from a downsampled version of the STL-10
unlabeled and train dataset.
It assumes that you have already run make_downsampled_stl10.py,
which downsamples the STL-10 images to 1/3 of their original resolution.
This script is intended to reproduce the preprocessing used by Adam Coates
et al. in their work from the first half of 2011.
"""
from __future__ import print_function
from pylearn2.utils import serial
from pylearn2.datasets import preprocessing
from pylearn2.utils import string_utils as string
import numpy as np
import textwrap
def main():
data_dir = string.preprocess('${PYLEARN2_DATA_PATH}/stl10')
    print('Loading STL-10 unlabeled and train datasets...')
downsampled_dir = data_dir + '/stl10_32x32'
data = serial.load(downsampled_dir + '/unlabeled.pkl')
supplement = serial.load(downsampled_dir + '/train.pkl')
print('Concatenating datasets...')
data.set_design_matrix(np.concatenate((data.X, supplement.X), axis=0))
del supplement
print("Preparing output directory...")
patch_dir = data_dir + '/stl10_patches'
serial.mkdir(patch_dir)
README = open(patch_dir + '/README', 'w')
README.write(textwrap.dedent("""
The .pkl files in this directory may be opened in python using
cPickle, pickle, or pylearn2.serial.load.
data.pkl contains a pylearn2 Dataset object defining an unlabeled
dataset of 2 million 6x6 approximately whitened, contrast-normalized
patches drawn uniformly at random from a downsampled (to 32x32)
version of the STL-10 train and unlabeled datasets.
preprocessor.pkl contains a pylearn2 Pipeline object that was used
to extract the patches and approximately whiten / contrast normalize
them. This object is necessary when extracting features for
supervised learning or test set classification, because the
extracted features must be computed using inputs that have been
whitened with the ZCA matrix learned and stored by this Pipeline.
They were created with the pylearn2 script make_stl10_patches.py.
All other files in this directory, including this README, were
created by the same script and are necessary for the other files
to function correctly.
"""))
README.close()
print("Preprocessing the data...")
pipeline = preprocessing.Pipeline()
pipeline.items.append(preprocessing.ExtractPatches(patch_shape=(6, 6),
num_patches=2*1000*1000))
pipeline.items.append(
preprocessing.GlobalContrastNormalization(use_std=True, sqrt_bias=10.))
pipeline.items.append(preprocessing.ZCA())
data.apply_preprocessor(preprocessor=pipeline, can_fit=True)
data.use_design_loc(patch_dir + '/data.npy')
serial.save(patch_dir + '/data.pkl', data)
serial.save(patch_dir + '/preprocessor.pkl', pipeline)
if __name__ == "__main__":
main()
| 36.716049
| 79
| 0.742771
|
acd5dc9089b165b51be2e386ebb7dbae59c46384
| 1,831
|
py
|
Python
|
test/chemistry/test_uccsd_hartree_fock.py
|
Sahar2/qiskit-aqua
|
a228fbe6b9613cff43e47796a7e4843deba2b051
|
[
"Apache-2.0"
] | null | null | null |
test/chemistry/test_uccsd_hartree_fock.py
|
Sahar2/qiskit-aqua
|
a228fbe6b9613cff43e47796a7e4843deba2b051
|
[
"Apache-2.0"
] | null | null | null |
test/chemistry/test_uccsd_hartree_fock.py
|
Sahar2/qiskit-aqua
|
a228fbe6b9613cff43e47796a7e4843deba2b051
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Test of UCCSD and HartreeFock Aqua extensions.
"""
from test.chemistry.common import QiskitChemistryTestCase
from qiskit.chemistry import QiskitChemistry
# from qiskit.chemistry import set_qiskit_chemistry_logging
# import logging
class TestUCCSDHartreeFock(QiskitChemistryTestCase):
"""Test for these aqua extensions."""
def setUp(self):
super().setUp()
self.config = {'driver': {'name': 'HDF5'},
'hdf5': {'hdf5_input': self._get_resource_path('test_driver_hdf5.hdf5')},
'operator': {'name': 'hamiltonian', 'qubit_mapping': 'parity', 'two_qubit_reduction': True},
'algorithm': {'name': 'VQE', 'operator_mode': 'matrix'},
'optimizer': {'name': 'SLSQP', 'maxiter': 100},
'variational_form': {'name': 'UCCSD'},
'initial_state': {'name': 'HartreeFock'},
'backend': {'provider': 'qiskit.BasicAer', 'name': 'statevector_simulator'}}
self.reference_energy = -1.1373060356951838
pass
def test_uccsd_hf(self):
# set_qiskit_chemistry_logging(logging.DEBUG)
solver = QiskitChemistry()
result = solver.run(self.config)
self.assertAlmostEqual(result['energy'], self.reference_energy, places=6)
| 39.804348
| 115
| 0.64828
|
673a3d9582101630594ed7d40964c9ec0b101133
| 6,595
|
py
|
Python
|
Scripts_Model/old/scripts_pytorch/alexnet_pytorch.py
|
zhangziyezzy/DeepLearningMugenKnock
|
e306f436fb41b5549d0adf9ad331d638e5906e29
|
[
"MIT"
] | 10
|
2021-12-17T06:07:25.000Z
|
2022-03-25T13:50:05.000Z
|
Scripts_Model/old/scripts_pytorch/alexnet_pytorch.py
|
karaage0703/DeepLearningMugenKnock
|
26830fe049c7da8001977ca0df12e946c0f030eb
|
[
"MIT"
] | null | null | null |
Scripts_Model/old/scripts_pytorch/alexnet_pytorch.py
|
karaage0703/DeepLearningMugenKnock
|
26830fe049c7da8001977ca0df12e946c0f030eb
|
[
"MIT"
] | 2
|
2022-03-15T02:42:09.000Z
|
2022-03-30T23:19:55.000Z
|
import torch
import torch.nn.functional as F
import argparse
import cv2
import numpy as np
from glob import glob
num_classes = 2
img_height, img_width = 227, 227
channel = 3
GPU = False
torch.manual_seed(0)
class AlexNet(torch.nn.Module):
def __init__(self):
super(AlexNet, self).__init__()
self.conv1 = torch.nn.Conv2d(channel, 96, kernel_size=11, padding=0, stride=4)
self.conv2 = torch.nn.Conv2d(96, 256, kernel_size=5, padding=1)
self.conv3 = torch.nn.Conv2d(256, 384, kernel_size=3, padding=1)
self.conv4 = torch.nn.Conv2d(384, 384, kernel_size=3, padding=1)
self.conv5 = torch.nn.Conv2d(384, 256, kernel_size=3, padding=1)
self.fc1 = torch.nn.Linear(6*6*256, 4096)
self.fc2 = torch.nn.Linear(4096, 4096)
self.fc_out = torch.nn.Linear(4096, num_classes)
def forward(self, x):
x = F.relu(self.conv1(x))
x = torch.nn.modules.normalization.LocalResponseNorm(size=1)(x)
x = F.max_pool2d(x, 3, stride=2)
x = F.relu(self.conv2(x))
x = torch.nn.modules.normalization.LocalResponseNorm(size=1)(x)
x = F.max_pool2d(x, 3, stride=2)
x = F.max_pool2d(x, 2)
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = F.relu(self.conv5(x))
x = x.view(-1, 6*6*256)
x = F.relu(self.fc1(x))
x = torch.nn.Dropout()(x)
x = F.relu(self.fc2(x))
x = torch.nn.Dropout()(x)
x = self.fc_out(x)
x = F.softmax(x, dim=1)
return x
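# Hedged sketch (editor's addition, not in the original script): a minimal shape
# sanity-check of the forward pass. With 227x227 inputs the tensor entering fc1 is
# 6*6*256, so the softmax output has shape (batch, num_classes).
def _smoke_test_forward():
    model = AlexNet()
    dummy = torch.zeros((2, channel, img_height, img_width), dtype=torch.float)
    out = model(dummy)
    assert out.shape == (2, num_classes)
    return out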
CLS = ['akahara', 'madara']
# get train data
def data_load(path, hf=False, vf=False, rot=False):
xs = []
ts = []
paths = []
for dir_path in glob(path + '/*'):
for path in glob(dir_path + '/*'):
x = cv2.imread(path)
x = cv2.resize(x, (img_width, img_height)).astype(np.float32)
x /= 255.
x = x[..., ::-1]
xs.append(x)
for i, cls in enumerate(CLS):
if cls in path:
t = i
ts.append(t)
paths.append(path)
if hf:
xs.append(x[:, ::-1])
ts.append(t)
paths.append(path)
if vf:
xs.append(x[::-1])
ts.append(t)
paths.append(path)
if hf and vf:
xs.append(x[::-1, ::-1])
ts.append(t)
paths.append(path)
if rot != False:
angle = rot
scale = 1
# show
a_num = 360 // rot
w_num = np.ceil(np.sqrt(a_num))
h_num = np.ceil(a_num / w_num)
count = 1
#plt.subplot(h_num, w_num, count)
#plt.axis('off')
#plt.imshow(x)
#plt.title("angle=0")
while angle < 360:
_h, _w, _c = x.shape
max_side = max(_h, _w)
tmp = np.zeros((max_side, max_side, _c))
tx = int((max_side - _w) / 2)
ty = int((max_side - _h) / 2)
tmp[ty: ty+_h, tx: tx+_w] = x.copy()
M = cv2.getRotationMatrix2D((max_side/2, max_side/2), angle, scale)
_x = cv2.warpAffine(tmp, M, (max_side, max_side))
_x = _x[tx:tx+_w, ty:ty+_h]
                    xs.append(_x)  # append the rotated image
ts.append(t)
paths.append(path)
# show
#count += 1
#plt.subplot(h_num, w_num, count)
#plt.imshow(_x)
#plt.axis('off')
#plt.title("angle={}".format(angle))
angle += rot
#plt.show()
xs = np.array(xs, dtype=np.float32)
ts = np.array(ts, dtype=np.int)
xs = xs.transpose(0,3,1,2)
return xs, ts, paths
# train
def train():
# GPU
device = torch.device("cuda" if GPU else "cpu")
# model
model = AlexNet().to(device)
opt = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
model.train()
xs, ts, paths = data_load('../Dataset/train/images/', hf=True, vf=True)
# training
mb = 16
mbi = 0
train_ind = np.arange(len(xs))
np.random.seed(0)
np.random.shuffle(train_ind)
loss_fn = torch.nn.NLLLoss()
for i in range(500):
if mbi + mb > len(xs):
mb_ind = train_ind[mbi:]
np.random.shuffle(train_ind)
mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
else:
mb_ind = train_ind[mbi: mbi+mb]
mbi += mb
x = torch.tensor(xs[mb_ind], dtype=torch.float).to(device)
t = torch.tensor(ts[mb_ind], dtype=torch.long).to(device)
opt.zero_grad()
y = model(x)
loss = loss_fn(torch.log(y), t)
loss.backward()
opt.step()
pred = y.argmax(dim=1, keepdim=True)
acc = pred.eq(t.view_as(pred)).sum().item() / mb
print("iter >>", i+1, ',loss >>', loss.item(), ',accuracy >>', acc)
torch.save(model.state_dict(), 'cnn.pt')
# test
def test():
device = torch.device("cuda" if GPU else "cpu")
model = AlexNet().to(device)
model.eval()
model.load_state_dict(torch.load('cnn.pt'))
xs, ts, paths = data_load('../Dataset/test/images/')
with torch.no_grad():
for i in range(len(paths)):
x = xs[i]
t = ts[i]
path = paths[i]
x = np.expand_dims(x, axis=0)
x = torch.tensor(x, dtype=torch.float).to(device)
pred = model(x)
pred = F.softmax(pred, dim=1).detach().cpu().numpy()[0]
print("in {}, predicted probabilities >> {}".format(path, pred))
def arg_parse():
parser = argparse.ArgumentParser(description='CNN implemented with Keras')
parser.add_argument('--train', dest='train', action='store_true')
parser.add_argument('--test', dest='test', action='store_true')
args = parser.parse_args()
return args
# main
if __name__ == '__main__':
args = arg_parse()
if args.train:
train()
if args.test:
test()
if not (args.train or args.test):
print("please select train or test flag")
print("train: python main.py --train")
print("test: python main.py --test")
print("both: python main.py --train --test")
| 29.441964
| 87
| 0.498863
|
09cd12d84b0649077032851c4ad4adeb825703a4
| 21
|
py
|
Python
|
project_pist_v1/models/__init__.py
|
Herosbrine/odoo_project_
|
69066bd7bf172f8d19bdd156be6cc9b2975dd638
|
[
"Apache-2.0"
] | null | null | null |
project_pist_v1/models/__init__.py
|
Herosbrine/odoo_project_
|
69066bd7bf172f8d19bdd156be6cc9b2975dd638
|
[
"Apache-2.0"
] | null | null | null |
project_pist_v1/models/__init__.py
|
Herosbrine/odoo_project_
|
69066bd7bf172f8d19bdd156be6cc9b2975dd638
|
[
"Apache-2.0"
] | null | null | null |
from . import projet
| 10.5
| 20
| 0.761905
|
dcdde55589dbbb4fec65f77d72648deb6e3f6cdc
| 3,002
|
py
|
Python
|
nugoo/apps/people/migrations/0001_initial.py
|
edcwkim/nugoo
|
2a509310f8fe93aa631fc53a50e9634c16cf5045
|
[
"Apache-2.0"
] | 2
|
2022-03-11T01:29:27.000Z
|
2022-03-11T01:29:50.000Z
|
nugoo/apps/people/migrations/0001_initial.py
|
edcwkim/nugoo
|
2a509310f8fe93aa631fc53a50e9634c16cf5045
|
[
"Apache-2.0"
] | null | null | null |
nugoo/apps/people/migrations/0001_initial.py
|
edcwkim/nugoo
|
2a509310f8fe93aa631fc53a50e9634c16cf5045
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-26 05:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import nugoo.apps.people.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='이름')),
('reference', models.URLField(blank=True, verbose_name='참조 자료')),
],
),
migrations.CreateModel(
name='Hashtag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=10, verbose_name='이름')),
],
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=60, verbose_name='이름')),
('photo', models.ImageField(blank=True, upload_to=nugoo.apps.people.models.Person.get_photo_upload_to, verbose_name='사진')),
],
),
migrations.CreateModel(
name='PersonToEvent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='person_throughs', to='people.Event', verbose_name='사건')),
('hashtags', models.ManyToManyField(blank=True, related_name='person_to_event_set', to='people.Hashtag', verbose_name='해시태그')),
('person', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='event_throughs', to='people.Person', verbose_name='인물')),
],
),
migrations.CreateModel(
name='Stat',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=10, verbose_name='이름')),
('value', models.SmallIntegerField(verbose_name='능력치 값')),
('person', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='stat_set', to='people.Person', verbose_name='인물')),
],
),
migrations.AddField(
model_name='person',
name='events',
field=models.ManyToManyField(blank=True, related_name='person_set', through='people.PersonToEvent', to='people.Event', verbose_name='사건들'),
),
]
| 46.184615
| 185
| 0.606929
|
028abd1485669ca41b67e3b4e22552a7a817293a
| 194
|
py
|
Python
|
mobile.py
|
Abhishikta456/Normal-distribution
|
46d6fae088c10e5d4560a08bd5fcccb5b98042c0
|
[
"MIT"
] | null | null | null |
mobile.py
|
Abhishikta456/Normal-distribution
|
46d6fae088c10e5d4560a08bd5fcccb5b98042c0
|
[
"MIT"
] | null | null | null |
mobile.py
|
Abhishikta456/Normal-distribution
|
46d6fae088c10e5d4560a08bd5fcccb5b98042c0
|
[
"MIT"
] | null | null | null |
import plotly.figure_factory as ff
import pandas as pd
import csv
df = pd.read_csv("data.csv")
fig = ff.create_distplot([df["Avg Rating"].tolist()], ["Avg Rating"], show_hist=False)
fig.show()
| 24.25
| 86
| 0.726804
|
d0b380689ec8268a8008a146b0e5157af37d28e0
| 10,200
|
py
|
Python
|
pyathena/sixray_test/sixray_test.py
|
changgoo/pyathena-1
|
c461ac3390d773537ce52393e3ebf68a3282aa46
|
[
"MIT"
] | null | null | null |
pyathena/sixray_test/sixray_test.py
|
changgoo/pyathena-1
|
c461ac3390d773537ce52393e3ebf68a3282aa46
|
[
"MIT"
] | null | null | null |
pyathena/sixray_test/sixray_test.py
|
changgoo/pyathena-1
|
c461ac3390d773537ce52393e3ebf68a3282aa46
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from ..load_sim import LoadSimAll
def scp_to_pc(source, target='NEWCOOL',
hostname='kimjg.astro.princeton.edu', username='jgkim'):
"""Function to copy files to my directory
"""
from paramiko import SSHClient
from scp import SCPClient
if target == 'NEWCOOL':
target = '~/Dropbox/Apps/Overleaf/NEWCOOL/figures'
elif target == 'GMC-MHD':
target = '~/Dropbox/Apps/Overleaf/GMC-MHD/figures'
try:
client = SSHClient()
client.load_system_host_keys()
client.connect(hostname,username=username)
with SCPClient(client.get_transport()) as scp:
scp.put(source, target)
finally:
if client:
client.close()
def get_data_all(cool=False, dust_model='WD01'):
# Load data and compute abundances and cooling rates
models = dict(
Unshld_CRvar_Z1='/tigress/jk11/NEWCOOL/Unshld.CRvar.Z1/',
#Unshld_CRvar_Z1='/tiger/scratch/gpfs/jk11/NEWCOOL/Unshld.CRvar.Z1.again/',
Unshld_CRconst_Z1='/tigress/jk11/NEWCOOL/Unshld.CRconst.Z1/',
Jeans_CRvar_Z1='/tigress/jk11/NEWCOOL/Jeans.CRvar.Z1/')
sa = LoadSimAll(models)
da = dict()
print('[get_data_all] reading data:', end=' ')
for i, mdl in enumerate(sa.models):
print(mdl, end=' ')
s = sa.set_model(mdl)
da[mdl] = get_data(s, s.nums[-2], sel_kwargs=dict(z=0, method='nearest'),
cool=cool, dust_model=dust_model)
return sa, da
def get_data(s, num, sel_kwargs=dict(), cool=True, dust_model='WD01'):
D0 = 5.7e-11 # Dissociation rate for unshielded ISRF [s^-1]
ds = s.load_vtk(num)
if not 'CR_ionization_rate' in ds.field_list:
dd = ds.get_field(['nH','nH2','nHI','xH2','xHII','xe',
'xHI','xCII','chi_PE_ext',
'chi_LW_ext','chi_H2_ext','chi_CI_ext',
'T','pok','cool_rate','heat_rate'])
dd = dd.assign(xi_CR=dd['z']*0.0 + s.par['problem']['xi_CR0'])
else:
dd = ds.get_field(['nH','nH2','nHI','xH2','xHII','xe',
'xHI','xCII','chi_PE_ext', 'xi_CR',
'chi_LW_ext','chi_H2_ext','chi_CI_ext',
'T','pok','cool_rate','heat_rate'])
print('name:',s.basename, end=' ')
print('time:',ds.domain['time'])
from pyathena.microphysics.cool import \
get_xCO, heatPE, heatPE_BT94, heatPE_W03,\
heatCR, heatH2form, heatH2pump, heatH2diss,\
coolCII, coolOI, coolRec, coolRec_BT94, coolRec_W03,\
coolLya, coolCI, coolCO
Z_d = s.par['problem']['Z_dust']
Z_g = s.par['problem']['Z_gas']
xCstd = s.par['cooling']['xCstd']
xOstd = s.par['cooling']['xOstd']
xCO, ncrit = get_xCO(dd.nH, dd.xH2, dd.xCII, Z_d, Z_g,
dd['xi_CR'], dd['chi_LW_ext'], xCstd)
dd['xCO'] = xCO
dd['ncrit'] = ncrit
dd['xOI'] = np.maximum(0.0, xOstd*Z_g - dd['xCO'])
dd['xCI'] = np.maximum(0.0, xCstd*Z_g - dd.xCII - dd.xCO)
# Set nH and chi_PE as new dimensions
log_nH = np.log10(dd.sel(z=0,y=0,method='nearest')['nH'].data)
log_chi_PE = np.log10(dd.sel(z=0,x=0,method='nearest')['chi_PE_ext'].data)
dd = dd.rename(dict(x='log_nH'))
dd = dd.assign_coords(dict(log_nH=log_nH))
dd = dd.rename(dict(y='log_chi_PE'))
dd = dd.assign_coords(dict(log_chi_PE=log_chi_PE))
#dd = dd.drop(['nH'])
#dd = dd.drop(['y'])
# dd = dd.rename(dict(y='log_chi_PE', chi_PE_ext='chi_PE'))
# dd = dd.assign_coords(dict(log_chi_PE=log_chi_PE))
# print(sel_kwargs)
d = dd.sel(**sel_kwargs)
# Calculate heat/cool rates
if cool:
if dust_model == 'BT94':
d['heatPE'] = heatPE_BT94(d['nH'], d['T'], d['xe'], Z_d, d['chi_PE_ext'])
elif dust_model == 'W03':
d['heatPE'] = heatPE_W03(d['nH'], d['T'], d['xe'], Z_d, d['chi_PE_ext'])
else:
d['heatPE'] = heatPE(d['nH'], d['T'], d['xe'], Z_d, d['chi_PE_ext'])
d['heatCR'] = heatCR(d['nH'], d['xe'], d['xHI'], d['xH2'], d['xi_CR'])
d['heatH2pump'] = heatH2pump(d['nH'], d['T'], d['xHI'], d['xH2'], d['chi_H2_ext']*D0)
d['heatH2form'] = heatH2form(d['nH'], d['T'], d['xHI'], d['xH2'], Z_d)
d['heatH2diss'] = heatH2diss(d['xH2'], d['chi_H2_ext']*D0)
d['coolCII'] = coolCII(d['nH'],d['T'],d['xe'],d['xHI'],d['xH2'],d['xCII'])
d['coolOI'] = coolOI(d['nH'],d['T'],d['xe'],d['xHI'],d['xH2'],d['xOI'])
if dust_model == 'BT94':
d['coolRec'] = coolRec_BT94(d['nH'],d['T'],d['xe'],Z_d,d['chi_PE_ext'])
elif dust_model == 'W03':
d['coolRec'] = coolRec_W03(d['nH'],d['T'],d['xe'],Z_d,d['chi_PE_ext'])
else:
d['coolRec'] = coolRec(d['nH'],d['T'],d['xe'],Z_d,d['chi_PE_ext'])
d['coolLya'] = coolLya(d['nH'],d['T'],d['xe'],d['xHI'])
d['coolCI'] = coolCI(d['nH'],d['T'],d['xe'],d['xHI'],d['xH2'],d['xCI'])
d['coolCO'] = coolCO(d['nH'],d['T'],d['xe'],d['xHI'],d['xH2'],d['xCO'],3e-14)
d['cool'] = d['coolCI']+d['coolCII']+d['coolOI']+d['coolRec']+d['coolLya']+d['coolCO']
d['heat'] = d['heatPE']+d['heatCR'] + d['heatH2pump']
# Note that G_0 is in Habing units
d['charging'] = 1.7*d['chi_PE_ext']*d['T']**0.5/(d['nH']*d['xe'])
return d
def get_PTn_at_Pminmax(d, jump=1, kernel_width=12):
from scipy import interpolate
from scipy.signal import find_peaks
from astropy.convolution import Gaussian1DKernel, Box1DKernel, convolve
x = np.linspace(np.log10(d['nH']).min(),np.log10(d['nH']).max(),1200)
    # Currently, the cooling solver produces glitches (should be fixed),
    # so we need to smooth the data
gP = Gaussian1DKernel(kernel_width)
gT = Gaussian1DKernel(kernel_width)
#gT = Box1DKernel(15)
Pmin = np.zeros_like(d['log_chi_PE'][::jump])
Pmax = np.zeros_like(d['log_chi_PE'][::jump])
Tmin = np.zeros_like(d['log_chi_PE'][::jump])
Tmax = np.zeros_like(d['log_chi_PE'][::jump])
nmin = np.zeros_like(d['log_chi_PE'][::jump])
nmax = np.zeros_like(d['log_chi_PE'][::jump])
for i, log_chi_PE in enumerate(d['log_chi_PE'].data[::jump]):
dd = d.sel(log_chi_PE=float(log_chi_PE), method='nearest')
fP = interpolate.interp1d(np.log10(dd['nH']), np.log10(dd['pok']), kind='cubic')
fT = interpolate.interp1d(np.log10(dd['nH']), np.log10(dd['T']), kind='cubic')
yP = convolve(fP(x), gP, boundary='fill', fill_value=np.nan)
yT = convolve(fT(x), gT, boundary='fill', fill_value=np.nan)
try:
ind1 = find_peaks(-yP)[0]
ind2 = find_peaks(yP)[0]
# print(ind1,ind2)
if len(ind1) > 1:
print('Multiple local minimum log_chi,idx:',log_chi_PE,ind1)
i1 = ind1[0]
else:
i1 = ind1[0]
if len(ind2) > 1:
print('Multiple local maximum log_chi,idx:',log_chi_PE,ind2)
i2 = ind2[0]
else:
i2 = ind2[0]
Pmin[i] = 10.0**float(yP[i1])
Pmax[i] = 10.0**float(yP[i2])
Tmin[i] = 10.0**float(yT[i1])
Tmax[i] = 10.0**float(yT[i2])
nmin[i] = 10.0**float(x[i1])
nmax[i] = 10.0**float(x[i2])
except IndexError:
# print('Failed to find Pmin/Pmax, log_chi_PE:',log_chi_PE)
pass
# break
r = dict()
r['Pmin'] = Pmin
r['Pmax'] = Pmax
r['Tmin'] = Tmin
r['Tmax'] = Tmax
r['nmin'] = nmin
r['nmax'] = nmax
return r
def plt_nP_nT(da, model, savefig=True):
# Plot equilibrium density pressure and temperature relation
cmap = mpl.cm.viridis
norm = mpl.colors.Normalize(-1,3.7)
fig, axes = plt.subplots(2, 1, figsize=(7, 10), sharex=False)
axes = axes.flatten()
dd = da[model]
log_chi = np.array([-1.0,0.0,1.0,2.0,3.0])
lw = [1.5,3,1.5,1.5,1.5]
labels = [r'$\chi=0.1$',r'$\chi=1$',r'$\chi=10$',r'$\chi=10^2$',r'$\chi=10^3$']
for i,log_chi_PE_ in enumerate(log_chi):
d_ = dd.sel(log_chi_PE=log_chi_PE_, method='nearest')
plt.sca(axes[0])
l, = plt.loglog(d_['nH'], d_['pok'], label=labels[i],
c=cmap(norm(log_chi_PE_)), lw=lw[i])
plt.sca(axes[1])
plt.loglog(d_['nH'],d_['T'], c=l.get_color(), lw=lw[i])
plt.sca(axes[0])
plt.ylim(1e2,1e8)
plt.sca(axes[1])
plt.ylim(1e1,3e4)
for ax in axes:
ax.set_xlim(1e-2,1e5)
ax.xaxis.set_ticks_position('both')
ax.yaxis.set_ticks_position('both')
ax.grid()
for ax in axes:
ax.set_xlabel(r'$n_{\rm H}\;[{\rm cm}^{-3}]$')
axes[0].set_ylabel(r'$P/k_{\rm B}\;[{\rm K\,cm^{-3}}]$')
axes[1].set_ylabel(r'$T\;[{\rm K}]$')
if model == 'Unshld_CRvar_Z1':
# Label chi manually using plt.text()
logchi = [-1,0,1,2,3]
texts = [r'$0.1$',r'$\chi=1$',r'$10$',r'$10^2$',r'$10^3$']
xpos = [1, 22, 1e2, 0.8e3, 5e3]
ypos = [4e2, 4e3, 4.5e4, 7e5, 1.5e7]
for x,y,text,logchi_ in zip(xpos,ypos,texts,logchi):
axes[0].text(x, y, text,
verticalalignment='bottom', ha='right',
# transform=ax.transAxes,
color=cmap(norm(logchi_)), fontsize=14)
else:
# Label using legend
axes[0].legend(ncol=3, labelspacing=0.01)# , bbox_to_anchor=(0.85, 1.5)
# Add suptitle
if model == 'Unshld_CRvar_Z1':
suptitle = r'$\xi/\chi = 2\times 10^{-16}\,{\rm s}^{-1}$'
elif model == 'Unshld_CRconst_Z1':
suptitle = r'$\xi = 2\times 10^{-16}\,{\rm s}^{-1}$'
else:
suptitle = model
fig.suptitle(suptitle, x=0.55, ha='center', va='bottom')
plt.tight_layout()
if savefig:
savname = '/tigress/jk11/figures/NEWCOOL/paper/fig-equil-{0:s}.png'.format(model)
plt.savefig(savname, dpi=200, bbox_inches='tight')
scp_to_pc(savname)
print('saved to',savname)
return fig
| 38.202247
| 94
| 0.546961
|
e93310b4b53f187957bc89d4b0c2f6a59d4ab38e
| 5,415
|
py
|
Python
|
src/old/database/DbInitializer.py
|
ytyaru0/Github.Contributioner.20180303140000
|
950e2c932a1c594408648331e0ac0f3682c4983a
|
[
"CC0-1.0"
] | null | null | null |
src/old/database/DbInitializer.py
|
ytyaru0/Github.Contributioner.20180303140000
|
950e2c932a1c594408648331e0ac0f3682c4983a
|
[
"CC0-1.0"
] | null | null | null |
src/old/database/DbInitializer.py
|
ytyaru0/Github.Contributioner.20180303140000
|
950e2c932a1c594408648331e0ac0f3682c4983a
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
import os.path
from setting.Config import Config
import dataset
import glob
# Abstract base class
class DbInitializer(metaclass=ABCMeta):
def __init__(self):
self._path_dir_root = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))
self._path_dir_this = os.path.abspath(os.path.dirname(__file__))
self.__db = None
def Initialize(self):
self._CreateDb()
self._ConnectDb()
self.Db.query('PRAGMA foreign_keys = false')
self._CreateTable()
self._InsertInitData()
self.Db.query('PRAGMA foreign_keys = true')
def _CreateDb(self):
if not os.path.isfile(self.DbFilePath):
with open(self.DbFilePath, 'w') as f: pass
def _ConnectDb(self):
self.__class__.Db = dataset.connect('sqlite:///' + self.DbFilePath, engine_kwargs={'pool_pre_ping':True})
    # Create tables (CREATE TABLE statements)
def _CreateTable(self):
self.__CreateTableBySql()
self.__CreateTableByPy()
    # Insert initial data (INSERT statements)
def _InsertInitData(self):
self.__InsertBySql()
self.__InsertByTsv()
self.__InsertByPy()
@property
def DbId(self): return self.__class__.__name__.replace(super().__thisclass__.__name__, '')
@property
def DbFileName(self): return 'Github.' + self.DbId + '.sqlite3'
#def DbFileName(self): return 'GitHub.' + self.DbId + '.sqlite3'
@property
def DbFilePath(self):
return os.path.join(Config()['Path']['Db'], self.DbFileName)
@property
def Db(self): return self.__class__.Db
# sqlite3.ProgrammingError: SQLite objects created in a thread can only be used in that same thread.The object was created in thread id 1972434016 and this is thread id 1995735040
#try:
#except sqlite3.ProgrammingError as e:
# self.__ConnectDb()
#return self.__class__.Db
    # Create tables from SQL files
def __CreateTableBySql(self):
for path_sql in self.__GetCreateTableSqlFilePaths():
            self.__ExecuteSqlFile(path_sql)
    # Create tables from Python code
def __CreateTableByPy(self):
self.__ActionByPy(action='create')
    # Insert from SQL files
def __InsertBySql(self):
        for path_sql in self.__GetInsertSqlFilePaths():
            self.__ExecuteSqlFile(path_sql)
    # Insert from TSV files
def __InsertByTsv(self):
for path_tsv in self.__GetInsertTsvFilePaths():
            table_name = os.path.splitext(os.path.basename(path_tsv))[0]
loader = database.TsvLoader.TsvLoader()
loader.ToSqlite3(path_tsv, self.DbFilePath, table_name)
    # Insert from Python code
def __InsertByPy(self):
self.__ActionByPy(action='insert')
    # Run an action implemented in Python code
def __ActionByPy(self, action='insert'):
path, namespace, module_name, class_name, method_name = self.__GetIds_ActionByPy(action)
if os.path.isdir(path):
            # Load the module
import importlib
            module = importlib.import_module(namespace + module_name)
            # Instantiate the class
#cls = module[module_name](self.DbFilePath)
cls = getattr(module, class_name)
##############################################################
            # What should the constructor argument be? Currently: DbFilePath, dataset.connect(), or client. Moving this into the business logic should remove the need to pass client.
#ins = cls(self.DbFilePath)
ins = cls(self.Db)
##############################################################
            # Get and call the method
#method = getattr(cls, method_name)
method = getattr(ins, method_name)
method()
def __GetIds_ActionByPy(self, action='insert'):
self.__CheckActionName(action)
path_root = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))))
path_l_py = 'database/init/{0}/{1}/py/'.format(self.DbId, action)
path_py = os.path.join(path_root, path_l_py)
namespace = path_l_py.replace('/', '.')
module_name = action[0].upper() + action[1:] + 'r' # Create[r], Inserte[r], Delete[r]
class_name = module_name
method_name = module_name[:-1] # Create, Insert, Delete
return path_py, namespace, module_name, class_name, method_name
def __CheckActionName(self, action):
valid_names = {'create', 'insert'}
        if action not in valid_names: raise Exception('Argument action must be one of {0}: {1}'.format(valid_names, action))
    # Get paths of SQL files for table creation
def __GetCreateTableSqlFilePaths(self):
path = os.path.join(self._path_dir_this, self.DbId, 'create', 'table', 'sql')
for path_sql in glob.glob(os.path.join(path + '*.sql')): yield path_sql
    # Get paths of TSV files with initial data
def __GetInsertTsvFilePaths(self):
path = os.path.join(self._path_dir_this, self.DbId, 'insert', 'tsv')
for path_tsv in glob.glob(os.path.join(path + '*.tsv')): yield path_tsv
    # Get paths of SQL files with initial data
def __GetInsertSqlFilePaths(self):
path = os.path.join(self._path_dir_this, self.DbId, 'insert', 'sql')
for path_tsv in glob.glob(os.path.join(path + '*.sql')): yield path_tsv
    # Execute an SQL file
def __ExecuteSqlFile(self, sql_path):
with open(sql_path, 'r') as f:
sql = f.read()
self.__class__.Db.query(sql)
| 38.404255
| 187
| 0.633426
|
e9140dd94cf523c8acee8967e10f27c75066ad6f
| 1,229
|
py
|
Python
|
fixture/application.py
|
Shelkovica/python_training
|
c870008b251e8eae8f3ff2e451a15771a9d5f7ff
|
[
"Apache-2.0"
] | null | null | null |
fixture/application.py
|
Shelkovica/python_training
|
c870008b251e8eae8f3ff2e451a15771a9d5f7ff
|
[
"Apache-2.0"
] | null | null | null |
fixture/application.py
|
Shelkovica/python_training
|
c870008b251e8eae8f3ff2e451a15771a9d5f7ff
|
[
"Apache-2.0"
] | null | null | null |
from selenium import webdriver
from fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
def __init__(self, browser, base_url, local_url, username, password, local_group_url):
if browser == "firefox":
self.wd = webdriver.Firefox()
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie
else:
raise ValueError("Unrecognized browser %s" % browser)
self.session = SessionHelper(self)
self.base_url = base_url
self.local_url = local_url
self.username = username
self.password = password
self.group = GroupHelper(self, local_url, local_group_url)
self.contact = ContactHelper(self, local_url)
def is_valid(self):
try:
self.wd.current_url
return True
except Exception:
return False
def open_home_page(self):
wd = self.wd
if not (wd.current_url.endswith(self.local_url) and len(wd.find_elements_by_name("add")) > 0):
wd.get(self.base_url)
def destroy(self):
self.wd.quit()
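# Minimal usage sketch (illustrative only; URLs and credentials are placeholders,
# and the helper methods on SessionHelper/GroupHelper are assumed from context):
#   app = Application(browser="firefox", base_url="http://localhost/addressbook/",
#                     local_url="/addressbook/", username="admin", password="secret",
#                     local_group_url="/addressbook/group.php")
#   ...  # drive the app through app.session, app.group, app.contact
#   app.destroy()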
| 31.512821
| 102
| 0.627339
|
ea195625e37daa64d5d5ff4f6bb28b71761cbb38
| 24,257
|
py
|
Python
|
contrib/regularized_embeddings.py
|
eigenfoo/word2gm
|
d6b89842803dd78c9ac2bf26a802c71fcef82f54
|
[
"BSD-3-Clause"
] | 3
|
2018-12-27T14:50:41.000Z
|
2019-03-11T16:03:07.000Z
|
contrib/regularized_embeddings.py
|
eigenfoo/word2gm
|
d6b89842803dd78c9ac2bf26a802c71fcef82f54
|
[
"BSD-3-Clause"
] | null | null | null |
contrib/regularized_embeddings.py
|
eigenfoo/word2gm
|
d6b89842803dd78c9ac2bf26a802c71fcef82f54
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Ben Athiwaratkun
Training code for Gaussian Mixture word embeddings model
Adapted from tensorflow's word2vec.py
(https://github.com/tensorflow/models/blob/master/tutorials/embedding/word2vec.py)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
import math
# Restrict to CPU only
os.environ["CUDA_VISIBLE_DEVICES"]=""
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
import pickle
#from tensorflow.models.embedding import gen_word2vec as word2vec
#word2vec = tf.load_op_library(os.path.join(os.path.di))
word2vec = tf.load_op_library(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'word2vec_ops.so'))
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model and "
"training summaries. (required)")
flags.DEFINE_string("train_data", None, "Training text file. (required)")
flags.DEFINE_integer("embedding_size", 50, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 5,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.2, "Initial learning rate.")
flags.DEFINE_integer("batch_size", 256,
"Number of training examples processed per step "
"(size of a minibatch).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
"The number of words to predict to the left and right "
"of the target word.")
flags.DEFINE_float("regularization_coeff", 0.005,
"Regularization coefficient.")
flags.DEFINE_integer("min_count", 5,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_integer("statistics_interval", 5,
"Print statistics every n seconds.")
flags.DEFINE_integer("summary_interval", 5,
"Save training summary to file every n seconds (rounded "
"up to statistics interval).")
flags.DEFINE_integer("checkpoint_interval", 600,
"Checkpoint the model (i.e. save the parameters) every n "
"seconds (rounded up to statistics interval).")
flags.DEFINE_integer("num_mixtures", 2,
"Number of mixture component for Mixture of Gaussians")
flags.DEFINE_boolean("spherical", False,
"Whether the model should be spherical of diagonal"
"The default is spherical")
flags.DEFINE_float("var_scale", 0.05, "Variance scale")
flags.DEFINE_boolean("ckpt_all", False, "Keep all checkpoints"
"(Warning: This requires a large amount of disk space).")
flags.DEFINE_float("norm_cap", 3.0,
"The upper bound of norm of mean vector")
flags.DEFINE_float("lower_sig", 0.02,
"The lower bound for sigma element-wise")
flags.DEFINE_float("upper_sig", 5.0,
"The upper bound for sigma element-wise")
flags.DEFINE_float("mu_scale", 1.0,
"The average norm will be around mu_scale")
flags.DEFINE_float("objective_threshold", 1.0,
"The threshold for the objective")
flags.DEFINE_boolean("adagrad", False,
"Use Adagrad optimizer instead")
flags.DEFINE_float("loss_epsilon", 1e-4,
"epsilon parameter for loss function")
flags.DEFINE_boolean("constant_lr", False,
"Use constant learning rate")
flags.DEFINE_boolean("wout", False,
"Whether we would use a separate wout")
flags.DEFINE_boolean("max_pe", False,
"Using maximum of partial energy instead of the sum")
flags.DEFINE_integer("max_to_keep", 5,
"The maximum number of checkpoint files to keep")
flags.DEFINE_boolean("normclip", False,
"Whether to perform norm clipping (very slow)")
flags.DEFINE_string("rep", "gm", 'The type of representation. Either gm or vec')
flags.DEFINE_integer("fixvar", 0, "whether to fix the variance or not")
FLAGS = flags.FLAGS
class Options(object):
"""Options used by our Word2MultiGauss model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.emb_dim = FLAGS.embedding_size
# Training options.
# The training text file.
self.train_data = FLAGS.train_data
# The initial learning rate.
self.learning_rate = FLAGS.learning_rate
# Number of epochs to train. After these many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = FLAGS.epochs_to_train
# Concurrent training steps.
self.concurrent_steps = FLAGS.concurrent_steps
# Number of examples for one training step.
self.batch_size = FLAGS.batch_size
# The number of words to predict to the left and right of the target word.
self.window_size = FLAGS.window_size
# George Ho: Regularization coefficient.
self.regularization_coeff = FLAGS.regularization_coeff
# The minimum number of word occurrences for it to be included in the
# vocabulary.
self.min_count = FLAGS.min_count
# Subsampling threshold for word occurrence.
self.subsample = FLAGS.subsample
# How often to print statistics.
self.statistics_interval = FLAGS.statistics_interval
# How often to write to the summary file (rounds up to the nearest
# statistics_interval).
self.summary_interval = FLAGS.summary_interval
# How often to write checkpoints (rounds up to the nearest statistics
# interval).
self.checkpoint_interval = FLAGS.checkpoint_interval
# Where to write out summaries.
self.save_path = FLAGS.save_path
#################################
self.num_mixtures = FLAGS.num_mixtures # incorporated. needs testing
# upper bound of norm of mu
self.norm_cap = FLAGS.norm_cap
# element-wise lower bound for sigma
self.lower_sig = FLAGS.lower_sig
# element-wise upper bound for sigma
self.upper_sig = FLAGS.upper_sig
# whether to use spherical or diagonal covariance
self.spherical = FLAGS.spherical ## default to False please
self.var_scale = FLAGS.var_scale
self.ckpt_all = FLAGS.ckpt_all
self.mu_scale = FLAGS.mu_scale
self.objective_threshold = FLAGS.objective_threshold
self.adagrad = FLAGS.adagrad
self.loss_epsilon = FLAGS.loss_epsilon
self.constant_lr = FLAGS.constant_lr
self.wout = FLAGS.wout
self.max_pe = FLAGS.max_pe
self.max_to_keep = FLAGS.max_to_keep
self.normclip = FLAGS.normclip
## value clipping
self.norm_cap = FLAGS.norm_cap
self.upper_sig = FLAGS.upper_sig
self.lower_sig = FLAGS.lower_sig
self.rep = FLAGS.rep
self.fixvar = FLAGS.fixvar
class Word2GMtrainer(object):
def __init__(self, options, session):
self._options = options
# Ben A: print important opts
opts = options
print('--------------------------------------------------------')
print('Rep {}'.format(opts.rep))
print('Train data {}'.format(opts.train_data))
print('Norm cap {} lower sig {} upper sig {}'.format(opts.norm_cap,
opts.lower_sig, opts.upper_sig))
print('mu_scale {} var_scale {}'.format(opts.mu_scale, opts.var_scale))
print('Num Mixtures {} Spherical Mode = {}'.format(opts.num_mixtures, opts.spherical))
print('Emb dim {}'.format(opts.emb_dim))
print('Epochs to train {}'.format(opts.epochs_to_train))
print('Learning rate {} // constant {}'.format(opts.learning_rate, opts.constant_lr))
print('Using a separate Wout = {}'.format(opts.wout))
print('Subsampling rate = {}'.format(opts.subsample))
print('Using Max Partial Energy Loss = {}'.format(opts.max_pe))
print('Loss Epsilon = {}'.format(opts.loss_epsilon))
print('Saving results to = {}'.format(options.save_path))
print('--------------------------------------------------------')
self._session = session
self._word2id = {}
self._id2word = []
self.build_graph() #
self.save_vocab()
def optimize(self, loss):
"""Build the graph to optimize the loss function."""
# Optimizer nodes.
# Linear learning rate decay.
opts = self._options
if opts.constant_lr:
self._lr = tf.constant(opts.learning_rate)
else:
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
self._lr = lr
optimizer = tf.train.GradientDescentOptimizer(self._lr)
train = optimizer.minimize(loss,
global_step=self.global_step,
gate_gradients=optimizer.GATE_NONE)
self._train = train
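# The schedule above decays the learning rate linearly with training progress:
# lr = learning_rate * max(1e-4, 1 - words_processed / (words_per_epoch * epochs_to_train)).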
def optimize_adam(self, loss):
# deprecated; note: opts.adam_epsilon is not defined in this file's FLAGS/Options, so this path would fail if called
opts = self._options
# use automatic decay of learning rate in Adam
self._lr = tf.constant(opts.learning_rate)
self.adam_epsilon = opts.adam_epsilon
optimizer = tf.train.AdamOptimizer(self._lr, epsilon=self.adam_epsilon)
train = optimizer.minimize(loss, global_step=self.global_step,
gate_gradients=optimizer.GATE_NONE)
self._train = train
def optimize_adagrad(self, loss):
print('Using Adagrad optimizer')
opts = self._options
if opts.constant_lr:
self._lr = tf.constant(opts.learning_rate)
else:
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
self._lr = lr
optimizer = tf.train.AdagradOptimizer(self._lr)
train = optimizer.minimize(loss,
global_step=self.global_step,
gate_gradients=optimizer.GATE_NONE)
self._train = train
def calculate_loss(self, word_idxs, pos_idxs):
# This is two methods in one (forward and nce_loss)
self.global_step = tf.Variable(0, name="global_step")
opts = self._options
#####################################################
# the model parameters
vocabulary_size = opts.vocab_size
embedding_size = opts.emb_dim
batch_size = opts.batch_size
regularization_coeff = opts.regularization_coeff
norm_cap = opts.norm_cap
lower_sig = opts.lower_sig
upper_sig = opts.upper_sig
self.norm_cap = norm_cap
self.lower_logsig = math.log(lower_sig)
self.upper_logsig = math.log(upper_sig)
num_mixtures = opts.num_mixtures
spherical = opts.spherical
objective_threshold = opts.objective_threshold
# the model parameters
mu_scale = opts.mu_scale*math.sqrt(3.0/(1.0*embedding_size))
mus = tf.get_variable('mu', initializer=tf.random_uniform([vocabulary_size, num_mixtures, embedding_size], -mu_scale, mu_scale))
if opts.wout:
mus_out = tf.get_variable('mu_out', initializer=tf.random_uniform([vocabulary_size, num_mixtures, embedding_size], -mu_scale, mu_scale))
# This initialization makes the variance around 1
var_scale = opts.var_scale
logvar_scale = math.log(var_scale)
print('mu_scale = {} var_scale = {}'.format(mu_scale, var_scale))
var_trainable = 1-self._options.fixvar
print('var trainable =', var_trainable)
if spherical:
logsigs = tf.get_variable('sigma',
initializer=tf.random_uniform([vocabulary_size, num_mixtures,1],
logvar_scale, logvar_scale), trainable=var_trainable)
if opts.wout:
logsigs_out = tf.get_variable('sigma_out',
initializer=tf.random_uniform([vocabulary_size, num_mixtures,1],
logvar_scale, logvar_scale), trainable=var_trainable)
else:
logsigs = tf.get_variable('sigma',
initializer=tf.random_uniform([vocabulary_size, num_mixtures, embedding_size],
logvar_scale, logvar_scale), trainable=var_trainable)
if opts.wout:
logsigs_out = tf.get_variable('sigma_out',
initializer=tf.random_uniform([vocabulary_size, num_mixtures, embedding_size],
logvar_scale, logvar_scale), trainable=var_trainable)
mixture = tf.get_variable('mixture', initializer=tf.random_uniform([vocabulary_size, num_mixtures], 0, 0))
if opts.wout:
mixture_out = tf.get_variable('mixture_out', initializer=tf.random_uniform([vocabulary_size, num_mixtures], 0, 0))
if not opts.wout:
mus_out = mus
logsigs_out = logsigs
mixture_out = mixture
zeros_vec = tf.zeros([batch_size], name='zeros')
self._mus = mus
self._logsigs = logsigs
labels_matrix = tf.reshape(
tf.cast(pos_idxs,
dtype=tf.int64),
[opts.batch_size, 1])
# Negative sampling.
neg_idxs, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
true_classes=labels_matrix,
num_true=1,
num_sampled=opts.batch_size, # Use 1 negative sample per positive sample
unique=True,
range_max=opts.vocab_size,
distortion=0.75,
unigrams=opts.vocab_counts.tolist(), name='neg_idxs'))
self._neg_idxs = neg_idxs
def log_energy(mu1, sig1, mix1, mu2, sig2, mix2, only_bw_modes=False):
### need to pass mix that's compatible!
### George Ho: `only_bw_modes` precludes computing log energies
### between the same mode. For the regularization. Make sure to pass in
### mixes of the same length!
def partial_logenergy(cl1, cl2):
m1 = mu1[:,cl1,:]
m2 = mu2[:,cl2,:]
s1 = sig1[:,cl1,:]
s2 = sig2[:,cl2,:]
with tf.name_scope('partial_logenergy') as scope:
_a = tf.add(s1, s2) # should we do max-add here for stability?
epsilon = opts.loss_epsilon
if spherical:
logdet = embedding_size*tf.log(epsilon + tf.squeeze(_a))
else:
logdet = tf.reduce_sum(tf.log(epsilon + _a), reduction_indices=1, name='logdet')
ss_inv = 1./(epsilon + _a)
#diff = tf.sub(m1, m2)
diff = tf.subtract(m1, m2)
exp_term = tf.reduce_sum(diff*ss_inv*diff, reduction_indices=1, name='expterm')
pe = -0.5*logdet - 0.5*exp_term
return pe
with tf.name_scope('logenergy') as scope:
log_e_list = []
mix_list = []
for cl1 in xrange(num_mixtures):
for cl2 in xrange(num_mixtures):
if (not only_bw_modes) or cl1 != cl2:
log_e_list.append(partial_logenergy(cl1, cl2))
mix_list.append(mix1[:,cl1]*mix2[:,cl2])
log_e_pack = tf.stack(log_e_list)
log_e_max = tf.reduce_max(log_e_list, reduction_indices=0)
if opts.max_pe:
# Ben A: got this warning for max_pe
# UserWarning:
# Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.
log_e_argmax = tf.argmax(log_e_list, dimension=0)
log_e = log_e_max*tf.gather(mix_list, log_e_argmax)
else:
mix_pack = tf.stack(mix_list)
log_e = tf.log(tf.reduce_sum(mix_pack*tf.exp(log_e_pack-log_e_max), reduction_indices=0))
log_e += log_e_max
return log_e
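# The block above evaluates log E(f, g) = log sum_{i,j} p_i q_j exp(logE_ij)
# using the log-sum-exp trick (subtracting the per-pair maximum before
# exponentiating) for numerical stability; with --max_pe only the largest
# partial energy, weighted by its mixture product, is used instead of the sum.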
def Lfunc(word_idxs, pos_idxs, neg_idxs):
with tf.name_scope('LossCal') as scope:
mu_embed = tf.nn.embedding_lookup(mus, word_idxs, name='MuWord')
mu_embed_pos = tf.nn.embedding_lookup(mus_out, pos_idxs, name='MuPos')
mu_embed_neg = tf.nn.embedding_lookup(mus_out, neg_idxs, name='MuNeg')
sig_embed = tf.exp(tf.nn.embedding_lookup(logsigs, word_idxs), name='SigWord')
sig_embed_pos = tf.exp(tf.nn.embedding_lookup(logsigs_out, pos_idxs), name='SigPos')
sig_embed_neg = tf.exp(tf.nn.embedding_lookup(logsigs_out, neg_idxs), name='SigNeg')
mix_word = tf.nn.softmax(tf.nn.embedding_lookup(mixture, word_idxs), name='MixWord')
mix_pos = tf.nn.softmax(tf.nn.embedding_lookup(mixture_out, pos_idxs), name='MixPos')
mix_neg = tf.nn.softmax(tf.nn.embedding_lookup(mixture_out, neg_idxs), name='MixNeg')
epos = log_energy(mu_embed, sig_embed, mix_word, mu_embed_pos, sig_embed_pos, mix_pos)
eneg = log_energy(mu_embed, sig_embed, mix_word, mu_embed_neg, sig_embed_neg, mix_neg)
eself = log_energy(mu_embed, sig_embed, mix_word, mu_embed, sig_embed, mix_word, only_bw_modes=True)
loss_indiv = tf.maximum(zeros_vec, objective_threshold - epos + eneg,
name='CalculateIndividualLoss')
reg_indiv = regularization_coeff * eself
loss = tf.reduce_mean(loss_indiv, name='AveLoss')
reg = tf.reduce_mean(reg_indiv, name='AveReg')
return loss + tf.exp(-reg), loss, reg
loss_reg, loss, reg = Lfunc(word_idxs, pos_idxs, neg_idxs)
tf.summary.scalar('loss', loss)
tf.summary.scalar('reg', reg)
return loss_reg
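# As assembled above, the per-example objective is the hinge loss
# max(0, objective_threshold - logE(w, pos) + logE(w, neg)); the returned value
# additionally adds exp(-reg), where reg is the mean of regularization_coeff
# times the "self energy" between distinct modes of the same word.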
def clip_ops_graph(self, word_idxs, pos_idxs, neg_idxs):
def clip_val_ref(embedding, idxs):
with tf.name_scope('clip_val'):
to_update = tf.nn.embedding_lookup(embedding, idxs)
to_update = tf.maximum(self.lower_logsig, tf.minimum(self.upper_logsig, to_update))
return tf.scatter_update(embedding, idxs, to_update)
def clip_norm_ref(embedding, idxs):
with tf.name_scope('clip_norm_ref') as scope:
to_update = tf.nn.embedding_lookup(embedding, idxs)
to_update = tf.clip_by_norm(to_update, self.norm_cap, axes=2)
return tf.scatter_update(embedding, idxs, to_update)
clip1 = clip_norm_ref(self._mus, word_idxs)
clip2 = clip_norm_ref(self._mus, pos_idxs)
clip3 = clip_norm_ref(self._mus, neg_idxs)
clip4 = clip_val_ref(self._logsigs, word_idxs)
clip5 = clip_val_ref(self._logsigs, pos_idxs)
clip6 = clip_val_ref(self._logsigs, neg_idxs)
return [clip1, clip2, clip3, clip4, clip5, clip6]
def build_graph(self):
"""Build the graph for the full model."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, self._epoch, self._words, examples,
labels) = word2vec.skipgram_word2vec(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print("Data file: ", opts.train_data)
print("Vocab size: ", opts.vocab_size - 1, " + UNK")
print("Words per epoch: ", opts.words_per_epoch)
self._examples = examples
self._labels = labels
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
pickle.dump(self._word2id, open("word2id.pkl", 'wb'))
loss = self.calculate_loss(examples, labels)
self._loss = loss
if opts.normclip:
self._clip_ops = self.clip_ops_graph(self._examples, self._labels, self._neg_idxs)
if opts.adagrad:
print("Using Adagrad as an optimizer!")
self.optimize_adagrad(loss)
else:
# Using Standard SGD
self.optimize(loss)
# Properly initialize all variables.
self.check_op = tf.add_check_numerics_ops()
tf.initialize_all_variables().run()
try:
print('Try using saver version v2')
self.saver = tf.train.Saver(write_version=tf.train.SaverDef.V2, max_to_keep = opts.max_to_keep)
except:
print('Default to saver version v1')
self.saver = tf.train.Saver(max_to_keep=opts.max_to_keep)
def save_vocab(self):
"""Save the vocabulary to a file so the model can be reloaded."""
opts = self._options
with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
for i in xrange(opts.vocab_size):
vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode("utf-8")
f.write("%s %d\n" % (vocab_word,
opts.vocab_counts[i]))
def _train_thread_body(self):
initial_epoch, = self._session.run([self._epoch])
while True:
# This is where the optimizer that minimizes loss (self._train) is run
if not self._options.normclip:
_, epoch = self._session.run([self._train, self._epoch])
else:
_, epoch, _ = self._session.run([self._train, self._epoch, self._clip_ops])
if epoch != initial_epoch:
break
def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(opts.save_path, self._session.graph)
workers = []
for _ in xrange(opts.concurrent_steps):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time, last_summary_time = initial_words, time.time(), 0
last_checkpoint_time = 0
step_manual = 0
while True:
time.sleep(opts.statistics_interval) # Reports our progress once a while.
(epoch, step, loss, words, lr) = self._session.run(
[self._epoch, self.global_step, self._loss, self._words, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (
now - last_time)
print("Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\n" %
(epoch, step, lr, loss, rate), end="")
sys.stdout.flush()
if now - last_summary_time > opts.summary_interval:
summary_str = self._session.run(summary_op)
summary_writer.add_summary(summary_str, step)
last_summary_time = now
if now - last_checkpoint_time > opts.checkpoint_interval:
self.saver.save(self._session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=step.astype(int))
last_checkpoint_time = now
if epoch != initial_epoch:
break
step_manual += 1
for t in workers:
t.join()
return epoch
def _start_shell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def main(_):
if not FLAGS.train_data or not FLAGS.save_path:
print("--train_data and --save_path must be specified.")
sys.exit(1)
if not os.path.exists(FLAGS.save_path):
print('Creating new directory', FLAGS.save_path)
os.makedirs(FLAGS.save_path)
else:
print('The directory already exists', FLAGS.save_path)
opts = Options()
print('Saving results to {}'.format(opts.save_path))
with tf.Graph().as_default(), tf.Session() as session:
with tf.device("/cpu:0"):
model = Word2GMtrainer(opts, session)
for _ in xrange(opts.epochs_to_train):
model.train()
# Perform a final save.
model.saver.save(session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=model.global_step)
with tf.variable_scope('', reuse=tf.AUTO_REUSE):
sigmas = session.run(tf.get_variable("sigma"))
mus = session.run(tf.get_variable("mu"))
np.save("sigma.npy", sigmas)
np.save("mu.npy", mus)
if __name__ == "__main__":
tf.app.run()
| 38.935795
| 132
| 0.65041
|
1ddff97500d326bb73d06a97cdafe035251c7680
| 620
|
py
|
Python
|
venv/bin/rst2html.py
|
acounsel/zwazo
|
beeb3591674a8290a89704e3da56aa4d425418e8
|
[
"MIT"
] | null | null | null |
venv/bin/rst2html.py
|
acounsel/zwazo
|
beeb3591674a8290a89704e3da56aa4d425418e8
|
[
"MIT"
] | 10
|
2019-11-25T16:54:39.000Z
|
2022-02-10T08:29:51.000Z
|
venv/bin/rst2html.py
|
acounsel/zwazo
|
beeb3591674a8290a89704e3da56aa4d425418e8
|
[
"MIT"
] | null | null | null |
#!/Users/Marisa/Sites/zwazo/venv/bin/python3
# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html', description=description)
| 25.833333
| 78
| 0.737097
|
b0449121f98255a7ffbc8970e492aceb5a7f8d2a
| 1,245
|
py
|
Python
|
extensions/widgets/interactive/NumericInput/NumericInput.py
|
willingc/oh-missions-oppia-beta
|
3d97903a5155ec67f135b1aa2c02f3bb39eb02e7
|
[
"Apache-2.0"
] | null | null | null |
extensions/widgets/interactive/NumericInput/NumericInput.py
|
willingc/oh-missions-oppia-beta
|
3d97903a5155ec67f135b1aa2c02f3bb39eb02e7
|
[
"Apache-2.0"
] | 2
|
2021-06-10T23:58:39.000Z
|
2021-12-13T20:51:34.000Z
|
extensions/widgets/interactive/NumericInput/NumericInput.py
|
willingc/oh-missions-oppia-beta
|
3d97903a5155ec67f135b1aa2c02f3bb39eb02e7
|
[
"Apache-2.0"
] | null | null | null |
from core.domain import widget_domain
from extensions.objects.models import objects
class NumericInput(widget_domain.BaseWidget):
"""Definition of a widget.
Do NOT make any changes to this widget definition while the Oppia app is
running, otherwise things will break.
This class represents a widget, whose id is the name of the class. It is
auto-discovered when the default widgets are refreshed.
"""
# The human-readable name of the widget.
name = 'Numeric input'
# The category the widget falls under in the widget repository.
category = 'Basic Input'
# A description of the widget.
description = (
'A numeric input widget that can accept and classify integers and '
'floating point numbers.'
)
# Customization parameters and their descriptions, types and default
# values. This attribute name MUST be prefixed by '_'.
_params = []
# Actions that the reader can perform on this widget which trigger a
# feedback interaction, and the associated input types. Interactive widgets
# must have at least one of these. This attribute name MUST be prefixed by
# '_'.
_handlers = [{
'name': 'submit', 'input_type': objects.Real
}]
| 32.763158
| 79
| 0.699598
|
109422f948a8a29c875dcfdb424e7f71d2bd02c6
| 394
|
py
|
Python
|
apps/track/migrations/0022_auto_20210319_1551.py
|
martinlehoux/django_bike
|
05373d2649647fe8ebadb0aad54b9a7ec1900fe7
|
[
"MIT"
] | 1
|
2020-08-12T17:53:37.000Z
|
2020-08-12T17:53:37.000Z
|
apps/track/migrations/0022_auto_20210319_1551.py
|
martinlehoux/django_bike
|
05373d2649647fe8ebadb0aad54b9a7ec1900fe7
|
[
"MIT"
] | 12
|
2020-07-03T03:52:00.000Z
|
2021-09-22T18:00:44.000Z
|
apps/track/migrations/0022_auto_20210319_1551.py
|
martinlehoux/django_bike
|
05373d2649647fe8ebadb0aad54b9a7ec1900fe7
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-03-19 15:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("track", "0021_auto_20200915_1528"),
]
operations = [
migrations.RemoveField(
model_name="track",
name="parser",
),
migrations.DeleteModel(
name="Point",
),
]
| 18.761905
| 47
| 0.560914
|
068edd676296c97bfe5055b82b80f1b57f846c2f
| 1,763
|
py
|
Python
|
barcodes/dxfwrite/tests/test_solid.py
|
sbarton272/AcousticBarcodes-Explorations
|
73f019228988727575af7d67d1b7c7119f6c49a6
|
[
"MIT"
] | null | null | null |
barcodes/dxfwrite/tests/test_solid.py
|
sbarton272/AcousticBarcodes-Explorations
|
73f019228988727575af7d67d1b7c7119f6c49a6
|
[
"MIT"
] | null | null | null |
barcodes/dxfwrite/tests/test_solid.py
|
sbarton272/AcousticBarcodes-Explorations
|
73f019228988727575af7d67d1b7c7119f6c49a6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#coding:utf-8
# Created: 20.02.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT License
__author__ = "mozman <mozman@gmx.at>"
import unittest
from dxfwrite.base import dxfstr, DXFValidationError
from dxfwrite.entities import Solid
class TestSolid(unittest.TestCase):
def test_solid_no_attributes(self):
solid = Solid() # need at least 3 points
self.assertRaises( DXFValidationError, dxfstr, solid)
solid = Solid( [(0,0)] ) # need at least 3 points
self.assertRaises( DXFValidationError, dxfstr, solid )
solid = Solid( [(0,0), (1,0)] ) # need at least 3 points
self.assertRaises( DXFValidationError, dxfstr, solid )
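# Illustrative note (not asserted by the original tests): in the expected DXF
# strings below, group codes 10/20/30, 11/21/31, 12/22/32 and 13/23/33 encode
# the x/y/z coordinates of the SOLID entity's four corner points.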
def test_solid_3points(self):
solid = Solid( [(0,0), (1,0), (1,1)] )
expected = " 0\nSOLID\n 8\n0\n 10\n0.0\n 20\n0.0\n 30\n0.0\n 11\n1.0\n 21\n0.0\n 31\n0.0\n" \
" 13\n1.0\n 23\n1.0\n 33\n0.0\n 12\n1.0\n 22\n1.0\n 32\n0.0\n"
self.assertEqual(dxfstr(solid), expected)
def test_solid_4points(self):
solid = Solid( [(0,0), (1,0), (1,1), (0,1)] )
expected = " 0\nSOLID\n 8\n0\n 10\n0.0\n 20\n0.0\n 30\n0.0\n 11\n1.0\n 21\n0.0\n 31\n0.0\n" \
" 13\n1.0\n 23\n1.0\n 33\n0.0\n 12\n0.0\n 22\n1.0\n 32\n0.0\n"
self.assertEqual(dxfstr(solid), expected)
def test_solid_change_point(self):
solid = Solid( [(0,0), (1,0), (1,1), (0,1)] )
solid[3] = (0, 2) # tuple! not DXFPoint
expected = " 0\nSOLID\n 8\n0\n 10\n0.0\n 20\n0.0\n 30\n0.0\n 11\n1.0\n 21\n0.0\n 31\n0.0\n" \
" 13\n1.0\n 23\n1.0\n 33\n0.0\n 12\n0.0\n 22\n2.0\n 32\n0.0\n"
self.assertEqual(dxfstr(solid), expected)
if __name__ == '__main__':
unittest.main()
| 39.177778
| 103
| 0.59671
|
45696041574414b6598c109704c2977dfc8d343e
| 182,959
|
py
|
Python
|
zvmsdk/smtclient.py
|
haolp/python-zvm-sdk
|
784b60b6528b57eb3fe9f795af439a25e20843b9
|
[
"Apache-2.0"
] | null | null | null |
zvmsdk/smtclient.py
|
haolp/python-zvm-sdk
|
784b60b6528b57eb3fe9f795af439a25e20843b9
|
[
"Apache-2.0"
] | null | null | null |
zvmsdk/smtclient.py
|
haolp/python-zvm-sdk
|
784b60b6528b57eb3fe9f795af439a25e20843b9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017,2020 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import hashlib
import math
# On SLES12, we found that if you import urllib.parse later
# than requests, you will find a error like 'not able to load
# urllib.parse, this is because urllib will be in sys.modules
# when first import requests
# as workaround here, we first import urllib then import requests
# later, we need consider to use urllib.request to replace
# requests if that's possible to avoid this kind of issue
from io import IOBase
import shutil
import six.moves.urllib.parse as urlparse
import requests
import threading
import os
import re
import six
import string
import subprocess
import tempfile
import time
from smtLayer import smt
from zvmsdk import config
from zvmsdk import constants as const
from zvmsdk import database
from zvmsdk import exception
from zvmsdk import log
from zvmsdk import returncode
from zvmsdk import utils as zvmutils
CONF = config.CONF
LOG = log.LOG
_LOCK = threading.Lock()
CHUNKSIZE = 4096
_SMT_CLIENT = None
def get_smtclient():
global _SMT_CLIENT
if _SMT_CLIENT is None:
try:
_SMT_CLIENT = zvmutils.import_object(
'zvmsdk.smtclient.SMTClient')
except ImportError:
LOG.error("Unable to get smtclient")
raise ImportError
return _SMT_CLIENT
class SMTClient(object):
def __init__(self):
self._smt = smt.SMT()
self._pathutils = zvmutils.PathUtils()
self._NetDbOperator = database.NetworkDbOperator()
self._GuestDbOperator = database.GuestDbOperator()
self._ImageDbOperator = database.ImageDbOperator()
def _request(self, requestData):
try:
results = self._smt.request(requestData)
except Exception as err:
LOG.error('SMT internal parse encounter error')
raise exception.SDKInternalError(msg=err, modID='smt')
def _is_smt_internal_error(results):
internal_error_list = returncode.SMT_INTERNAL_ERROR
for error in internal_error_list:
if results['overallRC'] != error[0]:
# overallRC does not match, continue next
continue
if error[1] is not None and results['rc'] != error[1]:
# rc match failed
continue
if error[2] is not None and results['rs'] not in error[2]:
# rs match failed
continue
# All match finish successfully, return true
return True
return False
if results['overallRC'] != 0:
results.pop('logEntries')
# Check whether this smt error belongs to internal error, if so,
# raise internal error, otherwise raise clientrequestfailed error
if _is_smt_internal_error(results):
msg = "SMT internal error. Results: %s" % str(results)
LOG.error(msg)
raise exception.SDKInternalError(msg=msg,
modID='smt',
results=results)
else:
msg = ("SMT request failed. RequestData: '%s', Results: '%s'"
% (requestData, str(results)))
raise exception.SDKSMTRequestFailed(results, msg)
return results
def get_guest_temp_path(self, userid):
return self._pathutils.get_guest_temp_path(userid)
def get_guest_path(self, userid):
return self._pathutils.get_guest_path(userid)
def clean_temp_folder(self, tmp_folder):
return self._pathutils.clean_temp_folder(tmp_folder)
def _generate_vdev(self, base, offset):
"""Generate virtual device number based on base vdev
:param base: base virtual device number, string of 4 bit hex.
:param offset: offset to base, integer.
"""
vdev = hex(int(base, 16) + offset)[2:]
return vdev.rjust(4, '0')
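# Worked example (illustrative): _generate_vdev('0100', 3) computes
# hex(0x0100 + 3)[2:] == '103' and right-justifies it to '0103'.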
def _generate_increasing_nic_id(self, nic_id):
"""Generate increasing nic id string
:param nic_id: hexadecimal nic id like '1000'
:return: increasing nic id, string like '0.0.1000,0.0.1001,0.0.1002'
"""
nic_id = str(hex(int(nic_id, 16)))[2:]
nic_id_1 = str(hex(int(nic_id, 16) + 1))[2:]
nic_id_2 = str(hex(int(nic_id, 16) + 2))[2:]
if len(nic_id_2) > 4:
errmsg = ("Virtual device number %s is not valid" % nic_id_2)
raise exception.SDKInvalidInputFormat(msg=errmsg)
return "0.0.%s,0.0.%s,0.0.%s" % (nic_id, nic_id_1, nic_id_2)
def generate_disk_vdev(self, start_vdev=None, offset=0):
"""Generate virtual device number for disks
:param offset: offset of user_root_vdev.
:return: virtual device number, string of 4 bit hex.
"""
if not start_vdev:
start_vdev = CONF.zvm.user_root_vdev
vdev = self._generate_vdev(start_vdev, offset)
if offset >= 0 and offset < 254:
return vdev
else:
msg = ("Failed to generate disk vdev, invalid virtual device"
"number for disk:%s" % vdev)
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=2, msg=msg)
def add_mdisks(self, userid, disk_list, start_vdev=None):
"""Add disks for the userid
:disks: A list dictionary to describe disk info, for example:
disk: [{'size': '1g',
'format': 'ext3',
'disk_pool': 'ECKD:eckdpool1'},
{'size': '1g',
'format': 'ext3'}]
"""
# Firstly, check disk_pool for each disk in disk_list; if disk_pool is not
# specified and not configured (the default value is None), report an error
for idx, disk in enumerate(disk_list):
disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool
disk['disk_pool'] = disk_pool
if disk_pool is None:
msg = ('disk_pool not configured for sdkserver.')
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=2, msg=msg)
for idx, disk in enumerate(disk_list):
if 'vdev' in disk:
# this means user want to create their own device number
vdev = disk['vdev']
else:
vdev = self.generate_disk_vdev(start_vdev=start_vdev,
offset=idx)
self._add_mdisk(userid, disk, vdev)
disk['vdev'] = vdev
sizeUpper = disk.get('size').strip().upper()
sizeUnit = sizeUpper[-1]
if sizeUnit != 'G' and sizeUnit != 'M':
sizeValue = sizeUpper
disk_pool = disk.get('disk_pool')
[diskpool_type, diskpool_name] = disk_pool.split(':')
if (diskpool_type.upper() == 'ECKD'):
# Convert the cylinders to bytes
convert = 737280
else:
# Convert the blocks to bytes
convert = 512
byteSize = float(float(int(sizeValue) * convert / 1024) / 1024)
unit = "M"
if (byteSize > 1024):
byteSize = float(byteSize / 1024)
unit = "G"
byteSize = "%.1f" % byteSize
disk['size'] = byteSize + unit
return disk_list
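# A worked example of the size normalisation above (illustrative, assuming an
# ECKD disk pool): a unit-less size '1024' is treated as cylinders, so
# 1024 * 737280 / 1024 / 1024 = 720.0 and disk['size'] becomes '720.0M'.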
def remove_mdisks(self, userid, vdev_list):
for vdev in vdev_list:
self._remove_mdisk(userid, vdev)
def dedicate_device(self, userid, vaddr, raddr, mode):
"""dedicate device
:userid: The name of the image obtaining a dedicated device
:vaddr: The virtual device number of the device
:raddr: A real device number to be dedicated or attached
to the specified image
:mode: Specify a 1 if the virtual device is to be in read-only mode.
Otherwise, specify a 0.
"""
# dedicate device to directory entry
self._dedicate_device(userid, vaddr, raddr, mode)
def _dedicate_device(self, userid, vaddr, raddr, mode):
"""dedicate device."""
action = 'dedicate'
rd = ('changevm %(uid)s %(act)s %(va)s %(ra)s %(mod)i' %
{'uid': userid, 'act': action,
'va': vaddr, 'ra': raddr, 'mod': mode})
action = "dedicate device to userid '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def get_fcp_info_by_status(self, userid, status):
"""get fcp information by the status.
:userid: The name of the image to query fcp info
:status: The status of target fcps. eg:'active', 'free' or 'offline'.
"""
results = self._get_fcp_info_by_status(userid, status)
return results
def _get_fcp_info_by_status(self, userid, status):
action = 'fcpinfo'
rd = ' '.join(['getvm', userid, action, status])
action = "query fcp info of '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
return results['response']
def undedicate_device(self, userid, vaddr):
"""undedicate device
:userid: The name of the image obtaining a dedicated device
:vaddr: The virtual device number of the device
"""
# undedicate device to directory entry
self._undedicate_device(userid, vaddr)
def _undedicate_device(self, userid, vaddr):
"""undedicate device."""
action = 'undedicate'
rd = ('changevm %(uid)s %(act)s %(va)s' %
{'uid': userid, 'act': action,
'va': vaddr})
action = "undedicate device from userid '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def get_image_performance_info(self, userid):
"""Get CPU and memory usage information.
:userid: the zvm userid to be queried
"""
pi_dict = self.image_performance_query([userid])
return pi_dict.get(userid, None)
def get_adapters_info(self, userid):
rd = ' '.join((
"SMAPI %s API Virtual_Network_Adapter_Query_Extended" % userid,
"--operands",
"-k 'image_device_number=*'"))
results = None
action = "get network info of userid '%s'" % str(userid)
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
ret = results['response']
# TODO: multi NIC support?
nic_count = 0
for line in ret:
if 'adapter_count=' in line:
nic_count = int(line.strip().split('=')[-1])
break
if nic_count < 1:
msg = 'get_network_info:No NIC found on userid %s' % userid
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
# save network info into dict by index from 1 to nic_count
# Firstly, get adapter information
adapters_info = []
adapter = dict()
# once an IP has been found, stop updating the MAC for this adapter
found_mac = False
for line in ret:
if 'adapter_address=' in line:
adapter_addr = line.strip().split('=')[-1]
adapter['adapter_address'] = adapter_addr
if 'adapter_status=' in line:
adapter_type = line.strip().split('=')[-1]
adapter['adapter_status'] = adapter_type
if 'lan_owner=' in line:
lan_owner = line.strip().split('=')[-1]
adapter['lan_owner'] = lan_owner
if 'lan_name=' in line:
lan_name = line.strip().split('=')[-1]
adapter['lan_name'] = lan_name
if 'mac_address=' in line and not found_mac:
mac_addr = line.strip().split('=')[-1]
pattern = re.compile('.{2}')
mac_address = ':'.join(pattern.findall(mac_addr))
adapter['mac_address'] = mac_address
if 'mac_ip_version=' in line:
ip_version = line.strip().split('=')[-1]
adapter['mac_ip_version'] = ip_version
if 'mac_ip_address=' in line:
# once we found mac_ip_address, assume this is the MAC
# we are using, then jump to next adapter
mac_ip = line.strip().split('=')[-1]
adapter['mac_ip_address'] = mac_ip
found_mac = True
if 'adapter_info_end' in line:
adapters_info.append(adapter)
# clear adapter and process next
adapter = dict()
found_mac = False
return adapters_info
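# Each dict in adapters_info carries the keys parsed above: adapter_address,
# adapter_status, lan_owner, lan_name, mac_address, mac_ip_version and
# mac_ip_address (each present only when found in the SMAPI output).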
def _parse_vswitch_inspect_data(self, rd_list):
""" Parse the Virtual_Network_Vswitch_Query_Byte_Stats data to get
inspect data.
"""
def _parse_value(data_list, idx, keyword, offset):
return idx + offset, data_list[idx].rpartition(keyword)[2].strip()
vsw_dict = {}
with zvmutils.expect_invalid_resp_data():
# vswitch count
idx = 0
idx, vsw_count = _parse_value(rd_list, idx, 'vswitch count:', 2)
vsw_dict['vswitch_count'] = int(vsw_count)
# deal with each vswitch data
vsw_dict['vswitches'] = []
for i in range(vsw_dict['vswitch_count']):
vsw_data = {}
# skip vswitch number
idx += 1
# vswitch name
idx, vsw_name = _parse_value(rd_list, idx, 'vswitch name:', 1)
vsw_data['vswitch_name'] = vsw_name
# uplink count
idx, up_count = _parse_value(rd_list, idx, 'uplink count:', 1)
# skip uplink data
idx += int(up_count) * 9
# skip bridge data
idx += 8
# nic count
vsw_data['nics'] = []
idx, nic_count = _parse_value(rd_list, idx, 'nic count:', 1)
nic_count = int(nic_count)
for j in range(nic_count):
nic_data = {}
idx, nic_id = _parse_value(rd_list, idx, 'nic_id:', 1)
userid, toss, vdev = nic_id.partition(' ')
nic_data['userid'] = userid
nic_data['vdev'] = vdev
idx, nic_data['nic_fr_rx'] = _parse_value(rd_list, idx,
'nic_fr_rx:', 1
)
idx, nic_data['nic_fr_rx_dsc'] = _parse_value(rd_list, idx,
'nic_fr_rx_dsc:', 1
)
idx, nic_data['nic_fr_rx_err'] = _parse_value(rd_list, idx,
'nic_fr_rx_err:', 1
)
idx, nic_data['nic_fr_tx'] = _parse_value(rd_list, idx,
'nic_fr_tx:', 1
)
idx, nic_data['nic_fr_tx_dsc'] = _parse_value(rd_list, idx,
'nic_fr_tx_dsc:', 1
)
idx, nic_data['nic_fr_tx_err'] = _parse_value(rd_list, idx,
'nic_fr_tx_err:', 1
)
idx, nic_data['nic_rx'] = _parse_value(rd_list, idx,
'nic_rx:', 1
)
idx, nic_data['nic_tx'] = _parse_value(rd_list, idx,
'nic_tx:', 1
)
vsw_data['nics'].append(nic_data)
# vlan count
idx, vlan_count = _parse_value(rd_list, idx, 'vlan count:', 1)
# skip vlan data
idx += int(vlan_count) * 3
# skip the blank line
idx += 1
vsw_dict['vswitches'].append(vsw_data)
return vsw_dict
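# The structure returned above looks roughly like:
# {'vswitch_count': N,
#  'vswitches': [{'vswitch_name': ...,
#                 'nics': [{'userid': ..., 'vdev': ..., 'nic_fr_rx': ...,
#                           ..., 'nic_rx': ..., 'nic_tx': ...}, ...]}, ...]}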
def _is_vdev_valid(self, vdev, vdev_info):
for used_vdev in vdev_info:
if (((int(vdev, 16) >= int(used_vdev, 16)) and
(int(vdev, 16) <= int(used_vdev, 16) + 2)) or
((int(vdev, 16) < int(used_vdev, 16)) and
(int(vdev, 16) >= int(used_vdev, 16) - 2))):
return False
return True
def get_power_state(self, userid):
"""Get power status of a z/VM instance."""
LOG.debug('Querying power stat of %s' % userid)
requestData = "PowerVM " + userid + " status"
action = "query power state of '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(requestData)
with zvmutils.expect_invalid_resp_data(results):
status = results['response'][0].partition(': ')[2]
return status
def _check_power_state(self, userid, action):
# Get the vm status
power_state = self.get_power_state(userid)
# Power on the vm if it is inactive
if power_state == 'off':
msg = ('The vm %s is powered off, please start it up '
'before %s' % (userid, action))
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
def guest_start(self, userid):
"""Power on VM."""
requestData = "PowerVM " + userid + " on"
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_stop(self, userid, **kwargs):
"""Power off VM."""
requestData = "PowerVM " + userid + " off"
if 'timeout' in kwargs.keys() and kwargs['timeout']:
requestData += ' --maxwait ' + str(kwargs['timeout'])
if 'poll_interval' in kwargs.keys() and kwargs['poll_interval']:
requestData += ' --poll ' + str(kwargs['poll_interval'])
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_softstop(self, userid, **kwargs):
"""Power off VM gracefully, it will call shutdown os then
deactivate vm"""
requestData = "PowerVM " + userid + " softoff --wait"
if 'timeout' in kwargs.keys() and kwargs['timeout']:
requestData += ' --maxwait ' + str(kwargs['timeout'])
else:
requestData += ' --maxwait ' + str(CONF.guest.softstop_timeout)
if 'poll_interval' in kwargs.keys() and kwargs['poll_interval']:
requestData += ' --poll ' + str(kwargs['poll_interval'])
else:
requestData += ' --poll ' + str(CONF.guest.softstop_interval)
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_pause(self, userid):
self._check_power_state(userid, 'pause')
requestData = "PowerVM " + userid + " pause"
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_unpause(self, userid):
self._check_power_state(userid, 'unpause')
requestData = "PowerVM " + userid + " unpause"
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_reboot(self, userid):
requestData = ' '.join(("PowerVM", userid, "reboot"))
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def guest_reset(self, userid):
requestData = ' '.join(("PowerVM", userid, "reset"))
with zvmutils.log_and_reraise_smt_request_failed():
self._request(requestData)
def live_migrate_move(self, userid, destination, parms):
""" moves the specified virtual machine, while it continues to run,
to the specified system within the SSI cluster. """
rd = ('migratevm %(uid)s move --destination %(dest)s ' %
{'uid': userid, 'dest': destination})
if 'maxtotal' in parms:
rd += (' --maxtotal ' + str(parms['maxtotal']))
if 'maxquiesce' in parms:
rd += (' --maxquiesce ' + str(parms['maxquiesce']))
if 'immediate' in parms:
rd += " --immediate"
if 'forcearch' in parms:
rd += " --forcearch"
if 'forcedomain' in parms:
rd += " --forcedomain"
if 'forcestorage' in parms:
rd += " --forcestorage"
action = "move userid '%s' to SSI '%s'" % (userid, destination)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
msg = ''
if action is not None:
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
def live_migrate_test(self, userid, destination):
""" tests the specified virtual machine and reports whether or not
it is eligible to be relocated to the specified system. """
rd = ('migratevm %(uid)s test --destination %(dest)s ' %
{'uid': userid, 'dest': destination})
action = "test to move userid '%s' to SSI '%s'" % (userid, destination)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
msg = ''
if action is not None:
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
def _get_ipl_param(self, ipl_from):
if len(ipl_from) > 0:
ipl_param = ipl_from
else:
ipl_param = CONF.zvm.user_root_vdev
return ipl_param
def create_vm(self, userid, cpu, memory, disk_list, profile,
max_cpu, max_mem, ipl_from, ipl_param, ipl_loadparam,
dedicate_vdevs, loaddev):
""" Create VM and add disks if specified. """
rd = ('makevm %(uid)s directory LBYONLY %(mem)im %(pri)s '
'--cpus %(cpu)i --profile %(prof)s --maxCPU %(max_cpu)i '
'--maxMemSize %(max_mem)s --setReservedMem' %
{'uid': userid, 'mem': memory,
'pri': const.ZVM_USER_DEFAULT_PRIVILEGE,
'cpu': cpu, 'prof': profile,
'max_cpu': max_cpu, 'max_mem': max_mem})
if CONF.zvm.default_admin_userid:
rd += (' --logonby "%s"' % CONF.zvm.default_admin_userid)
# when using dasd as the root disk, disk_list[0] is the boot
# disk.
# when booting from volume, ipl_from should be specified explicitly.
if (disk_list and 'is_boot_disk' in disk_list[0] and
disk_list[0]['is_boot_disk']) or ipl_from:
# we assume at least one disk exist, which means, is_boot_disk
# is true for exactly one disk.
rd += (' --ipl %s' % self._get_ipl_param(ipl_from))
# load param for ipl
if ipl_param:
rd += ' --iplParam %s' % ipl_param
if ipl_loadparam:
rd += ' --iplLoadparam %s' % ipl_loadparam
if dedicate_vdevs:
rd += ' --dedicate "%s"' % " ".join(dedicate_vdevs)
if loaddev:
if 'portname' in loaddev:
rd += ' --loadportname %s' % loaddev['portname']
if 'lun' in loaddev:
rd += ' --loadlun %s' % loaddev['lun']
# now we need to consider the swap-only case: the customer boots
# from volume but provides no disk pool, so we allow creating the
# swap disk from a vdisk by default. When we reach this logic we
# are sure that if there is no disk pool, there is only one
# disk in disk_list and that is the swap disk
vdisk = None
# this is swap only case, which means, you only create a swap
# disk (len disk_list is 1) and no other disks
if len(disk_list) == 1:
disk = disk_list[0]
if 'format' in disk and disk['format'].lower() == 'swap':
disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool
if disk_pool is None:
# if it's vdisk, then create user direct directly
vd = disk.get('vdev') or self.generate_disk_vdev(offset=0)
disk['vdev'] = vd
sizeUpper = disk['size'].strip().upper()
sizeUnit = sizeUpper[-1]
if sizeUnit != 'M' and sizeUnit != 'G':
errmsg = ("%s must has 'M' or 'G' suffix" % sizeUpper)
raise exception.SDKInvalidInputFormat(msg=errmsg)
if sizeUnit == 'M':
size = int(sizeUpper[:-1])
if size > 2048:
errmsg = ("%s is great than 2048M" % sizeUpper)
raise exception.SDKInvalidInputFormat(msg=errmsg)
if sizeUnit == 'G':
size = int(sizeUpper[:-1])
if size > 2:
errmsg = ("%s is great than 2G" % sizeUpper)
raise exception.SDKInvalidInputFormat(msg=errmsg)
rd += ' --vdisk %s:%s' % (vd, sizeUpper)
vdisk = disk
action = "create userid '%s'" % userid
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
if ((err.results['rc'] == 436) and (err.results['rs'] == 4)):
result = "Profile '%s'" % profile
raise exception.SDKObjectNotExistError(obj_desc=result,
modID='guest')
else:
msg = ''
if action is not None:
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
# Add the guest to db immediately after user created
action = "add guest '%s' to database" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._GuestDbOperator.add_guest(userid)
# Continue to add disk, if vdisk is None, it means
# it's not vdisk routine and we need add disks
if vdisk is None and disk_list:
# not perform mkfs against root disk
if disk_list[0].get('is_boot_disk'):
disk_list[0].update({'format': 'none'})
return self.add_mdisks(userid, disk_list)
# we must return swap disk in order to make guest config
# handle other remaining jobs
return disk_list
def _add_mdisk(self, userid, disk, vdev):
"""Create one disk for userid
NOTE: No read, write and multi password specified, and
access mode default as 'MR'.
"""
size = disk['size']
fmt = disk.get('format', 'ext4')
disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool
# Check disk_pool, if it's None, report error
if disk_pool is None:
msg = ('disk_pool not configured for sdkserver.')
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=2, msg=msg)
[diskpool_type, diskpool_name] = disk_pool.split(':')
if (diskpool_type.upper() == 'ECKD'):
action = 'add3390'
else:
action = 'add9336'
rd = ' '.join(['changevm', userid, action, diskpool_name,
vdev, size, '--mode MR'])
if fmt and fmt != 'none':
rd += (' --filesystem %s' % fmt.lower())
action = "add mdisk to userid '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def get_vm_list(self):
"""Get the list of guests that are created by SDK
return userid list"""
action = "list all guests in database"
with zvmutils.log_and_reraise_sdkbase_error(action):
guests_in_db = self._GuestDbOperator.get_guest_list()
guests_migrated = \
self._GuestDbOperator.get_migrated_guest_info_list()
# db query return value in tuple (uuid, userid, metadata, comments)
userids_in_db = [g[1].upper() for g in guests_in_db]
userids_migrated = [g[1].upper() for g in guests_migrated]
userid_list = list(set(userids_in_db) - set(userids_migrated))
return userid_list
def _remove_mdisk(self, userid, vdev):
rd = ' '.join(('changevm', userid, 'removedisk', vdev))
action = "remove disk with vdev '%s' from userid '%s'" % (vdev, userid)
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def guest_authorize_iucv_client(self, userid, client=None):
"""Punch a script that used to set the authorized client userid in vm
If the guest is in log off status, the change will take effect when
the guest start up at first time.
If the guest is in active status, power off and power on are needed
for the change to take effect.
:param str guest: the user id of the vm
:param str client: the user id of the client that can communicate to
guest using IUCV"""
client = client or zvmutils.get_smt_userid()
iucv_path = "/tmp/" + userid
if not os.path.exists(iucv_path):
os.makedirs(iucv_path)
iucv_auth_file = iucv_path + "/iucvauth.sh"
zvmutils.generate_iucv_authfile(iucv_auth_file, client)
try:
requestData = "ChangeVM " + userid + " punchfile " + \
iucv_auth_file + " --class x"
self._request(requestData)
except exception.SDKSMTRequestFailed as err:
msg = ("Failed to punch IUCV auth file to userid '%s'. SMT error:"
" %s" % (userid, err.format_message()))
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
finally:
self._pathutils.clean_temp_folder(iucv_path)
def volume_refresh_bootmap(self, fcpchannels, wwpns, lun, skipzipl=False):
""" Refresh bootmap info of specific volume.
: param fcpchannels: list of fcpchannels.
: param wwpns: list of wwpns.
: param lun: string of lun.
: return value: dict with FCP as key and list of wwpns this FCP can
access as value.
"""
fcps = ','.join(fcpchannels)
ws = ','.join(wwpns)
fcs = "--fcpchannel=%s" % fcps
wwpns = "--wwpn=%s" % ws
lun = "--lun=%s" % lun
if skipzipl:
skipzipl = "--skipzipl=YES"
cmd = ['sudo', '/opt/zthin/bin/refresh_bootmap', fcs, wwpns, lun,
skipzipl]
else:
cmd = ['sudo', '/opt/zthin/bin/refresh_bootmap', fcs, wwpns, lun]
LOG.info("Running command: %s", cmd)
try:
(rc, output) = zvmutils.execute(cmd, timeout=600)
except subprocess.TimeoutExpired as err:
err_msg = err.format_message()
raise exception.SDKVolumeOperationError(rs=7, msg=err_msg)
except PermissionError:
# because the zvmsdk user doesn't have permission to kill the background
# process, a timed-out execution raises PermissionError instead;
# we also treat it as a timeout exception
err_msg = ("Running command: %s timed out." % cmd)
raise exception.SDKVolumeOperationError(rs=7, msg=err_msg)
if rc != 0:
err_msg = ("refresh_bootmap failed with return code: %d." % rc)
err_output = ""
output_lines = output.split('\n')
for line in output_lines:
if line.__contains__("ERROR:"):
err_output += ("\\n" + line.strip())
LOG.error(err_msg + err_output)
raise exception.SDKVolumeOperationError(rs=5,
errcode=rc,
errmsg=err_output)
output_lines = output.split('\n')
paths_dict = {}
for line in output_lines:
if line.__contains__("RESULT PATHS: "):
paths_str = line[14:]
# paths_str format: "FCP1:W1 W2,FCP2:W3 W4"
# convert paths string into a dict
paths_list = paths_str.split(',')
for path in paths_list:
fcp, wwpn = path.split(':')
wwpn_list = wwpn.split(' ')
paths_dict[fcp] = wwpn_list
return paths_dict
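# Example of the parsing above (illustrative): an output line
# "RESULT PATHS: 1A01:W1 W2,1B01:W3" yields
# {'1A01': ['W1', 'W2'], '1B01': ['W3']}.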
def guest_deploy(self, userid, image_name, transportfiles=None,
remotehost=None, vdev=None, skipdiskcopy=False):
""" Deploy image and punch config driver to target """
# (TODO: add the support of multiple disks deploy)
if skipdiskcopy:
msg = ('Start guest_deploy without unpackdiskimage, guest: %(vm)s'
'os_version: %(img)s' % {'img': image_name, 'vm': userid})
LOG.info(msg)
else:
msg = ('Start to deploy image %(img)s to guest %(vm)s'
% {'img': image_name, 'vm': userid})
LOG.info(msg)
image_file = '/'.join([self._get_image_path_by_name(image_name),
CONF.zvm.user_root_vdev])
# Unpack image file to root disk
vdev = vdev or CONF.zvm.user_root_vdev
cmd = ['sudo', '/opt/zthin/bin/unpackdiskimage', userid, vdev,
image_file]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("unpackdiskimage failed with return code: %d." % rc)
err_output = ""
output_lines = output.split('\n')
for line in output_lines:
if line.__contains__("ERROR:"):
err_output += ("\\n" + line.strip())
LOG.error(err_msg + err_output)
raise exception.SDKGuestOperationError(rs=3, userid=userid,
unpack_rc=rc,
err=err_output)
# Purge guest reader to clean dirty data
rd = ("changevm %s purgerdr" % userid)
action = "purge reader of '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
# Punch transport files if specified
if transportfiles:
# Copy transport file to local
msg = ('Start to send customized file to vm %s' % userid)
LOG.info(msg)
try:
tmp_trans_dir = tempfile.mkdtemp()
local_trans = '/'.join([tmp_trans_dir,
os.path.basename(transportfiles)])
if remotehost:
cmd = ["/usr/bin/scp", "-B",
"-P", CONF.zvm.remotehost_sshd_port,
"-o StrictHostKeyChecking=no",
("%s:%s" % (remotehost, transportfiles)),
local_trans]
else:
cmd = ["/usr/bin/cp", transportfiles, local_trans]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ('copy config drive with command %(cmd)s '
'failed with output: %(res)s' %
{'cmd': str(cmd), 'res': output})
LOG.error(err_msg)
raise exception.SDKGuestOperationError(rs=4, userid=userid,
err_info=err_msg)
# Punch config drive to guest userid
rd = ("changevm %(uid)s punchfile %(file)s --class X" %
{'uid': userid, 'file': local_trans})
action = "punch config drive to userid '%s'" % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
finally:
# remove the local temp config drive folder
self._pathutils.clean_temp_folder(tmp_trans_dir)
# Authorize iucv client
client_id = None
# try to re-use previous iucv authorized userid at first
if os.path.exists(const.IUCV_AUTH_USERID_PATH):
LOG.debug("Re-use previous iucv authorized userid")
with open(const.IUCV_AUTH_USERID_PATH) as f:
client_id = f.read().strip()
self.guest_authorize_iucv_client(userid, client_id)
# Update os version in guest metadata
# TODO: we may want to append to the old metadata instead of replacing it
if skipdiskcopy:
os_version = image_name
else:
image_info = self._ImageDbOperator.image_query_record(image_name)
os_version = image_info[0]['imageosdistro']
metadata = 'os_version=%s' % os_version
self._GuestDbOperator.update_guest_by_userid(userid, meta=metadata)
if skipdiskcopy:
msg = ('guest_deploy without unpackdiskimage finish successfully, '
'guest: %(vm)s, os_version: %(img)s'
% {'img': image_name, 'vm': userid})
else:
msg = ('Deploy image %(img)s to guest %(vm)s disk %(vdev)s'
' successfully' % {'img': image_name, 'vm': userid,
'vdev': vdev})
LOG.info(msg)
def guest_deploy_rhcos(self, userid, image_name, transportfiles,
remotehost=None, vdev=None, hostname=None,
skipdiskcopy=False):
""" Deploy image"""
# (TODO: add the support of multiple disks deploy)
if transportfiles is None:
err_msg = 'Ignition file is required when deploying RHCOS image'
LOG.error(err_msg)
raise exception.SDKGuestOperationError(rs=13, userid=userid)
if skipdiskcopy:
            msg = ('Start guest_deploy without copy disk, guest: %(vm)s, '
                   'os_version: %(img)s' % {'img': image_name, 'vm': userid})
LOG.info(msg)
image_file = None
else:
msg = ('Start to deploy image %(img)s to guest %(vm)s'
% {'img': image_name, 'vm': userid})
LOG.info(msg)
image_file = '/'.join([self._get_image_path_by_name(image_name),
CONF.zvm.user_root_vdev])
# Unpack image file to root disk
vdev = vdev or CONF.zvm.user_root_vdev
tmp_trans_dir = None
try:
if remotehost:
                # download ignition file from remote host
tmp_trans_dir = tempfile.mkdtemp()
local_trans = '/'.join([tmp_trans_dir,
os.path.basename(transportfiles)])
cmd = ["/usr/bin/scp", "-B",
"-P", CONF.zvm.remotehost_sshd_port,
"-o StrictHostKeyChecking=no",
("%s:%s" % (remotehost, transportfiles)),
local_trans]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ('copy ignition file with command %(cmd)s '
'failed with output: %(res)s' %
{'cmd': str(cmd), 'res': output})
LOG.error(err_msg)
raise exception.SDKGuestOperationError(rs=4, userid=userid,
err_info=err_msg)
transportfiles = local_trans
cmd = self._get_unpackdiskimage_cmd_rhcos(userid, image_name,
transportfiles, vdev,
image_file, hostname,
skipdiskcopy)
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("unpackdiskimage failed with return code: %d." % rc)
err_output = ""
output_lines = output.split('\n')
for line in output_lines:
if line.__contains__("ERROR:"):
err_output += ("\\n" + line.strip())
LOG.error(err_msg + err_output)
raise exception.SDKGuestOperationError(rs=3, userid=userid,
unpack_rc=rc,
err=err_output)
finally:
# remove the temp ignition file
if tmp_trans_dir:
self._pathutils.clean_temp_folder(tmp_trans_dir)
# Update os version in guest metadata
        # TODO: maybe we should append to the old metadata instead of replacing it
if skipdiskcopy:
os_version = image_name
else:
os_version = self.image_get_os_distro(image_name)
metadata = 'os_version=%s' % os_version
self._GuestDbOperator.update_guest_by_userid(userid, meta=metadata)
if skipdiskcopy:
            msg = ('guest_deploy without copy disk finished successfully, '
'guest: %(vm)s, os_version: %(img)s'
% {'img': image_name, 'vm': userid})
else:
msg = ('Deploy image %(img)s to guest %(vm)s disk %(vdev)s'
' successfully' % {'img': image_name, 'vm': userid,
'vdev': vdev})
LOG.info(msg)
def guest_capture(self, userid, image_name, capture_type='rootonly',
compress_level=6):
if capture_type == "alldisks":
func = ('Capture guest with type: %s' % capture_type)
msg = ('%s is not supported in current release' % func)
LOG.error(msg)
raise exception.SDKFunctionNotImplementError(func=func,
modID='guest')
msg = ('Start to capture %(vm)s to generate image %(img)s with '
'capture type %(type)s' % {'vm': userid,
'img': image_name,
'type': capture_type})
LOG.info(msg)
self._check_power_state(userid, 'capture')
# Make sure the iucv channel is ready for communication on source vm
try:
self.execute_cmd(userid, 'pwd')
except exception.SDKSMTRequestFailed as err:
msg = ('Failed to check iucv status on capture source vm '
'%(vm)s with error %(err)s' % {'vm': userid,
'err': err.results['response'][0]})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
# Get the os version of the vm
try:
os_version = self._guest_get_os_version(userid)
except exception.SDKSMTRequestFailed as err:
            msg = ('Failed to execute command on capture source vm %(vm)s '
                   'to get os version with error %(err)s' % {'vm': userid,
'err': err.results['response'][0]})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
except Exception as err:
msg = ('Error happened when parsing os version on source vm '
'%(vm)s with error: %(err)s' % {'vm': userid,
'err': six.text_type(err)})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
msg = ('The os version of capture source vm %(vm)s is %(version)s' %
{'vm': userid,
'version': os_version})
LOG.info(msg)
# Find the root device according to the capture type
try:
capture_devices = self._get_capture_devices(userid, capture_type)
except exception.SDKSMTRequestFailed as err:
msg = ('Failed to execute command on source vm %(vm)s to get the '
'devices for capture with error %(err)s' % {'vm': userid,
'err': err.results['response'][0]})
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
        except exception.SDKGuestOperationError:
            raise
        except Exception as err:
            msg = ('Internal error happened when getting the devices for '
                   'capture on source vm %(vm)s with error %(err)s' %
                   {'vm': userid,
                    'err': six.text_type(err)})
            LOG.error(msg)
            raise exception.SDKGuestOperationError(rs=5, userid=userid,
                                                   msg=msg)
# Shutdown the vm before capture
self.guest_softstop(userid)
# Prepare directory for writing image file
image_temp_dir = '/'.join((CONF.image.sdk_image_repository,
const.IMAGE_TYPE['CAPTURE'],
os_version,
image_name))
self._pathutils.mkdir_if_not_exist(image_temp_dir)
# Call creatediskimage to capture a vm to generate an image
# TODO:(nafei) to support multiple disk capture
vdev = capture_devices[0]
msg = ('Found the device %(vdev)s of %(vm)s for capture' %
{'vdev': vdev, 'vm': userid})
LOG.info(msg)
image_file_name = vdev
image_file_path = '/'.join((image_temp_dir, image_file_name))
cmd = ['sudo', '/opt/zthin/bin/creatediskimage', userid, vdev,
image_file_path, '--compression', str(compress_level)]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("creatediskimage failed with return code: %d." % rc)
err_output = ""
output_lines = output.split('\n')
for line in output_lines:
if line.__contains__("ERROR:"):
err_output += ("\\n" + line.strip())
LOG.error(err_msg + err_output)
self._pathutils.clean_temp_folder(image_temp_dir)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=err_output)
# Move the generated image to netboot folder
image_final_dir = '/'.join([CONF.image.sdk_image_repository,
const.IMAGE_TYPE['DEPLOY'],
os_version,
image_name])
image_final_path = '/'.join((image_final_dir,
image_file_name))
self._pathutils.mkdir_if_not_exist(image_final_dir)
cmd = ['mv', image_file_path, image_final_path]
with zvmutils.expect_and_reraise_internal_error(modID='guest'):
(rc, output) = zvmutils.execute(cmd)
if rc != 0:
err_msg = ("move image file from staging to netboot "
"folder failed with return code: %d." % rc)
LOG.error(err_msg)
self._pathutils.clean_temp_folder(image_temp_dir)
self._pathutils.clean_temp_folder(image_final_dir)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
err=err_msg)
self._pathutils.clean_temp_folder(image_temp_dir)
msg = ('Updating the metadata for captured image %s ' % image_name)
LOG.info(msg)
# Get md5sum of image
real_md5sum = self._get_md5sum(image_final_path)
# Get disk_size_units of image
disk_size_units = self._get_disk_size_units(image_final_path)
# Get the image physical size
image_size = self._get_image_size(image_final_path)
# Create the image record in image database
self._ImageDbOperator.image_add_record(image_name, os_version,
real_md5sum, disk_size_units, image_size,
capture_type)
LOG.info('Image %s is captured and imported to image repository '
'successfully' % image_name)
def _guest_get_os_version(self, userid):
os_version = ''
release_file = self.execute_cmd(userid, 'ls /etc/*-release')
if '/etc/os-release' in release_file:
# Parse os-release file, part of the output looks like:
# NAME="Red Hat Enterprise Linux Server"
# ID="rhel"
# VERSION_ID="7.0"
release_info = self.execute_cmd(userid, 'cat /etc/os-release')
release_dict = {}
for item in release_info:
if item:
release_dict[item.split('=')[0]] = item.split('=')[1]
distro = release_dict['ID']
version = release_dict['VERSION_ID']
if '"' in distro:
distro = eval(distro)
if '"' in version:
version = eval(version)
os_version = '%s%s' % (distro, version)
return os_version
elif '/etc/redhat-release' in release_file:
# The output looks like:
# "Red Hat Enterprise Linux Server release 6.7 (Santiago)"
distro = 'rhel'
release_info = self.execute_cmd(userid, 'cat /etc/redhat-release')
distro_version = release_info[0].split()[6]
os_version = ''.join((distro, distro_version))
return os_version
elif '/etc/SuSE-release' in release_file:
# The output for this file looks like:
# SUSE Linux Enterprise Server 11 (s390x)
# VERSION = 11
# PATCHLEVEL = 3
distro = 'sles'
release_info = self.execute_cmd(userid, 'cat /etc/SuSE-release')
LOG.debug('OS release info is %s' % release_info)
release_version = '.'.join((release_info[1].split('=')[1].strip(),
release_info[2].split('=')[1].strip()))
os_version = ''.join((distro, release_version))
return os_version
elif '/etc/system-release' in release_file:
            # For some rhel6.7 systems, only the system-release file exists and
# the output looks like:
# "Red Hat Enterprise Linux Server release 6.7 (Santiago)"
distro = 'rhel'
release_info = self.execute_cmd(userid, 'cat /etc/system-release')
distro_version = release_info[0].split()[6]
os_version = ''.join((distro, distro_version))
return os_version
def _get_capture_devices(self, userid, capture_type='rootonly'):
capture_devices = []
if capture_type == 'rootonly':
# Parse the /proc/cmdline to get root devices
proc_cmdline = self.execute_cmd(userid, 'cat /proc/cmdline '
'| tr " " "\\n" | grep -a "^root=" | cut -c6-')
root_device_info = proc_cmdline[0]
if not root_device_info:
msg = ('Unable to get useful info from /proc/cmdline to '
'locate the device associated with the root directory '
'on capture source vm %s' % userid)
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
else:
if 'UUID=' in root_device_info:
uuid = root_device_info.split()[0].split('=')[1]
root_device = '/'.join(('/dev/disk/by-uuid', uuid))
elif 'LABEL=' in root_device_info:
label = root_device_info.split()[0].split('=')[1]
root_device = '/'.join(('/dev/disk/by-label', label))
elif 'mapper' in root_device_info:
msg = ('Capturing a disk with root filesystem on logical'
' volume is not supported')
raise exception.SDKGuestOperationError(rs=5, userid=userid,
msg=msg)
else:
root_device = root_device_info
root_device_node = self.execute_cmd(userid, 'readlink -f %s' %
root_device)[0]
# Get device node vdev by node name
cmd = ('cat /proc/dasd/devices | grep -i "is %s" ' %
root_device_node.split('/')[-1].rstrip(string.digits))
result = self.execute_cmd(userid, cmd)[0]
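                # The first column of /proc/dasd/devices is the device
                # bus-ID (typically 0.0.vdev), so characters 4:8 give the
                # 4-digit vdev of the root device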
root_device_vdev = result.split()[0][4:8]
capture_devices.append(root_device_vdev)
return capture_devices
else:
# For sysclone, parse the user directory entry to get the devices
# for capture, leave for future
pass
def _get_unpackdiskimage_cmd_rhcos(self, userid, image_name,
transportfiles=None, vdev=None,
image_file=None, hostname=None,
skipdiskcopy=False):
if skipdiskcopy:
os_version = image_name
image_disk_type = 'SCSI'
else:
os_version = self.image_get_os_distro(image_name)
# Query image disk type
image_disk_type = self._get_image_disk_type(image_name)
if image_disk_type is None:
err_msg = ("failed to get image disk type for "
"image '%(image_name)s'."
% {'image_name': image_name})
raise exception.SDKGuestOperationError(rs=12, userid=userid,
err=err_msg)
try:
# Query vm's disk pool type and image disk type
from zvmsdk import dist
_dist_manager = dist.LinuxDistManager()
linuxdist = _dist_manager.get_linux_dist(os_version)()
            # Read the coreos fixed ip parameter from the tempfile
fixed_ip_parameter = linuxdist.read_coreos_parameter(userid)
except Exception as err:
err_msg = ("failed to read coreos fixed ip "
"parameters for userid '%(userid)s',"
"error: %(err)s."
% {'userid': userid, 'err': err})
raise exception.SDKGuestOperationError(rs=12, userid=userid,
err=err_msg)
if fixed_ip_parameter is None:
err_msg = ("coreos fixed ip parameters don't exist.")
raise exception.SDKGuestOperationError(rs=12, userid=userid,
err=err_msg)
if hostname:
            # use the display name (hostname) in place of the userid
fixed_ip_parameter = fixed_ip_parameter.replace(userid.upper(),
hostname)
# read nic device id and change it into the form like
# "0.0.1000,0.0.1001,0.0.1002"
nic_id = self._generate_increasing_nic_id(
fixed_ip_parameter.split(":")[5].replace("enc", ""))
if image_disk_type == 'SCSI':
(wwpn, lun) = self._get_wwpn_lun(userid)
if wwpn is None or lun is None:
err_msg = ("wwpn and lun is required for FCP devices,"
" please set LOADDEV for userid %s" % userid)
raise exception.SDKGuestOperationError(rs=14, userid=userid,
msg=err_msg)
wwpn = '0x' + wwpn
lun = '0x' + lun
if skipdiskcopy:
return ['sudo', '/opt/zthin/bin/unpackdiskimage', vdev,
wwpn, lun, transportfiles, nic_id, fixed_ip_parameter]
else:
return ['sudo', '/opt/zthin/bin/unpackdiskimage', vdev,
wwpn, lun, image_file, transportfiles,
image_disk_type, nic_id, fixed_ip_parameter]
else:
return ['sudo', '/opt/zthin/bin/unpackdiskimage', userid, vdev,
image_file, transportfiles, image_disk_type, nic_id,
fixed_ip_parameter]
def grant_user_to_vswitch(self, vswitch_name, userid):
"""Set vswitch to grant user."""
smt_userid = zvmutils.get_smt_userid()
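        # The assembled request string is e.g. (illustrative values):
        #   SMAPI SMTUSER API Virtual_Network_Vswitch_Set_Extended --operands
        #   -k switch_name=VSW1 -k grant_userid=LINUX001 -k persist=YES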
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Vswitch_Set_Extended' % smt_userid,
"--operands",
"-k switch_name=%s" % vswitch_name,
"-k grant_userid=%s" % userid,
"-k persist=YES"))
try:
self._request(requestData)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to grant user %s to vswitch %s, error: %s"
% (userid, vswitch_name, err.format_message()))
self._set_vswitch_exception(err, vswitch_name)
def _set_vswitch_exception(self, error, switch_name):
if ((error.results['rc'] == 212) and (error.results['rs'] == 40)):
obj_desc = "Vswitch %s" % switch_name
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 396) and (error.results['rs'] == 2846)):
errmsg = ("Operation is not allowed for a "
"VLAN UNAWARE vswitch")
raise exception.SDKConflictError(modID='network', rs=5,
vsw=switch_name,
msg=errmsg)
elif ((error.results['rc'] == 396) and
((error.results['rs'] == 2838) or
(error.results['rs'] == 2853) or
(error.results['rs'] == 2856) or
(error.results['rs'] == 2858) or
(error.results['rs'] == 3022) or
(error.results['rs'] == 3033))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=5,
vsw=switch_name,
msg=errmsg)
else:
raise error
def revoke_user_from_vswitch(self, vswitch_name, userid):
"""Revoke user for vswitch."""
smt_userid = zvmutils.get_smt_userid()
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Vswitch_Set_Extended' % smt_userid,
"--operands",
"-k switch_name=%s" % vswitch_name,
"-k revoke_userid=%s" % userid,
"-k persist=YES"))
try:
self._request(requestData)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to revoke user %s from vswitch %s, error: %s"
% (userid, vswitch_name, err.format_message()))
self._set_vswitch_exception(err, vswitch_name)
def image_performance_query(self, uid_list):
"""Call Image_Performance_Query to get guest current status.
:uid_list: A list of zvm userids to be queried
"""
if uid_list == []:
return {}
if not isinstance(uid_list, list):
uid_list = [uid_list]
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Image_Performance_Query" % smt_userid,
"--operands",
'-T "%s"' % (' '.join(uid_list)),
"-c %d" % len(uid_list)))
action = "get performance info of userid '%s'" % str(uid_list)
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
ipq_kws = {
'userid': "Guest name:",
'guest_cpus': "Guest CPUs:",
'used_cpu_time': "Used CPU time:",
'elapsed_cpu_time': "Elapsed time:",
'min_cpu_count': "Minimum CPU count:",
'max_cpu_limit': "Max CPU limit:",
'samples_cpu_in_use': "Samples CPU in use:",
'samples_cpu_delay': "Samples CPU delay:",
'used_memory': "Used memory:",
'max_memory': "Max memory:",
'min_memory': "Minimum memory:",
'shared_memory': "Shared memory:",
}
pi_dict = {}
pi = {}
rpi_list = ('\n'.join(results['response'])).split("\n\n")
for rpi in rpi_list:
try:
pi = zvmutils.translate_response_to_dict(rpi, ipq_kws)
except exception.SDKInternalError as err:
emsg = err.format_message()
                # when there is only one userid queried and this userid is
                # in 'off' state, the smcli will only return the queried
                # userid number; no valid performance info is returned.
if(emsg.__contains__("No value matched with keywords.")):
continue
else:
raise err
for k, v in pi.items():
pi[k] = v.strip('" ')
if pi.get('userid') is not None:
pi_dict[pi['userid']] = pi
return pi_dict
def system_image_performance_query(self, namelist):
"""Call System_Image_Performance_Query to get guest current status.
:namelist: A namelist that defined in smapi namelist file.
"""
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API System_Image_Performance_Query" % smt_userid,
"--operands -T %s" % namelist))
action = "get performance info of namelist '%s'" % namelist
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
ipq_kws = {
'userid': "Guest name:",
'guest_cpus': "Guest CPUs:",
'used_cpu_time': "Used CPU time:",
'elapsed_cpu_time': "Elapsed time:",
'min_cpu_count': "Minimum CPU count:",
'max_cpu_limit': "Max CPU limit:",
'samples_cpu_in_use': "Samples CPU in use:",
'samples_cpu_delay': "Samples CPU delay:",
'used_memory': "Used memory:",
'max_memory': "Max memory:",
'min_memory': "Minimum memory:",
'shared_memory': "Shared memory:",
}
pi_dict = {}
pi = {}
rpi_list = ('\n'.join(results['response'])).split("\n\n")
for rpi in rpi_list:
try:
pi = zvmutils.translate_response_to_dict(rpi, ipq_kws)
except exception.SDKInternalError as err:
emsg = err.format_message()
                # when there is only one userid queried and this userid is
                # in 'off' state, the smcli will only return the queried
                # userid number; no valid performance info is returned.
if(emsg.__contains__("No value matched with keywords.")):
continue
else:
raise err
for k, v in pi.items():
pi[k] = v.strip('" ')
if pi.get('userid') is not None:
pi_dict[pi['userid']] = pi
return pi_dict
def virtual_network_vswitch_query_byte_stats(self):
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Query_Byte_Stats" %
smt_userid,
"--operands",
'-T "%s"' % smt_userid,
'-k "switch_name=*"'
))
action = "query vswitch usage info"
with zvmutils.log_and_reraise_smt_request_failed(action):
results = self._request(rd)
return self._parse_vswitch_inspect_data(results['response'])
def get_host_info(self):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("getHost general")
host_info = zvmutils.translate_response_to_dict(
'\n'.join(results['response']), const.RINV_HOST_KEYWORDS)
return host_info
def get_diskpool_info(self, pool):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("getHost diskpoolspace %s" % pool)
dp_info = zvmutils.translate_response_to_dict(
'\n'.join(results['response']), const.DISKPOOL_KEYWORDS)
return dp_info
def get_vswitch_list(self):
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Query" % smt_userid,
"--operands",
"-s \'*\'"))
try:
result = self._request(rd)
except exception.SDKSMTRequestFailed as err:
if ((err.results['rc'] == 212) and (err.results['rs'] == 40)):
LOG.warning("No Virtual switch in the host")
return []
else:
LOG.error("Failed to get vswitch list, error: %s" %
err.format_message())
raise
with zvmutils.expect_invalid_resp_data():
if (not result['response'] or not result['response'][0]):
return []
else:
data = '\n'.join([s for s in result['response']
if isinstance(s, six.string_types)])
output = re.findall('VSWITCH: Name: (.*)', data)
return output
def set_vswitch_port_vlan_id(self, vswitch_name, userid, vlan_id):
smt_userid = zvmutils.get_smt_userid()
msg = ('Start to set VLAN ID %(vid)s on vswitch %(vsw)s '
'for guest %(vm)s'
% {'vid': vlan_id, 'vsw': vswitch_name, 'vm': userid})
LOG.info(msg)
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Set_Extended" %
smt_userid,
"--operands",
"-k grant_userid=%s" % userid,
"-k switch_name=%s" % vswitch_name,
"-k user_vlan_id=%s" % vlan_id,
"-k persist=YES"))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to set VLAN ID %s on vswitch %s for user %s, "
"error: %s" %
(vlan_id, vswitch_name, userid, err.format_message()))
self._set_vswitch_exception(err, vswitch_name)
msg = ('Set VLAN ID %(vid)s on vswitch %(vsw)s '
'for guest %(vm)s successfully'
% {'vid': vlan_id, 'vsw': vswitch_name, 'vm': userid})
LOG.info(msg)
def add_vswitch(self, name, rdev=None, controller='*',
connection='CONNECT', network_type='ETHERNET',
router="NONROUTER", vid='UNAWARE', port_type='ACCESS',
gvrp='GVRP', queue_mem=8, native_vid=1, persist=True):
smt_userid = zvmutils.get_smt_userid()
msg = ('Start to create vswitch %s' % name)
LOG.info(msg)
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Create_Extended" %
smt_userid,
"--operands",
'-k switch_name=%s' % name))
if rdev is not None:
rd += " -k real_device_address" +\
"=\'%s\'" % rdev.replace(',', ' ')
if controller != '*':
rd += " -k controller_name=%s" % controller
rd = ' '.join((rd,
"-k connection_value=%s" % connection,
"-k queue_memory_limit=%s" % queue_mem,
"-k transport_type=%s" % network_type,
"-k vlan_id=%s" % vid,
"-k persist=%s" % (persist and 'YES' or 'NO')))
        # Only if the vswitch is VLAN aware are port_type, gvrp and
        # native_vid allowed to be specified
if isinstance(vid, int) or vid.upper() != 'UNAWARE':
rd = ' '.join((rd,
"-k port_type=%s" % port_type,
"-k gvrp_value=%s" % gvrp,
"-k native_vlanid=%s" % native_vid))
if router is not None:
rd += " -k routing_value=%s" % router
msg = ('Start to create vswitch %s' % name)
LOG.info(msg)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to create vswitch %s, error: %s" %
(name, err.format_message()))
raise
msg = ('Create vswitch %s successfully' % name)
LOG.info(msg)
def set_vswitch(self, switch_name, **kwargs):
"""Set vswitch"""
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Set_Extended" %
smt_userid,
"--operands",
"-k switch_name=%s" % switch_name))
for k, v in kwargs.items():
rd = ' '.join((rd,
"-k %(key)s=\'%(value)s\'" %
{'key': k, 'value': v}))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to set vswitch %s, error: %s" %
(switch_name, err.format_message()))
self._set_vswitch_exception(err, switch_name)
def delete_vswitch(self, switch_name, persist=True):
smt_userid = zvmutils.get_smt_userid()
msg = ('Start to delete vswitch %s' % switch_name)
LOG.info(msg)
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Delete_Extended" %
smt_userid,
"--operands",
"-k switch_name=%s" % switch_name,
"-k persist=%s" % (persist and 'YES' or 'NO')))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
results = err.results
if ((results['rc'] == 212) and
(results['rs'] == 40)):
LOG.warning("Vswitch %s does not exist", switch_name)
return
else:
LOG.error("Failed to delete vswitch %s, error: %s" %
(switch_name, err.format_message()))
raise
msg = ('Delete vswitch %s successfully' % switch_name)
LOG.info(msg)
def create_nic(self, userid, vdev=None, nic_id=None,
mac_addr=None, active=False):
nic_vdev = self._get_available_vdev(userid, vdev=vdev)
LOG.debug('Nic attributes: vdev is %(vdev)s, '
'ID is %(id)s, address is %(address)s',
{'vdev': nic_vdev,
'id': nic_id or 'not specified',
'address': mac_addr or 'not specified'})
self._create_nic(userid, nic_vdev, nic_id=nic_id,
mac_addr=mac_addr, active=active)
return nic_vdev
def _create_nic_inactive_exception(self, error, userid, vdev):
if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=7,
vdev=vdev, userid=userid,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKConflictError(modID='network', rs=7,
vdev=vdev, userid=userid,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 4)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=6,
vdev=vdev, userid=userid,
msg=errmsg)
else:
raise error
def _create_nic_active_exception(self, error, userid, vdev):
if (((error.results['rc'] == 204) and (error.results['rs'] == 4)) or
((error.results['rc'] == 204) and (error.results['rs'] == 28))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=6,
vdev=vdev, userid=userid,
msg=errmsg)
elif ((error.results['rc'] == 396) and
(error.results['rs'] == 2797)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=6,
vdev=vdev, userid=userid,
msg=errmsg)
else:
raise error
def _is_active(self, userid):
# Get the vm status
power_state = self.get_power_state(userid)
if power_state == 'off':
LOG.error('The vm %s is powered off, '
'active operation is not allowed' % userid)
raise exception.SDKConflictError(modID='network', rs=1,
userid=userid)
def _create_nic(self, userid, vdev, nic_id=None, mac_addr=None,
active=False):
if active:
self._is_active(userid)
msg = ('Start to create nic device %(vdev)s for guest %(vm)s'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Adapter_Create_Extended_DM' %
userid,
"--operands",
"-k image_device_number=%s" % vdev,
"-k adapter_type=QDIO"))
if mac_addr is not None:
mac = ''.join(mac_addr.split(':'))[6:]
requestData += ' -k mac_id=%s' % mac
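        # Retry up to four times with increasing delays when SMAPI reports
        # the guest directory entry is locked (rc=400, rs=12); the final -1
        # is a sentinel that is never slept on because the retry limit hits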
retry = 1
for secs in [1, 3, 5, 8, -1]:
try:
self._request(requestData)
break
except exception.SDKSMTRequestFailed as err:
if (err.results['rc'] == 400 and
err.results['rs'] == 12 and
retry < 5):
LOG.info("The VM is locked, will retry")
time.sleep(secs)
retry += 1
else:
LOG.error("Failed to create nic %s for user %s in "
"the guest's user direct, error: %s" %
(vdev, userid, err.format_message()))
self._create_nic_inactive_exception(err, userid, vdev)
if active:
if mac_addr is not None:
LOG.warning("Ignore the mac address %s when "
"adding nic on an active system" % mac_addr)
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Adapter_Create_Extended' %
userid,
"--operands",
"-k image_device_number=%s" % vdev,
"-k adapter_type=QDIO"))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err1:
msg1 = err1.format_message()
persist_OK = True
requestData = ' '.join((
'SMAPI %s API Virtual_Network_Adapter_Delete_DM' % userid,
"--operands",
'-v %s' % vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err2:
results = err2.results
msg2 = err2.format_message()
if ((results['rc'] == 404) and
(results['rs'] == 8)):
persist_OK = True
else:
persist_OK = False
if persist_OK:
self._create_nic_active_exception(err1, userid, vdev)
else:
raise exception.SDKNetworkOperationError(rs=4,
nic=vdev, userid=userid,
create_err=msg1, revoke_err=msg2)
self._NetDbOperator.switch_add_record(userid, vdev, port=nic_id)
msg = ('Create nic device %(vdev)s for guest %(vm)s successfully'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
def get_user_direct(self, userid):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("getvm %s directory" % userid)
return results.get('response', [])
def get_all_user_direct(self):
with zvmutils.log_and_reraise_smt_request_failed():
results = self._request("getvm alldirectory")
return results.get('response', [])
def _delete_nic_active_exception(self, error, userid, vdev):
if ((error.results['rc'] == 204) and (error.results['rs'] == 28)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=8,
vdev=vdev, userid=userid,
msg=errmsg)
else:
raise error
def _delete_nic_inactive_exception(self, error, userid, vdev):
if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=9,
vdev=vdev, userid=userid,
obj=obj_desc)
else:
raise error
def delete_nic(self, userid, vdev, active=False):
if active:
self._is_active(userid)
vdev_exist = False
nic_list = self._NetDbOperator.switch_select_record_for_userid(userid)
for p in nic_list:
if (int(p['interface'], 16) == int(vdev, 16)):
vdev_exist = True
vdev_info = p
break
if not vdev_exist:
            # Device has already been removed from the user direct
LOG.warning("Virtual device %s does not exist in the switch table",
vdev)
if active:
try:
resp = self.execute_cmd(userid, 'vmcp q %s' % vdev)
nic_info = "%s ON NIC" % vdev.zfill(4).upper()
osa_info = "%s ON OSA" % vdev.zfill(4).upper()
if nic_info in resp[0]:
pass
elif osa_info in resp[0]:
self._undedicate_nic(userid, vdev, active=active,
del_active_only=True)
return
else:
LOG.warning("Device %s of guest %s is not "
"network adapter" % (vdev, userid))
return
except exception.SDKSMTRequestFailed as err:
emsg = err.format_message()
ignored_msg = ('Device %s does not exist'
% vdev.zfill(4).upper())
if (emsg.__contains__(ignored_msg)):
LOG.warning("Virtual device %s does not exist for "
"active guest %s" % (vdev, userid))
return
else:
raise
else:
return
else:
            # Device has not been removed from the user direct,
# check whether it is related to a dedicated OSA device
if ((vdev_info["comments"] is not None) and
(vdev_info["comments"].__contains__('OSA='))):
self._undedicate_nic(userid, vdev, active=active)
return
msg = ('Start to delete nic device %(vdev)s for guest %(vm)s'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
if vdev_exist:
rd = ' '.join((
"SMAPI %s API Virtual_Network_Adapter_Delete_DM" %
userid,
"--operands",
'-v %s' % vdev))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
results = err.results
emsg = err.format_message()
if ((results['rc'] == 404) and
(results['rs'] == 8)):
LOG.warning("Virtual device %s does not exist in "
"the guest's user direct", vdev)
else:
LOG.error("Failed to delete nic %s for %s in "
"the guest's user direct, error: %s" %
(vdev, userid, emsg))
self._delete_nic_inactive_exception(err, userid, vdev)
self._NetDbOperator.switch_delete_record_for_nic(userid, vdev)
if active:
rd = ' '.join((
"SMAPI %s API Virtual_Network_Adapter_Delete" %
userid,
"--operands",
'-v %s' % vdev))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
results = err.results
emsg = err.format_message()
if ((results['rc'] == 204) and
(results['rs'] == 8)):
LOG.warning("Virtual device %s does not exist on "
"the active guest system", vdev)
else:
LOG.error("Failed to delete nic %s for %s on "
"the active guest system, error: %s" %
(vdev, userid, emsg))
self._delete_nic_active_exception(err, userid, vdev)
msg = ('Delete nic device %(vdev)s for guest %(vm)s successfully'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
def _couple_active_exception(self, error, userid, vdev, vswitch):
if ((error.results['rc'] == 212) and
((error.results['rs'] == 28) or
(error.results['rs'] == 8))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=10,
vdev=vdev, userid=userid,
vsw=vswitch,
msg=errmsg)
elif ((error.results['rc'] == 212) and (error.results['rs'] == 40)):
obj_desc = "Vswitch %s" % vswitch
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 204) and (error.results['rs'] == 8)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 396) and
((error.results['rs'] == 2788) or
(error.results['rs'] == 2848) or
(error.results['rs'] == 3034) or
(error.results['rs'] == 6011))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=10,
vdev=vdev, userid=userid,
vsw=vswitch,
msg=errmsg)
else:
raise error
def _couple_inactive_exception(self, error, userid, vdev, vswitch):
if ((error.results['rc'] == 412) and (error.results['rs'] == 28)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=10,
vdev=vdev, userid=userid,
vsw=vswitch,
msg=errmsg)
elif ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=11,
vdev=vdev, userid=userid,
vsw=vswitch,
obj=obj_desc)
elif ((error.results['rc'] == 400) and (error.results['rs'] == 4)):
obj_desc = "Guest %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKConflictError(modID='network', rs=11,
vdev=vdev, userid=userid,
vsw=vswitch,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 8)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
else:
raise error
def _couple_nic(self, userid, vdev, vswitch_name,
active=False):
"""Couple NIC to vswitch by adding vswitch into user direct."""
if active:
self._is_active(userid)
requestData = ' '.join((
'SMAPI %s' % userid,
'API Virtual_Network_Adapter_Connect_Vswitch',
"--operands",
"-v %s" % vdev,
"-n %s" % vswitch_name))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err1:
results1 = err1.results
msg1 = err1.format_message()
if ((results1 is not None) and
(results1['rc'] == 204) and
(results1['rs'] == 20)):
LOG.warning("Virtual device %s already connected "
"on the active guest system", vdev)
else:
persist_OK = True
requestData = ' '.join((
'SMAPI %s' % userid,
'API Virtual_Network_Adapter_Disconnect_DM',
"--operands",
'-v %s' % vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err2:
results2 = err2.results
msg2 = err2.format_message()
if ((results2 is not None) and
(results2['rc'] == 212) and
(results2['rs'] == 32)):
persist_OK = True
else:
persist_OK = False
if persist_OK:
self._couple_active_exception(err1, userid, vdev,
vswitch_name)
else:
raise exception.SDKNetworkOperationError(rs=3,
nic=vdev, vswitch=vswitch_name,
couple_err=msg1, revoke_err=msg2)
"""Update information in switch table."""
self._NetDbOperator.switch_update_record_with_switch(userid, vdev,
vswitch_name)
msg = ('Couple nic device %(vdev)s of guest %(vm)s '
'with vswitch %(vsw)s successfully'
% {'vdev': vdev, 'vm': userid, 'vsw': vswitch_name})
LOG.info(msg)
def couple_nic_to_vswitch(self, userid, nic_vdev,
vswitch_name, active=False, vlan_id=-1):
"""Couple nic to vswitch."""
if active:
msg = ("both in the user direct of guest %s and on "
"the active guest system" % userid)
else:
msg = "in the user direct of guest %s" % userid
LOG.debug("Connect nic %s to switch %s %s",
nic_vdev, vswitch_name, msg)
        # Previously we used Virtual_Network_Adapter_Connect_Vswitch_DM,
        # but because SMAPI provides no way to add a VLAN ID that way,
        # we have to build the user direct entry ourselves
msg = ('Start to couple nic device %(vdev)s of guest %(vm)s '
'with vswitch %(vsw)s with vlan %(vlan_id)s:'
% {'vdev': nic_vdev, 'vm': userid, 'vsw': vswitch_name,
'vlan_id': vlan_id})
LOG.info(msg)
user_direct = self.get_user_direct(userid)
new_user_direct = []
nicdef = "NICDEF %s" % nic_vdev
for ent in user_direct:
if len(ent) > 0:
new_user_direct.append(ent)
if ent.upper().startswith(nicdef):
# vlan_id < 0 means no VLAN ID given
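                    # Append an extra NICDEF statement right after the
                    # original one, e.g. (illustrative values):
                    #   NICDEF 1000 LAN SYSTEM VSW1 VLAN 100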
v = nicdef
if vlan_id < 0:
v += " LAN SYSTEM %s" % vswitch_name
else:
v += " LAN SYSTEM %s VLAN %s" % (vswitch_name, vlan_id)
new_user_direct.append(v)
try:
self._lock_user_direct(userid)
except exception.SDKSMTRequestFailed as e:
raise exception.SDKGuestOperationError(rs=9, userid=userid,
err=e.format_message())
# Replace user directory
try:
self._replace_user_direct(userid, new_user_direct)
except exception.SDKSMTRequestFailed as e:
rd = ("SMAPI %s API Image_Unlock_DM " % userid)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err2:
# ignore 'not locked' error
if ((err2.results['rc'] == 400) and (
err2.results['rs'] == 24)):
LOG.debug("Guest '%s' unlocked successfully." % userid)
pass
else:
# just print error and ignore this unlock error
msg = ("Unlock definition of guest '%s' failed "
"with SMT error: %s" %
(userid, err2.format_message()))
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=10,
userid=userid,
err=e.format_message())
self._couple_nic(userid, nic_vdev, vswitch_name, active=active)
def _uncouple_active_exception(self, error, userid, vdev):
if ((error.results['rc'] == 204) and (error.results['rs'] == 8)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 204) and (error.results['rs'] == 28)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=12,
vdev=vdev, userid=userid,
msg=errmsg)
else:
raise error
def _uncouple_inactive_exception(self, error, userid, vdev):
if ((error.results['rc'] == 404) and (error.results['rs'] == 8)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 400) and (error.results['rs'] == 4)):
obj_desc = "Guest %s" % vdev
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
elif ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=13,
vdev=vdev, userid=userid,
obj=obj_desc)
else:
raise error
def _uncouple_nic(self, userid, vdev, active=False):
"""Uncouple NIC from vswitch"""
if active:
self._is_active(userid)
msg = ('Start to uncouple nic device %(vdev)s of guest %(vm)s'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
requestData = ' '.join((
'SMAPI %s' % userid,
"API Virtual_Network_Adapter_Disconnect_DM",
"--operands",
"-v %s" % vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err:
results = err.results
emsg = err.format_message()
if ((results is not None) and
(results['rc'] == 212) and
(results['rs'] == 32)):
LOG.warning("Virtual device %s is already disconnected "
"in the guest's user direct", vdev)
else:
LOG.error("Failed to uncouple nic %s in the guest's user "
"direct, error: %s" % (vdev, emsg))
self._uncouple_inactive_exception(err, userid, vdev)
"""Update information in switch table."""
self._NetDbOperator.switch_update_record_with_switch(userid, vdev,
None)
        # the instance must be active, or this call will fail
if active:
requestData = ' '.join((
'SMAPI %s' % userid,
'API Virtual_Network_Adapter_Disconnect',
"--operands",
"-v %s" % vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err:
results = err.results
emsg = err.format_message()
if ((results is not None) and
(results['rc'] == 204) and
(results['rs'] == 48)):
LOG.warning("Virtual device %s is already "
"disconnected on the active "
"guest system", vdev)
else:
LOG.error("Failed to uncouple nic %s on the active "
"guest system, error: %s" % (vdev, emsg))
self._uncouple_active_exception(err, userid, vdev)
msg = ('Uncouple nic device %(vdev)s of guest %(vm)s successfully'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
def uncouple_nic_from_vswitch(self, userid, nic_vdev,
active=False):
if active:
msg = ("both in the user direct of guest %s and on "
"the active guest system" % userid)
else:
msg = "in the user direct of guest %s" % userid
LOG.debug("Disconnect nic %s with network %s",
nic_vdev, msg)
self._uncouple_nic(userid, nic_vdev, active=active)
def delete_userid(self, userid):
rd = ' '.join(('deletevm', userid, 'directory'))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
if err.results['rc'] == 400 and err.results['rs'] == 4:
# guest vm definition not found
LOG.debug("The guest %s does not exist." % userid)
return
            # ignore "delete VM not finished" error
if err.results['rc'] == 596 and err.results['rs'] == 6831:
# 596/6831 means delete VM not finished yet
LOG.warning("The guest %s deleted with 596/6831" % userid)
return
# ignore delete VM with VDISK format error
# DirMaint does not support formatting TDISK or VDISK extents.
if err.results['rc'] == 596 and err.results['rs'] == 3543:
LOG.debug("The guest %s deleted with 596/3543" % userid)
return
msg = "SMT error: %s" % err.format_message()
raise exception.SDKSMTRequestFailed(err.results, msg)
def delete_vm(self, userid):
self.delete_userid(userid)
# remove userid from smapi namelist
self.namelist_remove(zvmutils.get_namelist(), userid)
# revoke userid from vswitch
action = "revoke id %s authority from vswitch" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
switch_info = self._NetDbOperator.switch_select_record_for_userid(
userid)
switch_list = set()
for item in switch_info:
switch_list.add(item['switch'])
for item in switch_list:
if item is not None:
self.revoke_user_from_vswitch(item, userid)
# cleanup db record from network table
action = "delete network record for user %s" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._NetDbOperator.switch_delete_record_for_userid(userid)
# TODO: cleanup db record from volume table
pass
# cleanup persistent folder for guest
self._pathutils.remove_guest_path(userid)
# cleanup db record from guest table
action = "delete guest %s from database" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._GuestDbOperator.delete_guest_by_userid(userid)
def execute_cmd(self, userid, cmdStr):
""""cmdVM."""
requestData = 'cmdVM ' + userid + ' CMD \'' + cmdStr + '\''
with zvmutils.log_and_reraise_smt_request_failed(action='execute '
'command on vm via iucv channel'):
results = self._request(requestData)
ret = results['response']
return ret
def execute_cmd_direct(self, userid, cmdStr):
""""cmdVM."""
requestData = 'cmdVM ' + userid + ' CMD \'' + cmdStr + '\''
results = self._smt.request(requestData)
return results
def image_import(self, image_name, url, image_meta, remote_host=None):
"""Import the image specified in url to SDK image repository, and
create a record in image db, the imported images are located in
image_repository/prov_method/os_version/image_name/, for example,
/opt/sdk/images/netboot/rhel7.2/90685d2b-167bimage/0100"""
image_info = []
try:
image_info = self._ImageDbOperator.image_query_record(image_name)
except exception.SDKObjectNotExistError:
msg = ("The image record %s doens't exist in SDK image datebase,"
" will import the image and create record now" % image_name)
LOG.info(msg)
        # Ensure the specified image does not already exist in the image DB
if image_info:
msg = ("The image name %s has already exist in SDK image "
"database, please check if they are same image or consider"
" to use a different image name for import" % image_name)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=13, img=image_name)
try:
image_os_version = image_meta['os_version'].lower()
target_folder = self._pathutils.create_import_image_repository(
image_os_version, const.IMAGE_TYPE['DEPLOY'],
image_name)
except Exception as err:
msg = ('Failed to create repository to store image %(img)s with '
                   'error: %(err)s, please make sure there is enough space '
                   'on the zvmsdk server and proper permission to create the '
'repository' % {'img': image_name,
'err': six.text_type(err)})
LOG.error(msg)
raise exception.SDKImageOperationError(rs=14, msg=msg)
if self.is_rhcos(image_os_version):
image_disk_type = image_meta.get('disk_type')
if ((image_disk_type is None) or
((image_disk_type.upper() != "DASD" and
image_disk_type.upper() != "SCSI"))):
msg = ('Disk type is required for RHCOS image import, '
'the value should be DASD or SCSI')
LOG.error(msg)
raise exception.SDKImageOperationError(rs=14, msg=msg)
else:
comments = {'disk_type': image_disk_type.upper()}
comments = str(comments)
else:
comments = None
try:
import_image_fn = urlparse.urlparse(url).path.split('/')[-1]
import_image_fpath = '/'.join([target_folder, import_image_fn])
self._scheme2backend(urlparse.urlparse(url).scheme).image_import(
image_name, url,
import_image_fpath,
remote_host=remote_host)
# Check md5 after import to ensure import a correct image
# TODO change to use query image name in DB
expect_md5sum = image_meta.get('md5sum')
real_md5sum = self._get_md5sum(import_image_fpath)
if expect_md5sum and expect_md5sum != real_md5sum:
msg = ("The md5sum after import is not same as source image,"
" the image has been broken")
LOG.error(msg)
raise exception.SDKImageOperationError(rs=4)
            # After import to the image repository, figure out whether the
            # image is a single-disk or a multiple-disk image; if it has
            # multiple disks, extract it, and if it is a single-disk image,
            # rename it to match the specific vdev
# TODO: (nafei) use sub-function to check the image type
image_type = 'rootonly'
if image_type == 'rootonly':
final_image_fpath = '/'.join([target_folder,
CONF.zvm.user_root_vdev])
os.rename(import_image_fpath, final_image_fpath)
elif image_type == 'alldisks':
# For multiple disks image, extract it, after extract, the
# content under image folder is like: 0100, 0101, 0102
# and remove the image file 0100-0101-0102.tgz
pass
# TODO: put multiple disk image into consideration, update the
# disk_size_units and image_size db field
if not self.is_rhcos(image_os_version):
disk_size_units = self._get_disk_size_units(final_image_fpath)
else:
disk_size_units = self._get_disk_size_units_rhcos(
final_image_fpath)
image_size = self._get_image_size(final_image_fpath)
# TODO: update the real_md5sum field to include each disk image
self._ImageDbOperator.image_add_record(image_name,
image_os_version,
real_md5sum,
disk_size_units,
image_size,
image_type,
comments=comments)
LOG.info("Image %s is import successfully" % image_name)
except Exception:
# Cleanup the image from image repository
self._pathutils.clean_temp_folder(target_folder)
raise
def image_export(self, image_name, dest_url, remote_host=None):
"""Export the specific image to remote host or local file system
        :param image_name: image name that can uniquely identify an image
        :param dest_url: the location to store the exported image, eg.
                         /opt/images, the image will be stored in folder
                         /opt/images/
:param remote_host: the server that export image to, the format is
username@IP eg. nova@192.168.99.1, if remote_host is
None, it means the image will be stored in local server
:returns a dictionary that contains the exported image info
{
'image_name': the image_name that exported
'image_path': the image_path after exported
'os_version': the os version of the exported image
'md5sum': the md5sum of the original image
'comments': the comments of the original image
}
"""
image_info = self._ImageDbOperator.image_query_record(image_name)
if not image_info:
msg = ("The image %s does not exist in image repository"
% image_name)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=20, img=image_name)
image_type = image_info[0]['type']
# TODO: (nafei) according to image_type, detect image exported path
# For multiple disk image, make the tgz firstly, the specify the
# source_path to be something like: 0100-0101-0102.tgz
if image_type == 'rootonly':
source_path = '/'.join([CONF.image.sdk_image_repository,
const.IMAGE_TYPE['DEPLOY'],
image_info[0]['imageosdistro'],
image_name,
CONF.zvm.user_root_vdev])
else:
pass
self._scheme2backend(urlparse.urlparse(dest_url).scheme).image_export(
source_path, dest_url,
remote_host=remote_host)
# TODO: (nafei) for multiple disks image, update the expect_dict
# to be the tgz's md5sum
export_dict = {'image_name': image_name,
'image_path': dest_url,
'os_version': image_info[0]['imageosdistro'],
'md5sum': image_info[0]['md5sum'],
'comments': image_info[0]['comments']}
LOG.info("Image %s export successfully" % image_name)
return export_dict
def _get_image_disk_size_units(self, image_path):
""" Return a comma separated string to indicate the image disk size
and units for each image disk file under image_path
For single disk image , it looks like: 0100=3338:CYL
For multiple disk image, it looks like:
0100=3338:CYL,0101=4194200:BLK, 0102=4370:CYL"""
pass
def _get_disk_size_units(self, image_path):
command = 'hexdump -n 48 -C %s' % image_path
(rc, output) = zvmutils.execute(command)
LOG.debug("hexdump result is %s" % output)
if rc:
msg = ("Error happened when executing command hexdump with"
"reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=5)
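        # The first 48 bytes of an SDK-captured image carry a header written
        # by creatediskimage; the fixed slices of the hexdump text below are
        # assumed to hold the root disk size and its units (CYL or BLK)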
try:
root_disk_size = int(output[144:156])
disk_units = output[220:223]
root_disk_units = ':'.join([str(root_disk_size), disk_units])
except ValueError:
msg = ("Image file at %s is missing built-in disk size "
"metadata, it was probably not captured by SDK" %
image_path)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=6)
if 'FBA' not in output and 'CKD' not in output:
raise exception.SDKImageOperationError(rs=7)
LOG.debug("The image's root_disk_units is %s" % root_disk_units)
return root_disk_units
def _get_disk_size_units_rhcos(self, image_path):
command = "fdisk -b 4096 -l %s | head -2 | awk '{print $5}'" % (
image_path)
rc = 0
output = ""
try:
            # shell must be set to True because this is a shell command with
            # a pipeline, so we cannot use the zvmutils.execute function here
output = subprocess.check_output(command, shell=True,
stderr=subprocess.STDOUT)
output = bytes.decode(output)
except subprocess.CalledProcessError as err:
rc = err.returncode
output = err.output
except Exception as err:
            err_msg = ('Command "%s" Error: %s' % (command, str(err)))
raise exception.SDKInternalError(msg=err_msg)
if rc or output.strip('1234567890*\n'):
msg = ("Error happened when executing command fdisk with "
"reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=8)
image_size = output.split()[0]
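        # Convert the image size in bytes to DASD cylinders, assuming
        # 737280 bytes per cylinder (15 tracks * 12 blocks * 4096 bytes)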
try:
cyl = (float(image_size)) / 737280
cyl = str(int(math.ceil(cyl)))
except Exception:
msg = ("Failed to convert %s to a number of cylinders."
% image_size)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=14, msg=msg)
disk_units = "CYL"
root_disk_units = ':'.join([str(cyl), disk_units])
LOG.debug("The image's root_disk_units is %s" % root_disk_units)
return root_disk_units
def _get_image_size(self, image_path):
"""Return disk size in bytes"""
command = 'du -b %s' % image_path
(rc, output) = zvmutils.execute(command)
if rc:
msg = ("Error happened when executing command du -b with"
"reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=8)
size = output.split()[0]
return size
def _get_image_path_by_name(self, image_name):
try:
target_info = self._ImageDbOperator.image_query_record(image_name)
except exception.SDKObjectNotExistError:
msg = ("The image %s does not exist in image repository"
% image_name)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=20, img=image_name)
# TODO: (nafei) Handle multiple disks image deploy
image_path = '/'.join([CONF.image.sdk_image_repository,
const.IMAGE_TYPE['DEPLOY'],
target_info[0]['imageosdistro'],
image_name])
return image_path
def _scheme2backend(self, scheme):
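        # Map a URL scheme to the backend class used for image import/export,
        # e.g. a 'file://...' URL is handled by FilesystemBackend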
try:
return {
"file": FilesystemBackend,
"http": HTTPBackend,
# "https": HTTPSBackend
}[scheme]
except KeyError:
msg = ("No backend found for '%s'" % scheme)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=2, schema=scheme)
def _get_md5sum(self, fpath):
"""Calculate the md5sum of the specific image file"""
try:
current_md5 = hashlib.md5()
if isinstance(fpath, six.string_types) and os.path.exists(fpath):
with open(fpath, "rb") as fh:
for chunk in self._read_chunks(fh):
current_md5.update(chunk)
elif (fpath.__class__.__name__ in ["StringIO", "StringO"] or
isinstance(fpath, IOBase)):
for chunk in self._read_chunks(fpath):
current_md5.update(chunk)
else:
return ""
return current_md5.hexdigest()
except Exception:
msg = ("Failed to calculate the image's md5sum")
LOG.error(msg)
raise exception.SDKImageOperationError(rs=3)
def _read_chunks(self, fh):
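        # Yield the file content in CHUNKSIZE-byte chunks; the while/else
        # rewinds the file to the beginning once the loop finishes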
fh.seek(0)
chunk = fh.read(CHUNKSIZE)
while chunk:
yield chunk
chunk = fh.read(CHUNKSIZE)
else:
fh.seek(0)
def image_delete(self, image_name):
# Delete image file
try:
self._delete_image_file(image_name)
# Delete image record from db
self._ImageDbOperator.image_delete_record(image_name)
except exception.SDKImageOperationError as err:
results = err.results
if ((results['rc'] == 300) and (results['rs'] == 20)):
LOG.warning("Image %s does not exist", image_name)
return
else:
LOG.error("Failed to delete image %s, error: %s" %
(image_name, err.format_message()))
raise
msg = ('Delete image %s successfully' % image_name)
LOG.info(msg)
def _delete_image_file(self, image_name):
image_path = self._get_image_path_by_name(image_name)
self._pathutils.clean_temp_folder(image_path)
def _get_image_last_access_time(self, image_name, raise_exception=True):
"""Get the last access time of the image."""
image_file = os.path.join(self._get_image_path_by_name(image_name),
CONF.zvm.user_root_vdev)
if not os.path.exists(image_file):
if raise_exception:
                msg = 'Failed to get time stamp of image: %s' % image_name
LOG.error(msg)
raise exception.SDKImageOperationError(rs=23, img=image_name)
else:
# An invalid timestamp
return -1
atime = os.path.getatime(image_file)
return atime
def image_query(self, image_name=None):
image_info = self._ImageDbOperator.image_query_record(image_name)
if not image_info:
            # because the database may be None, return nothing here
return []
# if image_name is not None, means there is only one record
if image_name:
last_access_time = self._get_image_last_access_time(
image_name, raise_exception=False)
image_info[0]['last_access_time'] = last_access_time
else:
for item in image_info:
image_name = item['imagename']
                # set raise_exception to False so that one failure does not
                # stop processing the remaining items in the list
last_access_time = self._get_image_last_access_time(
image_name, raise_exception=False)
item['last_access_time'] = last_access_time
return image_info
def image_get_root_disk_size(self, image_name):
"""Return the root disk units of the specified image
image_name: the unique image name in db
Return the disk units in format like 3339:CYL or 467200:BLK
"""
image_info = self.image_query(image_name)
if not image_info:
raise exception.SDKImageOperationError(rs=20, img=image_name)
disk_size_units = image_info[0]['disk_size_units'].split(':')[0]
return disk_size_units
def image_get_os_distro(self, image_name):
"""
Return the operating system distro of the specified image
"""
image_info = self._ImageDbOperator.image_query_record(image_name)
if not image_info:
raise exception.SDKImageOperationError(rs=20, img=image_name)
os_distro = image_info[0]['imageosdistro']
return os_distro
def _get_image_disk_type(self, image_name):
"""
Return image disk type
"""
image_info = self._ImageDbOperator.image_query_record(image_name)
if ((image_info[0]['comments'] is not None) and
(image_info[0]['comments'].__contains__('disk_type'))):
image_disk_type = eval(image_info[0]['comments'])['disk_type']
if image_disk_type == 'DASD':
return 'ECKD'
elif image_disk_type == 'SCSI':
return 'SCSI'
else:
return None
else:
return None
def punch_file(self, userid, fn, fclass):
rd = ("changevm %(uid)s punchfile %(file)s --class %(class)s" %
{'uid': userid, 'file': fn, 'class': fclass})
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
LOG.error("Failed to punch file to userid '%s',"
"error: %s" % (userid, err.format_message()))
raise
finally:
os.remove(fn)
def get_guest_connection_status(self, userid):
'''Get guest vm connection status.'''
rd = ' '.join(('getvm', userid, 'isreachable'))
results = self._request(rd)
if results['rs'] == 1:
return True
else:
return False
def _generate_disk_parmline(self, vdev, fmt, mntdir):
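        # Build the quoted parameter string consumed by the setupDisk script,
        # e.g. (illustrative values):
        #   'action=addMdisk vaddr=0101 filesys=ext4 mntdir=/mnt/ephemeral0101'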
parms = [
'action=' + 'addMdisk',
'vaddr=' + vdev,
'filesys=' + fmt,
'mntdir=' + mntdir
]
parmline = ' '.join(parms)
parmstr = "'" + parmline + "'"
return parmstr
def process_additional_minidisks(self, userid, disk_info):
        '''Generate and punch the scripts used to process additional disks
        into the target vm's reader.
'''
for idx, disk in enumerate(disk_info):
vdev = disk.get('vdev') or self.generate_disk_vdev(
offset = (idx + 1))
fmt = disk.get('format')
mount_dir = disk.get('mntdir') or ''.join(['/mnt/ephemeral',
str(vdev)])
# the mount point of swap partition is swap
if fmt == "swap":
mount_dir = "swap"
disk_parms = self._generate_disk_parmline(vdev, fmt, mount_dir)
func_name = '/var/lib/zvmsdk/setupDisk'
self.aemod_handler(userid, func_name, disk_parms)
# trigger do-script
if self.get_power_state(userid) == 'on':
self.execute_cmd(userid, "/usr/bin/zvmguestconfigure start")
def aemod_handler(self, instance_name, func_name, parms):
rd = ' '.join(['changevm', instance_name, 'aemod', func_name,
'--invparms', parms])
action = parms[0] + instance_name
with zvmutils.log_and_reraise_smt_request_failed(action):
self._request(rd)
def get_user_console_output(self, userid):
# get console into reader
rd = 'getvm %s consoleoutput' % userid
action = 'get console log reader file list for guest vm: %s' % userid
with zvmutils.log_and_reraise_smt_request_failed(action):
resp = self._request(rd)
with zvmutils.expect_invalid_resp_data(resp):
rf_list = resp['response'][0].rpartition(':')[2].strip().split()
# TODO: make sure reader device is online
# via 'cat /sys/bus/ccw/drivers/vmur/0.0.000c/online'
# 'sudo /sbin/cio_ignore -r 000c; sudo /sbin/chccwdev -e 000c'
# 'which udevadm &> /dev/null && udevadm settle || udevsettle'
logs = []
for rf in rf_list:
cmd = 'sudo /usr/sbin/vmur re -t -O %s' % rf
rc, output = zvmutils.execute(cmd)
if rc == 0:
logs.append(output)
return ''.join(logs)
def query_vswitch(self, switch_name):
smt_userid = zvmutils.get_smt_userid()
rd = ' '.join((
"SMAPI %s API Virtual_Network_Vswitch_Query_Extended" %
smt_userid,
"--operands",
'-k switch_name=%s' % switch_name
))
try:
results = self._request(rd)
rd_list = results['response']
except exception.SDKSMTRequestFailed as err:
if ((err.results['rc'] == 212) and (err.results['rs'] == 40)):
msg = 'Vswitch %s does not exist' % switch_name
LOG.error(msg)
obj_desc = "Vswitch %s" % switch_name
raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
modID='network')
else:
action = "query vswitch details info"
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
vsw_info = {}
with zvmutils.expect_invalid_resp_data():
# ignore user_vlan_id part and jump to the vswitch basic info
idx_end = len(rd_list)
idx = 0
while((idx < idx_end) and
not rd_list[idx].__contains__('switch_name')):
idx = idx + 1
# The next 21 lines contains the vswitch basic info
# eg, name, type, port_type, vlan_awareness, etc
for i in range(21):
rd = rd_list[idx + i].split(':')
vsw_info[rd[0].strip()] = rd[1].strip()
idx = idx + 21
# Skip the vepa_status
while((idx < idx_end) and
not rd_list[idx].__contains__('real_device_address') and
not rd_list[idx].__contains__('port_num') and
not rd_list[idx].__contains__('adapter_owner')):
idx = idx + 1
def _parse_value(data_list, idx, keyword, offset=1):
value = data_list[idx].rpartition(keyword)[2].strip()
if value == '(NONE)':
value = 'NONE'
return idx + offset, value
def _parse_dev_status(value):
if value in const.DEV_STATUS.keys():
return const.DEV_STATUS[value]
else:
return 'Unknown'
def _parse_dev_err(value):
if value in const.DEV_ERROR.keys():
return const.DEV_ERROR[value]
else:
return 'Unknown'
# Start to analyse the real devices info
vsw_info['real_devices'] = {}
while((idx < idx_end) and
rd_list[idx].__contains__('real_device_address')):
# each rdev has 6 lines' info
idx, rdev_addr = _parse_value(rd_list, idx,
'real_device_address: ')
idx, vdev_addr = _parse_value(rd_list, idx,
'virtual_device_address: ')
idx, controller = _parse_value(rd_list, idx,
'controller_name: ')
idx, port_name = _parse_value(rd_list, idx, 'port_name: ')
idx, dev_status = _parse_value(rd_list, idx,
'device_status: ')
idx, dev_err = _parse_value(rd_list, idx,
'device_error_status ')
vsw_info['real_devices'][rdev_addr] = {'vdev': vdev_addr,
'controller': controller,
'port_name': port_name,
'dev_status':
_parse_dev_status(
dev_status),
'dev_err': _parse_dev_err(
dev_err)
}
# Under some case there would be an error line in the output
# "Error controller_name is NULL!!", skip this line
if ((idx < idx_end) and
rd_list[idx].__contains__(
'Error controller_name is NULL!!')):
idx += 1
# Start to get the authorized userids
vsw_info['authorized_users'] = {}
while((idx < idx_end) and rd_list[idx].__contains__('port_num')):
# each authorized userid has 6 lines' info at least
idx, port_num = _parse_value(rd_list, idx,
'port_num: ')
idx, userid = _parse_value(rd_list, idx,
'grant_userid: ')
idx, prom_mode = _parse_value(rd_list, idx,
'promiscuous_mode: ')
idx, osd_sim = _parse_value(rd_list, idx, 'osd_sim: ')
idx, vlan_count = _parse_value(rd_list, idx,
'vlan_count: ')
vlan_ids = []
for i in range(int(vlan_count)):
idx, id = _parse_value(rd_list, idx,
'user_vlan_id: ')
vlan_ids.append(id)
# For vlan unaware vswitch, the query smcli would
# return vlan_count as 1, here we just set the count to 0
if (vsw_info['vlan_awareness'] == 'UNAWARE'):
vlan_count = 0
vlan_ids = []
vsw_info['authorized_users'][userid] = {
'port_num': port_num,
'prom_mode': prom_mode,
'osd_sim': osd_sim,
'vlan_count': vlan_count,
'vlan_ids': vlan_ids
}
# Start to get the connected adapters info
# OWNER_VDEV would be used as the dict key for each adapter
vsw_info['adapters'] = {}
while((idx < idx_end) and
rd_list[idx].__contains__('adapter_owner')):
# each adapter has four line info: owner, vdev, macaddr, type
idx, owner = _parse_value(rd_list, idx,
'adapter_owner: ')
idx, vdev = _parse_value(rd_list, idx,
'adapter_vdev: ')
idx, mac = _parse_value(rd_list, idx,
'adapter_macaddr: ')
idx, type = _parse_value(rd_list, idx, 'adapter_type: ')
key = owner + '_' + vdev
vsw_info['adapters'][key] = {
'mac': mac,
'type': type
}
# Todo: analyze and add the uplink NIC info and global member info
def _parse_switch_status(value):
if value in const.SWITCH_STATUS.keys():
return const.SWITCH_STATUS[value]
else:
return 'Unknown'
if 'switch_status' in vsw_info.keys():
vsw_info['switch_status'] = _parse_switch_status(
vsw_info['switch_status'])
return vsw_info
def get_nic_info(self, userid=None, nic_id=None, vswitch=None):
nic_info = self._NetDbOperator.switch_select_record(userid=userid,
nic_id=nic_id, vswitch=vswitch)
return nic_info
def is_first_network_config(self, userid):
action = "get guest '%s' to database" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
info = self._GuestDbOperator.get_guest_by_userid(userid)
# check net_set
if int(info[3]) == 0:
return True
else:
return False
def update_guestdb_with_net_set(self, userid):
action = "update guest '%s' in database" % userid
with zvmutils.log_and_reraise_sdkbase_error(action):
self._GuestDbOperator.update_guest_by_userid(userid, net_set='1')
def _is_OSA_free(self, OSA_device):
osa_info = self._query_OSA()
if 'OSA' not in osa_info.keys():
return False
elif len(osa_info['OSA']['FREE']) == 0:
return False
else:
dev1 = str(OSA_device).zfill(4).upper()
dev2 = str(str(hex(int(OSA_device, 16) + 1))[2:]).zfill(4).upper()
dev3 = str(str(hex(int(OSA_device, 16) + 2))[2:]).zfill(4).upper()
if ((dev1 in osa_info['OSA']['FREE']) and
(dev2 in osa_info['OSA']['FREE']) and
(dev3 in osa_info['OSA']['FREE'])):
return True
else:
return False
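    # For example, for OSA_device '0100' the three consecutive addresses
    # '0100', '0101' and '0102' must all appear in osa_info['OSA']['FREE']
    # before the device is considered free.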
def _query_OSA(self):
smt_userid = zvmutils.get_smt_userid()
rd = "SMAPI %s API Virtual_Network_OSA_Query" % smt_userid
OSA_info = {}
try:
results = self._request(rd)
rd_list = results['response']
except exception.SDKSMTRequestFailed as err:
if ((err.results['rc'] == 4) and (err.results['rs'] == 4)):
msg = 'No OSAs on system'
LOG.info(msg)
return OSA_info
else:
action = "query OSA details info"
msg = "Failed to %s. " % action
msg += "SMT error: %s" % err.format_message()
LOG.error(msg)
raise exception.SDKSMTRequestFailed(err.results, msg)
with zvmutils.expect_invalid_resp_data():
idx_end = len(rd_list)
idx = 0
def _parse_value(data_list, idx, keyword, offset=1):
value = data_list[idx].rpartition(keyword)[2].strip()
return idx + offset, value
# Start to analyse the osa devices info
while((idx < idx_end) and
rd_list[idx].__contains__('OSA Address')):
idx, osa_addr = _parse_value(rd_list, idx,
'OSA Address: ')
idx, osa_status = _parse_value(rd_list, idx,
'OSA Status: ')
idx, osa_type = _parse_value(rd_list, idx,
'OSA Type: ')
if osa_type != 'UNKNOWN':
idx, CHPID_addr = _parse_value(rd_list, idx,
'CHPID Address: ')
idx, Agent_status = _parse_value(rd_list, idx,
'Agent Status: ')
if osa_type not in OSA_info.keys():
OSA_info[osa_type] = {}
OSA_info[osa_type]['FREE'] = []
OSA_info[osa_type]['BOXED'] = []
OSA_info[osa_type]['OFFLINE'] = []
OSA_info[osa_type]['ATTACHED'] = []
if osa_status.__contains__('ATT'):
id = osa_status.split()[1]
item = (id, osa_addr)
OSA_info[osa_type]['ATTACHED'].append(item)
else:
OSA_info[osa_type][osa_status].append(osa_addr)
return OSA_info
def _get_available_vdev(self, userid, vdev=None):
ports_info = self._NetDbOperator.switch_select_table()
vdev_info = []
for p in ports_info:
if p['userid'] == userid.upper():
vdev_info.append(p['interface'])
if len(vdev_info) == 0:
# no nic defined for the guest
if vdev is None:
nic_vdev = CONF.zvm.default_nic_vdev
else:
nic_vdev = vdev
else:
if vdev is None:
used_vdev = max(vdev_info)
nic_vdev = str(hex(int(used_vdev, 16) + 3))[2:]
else:
if self._is_vdev_valid(vdev, vdev_info):
nic_vdev = vdev
else:
errmsg = ("The specified virtual device number %s "
"has already been used." % vdev)
raise exception.SDKConflictError(modID='network', rs=6,
vdev=vdev, userid=userid,
msg=errmsg)
if ((len(nic_vdev) > 4) or
(len(str(hex(int(nic_vdev, 16) + 2))[2:]) > 4)):
errmsg = ("Virtual device number %s is not valid" % nic_vdev)
raise exception.SDKInvalidInputFormat(msg=errmsg)
return nic_vdev
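    # For example, if the guest already owns interfaces '1000' and '1003' and
    # no vdev is requested, the highest used vdev '1003' plus 3 yields '1006'
    # as the next available virtual device number.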
def dedicate_OSA(self, userid, OSA_device, vdev=None, active=False):
nic_vdev = self._get_available_vdev(userid, vdev=vdev)
if not self._is_OSA_free(OSA_device):
errmsg = ("The specified OSA device number %s "
"is not free" % OSA_device)
raise exception.SDKConflictError(modID='network', rs=14,
osa=OSA_device, userid=userid,
msg=errmsg)
LOG.debug('Nic attributes: vdev is %(vdev)s, '
'dedicated OSA device is %(osa)s',
{'vdev': nic_vdev,
'osa': OSA_device})
self._dedicate_OSA(userid, OSA_device, nic_vdev, active=active)
return nic_vdev
def _dedicate_OSA_inactive_exception(self, error, userid, vdev,
OSA_device):
if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=15,
osa=OSA_device, userid=userid,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)):
obj_desc = "Guest device %s" % vdev
raise exception.SDKConflictError(modID='network', rs=15,
osa=OSA_device, userid=userid,
obj=obj_desc)
elif ((error.results['rc'] == 404) and (error.results['rs'] == 4)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=14,
osa=OSA_device, userid=userid,
msg=errmsg)
else:
raise error
def _dedicate_OSA_active_exception(self, error, userid, OSA_device):
if (((error.results['rc'] == 204) and (error.results['rs'] == 4)) or
((error.results['rc'] == 204) and (error.results['rs'] == 8)) or
((error.results['rc'] == 204) and (error.results['rs'] == 16))):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=14,
osa=OSA_device, userid=userid,
msg=errmsg)
else:
raise error
def _dedicate_OSA(self, userid, OSA_device, vdev, active=False):
if active:
self._is_active(userid)
msg = ('Start to dedicate nic device %(vdev)s of guest %(vm)s '
'to OSA device %(osa)s'
% {'vdev': vdev, 'vm': userid, 'osa': OSA_device})
LOG.info(msg)
def_vdev = vdev
att_OSA_device = OSA_device
for i in range(3):
requestData = ' '.join((
'SMAPI %s API Image_Device_Dedicate_DM' %
userid,
"--operands",
"-v %s" % def_vdev,
"-r %s" % att_OSA_device))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err:
LOG.error("Failed to dedicate OSA %s to nic %s for user %s "
"in the guest's user direct, error: %s" %
(att_OSA_device, def_vdev, userid,
err.format_message()))
# TODO revoke the dedicated OSA in user direct
while (int(def_vdev, 16) != int(vdev, 16)):
def_vdev = str(hex(int(def_vdev, 16) - 1))[2:]
requestData = ' '.join((
'SMAPI %s API Image_Device_Undedicate_DM' %
userid,
"--operands",
"-v %s" % def_vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err2:
if ((err2.results['rc'] == 404) and
(err2.results['rs'] == 8)):
pass
else:
LOG.error("Failed to Undedicate nic %s for user"
" %s in the guest's user direct, "
"error: %s" %
(def_vdev, userid,
err2.format_message()))
pass
self._dedicate_OSA_inactive_exception(err, userid, vdev,
OSA_device)
def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
att_OSA_device = str(hex(int(att_OSA_device, 16) + 1))[2:]
if active:
def_vdev = vdev
att_OSA_device = OSA_device
for i in range(3):
requestData = ' '.join((
'SMAPI %s API Image_Device_Dedicate' %
userid,
"--operands",
"-v %s" % def_vdev,
"-r %s" % att_OSA_device))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err:
LOG.error("Failed to dedicate OSA %s to nic %s for user "
"%s on the active guest system, error: %s" %
(att_OSA_device, def_vdev, userid,
err.format_message()))
# TODO revoke the dedicated OSA in user direct and active
detach_vdev = vdev
for j in range(3):
requestData = ' '.join((
'SMAPI %s API Image_Device_Undedicate_DM' %
userid,
"--operands",
"-v %s" % detach_vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err2:
if ((err2.results['rc'] == 404) and
(err2.results['rs'] == 8)):
pass
else:
LOG.error("Failed to Undedicate nic %s for "
"user %s in the guest's user "
"direct, error: %s" %
(def_vdev, userid,
err2.format_message()))
pass
detach_vdev = str(hex(int(detach_vdev, 16) + 1))[2:]
while (int(def_vdev, 16) != int(vdev, 16)):
def_vdev = str(hex(int(def_vdev, 16) - 1))[2:]
requestData = ' '.join((
'SMAPI %s API Image_Device_Undedicate' %
userid,
"--operands",
"-v %s" % def_vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err3:
if ((err3.results['rc'] == 204) and
(err3.results['rs'] == 8)):
pass
else:
LOG.error("Failed to Undedicate nic %s for "
"user %s on the active guest "
"system, error: %s" %
(def_vdev, userid,
err3.format_message()))
pass
self._dedicate_OSA_active_exception(err, userid,
OSA_device)
def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
att_OSA_device = str(hex(int(att_OSA_device, 16) + 1))[2:]
OSA_desc = 'OSA=%s' % OSA_device
self._NetDbOperator.switch_add_record(userid, vdev, comments=OSA_desc)
msg = ('Dedicate nic device %(vdev)s of guest %(vm)s '
'to OSA device %(osa)s successfully'
% {'vdev': vdev, 'vm': userid, 'osa': OSA_device})
LOG.info(msg)
def _undedicate_nic_active_exception(self, error, userid, vdev):
if ((error.results['rc'] == 204) and (error.results['rs'] == 44)):
errmsg = error.format_message()
raise exception.SDKConflictError(modID='network', rs=16,
userid=userid, vdev=vdev,
msg=errmsg)
else:
raise error
def _undedicate_nic_inactive_exception(self, error, userid, vdev):
if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
obj_desc = "Guest %s" % userid
raise exception.SDKConflictError(modID='network', rs=17,
userid=userid, vdev=vdev,
obj=obj_desc)
else:
raise error
def _undedicate_nic(self, userid, vdev, active=False,
del_active_only=False):
if active:
self._is_active(userid)
msg = ('Start to undedicate nic device %(vdev)s of guest %(vm)s'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
if not del_active_only:
def_vdev = vdev
for i in range(3):
requestData = ' '.join((
'SMAPI %s API Image_Device_Undedicate_DM' %
userid,
"--operands",
"-v %s" % def_vdev))
try:
self._request(requestData)
except (exception.SDKSMTRequestFailed,
exception.SDKInternalError) as err:
results = err.results
emsg = err.format_message()
if ((results['rc'] == 404) and
(results['rs'] == 8)):
LOG.warning("Virtual device %s does not exist in "
"the guest's user direct", vdev)
else:
LOG.error("Failed to undedicate nic %s for %s in "
"the guest's user direct, error: %s" %
(vdev, userid, emsg))
self._undedicate_nic_inactive_exception(err, userid, vdev)
def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
self._NetDbOperator.switch_delete_record_for_nic(userid, vdev)
if active:
def_vdev = vdev
for i in range(3):
rd = ' '.join((
"SMAPI %s API Image_Device_Undedicate" %
userid,
"--operands",
'-v %s' % def_vdev))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err:
results = err.results
emsg = err.format_message()
if ((results['rc'] == 204) and
(results['rs'] == 8)):
LOG.warning("Virtual device %s does not exist on "
"the active guest system", vdev)
else:
LOG.error("Failed to undedicate nic %s for %s on "
"the active guest system, error: %s" %
(vdev, userid, emsg))
self._undedicate_nic_active_exception(err, userid,
vdev)
def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
msg = ('Undedicate nic device %(vdev)s of guest %(vm)s successfully'
% {'vdev': vdev, 'vm': userid})
LOG.info(msg)
def _request_with_error_ignored(self, rd):
"""Send smt request, log and ignore any errors."""
try:
return self._request(rd)
except Exception as err:
# log as warning and ignore namelist operation failures
LOG.warning(six.text_type(err))
def namelist_add(self, namelist, userid):
rd = ''.join(("SMAPI %s API Name_List_Add " % namelist,
"--operands -n %s" % userid))
self._request_with_error_ignored(rd)
def namelist_remove(self, namelist, userid):
rd = ''.join(("SMAPI %s API Name_List_Remove " % namelist,
"--operands -n %s" % userid))
self._request_with_error_ignored(rd)
def namelist_query(self, namelist):
rd = "SMAPI %s API Name_List_Query" % namelist
resp = self._request_with_error_ignored(rd)
if resp is not None:
return resp['response']
else:
return []
def namelist_destroy(self, namelist):
rd = "SMAPI %s API Name_List_Destroy" % namelist
self._request_with_error_ignored(rd)
def _get_defined_cpu_addrs(self, userid):
user_direct = self.get_user_direct(userid)
defined_addrs = []
max_cpus = 0
for ent in user_direct:
if ent.startswith("CPU"):
cpu_addr = ent.split()[1].strip().upper()
defined_addrs.append(cpu_addr)
if ent.startswith("MACHINE ESA"):
max_cpus = int(ent.split()[2].strip())
return (max_cpus, defined_addrs)
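    # For example, a user directory containing 'MACHINE ESA 4' plus 'CPU 00'
    # and 'CPU 01' entries yields (4, ['00', '01']).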
def _get_available_cpu_addrs(self, used_addrs, max_cpus):
# Get available CPU addresses that are not defined in user entry
used_set = set(used_addrs)
available_addrs = set([hex(i)[2:].rjust(2, '0').upper()
for i in range(0, max_cpus)])
available_addrs.difference_update(used_set)
return list(available_addrs)
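    # For example, with used_addrs ['00', '01'] and max_cpus 4 the full set
    # is {'00', '01', '02', '03'}, so ['02', '03'] is returned (order is not
    # guaranteed because a set difference is used).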
def _get_active_cpu_addrs(self, userid):
# Get the active cpu addrs in two-digit hex string in upper case
# Sample output for 'lscpu --parse=ADDRESS':
# # The following is the parsable format, which can be fed to other
# # programs. Each different item in every column has an unique ID
# # starting from zero.
# # Address
# 0
# 1
active_addrs = []
active_cpus = self.execute_cmd(userid, "lscpu --parse=ADDRESS")
for c in active_cpus:
# Skip the comment lines at beginning
if c.startswith("# "):
continue
addr = hex(int(c.strip()))[2:].rjust(2, '0').upper()
active_addrs.append(addr)
return active_addrs
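    # For the sample 'lscpu --parse=ADDRESS' output shown above (addresses 0
    # and 1), this returns ['00', '01'].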
def resize_cpus(self, userid, count):
# Check defined cpus in user entry. If greater than requested, then
# delete cpus. Otherwise, add new cpus.
# Return value: for revert usage, a tuple of
# action: The action taken for this resize, possible values:
# 0: no action, 1: add cpu, 2: delete cpu
# cpu_addrs: list of influenced cpu addrs
action = 0
updated_addrs = []
(max_cpus, defined_addrs) = self._get_defined_cpu_addrs(userid)
defined_count = len(defined_addrs)
# Check maximum cpu count defined
if max_cpus == 0:
LOG.error("Resize for guest '%s' cann't be done. The maximum "
"number of cpus is not defined in user directory." %
userid)
raise exception.SDKConflictError(modID='guest', rs=3,
userid=userid)
# Check requested count is less than the maximum cpus
if count > max_cpus:
LOG.error("Resize for guest '%s' cann't be done. The "
"requested number of cpus: '%i' exceeds the maximum "
"number of cpus allowed: '%i'." %
(userid, count, max_cpus))
raise exception.SDKConflictError(modID='guest', rs=4,
userid=userid,
req=count, max=max_cpus)
# Check count and take action
if defined_count == count:
LOG.info("The number of current defined CPUs in user '%s' equals "
"to requested count: %i, no action for static resize"
"needed." % (userid, count))
return (action, updated_addrs, max_cpus)
elif defined_count < count:
action = 1
# add more CPUs
available_addrs = self._get_available_cpu_addrs(defined_addrs,
max_cpus)
# sort the list and get the first few addrs to use
available_addrs.sort()
# Define new cpus in user directory
rd = ''.join(("SMAPI %s API Image_Definition_Update_DM " % userid,
"--operands"))
updated_addrs = available_addrs[0:count - defined_count]
for addr in updated_addrs:
rd += (" -k CPU=CPUADDR=%s" % addr)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as e:
msg = ("Define new cpus in user directory for '%s' failed with"
" SMT error: %s" % (userid, e.format_message()))
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=6, userid=userid,
err=e.format_message())
LOG.info("New CPUs defined in user directory for '%s' "
"successfully" % userid)
return (action, updated_addrs, max_cpus)
else:
action = 2
# Delete CPUs
defined_addrs.sort()
updated_addrs = defined_addrs[-(defined_count - count):]
# Delete the last few cpus in user directory
rd = ''.join(("SMAPI %s API Image_Definition_Delete_DM " % userid,
"--operands"))
for addr in updated_addrs:
rd += (" -k CPU=CPUADDR=%s" % addr)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as e:
msg = ("Delete CPUs in user directory for '%s' failed with"
" SMT error: %s" % (userid, e.format_message()))
LOG.error(msg)
raise exception.SDKGuestOperationError(rs=6, userid=userid,
err=e.format_message())
LOG.info("CPUs '%s' deleted from user directory for '%s' "
"successfully" % (str(updated_addrs), userid))
return (action, updated_addrs, max_cpus)
def live_resize_cpus(self, userid, count):
# Get active cpu count and compare with requested count
# If request count is smaller than the current count, then report
# error and exit immediately.
active_addrs = self._get_active_cpu_addrs(userid)
active_count = len(active_addrs)
if active_count > count:
LOG.error("Failed to live resize cpus of guest: %(uid)s, "
"current active cpu count: %(cur)i is greater than "
"the requested count: %(req)i." %
{'uid': userid, 'cur': active_count,
'req': count})
raise exception.SDKConflictError(modID='guest', rs=2,
userid=userid,
active=active_count,
req=count)
# Static resize CPUs. (add or delete CPUs from user directory)
(action, updated_addrs, max_cpus) = self.resize_cpus(userid, count)
if active_count == count:
# active count equals to requested
LOG.info("Current active cpu count of guest: '%s' equals to the "
"requested count: '%i', no more actions needed for "
"live resize." % (userid, count))
LOG.info("Live resize cpus for guest: '%s' finished successfully."
% userid)
return
else:
# Get the number of cpus to add to active and check address
active_free = self._get_available_cpu_addrs(active_addrs,
max_cpus)
active_free.sort()
active_new = active_free[0:count - active_count]
# Do live resize
# Define new cpus
cmd_str = "vmcp def cpu " + ' '.join(active_new)
try:
self.execute_cmd(userid, cmd_str)
except exception.SDKSMTRequestFailed as err1:
# rollback and return
msg1 = ("Define cpu of guest: '%s' to active failed with . "
"error: %s." % (userid, err1.format_message()))
# Start to do rollback
if action == 0:
LOG.error(msg1)
else:
LOG.error(msg1 + (" Will revert the user directory "
"change."))
# Combine influenced cpu addrs
cpu_entries = ""
for addr in updated_addrs:
cpu_entries += (" -k CPU=CPUADDR=%s" % addr)
rd = ''
if action == 1:
# Delete added CPUs
rd = ''.join(("SMAPI %s API Image_Definition_Delete_DM"
% userid, " --operands"))
else:
# Add deleted CPUs
rd = ''.join(("SMAPI %s API Image_Definition_Create_DM"
% userid, " --operands"))
rd += cpu_entries
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err2:
msg = ("Failed to revert user directory change for '"
"%s', SMT error: %s" % (userid,
err2.format_message()))
LOG.error(msg)
else:
LOG.info("Revert user directory change for '%s' "
"successfully." % userid)
# Finally raise the exception
raise exception.SDKGuestOperationError(
rs=7, userid=userid, err=err1.format_message())
# Activate successfully, rescan in Linux layer to hot-plug new cpus
LOG.info("Added new CPUs to active configuration of guest '%s'" %
userid)
try:
self.execute_cmd(userid, "chcpu -r")
except exception.SDKSMTRequestFailed as err:
msg = err.format_message()
LOG.error("Rescan cpus to hot-plug new defined cpus for guest: "
"'%s' failed with error: %s. No rollback is done and you"
"may need to check the status and restart the guest to "
"make the defined cpus online." % (userid, msg))
raise exception.SDKGuestOperationError(rs=8, userid=userid,
err=msg)
uname_out = self.execute_cmd(userid, "uname -a")
if uname_out and len(uname_out) >= 1:
distro = uname_out[0]
else:
distro = ''
if 'ubuntu' in distro or 'Ubuntu' in distro \
or 'UBUNTU' in distro:
try:
                # use chcpu -e <cpu-list> to bring the cpus online on Ubuntu
online_cmd = "chcpu -e " + ','.join(active_new)
self.execute_cmd(userid, online_cmd)
except exception.SDKSMTRequestFailed as err:
msg = err.format_message()
LOG.error("Enable cpus for guest: '%s' failed with error: %s. "
"No rollback is done and you may need to check the "
"status and restart the guest to make the defined "
"cpus online." % (userid, msg))
raise exception.SDKGuestOperationError(rs=15, userid=userid,
err=msg)
LOG.info("Live resize cpus for guest: '%s' finished successfully."
% userid)
def _get_defined_memory(self, userid):
user_direct = self.get_user_direct(userid)
defined_mem = max_mem = reserved_mem = -1
for ent in user_direct:
# u'USER userid password storage max privclass'
if ent.startswith("USER "):
fields = ent.split(' ')
if len(fields) != 6:
# This case should not exist if the target user
# is created by zcc and not updated manually by user
break
defined_mem = int(zvmutils.convert_to_mb(fields[3]))
max_mem = int(zvmutils.convert_to_mb(fields[4]))
# For legacy guests, the reserved memory may not be defined
if ent.startswith("COMMAND DEF STOR RESERVED"):
reserved_mem = int(zvmutils.convert_to_mb(ent.split(' ')[4]))
return (defined_mem, max_mem, reserved_mem, user_direct)
def _replace_user_direct(self, userid, user_entry):
# user_entry can be a list or a string
entry_str = ""
if isinstance(user_entry, list):
for ent in user_entry:
if ent == "":
# skip empty line
continue
else:
entry_str += (ent + '\n')
else:
entry_str = user_entry
tmp_folder = tempfile.mkdtemp()
tmp_user_direct = os.path.join(tmp_folder, userid)
with open(tmp_user_direct, 'w') as f:
f.write(entry_str)
rd = ''.join(("SMAPI %s API Image_Replace_DM " % userid,
"--operands ",
"-f %s" % tmp_user_direct))
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err1:
msg = ("Replace definition of guest '%s' failed with "
"SMT error: %s." % (userid, err1.format_message()))
LOG.error(msg)
LOG.debug("Unlocking the user directory.")
rd = ("SMAPI %s API Image_Unlock_DM " % userid)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as err2:
# ignore 'not locked' error
if ((err2.results['rc'] == 400) and (
err2.results['rs'] == 24)):
LOG.debug("Guest '%s' unlocked successfully." % userid)
pass
else:
# just print error and ignore this unlock error
msg = ("Unlock definition of guest '%s' failed "
"with SMT error: %s" %
(userid, err2.format_message()))
LOG.error(msg)
else:
LOG.debug("Guest '%s' unlocked successfully." % userid)
# at the end, raise the replace error for upper layer to handle
raise err1
finally:
self._pathutils.clean_temp_folder(tmp_folder)
def _lock_user_direct(self, userid):
rd = ("SMAPI %s API Image_Lock_DM " % userid)
try:
self._request(rd)
except exception.SDKSMTRequestFailed as e:
# ignore the "already locked" error
if ((e.results['rc'] == 400) and (e.results['rs'] == 12)):
LOG.debug("Image is already unlocked.")
else:
msg = ("Lock definition of guest '%s' failed with"
" SMT error: %s" % (userid, e.format_message()))
LOG.error(msg)
raise e
def resize_memory(self, userid, memory):
# Check defined storage in user entry.
# Update STORAGE and RESERVED accordingly.
size = int(zvmutils.convert_to_mb(memory))
(defined_mem, max_mem, reserved_mem,
user_direct) = self._get_defined_memory(userid)
# Check max memory is properly defined
if max_mem == -1 or reserved_mem == -1:
LOG.error("Memory resize for guest '%s' cann't be done."
"Failed to get the defined/max/reserved memory size "
"from user directory." % userid)
raise exception.SDKConflictError(modID='guest', rs=19,
userid=userid)
action = 0
# Make sure requested size is less than the maximum memory size
if size > max_mem:
LOG.error("Memory resize for guest '%s' cann't be done. The "
"requested memory size: '%im' exceeds the maximum "
"size allowed: '%im'." %
(userid, size, max_mem))
raise exception.SDKConflictError(modID='guest', rs=20,
userid=userid,
req=size, max=max_mem)
# check if already satisfy request
if defined_mem == size:
LOG.info("The current defined memory size in user '%s' equals "
"to requested size: %im, no action for memory resize "
"needed." % (userid, size))
return (action, defined_mem, max_mem, user_direct)
else:
# set action to 1 to represent that revert need to be done when
# live resize failed.
action = 1
# get the new reserved memory size
new_reserved = max_mem - size
# get maximum reserved memory value
MAX_STOR_RESERVED = int(zvmutils.convert_to_mb(
CONF.zvm.user_default_max_reserved_memory))
            # when the new reserved memory value exceeds MAX_STOR_RESERVED,
            # cap it at the MAX_STOR_RESERVED value
if new_reserved > MAX_STOR_RESERVED:
new_reserved = MAX_STOR_RESERVED
# prepare the new user entry content
entry_str = ""
for ent in user_direct:
if ent == '':
# Avoid adding an empty line in the entry file
# otherwise Image_Replace_DM would return syntax error.
continue
new_ent = ""
if ent.startswith("USER "):
fields = ent.split(' ')
for i in range(len(fields)):
# update fields[3] to new defined size
if i != 3:
new_ent += (fields[i] + ' ')
else:
new_ent += (str(size) + 'M ')
# remove the last space
new_ent = new_ent.strip()
elif ent.startswith("COMMAND DEF STOR RESERVED"):
new_ent = ("COMMAND DEF STOR RESERVED %iM" % new_reserved)
else:
new_ent = ent
# append this new entry
entry_str += (new_ent + '\n')
# Lock and replace user definition with the new_entry content
try:
self._lock_user_direct(userid)
except exception.SDKSMTRequestFailed as e:
raise exception.SDKGuestOperationError(rs=9, userid=userid,
err=e.format_message())
LOG.debug("User directory Locked successfully for guest '%s' " %
userid)
# Replace user directory
try:
self._replace_user_direct(userid, entry_str)
except exception.SDKSMTRequestFailed as e:
raise exception.SDKGuestOperationError(rs=10,
userid=userid,
err=e.format_message())
# Finally return useful info
return (action, defined_mem, max_mem, user_direct)
def _revert_user_direct(self, userid, user_entry):
# user_entry can be a list or a string
try:
self._lock_user_direct(userid)
except exception.SDKSMTRequestFailed:
# print revert error and return
msg = ("Failed to revert user direct of guest '%s'." % userid)
LOG.error(msg)
return
LOG.debug("User directory Locked successfully for guest '%s'." %
userid)
# Replace user directory
try:
self._replace_user_direct(userid, user_entry)
except exception.SDKSMTRequestFailed:
msg = ("Failed to revert user direct of guest '%s'." % userid)
LOG.error(msg)
return
LOG.debug("User directory reverted successfully for guest '%s'." %
userid)
def _get_active_memory(self, userid):
# Return an integer value representing the active memory size in mb
output = self.execute_cmd(userid, "lsmem")
active_mem = 0
for e in output:
# cmd output contains line starts with "Total online memory",
# its format can be like:
# "Total online memory : 8192 MB"
# or
# "Total online memory: 8G"
# need handle both formats
if e.startswith("Total online memory"):
try:
# sample mem_info_str: "8192MB" or "8G"
mem_info_str = e.split(':')[1].replace(' ', '').upper()
# make mem_info as "8192M" or "8G"
if mem_info_str.endswith('B'):
mem_info = mem_info_str[:-1]
else:
mem_info = mem_info_str
active_mem = int(zvmutils.convert_to_mb(mem_info))
except (IndexError, ValueError, KeyError, TypeError) as e:
errmsg = ("Failed to get active storage size for guest: %s"
% userid)
LOG.error(errmsg + " with error: " + six.text_type(e))
raise exception.SDKInternalError(msg=errmsg)
break
return active_mem
def live_resize_memory(self, userid, memory):
# Get active memory size and compare with requested size
# If request size is smaller than the current size, then report
# error and exit immediately.
size = int(zvmutils.convert_to_mb(memory))
active_size = self._get_active_memory(userid)
if active_size > size:
LOG.error("Failed to live resize memory of guest: %(uid)s, "
"current active memory size: %(cur)im is greater than "
"the requested size: %(req)im." %
{'uid': userid, 'cur': active_size,
'req': size})
raise exception.SDKConflictError(modID='guest', rs=18,
userid=userid,
active=active_size,
req=size)
# get maximum reserved memory value
MAX_STOR_RESERVED = int(zvmutils.convert_to_mb(
CONF.zvm.user_default_max_reserved_memory))
# The maximum increased memory size in one live resizing can't
# exceed MAX_STOR_RESERVED
increase_size = size - active_size
if increase_size > MAX_STOR_RESERVED:
LOG.error("Live memory resize for guest '%s' cann't be done. "
"The memory size to be increased: '%im' is greater "
" than the maximum reserved memory size: '%im'." %
(userid, increase_size, MAX_STOR_RESERVED))
raise exception.SDKConflictError(modID='guest', rs=21,
userid=userid,
inc=increase_size,
max=MAX_STOR_RESERVED)
# Static resize memory. (increase/decrease memory from user directory)
(action, defined_mem, max_mem,
user_direct) = self.resize_memory(userid, memory)
# Compare active size and requested size, then update accordingly
if active_size == size:
# online memory already satisfied
LOG.info("Current active memory size of guest: '%s' equals to the "
"requested size: '%iM', no more actions needed for "
"live resize." % (userid, size))
LOG.info("Live resize memory for guest: '%s' finished "
"successfully." % userid)
return
else:
# Do live resize. update memory size
# Step1: Define new standby storage
cmd_str = ("vmcp def storage standby %sM" % increase_size)
try:
self.execute_cmd(userid, cmd_str)
except exception.SDKSMTRequestFailed as e:
# rollback and return
msg = ("Define standby memory of guest: '%s' failed with "
"error: %s." % (userid, e.format_message()))
LOG.error(msg)
# Start to do rollback
if action == 1:
LOG.debug("Start to revert user definition of guest '%s'."
% userid)
self._revert_user_direct(userid, user_direct)
# Finally, raise the error and exit
raise exception.SDKGuestOperationError(rs=11,
userid=userid,
err=e.format_message())
# Step 2: Online new memory
cmd_str = ("chmem -e %sM" % increase_size)
try:
self.execute_cmd(userid, cmd_str)
except exception.SDKSMTRequestFailed as err1:
# rollback and return
msg1 = ("Online memory of guest: '%s' failed with "
"error: %s." % (userid, err1.format_message()))
LOG.error(msg1)
# Start to do rollback
LOG.info("Start to do revert.")
LOG.debug("Reverting the standby memory.")
try:
self.execute_cmd(userid, "vmcp def storage standby 0M")
except exception.SDKSMTRequestFailed as err2:
# print revert error info and continue
msg2 = ("Revert standby memory of guest: '%s' failed with "
"error: %s." % (userid, err2.format_message()))
LOG.error(msg2)
# Continue to do the user directory change.
if action == 1:
LOG.debug("Reverting the user directory change of guest "
"'%s'." % userid)
self._revert_user_direct(userid, user_direct)
# Finally raise the exception
raise exception.SDKGuestOperationError(
rs=7, userid=userid, err=err1.format_message())
LOG.info("Live resize memory for guest: '%s' finished successfully."
% userid)
def is_rhcos(self, os_version):
return os_version.lower().startswith('rhcos')
def _get_wwpn_lun(self, userid):
user_direct = self.get_user_direct(userid)
wwpn = None
lun = None
for ent in user_direct:
if ent.upper().startswith("LOADDEV PORT"):
wwpn = ent.split()[2].strip()
elif ent.upper().startswith("LOADDEV LUN"):
lun = ent.split()[2].strip()
return (wwpn, lun)
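    # Note: a directory line beginning with 'LOADDEV PORT...' or
    # 'LOADDEV LUN ...' is split on whitespace and its third token is taken
    # as the WWPN or LUN respectively; None is returned when no such line exists.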
class FilesystemBackend(object):
@classmethod
def image_import(cls, image_name, url, target, **kwargs):
"""Import image from remote host to local image repository using scp.
If remote_host not specified, it means the source file exist in local
file system, just copy the image to image repository
"""
source = urlparse.urlparse(url).path
if kwargs['remote_host']:
if '@' in kwargs['remote_host']:
source_path = ':'.join([kwargs['remote_host'], source])
command = ' '.join(['/usr/bin/scp',
"-P", CONF.zvm.remotehost_sshd_port,
"-o StrictHostKeyChecking=no",
'-r ', source_path, target])
(rc, output) = zvmutils.execute(command)
if rc:
msg = ("Copying image file from remote filesystem failed"
" with reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=10, err=output)
else:
msg = ("The specified remote_host %s format invalid" %
kwargs['remote_host'])
LOG.error(msg)
raise exception.SDKImageOperationError(rs=11,
rh=kwargs['remote_host'])
else:
LOG.debug("Remote_host not specified, will copy from local")
try:
shutil.copyfile(source, target)
except Exception as err:
msg = ("Import image from local file system failed"
" with reason %s" % six.text_type(err))
LOG.error(msg)
raise exception.SDKImageOperationError(rs=12,
err=six.text_type(err))
@classmethod
def image_export(cls, source_path, dest_url, **kwargs):
"""Export the specific image to remote host or local file system """
dest_path = urlparse.urlparse(dest_url).path
if kwargs['remote_host']:
target_path = ':'.join([kwargs['remote_host'], dest_path])
command = ' '.join(['/usr/bin/scp',
"-P", CONF.zvm.remotehost_sshd_port,
"-o StrictHostKeyChecking=no",
'-r ', source_path, target_path])
(rc, output) = zvmutils.execute(command)
if rc:
msg = ("Error happened when copying image file to remote "
"host with reason: %s" % output)
LOG.error(msg)
raise exception.SDKImageOperationError(rs=21, msg=output)
else:
# Copy to local file system
LOG.debug("Remote_host not specified, will copy to local server")
try:
shutil.copyfile(source_path, dest_path)
except Exception as err:
msg = ("Export image from %(src)s to local file system"
" %(dest)s failed: %(err)s" %
{'src': source_path,
'dest': dest_path,
'err': six.text_type(err)})
LOG.error(msg)
raise exception.SDKImageOperationError(rs=22,
err=six.text_type(err))
class HTTPBackend(object):
@classmethod
def image_import(cls, image_name, url, target, **kwargs):
import_image = MultiThreadDownloader(image_name, url,
target)
import_image.run()
class MultiThreadDownloader(threading.Thread):
def __init__(self, image_name, url, target):
super(MultiThreadDownloader, self).__init__()
self.url = url
# Set thread number
self.threadnum = 8
r = requests.head(self.url)
# Get the size of the download resource
self.totalsize = int(r.headers['Content-Length'])
self.target = target
def handle_download_errors(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as err:
self.fd.close()
msg = ("Download image from http server failed: %s" %
six.text_type(err))
LOG.error(msg)
raise exception.SDKImageOperationError(rs=9,
err=six.text_type(err))
return wrapper
def get_range(self):
ranges = []
offset = int(self.totalsize / self.threadnum)
for i in range(self.threadnum):
if i == self.threadnum - 1:
ranges.append((i * offset, ''))
else:
# Get the process range for each thread
ranges.append((i * offset, (i + 1) * offset))
return ranges
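    # For example, with totalsize 100 and threadnum 8 the offset is 12, so
    # the ranges are (0, 12), (12, 24), ... (72, 84), and the last thread
    # gets (84, '') to read until the end of the resource.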
def download(self, start, end):
headers = {'Range': 'Bytes=%s-%s' % (start, end),
'Accept-Encoding': '*'}
# Get the data
res = requests.get(self.url, headers=headers)
# seek to the right position for writing data
LOG.debug("Downloading file range %s:%s success" % (start, end))
with _LOCK:
self.fd.seek(start)
self.fd.write(res.content)
@handle_download_errors
def run(self):
        # open in binary mode since res.content written by the download
        # threads is a bytes object
        self.fd = open(self.target, 'wb')
thread_list = []
n = 0
for ran in self.get_range():
start, end = ran
LOG.debug('thread %d start:%s,end:%s' % (n, start, end))
n += 1
# Open thread
thread = threading.Thread(target=self.download, args=(start, end))
thread.start()
thread_list.append(thread)
for i in thread_list:
i.join()
        LOG.info('Download %s succeeded' % (self.name))
self.fd.close()
| 45.063793
| 79
| 0.510781
|
317e9fc1184ee14a2aa11ec5bcfe86d5663fb39c
| 27
|
py
|
Python
|
tomark/__init__.py
|
codazoda/tomark
|
f504c932c24ace8f5eda14325d299d949f8dde93
|
[
"MIT"
] | 7
|
2021-01-25T04:42:37.000Z
|
2021-07-06T20:47:32.000Z
|
tomark/__init__.py
|
codazoda/tomark
|
f504c932c24ace8f5eda14325d299d949f8dde93
|
[
"MIT"
] | null | null | null |
tomark/__init__.py
|
codazoda/tomark
|
f504c932c24ace8f5eda14325d299d949f8dde93
|
[
"MIT"
] | 2
|
2021-01-25T04:42:58.000Z
|
2021-07-06T04:45:47.000Z
|
from .tomark import Tomark
| 13.5
| 26
| 0.814815
|
348cc447f562e843d70b6c3d13c0beb4f6aa7312
| 5,440
|
py
|
Python
|
satchmo/apps/product/views/adminviews.py
|
djangoplicity/satchmo
|
75b672dffb64fed3e55c253d51a0ce73f0747e05
|
[
"BSD-3-Clause"
] | null | null | null |
satchmo/apps/product/views/adminviews.py
|
djangoplicity/satchmo
|
75b672dffb64fed3e55c253d51a0ce73f0747e05
|
[
"BSD-3-Clause"
] | null | null | null |
satchmo/apps/product/views/adminviews.py
|
djangoplicity/satchmo
|
75b672dffb64fed3e55c253d51a0ce73f0747e05
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib.auth.decorators import user_passes_test
from django.contrib import messages
from django.core import urlresolvers
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.utils.translation import ugettext as _
from django.db import IntegrityError
from product.forms import VariationManagerForm, InventoryForm, ProductExportForm, ProductImportForm
from product.models import Product
from product.modules.configurable.models import ConfigurableProduct
from satchmo_utils.views import bad_or_missing
import logging
log = logging.getLogger('product.views.adminviews')
def edit_inventory(request):
"""A quick inventory price, qty update form"""
if request.method == "POST":
new_data = request.POST.copy()
form = InventoryForm(new_data)
if form.is_valid():
form.save(request)
url = urlresolvers.reverse('satchmo_admin_edit_inventory')
return HttpResponseRedirect(url)
else:
form = InventoryForm()
ctx = {
'title' : _('Inventory Editor'),
'form' : form
}
return render(request, 'product/admin/inventory_form.html', ctx)
edit_inventory = user_passes_test(lambda u: u.is_authenticated() and u.is_staff, login_url='/accounts/login/')(edit_inventory)
def export_products(request, template='product/admin/product_export_form.html'):
"""A product export tool"""
if request.method == 'POST':
new_data = request.POST.copy()
form = ProductExportForm(new_data)
if form.is_valid():
return form.export(request)
else:
form = ProductExportForm()
fileform = ProductImportForm()
ctx = {
'title' : _('Product Import/Export'),
'form' : form,
'importform': fileform
}
return render(request, template, ctx)
export_products = user_passes_test(lambda u: u.is_authenticated() and u.is_staff, login_url='/accounts/login/')(export_products)
def import_products(request, maxsize=10000000):
"""
    Imports products from an uploaded file.
"""
if request.method == 'POST':
errors = []
results = []
if 'upload' in request.FILES:
infile = request.FILES['upload']
form = ProductImportForm()
results, errors = form.import_from(infile, maxsize=maxsize)
else:
errors.append('File: %s' % request.FILES.keys())
errors.append(_('No upload file found'))
ctx = {
'errors' : errors,
'results' : results
}
return render(request, "product/admin/product_import_result.html", ctx)
else:
url = urlresolvers.reverse('satchmo_admin_product_export')
return HttpResponseRedirect(url)
import_products = user_passes_test(lambda u: u.is_authenticated() and u.is_staff, login_url='/accounts/login/')(import_products)
# def product_active_report(request):
#
# products = Product.objects.filter(active=True)
# products = [p for p in products.all() if 'productvariation' not in p.get_subtypes]
# ctx = RequestContext(Request, {title: 'Active Product Report', 'products' : products })
# return render_to_response('product/admin/active_product_report.html', ctx)
#
# product_active_report = user_passes_test(lambda u: u.is_authenticated() and u.is_staff, login_url='/accounts/login/')(product_active_report)
def variation_list(request):
products = Product.objects.filter(configurableproduct__in = ConfigurableProduct.objects.all())
ctx = {
'products' : products,
}
return render(request, 'product/admin/variation_manager_list.html', ctx)
def variation_manager(request, product_id = ""):
try:
product = Product.objects.get(id=product_id)
subtypes = product.get_subtypes()
if 'ProductVariation' in subtypes:
# got a variation, we want to work with its parent
product = product.productvariation.parent.product
if 'ConfigurableProduct' in product.get_subtypes():
url = urlresolvers.reverse("satchmo_admin_variation_manager",
kwargs = {'product_id' : product.id})
return HttpResponseRedirect(url)
if 'ConfigurableProduct' not in subtypes:
return bad_or_missing(request, _('The product you have requested is not a Configurable Product.'))
except Product.DoesNotExist:
return bad_or_missing(request, _('The product you have requested does not exist.'))
if request.method == 'POST':
new_data = request.POST.copy()
form = VariationManagerForm(new_data, product=product)
if form.is_valid():
log.debug("Saving form")
try:
form.save(request)
except IntegrityError:
messages.error(request, _('The product you are attempting to remove is linked to an order and can not be removed.'))
# rebuild the form
form = VariationManagerForm(product=product)
else:
log.debug('errors on form')
else:
form = VariationManagerForm(product=product)
ctx = {
'product' : product,
'form' : form,
}
return render(request, 'product/admin/variation_manager.html', ctx)
variation_manager = user_passes_test(lambda u: u.is_authenticated() and u.is_staff, login_url='/accounts/login/')(variation_manager)
| 37.006803
| 142
| 0.670404
|
ecf30d039a9053fac5ac961b8a17d388e8514946
| 349
|
py
|
Python
|
env3/Scripts/easy_install-script.py
|
kdmitrieva16/python_training
|
8c88d44c8d4c26f09ed60f091138ae9c85307fce
|
[
"Apache-2.0"
] | null | null | null |
env3/Scripts/easy_install-script.py
|
kdmitrieva16/python_training
|
8c88d44c8d4c26f09ed60f091138ae9c85307fce
|
[
"Apache-2.0"
] | null | null | null |
env3/Scripts/easy_install-script.py
|
kdmitrieva16/python_training
|
8c88d44c8d4c26f09ed60f091138ae9c85307fce
|
[
"Apache-2.0"
] | null | null | null |
#!C:\PTGH\python_training\env3\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==18.1','console_scripts','easy_install'
__requires__ = 'setuptools==18.1'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('setuptools==18.1', 'console_scripts', 'easy_install')()
)
| 31.727273
| 81
| 0.733524
|
cfc10f45701526425731ce202fcc60e0ee252408
| 50,430
|
py
|
Python
|
detectron2/utils/visualizer.py
|
aabbas90/detectron2
|
e96afbad765b27bddf55237bc4a051963690aba0
|
[
"Apache-2.0"
] | 1
|
2021-01-06T10:29:37.000Z
|
2021-01-06T10:29:37.000Z
|
detectron2/utils/visualizer.py
|
aabbas90/detectron2
|
e96afbad765b27bddf55237bc4a051963690aba0
|
[
"Apache-2.0"
] | null | null | null |
detectron2/utils/visualizer.py
|
aabbas90/detectron2
|
e96afbad765b27bddf55237bc4a051963690aba0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
import colorsys
import logging
import math
import numpy as np
from enum import Enum, unique
import cv2
import matplotlib as mpl
import matplotlib.colors as mplc
import matplotlib.figure as mplfigure
import pycocotools.mask as mask_util
import torch
from matplotlib.backends.backend_agg import FigureCanvasAgg
from PIL import Image
from detectron2.data import MetadataCatalog
from detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes
from detectron2.utils.file_io import PathManager
from .colormap import random_color
logger = logging.getLogger(__name__)
__all__ = ["ColorMode", "VisImage", "Visualizer"]
_SMALL_OBJECT_AREA_THRESH = 1000
_LARGE_MASK_AREA_THRESH = 120000
_OFF_WHITE = (1.0, 1.0, 240.0 / 255)
_BLACK = (0, 0, 0)
_RED = (1.0, 0, 0)
_KEYPOINT_THRESHOLD = 0.05
@unique
class ColorMode(Enum):
"""
Enum of different color modes to use for instance visualizations.
"""
IMAGE = 0
"""
    Picks a random color for every instance and overlays segmentations with low opacity.
"""
SEGMENTATION = 1
"""
    Let instances of the same category have similar colors
    (from metadata.thing_colors) and overlay them with
    high opacity. This draws more attention to the quality of segmentation.
"""
IMAGE_BW = 2
"""
Same as IMAGE, but convert all areas without masks to gray-scale.
Only available for drawing per-instance mask predictions.
"""
class GenericMask:
"""
Attribute:
        polygons (list[ndarray]): polygons for this mask.
Each ndarray has format [x, y, x, y, ...]
mask (ndarray): a binary mask
"""
def __init__(self, mask_or_polygons, height, width):
self._mask = self._polygons = self._has_holes = None
self.height = height
self.width = width
m = mask_or_polygons
if isinstance(m, dict):
# RLEs
assert "counts" in m and "size" in m
if isinstance(m["counts"], list): # uncompressed RLEs
h, w = m["size"]
assert h == height and w == width
m = mask_util.frPyObjects(m, h, w)
self._mask = mask_util.decode(m)[:, :]
return
if isinstance(m, list): # list[ndarray]
self._polygons = [np.asarray(x).reshape(-1) for x in m]
return
if isinstance(m, np.ndarray): # assumed to be a binary mask
assert m.shape[1] != 2, m.shape
assert m.shape == (height, width), m.shape
self._mask = m.astype("uint8")
return
raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m)))
@property
def mask(self):
if self._mask is None:
self._mask = self.polygons_to_mask(self._polygons)
return self._mask
@property
def polygons(self):
if self._polygons is None:
self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
return self._polygons
@property
def has_holes(self):
if self._has_holes is None:
if self._mask is not None:
self._polygons, self._has_holes = self.mask_to_polygons(self._mask)
else:
self._has_holes = False # if original format is polygon, does not have holes
return self._has_holes
def mask_to_polygons(self, mask):
# cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level
# hierarchy. External contours (boundary) of the object are placed in hierarchy-1.
# Internal contours (holes) are placed in hierarchy-2.
# cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours.
        mask = np.ascontiguousarray(mask)  # some versions of cv2 do not support non-contiguous arrays
res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
hierarchy = res[-1]
if hierarchy is None: # empty mask
return [], False
has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0
res = res[-2]
res = [x.flatten() for x in res]
# These coordinates from OpenCV are integers in range [0, W-1 or H-1].
# We add 0.5 to turn them into real-value coordinate space. A better solution
# would be to first +0.5 and then dilate the returned polygon by 0.5.
res = [x + 0.5 for x in res if len(x) >= 6]
return res, has_holes
def polygons_to_mask(self, polygons):
rle = mask_util.frPyObjects(polygons, self.height, self.width)
rle = mask_util.merge(rle)
return mask_util.decode(rle)[:, :]
def area(self):
return self.mask.sum()
def bbox(self):
p = mask_util.frPyObjects(self.polygons, self.height, self.width)
p = mask_util.merge(p)
bbox = mask_util.toBbox(p)
bbox[2] += bbox[0]
bbox[3] += bbox[1]
return bbox
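    # Note: mask_util.toBbox returns [x, y, width, height]; adding bbox[0]
    # and bbox[1] to bbox[2] and bbox[3] converts it to [x0, y0, x1, y1].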
class _PanopticPrediction:
"""
Unify different panoptic annotation/prediction formats
"""
def __init__(self, panoptic_seg, segments_info, metadata=None):
if segments_info is None:
assert metadata is not None
# If "segments_info" is None, we assume "panoptic_img" is a
# H*W int32 image storing the panoptic_id in the format of
# category_id * label_divisor + instance_id. We reserve -1 for
# VOID label.
label_divisor = metadata.label_divisor
segments_info = []
for panoptic_label in np.unique(panoptic_seg.numpy()):
if panoptic_label == -1:
# VOID region.
continue
pred_class = panoptic_label // label_divisor
isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values()
segments_info.append(
{
"id": int(panoptic_label),
"category_id": int(pred_class),
"isthing": bool(isthing),
}
)
del metadata
self._seg = panoptic_seg
self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info
segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True)
areas = areas.numpy()
sorted_idxs = np.argsort(-areas)
self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs]
self._seg_ids = self._seg_ids.tolist()
for sid, area in zip(self._seg_ids, self._seg_areas):
if sid in self._sinfo:
self._sinfo[sid]["area"] = float(area)
def non_empty_mask(self):
"""
Returns:
(H, W) array, a mask for all pixels that have a prediction
"""
empty_ids = []
for id in self._seg_ids:
if id not in self._sinfo:
empty_ids.append(id)
if len(empty_ids) == 0:
return np.zeros(self._seg.shape, dtype=np.uint8)
assert (
len(empty_ids) == 1
), ">1 ids corresponds to no labels. This is currently not supported"
return (self._seg != empty_ids[0]).numpy().astype(np.bool)
def semantic_masks(self):
for sid in self._seg_ids:
sinfo = self._sinfo.get(sid)
if sinfo is None or sinfo["isthing"]:
# Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions.
continue
yield (self._seg == sid).numpy().astype(np.bool), sinfo
def instance_masks(self):
for sid in self._seg_ids:
sinfo = self._sinfo.get(sid)
if sinfo is None or not sinfo["isthing"]:
continue
mask = (self._seg == sid).numpy().astype(np.bool)
if mask.sum() > 0:
yield mask, sinfo
def _create_text_labels(classes, scores, class_names, is_crowd=None):
"""
Args:
classes (list[int] or None):
scores (list[float] or None):
class_names (list[str] or None):
is_crowd (list[bool] or None):
Returns:
list[str] or None
"""
labels = None
if classes is not None and class_names is not None and len(class_names) > 0:
labels = [class_names[i] for i in classes]
if scores is not None:
if labels is None:
labels = ["{:.0f}%".format(s * 100) for s in scores]
else:
labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)]
if is_crowd is not None:
labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)]
return labels
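# For example, classes=[0], scores=[0.97], class_names=["person"] yields
# ["person 97%"]; when classes or class_names is missing, the labels fall
# back to bare percentages such as ["97%"].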
class VisImage:
def __init__(self, img, scale=1.0):
"""
Args:
img (ndarray): an RGB image of shape (H, W, 3).
scale (float): scale the input image
"""
self.img = img
self.scale = scale
self.width, self.height = img.shape[1], img.shape[0]
self._setup_figure(img)
def _setup_figure(self, img):
"""
Args:
Same as in :meth:`__init__()`.
Returns:
fig (matplotlib.pyplot.figure): top level container for all the image plot elements.
ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system.
"""
fig = mplfigure.Figure(frameon=False)
self.dpi = fig.get_dpi()
# add a small 1e-2 to avoid precision lost due to matplotlib's truncation
# (https://github.com/matplotlib/matplotlib/issues/15363)
fig.set_size_inches(
(self.width * self.scale + 1e-2) / self.dpi,
(self.height * self.scale + 1e-2) / self.dpi,
)
self.canvas = FigureCanvasAgg(fig)
# self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig)
ax = fig.add_axes([0.0, 0.0, 1.0, 1.0])
ax.axis("off")
# Need to imshow this first so that other patches can be drawn on top
ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest")
self.fig = fig
self.ax = ax
def save(self, filepath):
"""
Args:
filepath (str): a string that contains the absolute path, including the file name, where
the visualized image will be saved.
"""
self.fig.savefig(filepath)
def get_image(self):
"""
Returns:
ndarray:
the visualized image of shape (H, W, 3) (RGB) in uint8 type.
The shape is scaled w.r.t the input image using the given `scale` argument.
"""
canvas = self.canvas
s, (width, height) = canvas.print_to_buffer()
# buf = io.BytesIO() # works for cairo backend
# canvas.print_rgba(buf)
# width, height = self.width, self.height
# s = buf.getvalue()
buffer = np.frombuffer(s, dtype="uint8")
img_rgba = buffer.reshape(height, width, 4)
rgb, alpha = np.split(img_rgba, [3], axis=2)
return rgb.astype("uint8")
class Visualizer:
"""
Visualizer that draws data about detection/segmentation on images.
It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}`
that draw primitive objects to images, as well as high-level wrappers like
`draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}`
that draw composite data in some pre-defined style.
Note that the exact visualization style for the high-level wrappers are subject to change.
Style such as color, opacity, label contents, visibility of labels, or even the visibility
of objects themselves (e.g. when the object is too small) may change according
to different heuristics, as long as the results still look visually reasonable.
To obtain a consistent style, you can implement custom drawing functions with the
abovementioned primitive methods instead. If you need more customized visualization
styles, you can process the data yourself following their format documented in
tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not
intend to satisfy everyone's preference on drawing styles.
This visualizer focuses on high rendering quality rather than performance. It is not
designed to be used for real-time applications.
"""
# TODO implement a fast, rasterized version using OpenCV
def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE):
"""
Args:
img_rgb: a numpy array of shape (H, W, C), where H and W correspond to
the height and width of the image respectively. C is the number of
color channels. The image is required to be in RGB format since that
is a requirement of the Matplotlib library. The image is also expected
to be in the range [0, 255].
metadata (Metadata): image metadata.
instance_mode (ColorMode): defines one of the pre-defined style for drawing
instances on an image.
"""
self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)
if metadata is None:
metadata = MetadataCatalog.get("__nonexist__")
self.metadata = metadata
self.output = VisImage(self.img, scale=scale)
self.cpu_device = torch.device("cpu")
        # too small texts are useless, therefore clamp the default font size to at least 10 // scale
self._default_font_size = max(
np.sqrt(self.output.height * self.output.width) // 90, 10 // scale
)
self._instance_mode = instance_mode
def draw_instance_predictions(self, predictions):
"""
Draw instance-level prediction results on an image.
Args:
predictions (Instances): the output of an instance detection/segmentation
model. Following fields will be used to draw:
"pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle").
Returns:
output (VisImage): image object with visualizations.
"""
boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None
scores = predictions.scores if predictions.has("scores") else None
classes = predictions.pred_classes if predictions.has("pred_classes") else None
labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None))
keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None
if predictions.has("pred_masks"):
masks = np.asarray(predictions.pred_masks)
masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]
else:
masks = None
if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
colors = [
self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes
]
alpha = 0.8
else:
colors = None
alpha = 0.5
if self._instance_mode == ColorMode.IMAGE_BW:
self.output.img = self._create_grayscale_image(
(predictions.pred_masks.any(dim=0) > 0).numpy()
if predictions.has("pred_masks")
else None
)
alpha = 0.3
self.overlay_instances(
masks=masks,
boxes=boxes,
labels=labels,
keypoints=keypoints,
assigned_colors=colors,
alpha=alpha,
)
return self.output
def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8, draw_text = True):
"""
Draw semantic segmentation predictions/labels.
Args:
sem_seg (Tensor or ndarray): the segmentation of shape (H, W).
Each value is the integer label of the pixel.
area_threshold (int): segments with less than `area_threshold` are not drawn.
alpha (float): the larger it is, the more opaque the segmentations are.
Returns:
output (VisImage): image object with visualizations.
"""
if isinstance(sem_seg, torch.Tensor):
sem_seg = sem_seg.numpy()
labels, areas = np.unique(sem_seg, return_counts=True)
sorted_idxs = np.argsort(-areas).tolist()
labels = labels[sorted_idxs]
for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels):
try:
mask_color = [x / 255 for x in self.metadata.stuff_colors[label]]
except (AttributeError, IndexError):
mask_color = None
binary_mask = (sem_seg == label).astype(np.uint8)
text = self.metadata.stuff_classes[label]
if not draw_text:
text = None
self.draw_binary_mask(
binary_mask,
color=mask_color,
edge_color=_OFF_WHITE,
text=text,
alpha=alpha,
area_threshold=area_threshold,
)
return self.output
def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7):
"""
Draw panoptic prediction annotations or results.
Args:
panoptic_seg (Tensor): of shape (height, width) where the values are ids for each
segment.
segments_info (list[dict] or None): Describe each segment in `panoptic_seg`.
If it is a ``list[dict]``, each dict contains keys "id", "category_id".
If None, category id of each pixel is computed by
``pixel // metadata.label_divisor``.
area_threshold (int): stuff segments with less than `area_threshold` are not drawn.
Returns:
output (VisImage): image object with visualizations.
"""
pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata)
if self._instance_mode == ColorMode.IMAGE_BW:
self.output.img = self._create_grayscale_image(pred.non_empty_mask())
# draw mask for all semantic segments first i.e. "stuff"
for mask, sinfo in pred.semantic_masks():
category_idx = sinfo["category_id"]
try:
mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]
except AttributeError:
mask_color = None
text = self.metadata.stuff_classes[category_idx]
self.draw_binary_mask(
mask,
color=mask_color,
edge_color=_OFF_WHITE,
text=text,
alpha=0.1,
area_threshold=area_threshold,
)
# draw mask for all instances second
all_instances = list(pred.instance_masks())
if len(all_instances) == 0:
return self.output
masks, sinfo = list(zip(*all_instances))
category_ids = [x["category_id"] for x in sinfo]
try:
scores = [x["score"] for x in sinfo]
except KeyError:
scores = None
labels = None
# _create_text_labels(
# category_ids, scores, self.metadata.thing_classes, [x.get("iscrowd", 0) for x in sinfo]
# )
try:
colors = [
self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids
]
except AttributeError:
colors = None
self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha)
return self.output
draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility
def draw_dataset_dict(self, dic):
"""
        Draw annotations/segmentations in Detectron2 Dataset format.
Args:
dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format.
Returns:
output (VisImage): image object with visualizations.
"""
annos = dic.get("annotations", None)
if annos:
if "segmentation" in annos[0]:
masks = [x["segmentation"] for x in annos]
else:
masks = None
if "keypoints" in annos[0]:
keypts = [x["keypoints"] for x in annos]
keypts = np.array(keypts).reshape(len(annos), -1, 3)
else:
keypts = None
boxes = [
BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS)
if len(x["bbox"]) == 4
else x["bbox"]
for x in annos
]
colors = None
category_ids = [x["category_id"] for x in annos]
if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"):
colors = [
self._jitter([x / 255 for x in self.metadata.thing_colors[c]])
for c in category_ids
]
names = self.metadata.get("thing_classes", None)
labels = _create_text_labels(
category_ids,
scores=None,
class_names=names,
is_crowd=[x.get("iscrowd", 0) for x in annos],
)
self.overlay_instances(
labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors
)
sem_seg = dic.get("sem_seg", None)
if sem_seg is None and "sem_seg_file_name" in dic:
with PathManager.open(dic["sem_seg_file_name"], "rb") as f:
sem_seg = Image.open(f)
sem_seg = np.asarray(sem_seg, dtype="uint8")
if sem_seg is not None:
self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5)
pan_seg = dic.get("pan_seg", None)
if pan_seg is None and "pan_seg_file_name" in dic:
with PathManager.open(dic["pan_seg_file_name"], "rb") as f:
pan_seg = Image.open(f)
pan_seg = np.asarray(pan_seg)
from panopticapi.utils import rgb2id
pan_seg = rgb2id(pan_seg)
if pan_seg is not None:
segments_info = dic["segments_info"]
pan_seg = torch.Tensor(pan_seg)
self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5)
return self.output
def overlay_instances(
self,
*,
boxes=None,
labels=None,
masks=None,
keypoints=None,
assigned_colors=None,
alpha=0.5
):
"""
Args:
boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,
or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,
or a :class:`RotatedBoxes`,
or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format
for the N objects in a single image,
labels (list[str]): the text to be displayed for each instance.
masks (masks-like object): Supported types are:
* :class:`detectron2.structures.PolygonMasks`,
:class:`detectron2.structures.BitMasks`.
* list[list[ndarray]]: contains the segmentation masks for all objects in one image.
The first level of the list corresponds to individual instances. The second
level to all the polygon that compose the instance, and the third level
to the polygon coordinates. The third level should have the format of
[x0, y0, x1, y1, ..., xn, yn] (n >= 3).
* list[ndarray]: each ndarray is a binary mask of shape (H, W).
* list[dict]: each dict is a COCO-style RLE.
keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),
where the N is the number of instances and K is the number of keypoints.
The last dimension corresponds to (x, y, visibility or score).
assigned_colors (list[matplotlib.colors]): a list of colors, where each color
corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
for full list of formats that the colors are accepted in.
Returns:
output (VisImage): image object with visualizations.
"""
num_instances = None
if boxes is not None:
boxes = self._convert_boxes(boxes)
num_instances = len(boxes)
if masks is not None:
masks = self._convert_masks(masks)
if num_instances:
assert len(masks) == num_instances
else:
num_instances = len(masks)
if keypoints is not None:
if num_instances:
assert len(keypoints) == num_instances
else:
num_instances = len(keypoints)
keypoints = self._convert_keypoints(keypoints)
if labels is not None:
assert len(labels) == num_instances
if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
if num_instances == 0:
return self.output
if boxes is not None and boxes.shape[1] == 5:
return self.overlay_rotated_instances(
boxes=boxes, labels=labels, assigned_colors=assigned_colors
)
# Display in largest to smallest order to reduce occlusion.
areas = None
if boxes is not None:
areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)
elif masks is not None:
areas = np.asarray([x.area() for x in masks])
if areas is not None:
sorted_idxs = np.argsort(-areas).tolist()
# Re-order overlapped instances in descending order.
boxes = boxes[sorted_idxs] if boxes is not None else None
labels = [labels[k] for k in sorted_idxs] if labels is not None else None
masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None
assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]
keypoints = keypoints[sorted_idxs] if keypoints is not None else None
for i in range(num_instances):
color = assigned_colors[i]
if boxes is not None:
self.draw_box(boxes[i], edge_color=color)
if masks is not None:
for segment in masks[i].polygons:
self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)
if labels is not None:
# first get a box
if boxes is not None:
x0, y0, x1, y1 = boxes[i]
text_pos = (x0, y0) # if drawing boxes, put text on the box corner.
horiz_align = "left"
elif masks is not None:
# skip small mask without polygon
if len(masks[i].polygons) == 0:
continue
x0, y0, x1, y1 = masks[i].bbox()
# draw text in the center (defined by median) when box is not drawn
# median is less sensitive to outliers.
text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]
horiz_align = "center"
else:
continue # drawing the box confidence for keypoints isn't very useful.
# for small objects, draw text at the side to avoid occlusion
instance_area = (y1 - y0) * (x1 - x0)
if (
instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale
or y1 - y0 < 40 * self.output.scale
):
if y1 >= self.output.height - 5:
text_pos = (x1, y0)
else:
text_pos = (x0, y1)
height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
font_size = (
np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)
* 0.5
* self._default_font_size
)
self.draw_text(
labels[i],
text_pos,
color=lighter_color,
horizontal_alignment=horiz_align,
font_size=font_size,
)
# draw keypoints
if keypoints is not None:
for keypoints_per_instance in keypoints:
self.draw_and_connect_keypoints(keypoints_per_instance)
return self.output
def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None):
"""
Args:
boxes (ndarray): an Nx5 numpy array of
(x_center, y_center, width, height, angle_degrees) format
for the N objects in a single image.
labels (list[str]): the text to be displayed for each instance.
assigned_colors (list[matplotlib.colors]): a list of colors, where each color
corresponds to each mask or box in the image. Refer to 'matplotlib.colors'
for full list of formats that the colors are accepted in.
Returns:
output (VisImage): image object with visualizations.
"""
num_instances = len(boxes)
if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
if num_instances == 0:
return self.output
# Display in largest to smallest order to reduce occlusion.
if boxes is not None:
areas = boxes[:, 2] * boxes[:, 3]
sorted_idxs = np.argsort(-areas).tolist()
# Re-order overlapped instances in descending order.
boxes = boxes[sorted_idxs]
labels = [labels[k] for k in sorted_idxs] if labels is not None else None
colors = [assigned_colors[idx] for idx in sorted_idxs]
for i in range(num_instances):
self.draw_rotated_box_with_label(
boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None
)
return self.output
def draw_and_connect_keypoints(self, keypoints):
"""
Draws keypoints of an instance and follows the rules for keypoint connections
to draw lines between appropriate keypoints. This follows color heuristics for
line color.
Args:
keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints
and the last dimension corresponds to (x, y, probability).
Returns:
output (VisImage): image object with visualizations.
"""
visible = {}
keypoint_names = self.metadata.get("keypoint_names")
for idx, keypoint in enumerate(keypoints):
# draw keypoint
x, y, prob = keypoint
if prob > _KEYPOINT_THRESHOLD:
self.draw_circle((x, y), color=_RED)
if keypoint_names:
keypoint_name = keypoint_names[idx]
visible[keypoint_name] = (x, y)
if self.metadata.get("keypoint_connection_rules"):
for kp0, kp1, color in self.metadata.keypoint_connection_rules:
if kp0 in visible and kp1 in visible:
x0, y0 = visible[kp0]
x1, y1 = visible[kp1]
color = tuple(x / 255.0 for x in color)
self.draw_line([x0, x1], [y0, y1], color=color)
# draw lines from nose to mid-shoulder and mid-shoulder to mid-hip
# Note that this strategy is specific to person keypoints.
# For other keypoints, it should just do nothing
try:
ls_x, ls_y = visible["left_shoulder"]
rs_x, rs_y = visible["right_shoulder"]
mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2
except KeyError:
pass
else:
# draw line from nose to mid-shoulder
nose_x, nose_y = visible.get("nose", (None, None))
if nose_x is not None:
self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED)
try:
# draw line from mid-shoulder to mid-hip
lh_x, lh_y = visible["left_hip"]
rh_x, rh_y = visible["right_hip"]
except KeyError:
pass
else:
mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2
self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED)
return self.output
"""
Primitive drawing functions:
"""
def draw_text(
self,
text,
position,
*,
font_size=None,
color="g",
horizontal_alignment="center",
rotation=0
):
"""
Args:
text (str): class label
position (tuple): a tuple of the x and y coordinates to place text on image.
            font_size (int, optional): font size of the text. If not provided, a font size
proportional to the image width is calculated and used.
color: color of the text. Refer to `matplotlib.colors` for full list
of formats that are accepted.
horizontal_alignment (str): see `matplotlib.text.Text`
rotation: rotation angle in degrees CCW
Returns:
output (VisImage): image object with text drawn.
"""
if not font_size:
font_size = self._default_font_size
# since the text background is dark, we don't want the text to be dark
color = np.maximum(list(mplc.to_rgb(color)), 0.2)
color[np.argmax(color)] = max(0.8, np.max(color))
x, y = position
self.output.ax.text(
x,
y,
text,
size=font_size * self.output.scale,
family="sans-serif",
bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"},
verticalalignment="top",
horizontalalignment=horizontal_alignment,
color=color,
zorder=10,
rotation=rotation,
)
return self.output
def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"):
"""
Args:
            box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where (x0, y0)
                is the top left corner of the box and (x1, y1) is the bottom right corner.
            alpha (float): blending coefficient. Smaller values lead to more transparent masks.
edge_color: color of the outline of the box. Refer to `matplotlib.colors`
for full list of formats that are accepted.
line_style (string): the string to use to create the outline of the boxes.
Returns:
output (VisImage): image object with box drawn.
"""
x0, y0, x1, y1 = box_coord
width = x1 - x0
height = y1 - y0
linewidth = max(self._default_font_size / 4, 1)
self.output.ax.add_patch(
mpl.patches.Rectangle(
(x0, y0),
width,
height,
fill=False,
edgecolor=edge_color,
linewidth=linewidth * self.output.scale,
alpha=alpha,
linestyle=line_style,
)
)
return self.output
def draw_rotated_box_with_label(
self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None
):
"""
Draw a rotated box with label on its top-left corner.
Args:
rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle),
where cnt_x and cnt_y are the center coordinates of the box.
w and h are the width and height of the box. angle represents how
many degrees the box is rotated CCW with regard to the 0-degree box.
            alpha (float): blending coefficient. Smaller values lead to more transparent masks.
edge_color: color of the outline of the box. Refer to `matplotlib.colors`
for full list of formats that are accepted.
line_style (string): the string to use to create the outline of the boxes.
label (string): label for rotated box. It will not be rendered when set to None.
Returns:
output (VisImage): image object with box drawn.
"""
cnt_x, cnt_y, w, h, angle = rotated_box
area = w * h
# use thinner lines when the box is small
linewidth = self._default_font_size / (
6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3
)
theta = angle * math.pi / 180.0
c = math.cos(theta)
s = math.sin(theta)
rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)]
# x: left->right ; y: top->down
rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect]
for k in range(4):
j = (k + 1) % 4
self.draw_line(
[rotated_rect[k][0], rotated_rect[j][0]],
[rotated_rect[k][1], rotated_rect[j][1]],
color=edge_color,
linestyle="--" if k == 1 else line_style,
linewidth=linewidth,
)
if label is not None:
text_pos = rotated_rect[1] # topleft corner
height_ratio = h / np.sqrt(self.output.height * self.output.width)
label_color = self._change_color_brightness(edge_color, brightness_factor=0.7)
font_size = (
np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size
)
self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle)
return self.output
def draw_circle(self, circle_coord, color, radius=3):
"""
Args:
circle_coord (list(int) or tuple(int)): contains the x and y coordinates
of the center of the circle.
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
radius (int): radius of the circle.
Returns:
            output (VisImage): image object with circle drawn.
"""
x, y = circle_coord
self.output.ax.add_patch(
mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color)
)
return self.output
def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None):
"""
Args:
x_data (list[int]): a list containing x values of all the points being drawn.
Length of list should match the length of y_data.
y_data (list[int]): a list containing y values of all the points being drawn.
Length of list should match the length of x_data.
color: color of the line. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
linestyle: style of the line. Refer to `matplotlib.lines.Line2D`
for a full list of formats that are accepted.
linewidth (float or None): width of the line. When it's None,
a default value will be computed and used.
Returns:
output (VisImage): image object with line drawn.
"""
if linewidth is None:
linewidth = self._default_font_size / 3
linewidth = max(linewidth, 1)
self.output.ax.add_line(
mpl.lines.Line2D(
x_data,
y_data,
linewidth=linewidth * self.output.scale,
color=color,
linestyle=linestyle,
)
)
return self.output
def draw_binary_mask(
self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=0
):
"""
Args:
binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and
W is the image width. Each value in the array is either a 0 or 1 value of uint8
type.
color: color of the mask. Refer to `matplotlib.colors` for a full list of
formats that are accepted. If None, will pick a random color.
edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
full list of formats that are accepted.
            text (str): if provided, the text will be drawn at the object's center of mass.
            alpha (float): blending coefficient. Smaller values lead to more transparent masks.
            area_threshold (float): a connected component smaller than this area will not be shown.
Returns:
output (VisImage): image object with mask drawn.
"""
if color is None:
color = random_color(rgb=True, maximum=1)
color = mplc.to_rgb(color)
has_valid_segment = False
binary_mask = binary_mask.astype("uint8") # opencv needs uint8
mask = GenericMask(binary_mask, self.output.height, self.output.width)
shape2d = (binary_mask.shape[0], binary_mask.shape[1])
if not mask.has_holes:
# draw polygons for regular masks
for segment in mask.polygons:
area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))
if area < (area_threshold or 0):
continue
has_valid_segment = True
segment = segment.reshape(-1, 2)
self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)
else:
# TODO: Use Path/PathPatch to draw vector graphics:
# https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon
rgba = np.zeros(shape2d + (4,), dtype="float32")
rgba[:, :, :3] = color
rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha
has_valid_segment = True
self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))
if text is not None and has_valid_segment:
# TODO sometimes drawn on wrong objects. the heuristics here can improve.
lighter_color = self._change_color_brightness(color, brightness_factor=0.7)
_num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)
largest_component_id = np.argmax(stats[1:, -1]) + 1
# draw text on the largest component, as well as other very large components.
for cid in range(1, _num_cc):
if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:
# median is more stable than centroid
# center = centroids[largest_component_id]
center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]
self.draw_text(text, center, color=lighter_color)
return self.output
def draw_heatmaps(self, heatmaps, alphas, cmaps):
"""
Args:
            heatmaps (list[ndarray]): each array has shape (H, W), where H is the image height and
                W is the image width, with values in [0, 1]; zeros are rendered as fully transparent.
            alphas (list[float]): each element in [0, 1]; the i-th value controls the opacity of the
                i-th heatmap.
            cmaps (list): each element is the matplotlib colormap with which the corresponding
                heatmap is drawn.
Returns:
output (VisImage): image object with heatmap drawn.
"""
        self.output.ax.imshow(self.output.img.astype("uint8"), extent=(0, self.output.width, self.output.height, 0), interpolation="nearest", cmap="gray")
        for hm, cm, a in zip(heatmaps, cmaps, alphas):
            self.output.ax.imshow(hm, extent=(0, self.output.width, self.output.height, 0), alpha=a * hm, cmap=cm, vmin=0.0, vmax=1.0, interpolation="nearest")
return self.output
def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):
"""
Args:
segment: numpy array of shape Nx2, containing all the points in the polygon.
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a
full list of formats that are accepted. If not provided, a darker shade
of the polygon color will be used instead.
alpha (float): blending efficient. Smaller values lead to more transparent masks.
Returns:
output (VisImage): image object with polygon drawn.
"""
if edge_color is None:
# make edge color darker than the polygon color
if alpha > 0.8:
edge_color = self._change_color_brightness(color, brightness_factor=-0.7)
else:
edge_color = color
edge_color = mplc.to_rgb(edge_color) + (1,)
polygon = mpl.patches.Polygon(
segment,
fill=True,
facecolor=mplc.to_rgb(color) + (alpha,),
edgecolor=edge_color,
linewidth=max(self._default_font_size // 15 * self.output.scale, 1),
)
self.output.ax.add_patch(polygon)
return self.output
"""
Internal methods:
"""
def _jitter(self, color):
"""
Randomly modifies given color to produce a slightly different color than the color given.
Args:
color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color
picked. The values in the list are in the [0.0, 1.0] range.
Returns:
jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the
color after being jittered. The values in the list are in the [0.0, 1.0] range.
"""
color = mplc.to_rgb(color)
vec = np.random.rand(3)
# better to do it in another color space
vec = vec / np.linalg.norm(vec) * 0.5
res = np.clip(vec + color, 0, 1)
return tuple(res)
def _create_grayscale_image(self, mask=None):
"""
Create a grayscale version of the original image.
The colors in masked area, if given, will be kept.
"""
img_bw = self.img.astype("f4").mean(axis=2)
img_bw = np.stack([img_bw] * 3, axis=2)
if mask is not None:
img_bw[mask] = self.img[mask]
return img_bw
def _change_color_brightness(self, color, brightness_factor):
"""
Depending on the brightness_factor, gives a lighter or darker color i.e. a color with
        less or more lightness than the original color.
Args:
color: color of the polygon. Refer to `matplotlib.colors` for a full list of
formats that are accepted.
brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of
0 will correspond to no change, a factor in [-1.0, 0) range will result in
a darker color and a factor in (0, 1.0] range will result in a lighter color.
Returns:
modified_color (tuple[double]): a tuple containing the RGB values of the
modified color. Each value in the tuple is in the [0.0, 1.0] range.
"""
assert brightness_factor >= -1.0 and brightness_factor <= 1.0
color = mplc.to_rgb(color)
polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))
modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])
modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness
modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness
modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2])
return modified_color
def _convert_boxes(self, boxes):
"""
Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension.
"""
if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes):
return boxes.tensor.numpy()
else:
return np.asarray(boxes)
def _convert_masks(self, masks_or_polygons):
"""
Convert different format of masks or polygons to a tuple of masks and polygons.
Returns:
list[GenericMask]:
"""
m = masks_or_polygons
if isinstance(m, PolygonMasks):
m = m.polygons
if isinstance(m, BitMasks):
m = m.tensor.numpy()
if isinstance(m, torch.Tensor):
m = m.numpy()
ret = []
for x in m:
if isinstance(x, GenericMask):
ret.append(x)
else:
ret.append(GenericMask(x, self.output.height, self.output.width))
return ret
def _convert_keypoints(self, keypoints):
if isinstance(keypoints, Keypoints):
keypoints = keypoints.tensor
keypoints = np.asarray(keypoints)
return keypoints
def get_output(self):
"""
Returns:
output (VisImage): the image output containing the visualizations added
to the image.
"""
return self.output
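# Illustrative end-to-end sketch (not part of the original module). It exercises a few of
# the primitive drawing calls above on a dummy image; with no metadata supplied, the
# constructor falls back to MetadataCatalog.get("__nonexist__") as implemented above.
def _demo_visualizer():
    img = np.random.randint(0, 255, size=(200, 300, 3), dtype=np.uint8)
    vis = Visualizer(img, scale=1.0)
    vis.draw_box((20, 30, 120, 150), edge_color="g")
    vis.draw_text("example 87%", (20, 30), horizontal_alignment="left")
    mask = np.zeros((200, 300), dtype=np.uint8)
    mask[40:100, 50:200] = 1
    vis.draw_binary_mask(mask, color=(0.2, 0.6, 0.9), alpha=0.4)
    return vis.get_output().get_image()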
| 40.669355
| 167
| 0.583204
|
50ec796fdb3c0d33eb4403884d65cc159beb7843
| 24,381
|
py
|
Python
|
src/biokbase/narrative/jobs/jobmanager.py
|
msneddon/narrative
|
499d7e7773c40144a4c130f9de4668501d940f90
|
[
"MIT"
] | null | null | null |
src/biokbase/narrative/jobs/jobmanager.py
|
msneddon/narrative
|
499d7e7773c40144a4c130f9de4668501d940f90
|
[
"MIT"
] | null | null | null |
src/biokbase/narrative/jobs/jobmanager.py
|
msneddon/narrative
|
499d7e7773c40144a4c130f9de4668501d940f90
|
[
"MIT"
] | null | null | null |
"""
KBase Job Manager
The main class here defines a manager for running jobs (as Job objects).
This class knows how to fetch job status, kill jobs, etc.
It also communicates with the front end over the KBaseJobs channel.
It is intended for use as a singleton - use the get_manager() function
to fetch it.
"""
__author__ = "Bill Riehl <wjriehl@lbl.gov>"
__version__ = "0.0.1"
import biokbase.narrative.clients as clients
from .job import Job
from ipykernel.comm import Comm
import threading
import json
import logging
from biokbase.narrative.common import kblogging
from biokbase.narrative.common.log_common import EVENT_MSG_SEP
from IPython.display import HTML
from jinja2 import Template
import dateutil.parser
import datetime
from biokbase.narrative.app_util import system_variable
from biokbase.narrative.exception_util import (
NarrativeException,
transform_job_exception
)
import traceback
import sys
class JobManager(object):
"""
    The KBase Job Manager class. This handles all jobs and makes their status available.
On status lookups, it feeds the results to the KBaseJobs channel that the front end
listens to.
"""
__instance = None
# keys = job_id, values = { refresh = T/F, job = Job object }
_running_jobs = dict()
_lookup_timer = None
_comm = None
_log = kblogging.get_logger(__name__)
# TODO: should this not be done globally?
_running_lookup_loop = False
def __new__(cls):
if JobManager.__instance is None:
JobManager.__instance = object.__new__(cls)
return JobManager.__instance
def initialize_jobs(self):
"""
Initializes this JobManager.
This is expected to be run by a running Narrative, and naturally linked to a workspace.
So it does the following steps.
1. app_util.system_variable('workspace_id')
2. get list of jobs with that ws id from UJS (also gets tag, cell_id, run_id)
3. initialize the Job objects by running NJS.get_job_params on each of those (also gets app_id)
4. start the status lookup loop.
"""
ws_id = system_variable('workspace_id')
try:
nar_jobs = clients.get('user_and_job_state').list_jobs2({
'authstrat': 'kbaseworkspace',
'authparams': [str(ws_id)]
})
except Exception as e:
kblogging.log_event(self._log, 'init_error', {'err': str(e)})
new_e = transform_job_exception(e)
error = {
'error': 'Unable to get initial jobs list',
'message': getattr(new_e, 'message', 'Unknown reason'),
'code': getattr(new_e, 'code', -1),
'source': getattr(new_e, 'source', 'jobmanager'),
'name': getattr(new_e, 'name', type(e).__name__),
'service': 'user_and_job_state'
}
self._send_comm_message('job_init_err', error)
raise new_e
for info in nar_jobs:
job_id = info[0]
user_info = info[1]
job_meta = info[10]
try:
job_info = clients.get('job_service').get_job_params(job_id)[0]
self._running_jobs[job_id] = {
'refresh': True,
'job': Job.from_state(job_id,
job_info,
user_info[0],
app_id=job_info.get('app_id'),
tag=job_meta.get('tag', 'release'),
cell_id=job_meta.get('cell_id', None),
run_id=job_meta.get('run_id', None))
}
except Exception as e:
kblogging.log_event(self._log, 'init_error', {'err': str(e)})
new_e = transform_job_exception(e)
error = {
'error': 'Unable to get job info on initial lookup',
'job_id': job_id,
'message': getattr(new_e, 'message', 'Unknown reason'),
'code': getattr(new_e, 'code', -1),
'source': getattr(new_e, 'source', 'jobmanager'),
'name': getattr(new_e, 'name', type(e).__name__),
'service': 'job_service'
}
self._send_comm_message('job_init_lookup_err', error)
raise new_e # should crash and burn on any of these.
if not self._running_lookup_loop:
            # only keep one loop at a time in case this gets called again!
if self._lookup_timer is not None:
self._lookup_timer.cancel()
self._running_lookup_loop = True
self._lookup_job_status_loop()
else:
self._lookup_all_job_status()
def list_jobs(self):
"""
List all job ids, their info, and status in a quick HTML format.
"""
try:
status_set = list()
for job_id in self._running_jobs:
job = self._running_jobs[job_id]['job']
job_state = job.state()
job_params = job.parameters()
job_state['app_id'] = job_params[0].get('app_id', 'Unknown App')
job_state['owner'] = job.owner
status_set.append(job_state)
if not len(status_set):
return "No running jobs!"
status_set = sorted(status_set, key=lambda s: s['creation_time'])
for i in range(len(status_set)):
status_set[i]['creation_time'] = datetime.datetime.strftime(datetime.datetime.fromtimestamp(status_set[i]['creation_time']/1000), "%Y-%m-%d %H:%M:%S")
exec_start = status_set[i].get('exec_start_time', None)
if 'finish_time' in status_set[i]:
finished = status_set[i].get('finish_time', None)
if finished is not None and exec_start:
delta = datetime.datetime.fromtimestamp(finished/1000.0) - datetime.datetime.fromtimestamp(exec_start/1000.0)
delta = delta - datetime.timedelta(microseconds=delta.microseconds)
status_set[i]['run_time'] = str(delta)
status_set[i]['finish_time'] = datetime.datetime.strftime(datetime.datetime.fromtimestamp(status_set[i]['finish_time']/1000), "%Y-%m-%d %H:%M:%S")
elif exec_start:
delta = datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(exec_start/1000.0)
delta = delta - datetime.timedelta(microseconds=delta.microseconds)
status_set[i]['run_time'] = str(delta)
else:
status_set[i]['run_time'] = 'Not started'
tmpl = """
<table class="table table-bordered table-striped table-condensed">
<tr>
<th>Id</th>
<th>Name</th>
<th>Submitted</th>
<th>Submitted By</th>
<th>Status</th>
<th>Run Time</th>
<th>Complete Time</th>
</tr>
{% for j in jobs %}
<tr>
<td>{{ j.job_id|e }}</td>
<td>{{ j.app_id|e }}</td>
<td>{{ j.creation_time|e }}</td>
<td>{{ j.owner|e }}</td>
<td>{{ j.job_state|e }}</td>
<td>{{ j.run_time|e }}</td>
<td>{% if j.finish_time %}{{ j.finish_time|e }}{% else %}Incomplete{% endif %}</td>
</tr>
{% endfor %}
</table>
"""
return HTML(Template(tmpl).render(jobs=status_set))
except Exception as e:
kblogging.log_event(self._log, "list_jobs.error", {'err': str(e)})
raise
def get_jobs_list(self):
"""
A convenience method for fetching an unordered list of all running Jobs.
"""
return [j['job'] for j in self._running_jobs.values()]
# def _get_existing_job(self, job_tuple):
# """
# creates a Job object from a job_id that already exists.
# If no job exists, raises an Exception.
# Parameters:
# -----------
# job_tuple : The expected 5-tuple representing a Job. The format is:
# (job_id, set of job inputs (as JSON), version tag, cell id that started the job, run id of the job)
# """
# # remove the prefix (if present) and take the last element in the split
# job_id = job_tuple[0].split(':')[-1]
# try:
# job_info = clients.get('job_service').get_job_params(job_id)[0]
# return Job.from_state(job_id, job_info, app_id=job_tuple[1], tag=job_tuple[2], cell_id=job_tuple[3], run_id=job_tuple[4])
# except Exception as e:
# kblogging.log_event(self._log, "get_existing_job.error", {'job_id': job_id, 'err': str(e)})
# raise
def _construct_job_status(self, job_id):
"""
Always creates a Job Status.
It'll embed error messages into the status if there are problems.
"""
state = {}
widget_info = None
app_spec = {}
job = self.get_job(job_id)
if job is None:
state = {
'job_state': 'error',
'error': {
'error': 'Job does not seem to exist, or it is otherwise unavailable.',
'message': 'Job does not exist',
'name': 'Job Error',
'code': -1,
'exception': {
'error_message': 'job not found in JobManager',
'error_type': 'ValueError',
'error_stacktrace': ''
}
},
'cell_id': None,
'run_id': None
}
return {
'state': state,
'app_spec': app_spec,
'widget_info': widget_info,
'owner': None
}
try:
app_spec = job.app_spec()
except Exception as e:
kblogging.log_event(self._log, "lookup_job_status.error", {'err': str(e)})
try:
state = job.state()
except Exception as e:
kblogging.log_event(self._log, "lookup_job_status.error", {'err': str(e)})
new_e = transform_job_exception(e)
e_type = type(e).__name__
            e_message = str(new_e).replace('<', '&lt;').replace('>', '&gt;')
            e_trace = traceback.format_exc().replace('<', '&lt;').replace('>', '&gt;')
e_code = getattr(new_e, "code", -2)
e_source = getattr(new_e, "source", "JobManager")
state = {
'job_state': 'error',
'error': {
'error': 'Unable to find current job state. Please try again later, or contact KBase.',
'message': 'Unable to return job state',
'name': 'Job Error',
'code': e_code,
'source': e_source,
'exception': {
'error_message': e_message,
'error_type': e_type,
'error_stacktrace': e_trace,
}
},
'creation_time': 0,
'cell_id': job.cell_id,
'run_id': job.run_id,
'job_id': job_id
}
if state.get('finished', 0) == 1:
try:
widget_info = job.get_viewer_params(state)
except Exception as e:
# Can't get viewer params
new_e = transform_job_exception(e)
kblogging.log_event(self._log, "lookup_job_status.error", {'err': str(e)})
state['job_state'] = 'error'
state['error'] = {
'error': 'Unable to generate App output viewer!\nThe App appears to have completed successfully,\nbut we cannot construct its output viewer.\nPlease contact the developer of this App for assistance.',
'message': 'Unable to build output viewer parameters!',
'name': 'App Error',
'code': getattr(new_e, "code", -1),
'source': getattr(new_e, "source", "JobManager")
}
if 'canceling' in self._running_jobs[job_id]:
state['job_state'] = 'canceling'
return {'state': state,
'spec': app_spec,
'widget_info': widget_info,
'owner': job.owner}
def _lookup_job_status(self, job_id):
"""
Will raise a ValueError if job_id doesn't exist.
Sends the status over the comm channel as the usual job_status message.
"""
status = self._construct_job_status(job_id)
self._send_comm_message('job_status', status)
def _lookup_all_job_status(self, ignore_refresh_flag=False):
"""
Looks up status for all jobs.
Once job info is acquired, it gets pushed to the front end over the
'KBaseJobs' channel.
"""
status_set = dict()
# grab the list of running job ids, so we don't run into update-while-iterating problems.
for job_id in self._running_jobs.keys():
if self._running_jobs[job_id]['refresh'] or ignore_refresh_flag:
status_set[job_id] = self._construct_job_status(job_id)
self._send_comm_message('job_status_all', status_set)
def _lookup_job_status_loop(self):
"""
Initialize a loop that will look up job info. This uses a Timer thread on a 10
second loop to update things.
"""
self._lookup_all_job_status()
self._lookup_timer = threading.Timer(10, self._lookup_job_status_loop)
self._lookup_timer.start()
def cancel_job_lookup_loop(self):
"""
Cancels a running timer if one's still alive.
"""
if self._lookup_timer:
self._lookup_timer.cancel()
self._lookup_timer = None
self._running_lookup_loop = False
def register_new_job(self, job):
"""
Registers a new Job with the manager - should only be invoked when a new Job gets
started. This stores the Job locally and pushes it over the comm channel to the
Narrative where it gets serialized.
Parameters:
-----------
job : biokbase.narrative.jobs.job.Job object
The new Job that was started.
"""
self._running_jobs[job.job_id] = {'job': job, 'refresh': True}
# push it forward! create a new_job message.
self._lookup_job_status(job.job_id)
self._send_comm_message('new_job', {})
def get_job(self, job_id):
"""
Returns a Job with the given job_id.
Raises a ValueError if not found.
"""
if job_id in self._running_jobs:
return self._running_jobs[job_id]['job']
else:
raise ValueError('No job present with id {}'.format(job_id))
def _handle_comm_message(self, msg):
"""
Handles comm messages that come in from the other end of the KBaseJobs channel.
All messages (of any use) should have a 'request_type' property.
Possible types:
* all_status
refresh all jobs that are flagged to be looked up. Will send a
message back with all lookup status.
* job_status
refresh the single job given in the 'job_id' field. Sends a message
back with that single job's status, or an error message.
* stop_update_loop
stop the running refresh loop, if there's one going (might be
one more pass, depending on the thread state)
* start_update_loop
reinitialize the refresh loop.
* stop_job_update
flag the given job id (should be an accompanying 'job_id' field) that the front
end knows it's in a terminal state and should no longer have its status looked
up in the refresh cycle.
* start_job_update
remove the flag that gets set by stop_job_update (needs an accompanying 'job_id'
field)
"""
if 'request_type' in msg['content']['data']:
r_type = msg['content']['data']['request_type']
job_id = msg['content']['data'].get('job_id', None)
if job_id is not None and job_id not in self._running_jobs:
# If it's not a real job, just silently ignore the request.
# Maybe return an error? Yeah. Let's do that.
# self._send_comm_message('job_comm_error', {'job_id': job_id, 'message': 'Unknown job id', 'request_type': r_type})
# TODO: perhaps we should implement request/response here. All we really need is to thread a message
# id through
self._send_comm_message('job_does_not_exist', {'job_id': job_id, 'request_type': r_type})
return
if r_type == 'all_status':
self._lookup_all_job_status(ignore_refresh_flag=True)
elif r_type == 'job_status':
if job_id is not None:
self._lookup_job_status(job_id)
elif r_type == 'stop_update_loop':
if self._lookup_timer is not None:
self._lookup_timer.cancel()
elif r_type == 'start_update_loop':
self._lookup_job_status_loop()
elif r_type == 'stop_job_update':
if job_id is not None:
self._running_jobs[job_id]['refresh'] = False
elif r_type == 'start_job_update':
if job_id is not None:
self._running_jobs[job_id]['refresh'] = True
elif r_type == 'delete_job':
if job_id is not None:
try:
self.delete_job(job_id)
except Exception as e:
self._send_comm_message('job_comm_error', {'message': str(e), 'request_type': r_type, 'job_id': job_id})
elif r_type == 'cancel_job':
if job_id is not None:
try:
self.cancel_job(job_id)
except Exception as e:
self._send_comm_message('job_comm_error', {'message': str(e), 'request_type': r_type, 'job_id': job_id})
elif r_type == 'job_logs':
if job_id is not None:
first_line = msg['content']['data'].get('first_line', 0)
num_lines = msg['content']['data'].get('num_lines', None)
self._get_job_logs(job_id, first_line=first_line, num_lines=num_lines)
else:
                    raise ValueError('Need a job id to fetch job logs!')
elif r_type == 'job_logs_latest':
if job_id is not None:
num_lines = msg['content']['data'].get('num_lines', None)
self._get_latest_job_logs(job_id, num_lines=num_lines)
else:
self._send_comm_message('job_comm_error', {'message': 'Unknown message', 'request_type': r_type})
raise ValueError('Unknown KBaseJobs message "{}"'.format(r_type))
def _get_latest_job_logs(self, job_id, num_lines=None):
job = self.get_job(job_id)
if job is None:
raise ValueError('job "{}" not found while fetching logs!'.format(job_id))
(max_lines, logs) = job.log()
first_line = 0
if num_lines is not None and max_lines > num_lines:
first_line = max_lines - num_lines
logs = logs[first_line:]
self._send_comm_message('job_logs', {'job_id': job_id, 'first': first_line, 'max_lines': max_lines, 'lines': logs, 'latest': True})
def _get_job_logs(self, job_id, first_line=0, num_lines=None):
job = self.get_job(job_id)
if job is None:
raise ValueError('job "{}" not found!'.format(job_id))
(max_lines, log_slice) = job.log(first_line=first_line, num_lines=num_lines)
self._send_comm_message('job_logs', {'job_id': job_id, 'first': first_line, 'max_lines': max_lines, 'lines': log_slice, 'latest': False})
def delete_job(self, job_id):
"""
        If job_id is None, raises a ValueError. If the job is unknown, a 'job_does_not_exist'
        message is sent over the comm channel instead.
Attempts to delete a job, and cancels it first. If the job cannot be canceled,
raises an exception. If it can be canceled but not deleted, it gets canceled, then raises
an exception.
"""
if job_id is None:
raise ValueError('Job id required for deletion!')
if job_id not in self._running_jobs:
self._send_comm_message('job_does_not_exist', {'job_id': job_id, 'source': 'delete_job'})
return
# raise ValueError('Attempting to cancel a Job that does not exist!')
try:
self.cancel_job(job_id)
except Exception as e:
raise
try:
clients.get('user_and_job_state').delete_job(job_id)
except Exception as e:
raise
del self._running_jobs[job_id]
self._send_comm_message('job_deleted', {'job_id': job_id})
def cancel_job(self, job_id):
"""
Cancels a running job, placing it in a canceled state.
Does NOT delete the job.
Raises an exception if the current user doesn't have permission to cancel the job.
"""
if job_id is None:
raise ValueError('Job id required for cancellation!')
if job_id not in self._running_jobs:
self._send_comm_message('job_does_not_exist', {'job_id': job_id, 'source': 'cancel_job'})
return
try:
job = self.get_job(job_id)
state = job.state()
if state.get('canceled', 0) == 1 or state.get('finished', 0) == 1:
# It's already finished, don't try to cancel it again.
return
except Exception as e:
raise ValueError('Unable to get Job state')
# Stop updating the job status while we try to cancel.
# Also, set it to have a special state of 'canceling' while we're doing the cancel
is_refreshing = self._running_jobs[job_id].get('refresh', False)
self._running_jobs[job_id]['refresh'] = False
self._running_jobs[job_id]['canceling'] = True
try:
clients.get('job_service').cancel_job({'job_id': job_id})
except Exception as e:
new_e = transform_job_exception(e)
error = {
                'error': 'Unable to cancel job',
'message': getattr(new_e, 'message', 'Unknown reason'),
'code': getattr(new_e, 'code', -1),
'source': getattr(new_e, 'source', 'jobmanager'),
'name': getattr(new_e, 'name', type(e).__name__),
'request_type': 'cancel_job',
'job_id': job_id
}
self._send_comm_message('job_comm_error', error)
raise(e)
finally:
self._running_jobs[job_id]['refresh'] = is_refreshing
del self._running_jobs[job_id]['canceling']
#
# self._send_comm_message('job_canceled', {'job_id': job_id})
# Rather than a separate message, how about triggering a job-status message:
self._lookup_job_status(job_id)
def _send_comm_message(self, msg_type, content):
"""
Sends a ipykernel.Comm message to the KBaseJobs channel with the given msg_type
and content. These just get encoded into the message itself.
"""
msg = {
'msg_type': msg_type,
'content': content
}
if self._comm is None:
self._comm = Comm(target_name='KBaseJobs', data={})
self._comm.on_msg(self._handle_comm_message)
self._comm.send(msg)
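# Illustrative sketch (not part of the original module): the shape of a front-end request
# as consumed by JobManager._handle_comm_message above. Only the msg['content']['data']
# payload matters to the handler; 'request_type' selects the action and 'job_id' (when
# required) names the job.
def _demo_comm_request(job_id):
    return {
        'content': {
            'data': {
                'request_type': 'job_status',  # e.g. 'all_status', 'cancel_job', 'job_logs', ...
                'job_id': job_id,
            }
        }
    }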
| 41.748288
| 220
| 0.554571
|
33a726f9a396ff15102ee56bd59a71b3feffb925
| 583
|
py
|
Python
|
ej2bplotter.py
|
lboccardi/SS_20212C_TP4
|
5034ce9084c2fa86bbca8b15fc35910cd21b02eb
|
[
"MIT"
] | null | null | null |
ej2bplotter.py
|
lboccardi/SS_20212C_TP4
|
5034ce9084c2fa86bbca8b15fc35910cd21b02eb
|
[
"MIT"
] | null | null | null |
ej2bplotter.py
|
lboccardi/SS_20212C_TP4
|
5034ce9084c2fa86bbca8b15fc35910cd21b02eb
|
[
"MIT"
] | null | null | null |
import sys
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
if (len(sys.argv) < 2):
print("Wrong amount of parameters")
exit()
times = []
positions = []
velocities = []
    data = np.genfromtxt(r".\TP4\datos3Jupiter.csv", delimiter=",", names=["spaceshipVelocity", "t"])
plt.plot(data['t']-655,data['spaceshipVelocity'],'r.-')
plt.grid(b=True, which='both', axis='both')
plt.ylabel('Velocidad de la nave (Km/s)')
plt.xlabel('Dias desde despegue (Dias)')
plt.show()
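# Illustrative helper (not part of the original script): np.genfromtxt with `names=`
# returns a structured array, so columns are read by field name as done above.
def load_velocity_series(path):
    data = np.genfromtxt(path, delimiter=",", names=["spaceshipVelocity", "t"])
    return data["t"], data["spaceshipVelocity"]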
| 26.5
| 101
| 0.578045
|
345b1817bea7c61036934d3d67bc6af6905f6a12
| 4,924
|
py
|
Python
|
loss.py
|
ml-postech/LISA
|
4f3c1cd7e56d6d7b107a587152cd703663cfa4c4
|
[
"BSD-3-Clause"
] | null | null | null |
loss.py
|
ml-postech/LISA
|
4f3c1cd7e56d6d7b107a587152cd703663cfa4c4
|
[
"BSD-3-Clause"
] | null | null | null |
loss.py
|
ml-postech/LISA
|
4f3c1cd7e56d6d7b107a587152cd703663cfa4c4
|
[
"BSD-3-Clause"
] | 1
|
2022-03-31T16:06:22.000Z
|
2022-03-31T16:06:22.000Z
|
import torch
import torch.nn.functional as F
import torchaudio
class hp:
class dsp:
sample_rate = 16000
min_vol = -96 # in dBFS
max_vol = 0 # in dBFS
class loss:
class spectrogramloss: # According to Su et al. (2020)
n_fft = [2048, 512]
hop_length = [512, 128]
class L_Adv_G():
def __init__(self):
pass
def __call__(self, prediction_score):
return torch.clamp(1 - prediction_score, min=0)
class L_Dk():
def __init__(self):
pass
def __call__(self, ground_truth_score, prediction_score):
return torch.clamp(1 + prediction_score, min=0) + torch.clamp(1 - ground_truth_score, min=0)
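# Illustrative sketch (not part of the original module): L_Adv_G / L_Dk implement hinge
# GAN losses. With a real-sample score of 0.3 and a fake-sample score of -0.2, the
# discriminator term is max(0, 1 - 0.3) + max(0, 1 + (-0.2)) = 0.7 + 0.8 = 1.5 and the
# generator term is max(0, 1 - (-0.2)) = 1.2.
def _demo_hinge_losses():
    real_score = torch.tensor(0.3)
    fake_score = torch.tensor(-0.2)
    d_loss = L_Dk()(real_score, fake_score)  # tensor(1.5000)
    g_loss = L_Adv_G()(fake_score)           # tensor(1.2000)
    return d_loss, g_loss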
class SampleLoss():
def __init__(self):
pass
def __call__(self, ground_truth, prediction):
ground_truth = ground_truth.squeeze()
prediction = prediction.squeeze()
return F.l1_loss(prediction, ground_truth)
class SpectrogramLoss():
def __init__(self, rank):
self.stft_1 = torchaudio.transforms.Spectrogram(n_fft=hp.loss.spectrogramloss.n_fft[0],
hop_length=hp.loss.spectrogramloss.hop_length[0],
normalized=True).to(rank)
self.stft_2 = torchaudio.transforms.Spectrogram(n_fft=hp.loss.spectrogramloss.n_fft[1],
hop_length=hp.loss.spectrogramloss.hop_length[1],
normalized=True).to(rank)
self.amp_to_db = torchaudio.transforms.AmplitudeToDB(top_db=-hp.dsp.min_vol)
def __call__(self, ground_truth, prediction):
ground_truth = ground_truth.squeeze()
prediction = prediction.squeeze()
        # normalize each STFT by its own FFT size before converting to dB
        ground_truth_stft1 = self.amp_to_db(
            self.stft_1(ground_truth) / self.stft_1.n_fft
        )
        ground_truth_stft2 = self.amp_to_db(
            self.stft_2(ground_truth) / self.stft_2.n_fft
        )
        prediction_stft1 = self.amp_to_db(
            self.stft_1(prediction) / self.stft_1.n_fft
        )
        prediction_stft2 = self.amp_to_db(
            self.stft_2(prediction) / self.stft_2.n_fft
        )
return F.mse_loss(prediction_stft1, ground_truth_stft1) + F.mse_loss(prediction_stft2, ground_truth_stft2)
class CombinedLoss():
def __init__(self, rank):
self.l_adv_g = L_Adv_G()
self.l_dk = L_Dk()
self.sample_loss = SampleLoss()
self.spectrogram_loss = SpectrogramLoss(rank)
def __call__(self, step, ground_truth, prediction, prediction_postnet, prediction_scores, discriminator_scores,
L_FM_G):
# Discriminator losses
D_losses = []
for i in range(4):
D_loss = torch.mean(self.l_dk(discriminator_scores[i][0], discriminator_scores[i][1])) + L_FM_G[i]
D_losses.append(D_loss)
combined_loss = torch.sum(torch.stack(D_losses))
# Generator loss
if step % 1 == 0: # Modulo set to 1 to prevent unused parameters error when using DDP, deviating from the paper
wavenet_loss = self.sample_loss(ground_truth, prediction) + self.spectrogram_loss(ground_truth, prediction)
wavenet_postnet_loss = self.sample_loss(ground_truth, prediction_postnet) + self.spectrogram_loss(
ground_truth, prediction_postnet)
adversarial_loss = torch.sum(torch.stack(
[torch.mean(self.l_adv_g(prediction_score)) for prediction_score in prediction_scores]
))
G_loss = wavenet_loss + wavenet_postnet_loss + adversarial_loss
combined_loss += G_loss
else:
wavenet_loss = None
wavenet_postnet_loss = None
G_loss = None
return combined_loss, \
wavenet_loss, wavenet_postnet_loss, \
G_loss, D_losses
class GANLoss():
def __init__(self):
self.l_adv_g = L_Adv_G()
self.l_dk = L_Dk()
def __call__(self, step, prediction_scores, discriminator_scores, L_FM_G):
# Discriminator losses
D_losses = []
for i in range(4):
D_loss = torch.mean(self.l_dk(discriminator_scores[i][0], discriminator_scores[i][1])) + L_FM_G[i]
D_losses.append(D_loss)
combined_loss = torch.sum(torch.stack(D_losses))
adversarial_loss = torch.sum(torch.stack(
[torch.mean(self.l_adv_g(prediction_score)) for prediction_score in prediction_scores]
))
G_loss = adversarial_loss
combined_loss += G_loss
return combined_loss, \
{
'G_loss': G_loss,
'D1_loss': D_losses[0],
'D2_loss': D_losses[1],
'D3_loss': D_losses[2],
'Dmel_loss': D_losses[3],
}
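# Illustrative sketch (not part of the original module, assumes torchaudio is installed):
# the multi-resolution spectrogram loss compares log-magnitude STFTs of prediction and
# ground truth at two FFT sizes (2048 and 512 samples, per hp.loss.spectrogramloss).
def _demo_spectrogram_loss():
    spec_loss = SpectrogramLoss(rank="cpu")
    target = torch.randn(2, hp.dsp.sample_rate)          # 2 clips of 1 s at 16 kHz
    prediction = target + 0.01 * torch.randn_like(target)
    return spec_loss(target, prediction)                 # scalar MSE over both resolutions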
| 34.433566
| 120
| 0.602965
|
f076497504573934ac0276a2ae05473905436b2c
| 3,051
|
py
|
Python
|
src/db-up/azext_db_up/vendored_sdks/azure_mgmt_sql/sql/models/sync_agent_py3.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/db-up/azext_db_up/vendored_sdks/azure_mgmt_sql/sql/models/sync_agent_py3.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/db-up/azext_db_up/vendored_sdks/azure_mgmt_sql/sql/models/sync_agent_py3.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_resource_py3 import ProxyResource
class SyncAgent(ProxyResource):
"""An Azure SQL Database sync agent.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar sync_agent_name: Name of the sync agent.
:vartype sync_agent_name: str
:param sync_database_id: ARM resource id of the sync database in the sync
agent.
:type sync_database_id: str
:ivar last_alive_time: Last alive time of the sync agent.
:vartype last_alive_time: datetime
:ivar state: State of the sync agent. Possible values include: 'Online',
'Offline', 'NeverConnected'
:vartype state: str or ~azure.mgmt.sql.models.SyncAgentState
:ivar is_up_to_date: If the sync agent version is up to date.
:vartype is_up_to_date: bool
:ivar expiry_time: Expiration time of the sync agent version.
:vartype expiry_time: datetime
:ivar version: Version of the sync agent.
:vartype version: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'sync_agent_name': {'readonly': True},
'last_alive_time': {'readonly': True},
'state': {'readonly': True},
'is_up_to_date': {'readonly': True},
'expiry_time': {'readonly': True},
'version': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'sync_agent_name': {'key': 'properties.name', 'type': 'str'},
'sync_database_id': {'key': 'properties.syncDatabaseId', 'type': 'str'},
'last_alive_time': {'key': 'properties.lastAliveTime', 'type': 'iso-8601'},
'state': {'key': 'properties.state', 'type': 'str'},
'is_up_to_date': {'key': 'properties.isUpToDate', 'type': 'bool'},
'expiry_time': {'key': 'properties.expiryTime', 'type': 'iso-8601'},
'version': {'key': 'properties.version', 'type': 'str'},
}
def __init__(self, *, sync_database_id: str=None, **kwargs) -> None:
super(SyncAgent, self).__init__(**kwargs)
self.sync_agent_name = None
self.sync_database_id = sync_database_id
self.last_alive_time = None
self.state = None
self.is_up_to_date = None
self.expiry_time = None
self.version = None
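# Illustrative sketch (not part of the generated module): SyncAgent is a typed model whose
# read-only fields are populated by the service; only sync_database_id can be supplied by
# the caller, shown below with a made-up ARM resource id.
def _demo_sync_agent():
    agent = SyncAgent(
        sync_database_id="/subscriptions/sub/resourceGroups/rg/providers/"
                         "Microsoft.Sql/servers/srv/databases/syncdb"
    )
    # Read-only attributes such as agent.state and agent.version remain None until the
    # model is deserialized from a service response.
    return agent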
| 38.620253
| 83
| 0.602753
|
748421d83f29db3322117a9ba96365800795a060
| 1,409
|
py
|
Python
|
workbench/planning/migrations/0013_publicholiday.py
|
yoshson/workbench
|
701558cac3357cd82e4dc99f0fefed12ee81ddc5
|
[
"MIT"
] | 15
|
2020-09-02T22:17:34.000Z
|
2022-02-01T20:09:10.000Z
|
workbench/planning/migrations/0013_publicholiday.py
|
yoshson/workbench
|
701558cac3357cd82e4dc99f0fefed12ee81ddc5
|
[
"MIT"
] | 18
|
2020-01-08T15:28:26.000Z
|
2022-02-28T02:46:41.000Z
|
workbench/planning/migrations/0013_publicholiday.py
|
yoshson/workbench
|
701558cac3357cd82e4dc99f0fefed12ee81ddc5
|
[
"MIT"
] | 8
|
2020-09-29T08:00:24.000Z
|
2022-01-16T11:58:19.000Z
|
# Generated by Django 3.2.2 on 2021-05-16 13:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("planning", "0012_auto_20210429_1448"),
]
operations = [
migrations.CreateModel(
name="PublicHoliday",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("date", models.DateField(verbose_name="date")),
("name", models.CharField(max_length=200, verbose_name="name")),
(
"fraction",
models.DecimalField(
decimal_places=2,
default=1,
max_digits=5,
verbose_name="fraction of day which is free",
),
),
],
options={
"verbose_name": "public holiday",
"verbose_name_plural": "public holidays",
"ordering": ["-date"],
},
),
migrations.RunSQL(
"SELECT audit_audit_table('planning_publicholiday');",
"",
),
]
| 29.354167
| 80
| 0.419446
|
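For readers who want to see what the migration above corresponds to in model form, the following is a reconstruction inferred purely from the CreateModel operation; the project's actual workbench/planning/models.py is not part of this record and may differ.

# Reconstruction inferred from the migration's CreateModel fields above; illustrative only.
from django.db import models

class PublicHoliday(models.Model):
    date = models.DateField("date")
    name = models.CharField("name", max_length=200)
    fraction = models.DecimalField(
        "fraction of day which is free",
        max_digits=5,
        decimal_places=2,
        default=1,
    )

    class Meta:
        verbose_name = "public holiday"
        verbose_name_plural = "public holidays"
        ordering = ["-date"]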
332c2279fe65065a025a9c773c1b1c23f116c5d7
| 867
|
py
|
Python
|
variations/data_preperation.py
|
amarkumar1720/AlexNet
|
d544405df8a0ff08c64699cbdfd7869c8f3f5302
|
[
"MIT"
] | null | null | null |
variations/data_preperation.py
|
amarkumar1720/AlexNet
|
d544405df8a0ff08c64699cbdfd7869c8f3f5302
|
[
"MIT"
] | null | null | null |
variations/data_preperation.py
|
amarkumar1720/AlexNet
|
d544405df8a0ff08c64699cbdfd7869c8f3f5302
|
[
"MIT"
] | null | null | null |
import os
import shutil
# /home/amareshiitd/Desktop/deep learning/assn1/
default = ''  # path prefix for the source and destination folders (empty means the current directory)
label=['Apple','Baby','Bicycle','Bird','Bus','Camel','Car','Chimpanzee','Clock','Crocodile','Deer','Dog','Elephant','Fish','Flower','Frog','Guitar','Horse','Lamp','Lock','Man','Motorcycle','Mushroom','Orange','Pear','Plane','Rocket','Ship','Table','Television','Tiger','Tractor','Train','Truck','Woman']
print(len(label))
for i in range(len(label)):
print(i)
for j in ['train','validation','test']:
src = default+'ImageNet_Subset/'+label[i]+'/'+j
src_files = os.listdir(src)
directory = default+'dataset/'+j+'/'+label[i]+'/'
        os.makedirs(directory, exist_ok=True)  # tolerate re-runs
for file_name in src_files:
full_file_name = os.path.join(src, file_name)
dest = default+'dataset/'+j+'/'+label[i]+'/'+file_name
if (os.path.isfile(full_file_name)):
shutil.copy(full_file_name, dest)
| 43.35
| 303
| 0.657439
|
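The copy script above encodes its expected directory layout only implicitly in string concatenation; the comment block below spells out that layout, inferred from the path expressions in the loop (folder names are taken from the script itself, not verified against the original dataset).

# Layout assumed by the script (inferred from the path strings above):
#
#   <default>/ImageNet_Subset/<Label>/train/
#   <default>/ImageNet_Subset/<Label>/validation/
#   <default>/ImageNet_Subset/<Label>/test/
#
# Layout produced by the script:
#
#   <default>/dataset/train/<Label>/
#   <default>/dataset/validation/<Label>/
#   <default>/dataset/test/<Label>/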
30ed73bcc4022fc4a852252d37d12922c9444347
| 2,193
|
py
|
Python
|
relay.py
|
krazybean/pagerduty_hook
|
fd16ba3c05ee02762c21c8ebc601fc9089403248
|
[
"MIT"
] | null | null | null |
relay.py
|
krazybean/pagerduty_hook
|
fd16ba3c05ee02762c21c8ebc601fc9089403248
|
[
"MIT"
] | null | null | null |
relay.py
|
krazybean/pagerduty_hook
|
fd16ba3c05ee02762c21c8ebc601fc9089403248
|
[
"MIT"
] | null | null | null |
import os
import sys
import requests
import time
import json
class Relay:
def __init__(self):
"""
tests environment for from/to endpoints
"""
        try:
            self.external_relay = os.environ['external_hook']
            self.external_dest = os.environ['destination']
            self.interval = 15
        except KeyError as e:
            # Both endpoints must be configured, e.g.
            #   export external_hook='endpoint'
            #   export destination='endpoint'
            sys.exit("Environment variable not set: {0}".format(e.args[0]))
def heartbeat(self):
"""
grabs from primary relay
"""
        try:
            r = requests.get(self.external_relay)
            try:
                return r.json()
            except ValueError:
                # Response body was not JSON; show it as-is.
                print(r.text)
        except requests.exceptions.ConnectionError as e:
            print("Connection Error (initiating 15s sleep): {0}".format(e))
            time.sleep(15)
def repost(self, data):
"""
sends to destination
"""
headers = {'Content-Type': 'application/json'}
r = requests.post(self.external_dest,
data=json.dumps(data),
headers=headers)
def initiate(self):
""" Looper """
try:
while True:
dataset = self.heartbeat()
if dataset:
                    if 'Queue' in dataset:
self.process_display()
self.retext("Nothing new")
else:
self.retext("New event, posting...=> [{0}]".format(self.external_dest))
self.repost(dataset)
except KeyboardInterrupt:
            print(" <= [Cancelled]")
def process_display(self, char=None):
""" More dots """
        for x in range(self.interval):
print("Waiting {0}s".format(self.interval) + "." * x)
sys.stdout.write("\033[F")
time.sleep(1)
def retext(self, text=None):
""" retexting the sameline """
print("[{0}]".format(text))
if __name__ == '__main__':
    relay = Relay()
    relay.initiate()
| 29.635135
| 95
| 0.510716
|
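Since the Relay class reads its endpoints from the environment, a minimal driver might look like the sketch below; both URLs are placeholders invented for illustration, and the module name in the import simply mirrors this record's file name.

# Hypothetical invocation sketch; endpoint URLs are placeholders.
import os

os.environ["external_hook"] = "https://primary.example.com/hook"
os.environ["destination"] = "https://secondary.example.com/hook"

from relay import Relay

relay = Relay()
relay.initiate()  # poll the primary hook and re-post new events until interrupted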
40eb361df52ac255304c646faf336f9f73401e8b
| 1,821
|
py
|
Python
|
configs/tridentnet/tridentnet_r50_caffe_1x_icdar2021_isolated.py
|
TriplePool/mmdetection
|
f72a87174ef9397b1e9239809bb961c8edbf4239
|
[
"Apache-2.0"
] | null | null | null |
configs/tridentnet/tridentnet_r50_caffe_1x_icdar2021_isolated.py
|
TriplePool/mmdetection
|
f72a87174ef9397b1e9239809bb961c8edbf4239
|
[
"Apache-2.0"
] | null | null | null |
configs/tridentnet/tridentnet_r50_caffe_1x_icdar2021_isolated.py
|
TriplePool/mmdetection
|
f72a87174ef9397b1e9239809bb961c8edbf4239
|
[
"Apache-2.0"
] | null | null | null |
_base_ = [
'../_base_/models/faster_rcnn_r50_caffe_c4_icdar2021.py',
'../_base_/datasets/icdar2021_detection_isolated.py',
'../_base_/schedules/schedule_1x_half_batch.py', '../_base_/default_runtime.py'
]
model = dict(
type='TridentFasterRCNN',
pretrained='open-mmlab://detectron2/resnet50_caffe',
backbone=dict(
type='TridentResNet',
trident_dilations=(1, 2, 3),
num_branch=3,
test_branch_idx=1),
roi_head=dict(type='TridentRoIHead', num_branch=3, test_branch_idx=1))
train_cfg = dict(
rpn_proposal=dict(nms_post=500, max_num=500),
rcnn=dict(
sampler=dict(num=128, pos_fraction=0.5, add_gt_as_proposals=False)))
# use caffe img_norm
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1447, 2048), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.0),
dict(type='Normalize', **img_norm_cfg),
# dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1447, 2048),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
# dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
# dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
| 33.722222
| 83
| 0.641955
|
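A config like this is consumed through mmdetection's config machinery; one way to sanity-check the merged result, assuming an mmdetection checkout where this file sits under configs/tridentnet/ together with the referenced _base_ files, is sketched below.

# Sketch: load and inspect the merged config with mmcv (Config resolves the
# relative _base_ entries, so this must run from an mmdetection checkout).
from mmcv import Config

cfg = Config.fromfile("configs/tridentnet/tridentnet_r50_caffe_1x_icdar2021_isolated.py")
print(cfg.model.type)           # TridentFasterRCNN
print(cfg.data.train.pipeline)  # the train_pipeline defined above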
2be612a9d76c8bacb2caeeee1504e1d920729ba8
| 8,138
|
py
|
Python
|
program_rtp.py
|
kaisubr/rotten-tomatoes-prediction
|
d6d6b81e23253763242f36fbbc973b07ba2fddfc
|
[
"Apache-2.0"
] | 1
|
2020-01-08T22:43:55.000Z
|
2020-01-08T22:43:55.000Z
|
program_rtp.py
|
kaisubr/rotten-tomatoes-prediction
|
d6d6b81e23253763242f36fbbc973b07ba2fddfc
|
[
"Apache-2.0"
] | null | null | null |
program_rtp.py
|
kaisubr/rotten-tomatoes-prediction
|
d6d6b81e23253763242f36fbbc973b07ba2fddfc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_absolute_error
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from xgboost import XGBRegressor
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# In[2]:
CSV_PATH = "../input/rotten-tomatoes-movies-and-critics-datasets/rotten_tomatoes_movies.csv"
df = pd.read_csv(CSV_PATH)
print("Head (below): ")
df.head()
# In[3]:
# A little pre-processing.
# Split the comma-separated genre column into individual indicator columns
print("Creating individual genre columns [Animation, Drama, Horror, ...] and saving to data frame")
genre_cols = df.genre.str.get_dummies(sep=', ')
# print(genre_cols)
for col in genre_cols:
df[col + '_genre'] = genre_cols[col].values
# Make ratings label-encoded
print("Replacing ratings [PG, G, R, ... etc] with numbers 0 - ...")
unique = df['rating'].unique()
print(unique)
label_encoder = LabelEncoder()
df.rating = label_encoder.fit_transform(df.rating)
unique = df['rating'].unique()
print(unique)
# print(df.rating)
# Then drop the old genre/cast/directors/writers columns
# df.drop(['genre', 'cast', 'directors', 'writers'], axis=1)
print("Columns provided: ")
print(df.columns)
print("Head (below): ")
df.head()
# In[4]:
df = df.dropna(axis=0)
# print(df) reveals no columns were dropped. Imputation not needed.
# unique = df['in_theaters_date'].unique()
# print(sorted(unique))
# y = df['tomatometer_status']
y = df['tomatometer_rating']
X = df[['rating', 'runtime_in_minutes', 'Action & Adventure_genre', 'Animation_genre', 'Anime & Manga_genre',
'Art House & International_genre', 'Classics_genre', 'Comedy_genre',
'Cult Movies_genre', 'Documentary_genre', 'Drama_genre',
'Faith & Spirituality_genre', 'Gay & Lesbian_genre', 'Horror_genre',
'Kids & Family_genre', 'Musical & Performing Arts_genre',
'Mystery & Suspense_genre', 'Romance_genre',
'Science Fiction & Fantasy_genre', 'Special Interest_genre',
'Sports & Fitness_genre', 'Television_genre', 'Western_genre', 'studio_name']]
# Split our data
# X_train, X_valid, y_train, y_valid = train_test_split(X, y, random_state=0)
# print(X_train.shape)
# print(" vs ")
# print(X_valid.shape)
# Determine columns that are categorical in nature
obj = (X.dtypes == 'object')
object_cols = list(obj[obj].index)
print("Categorical: ")
print(object_cols)
# one-hot encode rating, directors, writers, studio_name
OH_encoder = OneHotEncoder(handle_unknown = 'ignore', sparse = False)
OH_cols = pd.DataFrame(OH_encoder.fit_transform(X[object_cols]))
OH_cols.index = X.index
numerical_X = X.drop(object_cols, axis = 1)
OH_X = pd.concat([numerical_X, OH_cols], axis=1)
print("Complete shape of feature dataframe: ")
print(OH_X.shape)
#y_train.head()
# y_valid.head()
# Split our data
X_train, X_valid, y_train, y_valid = train_test_split(OH_X, y, random_state=0)
print("Verify that the columns are same size: ")
print(X_train.shape)
print(" vs ")
print(X_valid.shape)
# In[5]:
def mean_absolute_percentage_error(y_valid, y_prediction):
y_valid = np.array(y_valid)
y_prediction = np.array(y_prediction)
return np.mean(np.abs((y_valid - y_prediction) / y_valid)) * 100
# In[6]:
model = None
predictions = None
# Baseline run with scikit-learn's default tree size; kept under a separate
# name so it is not shadowed by the tunable variant below.
def train_and_validate_default(DX_train, DX_valid, Dy_train, Dy_valid):
    global model
    global predictions
    model = RandomForestRegressor(random_state=1)
    model.fit(DX_train, Dy_train)
    predictions = model.predict(DX_valid)
    print(predictions)
    mae = mean_absolute_error(Dy_valid, predictions)
    # print("MAE = " + str(mae))
    return mae
# Variant with a cap on the number of leaf nodes; this is the version called
# in the max_leaf_nodes sweep below.
def train_and_validate(mln, DX_train, DX_valid, Dy_train, Dy_valid):
    global model
    global predictions
    model = RandomForestRegressor(max_leaf_nodes=mln, random_state=1)
    model.fit(DX_train, Dy_train)
    predictions = model.predict(DX_valid)
    print(predictions)
    print(" ... vs ...")
    print(Dy_valid)
    mae = mean_absolute_error(Dy_valid, predictions)
    # print("MAE = " + str(mae))
    return mae
def xg_train_and_validate(DX_train, DX_valid, Dy_train, Dy_valid) :
global model
global predictions
model = XGBRegressor(n_estimators=350, learning_rate=0.20, n_jobs=4, random_state=1)
model.fit(DX_train, Dy_train, early_stopping_rounds=5,
eval_set=[(DX_valid, Dy_valid)], verbose=False)
predictions = model.predict(DX_valid)
print(predictions)
print(" ... vs ...")
print(Dy_valid)
mae = mean_absolute_error(Dy_valid, predictions)
# print("MAE = " + str(mae))
print("MAPE = " + str(mean_absolute_percentage_error(Dy_valid, predictions)))
return mae
# In[7]:
# This reveals ~400 max leaf nodes for the RandomForest model provides MAE of ~19:
for max_leaf_nodes in [4, 40, 400, 1600, 64000]:
mae = train_and_validate(max_leaf_nodes, X_train, X_valid, y_train, y_valid)
print("Max leaf nodes: %d \t Mean Absolute Error: %f" %(max_leaf_nodes, mae))
# In[8]:
# Predict with self.
print("MAE [self] = %d" %(xg_train_and_validate(X_train, X_train, y_train, y_train)) )
# Predict with validation data.
print("MAE [validation] = %d" %(xg_train_and_validate(X_train, X_valid, y_train, y_valid)) )
# In[9]:
print(predictions) # compare with y_valid
valid = y_valid.to_numpy()  # indices above are not contiguous (e.g. y_valid[737], y_valid[10978], ...), so convert to a plain numpy array
corresponding_rating = X_valid.rating.to_numpy() # removes indices.
print(predictions.shape)
print(valid.shape)
print(corresponding_rating)
hist_X = np.zeros(6) # => keep sum [G, NC17, NR, PG, PG-13 and PG-13), R and R)]
count_X = np.zeros(6) # => keep count
err = np.zeros( (predictions.shape[0]) )
for i in range(predictions.shape[0]):
err[i] = abs(predictions[i] - valid[i])
# print("This: " + str(corresponding_rating[i]) + " with error " + str(err[i]))
if(corresponding_rating[i] == 0): #G
hist_X[0] = (err[i] + hist_X[0])
count_X[0] += 1
elif (corresponding_rating[i] == 1): #NC-17
hist_X[1] = (err[i] + hist_X[1])
count_X[1] += 1
elif (corresponding_rating[i] == 2): #NR
hist_X[2] = (err[i] + hist_X[2])
count_X[2] += 1
elif (corresponding_rating[i] == 3): #PG
hist_X[3] = (err[i] + hist_X[3])
count_X[3] += 1
elif (corresponding_rating[i] == 4 or corresponding_rating[i] == 5): #PG-13 || PG-13)
hist_X[4] = (err[i] + hist_X[4])
count_X[4] += 1
elif (corresponding_rating[i] == 6 or corresponding_rating[i] == 7): #R || R)
hist_X[5] = (err[i] + hist_X[5])
count_X[5] += 1
for j in range(6):
hist_X[j] = (hist_X[j] / float(count_X[j]))
print ("\nMean error compared with admission ratings...\n\t[G, NC17, NR, PG, PG-13, R] <=> " + str(hist_X))
# In[10]:
plt.figure(figsize=(6,7))
plt.ylim(0, 100)
plt.title("Mean absolute error for predicting Rotten Tomatoes\ncritic scores in comparison with film ratings")
plt.ylabel("Mean absolute error")
plt.xlabel("Film rating")
s_plot = sns.barplot(x=np.array(['G', 'NC17', 'NR', 'PG', 'PG-13', 'R']), y=hist_X)
| 29.273381
| 150
| 0.685549
|
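As a quick sanity check of the mean_absolute_percentage_error helper defined in the notebook, here is a toy calculation with made-up scores, not data from the Rotten Tomatoes set.

# Toy check of the MAPE formula used above; the numbers are illustrative only.
import numpy as np

y_true = np.array([80, 60, 90])
y_pred = np.array([72, 66, 90])
mape = np.mean(np.abs((y_true - y_pred) / y_true)) * 100
print(round(mape, 2))  # (10% + 10% + 0%) / 3 = 6.67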
1ac99ac4b149af53917ae0f6f4b35ff4ec32c0fe
| 1,031
|
py
|
Python
|
java/test/src/main/resources/test_cross_language_invocation.py
|
eisber/ray
|
94a286ef1d8ad5a3093b7f996a811727fa0e2d3e
|
[
"Apache-2.0"
] | null | null | null |
java/test/src/main/resources/test_cross_language_invocation.py
|
eisber/ray
|
94a286ef1d8ad5a3093b7f996a811727fa0e2d3e
|
[
"Apache-2.0"
] | null | null | null |
java/test/src/main/resources/test_cross_language_invocation.py
|
eisber/ray
|
94a286ef1d8ad5a3093b7f996a811727fa0e2d3e
|
[
"Apache-2.0"
] | null | null | null |
# This file is used by CrossLanguageInvocationTest.java to test cross-language
# invocation.
import six
import ray
@ray.remote
def py_func(value):
assert isinstance(value, bytes)
return b"Response from Python: " + value
@ray.remote
def py_func_call_java_function(value):
assert isinstance(value, bytes)
f = ray.java_function("org.ray.api.test.CrossLanguageInvocationTest",
"bytesEcho")
r = f.remote(value)
return b"[Python]py_func -> " + ray.get(r)
@ray.remote
def py_func_call_java_actor(value):
assert isinstance(value, bytes)
c = ray.java_actor_class(
"org.ray.api.test.CrossLanguageInvocationTest$TestActor")
java_actor = c.remote(b"Counter")
r = java_actor.concat.remote(value)
return ray.get(r)
@ray.remote
class Counter(object):
def __init__(self, value):
self.value = int(value)
def increase(self, delta):
self.value += int(delta)
return str(self.value).encode("utf-8") if six.PY3 else str(self.value)
| 24.547619
| 78
| 0.681862
|
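The fixture above is driven from the Java side of the test, but its Python entry point can also be exercised directly. The sketch below is not part of the test: it assumes the same Ray version that ships ray.java_function, and only calls py_func, which has no Java dependency.

# Sketch: exercising py_func from a plain Python driver.
# The module name simply mirrors this record's file name.
import ray
from test_cross_language_invocation import py_func

ray.init()
result = ray.get(py_func.remote(b"hello"))
print(result)  # b'Response from Python: hello'
ray.shutdown()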
69daa7a824ee858ab1e3d9e802281549ab7951c8
| 950
|
bzl
|
Python
|
test/testdata/angle_bracket_test/input.bzl
|
bsilver8192/stardoc
|
cdd19379490c681563b38ef86299f039bd368ce0
|
[
"Apache-2.0"
] | null | null | null |
test/testdata/angle_bracket_test/input.bzl
|
bsilver8192/stardoc
|
cdd19379490c681563b38ef86299f039bd368ce0
|
[
"Apache-2.0"
] | null | null | null |
test/testdata/angle_bracket_test/input.bzl
|
bsilver8192/stardoc
|
cdd19379490c681563b38ef86299f039bd368ce0
|
[
"Apache-2.0"
] | null | null | null |
"""Input file to test angle bracket bug (https://github.com/bazelbuild/skydoc/issues/186)"""
def bracket_function(name):
"""Dummy docstring with <brackets>.
This rule runs checks on <angle brackets>.
Args:
name: an arg with **formatted** docstring.
Returns:
some <angled> brackets
"""
_ignore = name # @unused
pass
# buildifier: disable=unsorted-dict-items
bracketuse = provider(
doc = "Information with <brackets>",
fields = {
"foo": "A string representing <foo>",
"bar": "A string representing bar",
"baz": "A string representing baz",
},
)
def _rule_impl(ctx):
_ignore = [ctx] # @unused
return []
my_anglebrac = rule(
implementation = _rule_impl,
doc = "Rule with <brackets>",
attrs = {
"useless": attr.string(
doc = "Args with some tags: <tag1>, <tag2>",
default = "Find <brackets>",
),
},
)
| 22.619048
| 92
| 0.585263
|
37ca26d1ebd89704ea87b60f0b5a8e013b9d0d62
| 387
|
py
|
Python
|
bbdd2/asgi.py
|
saczuac/bbdd2
|
08436c3db08fbb39349fead50609ec9d2c78ca57
|
[
"MIT"
] | null | null | null |
bbdd2/asgi.py
|
saczuac/bbdd2
|
08436c3db08fbb39349fead50609ec9d2c78ca57
|
[
"MIT"
] | null | null | null |
bbdd2/asgi.py
|
saczuac/bbdd2
|
08436c3db08fbb39349fead50609ec9d2c78ca57
|
[
"MIT"
] | null | null | null |
"""
ASGI config for bbdd2 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bbdd2.settings')
application = get_asgi_application()
| 22.764706
| 78
| 0.782946
|
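The ASGI module above only exposes the application object; serving it requires an ASGI server. The sketch below uses uvicorn purely as an example, which is an assumption; the project's actual deployment stack is not part of this record.

# Sketch: serving the application with uvicorn (an assumption; any ASGI server works).
import uvicorn

if __name__ == "__main__":
    uvicorn.run("bbdd2.asgi:application", host="127.0.0.1", port=8000)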
7c01f3f774ea59f22e586a71dff8354de9ed4f16
| 3,953
|
py
|
Python
|
src/aob/settings/base.py
|
Busaka/aob
|
cc23925c0bb24c1b8da74bf14a2bdf04584550c7
|
[
"MIT"
] | null | null | null |
src/aob/settings/base.py
|
Busaka/aob
|
cc23925c0bb24c1b8da74bf14a2bdf04584550c7
|
[
"MIT"
] | null | null | null |
src/aob/settings/base.py
|
Busaka/aob
|
cc23925c0bb24c1b8da74bf14a2bdf04584550c7
|
[
"MIT"
] | null | null | null |
"""
Django settings for aob project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from django.core.urlresolvers import reverse_lazy
from os.path import dirname, join, exists
# Build paths inside the project like this: join(BASE_DIR, "directory")
BASE_DIR = dirname(dirname(dirname(__file__)))
STATICFILES_DIRS = [join(BASE_DIR, 'static')]
MEDIA_ROOT = join(BASE_DIR, 'media')
MEDIA_URL = "/media/"
# Use Django templates using the new Django 1.8 TEMPLATES settings
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
join(BASE_DIR, 'templates'),
# insert more TEMPLATE_DIRS here
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Use 12factor inspired environment variables or from a file
import environ
env = environ.Env()
# Ideally move env file should be outside the git repo
# i.e. BASE_DIR.parent.parent
env_file = join(dirname(__file__), 'local.env')
if exists(env_file):
environ.Env.read_env(str(env_file))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Raises ImproperlyConfigured exception if SECRET_KEY not in os.environ
SECRET_KEY = env('SECRET_KEY')
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'authtools',
'crispy_forms',
'easy_thumbnails',
'profiles',
'accounts',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'aob.urls'
WSGI_APPLICATION = 'aob.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in
# os.environ
'default': env.db(),
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
# Crispy Form Theme - Bootstrap 3
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# For Bootstrap 3, change error alert to 'danger'
from django.contrib import messages
MESSAGE_TAGS = {
messages.ERROR: 'danger'
}
# Authentication Settings
AUTH_USER_MODEL = 'authtools.User'
LOGIN_REDIRECT_URL = reverse_lazy("profiles:show_self")
LOGIN_URL = reverse_lazy("accounts:login")
THUMBNAIL_EXTENSION = 'png' # Or any extn for your thumbnails
| 28.035461
| 74
| 0.708576
|
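The settings module above pulls SECRET_KEY and the database configuration from the environment via django-environ; a minimal sketch of how those values are resolved is shown below, with dummy values that are purely illustrative and not taken from the project.

# Sketch of django-environ resolution with dummy values (illustrative only).
import os
os.environ.setdefault("SECRET_KEY", "dummy-local-secret")
os.environ.setdefault("DATABASE_URL", "postgres://aob:aob@localhost:5432/aob")

import environ

env = environ.Env()
print(env("SECRET_KEY"))  # -> dummy-local-secret
print(env.db())           # -> dict with ENGINE/NAME/USER/... parsed from DATABASE_URL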