Dataset schema (⌀ marks nullable columns); one record per source file follows below.

| column | type | values / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24, ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24, ⌀ |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24, ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24, ⌀ |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24, ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24, ⌀ |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
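As a minimal, hedged sketch of how a table with this schema could be inspected with pandas (the parquet file name is an assumption, not something given above):

import pandas as pd

# Hypothetical local export of the records shown below; the file name is an assumption.
df = pd.read_parquet("code_records.parquet")

# Example filter: Python files with at least one star and a longest line under 200 characters.
subset = df[(df["ext"] == "py")
            & (df["max_stars_count"].fillna(0) >= 1)
            & (df["max_line_length"] < 200)]
print(subset[["max_stars_repo_name", "max_stars_repo_path", "size", "alphanum_fraction"]].head())
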
hexsha: 9dc39fc8222ff55bb5f11d961baed140d246d0e2 | size: 7,518 | ext: py | lang: Python
max_stars: repo openminted/galaxy @ bd2147cccf814f8cec93372973b5e6ada4668a80, path lib/galaxy/webapps/galaxy/api/remote_files.py, licenses ["CC-BY-3.0"], count 1, events 2021-04-26T08:46:21.000Z to 2021-04-26T08:46:21.000Z
max_issues: repo saxtouri/galaxy @ 01a098bab92eebe89f02593c46d85360bd3e3ef1, path lib/galaxy/webapps/galaxy/api/remote_files.py, licenses ["CC-BY-3.0"], count 1, events 2019-02-04T16:21:27.000Z to 2019-02-04T16:45:17.000Z
max_forks: repo chambm/galaxy @ fd1926767996a161cd2fc8bd184e6835fd688765, path lib/galaxy/webapps/galaxy/api/remote_files.py, licenses ["CC-BY-3.0"], count 1, events 2018-02-08T10:37:26.000Z to 2018-02-08T10:37:26.000Z
content:
"""
API operations on remote files.
"""
import os
import time
import hashlib
from galaxy import exceptions
from galaxy.web import _future_expose_api as expose_api
from galaxy.util import jstree, unicodify
from galaxy.web.base.controller import BaseAPIController
from operator import itemgetter
import logging
log = logging.getLogger(__name__)
class RemoteFilesAPIController(BaseAPIController):
@expose_api
def index(self, trans, **kwd):
"""
GET /api/remote_files/
Displays remote files.
:param target: target to load available datasets from, defaults to ftp
possible values: ftp, userdir, importdir
:type target: str
:param format: requested format of data, defaults to flat
possible values: flat, jstree, ajax
:returns: list of available files
:rtype: list
"""
target = kwd.get('target', None)
format = kwd.get('format', None)
if target == 'userdir':
user_login = trans.user.email
user_base_dir = trans.app.config.user_library_import_dir
if user_base_dir is None:
raise exceptions.ConfigDoesNotAllowException('The configuration of this Galaxy instance does not allow upload from user directories.')
full_import_dir = os.path.join(user_base_dir, user_login)
if not os.path.exists(full_import_dir):
raise exceptions.ObjectNotFound('You do not have any files in your user directory. Use FTP to upload there.')
if full_import_dir is not None:
if format == 'jstree':
disable = kwd.get('disable', 'folders')
try:
userdir_jstree = self.__create_jstree(full_import_dir, disable)
response = userdir_jstree.jsonData()
except Exception as exception:
log.debug(str(exception))
raise exceptions.InternalServerError('Could not create tree representation of the given folder: ' + str(full_import_dir))
if not response:
raise exceptions.ObjectNotFound('You do not have any files in your user directory. Use FTP to upload there.')
elif format == 'ajax':
raise exceptions.NotImplemented('Not implemented yet. Sorry.')
else:
try:
response = self.__load_all_filenames(full_import_dir)
except Exception as exception:
log.error('Could not get user import files: %s', str(exception), exc_info=True)
raise exceptions.InternalServerError('Could not get the files from your user directory folder.')
else:
raise exceptions.InternalServerError('Could not get the files from your user directory folder.')
elif target == 'importdir':
base_dir = trans.app.config.library_import_dir
if base_dir is None:
raise exceptions.ConfigDoesNotAllowException('The configuration of this Galaxy instance does not allow usage of import directory.')
if format == 'jstree':
disable = kwd.get('disable', 'folders')
try:
importdir_jstree = self.__create_jstree(base_dir, disable)
response = importdir_jstree.jsonData()
except Exception as exception:
log.debug(str(exception))
raise exceptions.InternalServerError('Could not create tree representation of the given folder: ' + str(base_dir))
elif format == 'ajax':
raise exceptions.NotImplemented('Not implemented yet. Sorry.')
else:
try:
response = self.__load_all_filenames(base_dir)
except Exception as exception:
log.error('Could not get user import files: %s', str(exception), exc_info=True)
raise exceptions.InternalServerError('Could not get the files from your import directory folder.')
else:
user_ftp_base_dir = trans.app.config.ftp_upload_dir
if user_ftp_base_dir is None:
raise exceptions.ConfigDoesNotAllowException('The configuration of this Galaxy instance does not allow upload from FTP directories.')
try:
user_ftp_dir = trans.user_ftp_dir
if user_ftp_dir is not None:
response = self.__load_all_filenames(user_ftp_dir)
else:
log.warning('You do not have an FTP directory named as your login at this Galaxy instance.')
return None
except Exception as exception:
log.warning('Could not get ftp files: %s', str(exception), exc_info=True)
return None
return response
def __load_all_filenames(self, directory):
"""
Loads recursively all files within the given folder and its
subfolders and returns a flat list.
"""
response = []
if os.path.exists(directory):
for (dirpath, dirnames, filenames) in os.walk(directory):
for filename in filenames:
path = os.path.relpath(os.path.join(dirpath, filename), directory)
statinfo = os.lstat(os.path.join(dirpath, filename))
response.append(dict(path=path,
size=statinfo.st_size,
ctime=time.strftime("%m/%d/%Y %I:%M:%S %p", time.localtime(statinfo.st_ctime))))
else:
log.warning("The directory \"%s\" does not exist." % directory)
return response
# sort by path
response = sorted(response, key=itemgetter("path"))
return response
def __create_jstree(self, directory, disable='folders'):
"""
Loads recursively all files and folders within the given folder
and its subfolders and returns jstree representation
of its structure.
"""
userdir_jstree = None
jstree_paths = []
if os.path.exists(directory):
for (dirpath, dirnames, filenames) in os.walk(directory):
for dirname in dirnames:
dir_path = os.path.relpath(os.path.join(dirpath, dirname), directory)
dir_path_hash = hashlib.sha1(unicodify(dir_path).encode('utf-8')).hexdigest()
disabled = True if disable == 'folders' else False
jstree_paths.append(jstree.Path(dir_path, dir_path_hash, {'type': 'folder', 'state': {'disabled': disabled}, 'li_attr': {'full_path': dir_path}}))
for filename in filenames:
file_path = os.path.relpath(os.path.join(dirpath, filename), directory)
file_path_hash = hashlib.sha1(unicodify(file_path).encode('utf-8')).hexdigest()
disabled = True if disable == 'files' else False
jstree_paths.append(jstree.Path(file_path, file_path_hash, {'type': 'file', 'state': {'disabled': disabled}, 'li_attr': {'full_path': file_path}}))
else:
raise exceptions.ConfigDoesNotAllowException('The given directory does not exist.')
userdir_jstree = jstree.JSTree(jstree_paths)
return userdir_jstree
avg_line_length: 50.12 | max_line_length: 167 | alphanum_fraction: 0.597765

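The docstring of RemoteFilesAPIController.index in the record above documents GET /api/remote_files/ with a target (ftp, userdir, importdir) and a format (flat, jstree, ajax) parameter. A minimal client sketch, assuming a reachable Galaxy instance; the base URL and API key below are placeholders:

import requests

GALAXY_URL = "https://galaxy.example.org"  # placeholder, not from the record above
API_KEY = "your-api-key"                   # placeholder

resp = requests.get(
    GALAXY_URL + "/api/remote_files/",
    params={"target": "userdir", "format": "jstree", "key": API_KEY},
)
resp.raise_for_status()
print(resp.json())  # jstree representation of the files in the user's import directory
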
hexsha: b256b0308b7a4fcdd886fc732ed8ca8860384b34 | size: 1,055 | ext: py | lang: Python
max_stars: repo shihvictor/Cat-vs-Dog-Kaggle @ 6823aa9ba983e76c06dc868fd8b77d4c09bc691c, path exp_model.py, licenses ["MIT"], count null, events null
max_issues: repo shihvictor/Cat-vs-Dog-Kaggle @ 6823aa9ba983e76c06dc868fd8b77d4c09bc691c, path exp_model.py, licenses ["MIT"], count null, events null
max_forks: repo shihvictor/Cat-vs-Dog-Kaggle @ 6823aa9ba983e76c06dc868fd8b77d4c09bc691c, path exp_model.py, licenses ["MIT"], count null, events null
content:
from keras.layers import Input, ZeroPadding2D, Conv2D, MaxPooling2D, Dropout, Flatten, Dense, Activation, BatchNormalization
from keras.models import Model
def exp_model(input_shape):
"""
Keras model
:return: Keras model using Keras functional API https://keras.io/guides/functional_api/
"""
# Input layer
X_input = Input(input_shape)
# Block0
X = Conv2D(filters=32, kernel_size=(3, 3), strides=1, padding='same')(X_input)
X = Activation(activation='relu')(X)
X = MaxPooling2D(pool_size=(2, 2))(X)
# Block1
X = Conv2D(filters=64, kernel_size=(3, 3), strides=1, padding='same')(X)
X = Activation(activation='relu')(X)
X = MaxPooling2D(pool_size=(2, 2))(X)
# Flatten output of ReLU block
X = Flatten()(X)
# Fully connected layer with softmax activation
X = Dense(units=128, activation="relu")(X)
X = Dropout(rate=.5)(X)
X = Dense(units=2, activation="softmax")(X)
# Keras model instance
model = Model(inputs=X_input, outputs=X, name='exp_model')
return model
avg_line_length: 31.969697 | max_line_length: 124 | alphanum_fraction: 0.667299

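A usage sketch for the exp_model factory in the record above. The input shape and the compile settings are assumptions for illustration, not values taken from the record:

from keras.optimizers import Adam
from exp_model import exp_model  # the module defined in the record above

model = exp_model(input_shape=(64, 64, 3))  # assumed 64x64 RGB input
model.compile(optimizer=Adam(), loss="categorical_crossentropy", metrics=["accuracy"])
model.summary()
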
hexsha: f7a08c9ead1753b1ac0445dcd469498c8356e97b | size: 1,948 | ext: py | lang: Python
max_stars: repo ebadkamil/nicos @ 0355a970d627aae170c93292f08f95759c97f3b5, path nicos_mlz/ngh/setups/memograph-treff.py, licenses ["CC-BY-3.0", "Apache-2.0", "CC-BY-4.0"], count 12, events 2019-11-06T15:40:36.000Z to 2022-01-01T16:23:00.000Z
max_issues: repo ebadkamil/nicos @ 0355a970d627aae170c93292f08f95759c97f3b5, path nicos_mlz/ngh/setups/memograph-treff.py, licenses ["CC-BY-3.0", "Apache-2.0", "CC-BY-4.0"], count 4, events 2019-11-08T10:18:16.000Z to 2021-01-13T13:07:29.000Z
max_forks: repo ISISComputingGroup/nicos @ 94cb4d172815919481f8c6ee686f21ebb76f2068, path nicos_mlz/ngh/setups/memograph-treff.py, licenses ["CC-BY-3.0", "Apache-2.0", "CC-BY-4.0"], count 6, events 2020-01-11T10:52:30.000Z to 2022-02-25T12:35:23.000Z
content:
description = 'memograph readout'
devices = dict(
t_in_memograph_treff = device('nicos_mlz.devices.memograph.MemographValue',
hostname = 'memograph04.care.frm2',
group = 2,
valuename = 'T_in TREFF',
description = 'inlet temperature memograph',
),
t_out_memograph_treff = device('nicos_mlz.devices.memograph.MemographValue',
hostname = 'memograph04.care.frm2',
group = 2,
valuename = 'T_out TREFF',
description = 'outlet temperature memograph',
),
t_diff_memograph_treff = device('nicos_mlz.devices.memograph.MemographValue',
hostname = 'memograph04.care.frm2',
group = 2,
valuename = 'T_diff TREFF',
description = 'temperature difference memograph',
),
p_in_memograph_treff = device('nicos_mlz.devices.memograph.MemographValue',
hostname = 'memograph04.care.frm2',
group = 2,
valuename = 'P_in TREFF',
description = 'inlet pressure memograph',
),
p_out_memograph_treff = device('nicos_mlz.devices.memograph.MemographValue',
hostname = 'memograph04.care.frm2',
group = 2,
valuename = 'P_out TREFF',
description = 'outlet pressure memograph',
),
flow_in_memograph_treff = device('nicos_mlz.devices.memograph.MemographValue',
hostname = 'memograph04.care.frm2',
group = 2,
valuename = 'FLOW_in TREFF',
description = 'inlet flow memograph',
),
leak_memograph_treff = device('nicos_mlz.devices.memograph.MemographValue',
hostname = 'memograph04.care.frm2',
group = 2,
valuename = 'Leak TREFF',
description = 'leakage memograph',
),
cooling_memograph_treff = device('nicos_mlz.devices.memograph.MemographValue',
hostname = 'memograph04.care.frm2',
group = 2,
valuename = 'Cooling TREFF',
description = 'cooling power memograph',
),
)
avg_line_length: 37.461538 | max_line_length: 82 | alphanum_fraction: 0.644251

hexsha: 908974b0d5137e8f37ea4cfbd9fef342a0fcbea6 | size: 17,802 | ext: py | lang: Python
max_stars: repo Christina-Kang/azure-sdk-for-python @ bbf982eb06aab04b8151f69f1d230b7f5fb96ebf, path azure-mgmt-network/azure/mgmt/network/v2017_09_01/operations/express_route_circuit_peerings_operations.py, licenses ["MIT"], count 1, events 2022-03-30T22:39:15.000Z to 2022-03-30T22:39:15.000Z
max_issues: repo Christina-Kang/azure-sdk-for-python @ bbf982eb06aab04b8151f69f1d230b7f5fb96ebf, path azure-mgmt-network/azure/mgmt/network/v2017_09_01/operations/express_route_circuit_peerings_operations.py, licenses ["MIT"], count 54, events 2016-03-25T17:25:01.000Z to 2018-10-22T17:27:54.000Z
max_forks: repo Christina-Kang/azure-sdk-for-python @ bbf982eb06aab04b8151f69f1d230b7f5fb96ebf, path azure-mgmt-network/azure/mgmt/network/v2017_09_01/operations/express_route_circuit_peerings_operations.py, licenses ["MIT"], count 2, events 2017-01-20T18:25:46.000Z to 2017-05-12T21:31:47.000Z
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class ExpressRouteCircuitPeeringsOperations(object):
"""ExpressRouteCircuitPeeringsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-09-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-09-01"
self.config = config
def _delete_initial(
self, resource_group_name, circuit_name, peering_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, circuit_name, peering_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified peering from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'}
def get(
self, resource_group_name, circuit_name, peering_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified authorization from the specified express route
circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: ExpressRouteCircuitPeering or ClientRawResponse if raw=true
:rtype:
~azure.mgmt.network.v2017_09_01.models.ExpressRouteCircuitPeering or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitPeering', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'}
def _create_or_update_initial(
self, resource_group_name, circuit_name, peering_name, peering_parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(peering_parameters, 'ExpressRouteCircuitPeering')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitPeering', response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitPeering', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, circuit_name, peering_name, peering_parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates a peering in the specified express route circuits.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param peering_parameters: Parameters supplied to the create or update
express route circuit peering operation.
:type peering_parameters:
~azure.mgmt.network.v2017_09_01.models.ExpressRouteCircuitPeering
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
ExpressRouteCircuitPeering or
ClientRawResponse<ExpressRouteCircuitPeering> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.ExpressRouteCircuitPeering]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.ExpressRouteCircuitPeering]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
peering_parameters=peering_parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('ExpressRouteCircuitPeering', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}'}
def list(
self, resource_group_name, circuit_name, custom_headers=None, raw=False, **operation_config):
"""Gets all peerings in a specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of ExpressRouteCircuitPeering
:rtype:
~azure.mgmt.network.v2017_09_01.models.ExpressRouteCircuitPeeringPaged[~azure.mgmt.network.v2017_09_01.models.ExpressRouteCircuitPeering]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.ExpressRouteCircuitPeeringPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.ExpressRouteCircuitPeeringPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings'}
avg_line_length: 47.854839 | max_line_length: 195 | alphanum_fraction: 0.676216

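A hedged sketch of exercising the operations class from the record above through the Azure network management client of that SDK generation; every identifier below is a placeholder:

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.network import NetworkManagementClient

# All identifiers below are placeholders.
credentials = ServicePrincipalCredentials(client_id="...", secret="...", tenant="...")
client = NetworkManagementClient(credentials, "<subscription-id>")

# Iterate the ExpressRouteCircuitPeeringPaged result produced by list() above.
for peering in client.express_route_circuit_peerings.list("<resource-group>", "<circuit-name>"):
    print(peering.name, peering.peering_type)

# delete() returns an LROPoller; wait() blocks until the long-running operation finishes.
poller = client.express_route_circuit_peerings.delete("<resource-group>", "<circuit-name>", "<peering-name>")
poller.wait()
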
hexsha: 6be40a31f61f2460917b2b86a9e8de8bea56c2ed | size: 1,018 | ext: py | lang: Python
max_stars: repo dgehringer/pyiron_atomistics @ f8f2d573a483e802c8e5840998a0769378b95e31, path tests/dft/test_bader.py, licenses ["BSD-3-Clause"], count 14, events 2021-01-18T10:03:56.000Z to 2022-03-01T20:59:35.000Z
max_issues: repo dgehringer/pyiron_atomistics @ f8f2d573a483e802c8e5840998a0769378b95e31, path tests/dft/test_bader.py, licenses ["BSD-3-Clause"], count 569, events 2018-04-12T06:37:14.000Z to 2022-03-31T18:06:27.000Z
max_forks: repo srmnitc/pyiron_atomistics @ 2c8052b082f2c4fb6f6291ac2b1f801ea7ab1567, path tests/dft/test_bader.py, licenses ["BSD-3-Clause"], count 6, events 2018-10-23T09:48:48.000Z to 2022-02-13T12:13:00.000Z
content:
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import unittest
import numpy as np
import os
from pyiron_atomistics.dft.bader import parse_charge_vol_file
from pyiron_atomistics.vasp.structure import read_atoms
class TestBader(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.file_location = os.path.dirname(os.path.abspath(__file__))
def test_parse_charge_vol(self):
filename = os.path.join(
self.file_location, "../static/dft/bader_files/ACF.dat")
struct = read_atoms(os.path.join(self.file_location, "../static/vasp_test_files/bader_test/POSCAR"))
charges, volumes = parse_charge_vol_file(structure=struct, filename=filename)
self.assertTrue(np.array_equal(charges, [0.438202, 0.438197, 7.143794]))
self.assertTrue(np.array_equal(volumes, [287.284690, 297.577878, 415.155432]))
avg_line_length: 40.72 | max_line_length: 108 | alphanum_fraction: 0.74165

hexsha: 6ae28fb22db93a299c87db3bbb2764e9f9c4bdeb | size: 4,084 | ext: py | lang: Python
max_stars: repo cta-observatory/cta-lstchain @ d2d22567a6d0028737dd3b663f45c6206e43c437, path lstchain/scripts/onsite/onsite_create_ffactor_systematics_file.py, licenses ["BSD-3-Clause"], count 19, events 2018-05-30T14:13:16.000Z to 2022-03-10T14:04:04.000Z
max_issues: repo cta-observatory/cta-lstchain @ d2d22567a6d0028737dd3b663f45c6206e43c437, path lstchain/scripts/onsite/onsite_create_ffactor_systematics_file.py, licenses ["BSD-3-Clause"], count 868, events 2018-05-31T12:02:25.000Z to 2022-03-30T16:02:42.000Z
max_forks: repo cta-observatory/cta-lstchain @ d2d22567a6d0028737dd3b663f45c6206e43c437, path lstchain/scripts/onsite/onsite_create_ffactor_systematics_file.py, licenses ["BSD-3-Clause"], count 76, events 2018-05-31T09:59:02.000Z to 2022-01-29T17:57:49.000Z
content:
#!/usr/bin/env python
"""
Onsite script to create an F-factor systematic correction file by fitting an intensity scan
"""
import argparse
import os
import subprocess
from lstchain.io.data_management import query_yes_no
import lstchain
def none_or_str(value):
if value == "None":
return None
return value
# parse arguments
parser = argparse.ArgumentParser(description='Create flat-field calibration files',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
required = parser.add_argument_group('required arguments')
optional = parser.add_argument_group('optional arguments')
required.add_argument('-d', '--date', help="Date of the scan (YYYYMMDD)", required=True)
# config file is mandatory because it contains the list of input runs
required.add_argument('-c','--config', help="Config file (json format) with the list of runs", required=True)
version=lstchain.__version__.rsplit('.post',1)[0]
optional.add_argument('-v', '--prod_version', help="Version of the production",
default=f"v{version}")
optional.add_argument('-b','--base_dir', help="Root dir for the output directory tree", type=str, default='/fefs/aswg/data/real')
optional.add_argument('--sub_run', help="sub-run to be processed.", type=int, default=0)
optional.add_argument('--input_prefix', help="Prefix of the input file names", default="calibration")
optional.add_argument('-y', '--yes', action="store_true", help='Do not ask interactively for permissions, assume true')
args = parser.parse_args()
date = args.date
prod_id = args.prod_version
base_dir = args.base_dir
sub_run = args.sub_run
config_file = args.config
prefix = args.input_prefix
yes = args.yes
def main():
try:
# verify config file
if not os.path.exists(config_file):
raise IOError(f"Config file {config_file} does not exist.\n")
print(f"\n--> Config file {config_file}")
# verify input dir
input_dir=f"{base_dir}/monitoring/PixelCalibration/calibration/{date}/{prod_id}"
if not os.path.exists(input_dir):
raise IOError(f"Input directory {input_dir} not found\n")
print(f"\n--> Input directory {input_dir}")
# verify output dir
output_dir = f"{base_dir}/monitoring/PixelCalibration/ffactor_systematics/{date}/{prod_id}"
if not os.path.exists(output_dir):
print(f"--> Create directory {output_dir}")
os.makedirs(output_dir, exist_ok=True)
# make log dir
log_dir = f"{output_dir}/log"
if not os.path.exists(log_dir):
print(f"--> Create directory {log_dir}")
os.makedirs(log_dir, exist_ok=True)
# define output file names
output_file = f"{output_dir}/{prefix}_scan_fit_{date}.{sub_run:04d}.h5"
log_file = f"{output_dir}/log/{prefix}_scan_fit_{date}.{sub_run:04d}.log"
plot_file = f"{output_dir}/log/{prefix}_scan_fit_{date}.{sub_run:04d}.pdf"
if os.path.exists(output_file):
remove = False
if not yes and os.getenv('SLURM_JOB_ID') is None:
remove = query_yes_no(">>> Output file exists already. Do you want to remove it?")
if yes or remove:
os.remove(output_file)
os.remove(log_file)
else:
print(f"\n--> Output file exists already. Stop")
exit(1)
print(f"\n--> Plot file {plot_file}")
print(f"\n--> Log file {log_file}")
#
# produce intensity scan fit file
#
cmd = f"lstchain_fit_intensity_scan " \
f"--config={config_file} --input_dir={input_dir} --output_path={output_file} "\
f"--plot_path={plot_file} --sub_run={sub_run} " \
f"--input_prefix={prefix} --log-file={log_file} --log-file-level=DEBUG "
print("\n--> RUNNING...")
subprocess.run(cmd.split())
print("\n--> END")
except Exception as e:
print(f"\n >>> Exception: {e}")
if __name__ == '__main__':
main()
avg_line_length: 34.033333 | max_line_length: 129 | alphanum_fraction: 0.641283

hexsha: 418ebf94b1c84c216ff5b1f215c9c2b6866896ad | size: 7,777 | ext: py | lang: Python
max_stars: repo patcable/dispatch @ bd7a2dfd1361a56ac6db12e69b7c9ac5695aee37, path src/dispatch/plugins/dispatch_slack/actions.py, licenses ["Apache-2.0"], count null, events null
max_issues: repo patcable/dispatch @ bd7a2dfd1361a56ac6db12e69b7c9ac5695aee37, path src/dispatch/plugins/dispatch_slack/actions.py, licenses ["Apache-2.0"], count null, events null
max_forks: repo patcable/dispatch @ bd7a2dfd1361a56ac6db12e69b7c9ac5695aee37, path src/dispatch/plugins/dispatch_slack/actions.py, licenses ["Apache-2.0"], count null, events null
content:
from fastapi import BackgroundTasks
from dispatch.conversation import service as conversation_service
from dispatch.conversation.enums import ConversationButtonActions
from dispatch.database import SessionLocal
from dispatch.decorators import background_task
from dispatch.incident import flows as incident_flows
from dispatch.incident import service as incident_service
from dispatch.incident.enums import IncidentStatus
from dispatch.incident.models import IncidentUpdate, IncidentRead
from dispatch.plugin import service as plugin_service
from dispatch.plugins.dispatch_slack import service as dispatch_slack_service
from dispatch.report import flows as report_flows
from dispatch.task import service as task_service
from dispatch.task.models import TaskStatus
from .config import (
SLACK_COMMAND_ASSIGN_ROLE_SLUG,
SLACK_COMMAND_ENGAGE_ONCALL_SLUG,
SLACK_COMMAND_REPORT_EXECUTIVE_SLUG,
SLACK_COMMAND_REPORT_TACTICAL_SLUG,
SLACK_COMMAND_UPDATE_INCIDENT_SLUG,
SLACK_COMMAND_RUN_WORKFLOW_SLUG,
)
from .service import get_user_email
slack_client = dispatch_slack_service.create_slack_client()
@background_task
def add_user_to_conversation(
user_id: str, user_email: str, incident_id: int, action: dict, db_session=None
):
"""Adds a user to a conversation."""
incident = incident_service.get(db_session=db_session, incident_id=incident_id)
if incident.status == IncidentStatus.closed:
message = f"Sorry, we cannot add you to a closed incident. Please reach out to the incident commander ({incident.commander.name}) for details."
dispatch_slack_service.send_ephemeral_message(
slack_client, action["container"]["channel_id"], user_id, message
)
else:
dispatch_slack_service.add_users_to_conversation(
slack_client, incident.conversation.channel_id, [user_id]
)
message = f"Success! We've added you to incident {incident.name}. Please check your side bar for the new channel."
dispatch_slack_service.send_ephemeral_message(
slack_client, action["container"]["channel_id"], user_id, message
)
@background_task
def update_task_status(
user_id: str, user_email: str, incident_id: int, action: dict, db_session=None
):
"""Updates a task based on user input."""
action_type, external_task_id = action["actions"][0]["value"].split("-")
resolve = True
if action_type == "reopen":
resolve = False
# we only update the external task, letting syncing take care of propagation to dispatch
task = task_service.get_by_resource_id(db_session=db_session, resource_id=external_task_id)
# avoid external calls if we are already in the desired state
if resolve and task.status == TaskStatus.resolved:
message = "Task is already resolved."
dispatch_slack_service.send_ephemeral_message(
slack_client, action["container"]["channel_id"], user_id, message
)
return
if not resolve and task.status == TaskStatus.open:
message = "Task is already open."
dispatch_slack_service.send_ephemeral_message(
slack_client, action["container"]["channel_id"], user_id, message
)
return
# we don't currently have a good way to get the correct file_id (we don't store a task <-> relationship)
# lets try in both the incident doc and PIR doc
drive_task_plugin = plugin_service.get_active(db_session=db_session, plugin_type="task")
try:
file_id = task.incident.incident_document.resource_id
drive_task_plugin.instance.update(file_id, external_task_id, resolved=resolve)
except Exception:
file_id = task.incident.incident_review_document.resource_id
drive_task_plugin.instance.update(file_id, external_task_id, resolved=resolve)
status = "resolved" if task.status == TaskStatus.open else "re-opened"
message = f"Task successfully {status}."
dispatch_slack_service.send_ephemeral_message(
slack_client, action["container"]["channel_id"], user_id, message
)
@background_task
def handle_update_incident_action(user_id, user_email, incident_id, action, db_session=None):
"""Massages slack dialog data into something that Dispatch can use."""
submission = action["submission"]
notify = True if submission["notify"] == "Yes" else False
incident_in = IncidentUpdate(
title=submission["title"],
description=submission["description"],
incident_type={"name": submission["type"]},
incident_priority={"name": submission["priority"]},
status=submission["status"],
visibility=submission["visibility"],
)
incident = incident_service.get(db_session=db_session, incident_id=incident_id)
existing_incident = IncidentRead.from_orm(incident)
incident_service.update(db_session=db_session, incident=incident, incident_in=incident_in)
incident_flows.incident_update_flow(user_email, incident_id, existing_incident, notify)
@background_task
def handle_assign_role_action(user_id, user_email, incident_id, action, db_session=None):
"""Massages slack dialog data into something that Dispatch can use."""
assignee_user_id = action["submission"]["participant"]
assignee_role = action["submission"]["role"]
assignee_email = get_user_email(client=slack_client, user_id=assignee_user_id)
incident_flows.incident_assign_role_flow(user_email, incident_id, assignee_email, assignee_role)
def dialog_action_functions(action: str):
"""Interprets the action and routes it to the appropriate function."""
action_mappings = {
SLACK_COMMAND_ASSIGN_ROLE_SLUG: [handle_assign_role_action],
SLACK_COMMAND_ENGAGE_ONCALL_SLUG: [incident_flows.incident_engage_oncall_flow],
SLACK_COMMAND_REPORT_EXECUTIVE_SLUG: [report_flows.create_executive_report],
SLACK_COMMAND_REPORT_TACTICAL_SLUG: [report_flows.create_tactical_report],
SLACK_COMMAND_UPDATE_INCIDENT_SLUG: [handle_update_incident_action],
}
# this allows for unique action blocks e.g. invite-user or invite-user-1, etc
for key in action_mappings.keys():
if key in action:
return action_mappings[key]
return []
def block_action_functions(action: str):
"""Interprets the action and routes it to the appropriate function."""
action_mappings = {
ConversationButtonActions.invite_user: [add_user_to_conversation],
ConversationButtonActions.update_task_status: [update_task_status],
}
# this allows for unique action blocks e.g. invite-user or invite-user-1, etc
for key in action_mappings.keys():
if key in action:
return action_mappings[key]
return []
def handle_dialog_action(action: dict, background_tasks: BackgroundTasks, db_session: SessionLocal):
"""Handles all dialog actions."""
channel_id = action["channel"]["id"]
conversation = conversation_service.get_by_channel_id_ignoring_channel_type(
db_session=db_session, channel_id=channel_id
)
incident_id = conversation.incident_id
user_id = action["user"]["id"]
user_email = action["user"]["email"]
action_id = action["callback_id"]
for f in dialog_action_functions(action_id):
background_tasks.add_task(f, user_id, user_email, incident_id, action)
def handle_block_action(action: dict, background_tasks: BackgroundTasks):
"""Handles a standalone block action."""
action_id = action["actions"][0]["block_id"]
incident_id = action["actions"][0]["value"]
user_id = action["user"]["id"]
user_email = action["user"]["email"]
for f in block_action_functions(action_id):
background_tasks.add_task(f, user_id, user_email, incident_id, action)
avg_line_length: 41.367021 | max_line_length: 151 | alphanum_fraction: 0.743474

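dialog_action_functions and block_action_functions in the record above route by substring so that suffixed action ids (e.g. invite-user-1) still resolve. A standalone sketch of that routing idea with made-up keys and handlers, not the Dispatch code itself:

def route(action_id, mappings):
    """Return the first handler list whose key occurs inside the action id."""
    for key, handlers in mappings.items():
        if key in action_id:
            return handlers
    return []

def handle_invite(*args):  # stand-in handler
    print("invite", args)

mappings = {"invite-user": [handle_invite]}
assert route("invite-user-1", mappings) == [handle_invite]  # suffixed ids still match
assert route("unknown-action", mappings) == []
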
hexsha: d5127fc0c8fa03ca1295ca888e69ab0cdb154fb8 | size: 8,121 | ext: py | lang: Python
max_stars: repo gifwittit/KAIR @ 4391a476058037e73893b59f4ae78ecb788a6686, path kair/main_train_rrdb_psnr.py, licenses ["MIT"], count null, events null
max_issues: repo gifwittit/KAIR @ 4391a476058037e73893b59f4ae78ecb788a6686, path kair/main_train_rrdb_psnr.py, licenses ["MIT"], count null, events null
max_forks: repo gifwittit/KAIR @ 4391a476058037e73893b59f4ae78ecb788a6686, path kair/main_train_rrdb_psnr.py, licenses ["MIT"], count null, events null
content:
import os.path
import math
import argparse
import random
import numpy as np
import logging
from torch.utils.data import DataLoader
import torch
from kair.utils import utils_logger, utils_option as option, utils_image as util
from kair.data.select_dataset import define_Dataset
from kair.models.select_model import define_Model
'''
# --------------------------------------------
# training code for RRDB_PSNR
# --------------------------------------------
# Kai Zhang (cskaizhang@gmail.com)
# github: https://github.com/cszn/KAIR
#
# Reference:
@inproceedings{wang2018esrgan,
title={Esrgan: Enhanced super-resolution generative adversarial networks},
author={Wang, Xintao and Yu, Ke and Wu, Shixiang and Gu, Jinjin and Liu, Yihao and Dong, Chao and Qiao, Yu and Change Loy, Chen},
booktitle={European Conference on Computer Vision (ECCV)},
pages={0--0},
year={2018}
}
# --------------------------------------------
# https://github.com/xinntao/BasicSR
# --------------------------------------------
'''
def main(json_path='options/train_rrdb_psnr.json'):
'''
# ----------------------------------------
# Step--1 (prepare opt)
# ----------------------------------------
'''
parser = argparse.ArgumentParser()
parser.add_argument('-opt', type=str, default=json_path, help='Path to option JSON file.')
opt = option.parse(parser.parse_args().opt, is_train=True)
util.mkdirs((path for key, path in opt['path'].items() if 'pretrained' not in key))
# ----------------------------------------
# update opt
# ----------------------------------------
# -->-->-->-->-->-->-->-->-->-->-->-->-->-
init_iter, init_path_G = option.find_last_checkpoint(opt['path']['models'], net_type='G')
opt['path']['pretrained_netG'] = init_path_G
current_step = init_iter
border = opt['scale']
# --<--<--<--<--<--<--<--<--<--<--<--<--<-
# ----------------------------------------
# save opt to a '../option.json' file
# ----------------------------------------
option.save(opt)
# ----------------------------------------
# return None for missing key
# ----------------------------------------
opt = option.dict_to_nonedict(opt)
# ----------------------------------------
# configure logger
# ----------------------------------------
logger_name = 'train'
utils_logger.logger_info(logger_name, os.path.join(opt['path']['log'], logger_name + '.log'))
logger = logging.getLogger(logger_name)
logger.info(option.dict2str(opt))
# ----------------------------------------
# seed
# ----------------------------------------
seed = opt['train']['manual_seed']
if seed is None:
seed = random.randint(1, 10000)
logger.info('Random seed: {}'.format(seed))
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
'''
# ----------------------------------------
# Step--2 (create dataloader)
# ----------------------------------------
'''
# ----------------------------------------
# 1) create_dataset
# 2) create dataloader for train and test
# ----------------------------------------
for phase, dataset_opt in opt['datasets'].items():
if phase == 'train':
train_set = define_Dataset(dataset_opt)
train_size = int(math.ceil(len(train_set) / dataset_opt['dataloader_batch_size']))
logger.info('Number of train images: {:,d}, iters: {:,d}'.format(len(train_set), train_size))
train_loader = DataLoader(train_set,
batch_size=dataset_opt['dataloader_batch_size'],
shuffle=dataset_opt['dataloader_shuffle'],
num_workers=dataset_opt['dataloader_num_workers'],
drop_last=True,
pin_memory=True)
elif phase == 'test':
test_set = define_Dataset(dataset_opt)
test_loader = DataLoader(test_set, batch_size=1,
shuffle=False, num_workers=1,
drop_last=False, pin_memory=True)
else:
raise NotImplementedError("Phase [%s] is not recognized." % phase)
'''
# ----------------------------------------
# Step--3 (initialize model)
# ----------------------------------------
'''
model = define_Model(opt)
logger.info(model.info_network())
model.init_train()
logger.info(model.info_params())
'''
# ----------------------------------------
# Step--4 (main training)
# ----------------------------------------
'''
for epoch in range(1000000): # keep running
for i, train_data in enumerate(train_loader):
current_step += 1
# -------------------------------
# 1) update learning rate
# -------------------------------
model.update_learning_rate(current_step)
# -------------------------------
# 2) feed patch pairs
# -------------------------------
model.feed_data(train_data)
# -------------------------------
# 3) optimize parameters
# -------------------------------
model.optimize_parameters(current_step)
# -------------------------------
# 4) training information
# -------------------------------
if current_step % opt['train']['checkpoint_print'] == 0:
logs = model.current_log() # such as loss
message = '<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> '.format(epoch, current_step, model.current_learning_rate())
for k, v in logs.items(): # merge log information into message
message += '{:s}: {:.3e} '.format(k, v)
logger.info(message)
# -------------------------------
# 5) save model
# -------------------------------
if current_step % opt['train']['checkpoint_save'] == 0:
logger.info('Saving the model.')
model.save(current_step)
# -------------------------------
# 6) testing
# -------------------------------
if current_step % opt['train']['checkpoint_test'] == 0:
avg_psnr = 0.0
idx = 0
for test_data in test_loader:
idx += 1
image_name_ext = os.path.basename(test_data['L_path'][0])
img_name, ext = os.path.splitext(image_name_ext)
img_dir = os.path.join(opt['path']['images'], img_name)
util.mkdir(img_dir)
model.feed_data(test_data)
model.test()
visuals = model.current_visuals()
E_img = util.tensor2uint(visuals['E'])
H_img = util.tensor2uint(visuals['H'])
# -----------------------
# save estimated image E
# -----------------------
save_img_path = os.path.join(img_dir, '{:s}_{:d}.png'.format(img_name, current_step))
util.imsave(E_img, save_img_path)
# -----------------------
# calculate PSNR
# -----------------------
current_psnr = util.calculate_psnr(E_img, H_img, border=border)
logger.info('{:->4d}--> {:>10s} | {:<4.2f}dB'.format(idx, image_name_ext, current_psnr))
avg_psnr += current_psnr
avg_psnr = avg_psnr / idx
# testing log
logger.info('<epoch:{:3d}, iter:{:8,d}, Average PSNR : {:<.2f}dB\n'.format(epoch, current_step, avg_psnr))
logger.info('Saving the final model.')
model.save('latest')
logger.info('End of training.')
if __name__ == '__main__':
main()
avg_line_length: 35.933628 | max_line_length: 131 | alphanum_fraction: 0.440956

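The KAIR training script above drives everything from the option JSON passed via -opt. A hedged sketch of the keys the script actually reads (the structure follows the accesses in the code; the values are placeholders, not the real train_rrdb_psnr.json):

opt_sketch = {
    "scale": 4,                              # placeholder upscaling factor
    "path": {"models": "...", "log": "...", "images": "..."},
    "train": {
        "manual_seed": None,                 # None lets the script pick a random seed
        "checkpoint_print": 200,             # iterations between log messages (placeholder)
        "checkpoint_save": 5000,             # iterations between model saves (placeholder)
        "checkpoint_test": 5000,             # iterations between test passes (placeholder)
    },
    "datasets": {
        "train": {"dataloader_batch_size": 16, "dataloader_shuffle": True,
                  "dataloader_num_workers": 8},
        "test": {},
    },
}
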
hexsha: 0e61ea5030f0bcdb08bcb2d55b6b2429d8df5760 | size: 35,473 | ext: py | lang: Python
max_stars: repo ReshuKumari/oppia @ cb89b633275b3d0b2d02e0d22e0c472d8b8da0e1, path core/domain/opportunity_services.py, licenses ["Apache-2.0"], count null, events null
max_issues: repo ReshuKumari/oppia @ cb89b633275b3d0b2d02e0d22e0c472d8b8da0e1, path core/domain/opportunity_services.py, licenses ["Apache-2.0"], count null, events null
max_forks: repo ReshuKumari/oppia @ cb89b633275b3d0b2d02e0d22e0c472d8b8da0e1, path core/domain/opportunity_services.py, licenses ["Apache-2.0"], count null, events null
content:
# coding: utf-8
#
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands that can be used to operate on opportunity models."""
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from constants import constants
from core.domain import exp_fetchers
from core.domain import opportunity_domain
from core.domain import question_fetchers
from core.domain import story_fetchers
from core.domain import topic_fetchers
from core.platform import models
import utils
(opportunity_models,) = models.Registry.import_models(
[models.NAMES.opportunity])
# NOTE TO DEVELOPERS: The functions:
# - delete_all_exploration_opportunity_summary_models()
# - delete_all_skill_opportunity_models()
# were removed in #13021 as part of the migration to Apache Beam. Please refer
# to that PR if you need to reinstate them.
def is_exploration_available_for_contribution(exp_id):
"""Checks whether a given exploration id belongs to a curated list of
explorations, i.e., whether it's used as the chapter of any story.
Args:
exp_id: str. The id of the exploration which is needed to be checked.
Returns:
bool. Whether the given exp_id belongs to the curated explorations.
"""
model = opportunity_models.ExplorationOpportunitySummaryModel.get(
exp_id, strict=False)
return True if model is not None else False
def get_exploration_opportunity_summary_from_model(model):
"""Returns the ExplorationOpportunitySummary object out of the model.
Args:
model: ExplorationOpportunitySummaryModel. The exploration opportunity
summary model.
Returns:
ExplorationOpportunitySummary. The corresponding
ExplorationOpportunitySummary object.
"""
# We're making sure that the audio language codes in any exploration
# opportunity domain object match the ones in
# constants.SUPPORTED_AUDIO_LANGUAGES.
set_of_all_languages = set(
model.incomplete_translation_language_codes +
model.language_codes_needing_voice_artists +
model.language_codes_with_assigned_voice_artists)
supported_language_codes = set([language['id'] for language in (
constants.SUPPORTED_AUDIO_LANGUAGES)])
missing_language_codes = list(
supported_language_codes - set_of_all_languages)
if missing_language_codes:
logging.info(
'Missing language codes %s in exploration opportunity model with '
'id %s' % (missing_language_codes, model.id))
new_incomplete_translation_language_codes = (
model.incomplete_translation_language_codes + missing_language_codes)
return opportunity_domain.ExplorationOpportunitySummary(
model.id, model.topic_id, model.topic_name, model.story_id,
model.story_title, model.chapter_title, model.content_count,
new_incomplete_translation_language_codes, model.translation_counts,
model.language_codes_needing_voice_artists,
model.language_codes_with_assigned_voice_artists)
def _save_multi_exploration_opportunity_summary(
exploration_opportunity_summary_list):
"""Stores multiple ExplorationOpportunitySummary into datastore as a
ExplorationOpportunitySummaryModel.
Args:
exploration_opportunity_summary_list: list(
ExplorationOpportunitySummary). A list of exploration opportunity
summary object.
"""
exploration_opportunity_summary_model_list = []
for opportunity_summary in exploration_opportunity_summary_list:
model = opportunity_models.ExplorationOpportunitySummaryModel(
id=opportunity_summary.id,
topic_id=opportunity_summary.topic_id,
topic_name=opportunity_summary.topic_name,
story_id=opportunity_summary.story_id,
story_title=opportunity_summary.story_title,
chapter_title=opportunity_summary.chapter_title,
content_count=opportunity_summary.content_count,
incomplete_translation_language_codes=(
opportunity_summary.incomplete_translation_language_codes),
translation_counts=opportunity_summary.translation_counts,
language_codes_needing_voice_artists=(
opportunity_summary.language_codes_needing_voice_artists),
language_codes_with_assigned_voice_artists=(
opportunity_summary.language_codes_with_assigned_voice_artists)
)
exploration_opportunity_summary_model_list.append(model)
(
opportunity_models.ExplorationOpportunitySummaryModel
.update_timestamps_multi(exploration_opportunity_summary_model_list))
opportunity_models.ExplorationOpportunitySummaryModel.put_multi(
exploration_opportunity_summary_model_list)
def _create_exploration_opportunity_summary(topic, story, exploration):
"""Create an ExplorationOpportunitySummary object with the given topic,
story and exploration object.
Args:
topic: Topic. The topic object to which the opportunity belongs.
story: Story. The story object to which the opportunity belongs.
exploration: Exploration. The exploration object to which the
opportunity belongs.
Returns:
ExplorationOpportunitySummary. The exploration opportunity summary
object.
"""
audio_language_codes = set([
language['id'] for language in constants.SUPPORTED_AUDIO_LANGUAGES])
complete_translation_languages = set(
exploration.get_languages_with_complete_translation())
incomplete_translation_language_codes = (
audio_language_codes - complete_translation_languages)
language_codes_needing_voice_artists = complete_translation_languages
if exploration.language_code in incomplete_translation_language_codes:
# Removing exploration language from incomplete translation
# languages list as exploration does not need any translation in
# its own language.
incomplete_translation_language_codes.discard(
exploration.language_code)
# Adding exploration language to voiceover required languages
# list as exploration can be voiceovered in it's own language.
language_codes_needing_voice_artists.add(exploration.language_code)
content_count = exploration.get_content_count()
translation_counts = exploration.get_translation_counts()
story_node = story.story_contents.get_node_with_corresponding_exp_id(
exploration.id)
# TODO(#7376): Once the voiceover application functionality is
# implemented change this method such that it also populates the
# language_codes_with_assigned_voice_artists with the required data.
exploration_opportunity_summary = (
opportunity_domain.ExplorationOpportunitySummary(
exploration.id, topic.id, topic.name, story.id, story.title,
story_node.title, content_count,
list(incomplete_translation_language_codes), translation_counts,
list(language_codes_needing_voice_artists), []))
return exploration_opportunity_summary
def add_new_exploration_opportunities(story_id, exp_ids):
"""Adds new exploration opportunity into the model.
Args:
story_id: str. ID of the story.
exp_ids: list(str). A list of exploration ids for which new
opportunities are to be created. All exp_ids must be part of the
given story.
"""
story = story_fetchers.get_story_by_id(story_id)
topic = topic_fetchers.get_topic_by_id(story.corresponding_topic_id)
_create_exploration_opportunities(story, topic, exp_ids)
def create_exploration_opportunities_for_story(story_id, topic_id):
"""Creates exploration opportunities corresponding to the supplied published
story ID iff the topic linked to the story is published.
Args:
story_id: str. The ID of the story domain object.
topic_id: str. The ID of the topic domain object corresponding to the
supplied story.
Raises:
Exception. A topic with the given ID doesn't exist.
Exception. The topic rights could not be found.
"""
story = story_fetchers.get_story_by_id(story_id)
topic = topic_fetchers.get_topic_by_id(topic_id)
topic_rights = topic_fetchers.get_topic_rights(topic.id)
if topic_rights.topic_is_published:
exp_ids_in_story = story.story_contents.get_all_linked_exp_ids()
_create_exploration_opportunities(story, topic, exp_ids_in_story)
def create_exploration_opportunities_for_topic(topic_id):
"""Creates exploration opportunities corresponding to each of the supplied
published topic's published stories.
Args:
topic_id: str. The ID of the topic domain object.
"""
topic = topic_fetchers.get_topic_by_id(topic_id)
for story_reference in topic.get_all_story_references():
if not story_reference.story_is_published:
continue
story = story_fetchers.get_story_by_id(
story_reference.story_id, strict=False)
if story is not None:
exp_ids_in_story = story.story_contents.get_all_linked_exp_ids()
_create_exploration_opportunities(story, topic, exp_ids_in_story)
def _create_exploration_opportunities(story, topic, exp_ids):
"""Creates new exploration opportunities corresponding to the supplied
story, topic, and exploration IDs.
Args:
story: Story. The story domain object corresponding to the exploration
opportunities.
topic: Topic. The topic domain object corresponding to the exploration
opportunities.
exp_ids: list(str). A list of exploration ids for which new
opportunities are to be created. All exp_ids must be part of the
given story.
"""
explorations = exp_fetchers.get_multiple_explorations_by_id(exp_ids)
exploration_opportunity_summary_list = []
for exploration in explorations.values():
exploration_opportunity_summary_list.append(
_create_exploration_opportunity_summary(
topic, story, exploration))
_save_multi_exploration_opportunity_summary(
exploration_opportunity_summary_list)
def update_opportunity_with_updated_exploration(exp_id):
"""Updates the opportunities models with the changes made in the
exploration.
Args:
exp_id: str. The exploration id which is also the id of the opportunity
model.
"""
updated_exploration = exp_fetchers.get_exploration_by_id(exp_id)
content_count = updated_exploration.get_content_count()
translation_counts = updated_exploration.get_translation_counts()
complete_translation_language_list = (
updated_exploration.get_languages_with_complete_translation())
model = opportunity_models.ExplorationOpportunitySummaryModel.get(exp_id)
exploration_opportunity_summary = (
get_exploration_opportunity_summary_from_model(model))
exploration_opportunity_summary.content_count = content_count
exploration_opportunity_summary.translation_counts = translation_counts
exploration_opportunity_summary.incomplete_translation_language_codes = (
utils.compute_list_difference(
exploration_opportunity_summary
.incomplete_translation_language_codes,
complete_translation_language_list
)
)
new_languages_for_voiceover = set(complete_translation_language_list) - set(
exploration_opportunity_summary.
language_codes_with_assigned_voice_artists)
    # We only append the newly completed languages to
    # language_codes_needing_voice_artists (instead of adding the whole
    # complete_translation_language_list), because the set of fully translated
    # languages changes dynamically as content text changes, whereas voiceover
    # is longer-term work: a voice artist can still work on an exploration
    # whose text translation needs a small update.
language_codes_needing_voice_artists_set = set(
exploration_opportunity_summary.language_codes_needing_voice_artists)
language_codes_needing_voice_artists_set |= set(new_languages_for_voiceover)
exploration_opportunity_summary.language_codes_needing_voice_artists = list(
language_codes_needing_voice_artists_set)
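    # Illustrative example (not from the original code): if translations were
    # just completed in 'hi' and 'hi' already has an assigned voice artist,
    # new_languages_for_voiceover is empty and the list stays unchanged; if no
    # artist is assigned for 'hi' yet, 'hi' is appended here.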
exploration_opportunity_summary.validate()
_save_multi_exploration_opportunity_summary(
[exploration_opportunity_summary])
def update_exploration_opportunities_with_story_changes(story, exp_ids):
"""Updates the opportunities models with the story changes.
Args:
story: Story. The new story object.
exp_ids: list(str). A list of exploration IDs whose exploration
opportunity summary models need to be updated.
"""
exp_opportunity_models = (
opportunity_models.ExplorationOpportunitySummaryModel.get_multi(
exp_ids))
exploration_opportunity_summary_list = []
for exp_opportunity_model in exp_opportunity_models:
exploration_opportunity_summary = (
get_exploration_opportunity_summary_from_model(
exp_opportunity_model))
exploration_opportunity_summary.story_title = story.title
node = story.story_contents.get_node_with_corresponding_exp_id(
exploration_opportunity_summary.id)
exploration_opportunity_summary.chapter_title = node.title
exploration_opportunity_summary.validate()
exploration_opportunity_summary_list.append(
exploration_opportunity_summary)
_save_multi_exploration_opportunity_summary(
exploration_opportunity_summary_list)
def update_exploration_voiceover_opportunities(
exp_id, assigned_voice_artist_in_language_code):
"""Updates the language_codes_with_assigned_voice_artists of exploration
opportunity model.
Args:
exp_id: str. The ID of the exploration.
assigned_voice_artist_in_language_code: str. The language code in which
a voice artist is assigned to the exploration.
"""
model = opportunity_models.ExplorationOpportunitySummaryModel.get(exp_id)
exploration_opportunity_summary = (
get_exploration_opportunity_summary_from_model(model))
exploration_opportunity_summary.language_codes_needing_voice_artists.remove(
assigned_voice_artist_in_language_code)
(
exploration_opportunity_summary
.language_codes_with_assigned_voice_artists.append(
assigned_voice_artist_in_language_code))
exploration_opportunity_summary.validate()
_save_multi_exploration_opportunity_summary(
[exploration_opportunity_summary])
def delete_exploration_opportunities(exp_ids):
"""Deletes the ExplorationOpportunitySummaryModel models corresponding to
the given exp_ids.
Args:
exp_ids: list(str). A list of exploration IDs whose opportunity summary
models are to be deleted.
"""
exp_opportunity_models = (
opportunity_models.ExplorationOpportunitySummaryModel.get_multi(
exp_ids))
exp_opportunity_models_to_be_deleted = [
model for model in exp_opportunity_models
if model is not None]
opportunity_models.ExplorationOpportunitySummaryModel.delete_multi(
exp_opportunity_models_to_be_deleted)
def delete_exploration_opportunities_corresponding_to_topic(topic_id):
"""Deletes the ExplorationOpportunitySummaryModel models which corresponds
to the given topic_id.
Args:
topic_id: str. The ID of the topic.
"""
exp_opportunity_models = (
opportunity_models.ExplorationOpportunitySummaryModel.get_by_topic(
topic_id))
opportunity_models.ExplorationOpportunitySummaryModel.delete_multi(
exp_opportunity_models)
def get_exploration_opportunity_ids_corresponding_to_topic(topic_id):
"""Returns the exploration IDs corresponding to the
ExplorationOpportunitySummaryModels that are associated with the supplied
topic ID.
Args:
topic_id: str. The ID of the topic.
Returns:
list(str). The exploration IDs.
"""
exp_opportunity_models = (
opportunity_models.ExplorationOpportunitySummaryModel.get_by_topic(
topic_id))
return [model.id for model in exp_opportunity_models if model is not None]
def update_exploration_opportunities(old_story, new_story):
"""Updates the opportunities models according to the changes made in the
story.
Args:
old_story: Story. The old story object which is now updated.
new_story: Story. The new story object.
"""
model_ids_need_update = set([])
exp_ids_in_old_story = old_story.story_contents.get_all_linked_exp_ids()
exp_ids_in_new_story = new_story.story_contents.get_all_linked_exp_ids()
new_added_exp_ids = set(exp_ids_in_new_story) - set(exp_ids_in_old_story)
deleted_exp_ids = set(exp_ids_in_old_story) - set(exp_ids_in_new_story)
unchanged_exp_ids = set(exp_ids_in_new_story) - new_added_exp_ids
if old_story.title != new_story.title:
model_ids_need_update |= set(unchanged_exp_ids)
else:
for exp_id in unchanged_exp_ids:
new_node = (
new_story.story_contents.get_node_with_corresponding_exp_id(
exp_id))
old_node = (
old_story.story_contents.get_node_with_corresponding_exp_id(
exp_id))
if old_node.title != new_node.title:
model_ids_need_update.add(exp_id)
update_exploration_opportunities_with_story_changes(
new_story, list(model_ids_need_update))
add_new_exploration_opportunities(new_story.id, new_added_exp_ids)
delete_exploration_opportunities(list(deleted_exp_ids))
def delete_exp_opportunities_corresponding_to_story(story_id):
"""Deletes the ExplorationOpportunitySummaryModel models which corresponds
to the given story_id.
Args:
story_id: str. The ID of the story.
"""
exp_opprtunity_model_class = (
opportunity_models.ExplorationOpportunitySummaryModel)
exp_opportunity_models = exp_opprtunity_model_class.get_all().filter(
exp_opprtunity_model_class.story_id == story_id
)
exp_opprtunity_model_class.delete_multi(exp_opportunity_models)
def get_translation_opportunities(language_code, cursor):
"""Returns a list of opportunities available for translation in a specific
language.
Args:
cursor: str or None. If provided, the list of returned entities
starts from this datastore cursor. Otherwise, the returned
entities start from the beginning of the full list of entities.
language_code: str. The language for which translation opportunities
should be fetched.
Returns:
3-tuple(opportunities, cursor, more). where:
opportunities: list(ExplorationOpportunitySummary). A list of
ExplorationOpportunitySummary domain objects.
cursor: str or None. A query cursor pointing to the next batch of
results. If there are no more results, this might be None.
more: bool. If True, there are (probably) more results after this
batch. If False, there are no further results after this batch.
"""
page_size = constants.OPPORTUNITIES_PAGE_SIZE
exp_opportunity_summary_models, cursor, more = (
opportunity_models
.ExplorationOpportunitySummaryModel.get_all_translation_opportunities(
page_size, cursor, language_code))
opportunities = []
for exp_opportunity_summary_model in exp_opportunity_summary_models:
exp_opportunity_summary = (
get_exploration_opportunity_summary_from_model(
exp_opportunity_summary_model))
opportunities.append(exp_opportunity_summary)
return opportunities, cursor, more
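    # Illustrative pagination sketch (hypothetical caller code, not part of the
    # original module): page through every Hindi translation opportunity.
    #
    #   cursor, more = None, True
    #   while more:
    #       batch, cursor, more = get_translation_opportunities('hi', cursor)
    #       handle_batch(batch)  # hypothetical handler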
def get_voiceover_opportunities(language_code, cursor):
"""Returns a list of opportunities available for voiceover in a specific
language.
Args:
cursor: str or None. If provided, the list of returned entities
starts from this datastore cursor. Otherwise, the returned
entities start from the beginning of the full list of entities.
        language_code: str. The language for which voiceover opportunities
            are to be fetched.
Returns:
3-tuple(opportunities, cursor, more). where:
opportunities: list(ExplorationOpportunitySummary). A list of
ExplorationOpportunitySummary domain objects.
cursor: str or None. A query cursor pointing to the next
batch of results. If there are no more results, this might
be None.
more: bool. If True, there are (probably) more results after
this batch. If False, there are no further results after
this batch.
"""
page_size = constants.OPPORTUNITIES_PAGE_SIZE
exp_opportunity_summary_models, cursor, more = (
opportunity_models.ExplorationOpportunitySummaryModel
.get_all_voiceover_opportunities(page_size, cursor, language_code))
opportunities = []
for exp_opportunity_summary_model in exp_opportunity_summary_models:
exp_opportunity_summary = (
get_exploration_opportunity_summary_from_model(
exp_opportunity_summary_model))
opportunities.append(exp_opportunity_summary)
return opportunities, cursor, more
def get_exploration_opportunity_summaries_by_ids(ids):
"""Returns a dict with key as id and value representing
ExplorationOpportunitySummary objects corresponding to the opportunity id.
Args:
ids: list(str). A list of opportunity ids.
Returns:
        dict(str, ExplorationOpportunitySummary|None). A dict mapping each
        opportunity id to its ExplorationOpportunitySummary domain object, or
        None if no such opportunity exists.
"""
opportunities = {opportunity_id: None for opportunity_id in ids}
exp_opportunity_summary_models = (
opportunity_models.ExplorationOpportunitySummaryModel.get_multi(ids))
for exp_opportunity_summary_model in exp_opportunity_summary_models:
if exp_opportunity_summary_model is not None:
opportunities[exp_opportunity_summary_model.id] = (
get_exploration_opportunity_summary_from_model(
exp_opportunity_summary_model))
return opportunities
def update_opportunities_with_new_topic_name(topic_id, topic_name):
"""Updates the exploration opportunity summary models with new topic name.
Args:
topic_id: str. The corresponding topic id of the opportunity.
topic_name: str. The new topic name.
"""
exp_opportunity_models = (
opportunity_models.ExplorationOpportunitySummaryModel.get_by_topic(
topic_id))
exploration_opportunity_summary_list = []
for exp_opportunity_model in exp_opportunity_models:
exploration_opportunity_summary = (
get_exploration_opportunity_summary_from_model(
exp_opportunity_model))
exploration_opportunity_summary.topic_name = topic_name
exploration_opportunity_summary.validate()
exploration_opportunity_summary_list.append(
exploration_opportunity_summary)
_save_multi_exploration_opportunity_summary(
exploration_opportunity_summary_list)
def get_skill_opportunity_from_model(model):
"""Returns a SkillOpportunity domain object from a SkillOpportunityModel.
Args:
model: SkillOpportunityModel. The skill opportunity model.
Returns:
SkillOpportunity. The corresponding SkillOpportunity object.
"""
return opportunity_domain.SkillOpportunity(
model.id, model.skill_description, model.question_count)
def get_skill_opportunities(cursor):
"""Returns a list of skill opportunities available for questions.
Args:
cursor: str or None. If provided, the list of returned entities
starts from this datastore cursor. Otherwise, the returned
entities start from the beginning of the full list of entities.
Returns:
3-tuple(opportunities, cursor, more). where:
opportunities: list(SkillOpportunity). A list of SkillOpportunity
domain objects.
cursor: str or None. A query cursor pointing to the next
batch of results. If there are no more results, this might
be None.
more: bool. If True, there are (probably) more results after
this batch. If False, there are no further results after
this batch.
"""
page_size = constants.OPPORTUNITIES_PAGE_SIZE
skill_opportunity_models, cursor, more = (
opportunity_models.SkillOpportunityModel
.get_skill_opportunities(page_size, cursor))
opportunities = []
for skill_opportunity_model in skill_opportunity_models:
skill_opportunity = (
get_skill_opportunity_from_model(skill_opportunity_model))
opportunities.append(skill_opportunity)
return opportunities, cursor, more
def get_skill_opportunities_by_ids(ids):
"""Returns a list of SkillOpportunity domain objects corresponding to the
given list of ids.
Args:
ids: list(str). A list of the opportunity ids.
Returns:
        dict(str, SkillOpportunity|None). A dict mapping each opportunity id to
        its SkillOpportunity domain object, or None if no such opportunity
        exists.
"""
opportunities = {opportunity_id: None for opportunity_id in ids}
skill_opportunity_models = (
opportunity_models.SkillOpportunityModel.get_multi(ids))
for skill_opportunity_model in skill_opportunity_models:
if skill_opportunity_model is not None:
opportunities[skill_opportunity_model.id] = (
get_skill_opportunity_from_model(skill_opportunity_model))
return opportunities
def create_skill_opportunity(skill_id, skill_description):
"""Creates a SkillOpportunityModel entity in the datastore.
Args:
skill_id: str. The skill_id of the opportunity.
skill_description: str. The skill_description of the opportunity.
Raises:
Exception. If a SkillOpportunityModel corresponding to the supplied
skill_id already exists.
"""
skill_opportunity_model = (
opportunity_models.SkillOpportunityModel.get_by_id(skill_id))
if skill_opportunity_model is not None:
raise Exception(
'SkillOpportunity corresponding to skill ID %s already exists.' % (
skill_id))
questions, _ = (
question_fetchers.get_questions_and_skill_descriptions_by_skill_ids(
constants.MAX_QUESTIONS_PER_SKILL, [skill_id], 0))
skill_opportunity = opportunity_domain.SkillOpportunity(
skill_id=skill_id,
skill_description=skill_description,
question_count=len(questions)
)
_save_skill_opportunities([skill_opportunity])
def _save_skill_opportunities(skill_opportunities):
"""Saves SkillOpportunity domain objects into datastore as
SkillOpportunityModel objects.
Args:
skill_opportunities: list(SkillOpportunity). A list of SkillOpportunity
domain objects.
"""
skill_opportunity_models = []
for skill_opportunity in skill_opportunities:
skill_opportunity.validate()
model = opportunity_models.SkillOpportunityModel(
id=skill_opportunity.id,
skill_description=skill_opportunity.skill_description,
question_count=skill_opportunity.question_count,
)
skill_opportunity_models.append(model)
opportunity_models.SkillOpportunityModel.update_timestamps_multi(
skill_opportunity_models)
opportunity_models.SkillOpportunityModel.put_multi(skill_opportunity_models)
def update_skill_opportunity_skill_description(skill_id, new_description):
"""Updates the skill_description of the SkillOpportunityModel with
new_description.
Args:
skill_id: str. The corresponding skill_id of the opportunity.
new_description: str. The new skill_description.
"""
skill_opportunity = _get_skill_opportunity(skill_id)
if skill_opportunity is not None:
skill_opportunity.skill_description = new_description
_save_skill_opportunities([skill_opportunity])
def _get_skill_opportunity(skill_id):
"""Returns the SkillOpportunity domain object representing a
SkillOpportunityModel with the supplied skill_id in the datastore.
Args:
skill_id: str. The corresponding skill_id of the opportunity.
Returns:
SkillOpportunity or None. The domain object representing a
SkillOpportunity with the supplied skill_id, or None if it does not
exist.
"""
skill_opportunity_model = (
opportunity_models.SkillOpportunityModel.get_by_id(skill_id))
if skill_opportunity_model is not None:
return get_skill_opportunity_from_model(skill_opportunity_model)
return None
def delete_skill_opportunity(skill_id):
"""Deletes the SkillOpportunityModel corresponding to the supplied skill_id.
Args:
skill_id: str. The skill_id corresponding to the to-be-deleted
SkillOpportunityModel.
"""
skill_opportunity_model = (
opportunity_models.SkillOpportunityModel.get_by_id(skill_id))
if skill_opportunity_model is not None:
opportunity_models.SkillOpportunityModel.delete(skill_opportunity_model)
def increment_question_counts(skill_ids, delta):
"""Increments question_count(s) of SkillOpportunityModel(s) with
corresponding skill_ids.
Args:
skill_ids: list(str). A list of skill_ids corresponding to
SkillOpportunityModel(s).
delta: int. The delta for which to increment each question_count.
"""
updated_skill_opportunities = (
_get_skill_opportunities_with_updated_question_counts(skill_ids, delta))
_save_skill_opportunities(updated_skill_opportunities)
def update_skill_opportunities_on_question_linked_skills_change(
old_skill_ids, new_skill_ids):
"""Updates question_count(s) of SkillOpportunityModel(s) corresponding to
the change in linked skill IDs for a question from old_skill_ids to
new_skill_ids, e.g. if skill_id1 is in old_skill_ids, but not in
new_skill_ids, the question_count of the SkillOpportunityModel for skill_id1
would be decremented.
    NOTE: Since this method updates the question_counts based on the change of
    skill_ids from old_skill_ids to new_skill_ids, the two lists must be the
    before and after linked-skill lists of the same question.
Args:
old_skill_ids: list(str). A list of old skill_id(s).
new_skill_ids: list(str). A list of new skill_id(s).
"""
old_skill_ids_set = set(old_skill_ids)
new_skill_ids_set = set(new_skill_ids)
new_skill_ids_added_to_question = new_skill_ids_set - old_skill_ids_set
skill_ids_removed_from_question = old_skill_ids_set - new_skill_ids_set
updated_skill_opportunities = []
updated_skill_opportunities.extend(
_get_skill_opportunities_with_updated_question_counts(
new_skill_ids_added_to_question, 1))
updated_skill_opportunities.extend(
_get_skill_opportunities_with_updated_question_counts(
skill_ids_removed_from_question, -1))
_save_skill_opportunities(updated_skill_opportunities)
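    # Worked example (illustrative, not from the original code): if a question's
    # linked skills change from ['skill_a', 'skill_b'] to ['skill_b', 'skill_c'],
    # then skill_c's opportunity gets question_count + 1, skill_a's gets
    # question_count - 1, and skill_b's opportunity is left untouched.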
def _get_skill_opportunities_with_updated_question_counts(skill_ids, delta):
"""Returns a list of SkillOpportunities with corresponding skill_ids
with question_count(s) updated by delta.
Args:
skill_ids: iterable(str). The IDs of the matching SkillOpportunityModels
in the datastore.
delta: int. The delta by which to update each question_count (can be
negative).
Returns:
list(SkillOpportunity). The updated SkillOpportunities.
"""
updated_skill_opportunities = []
skill_opportunity_models = (
opportunity_models.SkillOpportunityModel.get_multi(skill_ids))
for skill_opportunity_model in skill_opportunity_models:
if skill_opportunity_model is not None:
skill_opportunity = get_skill_opportunity_from_model(
skill_opportunity_model)
skill_opportunity.question_count += delta
updated_skill_opportunities.append(skill_opportunity)
return updated_skill_opportunities
def regenerate_opportunities_related_to_topic(
topic_id, delete_existing_opportunities=False):
"""Regenerates opportunity models which belongs to a given topic.
Args:
topic_id: str. The ID of the topic.
delete_existing_opportunities: bool. Whether to delete all the existing
opportunities related to the given topic.
Returns:
int. The number of opportunity models created.
"""
if delete_existing_opportunities:
exp_opportunity_models = (
opportunity_models.ExplorationOpportunitySummaryModel.get_by_topic(
topic_id))
opportunity_models.ExplorationOpportunitySummaryModel.delete_multi(
exp_opportunity_models)
topic = topic_fetchers.get_topic_by_id(topic_id)
story_ids = topic.get_canonical_story_ids()
stories = story_fetchers.get_stories_by_ids(story_ids)
exp_ids = []
non_existing_story_ids = []
for index, story in enumerate(stories):
if story is None:
non_existing_story_ids.append(story_ids[index])
else:
exp_ids += story.story_contents.get_all_linked_exp_ids()
exp_ids_to_exp = exp_fetchers.get_multiple_explorations_by_id(
exp_ids, strict=False)
non_existing_exp_ids = set(exp_ids) - set(exp_ids_to_exp.keys())
if len(non_existing_exp_ids) > 0 or len(non_existing_story_ids) > 0:
raise Exception(
'Failed to regenerate opportunities for topic id: %s, '
'missing_exp_with_ids: %s, missing_story_with_ids: %s' % (
topic_id, list(non_existing_exp_ids), non_existing_story_ids))
exploration_opportunity_summary_list = []
for story in stories:
for exp_id in story.story_contents.get_all_linked_exp_ids():
exploration_opportunity_summary_list.append(
_create_exploration_opportunity_summary(
topic, story, exp_ids_to_exp[exp_id]))
_save_multi_exploration_opportunity_summary(
exploration_opportunity_summary_list)
return len(exploration_opportunity_summary_list)
| 40.773563 | 80 | 0.737801 |
81cde8bbad2444317f45c9efc6e1c571d2368888 | 1,825 | py | Python | watchman/integration/test_bsdish.py | kabat87/watchman | 6cab7e98f70722e9d635086596d543c0e1875e28 | ["MIT"] | null | null | null | watchman/integration/test_bsdish.py | kabat87/watchman | 6cab7e98f70722e9d635086596d543c0e1875e28 | ["MIT"] | null | null | null | watchman/integration/test_bsdish.py | kabat87/watchman | 6cab7e98f70722e9d635086596d543c0e1875e28 | ["MIT"] | null | null | null |
# vim:ts=4:sw=4:et:
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from watchman.integration.lib import WatchmanTestCase
@WatchmanTestCase.expand_matrix
class TestBSDish(WatchmanTestCase.WatchmanTestCase):
def test_bsdish_toplevel(self) -> None:
root = self.mkdtemp()
os.mkdir(os.path.join(root, "lower"))
self.touchRelative(root, "lower", "file")
self.touchRelative(root, "top")
watch = self.watchmanCommand("watch", root)
self.assertFileList(root, ["lower", "lower/file", "top"])
find = self.watchmanCommand("find", root)
clock = find["clock"]
since = self.watchmanCommand("since", root, clock)
clock = since["clock"]
since = self.watchmanCommand(
"query", root, {"expression": ["allof", ["since", clock], ["type", "f"]]}
)
self.assertFileListsEqual([], since["files"])
clock = since["clock"]
os.unlink(os.path.join(root, "top"))
self.assertFileList(root, ["lower", "lower/file"])
now = self.watchmanCommand("since", root, clock)
expected = ["top"]
if watch["watcher"] == "kqueue+fsevents":
# For the split watch, a cookie is being written to each top level
# directory, and thus the "lower" directory will be reported as
# having been changed.
expected.append("lower")
self.assertEqual(len(expected), len(now["files"]))
self.assertFileListsEqual(
expected, list(map(lambda x: x["name"], now["files"]))
)
for f in now["files"]:
if f["name"] == "top":
self.assertFalse(f["exists"])
| 33.796296 | 85 | 0.603288 |
ea5b17dace93a0585ea82a8ca973c2d44f1700a3 | 360 | py | Python | app/core/migrations/0005_auto_20210524_0120.py | ManemanFTW/recipe-app-api | c3ced191b0f428af1f9fd0eaaa5459ae956c2a19 | ["MIT"] | null | null | null | app/core/migrations/0005_auto_20210524_0120.py | ManemanFTW/recipe-app-api | c3ced191b0f428af1f9fd0eaaa5459ae956c2a19 | ["MIT"] | null | null | null | app/core/migrations/0005_auto_20210524_0120.py | ManemanFTW/recipe-app-api | c3ced191b0f428af1f9fd0eaaa5459ae956c2a19 | ["MIT"] | null | null | null |
# Generated by Django 2.1.15 on 2021-05-24 01:20
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0004_recipe'),
]
operations = [
migrations.RenameField(
model_name='recipe',
old_name='time_minute',
new_name='time_minutes',
),
]
| 18.947368 | 48 | 0.580556 |
f39af7509c93998789d558137fdc6f0da7e9ef61 | 58 | py | Python | 1_PROGI/Exe 1.py | Julymusso/IFES | 939277c375dacc7750705c5593537d80ab4cbc0e | ["MIT"] | null | null | null | 1_PROGI/Exe 1.py | Julymusso/IFES | 939277c375dacc7750705c5593537d80ab4cbc0e | ["MIT"] | null | null | null | 1_PROGI/Exe 1.py | Julymusso/IFES | 939277c375dacc7750705c5593537d80ab4cbc0e | ["MIT"] | null | null | null |
#var
#i: integer
for i in range(1,21,1):
print(i)
| 11.6 | 23 | 0.551724 |
6ccd4d93fe21f485bfa79babd7efd6f9ff8ecc42 | 903 | py | Python | property/migrations/0011_auto_20191119_1610.py | NecrOctopuS/real_estate_agency | bcc61048dd9cbc150776ab90baae43cde0c35d9b | ["MIT"] | null | null | null | property/migrations/0011_auto_20191119_1610.py | NecrOctopuS/real_estate_agency | bcc61048dd9cbc150776ab90baae43cde0c35d9b | ["MIT"] | 6 | 2020-06-06T00:40:27.000Z | 2022-02-10T09:05:44.000Z | property/migrations/0011_auto_20191119_1610.py | NecrOctopuS/real_estate_agency | bcc61048dd9cbc150776ab90baae43cde0c35d9b | ["MIT"] | null | null | null |
# Generated by Django 2.2.4 on 2019-11-19 13:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('property', '0010_auto_20191119_1338'),
]
operations = [
migrations.RenameField(
model_name='owner',
old_name='owner',
new_name='name',
),
migrations.RenameField(
model_name='owner',
old_name='owner_phone_pure',
new_name='phone_pure',
),
migrations.RenameField(
model_name='owner',
old_name='owners_phonenumber',
new_name='phonenumber',
),
migrations.AddField(
model_name='flat',
name='owner_link',
field=models.ManyToManyField(blank=True, related_name='owner_flats', to='property.Owner', verbose_name='Владелец'),
),
]
| 26.558824 | 127 | 0.568106 |
40d6863a2a3cb54daae8d312887468eb1336c862 | 628 | py | Python | contest/migrations/0034_auto_20180703_1705.py | jxtxzzw/eoj3 | 468c16ed6de8b9b542972d0e83b02fd2cfa35e4f | ["MIT"] | 1 | 2020-11-17T13:08:07.000Z | 2020-11-17T13:08:07.000Z | contest/migrations/0034_auto_20180703_1705.py | zerolfx/eoj3 | 156060399d1c3e5f7bcdbf34eaffbe2be66e1b20 | ["MIT"] | 2 | 2020-09-23T21:27:55.000Z | 2021-06-25T15:24:46.000Z | contest/migrations/0034_auto_20180703_1705.py | zerolfx/eoj3 | 156060399d1c3e5f7bcdbf34eaffbe2be66e1b20 | ["MIT"] | 1 | 2019-07-13T00:44:39.000Z | 2019-07-13T00:44:39.000Z |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-07-03 17:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contest', '0033_contest_volunteers'),
]
operations = [
migrations.AlterField(
model_name='contest',
name='scoring_method',
field=models.CharField(choices=[('acm', 'ACM Rule'), ('oi', 'OI Rule'), ('cf', 'School of Data Analysis (SDA) Rule'), ('tcmtime', 'TCM/TIME Rule'), ('subtask', 'Subtask Rule')], default='acm', max_length=10),
),
]
| 29.904762 | 220 | 0.61465 |
7828290560a0895bd79a59e89cadae94dab0f721 | 4,957 | py | Python | tests/components/solarlog/test_config_flow.py | jasperro/core | 26d7b2164e8a971506790ae5af06f31abdf278b5 | ["Apache-2.0"] | 7 | 2019-02-07T14:14:12.000Z | 2019-07-28T06:56:10.000Z | tests/components/solarlog/test_config_flow.py | jasperro/core | 26d7b2164e8a971506790ae5af06f31abdf278b5 | ["Apache-2.0"] | 9 | 2022-01-27T06:32:10.000Z | 2022-03-31T07:07:51.000Z | tests/components/solarlog/test_config_flow.py | jasperro/core | 26d7b2164e8a971506790ae5af06f31abdf278b5 | ["Apache-2.0"] | 2 | 2020-04-19T13:35:24.000Z | 2020-04-19T13:35:51.000Z |
"""Test the solarlog config flow."""
from unittest.mock import patch
import pytest
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.solarlog import config_flow
from homeassistant.components.solarlog.const import DEFAULT_HOST, DOMAIN
from homeassistant.const import CONF_HOST, CONF_NAME
from tests.common import MockConfigEntry, mock_coro
NAME = "Solarlog test 1 2 3"
HOST = "http://1.1.1.1"
async def test_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.solarlog.config_flow.SolarLogConfigFlow._test_connection",
return_value=mock_coro({"title": "solarlog test 1 2 3"}),
), patch(
"homeassistant.components.solarlog.async_setup", return_value=mock_coro(True)
) as mock_setup, patch(
"homeassistant.components.solarlog.async_setup_entry",
return_value=mock_coro(True),
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": HOST, "name": NAME}
)
assert result2["type"] == "create_entry"
assert result2["title"] == "solarlog_test_1_2_3"
assert result2["data"] == {"host": "http://1.1.1.1"}
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
@pytest.fixture(name="test_connect")
def mock_controller():
"""Mock a successful _host_in_configuration_exists."""
with patch(
"homeassistant.components.solarlog.config_flow.SolarLogConfigFlow._test_connection",
side_effect=lambda *_: mock_coro(True),
):
yield
def init_config_flow(hass):
"""Init a configuration flow."""
flow = config_flow.SolarLogConfigFlow()
flow.hass = hass
return flow
async def test_user(hass, test_connect):
"""Test user config."""
flow = init_config_flow(hass)
result = await flow.async_step_user()
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
    # test with all fields provided
result = await flow.async_step_user({CONF_NAME: NAME, CONF_HOST: HOST})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "solarlog_test_1_2_3"
assert result["data"][CONF_HOST] == HOST
async def test_import(hass, test_connect):
"""Test import step."""
flow = init_config_flow(hass)
# import with only host
result = await flow.async_step_import({CONF_HOST: HOST})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "solarlog"
assert result["data"][CONF_HOST] == HOST
# import with only name
result = await flow.async_step_import({CONF_NAME: NAME})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "solarlog_test_1_2_3"
assert result["data"][CONF_HOST] == DEFAULT_HOST
# import with host and name
result = await flow.async_step_import({CONF_HOST: HOST, CONF_NAME: NAME})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "solarlog_test_1_2_3"
assert result["data"][CONF_HOST] == HOST
async def test_abort_if_already_setup(hass, test_connect):
"""Test we abort if the device is already setup."""
flow = init_config_flow(hass)
MockConfigEntry(
domain="solarlog", data={CONF_NAME: NAME, CONF_HOST: HOST}
).add_to_hass(hass)
# Should fail, same HOST different NAME (default)
result = await flow.async_step_import(
{CONF_HOST: HOST, CONF_NAME: "solarlog_test_7_8_9"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
# Should fail, same HOST and NAME
result = await flow.async_step_user({CONF_HOST: HOST, CONF_NAME: NAME})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {CONF_HOST: "already_configured"}
# SHOULD pass, diff HOST (without http://), different NAME
result = await flow.async_step_import(
{CONF_HOST: "2.2.2.2", CONF_NAME: "solarlog_test_7_8_9"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "solarlog_test_7_8_9"
assert result["data"][CONF_HOST] == "http://2.2.2.2"
# SHOULD pass, diff HOST, same NAME
result = await flow.async_step_import(
{CONF_HOST: "http://2.2.2.2", CONF_NAME: NAME}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "solarlog_test_1_2_3"
assert result["data"][CONF_HOST] == "http://2.2.2.2"
| 36.448529 | 92 | 0.700222 |
4b37dc35cfe1c1a97eadc9907815c0c1a618fcbd | 10,388 | py | Python | qtum_electrum/dnssec.py | TuoUP/qtum-electrum | d353c3cacce62214660a91836b68133aa92710d3 | ["MIT"] | null | null | null | qtum_electrum/dnssec.py | TuoUP/qtum-electrum | d353c3cacce62214660a91836b68133aa92710d3 | ["MIT"] | null | null | null | qtum_electrum/dnssec.py | TuoUP/qtum-electrum | d353c3cacce62214660a91836b68133aa92710d3 | ["MIT"] | 2 | 2020-01-20T07:51:45.000Z | 2020-07-18T08:31:54.000Z |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Check DNSSEC trust chain.
# Todo: verify expiration dates
#
# Based on
# http://backreference.org/2010/11/17/dnssec-verification-with-dig/
# https://github.com/rthalley/dnspython/blob/master/tests/test_dnssec.py
import traceback
import sys
import time
import struct
import dns.name
import dns.query
import dns.dnssec
import dns.message
import dns.resolver
import dns.rdatatype
import dns.rdtypes.ANY.NS
import dns.rdtypes.ANY.CNAME
import dns.rdtypes.ANY.DLV
import dns.rdtypes.ANY.DNSKEY
import dns.rdtypes.ANY.DS
import dns.rdtypes.ANY.NSEC
import dns.rdtypes.ANY.NSEC3
import dns.rdtypes.ANY.NSEC3PARAM
import dns.rdtypes.ANY.RRSIG
import dns.rdtypes.ANY.SOA
import dns.rdtypes.ANY.TXT
import dns.rdtypes.IN.A
import dns.rdtypes.IN.AAAA
from dns.exception import DNSException
# Pure-Python version of dns.dnssec._validate_rsig
import ecdsa
from . import rsakey
def python_validate_rrsig(rrset, rrsig, keys, origin=None, now=None):
from dns.dnssec import ValidationFailure, ECDSAP256SHA256, ECDSAP384SHA384
from dns.dnssec import _find_candidate_keys, _make_hash, _is_ecdsa, _is_rsa, _to_rdata, _make_algorithm_id
if isinstance(origin, str):
origin = dns.name.from_text(origin, dns.name.root)
for candidate_key in _find_candidate_keys(keys, rrsig):
if not candidate_key:
raise ValidationFailure('unknown key')
# For convenience, allow the rrset to be specified as a (name, rdataset)
# tuple as well as a proper rrset
if isinstance(rrset, tuple):
rrname = rrset[0]
rdataset = rrset[1]
else:
rrname = rrset.name
rdataset = rrset
if now is None:
now = time.time()
if rrsig.expiration < now:
raise ValidationFailure('expired')
if rrsig.inception > now:
raise ValidationFailure('not yet valid')
hash = _make_hash(rrsig.algorithm)
if _is_rsa(rrsig.algorithm):
keyptr = candidate_key.key
(bytes,) = struct.unpack('!B', keyptr[0:1])
keyptr = keyptr[1:]
if bytes == 0:
(bytes,) = struct.unpack('!H', keyptr[0:2])
keyptr = keyptr[2:]
rsa_e = keyptr[0:bytes]
rsa_n = keyptr[bytes:]
n = ecdsa.util.string_to_number(rsa_n)
e = ecdsa.util.string_to_number(rsa_e)
pubkey = rsakey.RSAKey(n, e)
sig = rrsig.signature
elif _is_ecdsa(rrsig.algorithm):
if rrsig.algorithm == ECDSAP256SHA256:
curve = ecdsa.curves.NIST256p
key_len = 32
digest_len = 32
elif rrsig.algorithm == ECDSAP384SHA384:
curve = ecdsa.curves.NIST384p
key_len = 48
digest_len = 48
else:
# shouldn't happen
raise ValidationFailure('unknown ECDSA curve')
keyptr = candidate_key.key
x = ecdsa.util.string_to_number(keyptr[0:key_len])
y = ecdsa.util.string_to_number(keyptr[key_len:key_len * 2])
assert ecdsa.ecdsa.point_is_valid(curve.generator, x, y)
point = ecdsa.ellipticcurve.Point(curve.curve, x, y, curve.order)
verifying_key = ecdsa.keys.VerifyingKey.from_public_point(point, curve)
r = rrsig.signature[:key_len]
s = rrsig.signature[key_len:]
sig = ecdsa.ecdsa.Signature(ecdsa.util.string_to_number(r),
ecdsa.util.string_to_number(s))
else:
raise ValidationFailure('unknown algorithm %u' % rrsig.algorithm)
hash.update(_to_rdata(rrsig, origin)[:18])
hash.update(rrsig.signer.to_digestable(origin))
if rrsig.labels < len(rrname) - 1:
suffix = rrname.split(rrsig.labels + 1)[1]
rrname = dns.name.from_text('*', suffix)
rrnamebuf = rrname.to_digestable(origin)
rrfixed = struct.pack('!HHI', rdataset.rdtype, rdataset.rdclass,
rrsig.original_ttl)
        rrlist = sorted(rdataset)
for rr in rrlist:
hash.update(rrnamebuf)
hash.update(rrfixed)
rrdata = rr.to_digestable(origin)
rrlen = struct.pack('!H', len(rrdata))
hash.update(rrlen)
hash.update(rrdata)
digest = hash.digest()
if _is_rsa(rrsig.algorithm):
digest = _make_algorithm_id(rrsig.algorithm) + digest
if pubkey.verify(bytearray(sig), bytearray(digest)):
return
elif _is_ecdsa(rrsig.algorithm):
diglong = ecdsa.util.string_to_number(digest)
if verifying_key.pubkey.verifies(diglong, sig):
return
else:
raise ValidationFailure('unknown algorithm %s' % rrsig.algorithm)
raise ValidationFailure('verify failure')
# replace validate_rrsig
dns.dnssec._validate_rrsig = python_validate_rrsig
dns.dnssec.validate_rrsig = python_validate_rrsig
dns.dnssec.validate = dns.dnssec._validate
from .util import print_error
# hard-coded trust anchors (root KSKs)
trust_anchors = [
# KSK-2017:
dns.rrset.from_text('.', 1 , 'IN', 'DNSKEY', '257 3 8 AwEAAaz/tAm8yTn4Mfeh5eyI96WSVexTBAvkMgJzkKTOiW1vkIbzxeF3+/4RgWOq7HrxRixHlFlExOLAJr5emLvN7SWXgnLh4+B5xQlNVz8Og8kvArMtNROxVQuCaSnIDdD5LKyWbRd2n9WGe2R8PzgCmr3EgVLrjyBxWezF0jLHwVN8efS3rCj/EWgvIWgb9tarpVUDK/b58Da+sqqls3eNbuv7pr+eoZG+SrDK6nWeL3c6H5Apxz7LjVc1uTIdsIXxuOLYA4/ilBmSVIzuDWfdRUfhHdY6+cn8HFRm+2hM8AnXGXws9555KrUB5qihylGa8subX2Nn6UwNR1AkUTV74bU='),
# KSK-2010:
dns.rrset.from_text('.', 15202, 'IN', 'DNSKEY', '257 3 8 AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz W5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relS Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq QxA+Uk1ihz0='),
]
def check_query(ns, sub, _type, keys):
q = dns.message.make_query(sub, _type, want_dnssec=True)
response = dns.query.tcp(q, ns, timeout=5)
assert response.rcode() == 0, 'No answer'
answer = response.answer
assert len(answer) != 0, ('No DNS record found', sub, _type)
assert len(answer) != 1, ('No DNSSEC record found', sub, _type)
if answer[0].rdtype == dns.rdatatype.RRSIG:
rrsig, rrset = answer
elif answer[1].rdtype == dns.rdatatype.RRSIG:
rrset, rrsig = answer
else:
raise Exception('No signature set in record')
if keys is None:
keys = {dns.name.from_text(sub):rrset}
dns.dnssec.validate(rrset, rrsig, keys)
return rrset
def get_and_validate(ns, url, _type):
# get trusted root key
root_rrset = None
for dnskey_rr in trust_anchors:
try:
# Check if there is a valid signature for the root dnskey
root_rrset = check_query(ns, '', dns.rdatatype.DNSKEY, {dns.name.root: dnskey_rr})
break
except dns.dnssec.ValidationFailure:
# It's OK as long as one key validates
continue
if not root_rrset:
raise dns.dnssec.ValidationFailure('None of the trust anchors found in DNS')
keys = {dns.name.root: root_rrset}
# top-down verification
parts = url.split('.')
for i in range(len(parts), 0, -1):
sub = '.'.join(parts[i-1:])
name = dns.name.from_text(sub)
# If server is authoritative, don't fetch DNSKEY
query = dns.message.make_query(sub, dns.rdatatype.NS)
response = dns.query.udp(query, ns, 3)
assert response.rcode() == dns.rcode.NOERROR, "query error"
rrset = response.authority[0] if len(response.authority) > 0 else response.answer[0]
rr = rrset[0]
if rr.rdtype == dns.rdatatype.SOA:
continue
# get DNSKEY (self-signed)
rrset = check_query(ns, sub, dns.rdatatype.DNSKEY, None)
# get DS (signed by parent)
ds_rrset = check_query(ns, sub, dns.rdatatype.DS, keys)
# verify that a signed DS validates DNSKEY
for ds in ds_rrset:
for dnskey in rrset:
htype = 'SHA256' if ds.digest_type == 2 else 'SHA1'
good_ds = dns.dnssec.make_ds(name, dnskey, htype)
if ds == good_ds:
break
else:
continue
break
else:
raise Exception("DS does not match DNSKEY")
# set key for next iteration
keys = {name: rrset}
# get TXT record (signed by zone)
rrset = check_query(ns, url, _type, keys)
return rrset
def query(url, rtype):
# 8.8.8.8 is Google's public DNS server
nameservers = ['8.8.8.8']
ns = nameservers[0]
try:
out = get_and_validate(ns, url, rtype)
validated = True
except BaseException as e:
#traceback.print_exc(file=sys.stderr)
print_error("DNSSEC error:", str(e))
resolver = dns.resolver.get_default_resolver()
out = resolver.query(url, rtype)
validated = False
return out, validated
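# Illustrative usage sketch (hypothetical caller code, not part of the original
# module). `query` returns the resolved records plus a flag telling whether the
# DNSSEC chain of trust from the hard-coded root anchors validated:
#
#   answer, validated = query('example.com', 'TXT')
#   for rr in answer:
#       print(rr.to_text(), '(DNSSEC validated)' if validated else '(unvalidated)')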
| 37.912409 | 418 | 0.657297 |
e78cbe4d24272b25eb8bc2833b689d27daed6bfe | 1,100 | py | Python | scripts/main.py | vsevdrob/nft-brownie-ipfs-pinata-hashlips | 32080aa0eca2ee3143e1845adf0a246bbe148192 | ["MIT"] | 2 | 2022-03-06T21:14:58.000Z | 2022-03-16T00:22:22.000Z | scripts/main.py | webdriedesign/nft-brownie-ipfs-pinata-hashlips | 32080aa0eca2ee3143e1845adf0a246bbe148192 | ["MIT"] | 2 | 2022-03-17T00:53:53.000Z | 2022-03-17T00:59:20.000Z | scripts/main.py | webdriedesign/nft-brownie-ipfs-pinata-hashlips | 32080aa0eca2ee3143e1845adf0a246bbe148192 | ["MIT"] | 1 | 2022-03-06T21:08:22.000Z | 2022-03-06T21:08:22.000Z |
from scripts.utils.modify_metadata import modify_metadata
from helper_brownie import get_account
from scripts.collectible.safe_mint import safe_mint
from scripts.collectible.config import AMOUNT_TO_MINT, COLLECTIBLE
from brownie import Collectible
def main():
account = get_account()
collectible = Collectible[-1]
current_token_id = collectible.totalSupply()
tx = None
print(f"Currenct amount NFTs minted: {current_token_id}")
while current_token_id < AMOUNT_TO_MINT:
print("================================================================")
print(" OK ")
print("================================================================")
upcoming_token_id = collectible.totalSupply() + 1
token_URI = modify_metadata(_token_id=upcoming_token_id)
current_token_id, tx = safe_mint(_account=account, _token_URI=token_URI)
print(f"\n- Nice job! Now you can enjoy NFT {COLLECTIBLE['name']} collection!")
print(f"- New total amount NFTs minted: {current_token_id}")
| 36.666667 | 83 | 0.602727 |
bf13cc2903797d954c22d461ade24540e7ce7387 | 1,201 | py | Python | my-rl/qlearning_agent.py | siowyisheng/rl-learning | 767963d7a61a77df87dc9d38f30f75aeb7c2e454 | ["MIT"] | null | null | null | my-rl/qlearning_agent.py | siowyisheng/rl-learning | 767963d7a61a77df87dc9d38f30f75aeb7c2e454 | ["MIT"] | null | null | null | my-rl/qlearning_agent.py | siowyisheng/rl-learning | 767963d7a61a77df87dc9d38f30f75aeb7c2e454 | ["MIT"] | null | null | null |
# my implementation of a q-learning agent
import random
import numpy as np
class QLearningAgent:
"""An agent which uses Q learning to optimize actions in an environment."""
def __init__(self, alpha, gamma):
self._Q = {}
self.alpha = alpha
self.gamma = gamma
def decide(self, state, action_space, epsilon):
if np.random.random() < epsilon:
return random.choice(action_space)
else:
return _best_action(self._Q, state, action_space)
def learn(self, state, action, next_state, reward, action_space):
alpha = self.alpha
old_value = self._Q.get((state, action), 0)
next_best_action = _best_action(self._Q, next_state, action_space)
next_value = self._Q.get((next_state, next_best_action), 0)
discounted_return = reward + self.gamma * next_value
self._Q[state, action] = (1 - alpha) * old_value + (
alpha * discounted_return) # yapf: ignore
def _best_action(Q, state, action_space):
values = np.array([Q.get((state, a), 0) for a in action_space])
return action_space[np.argmax(values)]
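# The block below is an editorial addition, not part of the original module: a
# minimal smoke test on a toy 1-D corridor environment (all names in it are
# hypothetical). It relies only on the QLearningAgent class and the
# _best_action helper defined above.
if __name__ == '__main__':
    class _CorridorEnv:
        """Toy environment: start at position 0, reach 5 for a reward of 1."""
        def reset(self):
            self.position = 0
            return self.position
        def step(self, action):
            # Action 1 moves right, any other action moves left (floored at 0).
            self.position = max(0, self.position + (1 if action == 1 else -1))
            done = self.position == 5
            return self.position, (1.0 if done else 0.0), done
    env = _CorridorEnv()
    agent = QLearningAgent(alpha=0.5, gamma=0.9)
    action_space = [0, 1]  # 0 = left, 1 = right
    for episode in range(200):
        # Decay exploration from fully random towards mostly greedy.
        epsilon = max(0.1, 1.0 - episode * 0.01)
        state, done = env.reset(), False
        while not done:
            action = agent.decide(state, action_space, epsilon)
            next_state, reward, done = env.step(action)
            agent.learn(state, action, next_state, reward, action_space)
            state = next_state
    print('Greedy action in state 4:', _best_action(agent._Q, 4, action_space))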
| 34.314286 | 79 | 0.653622 |
454bbebb3c92a19b06882a3e19be5704aeb5fb01 | 4,735 | py | Python | tests/test_utils.py | mkdryden/flatland-fork | 53a992fc059719c87ba702583b32ec674df82528 | ["MIT"] | null | null | null | tests/test_utils.py | mkdryden/flatland-fork | 53a992fc059719c87ba702583b32ec674df82528 | ["MIT"] | null | null | null | tests/test_utils.py | mkdryden/flatland-fork | 53a992fc059719c87ba702583b32ec674df82528 | ["MIT"] | null | null | null |
# portions of this file are derived from SQLAlchemy
from tests._util import eq_, assert_raises
from flatland import util
def test_lazy_property():
poison = False
class Foo(object):
@util.lazy_property
def squiznart(self):
assert not poison
return 'abc'
assert Foo.squiznart != 'abc'
assert hasattr(Foo.squiznart, '__get__')
f = Foo()
assert 'squiznart' not in f.__dict__
assert f.squiznart == 'abc'
assert f.__dict__['squiznart'] == 'abc'
poison = True
assert f.squiznart == 'abc'
new_foo = Foo()
assert_raises(AssertionError, getattr, new_foo, 'squiznart')
assert 'squiznart' not in new_foo.__dict__
def test_as_mapping():
class Foo(object):
clazz = 'c'
def __init__(self):
self.inzt = 'i'
m = util.as_mapping(Foo)
assert 'clazz' in m
assert m['clazz'] == 'c'
assert sorted(dir(Foo)) == sorted(m)
assert_raises(KeyError, m.__getitem__, 'inzt')
mi = util.as_mapping(Foo())
assert 'clazz' in mi
assert mi['clazz'] == 'c'
assert 'inzt' in mi
assert mi['inzt'] == 'i'
assert sorted(dir(Foo())) == sorted(mi)
def test_luhn10():
assert util.luhn10(0) is True
assert util.luhn10(4100000000000001) is True
assert util.luhn10(4100000000000009) is False
def test_to_pairs():
to_pairs = util.to_pairs
wanted = [('a', 1), ('b', 2)]
assert list(to_pairs(wanted)) == wanted
assert list(to_pairs(iter(wanted))) == wanted
assert sorted(to_pairs(dict(wanted))) == wanted
class Duck(object):
def keys(self):
return list(dict(wanted).keys())
def __getitem__(self, key):
return dict(wanted)[key]
assert sorted(to_pairs(Duck())) == wanted
PAIRS = [('a', 1), ('b', 2), ('c', 3),
('d', 4), ('d', 4), ('d', 5)]
def test_keyslice_conflict():
generator = util.keyslice_pairs((), include=[1], omit=[2])
assert_raises(TypeError, list, generator)
def test_keyslice_pairs():
assert list(util.keyslice_pairs(PAIRS)) == PAIRS
assert list(util.keyslice_pairs(tuple(PAIRS))) == PAIRS
assert list(util.keyslice_pairs(iter(PAIRS))) == PAIRS
def _keyslice_eq_(wanted, kw={}):
got = list(util.keyslice_pairs(PAIRS, **kw))
eq_(wanted, got)
def test_keyslice_include():
yield _keyslice_eq_, PAIRS, dict(include=[])
yield _keyslice_eq_, [('a', 1)], dict(include=['a'])
yield _keyslice_eq_, [('a', 1), ('b', 2)], dict(include=['a', 'b'])
yield _keyslice_eq_, [('d', 4), ('d', 4), ('d', 5)], dict(include=['d'])
yield _keyslice_eq_, [('a', 1)], dict(include=['a', 'e'])
def test_keyslice_omit():
yield _keyslice_eq_, PAIRS, dict(omit=[])
yield _keyslice_eq_, [('a', 1), ('b', 2), ('c', 3)], dict(omit=['d'])
yield _keyslice_eq_, [('a', 1), ('b', 2)], dict(omit=['c', 'd'])
yield _keyslice_eq_, [('a', 1), ('b', 2)], dict(omit=['c', 'd', 'e'])
yield _keyslice_eq_, [], dict(omit=['a', 'b', 'c', 'd'])
def test_keyslice_rename():
wanted = PAIRS[:3] + [('Z', 4), ('Z', 4), ('Z', 5)]
yield _keyslice_eq_, wanted, dict(rename={'d': 'Z'})
yield _keyslice_eq_, wanted, dict(rename=[('d', 'Z')])
yield _keyslice_eq_, wanted, dict(rename={'d': 'Z', 'e': 'Y'})
wanted = [('d', 1), ('c', 2), ('b', 3),
('a', 4), ('a', 4), ('a', 5)]
yield _keyslice_eq_, wanted, dict(rename=list(zip('abcddd', 'dcbaaa')))
def test_keyslice_key():
wanted = [(int(k, 16), v) for k, v in PAIRS]
keyfunc = lambda v: int(v, 16)
yield _keyslice_eq_, wanted, dict(key=keyfunc)
wanted = wanted[:3] + [(0, 4), (0, 4), (0, 5)]
yield _keyslice_eq_, wanted, dict(key=keyfunc, rename={13: 0})
def test_keyslice_mixed():
wanted = [('a', 1), ('X', 2)]
yield _keyslice_eq_, wanted, dict(rename={'b': 'X'}, include=['a'])
yield _keyslice_eq_, wanted, dict(rename={'b': 'X'}, omit=['b', 'c', 'd'])
def test_symbols():
sym1 = util.symbol('foo')
assert sym1.name == 'foo'
sym2 = util.symbol('foo')
assert sym1 is sym2
assert sym1 == sym2
sym3 = util.symbol('bar')
assert sym1 is not sym3
assert sym1 != sym3
assert repr(sym3) == 'bar'
def test_symbol_pickle():
import pickle
    try:
        import cPickle
    except ImportError:
        # Python 3 has no cPickle; fall back to the pure-Python pickle module.
        cPickle = pickle
for mod in pickle, cPickle:
sym1 = util.symbol('foo')
sym2 = util.symbol('foo')
assert sym1 is sym2
# default
s = pickle.dumps(sym1)
sym3 = pickle.loads(s)
for protocol in 0, 1, 2:
            serial = mod.dumps(sym1, protocol)
            rt = mod.loads(serial)
assert rt is sym1
assert rt is sym2
| 26.160221 | 78 | 0.582471 |
9e5a1256034c8fed5fcdf0cb915dc6895ef816c1 | 26 | py | Python | vaas-app/src/vaas/__init__.py | allegro/vaas | 3d2d1f1a9dae6ac69a13563a37f9bfdf4f986ae2 | ["Apache-2.0"] | 251 | 2015-09-02T10:50:51.000Z | 2022-03-16T08:00:35.000Z | vaas-app/src/vaas/__init__.py | allegro/vaas | 3d2d1f1a9dae6ac69a13563a37f9bfdf4f986ae2 | ["Apache-2.0"] | 154 | 2015-09-02T14:54:08.000Z | 2022-03-16T08:34:17.000Z | vaas-app/src/vaas/__init__.py | allegro/vaas | 3d2d1f1a9dae6ac69a13563a37f9bfdf4f986ae2 | ["Apache-2.0"] | 31 | 2015-09-03T07:51:05.000Z | 2020-09-24T09:02:40.000Z |
VERSION = ('2', '0', '1')
| 13 | 25 | 0.384615 |
e548183265e85cf4468da3103006bdf28132ec01 | 3,413 | py | Python | functional_tests/test_cluster.py | raphaelm/django-parrot | 3b33dced0f465379ab682ef975916b891b869e88 | ["Apache-2.0"] | 4 | 2016-04-03T13:47:02.000Z | 2021-12-04T09:55:39.000Z | functional_tests/test_cluster.py | raphaelm/django-parrot | 3b33dced0f465379ab682ef975916b891b869e88 | ["Apache-2.0"] | 13 | 2016-04-02T23:04:53.000Z | 2016-07-02T09:35:46.000Z | functional_tests/test_cluster.py | raphaelm/django-parrot | 3b33dced0f465379ab682ef975916b891b869e88 | ["Apache-2.0"] | null | null | null |
import multiprocessing
import requests
from functional_tests import utils_proxy
from . import utils
def test_put_propagation():
with utils.running_cockatiel_cluster(nodenum=3) as procs:
content = 'Hello, this is a testfile'.encode('utf-8')
resp = requests.put(
'http://127.0.0.1:{port}{path}'.format(path='/foo/bar.txt', port=procs[0].port),
content
)
assert resp.status_code == 201
path = resp.headers['Location']
def get():
resp = requests.get(
'http://127.0.0.1:{port}{path}'.format(path=path, port=procs[1].port),
content
)
assert resp.status_code == 200
assert resp.content == content
assert resp.headers['Content-Type'] == 'text/plain'
resp = requests.get(
'http://127.0.0.1:{port}{path}'.format(path=path, port=procs[2].port),
)
assert resp.status_code == 200
assert resp.content == content
assert resp.headers['Content-Type'] == 'text/plain'
utils.waitfor(get)
def test_delete_propagation():
with utils.running_cockatiel_cluster() as procs:
content = 'Hello, this is a testfile'.encode('utf-8')
resp = requests.put(
'http://127.0.0.1:{port}{path}'.format(path='/foo/bar.txt', port=procs[0].port),
content
)
assert resp.status_code == 201
path = resp.headers['Location']
def check_arrived():
resp = requests.get(
'http://127.0.0.1:{port}{path}'.format(path=path, port=procs[1].port),
content
)
assert resp.status_code == 200
assert resp.content == content
assert resp.headers['Content-Type'] == 'text/plain'
utils.waitfor(check_arrived)
resp = requests.delete(
'http://127.0.0.1:{port}{path}'.format(path=path, port=procs[1].port),
)
assert resp.status_code == 200
def check_deleted():
resp = requests.get(
'http://127.0.0.1:{port}{path}'.format(path=path, port=procs[0].port),
)
assert resp.status_code == 404
utils.waitfor(check_deleted)
def test_proxy_propagation():
class CountingProxy(utils_proxy.ProxyRequestHandler):
cnt = multiprocessing.Value('i', 0)
def intercept_request(self, message, data):
with self.cnt.get_lock():
self.cnt.value += 1
return message, data
with utils.running_cockatiel_cluster(proxy=CountingProxy) as procs:
content = 'Hello, this is a testfile'.encode('utf-8')
resp = requests.put(
'http://127.0.0.1:{port}{path}'.format(path='/foo/bar.txt', port=procs[0].port),
content
)
assert resp.status_code == 201
path = resp.headers['Location']
def check_arrived():
resp = requests.get(
'http://127.0.0.1:{port}{path}'.format(path=path, port=procs[1].port),
)
assert resp.status_code == 200
assert resp.content == content
assert resp.headers['Content-Type'] == 'text/plain'
utils.waitfor(check_arrived)
assert CountingProxy.cnt.value in (1, 2) # Depends on a race condition, does not matter for this test
| 33.460784 | 106 | 0.565778 |
00ad90780fb5780afd0e4567b3427d2126bce9bd | 67,184 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_09_01/operations/_virtual_network_gateways_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | ["MIT"] | 1 | 2021-06-02T08:01:35.000Z | 2021-06-02T08:01:35.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_09_01/operations/_virtual_network_gateways_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | ["MIT"] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_09_01/operations/_virtual_network_gateways_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | ["MIT"] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class VirtualNetworkGatewaysOperations(object):
"""VirtualNetworkGatewaysOperations operations.
    You should not instantiate this class directly; instead, create a Client instance, which will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-09-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-09-01"
self.config = config
def _create_or_update_initial(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VirtualNetworkGateway')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates a virtual network gateway in the specified resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to create or update virtual
network gateway operation.
:type parameters:
~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGateway
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns VirtualNetworkGateway
or ClientRawResponse<VirtualNetworkGateway> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGateway]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGateway]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
def get(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Gets the specified virtual network gateway by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VirtualNetworkGateway or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGateway
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
def _delete_initial(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified virtual network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
def _update_tags_initial(
self, resource_group_name, virtual_network_gateway_name, tags=None, custom_headers=None, raw=False, **operation_config):
parameters = models.TagsObject(tags=tags)
# Construct URL
url = self.update_tags.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'TagsObject')
# Construct and send request
request = self._client.patch(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update_tags(
self, resource_group_name, virtual_network_gateway_name, tags=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates a virtual network gateway tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns VirtualNetworkGateway
or ClientRawResponse<VirtualNetworkGateway> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGateway]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGateway]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
tags=tags,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'}
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Gets all virtual network gateways by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
        :return: An iterator-like instance of VirtualNetworkGateway
:rtype:
~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGatewayPaged[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGateway]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.VirtualNetworkGatewayPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways'}
def list_connections(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Gets all the connections in a virtual network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
        :return: An iterator-like instance of
VirtualNetworkGatewayConnectionListEntity
:rtype:
~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGatewayConnectionListEntityPaged[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGatewayConnectionListEntity]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_connections.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.VirtualNetworkGatewayConnectionListEntityPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_connections.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/connections'}
def _reset_initial(
self, resource_group_name, virtual_network_gateway_name, gateway_vip=None, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.reset.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if gateway_vip is not None:
query_parameters['gatewayVip'] = self._serialize.query("gateway_vip", gateway_vip, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def reset(
self, resource_group_name, virtual_network_gateway_name, gateway_vip=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""Resets the primary of the virtual network gateway in the specified
resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param gateway_vip: Virtual network gateway vip address supplied to
         begin the reset of the active-active feature enabled gateway.
:type gateway_vip: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns VirtualNetworkGateway
or ClientRawResponse<VirtualNetworkGateway> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGateway]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.VirtualNetworkGateway]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._reset_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
gateway_vip=gateway_vip,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VirtualNetworkGateway', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset'}
def _generatevpnclientpackage_initial(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.generatevpnclientpackage.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VpnClientParameters')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def generatevpnclientpackage(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Generates VPN client package for P2S client of the virtual network
gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to the generate virtual network
gateway VPN client package operation.
:type parameters:
~azure.mgmt.network.v2017_09_01.models.VpnClientParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns str or
ClientRawResponse<str> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[str] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[str]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._generatevpnclientpackage_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
generatevpnclientpackage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage'}
def _generate_vpn_profile_initial(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.generate_vpn_profile.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VpnClientParameters')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def generate_vpn_profile(
self, resource_group_name, virtual_network_gateway_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Generates VPN profile for P2S client of the virtual network gateway in
        the specified resource group. Used for IKEv2 and RADIUS-based
authentication.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to the generate virtual network
gateway VPN client package operation.
:type parameters:
~azure.mgmt.network.v2017_09_01.models.VpnClientParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns str or
ClientRawResponse<str> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[str] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[str]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._generate_vpn_profile_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
generate_vpn_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnprofile'}
def _get_vpn_profile_package_url_initial(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_vpn_profile_package_url.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_vpn_profile_package_url(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Gets pre-generated VPN profile for P2S client of the virtual network
gateway in the specified resource group. The profile needs to be
generated first using generateVpnProfile.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns str or
ClientRawResponse<str> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[str] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[str]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_vpn_profile_package_url_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_vpn_profile_package_url.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnprofilepackageurl'}
def _get_bgp_peer_status_initial(
self, resource_group_name, virtual_network_gateway_name, peer=None, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_bgp_peer_status.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
if peer is not None:
query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BgpPeerStatusListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_bgp_peer_status(
self, resource_group_name, virtual_network_gateway_name, peer=None, custom_headers=None, raw=False, polling=True, **operation_config):
"""The GetBgpPeerStatus operation retrieves the status of all BGP peers.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param peer: The IP address of the peer to retrieve the status of.
:type peer: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns BgpPeerStatusListResult
or ClientRawResponse<BgpPeerStatusListResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.BgpPeerStatusListResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.BgpPeerStatusListResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_bgp_peer_status_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
peer=peer,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('BgpPeerStatusListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_bgp_peer_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getBgpPeerStatus'}
def supported_vpn_devices(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
"""Gets a xml format representation for supported vpn devices.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: str or ClientRawResponse if raw=true
:rtype: str or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.supported_vpn_devices.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
supported_vpn_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/supportedvpndevices'}
def _get_learned_routes_initial(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_learned_routes.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_learned_routes(
self, resource_group_name, virtual_network_gateway_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""This operation retrieves a list of routes the virtual network gateway
has learned, including routes learned from BGP peers.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns GatewayRouteListResult
or ClientRawResponse<GatewayRouteListResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.GatewayRouteListResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.GatewayRouteListResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_learned_routes_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_learned_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes'}
def _get_advertised_routes_initial(
self, resource_group_name, virtual_network_gateway_name, peer, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_advertised_routes.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_advertised_routes(
self, resource_group_name, virtual_network_gateway_name, peer, custom_headers=None, raw=False, polling=True, **operation_config):
"""This operation retrieves a list of routes the virtual network gateway
is advertising to the specified peer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network
gateway.
:type virtual_network_gateway_name: str
:param peer: The IP address of the peer
:type peer: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns GatewayRouteListResult
or ClientRawResponse<GatewayRouteListResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_09_01.models.GatewayRouteListResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_09_01.models.GatewayRouteListResult]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._get_advertised_routes_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
peer=peer,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('GatewayRouteListResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_advertised_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getAdvertisedRoutes'}
def vpn_device_configuration_script(
self, resource_group_name, virtual_network_gateway_connection_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Gets a xml format representation for vpn device configuration script.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The name of the
virtual network gateway connection for which the configuration script
is generated.
:type virtual_network_gateway_connection_name: str
:param parameters: Parameters supplied to the generate vpn device
script operation.
:type parameters:
~azure.mgmt.network.v2017_09_01.models.VpnDeviceScriptParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: str or ClientRawResponse if raw=true
:rtype: str or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.vpn_device_configuration_script.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'VpnDeviceScriptParameters')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
vpn_device_configuration_script.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/vpndeviceconfigurationscript'}
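The operations class above is normally reached through the generated management client rather than constructed by hand. The sketch below is illustrative only: the client class, the credential type, and the resource names are assumptions that do not appear in the file itself; it simply exercises the plain call, paged-iterator, and LROPoller patterns implemented above.
# Hypothetical usage sketch (client, credentials, and resource names are assumptions).
from azure.common.credentials import ServicePrincipalCredentials  # assumed track-1 credentials
from azure.mgmt.network.v2017_09_01 import NetworkManagementClient  # assumed client entry point

credentials = ServicePrincipalCredentials(
    client_id='<app-id>', secret='<app-secret>', tenant='<tenant-id>')
client = NetworkManagementClient(credentials, '<subscription-id>')

# get() is a plain synchronous call returning a VirtualNetworkGateway model.
gateway = client.virtual_network_gateways.get('example-rg', 'example-gateway')
print(gateway.name, gateway.provisioning_state)

# list() returns a VirtualNetworkGatewayPaged iterator that follows next_link
# between pages transparently.
for gw in client.virtual_network_gateways.list('example-rg'):
    print(gw.name)

# delete() is a long-running operation: it returns an LROPoller, and wait()
# blocks until ARM reports that the operation has completed.
poller = client.virtual_network_gateways.delete('example-rg', 'example-gateway')
poller.wait()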
| 49.363703
| 231
| 0.689911
|
877d842bf74be80d695b9cfe654b2bfb71c1a904
| 142
|
py
|
Python
|
weather.py
|
gding/vehicle-modeler
|
3c4bc53446c1e47c33300de72536f7fa6c71381d
|
[
"BSD-3-Clause"
] | null | null | null |
weather.py
|
gding/vehicle-modeler
|
3c4bc53446c1e47c33300de72536f7fa6c71381d
|
[
"BSD-3-Clause"
] | null | null | null |
weather.py
|
gding/vehicle-modeler
|
3c4bc53446c1e47c33300de72536f7fa6c71381d
|
[
"BSD-3-Clause"
] | null | null | null |
class Weather(object):
def __init__(
self, lat, lon, wind_speed, wind_direction,
            solar_irradiance, cloud_cover):
        # Assumed minimal body; the original file appears truncated here.
        self.lat, self.lon = lat, lon
        self.wind_speed, self.wind_direction = wind_speed, wind_direction
        self.solar_irradiance, self.cloud_cover = solar_irradiance, cloud_cover
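Given the assumed constructor body above, a hypothetical instantiation of the container would look like the following; every value is a placeholder.
# Placeholder readings only; Weather simply stores them as attributes.
observation = Weather(lat=37.77, lon=-122.42, wind_speed=5.0,
                      wind_direction=270.0, solar_irradiance=800.0,
                      cloud_cover=0.2)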
| 23.666667
| 55
| 0.633803
|
f61893c71874538d5c062919e261de41db927489
| 5,554
|
py
|
Python
|
cryptoauthlib/python/cryptoauthlib/iface.py
|
pjaos/atca
|
1a6d04468706235c2c8c3b97f6ce3df5f1fba4ec
|
[
"Apache-2.0"
] | 8
|
2018-01-30T16:24:37.000Z
|
2022-02-25T12:50:22.000Z
|
cryptoauthlib/python/cryptoauthlib/iface.py
|
pjaos/atca
|
1a6d04468706235c2c8c3b97f6ce3df5f1fba4ec
|
[
"Apache-2.0"
] | 6
|
2018-03-29T15:07:39.000Z
|
2022-02-25T11:36:06.000Z
|
cryptoauthlib/python/cryptoauthlib/iface.py
|
pjaos/atca
|
1a6d04468706235c2c8c3b97f6ce3df5f1fba4ec
|
[
"Apache-2.0"
] | 3
|
2018-04-10T11:17:10.000Z
|
2021-11-01T23:43:59.000Z
|
"""
Interface Configuration
"""
# (c) 2015-2018 Microchip Technology Inc. and its subsidiaries.
#
# Subject to your compliance with these terms, you may use Microchip software
# and any derivatives exclusively with Microchip products. It is your
# responsibility to comply with third party license terms applicable to your
# use of third party software (including open source software) that may
# accompany Microchip software.
#
# THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
# EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
# WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
# PARTICULAR PURPOSE. IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT,
# SPECIAL, PUNITIVE, INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE
# OF ANY KIND WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF
# MICROCHIP HAS BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE
# FORESEEABLE. TO THE FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL
# LIABILITY ON ALL CLAIMS IN ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED
# THE AMOUNT OF FEES, IF ANY, THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR
# THIS SOFTWARE.
from ctypes import Structure, Union, c_uint16, c_int, c_uint8, c_uint32, c_void_p
from .library import get_cryptoauthlib, get_ctype_by_name
from .atcaenum import AtcaEnum
# Because this module directly mirrors the C API, the following is an exception to the Python coding standard
# pylint: disable-msg=too-few-public-methods
class ATCAIfaceType(AtcaEnum):
"""
Interface Type Enumerations from atca_iface.h
"""
ATCA_I2C_IFACE = 0
ATCA_SWI_IFACE = 1
ATCA_UART_IFACE = 2
ATCA_SPI_IFACE = 3
ATCA_HID_IFACE = 4
class ATCAKitType(AtcaEnum):
"""
Interface Type Enumerations for Kit devices
"""
ATCA_KIT_AUTO_IFACE = 0
ATCA_KIT_I2C_IFACE = 1
ATCA_KIT_SWI_IFACE = 2
ATCA_KIT_UNKNOWN_IFACE = 3
class ATCADeviceType(AtcaEnum):
"""
Device Type Enumeration from atca_devtypes.h
"""
ATSHA204A = 0
ATECC108A = 1
ATECC508A = 2
ATECC608A = 3
ATCA_DEV_UNKNOWN = 0x20
# The following must match atca_iface.h exactly
class _ATCAI2C(Structure):
"""I2C/TWI HAL configuration"""
_fields_ = [('slave_address', c_uint8),
('bus', c_uint8),
('baud', c_uint32)]
class _ATCASWI(Structure):
"""SWI (Atmel Single Wire Interface) HAL configuration"""
_fields_ = [('bus', c_uint8)]
class _ATCAUART(Structure):
"""Generic UART HAL configuration"""
_fields_ = [('port', c_int),
('baud', c_uint32),
('wordsize', c_uint8),
('parity', c_uint8),
('stopbits', c_uint8)]
class _ATCAHID(Structure):
"""USB (HID) HAL configuration"""
_fields_ = [('idx', c_int),
('dev_interface', get_ctype_by_name('ATCAKitType')),
('dev_identity', c_uint8),
('vid', c_uint32),
('pid', c_uint32),
('packetsize', c_uint32)]
class _ATCACUSTOM(Structure):
"""Custom HAL configuration"""
_fields_ = [('halinit', c_void_p),
('halpostinit', c_void_p),
('halsend', c_void_p),
('halreceive', c_void_p),
('halwake', c_void_p),
('halidle', c_void_p),
('halsleep', c_void_p),
('halrelease', c_void_p)]
class _ATCAIfaceParams(Union):
"""HAL Configurations supported by the library (this is a union)"""
_fields_ = [('atcai2c', _ATCAI2C),
('atcaswi', _ATCASWI),
('atcauart', _ATCAUART),
('atcahid', _ATCAHID),
('atcacustom', _ATCACUSTOM)]
class ATCAIfaceCfg(Structure):
"""Interface configuration structure used by atcab_init()"""
_fields_ = [('iface_type', get_ctype_by_name('ATCAIfaceType')),
('devtype', get_ctype_by_name('ATCADeviceType')),
('cfg', _ATCAIfaceParams),
('wake_delay', c_uint16),
('rx_retries', c_int),
('cfg_data', c_void_p)]
def cfg_ateccx08a_i2c_default():
"""Default configuration for an ECCx08A device on the first logical I2C bus"""
return ATCAIfaceCfg.in_dll(get_cryptoauthlib(), 'cfg_ateccx08a_i2c_default')
def cfg_ateccx08a_swi_default():
"""Default configuration for an ECCx08A device on the logical SWI bus over UART"""
return ATCAIfaceCfg.in_dll(get_cryptoauthlib(), 'cfg_ateccx08a_swi_default')
def cfg_ateccx08a_kithid_default():
"""Default configuration for Kit protocol over a HID interface"""
return ATCAIfaceCfg.in_dll(get_cryptoauthlib(), 'cfg_ateccx08a_kithid_default')
def cfg_atsha204a_i2c_default():
"""Default configuration for a SHA204A device on the first logical I2C bus"""
return ATCAIfaceCfg.in_dll(get_cryptoauthlib(), 'cfg_atsha204a_i2c_default')
def cfg_atsha204a_swi_default():
"""Default configuration for an SHA204A device on the logical SWI bus over UART"""
return ATCAIfaceCfg.in_dll(get_cryptoauthlib(), 'cfg_atsha204a_swi_default')
def cfg_atsha204a_kithid_default():
"""Default configuration for Kit protocol over a HID interface for SHA204"""
return ATCAIfaceCfg.in_dll(get_cryptoauthlib(), 'cfg_atsha204a_kithid_default')
# Make module import * safe - keep at the end of the file
__all__ = (['ATCAIfaceCfg', 'ATCAIfaceType', 'ATCADeviceType', 'ATCAKitType']
+ [x for x in dir() if x.startswith('cfg_')])
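A brief usage sketch for the interface structures above, assuming the compiled cryptoauthlib shared library is available on the host; the I2C address and bus values are illustrative assumptions, not defaults taken from this module.
from cryptoauthlib.library import load_cryptoauthlib
from cryptoauthlib.iface import cfg_ateccx08a_i2c_default, ATCAIfaceType
load_cryptoauthlib()                  # the in_dll() lookups above need the shared library loaded first
cfg = cfg_ateccx08a_i2c_default()     # default I2C configuration exposed by the library
cfg.cfg.atcai2c.slave_address = 0xC0  # assumed 8-bit I2C address, for illustration only
cfg.cfg.atcai2c.bus = 1               # assumed logical bus number
print(ATCAIfaceType(cfg.iface_type))  # ATCAIfaceType.ATCA_I2C_IFACE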
| 34.7125
| 108
| 0.675729
|
719b7b42aa1d2dd6264575f409d9bea327dac641
| 843
|
py
|
Python
|
resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/Pmv/hostappInterface/cinema4d/test/c4dPmvClient.py
|
J-E-J-S/aaRS-Pipeline
|
43f59f28ab06e4b16328c3bc405cdddc6e69ac44
|
[
"MIT"
] | 8
|
2021-12-14T21:30:01.000Z
|
2022-02-14T11:30:03.000Z
|
resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/Pmv/hostappInterface/cinema4d/test/c4dPmvClient.py
|
J-E-J-S/aaRS-Pipeline
|
43f59f28ab06e4b16328c3bc405cdddc6e69ac44
|
[
"MIT"
] | null | null | null |
resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/Pmv/hostappInterface/cinema4d/test/c4dPmvClient.py
|
J-E-J-S/aaRS-Pipeline
|
43f59f28ab06e4b16328c3bc405cdddc6e69ac44
|
[
"MIT"
] | null | null | null |
##############################################
# pmvClient.py (c) 2009 by Ludovic Autin #
# #
# Description: #
# #
# #
##############################################
from Pmv.moleculeViewer import MoleculeViewer
from mglutil.hostappli.pdb_c4d import *
import c4d
from c4d import documents
scene = doc = documents.get_active_document()
self = MoleculeViewer(logMode = 'overwrite', customizer=None, master=None,title='toto', withShell= 0,verbose=False, gui = False)
from Pmv.displayCommands import BindGeomToMolecularFragment
self.addCommand(BindGeomToMolecularFragment(), 'bindGeomToMolecularFragment', None)
self.embedInto('c4d',debug=1)
self.at_mat=create_Atoms_materials(doc)
self.hostApp.setServer('localhost',50000)
self.hostApp.start()
| 36.652174
| 128
| 0.608541
|
3a0a056d3638c0009e50569aed6ad50e137e2ecd
| 8,776
|
py
|
Python
|
lean/commands/research.py
|
InvestWeMust/lean-cli
|
a7241a0af6202dc7d56c0f35d09e51798cc5d426
|
[
"Apache-2.0"
] | 76
|
2021-02-03T02:32:32.000Z
|
2022-03-28T17:04:03.000Z
|
lean/commands/research.py
|
InvestWeMust/lean-cli
|
a7241a0af6202dc7d56c0f35d09e51798cc5d426
|
[
"Apache-2.0"
] | 64
|
2021-02-28T23:14:17.000Z
|
2022-03-30T23:22:24.000Z
|
lean/commands/research.py
|
InvestWeMust/lean-cli
|
a7241a0af6202dc7d56c0f35d09e51798cc5d426
|
[
"Apache-2.0"
] | 50
|
2021-02-11T01:25:24.000Z
|
2022-03-17T03:56:29.000Z
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean CLI v1.0. Copyright 2021 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import webbrowser
from pathlib import Path
from typing import Optional
import click
from docker.errors import APIError
from docker.types import Mount
from lean.click import LeanCommand, PathParameter
from lean.constants import DEFAULT_RESEARCH_IMAGE, GUI_PRODUCT_INSTALL_ID
from lean.container import container
from lean.models.data_providers import all_data_providers
from lean.models.data_providers.quantconnect import QuantConnectDataProvider
def _check_docker_output(chunk: str, port: int) -> None:
"""Checks the output of the Docker container and opens the browser if Jupyter Lab has started.
:param chunk: the output chunk
:param port: the port Jupyter Lab will be running on
"""
if "is running at:" in chunk:
webbrowser.open(f"http://localhost:{port}/")
@click.command(cls=LeanCommand, requires_lean_config=True, requires_docker=True)
@click.argument("project", type=PathParameter(exists=True, file_okay=False, dir_okay=True))
@click.option("--port", type=int, default=8888, help="The port to run Jupyter Lab on (defaults to 8888)")
@click.option("--data-provider",
type=click.Choice([dp.get_name() for dp in all_data_providers], case_sensitive=False),
help="Update the Lean configuration file to retrieve data from the given provider")
@click.option("--download-data",
is_flag=True,
default=False,
help=f"Update the Lean configuration file to download data from the QuantConnect API, alias for --data-provider {QuantConnectDataProvider.get_name()}")
@click.option("--data-purchase-limit",
type=int,
help="The maximum amount of QCC to spend on downloading data during the research session when using QuantConnect as data provider")
@click.option("--detach", "-d",
is_flag=True,
default=False,
help="Run Jupyter Lab in a detached Docker container and return immediately")
@click.option("--no-open",
is_flag=True,
default=False,
help="Don't open the Jupyter Lab environment in the browser after starting it")
@click.option("--image", type=str, help=f"The LEAN research image to use (defaults to {DEFAULT_RESEARCH_IMAGE})")
@click.option("--update",
is_flag=True,
default=False,
help="Pull the LEAN research image before starting the research environment")
def research(project: Path,
port: int,
data_provider: Optional[str],
download_data: bool,
data_purchase_limit: Optional[int],
detach: bool,
no_open: bool,
image: Optional[str],
update: bool) -> None:
"""Run a Jupyter Lab environment locally using Docker.
By default the official LEAN research image is used.
You can override this using the --image option.
Alternatively you can set the default research image using `lean config set research-image <image>`.
"""
project_manager = container.project_manager()
algorithm_file = project_manager.find_algorithm_file(project)
lean_config_manager = container.lean_config_manager()
lean_config = lean_config_manager.get_complete_lean_config("backtesting", algorithm_file, None)
lean_config["composer-dll-directory"] = "/Lean/Launcher/bin/Debug"
if download_data:
data_provider = QuantConnectDataProvider.get_name()
if data_provider is not None:
data_provider = next(dp for dp in all_data_providers if dp.get_name() == data_provider)
data_provider.build(lean_config, container.logger()).configure(lean_config, "backtesting")
lean_config_manager.configure_data_purchase_limit(lean_config, data_purchase_limit)
lean_runner = container.lean_runner()
temp_manager = container.temp_manager()
run_options = lean_runner.get_basic_docker_config(lean_config,
algorithm_file,
temp_manager.create_temporary_directory(),
None,
False,
detach)
# Mount the config in the notebooks directory as well
local_config_path = next(m["Source"] for m in run_options["mounts"] if m["Target"].endswith("config.json"))
run_options["mounts"].append(Mount(target="/Lean/Launcher/bin/Debug/Notebooks/config.json",
source=str(local_config_path),
type="bind",
read_only=True))
# Jupyter Lab runs on port 8888, we expose it to the local port specified by the user
run_options["ports"]["8888"] = str(port)
# Open the browser as soon as Jupyter Lab has started
if detach or not no_open:
run_options["on_output"] = lambda chunk: _check_docker_output(chunk, port)
# Give container an identifiable name when running it from the GUI
if container.module_manager().is_module_installed(GUI_PRODUCT_INSTALL_ID):
project_id = container.project_config_manager().get_local_id(algorithm_file.parent)
run_options["name"] = f"lean_cli_gui_research_{project_id}"
# Make Ctrl+C stop Jupyter Lab immediately
run_options["stop_signal"] = "SIGKILL"
# Mount the project to the notebooks directory
run_options["volumes"][str(project)] = {
"bind": "/Lean/Launcher/bin/Debug/Notebooks",
"mode": "rw"
}
# Add references to all DLLs in QuantConnect.csx so custom C# libraries can be imported with using statements
run_options["commands"].append(" && ".join([
'find . -maxdepth 1 -iname "*.dll" | xargs -I _ echo \'#r "_"\' | cat - QuantConnect.csx > NewQuantConnect.csx',
"mv NewQuantConnect.csx QuantConnect.csx"
]))
# Allow notebooks to be embedded in iframes
run_options["commands"].append("mkdir -p ~/.jupyter")
run_options["commands"].append(
'echo "c.NotebookApp.disable_check_xsrf = True\nc.NotebookApp.tornado_settings = {\'headers\': {\'Content-Security-Policy\': \'frame-ancestors self *\'}}" > ~/.jupyter/jupyter_notebook_config.py')
# Hide headers in notebooks
run_options["commands"].append("mkdir -p ~/.ipython/profile_default/static/custom")
run_options["commands"].append(
'echo "#header-container { display: none !important; }" > ~/.ipython/profile_default/static/custom/custom.css')
# Run the script that starts Jupyter Lab when all set up has been done
run_options["commands"].append("./start.sh")
project_config_manager = container.project_config_manager()
cli_config_manager = container.cli_config_manager()
project_config = project_config_manager.get_project_config(algorithm_file.parent)
research_image = cli_config_manager.get_research_image(image or project_config.get("research-image", None))
container.update_manager().pull_docker_image_if_necessary(research_image, update)
try:
container.docker_manager().run_image(research_image, **run_options)
except APIError as error:
msg = error.explanation
if isinstance(msg, str) and any(m in msg.lower() for m in [
"port is already allocated",
"ports are not available"
"an attempt was made to access a socket in a way forbidden by its access permissions"
]):
raise RuntimeError(f"Port {port} is already in use, please specify a different port using --port <number>")
raise error
if detach:
temp_manager.delete_temporary_directories_when_done = False
logger = container.logger()
relative_project_dir = algorithm_file.parent.relative_to(lean_config_manager.get_cli_root_directory())
logger.info(
f"Successfully started Jupyter Lab environment for '{relative_project_dir}' in the '{run_options['name']}' container")
logger.info("You can use Docker's own commands to manage the detached container")
| 48.486188
| 204
| 0.682201
|
4f95f2d34263065bdf9005722a687a8b18430389
| 2,139
|
py
|
Python
|
InteractionTracker/analytics/migrations/0002_auto_20171016_1150.py
|
desertzebra/Lean-UX-Platform
|
1b61a4b4e0af6fc08e052fb22b4141e65122ef9a
|
[
"Apache-2.0"
] | 34
|
2019-03-11T08:10:16.000Z
|
2021-12-14T05:53:22.000Z
|
InteractionTracker/analytics/migrations/0002_auto_20171016_1150.py
|
shahidzaffar/Lean-UX-Platform
|
40c46c0421dd21cdfca254db689bf566c95e4d6a
|
[
"Apache-2.0"
] | 6
|
2020-11-17T06:57:39.000Z
|
2022-01-04T16:51:41.000Z
|
InteractionTracker/analytics/migrations/0002_auto_20171016_1150.py
|
shahidzaffar/Lean-UX-Platform
|
40c46c0421dd21cdfca254db689bf566c95e4d6a
|
[
"Apache-2.0"
] | 28
|
2019-03-11T08:10:19.000Z
|
2021-12-14T06:02:37.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-16 02:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('analytics', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='log',
name='action_name',
field=models.CharField(blank=True, max_length=250),
),
migrations.AlterField(
model_name='log',
name='country',
field=models.CharField(blank=True, max_length=250),
),
migrations.AlterField(
model_name='log',
name='entry_screen',
field=models.CharField(blank=True, max_length=250),
),
migrations.AlterField(
model_name='log',
name='event_action',
field=models.CharField(blank=True, max_length=250),
),
migrations.AlterField(
model_name='log',
name='event_category',
field=models.CharField(blank=True, max_length=250),
),
migrations.AlterField(
model_name='log',
name='event_name',
field=models.CharField(blank=True, max_length=250),
),
migrations.AlterField(
model_name='log',
name='event_value',
field=models.CharField(blank=True, max_length=250),
),
migrations.AlterField(
model_name='log',
name='exit_screen',
field=models.CharField(blank=True, max_length=250),
),
migrations.AlterField(
model_name='log',
name='language',
field=models.CharField(blank=True, max_length=250),
),
migrations.AlterField(
model_name='log',
name='screen_resolution',
field=models.CharField(blank=True, max_length=250),
),
migrations.AlterField(
model_name='log',
name='user_agent',
field=models.CharField(blank=True, max_length=250),
),
]
| 30.126761
| 63
| 0.553062
|
a9fc8d2bb26436a7865655574c6a46645e2d1904
| 15,099
|
py
|
Python
|
google/cloud/appengine_admin_v1/types/app_yaml.py
|
parthea/python-appengine-admin
|
b23b075ae60fb5b7452e9c996b0febdeea81c50e
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/appengine_admin_v1/types/app_yaml.py
|
parthea/python-appengine-admin
|
b23b075ae60fb5b7452e9c996b0febdeea81c50e
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/appengine_admin_v1/types/app_yaml.py
|
parthea/python-appengine-admin
|
b23b075ae60fb5b7452e9c996b0febdeea81c50e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import duration_pb2 as duration # type: ignore
__protobuf__ = proto.module(
package="google.appengine.v1",
manifest={
"AuthFailAction",
"LoginRequirement",
"SecurityLevel",
"ApiConfigHandler",
"ErrorHandler",
"UrlMap",
"StaticFilesHandler",
"ScriptHandler",
"ApiEndpointHandler",
"HealthCheck",
"ReadinessCheck",
"LivenessCheck",
"Library",
},
)
class AuthFailAction(proto.Enum):
r"""Actions to take when the user is not logged in."""
AUTH_FAIL_ACTION_UNSPECIFIED = 0
AUTH_FAIL_ACTION_REDIRECT = 1
AUTH_FAIL_ACTION_UNAUTHORIZED = 2
class LoginRequirement(proto.Enum):
r"""Methods to restrict access to a URL based on login status."""
LOGIN_UNSPECIFIED = 0
LOGIN_OPTIONAL = 1
LOGIN_ADMIN = 2
LOGIN_REQUIRED = 3
class SecurityLevel(proto.Enum):
r"""Methods to enforce security (HTTPS) on a URL."""
_pb_options = {"allow_alias": True}
SECURE_UNSPECIFIED = 0
SECURE_DEFAULT = 0
SECURE_NEVER = 1
SECURE_OPTIONAL = 2
SECURE_ALWAYS = 3
class ApiConfigHandler(proto.Message):
r"""`Google Cloud
Endpoints <https://cloud.google.com/appengine/docs/python/endpoints/>`__
configuration for API handlers.
Attributes:
auth_fail_action (google.cloud.appengine_admin_v1.types.AuthFailAction):
Action to take when users access resources that require
authentication. Defaults to ``redirect``.
login (google.cloud.appengine_admin_v1.types.LoginRequirement):
Level of login required to access this resource. Defaults to
``optional``.
script (str):
Path to the script from the application root
directory.
security_level (google.cloud.appengine_admin_v1.types.SecurityLevel):
Security (HTTPS) enforcement for this URL.
url (str):
URL to serve the endpoint at.
"""
auth_fail_action = proto.Field(proto.ENUM, number=1, enum="AuthFailAction",)
login = proto.Field(proto.ENUM, number=2, enum="LoginRequirement",)
script = proto.Field(proto.STRING, number=3)
security_level = proto.Field(proto.ENUM, number=4, enum="SecurityLevel",)
url = proto.Field(proto.STRING, number=5)
class ErrorHandler(proto.Message):
r"""Custom static error page to be served when an error occurs.
Attributes:
error_code (google.cloud.appengine_admin_v1.types.ErrorHandler.ErrorCode):
Error condition this handler applies to.
static_file (str):
Static file content to be served for this
error.
mime_type (str):
MIME type of file. Defaults to ``text/html``.
"""
class ErrorCode(proto.Enum):
r"""Error codes."""
_pb_options = {"allow_alias": True}
ERROR_CODE_UNSPECIFIED = 0
ERROR_CODE_DEFAULT = 0
ERROR_CODE_OVER_QUOTA = 1
ERROR_CODE_DOS_API_DENIAL = 2
ERROR_CODE_TIMEOUT = 3
error_code = proto.Field(proto.ENUM, number=1, enum=ErrorCode,)
static_file = proto.Field(proto.STRING, number=2)
mime_type = proto.Field(proto.STRING, number=3)
class UrlMap(proto.Message):
r"""URL pattern and description of how the URL should be handled.
App Engine can handle URLs by executing application code or by
serving static files uploaded with the version, such as images,
CSS, or JavaScript.
Attributes:
url_regex (str):
URL prefix. Uses regular expression syntax,
which means regexp special characters must be
escaped, but should not contain groupings. All
URLs that begin with this prefix are handled by
this handler, using the portion of the URL after
the prefix as part of the file path.
static_files (google.cloud.appengine_admin_v1.types.StaticFilesHandler):
Returns the contents of a file, such as an
image, as the response.
script (google.cloud.appengine_admin_v1.types.ScriptHandler):
Executes a script to handle the requests that match this URL
pattern. Only the ``auto`` value is supported for Node.js in
the App Engine standard environment, for example
``"script": "auto"``.
api_endpoint (google.cloud.appengine_admin_v1.types.ApiEndpointHandler):
Uses API Endpoints to handle requests.
security_level (google.cloud.appengine_admin_v1.types.SecurityLevel):
Security (HTTPS) enforcement for this URL.
login (google.cloud.appengine_admin_v1.types.LoginRequirement):
Level of login required to access this
resource. Not supported for Node.js in the App
Engine standard environment.
auth_fail_action (google.cloud.appengine_admin_v1.types.AuthFailAction):
Action to take when users access resources that require
authentication. Defaults to ``redirect``.
redirect_http_response_code (google.cloud.appengine_admin_v1.types.UrlMap.RedirectHttpResponseCode):
``30x`` code to use when performing redirects for the
``secure`` field. Defaults to ``302``.
"""
class RedirectHttpResponseCode(proto.Enum):
r"""Redirect codes."""
REDIRECT_HTTP_RESPONSE_CODE_UNSPECIFIED = 0
REDIRECT_HTTP_RESPONSE_CODE_301 = 1
REDIRECT_HTTP_RESPONSE_CODE_302 = 2
REDIRECT_HTTP_RESPONSE_CODE_303 = 3
REDIRECT_HTTP_RESPONSE_CODE_307 = 4
url_regex = proto.Field(proto.STRING, number=1)
static_files = proto.Field(
proto.MESSAGE, number=2, oneof="handler_type", message="StaticFilesHandler",
)
script = proto.Field(
proto.MESSAGE, number=3, oneof="handler_type", message="ScriptHandler",
)
api_endpoint = proto.Field(
proto.MESSAGE, number=4, oneof="handler_type", message="ApiEndpointHandler",
)
security_level = proto.Field(proto.ENUM, number=5, enum="SecurityLevel",)
login = proto.Field(proto.ENUM, number=6, enum="LoginRequirement",)
auth_fail_action = proto.Field(proto.ENUM, number=7, enum="AuthFailAction",)
redirect_http_response_code = proto.Field(
proto.ENUM, number=8, enum=RedirectHttpResponseCode,
)
class StaticFilesHandler(proto.Message):
r"""Files served directly to the user for a given URL, such as
images, CSS stylesheets, or JavaScript source files. Static file
handlers describe which files in the application directory are
static files, and which URLs serve them.
Attributes:
path (str):
Path to the static files matched by the URL
pattern, from the application root directory.
The path can refer to text matched in groupings
in the URL pattern.
upload_path_regex (str):
Regular expression that matches the file
paths for all files that should be referenced by
this handler.
http_headers (Sequence[google.cloud.appengine_admin_v1.types.StaticFilesHandler.HttpHeadersEntry]):
HTTP headers to use for all responses from
these URLs.
mime_type (str):
MIME type used to serve all files served by
this handler.
Defaults to file-specific MIME types, which are
derived from each file's filename extension.
expiration (google.protobuf.duration_pb2.Duration):
Time a static file served by this handler
should be cached by web proxies and browsers.
require_matching_file (bool):
Whether this handler should match the request
if the file referenced by the handler does not
exist.
application_readable (bool):
Whether files should also be uploaded as code
data. By default, files declared in static file
handlers are uploaded as static data and are
only served to end users; they cannot be read by
the application. If enabled, uploads are charged
against both your code and static data storage
resource quotas.
"""
path = proto.Field(proto.STRING, number=1)
upload_path_regex = proto.Field(proto.STRING, number=2)
http_headers = proto.MapField(proto.STRING, proto.STRING, number=3)
mime_type = proto.Field(proto.STRING, number=4)
expiration = proto.Field(proto.MESSAGE, number=5, message=duration.Duration,)
require_matching_file = proto.Field(proto.BOOL, number=6)
application_readable = proto.Field(proto.BOOL, number=7)
class ScriptHandler(proto.Message):
r"""Executes a script to handle the request that matches the URL
pattern.
Attributes:
script_path (str):
Path to the script from the application root
directory.
"""
script_path = proto.Field(proto.STRING, number=1)
class ApiEndpointHandler(proto.Message):
r"""Uses Google Cloud Endpoints to handle requests.
Attributes:
script_path (str):
Path to the script from the application root
directory.
"""
script_path = proto.Field(proto.STRING, number=1)
class HealthCheck(proto.Message):
r"""Health checking configuration for VM instances. Unhealthy
instances are killed and replaced with new instances. Only
applicable for instances in App Engine flexible environment.
Attributes:
disable_health_check (bool):
Whether to explicitly disable health checks
for this instance.
host (str):
Host header to send when performing an HTTP
health check. Example: "myapp.appspot.com".
healthy_threshold (int):
Number of consecutive successful health
checks required before receiving traffic.
unhealthy_threshold (int):
Number of consecutive failed health checks
required before removing traffic.
restart_threshold (int):
Number of consecutive failed health checks
required before an instance is restarted.
check_interval (google.protobuf.duration_pb2.Duration):
Interval between health checks.
timeout (google.protobuf.duration_pb2.Duration):
Time before the health check is considered
failed.
"""
disable_health_check = proto.Field(proto.BOOL, number=1)
host = proto.Field(proto.STRING, number=2)
healthy_threshold = proto.Field(proto.UINT32, number=3)
unhealthy_threshold = proto.Field(proto.UINT32, number=4)
restart_threshold = proto.Field(proto.UINT32, number=5)
check_interval = proto.Field(proto.MESSAGE, number=6, message=duration.Duration,)
timeout = proto.Field(proto.MESSAGE, number=7, message=duration.Duration,)
class ReadinessCheck(proto.Message):
r"""Readiness checking configuration for VM instances. Unhealthy
instances are removed from traffic rotation.
Attributes:
path (str):
The request path.
host (str):
            Host header to send when performing an HTTP
Readiness check. Example: "myapp.appspot.com".
failure_threshold (int):
Number of consecutive failed checks required
before removing traffic.
success_threshold (int):
Number of consecutive successful checks
required before receiving traffic.
check_interval (google.protobuf.duration_pb2.Duration):
Interval between health checks.
timeout (google.protobuf.duration_pb2.Duration):
Time before the check is considered failed.
app_start_timeout (google.protobuf.duration_pb2.Duration):
A maximum time limit on application
            initialization, measured from the moment the
application successfully replies to a
healthcheck until it is ready to serve traffic.
"""
path = proto.Field(proto.STRING, number=1)
host = proto.Field(proto.STRING, number=2)
failure_threshold = proto.Field(proto.UINT32, number=3)
success_threshold = proto.Field(proto.UINT32, number=4)
check_interval = proto.Field(proto.MESSAGE, number=5, message=duration.Duration,)
timeout = proto.Field(proto.MESSAGE, number=6, message=duration.Duration,)
app_start_timeout = proto.Field(proto.MESSAGE, number=7, message=duration.Duration,)
class LivenessCheck(proto.Message):
r"""Health checking configuration for VM instances. Unhealthy
instances are killed and replaced with new instances.
Attributes:
path (str):
The request path.
host (str):
            Host header to send when performing an HTTP
Liveness check. Example: "myapp.appspot.com".
failure_threshold (int):
Number of consecutive failed checks required
before considering the VM unhealthy.
success_threshold (int):
Number of consecutive successful checks
required before considering the VM healthy.
check_interval (google.protobuf.duration_pb2.Duration):
Interval between health checks.
timeout (google.protobuf.duration_pb2.Duration):
Time before the check is considered failed.
initial_delay (google.protobuf.duration_pb2.Duration):
The initial delay before starting to execute
the checks.
"""
path = proto.Field(proto.STRING, number=1)
host = proto.Field(proto.STRING, number=2)
failure_threshold = proto.Field(proto.UINT32, number=3)
success_threshold = proto.Field(proto.UINT32, number=4)
check_interval = proto.Field(proto.MESSAGE, number=5, message=duration.Duration,)
timeout = proto.Field(proto.MESSAGE, number=6, message=duration.Duration,)
initial_delay = proto.Field(proto.MESSAGE, number=7, message=duration.Duration,)
class Library(proto.Message):
r"""Third-party Python runtime library that is required by the
application.
Attributes:
name (str):
Name of the library. Example: "django".
version (str):
Version of the library to select, or
"latest".
"""
name = proto.Field(proto.STRING, number=1)
version = proto.Field(proto.STRING, number=2)
__all__ = tuple(sorted(__protobuf__.manifest))
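A hypothetical construction of the message types defined above; the handler values are illustrative assumptions rather than anything prescribed by this module.
from google.cloud.appengine_admin_v1.types import app_yaml
handler = app_yaml.UrlMap(
    url_regex="/static",
    static_files=app_yaml.StaticFilesHandler(
        path="static/",
        upload_path_regex="static/.*",
        mime_type="text/css",
    ),
    security_level=app_yaml.SecurityLevel.SECURE_ALWAYS,
    login=app_yaml.LoginRequirement.LOGIN_OPTIONAL,
)
print(handler.static_files.path)  # the "handler_type" oneof now holds the static-files handler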
| 35.527059
| 108
| 0.671501
|
d363f269f060ab90666a0a0459316360b0d4efbb
| 1,105
|
py
|
Python
|
aulas/.resps/parabola.py
|
achiaver/introducaopython
|
10c192e680732fd1a244d30822f8e227a2b118dc
|
[
"MIT"
] | 7
|
2019-03-18T19:38:19.000Z
|
2021-01-25T20:36:43.000Z
|
aulas/.resps/parabola.py
|
achiaver/introducaopython
|
10c192e680732fd1a244d30822f8e227a2b118dc
|
[
"MIT"
] | null | null | null |
aulas/.resps/parabola.py
|
achiaver/introducaopython
|
10c192e680732fd1a244d30822f8e227a2b118dc
|
[
"MIT"
] | 2
|
2020-03-08T14:23:43.000Z
|
2020-08-26T14:36:18.000Z
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Palatino']})
plt.rc('text', usetex=True)
x = np.linspace(-4.49, 3.49, 101)
a, b, c = 1, 1, -2
y = a*x**2 + b*x + c
fig = plt.figure(figsize=(5,4))
gs = plt.GridSpec(1,1, figure=fig)
ax = fig.add_subplot(gs[0, 0])
ax.plot(x, y, label='$f(x) = x^2 + x - 2$')
ax.scatter([-2, 1],[0, 0], color='red', label='raízes')
ax.spines['left'].set_position('zero')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('zero')
ax.spines['top'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['bottom'].set_smart_bounds(True)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set(xlabel='$x$', ylabel='$f(x)$')
ax.xaxis.set_label_coords(0.94, 0.16 )
ax.yaxis.set_label_coords(.48, 0.92 )
ax.grid(color='black', linestyle='--', alpha=.05)
leg = ax.legend(loc=9, bbox_to_anchor=(0.5,0), ncol=2)
plt.title('Figura 1: Parábola em $[-4, 3]$');
plt.savefig('../imagens/parabola.png',dpi=150, bbox_extra_artists=(leg,), bbox_inches='tight')
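A quick numeric check of the roots marked in red by the script above; it is a standalone snippet and only assumes numpy, which the script already imports.
import numpy as np
print(np.sort(np.roots([1, 1, -2])))  # [-2.  1.], the scatter points drawn above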
| 33.484848
| 94
| 0.669683
|
e272a7a67dd36315a90f320d10af2c354f6ea8ef
| 488
|
py
|
Python
|
src/core/wsgi.py
|
FGlazov/il2_stats
|
fb91754e8319c645c875ef3c98c8ec5a3aa01fc2
|
[
"MIT"
] | 11
|
2016-12-15T21:12:55.000Z
|
2021-05-02T03:56:41.000Z
|
src/core/wsgi.py
|
FGlazov/il2_stats
|
fb91754e8319c645c875ef3c98c8ec5a3aa01fc2
|
[
"MIT"
] | 16
|
2019-10-05T21:22:22.000Z
|
2022-03-11T23:18:01.000Z
|
src/core/wsgi.py
|
FGlazov/il2_stats
|
fb91754e8319c645c875ef3c98c8ec5a3aa01fc2
|
[
"MIT"
] | 4
|
2020-07-10T01:09:39.000Z
|
2021-11-24T10:45:30.000Z
|
"""
WSGI config for il2_stats project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')
from dj_static import Cling, MediaCling
application = Cling(MediaCling(get_wsgi_application()))
# application = get_wsgi_application()
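A hedged local smoke test for the WSGI callable above, using only the standard library; the host, port, and single-request behaviour are illustrative choices, not part of the project.
from wsgiref.simple_server import make_server
from core.wsgi import application
with make_server("127.0.0.1", 8000, application) as httpd:
    httpd.handle_request()  # serve a single request, then exit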
| 25.684211
| 78
| 0.790984
|
920e1b263be61db8e5a057bff0189137222b88f3
| 202
|
py
|
Python
|
students/k3340/laboratory_works/MariKh4/laboratory_work_1/scientific_conferences/forms.py
|
MariKh4/ITMO_ICT_WebProgramming_2020
|
5296fcfe118490edbd9ec8758bcd9ffe2a0de333
|
[
"MIT"
] | null | null | null |
students/k3340/laboratory_works/MariKh4/laboratory_work_1/scientific_conferences/forms.py
|
MariKh4/ITMO_ICT_WebProgramming_2020
|
5296fcfe118490edbd9ec8758bcd9ffe2a0de333
|
[
"MIT"
] | null | null | null |
students/k3340/laboratory_works/MariKh4/laboratory_work_1/scientific_conferences/forms.py
|
MariKh4/ITMO_ICT_WebProgramming_2020
|
5296fcfe118490edbd9ec8758bcd9ffe2a0de333
|
[
"MIT"
] | null | null | null |
from django import forms
from .models import Comment
from django.contrib.auth.models import User
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ('body',)
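A hypothetical view-side sketch of how CommentForm could be used within the same app; the add_comment function, its post argument, and the Comment.post relation are assumptions, not part of this file.
def add_comment(request, post):
    form = CommentForm(request.POST or None)
    if form.is_valid():
        comment = form.save(commit=False)  # build the Comment instance without saving it yet
        comment.post = post                # attach whatever relation the Comment model defines
        comment.save()
    return form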
| 20.2
| 43
| 0.70297
|
0e9bc3aed69a3fe7ad20772a7dadbea92a8af146
| 4,484
|
py
|
Python
|
manila_tempest_tests/tests/api/admin/test_share_snapshot_instances_negative.py
|
openstack/manila-tempest-plugin
|
9c4a97b150e15b814acd4feb4da858a0eeff881e
|
[
"Apache-2.0"
] | 9
|
2017-10-31T10:36:34.000Z
|
2020-10-07T01:31:38.000Z
|
manila_tempest_tests/tests/api/admin/test_share_snapshot_instances_negative.py
|
openstack/manila-tempest-plugin
|
9c4a97b150e15b814acd4feb4da858a0eeff881e
|
[
"Apache-2.0"
] | null | null | null |
manila_tempest_tests/tests/api/admin/test_share_snapshot_instances_negative.py
|
openstack/manila-tempest-plugin
|
9c4a97b150e15b814acd4feb4da858a0eeff881e
|
[
"Apache-2.0"
] | 4
|
2018-07-19T13:55:51.000Z
|
2021-11-05T17:50:27.000Z
|
# Copyright 2016 Huawei
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from testtools import testcase as tc
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
CONF = config.CONF
class SnapshotInstancesNegativeTest(base.BaseSharesMixedTest):
@classmethod
def skip_checks(cls):
super(SnapshotInstancesNegativeTest, cls).skip_checks()
if not CONF.share.run_snapshot_tests:
raise cls.skipException('Snapshot tests are disabled.')
utils.check_skip_if_microversion_not_supported('2.19')
@classmethod
def resource_setup(cls):
super(SnapshotInstancesNegativeTest, cls).resource_setup()
cls.admin_client = cls.admin_shares_v2_client
cls.member_client = cls.shares_v2_client
# create share type
extra_specs = {'snapshot_support': True}
cls.share_type = cls.create_share_type(extra_specs=extra_specs)
cls.share_type_id = cls.share_type['id']
# create share
cls.share = cls.create_share(share_type_id=cls.share_type_id,
client=cls.admin_client)
cls.snapshot = cls.create_snapshot_wait_for_active(
cls.share["id"], client=cls.admin_client)
@decorators.idempotent_id('6e371aac-ff8b-4eac-abc9-b8d777448ff3')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
def test_list_snapshot_instances_with_snapshot_by_non_admin(self):
self.assertRaises(
lib_exc.Forbidden,
self.member_client.list_snapshot_instances,
snapshot_id=self.snapshot['id'])
@decorators.idempotent_id('d80331e4-8738-46c7-b726-1e716acef738')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
def test_get_snapshot_instance_by_non_admin(self):
instances = self.admin_client.list_snapshot_instances(
snapshot_id=self.snapshot['id'])['snapshot_instances']
self.assertRaises(
lib_exc.Forbidden,
self.member_client.get_snapshot_instance,
instance_id=instances[0]['id'])
@decorators.idempotent_id('666a6caf-31b0-45d9-808c-e590250cffd4')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
def test_reset_snapshot_instance_status_by_non_admin(self):
instances = self.admin_client.list_snapshot_instances(
snapshot_id=self.snapshot['id'])['snapshot_instances']
self.assertRaises(
lib_exc.Forbidden,
self.member_client.reset_snapshot_instance_status,
instances[0]['id'],
'error')
class SnapshotInstancesNegativeNoResourceTest(base.BaseSharesMixedTest):
@classmethod
def skip_checks(cls):
super(SnapshotInstancesNegativeNoResourceTest, cls).skip_checks()
if not CONF.share.run_snapshot_tests:
raise cls.skipException('Snapshot tests are disabled.')
utils.check_skip_if_microversion_not_supported('2.19')
@classmethod
def resource_setup(cls):
super(SnapshotInstancesNegativeNoResourceTest, cls).resource_setup()
cls.admin_client = cls.admin_shares_v2_client
cls.member_client = cls.shares_v2_client
@decorators.idempotent_id('abded04d-40c0-4eb9-b2be-58b4efb16244')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_get_snapshot_instance_with_non_existent_instance(self):
self.assertRaises(lib_exc.NotFound,
self.admin_client.get_snapshot_instance,
instance_id="nonexistent_instance")
@decorators.idempotent_id('1609702b-de87-4d12-9a9c-78077d4676f3')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API)
def test_list_snapshot_instances_by_non_admin(self):
self.assertRaises(
lib_exc.Forbidden,
self.member_client.list_snapshot_instances)
| 40.396396
| 78
| 0.71521
|
6db43ed10d86518115fc639ba654d14345bd6fd5
| 2,027
|
py
|
Python
|
setup.py
|
klmitch/micropath
|
3de7f3d3da59dea802b502ebc71ec5e139e25e1f
|
[
"Apache-2.0"
] | 1
|
2018-06-07T22:17:14.000Z
|
2018-06-07T22:17:14.000Z
|
setup.py
|
klmitch/micropath
|
3de7f3d3da59dea802b502ebc71ec5e139e25e1f
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
klmitch/micropath
|
3de7f3d3da59dea802b502ebc71ec5e139e25e1f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import setuptools
# Utility function to read the README file
def readfile(filename):
with open(filename) as f:
return f.read()
# Utility function to read requirements.txt files
def readreq(filename):
result = []
with open(filename) as f:
for line in f:
line = line.strip()
# Process requirement file references
if line.startswith('-r '):
subfilename = line.split(None, 1)[-1].split('#', 1)[0].strip()
if subfilename:
result += readreq(subfilename)
continue
# Strip out "-e" prefixes
if line.startswith('-e '):
line = line.split(None, 1)[-1]
# Detect URLs in the line
idx = line.find('#egg=')
if idx >= 0:
line = line[idx + 5:]
# Strip off any comments
line = line.split('#', 1)[0].strip()
# Save the requirement
if line:
result.append(line.split('#', 1)[0].strip())
return result
setuptools.setup(
name='micropath',
version='0.1.0',
author='Kevin L. Mitchell',
author_email='klmitch@mit.edu',
url='https://github.com/klmitch/micropath',
description="The micropath Web Application Framework",
long_description=readfile('README.rst'),
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP :: WSGI',
],
packages=setuptools.find_packages(exclude=['tests', 'tests.*']),
install_requires=readreq('requirements.txt'),
tests_require=readreq('test-requirements.txt'),
)
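A small illustrative run of the readreq helper defined above; the file name and its two requirement lines are assumptions chosen to exercise the "-e" prefix and comment-stripping branches.
with open("demo-requirements.txt", "w") as f:
    f.write("-e git+https://example.invalid/repo.git#egg=mypkg\n")
    f.write("requests>=2.0  # the inline comment is stripped\n")
print(readreq("demo-requirements.txt"))  # ['mypkg', 'requests>=2.0']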
| 29.808824
| 78
| 0.563394
|
063498a0e6c75b31e43af3d7926c1195bdeb64f5
| 54,278
|
py
|
Python
|
pypy/module/_cppyy/interp_cppyy.py
|
DinrusGroup/PyPy
|
9fb17e23a17e3cf511cf9c4d11408393df4748c2
|
[
"Apache-2.0",
"OpenSSL"
] | 4
|
2019-02-11T06:58:43.000Z
|
2020-03-15T14:12:32.000Z
|
pypy/module/_cppyy/interp_cppyy.py
|
DinrusGroup/PyPy
|
9fb17e23a17e3cf511cf9c4d11408393df4748c2
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
pypy/module/_cppyy/interp_cppyy.py
|
DinrusGroup/PyPy
|
9fb17e23a17e3cf511cf9c4d11408393df4748c2
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
import pypy.module._cppyy.capi as capi
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import interp2app, unwrap_spec
from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty
from pypy.interpreter.baseobjspace import W_Root
from rpython.rtyper.lltypesystem import rffi, lltype, llmemory
from rpython.rlib import jit, rdynload, rweakref, rgc
from rpython.rlib import jit_libffi, clibffi
from rpython.rlib.objectmodel import we_are_translated, keepalive_until_here
from pypy.module._cffi_backend import ctypefunc
from pypy.module._cppyy import converter, executor, ffitypes, helper
INSTANCE_FLAGS_PYTHON_OWNS = 0x0001
INSTANCE_FLAGS_IS_REF = 0x0002
INSTANCE_FLAGS_IS_R_VALUE = 0x0004
class FastCallNotPossible(Exception):
pass
# overload priorities: lower is preferred
priority = { 'void*' : 100,
'void**' : 100,
'float' : 30,
'double' : 10,
'const string&' : 1, } # solves a specific string ctor overload
from rpython.rlib.listsort import make_timsort_class
CPPMethodBaseTimSort = make_timsort_class()
class CPPMethodSort(CPPMethodBaseTimSort):
def lt(self, a, b):
return a.priority() < b.priority()
class State(object):
def __init__(self, space):
# final scoped name -> opaque handle
self.cppscope_cache = {
'void' : W_CPPClassDecl(space, capi.C_NULL_TYPE, 'void') }
# opaque handle -> app-level python class
self.cppclass_registry = {}
# app-level class generator callback
self.w_clgen_callback = None
# app-level function generator callback (currently not used)
self.w_fngen_callback = None
# C++11's nullptr
self.w_nullptr = None
def get_nullptr(space):
# construct a unique address that compares to NULL, serves as nullptr
if hasattr(space, 'fake'):
raise NotImplementedError
state = space.fromcache(State)
if state.w_nullptr is None:
from pypy.module._rawffi.interp_rawffi import unpack_simple_shape
from pypy.module._rawffi.array import W_Array, W_ArrayInstance
arr = space.interp_w(W_Array, unpack_simple_shape(space, space.newtext('P')))
# TODO: fix this hack; fromaddress() will allocate memory if address
# is null and there seems to be no way around it (ll_buffer can not
# be touched directly)
nullarr = arr.fromaddress(space, rffi.cast(rffi.ULONG, 0), 0)
assert isinstance(nullarr, W_ArrayInstance)
nullarr.free(space)
state.w_nullptr = nullarr
return state.w_nullptr
@unwrap_spec(scoped_name='text')
def resolve_name(space, scoped_name):
return space.newtext(capi.c_resolve_name(space, scoped_name))
# memoized lookup of handles by final, scoped, name of classes/namespaces
@unwrap_spec(final_scoped_name='text')
def scope_byname(space, final_scoped_name):
state = space.fromcache(State)
try:
return state.cppscope_cache[final_scoped_name]
except KeyError:
pass
opaque_handle = capi.c_get_scope_opaque(space, final_scoped_name)
assert lltype.typeOf(opaque_handle) == capi.C_SCOPE
if opaque_handle:
isns = capi.c_is_namespace(space, opaque_handle)
if isns:
cppscope = W_CPPNamespaceDecl(space, opaque_handle, final_scoped_name)
else:
if capi.c_has_complex_hierarchy(space, opaque_handle):
cppscope = W_CPPComplexClassDecl(space, opaque_handle, final_scoped_name)
else:
cppscope = W_CPPClassDecl(space, opaque_handle, final_scoped_name)
# store in the cache to prevent recursion
state.cppscope_cache[final_scoped_name] = cppscope
if not isns:
# build methods/data; TODO: also defer this for classes (a functional __dir__
            # and introspection for help() is enough and allows more lazy loading)
cppscope._build_methods()
cppscope._find_datamembers()
return cppscope
return None
@unwrap_spec(final_scoped_name='text')
def is_template(space, final_scoped_name):
return space.newbool(capi.c_is_template(space, final_scoped_name))
def std_string_name(space):
return space.newtext(capi.std_string_name)
@unwrap_spec(w_callback=W_Root)
def set_class_generator(space, w_callback):
state = space.fromcache(State)
state.w_clgen_callback = w_callback
@unwrap_spec(w_callback=W_Root)
def set_function_generator(space, w_callback):
state = space.fromcache(State)
state.w_fngen_callback = w_callback
def register_class(space, w_pycppclass):
w_cppclass = space.findattr(w_pycppclass, space.newtext("__cppdecl__"))
cppclass = space.interp_w(W_CPPClassDecl, w_cppclass, can_be_None=False)
# add back-end specific method pythonizations (doing this on the wrapped
# class allows simple aliasing of methods)
capi.pythonize(space, cppclass.name, w_pycppclass)
state = space.fromcache(State)
state.cppclass_registry[rffi.cast(rffi.LONG, cppclass.handle)] = w_pycppclass
class W_CPPLibrary(W_Root):
_immutable_ = True
def __init__(self, space, cdll):
self.cdll = cdll
self.space = space
W_CPPLibrary.typedef = TypeDef(
'CPPLibrary',
)
W_CPPLibrary.typedef.acceptable_as_base_class = True
class CPPMethod(object):
"""Dispatcher of methods. Checks the arguments, find the corresponding FFI
function if available, makes the call, and returns the wrapped result. It
also takes care of offset casting and recycling of known objects through
the memory_regulator."""
_attrs_ = ['space', 'scope', 'index', 'cppmethod', 'arg_defs', 'args_required',
'converters', 'executor', '_funcaddr', 'cif_descr', 'uses_local']
_immutable_ = True
def __init__(self, space, declaring_scope, method_index, arg_defs, args_required):
self.space = space
self.scope = declaring_scope
self.index = method_index
self.cppmethod = capi.c_get_method(self.space, self.scope, method_index)
self.arg_defs = arg_defs
self.args_required = args_required
# Setup of the method dispatch's innards is done lazily, i.e. only when
# the method is actually used.
self.converters = None
self.executor = None
self.cif_descr = lltype.nullptr(jit_libffi.CIF_DESCRIPTION)
self._funcaddr = lltype.nullptr(capi.C_FUNC_PTR.TO)
self.uses_local = False
@staticmethod
def unpack_cppthis(space, w_cppinstance, declaring_scope):
cppinstance = space.interp_w(W_CPPClass, w_cppinstance, can_be_None=False)
cppinstance._nullcheck()
return cppinstance.get_cppthis(declaring_scope)
def _address_from_local_buffer(self, call_local, idx):
if not call_local:
return call_local
stride = 2*rffi.sizeof(rffi.VOIDP)
loc_idx = lltype.direct_ptradd(rffi.cast(rffi.CCHARP, call_local), idx*stride)
return rffi.cast(rffi.VOIDP, loc_idx)
@jit.unroll_safe
def call(self, cppthis, args_w):
jit.promote(self)
assert lltype.typeOf(cppthis) == capi.C_OBJECT
# check number of given arguments against required (== total - defaults)
args_expected = len(self.arg_defs)
args_given = len(args_w)
if args_given < self.args_required:
raise oefmt(self.space.w_TypeError,
"takes at least %d arguments (%d given)", self.args_required, args_given)
elif args_expected < args_given:
raise oefmt(self.space.w_TypeError,
"takes at most %d arguments (%d given)", args_expected, args_given)
# initial setup of converters, executors, and libffi (if available)
if self.converters is None:
try:
self._setup(cppthis)
except Exception:
pass
# some calls, e.g. for ptr-ptr or reference need a local array to store data for
# the duration of the call
if self.uses_local:
call_local = lltype.malloc(rffi.VOIDP.TO, 2*len(args_w), flavor='raw')
else:
call_local = lltype.nullptr(rffi.VOIDP.TO)
try:
# attempt to call directly through ffi chain
if self._funcaddr:
try:
return self.do_fast_call(cppthis, args_w, call_local)
except FastCallNotPossible:
pass # can happen if converters or executor does not implement ffi
# ffi chain must have failed; using stub functions instead
args = self.prepare_arguments(args_w, call_local)
try:
return self.executor.execute(self.space, self.cppmethod, cppthis, len(args_w), args)
finally:
self.finalize_call(args, args_w, call_local)
finally:
if call_local:
lltype.free(call_local, flavor='raw')
@jit.unroll_safe
def do_fast_call(self, cppthis, args_w, call_local):
if self.cif_descr == lltype.nullptr(jit_libffi.CIF_DESCRIPTION):
raise FastCallNotPossible
cif_descr = self.cif_descr
buffer = lltype.malloc(rffi.CCHARP.TO, cif_descr.exchange_size, flavor='raw')
try:
# this pointer
data = capi.exchange_address(buffer, cif_descr, 0)
x = rffi.cast(rffi.LONGP, data) # LONGP needed for test_zjit.py
x[0] = rffi.cast(rffi.LONG, cppthis)
# other arguments and defaults
i = len(self.arg_defs) + 1
for i in range(len(args_w)):
conv = self.converters[i]
w_arg = args_w[i]
data = capi.exchange_address(buffer, cif_descr, i+1)
conv.convert_argument_libffi(self.space, w_arg, data, call_local)
for j in range(i+1, len(self.arg_defs)):
conv = self.converters[j]
data = capi.exchange_address(buffer, cif_descr, j+1)
conv.default_argument_libffi(self.space, data)
assert self._funcaddr
w_res = self.executor.execute_libffi(
self.space, cif_descr, self._funcaddr, buffer)
finally:
lltype.free(buffer, flavor='raw')
keepalive_until_here(args_w)
return w_res
    # from ctypefunc; have my own version for annotator purposes and to disable
    # memory tracking (the method lifetime is longer than the tests)
@jit.dont_look_inside
def _rawallocate(self, builder):
builder.space = self.space
# compute the total size needed in the CIF_DESCRIPTION buffer
builder.nb_bytes = 0
builder.bufferp = lltype.nullptr(rffi.CCHARP.TO)
builder.fb_build()
# allocate the buffer
if we_are_translated():
rawmem = lltype.malloc(rffi.CCHARP.TO, builder.nb_bytes,
flavor='raw', track_allocation=False)
rawmem = rffi.cast(jit_libffi.CIF_DESCRIPTION_P, rawmem)
else:
# gross overestimation of the length below, but too bad
rawmem = lltype.malloc(jit_libffi.CIF_DESCRIPTION_P.TO, builder.nb_bytes,
flavor='raw', track_allocation=False)
# the buffer is automatically managed from the W_CTypeFunc instance
self.cif_descr = rawmem
# call again fb_build() to really build the libffi data structures
builder.bufferp = rffi.cast(rffi.CCHARP, rawmem)
builder.fb_build()
assert builder.bufferp == rffi.ptradd(rffi.cast(rffi.CCHARP, rawmem),
builder.nb_bytes)
# fill in the 'exchange_*' fields
builder.fb_build_exchange(rawmem)
# fill in the extra fields
builder.fb_extra_fields(rawmem)
# call libffi's ffi_prep_cif() function
res = jit_libffi.jit_ffi_prep_cif(rawmem)
if res != clibffi.FFI_OK:
raise oefmt(self.space.w_SystemError,
"libffi failed to build this function type")
def _setup(self, cppthis):
self.converters = [converter.get_converter(self.space, arg_type, arg_dflt)
for arg_type, arg_dflt in self.arg_defs]
self.executor = executor.get_executor(
self.space, capi.c_method_result_type(self.space, self.scope, self.index))
for conv in self.converters:
if conv.uses_local:
self.uses_local = True
break
# Each CPPMethod corresponds one-to-one to a C++ equivalent and cppthis
# has been offset to the matching class. Hence, the libffi pointer is
# uniquely defined and needs to be setup only once.
funcaddr = capi.c_get_function_address(self.space, self.scope, self.index)
if funcaddr and cppthis: # methods only for now
state = self.space.fromcache(ffitypes.State)
# argument type specification (incl. cppthis)
fargs = []
try:
fargs.append(state.c_voidp)
for i, conv in enumerate(self.converters):
fargs.append(conv.cffi_type(self.space))
fresult = self.executor.cffi_type(self.space)
except:
raise FastCallNotPossible
# the following is derived from _cffi_backend.ctypefunc
builder = ctypefunc.CifDescrBuilder(fargs[:], fresult, clibffi.FFI_DEFAULT_ABI)
try:
self._rawallocate(builder)
except OperationError as e:
if not e.match(self.space, self.space.w_NotImplementedError):
raise
# else, eat the NotImplementedError. We will get the
# exception if we see an actual call
if self.cif_descr: # should not be True, but you never know
lltype.free(self.cif_descr, flavor='raw')
self.cif_descr = lltype.nullptr(jit_libffi.CIF_DESCRIPTION)
raise FastCallNotPossible
# success ...
self._funcaddr = funcaddr
@jit.unroll_safe
def prepare_arguments(self, args_w, call_local):
args = capi.c_allocate_function_args(self.space, len(args_w))
stride = capi.c_function_arg_sizeof(self.space)
for i in range(len(args_w)):
conv = self.converters[i]
w_arg = args_w[i]
try:
arg_i = lltype.direct_ptradd(rffi.cast(rffi.CCHARP, args), i*stride)
loc_i = self._address_from_local_buffer(call_local, i)
conv.convert_argument(self.space, w_arg, rffi.cast(capi.C_OBJECT, arg_i), loc_i)
except:
# fun :-(
for j in range(i):
conv = self.converters[j]
arg_j = lltype.direct_ptradd(rffi.cast(rffi.CCHARP, args), j*stride)
loc_j = self._address_from_local_buffer(call_local, j)
conv.free_argument(self.space, rffi.cast(capi.C_OBJECT, arg_j), loc_j)
capi.c_deallocate_function_args(self.space, args)
raise
return args
@jit.unroll_safe
def finalize_call(self, args, args_w, call_local):
stride = capi.c_function_arg_sizeof(self.space)
for i in range(len(args_w)):
conv = self.converters[i]
arg_i = lltype.direct_ptradd(rffi.cast(rffi.CCHARP, args), i*stride)
loc_i = self._address_from_local_buffer(call_local, i)
conv.finalize_call(self.space, args_w[i], loc_i)
conv.free_argument(self.space, rffi.cast(capi.C_OBJECT, arg_i), loc_i)
capi.c_deallocate_function_args(self.space, args)
def signature(self, show_formalargs=True):
return capi.c_method_signature(self.space, self.scope, self.index, show_formalargs)
def prototype(self, show_formalargs=True):
return capi.c_method_prototype(self.space, self.scope, self.index, show_formalargs)
def priority(self):
total_arg_priority = 0
for p in [priority.get(arg_type, 0) for arg_type, arg_dflt in self.arg_defs]:
total_arg_priority += p
return total_arg_priority
@rgc.must_be_light_finalizer
def __del__(self):
if self.cif_descr:
lltype.free(self.cif_descr, flavor='raw')
def __repr__(self):
return "CPPMethod: %s" % self.prototype()
def _freeze_(self):
assert 0, "you should never have a pre-built instance of this!"
class CPPFunction(CPPMethod):
"""Global (namespaced) function dispatcher."""
_immutable_ = True
@staticmethod
def unpack_cppthis(space, w_cppinstance, declaring_scope):
return capi.C_NULL_OBJECT
def __repr__(self):
return "CPPFunction: %s" % self.prototype()
class CPPTemplatedCall(CPPMethod):
"""Method dispatcher that first resolves the template instance."""
_attrs_ = ['space', 'templ_args']
_immutable_ = True
def __init__(self, space, templ_args, declaring_scope, method_index, arg_defs, args_required):
self.space = space
self.templ_args = templ_args
# TODO: might have to specialize for CPPTemplatedCall on CPPMethod/CPPFunction here
CPPMethod.__init__(self, space, declaring_scope, method_index, arg_defs, args_required)
def call(self, cppthis, args_w):
assert lltype.typeOf(cppthis) == capi.C_OBJECT
for i in range(len(args_w)):
try:
s = self.space.text_w(args_w[i])
except OperationError:
s = self.space.text_w(self.space.getattr(args_w[i], self.space.newtext('__name__')))
s = capi.c_resolve_name(self.space, s)
if s != self.templ_args[i]:
raise oefmt(self.space.w_TypeError,
"non-matching template (got %s where %s expected)",
s, self.templ_args[i])
return W_CPPBoundMethod(cppthis, self)
def bound_call(self, cppthis, args_w):
return CPPMethod.call(self, cppthis, args_w)
def __repr__(self):
return "CPPTemplatedCall: %s" % self.prototype()
class CPPConstructor(CPPMethod):
"""Method dispatcher that constructs new objects. This method can not have
a fast path, as the allocation of the object is currently left to the
reflection layer only, since the C++ class may have an overloaded operator
new, disallowing malloc here."""
_immutable_ = True
@staticmethod
def unpack_cppthis(space, w_cppinstance, declaring_scope):
return rffi.cast(capi.C_OBJECT, declaring_scope.handle)
def call(self, cppthis, args_w):
# Note: this does not return a wrapped instance, just a pointer to the
# new instance; the overload must still wrap it before returning. Also,
# cppthis is declaring_scope.handle (as per unpack_cppthis(), above).
return CPPMethod.call(self, cppthis, args_w)
def __repr__(self):
return "CPPConstructor: %s" % self.prototype()
class CPPSetItem(CPPMethod):
"""Method dispatcher specific to Python's __setitem__ mapped onto C++'s
operator[](int). The former function takes an extra argument to assign to
the return type of the latter."""
_immutable_ = True
def call(self, cppthis, args_w):
end = len(args_w)-1
if 0 <= end:
w_item = args_w[end]
args_w = args_w[:end]
if self.converters is None:
self._setup(cppthis)
self.executor.set_item(self.space, w_item) # TODO: what about threads?
CPPMethod.call(self, cppthis, args_w)
class W_CPPOverload(W_Root):
"""Dispatcher that is actually available at the app-level: it is a
collection of (possibly) overloaded methods or functions. It calls these
in order and deals with error handling and reporting."""
_attrs_ = ['space', 'scope', 'functions']
_immutable_fields_ = ['scope', 'functions[*]']
def __init__(self, space, declaring_scope, functions):
self.space = space
self.scope = declaring_scope
assert len(functions)
from rpython.rlib import debug
self.functions = debug.make_sure_not_resized(functions)
@jit.elidable_promote()
def is_static(self):
if isinstance(self.functions[0], CPPFunction):
return self.space.w_True
return self.space.w_False
@jit.unroll_safe
@unwrap_spec(args_w='args_w')
def call(self, w_cppinstance, args_w):
# instance handling is specific to the function type only, so take it out
# of the loop over function overloads
cppthis = self.functions[0].unpack_cppthis(
self.space, w_cppinstance, self.functions[0].scope)
assert lltype.typeOf(cppthis) == capi.C_OBJECT
# The following code tries out each of the functions in order. If
        # argument conversion fails (or simply if the number of arguments does
        # not match), that will lead to an exception. The JIT will snip out
# those (always) failing paths, but only if they have no side-effects.
# A second loop gathers all exceptions in the case all methods fail
# (the exception gathering would otherwise be a side-effect as far as
# the JIT is concerned).
#
        # TODO: figure out what happens if a callback into Python from the C++ call
# raises a Python exception.
jit.promote(self)
for i in range(len(self.functions)):
cppyyfunc = self.functions[i]
try:
return cppyyfunc.call(cppthis, args_w)
except Exception:
pass
# only get here if all overloads failed ...
errmsg = 'none of the %d overloaded methods succeeded. Full details:' % len(self.functions)
if hasattr(self.space, "fake"): # FakeSpace fails errorstr (see below)
raise OperationError(self.space.w_TypeError, self.space.newtext(errmsg))
w_exc_type = None
all_same_type = True
for i in range(len(self.functions)):
cppyyfunc = self.functions[i]
try:
return cppyyfunc.call(cppthis, args_w)
except OperationError as e:
# special case if there's just one function, to prevent clogging the error message
if len(self.functions) == 1:
raise
if w_exc_type is None:
w_exc_type = e.w_type
elif all_same_type and not e.match(self.space, w_exc_type):
all_same_type = False
errmsg += '\n '+cppyyfunc.prototype()+' =>\n'
errmsg += ' '+e.errorstr(self.space)
except Exception as e:
                # can not special-case this for non-overloaded functions, as an
                # OperationError is needed from this point on anyway
errmsg += '\n '+cppyyfunc.prototype()+' =>\n'
errmsg += ' Exception: '+str(e)
if all_same_type and w_exc_type is not None:
raise OperationError(w_exc_type, self.space.newtext(errmsg))
else:
raise OperationError(self.space.w_TypeError, self.space.newtext(errmsg))
def prototype(self):
sig = self.functions[0].prototype()
for i in range(1, len(self.functions)):
sig += '\n'+self.functions[i].prototype()
return self.space.newtext(sig)
def __repr__(self):
return "W_CPPOverload(%s)" % [f.prototype() for f in self.functions]
W_CPPOverload.typedef = TypeDef(
'CPPOverload',
is_static = interp2app(W_CPPOverload.is_static),
call = interp2app(W_CPPOverload.call),
prototype = interp2app(W_CPPOverload.prototype),
)
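# --- editorial sketch (not part of the original module) -----------------------
# The dispatch strategy of W_CPPOverload.call() above, reduced to plain Python:
# try each candidate in declaration order, return the first that succeeds, and
# only when every candidate fails report all failures in one combined error.
def _sketch_dispatch(candidates, *args):
    errors = []
    for func in candidates:
        try:
            return func(*args)
        except Exception as exc:   # the real code records errors in a 2nd pass
            errors.append("%s => %s" % (getattr(func, "__name__", func), exc))
    raise TypeError("none of the %d overloaded methods succeeded:\n  %s"
                    % (len(candidates), "\n  ".join(errors)))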
class W_CPPConstructorOverload(W_CPPOverload):
@jit.elidable_promote()
def is_static(self):
return self.space.w_False
@jit.elidable_promote()
def unpack_cppthis(self, w_cppinstance):
return rffi.cast(capi.C_OBJECT, self.scope.handle)
@jit.unroll_safe
@unwrap_spec(args_w='args_w')
def call(self, w_cppinstance, args_w):
# TODO: factor out the following:
if capi.c_is_abstract(self.space, self.scope.handle):
raise oefmt(self.space.w_TypeError,
"cannot instantiate abstract class '%s'",
self.scope.name)
w_result = W_CPPOverload.call(self, w_cppinstance, args_w)
newthis = rffi.cast(capi.C_OBJECT, self.space.uint_w(w_result))
cppinstance = self.space.interp_w(W_CPPClass, w_cppinstance, can_be_None=True)
if cppinstance is not None:
cppinstance._rawobject = newthis
memory_regulator.register(cppinstance)
def __repr__(self):
return "W_CPPConstructorOverload(%s)" % [f.prototype() for f in self.functions]
W_CPPConstructorOverload.typedef = TypeDef(
'CPPConstructorOverload',
is_static = interp2app(W_CPPConstructorOverload.is_static),
call = interp2app(W_CPPConstructorOverload.call),
prototype = interp2app(W_CPPConstructorOverload.prototype),
)
class W_CPPTemplateOverload(W_CPPOverload):
@unwrap_spec(args_w='args_w')
def __getitem__(self, args_w):
pass
def __repr__(self):
return "W_CPPTemplateOverload(%s)" % [f.prototype() for f in self.functions]
W_CPPTemplateOverload.typedef = TypeDef(
'CPPTemplateOverload',
__getitem__ = interp2app(W_CPPTemplateOverload.call),
)
class W_CPPBoundMethod(W_Root):
_attrs_ = ['cppthis', 'method']
def __init__(self, cppthis, method):
self.cppthis = cppthis
self.method = method
def __call__(self, args_w):
return self.method.bound_call(self.cppthis, args_w)
def __repr__(self):
return "W_CPPBoundMethod(%s)" % [f.prototype() for f in self.functions]
W_CPPBoundMethod.typedef = TypeDef(
'CPPBoundMethod',
__call__ = interp2app(W_CPPBoundMethod.__call__),
)
class W_CPPDataMember(W_Root):
_attrs_ = ['space', 'scope', 'converter', 'offset']
_immutable_fields = ['scope', 'converter', 'offset']
def __init__(self, space, declaring_scope, type_name, offset):
self.space = space
self.scope = declaring_scope
self.converter = converter.get_converter(self.space, type_name, '')
self.offset = offset
def is_static(self):
return self.space.w_False
def _get_offset(self, cppinstance):
if cppinstance:
assert lltype.typeOf(cppinstance.clsdecl.handle) == lltype.typeOf(self.scope.handle)
offset = self.offset + cppinstance.clsdecl.get_base_offset(cppinstance, self.scope)
else:
offset = self.offset
return offset
def get(self, w_cppinstance, w_pycppclass):
cppinstance = self.space.interp_w(W_CPPClass, w_cppinstance, can_be_None=True)
if not cppinstance:
raise oefmt(self.space.w_AttributeError,
"attribute access requires an instance")
offset = self._get_offset(cppinstance)
return self.converter.from_memory(self.space, w_cppinstance, w_pycppclass, offset)
def set(self, w_cppinstance, w_value):
cppinstance = self.space.interp_w(W_CPPClass, w_cppinstance, can_be_None=True)
if not cppinstance:
raise oefmt(self.space.w_AttributeError,
"attribute access requires an instance")
offset = self._get_offset(cppinstance)
self.converter.to_memory(self.space, w_cppinstance, w_value, offset)
return self.space.w_None
W_CPPDataMember.typedef = TypeDef(
'CPPDataMember',
is_static = interp2app(W_CPPDataMember.is_static),
__get__ = interp2app(W_CPPDataMember.get),
__set__ = interp2app(W_CPPDataMember.set),
)
W_CPPDataMember.typedef.acceptable_as_base_class = False
class W_CPPStaticData(W_CPPDataMember):
def is_static(self):
return self.space.w_True
@jit.elidable_promote()
def _get_offset(self, cppinstance):
return self.offset
def get(self, w_cppinstance, w_pycppclass):
return self.converter.from_memory(self.space, self.space.w_None, w_pycppclass, self.offset)
def set(self, w_cppinstance, w_value):
self.converter.to_memory(self.space, self.space.w_None, w_value, self.offset)
return self.space.w_None
W_CPPStaticData.typedef = TypeDef(
'CPPStaticData',
is_static = interp2app(W_CPPStaticData.is_static),
__get__ = interp2app(W_CPPStaticData.get),
__set__ = interp2app(W_CPPStaticData.set),
)
W_CPPStaticData.typedef.acceptable_as_base_class = False
def is_static(space, w_obj):
try:
space.interp_w(W_CPPStaticData, w_obj, can_be_None=False)
return space.w_True
except Exception:
return space.w_False
class W_CPPScopeDecl(W_Root):
_attrs_ = ['space', 'handle', 'name', 'methods', 'datamembers']
_immutable_fields_ = ['handle', 'name']
def __init__(self, space, opaque_handle, final_scoped_name):
self.space = space
self.name = final_scoped_name
assert lltype.typeOf(opaque_handle) == capi.C_SCOPE
self.handle = opaque_handle
self.methods = {}
# Do not call "self._build_methods()" here, so that a distinction can
# be made between testing for existence (i.e. existence in the cache
# of classes) and actual use. Point being that a class can use itself,
# e.g. as a return type or an argument to one of its methods.
self.datamembers = {}
# Idem as for self.methods: a type could hold itself by pointer.
def get_method_names(self):
return self.space.newlist([self.space.newtext(name) for name in self.methods])
@unwrap_spec(name='text')
def get_overload(self, name):
try:
return self.methods[name]
except KeyError:
pass
new_method = self.find_overload(name)
self.methods[name] = new_method
return new_method
def get_datamember_names(self):
return self.space.newlist([self.space.newtext(name) for name in self.datamembers])
@unwrap_spec(name='text')
def get_datamember(self, name):
try:
return self.datamembers[name]
except KeyError:
pass
new_dm = self.find_datamember(name)
self.datamembers[name] = new_dm
return new_dm
@unwrap_spec(name='text', signature='text')
def scope__dispatch__(self, name, signature):
overload = self.get_overload(name)
sig = '(%s)' % signature
for f in overload.functions:
if f.signature(False) == sig:
return W_CPPOverload(self.space, self, [f])
raise oefmt(self.space.w_LookupError, "no overload matches signature")
def __eq__(self, other):
return self.handle == other.handle
def __ne__(self, other):
return self.handle != other.handle
# For now, keep namespaces and classes separate as namespaces are extensible
# with info from multiple dictionaries and do not need to bother with meta
# classes for inheritance. Both are python classes, though, and refactoring
# may be in order at some point.
class W_CPPNamespaceDecl(W_CPPScopeDecl):
_attrs_ = ['space', 'handle', 'name', 'methods', 'datamembers']
_immutable_fields_ = ['handle', 'name']
def _make_cppfunction(self, pyname, index):
num_args = capi.c_method_num_args(self.space, self, index)
args_required = capi.c_method_req_args(self.space, self, index)
arg_defs = []
for i in range(num_args):
arg_type = capi.c_method_arg_type(self.space, self, index, i)
arg_dflt = capi.c_method_arg_default(self.space, self, index, i)
arg_defs.append((arg_type, arg_dflt))
return CPPFunction(self.space, self, index, arg_defs, args_required)
def _make_datamember(self, dm_name, dm_idx):
type_name = capi.c_datamember_type(self.space, self, dm_idx)
offset = capi.c_datamember_offset(self.space, self, dm_idx)
if offset == -1:
raise self.missing_attribute_error(dm_name)
datamember = W_CPPStaticData(self.space, self, type_name, offset)
self.datamembers[dm_name] = datamember
return datamember
def find_overload(self, meth_name):
indices = capi.c_method_indices_from_name(self.space, self, meth_name)
if not indices:
raise self.missing_attribute_error(meth_name)
cppfunctions = []
for meth_idx in indices:
f = self._make_cppfunction(meth_name, meth_idx)
cppfunctions.append(f)
overload = W_CPPOverload(self.space, self, cppfunctions)
return overload
def find_datamember(self, dm_name):
dm_idx = capi.c_datamember_index(self.space, self, dm_name)
if dm_idx < 0:
raise self.missing_attribute_error(dm_name)
datamember = self._make_datamember(dm_name, dm_idx)
return datamember
def is_namespace(self):
return self.space.w_True
def ns__dir__(self):
# Collect a list of everything (currently) available in the namespace.
# The backend can filter by returning empty strings. Special care is
# taken for functions, which need not be unique (overloading).
alldir = []
for i in range(capi.c_num_scopes(self.space, self)):
sname = capi.c_scope_name(self.space, self, i)
if sname: alldir.append(self.space.newtext(sname))
allmeth = {}
for i in range(capi.c_num_methods(self.space, self)):
idx = capi.c_method_index_at(self.space, self, i)
mname = capi.c_method_name(self.space, self, idx)
if mname: allmeth.setdefault(mname, 0)
for m in allmeth.keys():
alldir.append(self.space.newtext(m))
for i in range(capi.c_num_datamembers(self.space, self)):
dname = capi.c_datamember_name(self.space, self, i)
if dname: alldir.append(self.space.newtext(dname))
return self.space.newlist(alldir)
def missing_attribute_error(self, name):
return oefmt(self.space.w_AttributeError,
"namespace '%s' has no attribute %s", self.name, name)
W_CPPNamespaceDecl.typedef = TypeDef(
'CPPNamespaceDecl',
get_method_names = interp2app(W_CPPNamespaceDecl.get_method_names),
get_overload = interp2app(W_CPPNamespaceDecl.get_overload),
get_datamember_names = interp2app(W_CPPNamespaceDecl.get_datamember_names),
get_datamember = interp2app(W_CPPNamespaceDecl.get_datamember),
is_namespace = interp2app(W_CPPNamespaceDecl.is_namespace),
__cppname__ = interp_attrproperty('name', W_CPPNamespaceDecl, wrapfn="newtext"),
__dispatch__ = interp2app(W_CPPNamespaceDecl.scope__dispatch__),
__dir__ = interp2app(W_CPPNamespaceDecl.ns__dir__),
)
W_CPPNamespaceDecl.typedef.acceptable_as_base_class = False
class W_CPPClassDecl(W_CPPScopeDecl):
_attrs_ = ['space', 'handle', 'name', 'methods', 'datamembers']
_immutable_fields_ = ['handle', 'name', 'methods[*]', 'datamembers[*]']
def _build_methods(self):
assert len(self.methods) == 0
methods_temp = {}
for i in range(capi.c_num_methods(self.space, self)):
idx = capi.c_method_index_at(self.space, self, i)
if capi.c_is_constructor(self.space, self, idx):
pyname = '__init__'
else:
pyname = helper.map_operator_name(self.space,
capi.c_method_name(self.space, self, idx),
capi.c_method_num_args(self.space, self, idx),
capi.c_method_result_type(self.space, self, idx))
cppmethod = self._make_cppfunction(pyname, idx)
methods_temp.setdefault(pyname, []).append(cppmethod)
        # the following covers the case where the only operator[](idx) overloads
        # available are the ones that return non-const references; these can be
        # used for __getitem__ just as much as for __setitem__
        if "__getitem__" not in methods_temp:
try:
for m in methods_temp["__setitem__"]:
cppmethod = self._make_cppfunction("__getitem__", m.index)
methods_temp.setdefault("__getitem__", []).append(cppmethod)
except KeyError:
pass # just means there's no __setitem__ either
# create the overload methods from the method sets
for pyname, methods in methods_temp.iteritems():
CPPMethodSort(methods).sort()
if pyname == '__init__':
overload = W_CPPConstructorOverload(self.space, self, methods[:])
else:
overload = W_CPPOverload(self.space, self, methods[:])
self.methods[pyname] = overload
def _make_cppfunction(self, pyname, index):
num_args = capi.c_method_num_args(self.space, self, index)
args_required = capi.c_method_req_args(self.space, self, index)
arg_defs = []
for i in range(num_args):
arg_type = capi.c_method_arg_type(self.space, self, index, i)
arg_dflt = capi.c_method_arg_default(self.space, self, index, i)
arg_defs.append((arg_type, arg_dflt))
if capi.c_is_constructor(self.space, self, index):
cppfunction = CPPConstructor(self.space, self, index, arg_defs, args_required)
elif capi.c_method_is_template(self.space, self, index):
templ_args = capi.c_template_args(self.space, self, index)
cppfunction = CPPTemplatedCall(self.space, templ_args, self, index, arg_defs, args_required)
elif capi.c_is_staticmethod(self.space, self, index):
cppfunction = CPPFunction(self.space, self, index, arg_defs, args_required)
elif pyname == "__setitem__":
cppfunction = CPPSetItem(self.space, self, index, arg_defs, args_required)
else:
cppfunction = CPPMethod(self.space, self, index, arg_defs, args_required)
return cppfunction
def _find_datamembers(self):
num_datamembers = capi.c_num_datamembers(self.space, self)
for i in range(num_datamembers):
if not capi.c_is_publicdata(self.space, self, i):
continue
datamember_name = capi.c_datamember_name(self.space, self, i)
type_name = capi.c_datamember_type(self.space, self, i)
offset = capi.c_datamember_offset(self.space, self, i)
if offset == -1:
continue # dictionary problem; raises AttributeError on use
is_static = bool(capi.c_is_staticdata(self.space, self, i))
if is_static:
datamember = W_CPPStaticData(self.space, self, type_name, offset)
else:
datamember = W_CPPDataMember(self.space, self, type_name, offset)
self.datamembers[datamember_name] = datamember
def find_overload(self, name):
raise self.missing_attribute_error(name)
def find_datamember(self, name):
raise self.missing_attribute_error(name)
def get_base_offset(self, cppinstance, calling_scope):
assert self == cppinstance.clsdecl
return 0
def get_cppthis(self, cppinstance, calling_scope):
assert self == cppinstance.clsdecl
return cppinstance.get_rawobject()
def is_namespace(self):
return self.space.w_False
def get_base_names(self):
bases = []
num_bases = capi.c_num_bases(self.space, self)
for i in range(num_bases):
base_name = capi.c_base_name(self.space, self, i)
bases.append(self.space.newtext(base_name))
return self.space.newlist(bases)
def missing_attribute_error(self, name):
return oefmt(self.space.w_AttributeError,
"class '%s' has no attribute %s", self.name, name)
W_CPPClassDecl.typedef = TypeDef(
'CPPClassDecl',
get_base_names = interp2app(W_CPPClassDecl.get_base_names),
get_method_names = interp2app(W_CPPClassDecl.get_method_names),
get_overload = interp2app(W_CPPClassDecl.get_overload),
get_datamember_names = interp2app(W_CPPClassDecl.get_datamember_names),
get_datamember = interp2app(W_CPPClassDecl.get_datamember),
is_namespace = interp2app(W_CPPClassDecl.is_namespace),
__cppname__ = interp_attrproperty('name', W_CPPClassDecl, wrapfn="newtext"),
__dispatch__ = interp2app(W_CPPClassDecl.scope__dispatch__)
)
W_CPPClassDecl.typedef.acceptable_as_base_class = False
class W_CPPComplexClassDecl(W_CPPClassDecl):
def get_base_offset(self, cppinstance, calling_scope):
assert self == cppinstance.clsdecl
offset = capi.c_base_offset(self.space,
self, calling_scope, cppinstance.get_rawobject(), 1)
return offset
def get_cppthis(self, cppinstance, calling_scope):
assert self == cppinstance.clsdecl
offset = self.get_base_offset(cppinstance, calling_scope)
return capi.direct_ptradd(cppinstance.get_rawobject(), offset)
W_CPPComplexClassDecl.typedef = TypeDef(
'CPPComplexClassDecl',
get_base_names = interp2app(W_CPPComplexClassDecl.get_base_names),
get_method_names = interp2app(W_CPPComplexClassDecl.get_method_names),
get_overload = interp2app(W_CPPComplexClassDecl.get_overload),
get_datamember_names = interp2app(W_CPPComplexClassDecl.get_datamember_names),
get_datamember = interp2app(W_CPPComplexClassDecl.get_datamember),
is_namespace = interp2app(W_CPPComplexClassDecl.is_namespace),
__cppname__ = interp_attrproperty('name', W_CPPComplexClassDecl, wrapfn="newtext"),
__dispatch__ = interp2app(W_CPPComplexClassDecl.scope__dispatch__)
)
W_CPPComplexClassDecl.typedef.acceptable_as_base_class = False
class W_CPPClass(W_Root):
_attrs_ = ['space', 'clsdecl', '_rawobject', 'flags',
'finalizer_registered']
_immutable_fields_ = ['clsdecl']
finalizer_registered = False
def __init__(self, space, decl, rawobject, isref, python_owns):
self.space = space
self.clsdecl = decl
assert lltype.typeOf(rawobject) == capi.C_OBJECT
assert not isref or rawobject
self._rawobject = rawobject
assert not isref or not python_owns
self.flags = 0
if isref:
self.flags |= INSTANCE_FLAGS_IS_REF
if python_owns:
self.flags |= INSTANCE_FLAGS_PYTHON_OWNS
self._opt_register_finalizer()
def _opt_register_finalizer(self):
if not self.finalizer_registered and not hasattr(self.space, "fake"):
assert self.flags & INSTANCE_FLAGS_PYTHON_OWNS
self.register_finalizer(self.space)
self.finalizer_registered = True
def _nullcheck(self):
if not self._rawobject or \
((self.flags & INSTANCE_FLAGS_IS_REF) and not self.get_rawobject()):
raise oefmt(self.space.w_ReferenceError,
"trying to access a NULL pointer")
# allow user to determine ownership rules on a per object level
def fget_python_owns(self, space):
return space.newbool(bool(self.flags & INSTANCE_FLAGS_PYTHON_OWNS))
@unwrap_spec(value=bool)
def fset_python_owns(self, space, value):
if space.is_true(value):
self.flags |= INSTANCE_FLAGS_PYTHON_OWNS
self._opt_register_finalizer()
else:
self.flags &= ~INSTANCE_FLAGS_PYTHON_OWNS
def get_cppthis(self, calling_scope):
return self.clsdecl.get_cppthis(self, calling_scope)
def get_rawobject(self):
if not (self.flags & INSTANCE_FLAGS_IS_REF):
return self._rawobject
else:
ptrptr = rffi.cast(rffi.VOIDPP, self._rawobject)
return rffi.cast(capi.C_OBJECT, ptrptr[0])
def _get_as_builtin(self):
try:
return self.space.call_method(self, "_cppyy_as_builtin")
except OperationError as e:
if not (e.match(self.space, self.space.w_TypeError) or
e.match(self.space, self.space.w_AttributeError)):
                # TODO: TypeError is raised by call_method if the method is not found;
                # it'd be a lot nicer if only AttributeError were raised
raise
return None
def instance__init__(self, args_w):
raise oefmt(self.space.w_TypeError,
"cannot instantiate abstract class '%s'",
self.clsdecl.name)
def instance__eq__(self, w_other):
# special case: if other is None, compare pointer-style
if self.space.is_w(w_other, self.space.w_None):
return self.space.newbool(not self._rawobject)
        # get here if no class-specific overloaded operator is available; try to
        # find a global overload in gbl, in __gnu_cxx (for iterators), or in the
        # scopes of the argument classes (TODO: implement that last option)
try:
# TODO: expecting w_other to be an W_CPPClass is too limiting
other = self.space.interp_w(W_CPPClass, w_other, can_be_None=False)
for name in ["", "__gnu_cxx", "__1"]:
nss = scope_byname(self.space, name)
meth_idx = capi.c_get_global_operator(
self.space, nss, self.clsdecl, other.clsdecl, "operator==")
if meth_idx != -1:
f = nss._make_cppfunction("operator==", meth_idx)
ol = W_CPPOverload(self.space, nss, [f])
# TODO: cache this operator (not done yet, as the above does not
# select all overloads)
return ol.call(self, [self, w_other])
except OperationError as e:
if not e.match(self.space, self.space.w_TypeError):
raise
# fallback 1: convert the object to a builtin equivalent
w_as_builtin = self._get_as_builtin()
if w_as_builtin is not None:
return self.space.eq(w_as_builtin, w_other)
# fallback 2: direct pointer comparison (the class comparison is needed since
# the first data member in a struct and the struct have the same address)
other = self.space.interp_w(W_CPPClass, w_other, can_be_None=False) # TODO: factor out
iseq = (self._rawobject == other._rawobject) and (self.clsdecl == other.clsdecl)
return self.space.newbool(iseq)
def instance__ne__(self, w_other):
return self.space.not_(self.instance__eq__(w_other))
def instance__nonzero__(self):
if not self._rawobject or \
((self.flags & INSTANCE_FLAGS_IS_REF) and not self.get_rawobject()):
return self.space.w_False
return self.space.w_True
def instance__len__(self):
w_as_builtin = self._get_as_builtin()
if w_as_builtin is not None:
return self.space.len(w_as_builtin)
raise oefmt(self.space.w_TypeError,
"'%s' has no length", self.clsdecl.name)
def instance__cmp__(self, w_other):
w_as_builtin = self._get_as_builtin()
if w_as_builtin is not None:
return self.space.cmp(w_as_builtin, w_other)
raise oefmt(self.space.w_AttributeError,
"'%s' has no attribute __cmp__", self.clsdecl.name)
def instance__repr__(self):
w_as_builtin = self._get_as_builtin()
if w_as_builtin is not None:
return self.space.repr(w_as_builtin)
return self.space.newtext("<%s object at 0x%x>" %
(self.clsdecl.name, rffi.cast(rffi.ULONG, self.get_rawobject())))
def destruct(self):
if self._rawobject and not (self.flags & INSTANCE_FLAGS_IS_REF):
memory_regulator.unregister(self)
capi.c_destruct(self.space, self.clsdecl, self._rawobject)
self._rawobject = capi.C_NULL_OBJECT
def _finalize_(self):
if self.flags & INSTANCE_FLAGS_PYTHON_OWNS:
self.destruct()
W_CPPClass.typedef = TypeDef(
'CPPClass',
__python_owns__ = GetSetProperty(W_CPPClass.fget_python_owns, W_CPPClass.fset_python_owns),
__init__ = interp2app(W_CPPClass.instance__init__),
__eq__ = interp2app(W_CPPClass.instance__eq__),
__ne__ = interp2app(W_CPPClass.instance__ne__),
__nonzero__ = interp2app(W_CPPClass.instance__nonzero__),
__len__ = interp2app(W_CPPClass.instance__len__),
__cmp__ = interp2app(W_CPPClass.instance__cmp__),
__repr__ = interp2app(W_CPPClass.instance__repr__),
__destruct__ = interp2app(W_CPPClass.destruct),
)
W_CPPClass.typedef.acceptable_as_base_class = True
class MemoryRegulator:
# TODO: (?) An object address is not unique if e.g. the class has a
# public data member of class type at the start of its definition and
# has no virtual functions. A _key class that hashes on address and
# type would be better, but my attempt failed in the rtyper, claiming
# a call on None ("None()") and needed a default ctor. (??)
# Note that for now, the associated test carries an m_padding to make
# a difference in the addresses.
def __init__(self):
self.objects = rweakref.RWeakValueDictionary(int, W_CPPClass)
def register(self, obj):
if not obj._rawobject:
return
int_address = int(rffi.cast(rffi.LONG, obj._rawobject))
self.objects.set(int_address, obj)
def unregister(self, obj):
if not obj._rawobject:
return
int_address = int(rffi.cast(rffi.LONG, obj._rawobject))
self.objects.set(int_address, None)
def retrieve(self, address):
int_address = int(rffi.cast(rffi.LONG, address))
return self.objects.get(int_address)
memory_regulator = MemoryRegulator()
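# --- editorial sketch (not part of the original module) -----------------------
# A plain-Python analogue of the MemoryRegulator above, assuming CPython's
# weakref.WeakValueDictionary in place of rpython's RWeakValueDictionary:
# proxies are cached by raw address so the same C++ object maps back to the
# same Python wrapper, without the cache keeping the wrappers alive.
import weakref

class _SketchRegulator(object):
    def __init__(self):
        self._objects = weakref.WeakValueDictionary()

    def register(self, address, obj):
        if address:
            self._objects[address] = obj

    def unregister(self, address):
        self._objects.pop(address, None)

    def retrieve(self, address):
        return self._objects.get(address)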
def get_pythonized_cppclass(space, handle):
state = space.fromcache(State)
try:
w_pycppclass = state.cppclass_registry[rffi.cast(rffi.LONG, handle)]
except KeyError:
final_name = capi.c_scoped_final_name(space, handle)
# the callback will cache the class by calling register_class
w_pycppclass = space.call_function(state.w_clgen_callback, space.newtext(final_name))
return w_pycppclass
def get_interface_func(space, w_callable, npar):
state = space.fromcache(State)
return space.call_function(state.w_fngen_callback, w_callable, space.newint(npar))
def wrap_cppinstance(space, rawobject, clsdecl,
do_cast=True, python_owns=False, is_ref=False, fresh=False):
rawobject = rffi.cast(capi.C_OBJECT, rawobject)
# cast to actual if requested and possible
w_pycppclass = None
if do_cast and rawobject:
actual = capi.c_actual_class(space, clsdecl, rawobject)
if actual != clsdecl.handle:
try:
w_pycppclass = get_pythonized_cppclass(space, actual)
offset = capi.c_base_offset1(space, actual, clsdecl, rawobject, -1)
rawobject = capi.direct_ptradd(rawobject, offset)
w_cppdecl = space.findattr(w_pycppclass, space.newtext("__cppdecl__"))
clsdecl = space.interp_w(W_CPPClassDecl, w_cppdecl, can_be_None=False)
except Exception:
# failed to locate/build the derived class, so stick to the base (note
# that only get_pythonized_cppclass is expected to raise, so none of
# the variables are re-assigned yet)
pass
if w_pycppclass is None:
w_pycppclass = get_pythonized_cppclass(space, clsdecl.handle)
# try to recycle existing object if this one is not newly created
if not fresh and rawobject:
obj = memory_regulator.retrieve(rawobject)
if obj is not None and obj.clsdecl is clsdecl:
return obj
# fresh creation
w_cppinstance = space.allocate_instance(W_CPPClass, w_pycppclass)
cppinstance = space.interp_w(W_CPPClass, w_cppinstance, can_be_None=False)
cppinstance.__init__(space, clsdecl, rawobject, is_ref, python_owns)
memory_regulator.register(cppinstance)
return w_cppinstance
def _addressof(space, w_obj):
try:
# attempt to extract address from array
return rffi.cast(rffi.INTPTR_T, converter.get_rawbuffer(space, w_obj))
except TypeError:
pass
# attempt to get address of C++ instance
return rffi.cast(rffi.INTPTR_T, converter.get_rawobject(space, w_obj, False))
@unwrap_spec(w_obj=W_Root)
def addressof(space, w_obj):
"""Takes a bound C++ instance or array, returns the raw address."""
address = _addressof(space, w_obj)
return space.newlong(address)
@unwrap_spec(owns=bool, cast=bool)
def _bind_object(space, w_obj, w_clsdecl, owns=False, cast=False):
try:
# attempt address from array or C++ instance
rawobject = rffi.cast(capi.C_OBJECT, _addressof(space, w_obj))
except Exception:
# accept integer value as address
rawobject = rffi.cast(capi.C_OBJECT, space.uint_w(w_obj))
decl = space.interp_w(W_CPPClassDecl, w_clsdecl, can_be_None=False)
return wrap_cppinstance(space, rawobject, decl, python_owns=owns, do_cast=cast)
@unwrap_spec(owns=bool, cast=bool)
def bind_object(space, w_obj, w_pycppclass, owns=False, cast=False):
"""Takes an address and a bound C++ class proxy, returns a bound instance."""
w_clsdecl = space.findattr(w_pycppclass, space.newtext("__cppdecl__"))
if not w_clsdecl:
w_clsdecl = scope_byname(space, space.text_w(w_pycppclass))
if not w_clsdecl:
raise oefmt(space.w_TypeError,
"no such class: %s", space.text_w(w_pycppclass))
return _bind_object(space, w_obj, w_clsdecl, owns, cast)
def move(space, w_obj):
"""Casts the given instance into an C++-style rvalue."""
obj = space.interp_w(W_CPPClass, w_obj, can_be_None=True)
if obj:
obj.flags |= INSTANCE_FLAGS_IS_R_VALUE
return w_obj
| 41.182094
| 104
| 0.658315
|
4ac03ef1da7114aa54fd5f519f17d2dee24e58b8
| 836
|
py
|
Python
|
utilities.py
|
MadReza/WebParser
|
0b048f16d861bf7113428bfffe97b24151660217
|
[
"MIT"
] | null | null | null |
utilities.py
|
MadReza/WebParser
|
0b048f16d861bf7113428bfffe97b24151660217
|
[
"MIT"
] | null | null | null |
utilities.py
|
MadReza/WebParser
|
0b048f16d861bf7113428bfffe97b24151660217
|
[
"MIT"
] | null | null | null |
###~~File Retriever~~###
from glob import glob
def getFilePathsMatching(path):
    return glob(path)
###~~Cleaning~~###
import string
import re
from nltk.corpus import stopwords
from nltk import word_tokenize
#from nltk import wordpunct_tokenize, Text
cachedStopWords = stopwords.words("english")
cachedPunctuation = list(string.punctuation)
def removeStopWords(text):
text = text.lower().encode('utf8', 'ignore')
text = word_tokenize(text)
text = [re.sub(r'\b\d+\.?\d*\b', '', token) for token in text] #removing numbers
text = [token for token in text if token not in cachedPunctuation]
text = ''.join([word + " " for word in text if word not in cachedStopWords])
return text.split()
# text = wordpunct_tokenize(text)
# text = Text(text)
# return (a for a in text if a not in cachedPunctuation)
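# --- editorial usage sketch (not part of the original module) -----------------
# Minimal demo of the helpers above; it assumes the NLTK 'stopwords' and 'punkt'
# corpora are already downloaded (nltk.download(...)) and that the module runs
# under the Python 2 string semantics it was written for.
if __name__ == '__main__':
    print(getFilePathsMatching('*.py'))      # every .py file in the working dir
    print(removeStopWords(u'The 3 parsers removed 42 stop words quickly.'))
    # expected shape: ['parsers', 'removed', 'stop', 'words', 'quickly']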
| 28.827586
| 84
| 0.697368
|
c3cdadc68295e379bc7e0ae992a11aab794979b0
| 2,639
|
py
|
Python
|
config/config.py
|
dataforgoodfr/batch8_actioncontrelafaim
|
0e28d42a10bc4734f1d618bd57ad3b3cfdae1cbe
|
[
"MIT"
] | null | null | null |
config/config.py
|
dataforgoodfr/batch8_actioncontrelafaim
|
0e28d42a10bc4734f1d618bd57ad3b3cfdae1cbe
|
[
"MIT"
] | 11
|
2020-10-17T13:37:35.000Z
|
2020-11-13T18:00:49.000Z
|
config/config.py
|
dataforgoodfr/batch8_actioncontrelafaim
|
0e28d42a10bc4734f1d618bd57ad3b3cfdae1cbe
|
[
"MIT"
] | null | null | null |
# inspired from: https://stackoverflow.com/a/38409774/8086033
from pathlib import Path
import inspect
import yaml
class sources:
def __init__(self, abs_root):
# WFP Hunger Covid Snapshots
self.wfp_hunger_covid_snapshots_overview_url = "https://static.hungermapdata.org/hungermap/reports/hunger_covid_weekly_snapshot.pdf"
self.wpf_hunger_covid_snapshots_overview_path = abs_root / "./data/sources/hunger_covid_weekly_snapshot.pdf"
self.wfp_hunger_covid_snapshots_hyperlinks_mapping_path = abs_root / "./data/meta/wfp_hunger_covid_snapshots_hyperlinks_mapping.csv"
self.wfp_hunger_covid_weekly_snapshots_folder_path = abs_root / "./data/clean/wfp_hunger_covid_weekly_snapshots/"
self.wfp_hunger_covid_weekly_snapshots_metadata_path = self.wfp_hunger_covid_weekly_snapshots_folder_path / "snapshots_metadata.csv"
# IPC Alerts Acute Food Insecurity
self.ipc_alerts_url = "http://www.ipcinfo.org/ipcinfo-website/resources/alerts-archive/en/"
self.ipc_alerts_folder_path = abs_root / "./data/clean/ipc_alerts_acute_food_insecurity/"
self.ipc_alerts_metadata_path = self.ipc_alerts_folder_path / "ipc_alerts_metadata.csv"
# WAS6.11 diarrhoea cases below five years old
self.was611_diarrhoea_cases_output_path = abs_root / "./data/clean/indic_was611_diarrhoea_careseeking_below_five.csv"
self.was611_diarrhoea_output_folder_path = abs_root / "./data/sources/"
self.was611_diarrhoea_dataset_name = "unicef-mnch-diarcare"
self.was611_diarrhoea_ressource_index = 1
# Food Security Portal API
self.food_security_api_url = "http://www.foodsecurityportal.org/api/about/"
self.food_security_url = "www.foodsecurityportal.org/"
self.food_security_output_folder_path = abs_root / "./data/clean/food_security/"
# Food and Agriculture Organization FAO
self.fao_api_bulk_url = "http://fenixservices.fao.org/faostat/static/bulkdownloads/datasets_E.xml"
self.fao_output_folder = abs_root / "./data/clean/FAO/"
self.fao_metadata_csv_path = abs_root / "./data/meta/FAO_metadata.csv"
class meta:
def __init__(self, abs_root):
self.countries_ISO_3166_1_csv_path = abs_root / "./data/meta/countries - ISO 3166-1.csv"
class Config:
def __init__(self, abs_root):
self.notebooks_collection_path = abs_root / "src/notebooks/collection/"
self.sources = sources(abs_root)
self.meta = meta(abs_root)
project_absolute_root = Path(inspect.getfile(inspect.currentframe())).absolute().parent.parent
config = Config(project_absolute_root)
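# --- editorial usage sketch (not part of the original module) -----------------
# How the singleton above is typically consumed elsewhere in the project (the
# "from config.config import config" import path is an assumption based on this
# file living in config/config.py); run directly, it just prints resolved paths.
if __name__ == "__main__":
    print(config.notebooks_collection_path)
    print(config.sources.fao_output_folder)
    print(config.meta.countries_ISO_3166_1_csv_path)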
| 51.745098
| 140
| 0.755589
|
ddbe37ae0609c196e81dd3d1ffcd9d3dc54e0f68
| 771
|
py
|
Python
|
configs/selfsup/_test_/models/resnet18_simsiam.py
|
dungdinhanh/mmselfsup
|
67fc764f4f5512701f93e8e1fa39e09ee008a54a
|
[
"Apache-2.0"
] | null | null | null |
configs/selfsup/_test_/models/resnet18_simsiam.py
|
dungdinhanh/mmselfsup
|
67fc764f4f5512701f93e8e1fa39e09ee008a54a
|
[
"Apache-2.0"
] | null | null | null |
configs/selfsup/_test_/models/resnet18_simsiam.py
|
dungdinhanh/mmselfsup
|
67fc764f4f5512701f93e8e1fa39e09ee008a54a
|
[
"Apache-2.0"
] | 1
|
2022-03-15T11:31:48.000Z
|
2022-03-15T11:31:48.000Z
|
# model settings
model = dict(
type='SimSiamL',
backbone=dict(
type='ResNet',
depth=18,
in_channels=3,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='SyncBN'),
zero_init_residual=True),
neck=dict(
type='NonLinearNeck',
in_channels=512,
hid_channels=1024,
out_channels=2048,
num_layers=3,
with_last_bn_affine=False,
with_avg_pool=True),
head=dict(
type='LatentPredictHead',
predictor=dict(
type='NonLinearNeck',
in_channels=2048,
hid_channels=512,
out_channels=2048,
with_avg_pool=False,
with_last_bn=False,
with_last_bias=True)
)
)
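# --- editorial usage sketch (not part of the original config) -----------------
# Config files in this style are usually materialised with mmcv's Config loader
# before being handed to the model builder; mmcv is assumed to be installed and
# the access pattern below simply mirrors the dict defined above.
if __name__ == '__main__':
    from mmcv import Config
    cfg = Config.fromfile(__file__)
    print(cfg.model['backbone']['depth'])    # -> 18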
| 24.870968
| 49
| 0.551232
|
92dbf02ea3ada33896a8ea8ae29382f59f092c89
| 3,963
|
py
|
Python
|
fpms/modules/modes.py
|
joshschmelzle/wlanpi-fpms
|
0f57519170f8c164aa8420c98ee4a3f63a6673c1
|
[
"BSD-3-Clause"
] | null | null | null |
fpms/modules/modes.py
|
joshschmelzle/wlanpi-fpms
|
0f57519170f8c164aa8420c98ee4a3f63a6673c1
|
[
"BSD-3-Clause"
] | null | null | null |
fpms/modules/modes.py
|
joshschmelzle/wlanpi-fpms
|
0f57519170f8c164aa8420c98ee4a3f63a6673c1
|
[
"BSD-3-Clause"
] | null | null | null |
import time
import os.path
import subprocess
import fpms.modules.wlanpi_oled as oled
from fpms.modules.pages.alert import Alert
from fpms.modules.pages.simpletable import SimpleTable
from fpms.modules.constants import (
WCONSOLE_SWITCHER_FILE,
HOTSPOT_SWITCHER_FILE,
WIPERF_SWITCHER_FILE,
SERVER_SWITCHER_FILE,
)
class Mode(object):
def __init__(self, g_vars):
# create simple table
self.simple_table_obj = SimpleTable(g_vars)
# create alert
self.alert_obj = Alert(g_vars)
def switcher(self, g_vars, resource_title, resource_switcher_file, mode_name):
'''
        Function to perform a generic set of operations to switch the WLAN Pi mode
'''
reboot_image = g_vars['reboot_image']
# check resource is available
if not os.path.isfile(resource_switcher_file):
self.alert_obj.display_alert_error(g_vars, '{} mode not available.'.format(resource_title))
g_vars['display_state'] = 'page'
return
# Resource switcher was detected, so assume it's installed
if g_vars['current_mode'] == "classic":
# if in classic mode, switch to the resource
alert_msg = 'Switching to {} mode (rebooting...)'.format(resource_title)
switch = "on"
elif g_vars['current_mode'] == mode_name:
alert_msg = 'Switching to Classic mode (rebooting...)'
switch = "off"
else:
self.alert_obj.display_alert_error(g_vars, 'Unknown mode: {}'.format(g_vars['current_mode']))
g_vars['display_state'] = 'page'
return False
# Flip the mode
self.alert_obj.display_alert_info(g_vars, alert_msg, title="Success")
g_vars['shutdown_in_progress'] = True
time.sleep(2)
oled.drawImage(g_vars['reboot_image'])
g_vars['screen_cleared'] = True
try:
alert_msg = subprocess.check_output("{} {}".format(resource_switcher_file, switch), shell=True).decode() # reboots
time.sleep(1)
except subprocess.CalledProcessError as exc:
alert_msg = exc.output.decode()
# We only get to this point if the switch has failed for some reason
# (Note that the switcher script reboots the WLANPi)
g_vars['shutdown_in_progress'] = False
g_vars['screen_cleared'] = False
self.alert_obj.display_alert_error(g_vars, alert_msg)
g_vars['display_state'] = 'menu'
        # allow a few seconds to view the failure msg
time.sleep(3)
# move back up to menu branch
g_vars['current_menu_location'].pop()
return False
def wconsole_switcher(self, g_vars):
wconsole_switcher_file = WCONSOLE_SWITCHER_FILE
resource_title = "Wi-Fi Console"
mode_name = "wconsole"
resource_switcher_file = wconsole_switcher_file
# switch
self.switcher(g_vars, resource_title, resource_switcher_file, mode_name)
return True
def hotspot_switcher(self, g_vars):
hotspot_switcher_file = HOTSPOT_SWITCHER_FILE
resource_title = "Hotspot"
mode_name = "hotspot"
resource_switcher_file = hotspot_switcher_file
self.switcher(g_vars, resource_title, resource_switcher_file, mode_name)
return True
def wiperf_switcher(self, g_vars):
wiperf_switcher_file = WIPERF_SWITCHER_FILE
resource_title = "Wiperf"
mode_name = "wiperf"
resource_switcher_file = wiperf_switcher_file
self.switcher(g_vars, resource_title, resource_switcher_file, mode_name)
return True
def server_switcher(self, g_vars):
server_switcher_file = SERVER_SWITCHER_FILE
resource_title = "Server"
mode_name = "server"
resource_switcher_file = server_switcher_file
self.switcher(g_vars, resource_title, resource_switcher_file, mode_name)
return True
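# --- editorial sketch (not part of the original module) -----------------------
# The branch at the top of Mode.switcher() reduces to this small decision table:
# classic flips the requested resource on, the resource's own mode flips it back
# off, and anything else is reported as an error by the caller.
def _sketch_pick_switch(current_mode, mode_name):
    if current_mode == "classic":
        return "on"       # classic -> switch the requested resource on
    if current_mode == mode_name:
        return "off"      # already in that mode -> switch back to classic
    return None           # unknown mode: switcher() shows an alert instead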
| 31.204724
| 127
| 0.660611
|
50eb4a4cc975b84dc891495722641b27a2023ceb
| 4,966
|
py
|
Python
|
Indel_frequency_calculator.py
|
CRISPRJWCHOI/IndelSearcher
|
5824069868499bfbbd74582d5dc7a9a64a15035b
|
[
"MIT"
] | 2
|
2020-05-13T03:13:24.000Z
|
2021-04-30T03:30:08.000Z
|
Indel_frequency_calculator.py
|
CRISPRJWCHOI/IndelSearcher
|
5824069868499bfbbd74582d5dc7a9a64a15035b
|
[
"MIT"
] | null | null | null |
Indel_frequency_calculator.py
|
CRISPRJWCHOI/IndelSearcher
|
5824069868499bfbbd74582d5dc7a9a64a15035b
|
[
"MIT"
] | 2
|
2021-01-30T01:43:19.000Z
|
2021-07-12T11:47:58.000Z
|
#!/usr/bin/env python
import os
import sys
import pdb
from datetime import datetime
from collections import namedtuple as nt
from collections import OrderedDict
sOutput_dir = sys.argv[1]
def Calculate_indel_freq():
if not os.path.isdir('{outdir}/result/freq/freq_result'.format(outdir=sOutput_dir)): os.mkdir('{outdir}/result/freq/freq_result'.format(outdir=sOutput_dir))
for sFile in os.listdir('{outdir}/result/freq'.format(outdir=sOutput_dir)):
#print sFile
if os.path.isfile(os.path.join('{outdir}/result/freq'.format(outdir=sOutput_dir), sFile)):
with open(os.path.join('{outdir}/result/freq'.format(outdir=sOutput_dir), sFile)) as Input_freq,\
open(os.path.join('{outdir}/result/freq/freq_result'.format(outdir=sOutput_dir), sFile), 'w') as Output_freq:
sRef = Input_freq.readline() # first row is ref.
                sDelemiter = Input_freq.readline() # second row is '-------' delimiter.
Output_freq.write(sRef+sDelemiter)
lSeq_indel = [] # [namedtuple1(['TGCA', '30M3I']) namedtuple2 ...
dFreq_count = {} # {'30M3I':2 ... }
for sRow in Input_freq:
Seq_indel = nt('Seq_indel', ['seq', 'indel', 'freq', 'ref_needle', 'query_needle'])
if sRow == sRef: continue
if sRow[0] == '-': continue
try:
lCol = sRow.replace('\n', '').split('\t')
Seq_indel.seq = lCol[0]
Seq_indel.indel = lCol[1]
Seq_indel.ref_needle = lCol[3]
Seq_indel.query_needle = lCol[4]
lSeq_indel.append(Seq_indel)
except IndexError:
print sFile, lCol
continue
try:
dFreq_count[Seq_indel.indel] += 1
except KeyError:
dFreq_count[Seq_indel.indel] = 1
#end: for sRow
                # Add freq information to the pre-result data.
lResult = []
iTotal = len(lSeq_indel)
#print 'dFreq_count', dFreq_count
#print 'lSeq_indel', lSeq_indel
for Seq_indel in lSeq_indel:
iCount = dFreq_count[Seq_indel.indel]
Seq_indel.freq = float(iCount) / iTotal
lResult.append(Seq_indel)
lResult.sort(key=lambda x: x.indel)
lResult.sort(key=lambda x: x.freq, reverse=True)
#print 'lResult', lResult
for Seq_indel in lResult:
#print Seq_indel.__dict__
Output_freq.write('\t'.join(map(str, [Seq_indel.seq, Seq_indel.indel, Seq_indel.freq, Seq_indel.ref_needle, Seq_indel.query_needle]))+'\n')
#end: with open
#end: if os.path
#end: sFile
def Make_indel_summary():
lOutput = []
for sFile in os.listdir('{outdir}/result/freq/freq_result'.format(outdir=sOutput_dir)):
if os.path.isfile(os.path.join('{outdir}/result/freq/freq_result'.format(outdir=sOutput_dir), sFile)):
with open(os.path.join('{outdir}/result/freq/freq_result'.format(outdir=sOutput_dir), sFile)) as Input_freq:
sRef = Input_freq.readline() # first row is ref.
                sDelemiter = Input_freq.readline() # second row is '-------' delimiter.
dINDEL = OrderedDict()
lTable = [sRow.replace('\n', '').split('\t') for sRow in Input_freq]
iTotal = len(lTable)
for lCol in lTable:
sINDEL = lCol[1]
try:
dINDEL[sINDEL] += 1
except KeyError:
dINDEL[sINDEL] = 1
dINDEL = OrderedDict(sorted(dINDEL.items(), key=lambda t: t[1], reverse=True))
llINDEL = [[sKey, iValue, round(iValue/float(iTotal),3)*100] for sKey, iValue in dINDEL.items()]
sINDEL_result = ''.join([':'.join(map(str, lINDEL))+', ' for lINDEL in llINDEL])[:-2]
lOutput.append([sFile, iTotal, sINDEL_result])
#Output_freq.write('\t'.join([sFile, sTotal, sINDEL_result]) + '\n')
lOutput = sorted(lOutput, key=lambda x: x[1], reverse=True)
with open('{outdir}/result/freq/freq_result/Indel_summary.txt'.format(outdir=sOutput_dir), 'w') as Output_freq:
for lCol in lOutput:
Output_freq.write('\t'.join(map(str, lCol)) + '\n')
if __name__ == '__main__':
print 'Indel frequency calculator start: ', datetime.now()
Calculate_indel_freq()
Make_indel_summary()
print 'Indel frequency calculator end: ', datetime.now()
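# --- editorial sketch (not part of the original script) -----------------------
# The per-read frequency written by Calculate_indel_freq() is just
# count(indel) / total over one freq file; a worked miniature with made-up
# indel strings (the names below are illustrative only):
def _sketch_indel_freqs(indels):
    total = len(indels)
    counts = {}
    for indel in indels:
        counts[indel] = counts.get(indel, 0) + 1
    return dict((k, float(v) / total) for k, v in counts.items())

# _sketch_indel_freqs(['30M3I', '30M3I', '28M2D', '30M3I'])
# -> {'30M3I': 0.75, '28M2D': 0.25}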
| 41.383333
| 160
| 0.538059
|
39410c02d654e24f14f675898acc5e62bc1647bc
| 2,078
|
py
|
Python
|
samples/polybench/heat-3d.py
|
jnice-81/dace
|
5211794a2d17b7189037ac485ab0b292fb02aa0d
|
[
"BSD-3-Clause"
] | 227
|
2019-03-15T23:39:06.000Z
|
2022-03-30T07:49:08.000Z
|
samples/polybench/heat-3d.py
|
jnice-81/dace
|
5211794a2d17b7189037ac485ab0b292fb02aa0d
|
[
"BSD-3-Clause"
] | 834
|
2019-07-31T22:49:31.000Z
|
2022-03-28T14:01:32.000Z
|
samples/polybench/heat-3d.py
|
jnice-81/dace
|
5211794a2d17b7189037ac485ab0b292fb02aa0d
|
[
"BSD-3-Clause"
] | 64
|
2019-03-19T05:40:37.000Z
|
2022-03-11T15:02:42.000Z
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
import polybench
N = dace.symbol('N')
tsteps = dace.symbol('tsteps')
#datatypes = [dace.float64, dace.int32, dace.float32]
datatype = dace.float64
# Dataset sizes
sizes = [{
tsteps: 20,
N: 10
}, {
tsteps: 40,
N: 20
}, {
tsteps: 100,
N: 40
}, {
tsteps: 500,
N: 120
}, {
tsteps: 1000,
N: 200
}]
args = [
([N, N, N], datatype),
([N, N, N], datatype) #, N, tsteps
]
@dace.program(datatype[N, N, N], datatype[N, N, N]) #, dace.int32, dace.int32)
def heat3d(A, B): #, N, tsteps):
for t in range(tsteps):
@dace.map
def a(i: _[1:N - 1], j: _[1:N - 1], k: _[1:N - 1]):
a11 << A[i + 1, j, k]
a12 << A[i - 1, j, k]
a21 << A[i, j + 1, k]
a22 << A[i, j - 1, k]
a31 << A[i, j, k + 1]
a32 << A[i, j, k - 1]
a << A[i, j, k]
b >> B[i, j, k]
b = 0.125 * (a11 - datatype(2.0) * a + a12) +\
0.125 * (a21 - datatype(2.0) * a + a22) +\
0.125 * (a31 - datatype(2.0) * a + a32) +\
a
@dace.map
def a(i: _[1:N - 1], j: _[1:N - 1], k: _[1:N - 1]):
a11 << B[i + 1, j, k]
a12 << B[i - 1, j, k]
a21 << B[i, j + 1, k]
a22 << B[i, j - 1, k]
a31 << B[i, j, k + 1]
a32 << B[i, j, k - 1]
a << B[i, j, k]
b >> A[i, j, k]
b = 0.125 * (a11 - datatype(2.0) * a + a12) +\
0.125 * (a21 - datatype(2.0) * a + a22) +\
0.125 * (a31 - datatype(2.0) * a + a32) +\
a
def init_array(A, B): #, N, tsteps):
n = N.get()
for i in range(n):
for j in range(n):
for k in range(n):
A[i, j, k] = datatype((i + j + (n - k)) * 10) / n
B[i, j, k] = datatype((i + j + (n - k)) * 10) / n
if __name__ == '__main__':
polybench.main(sizes, args, [(0, 'A')], init_array, heat3d)
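# --- editorial sketch (not part of the original sample) -----------------------
# A plain NumPy transcription of one half-step of the stencil above (A -> B),
# handy for checking the dace maps numerically; numpy availability is assumed.
def _sketch_heat3d_step(A):
    import numpy as np
    A = np.asarray(A, dtype=float)
    B = A.copy()
    c = A[1:-1, 1:-1, 1:-1]
    B[1:-1, 1:-1, 1:-1] = (
        0.125 * (A[2:, 1:-1, 1:-1] - 2.0 * c + A[:-2, 1:-1, 1:-1]) +
        0.125 * (A[1:-1, 2:, 1:-1] - 2.0 * c + A[1:-1, :-2, 1:-1]) +
        0.125 * (A[1:-1, 1:-1, 2:] - 2.0 * c + A[1:-1, 1:-1, :-2]) +
        c)
    return B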
| 25.341463
| 79
| 0.400385
|
23d0a2b67c525d9313fb7bf1db1366b63b71e27f
| 666
|
py
|
Python
|
core/audit/recorder.py
|
AntonioGr7/grimai
|
d3b4724ca5636d8afefa322a5020c5a927ce0db4
|
[
"MIT"
] | null | null | null |
core/audit/recorder.py
|
AntonioGr7/grimai
|
d3b4724ca5636d8afefa322a5020c5a927ce0db4
|
[
"MIT"
] | null | null | null |
core/audit/recorder.py
|
AntonioGr7/grimai
|
d3b4724ca5636d8afefa322a5020c5a927ce0db4
|
[
"MIT"
] | null | null | null |
from matplotlib import pyplot as plt
import numpy as np
class Recorder:
def __init__(self):
self.loss = 0
self.loss_history = []
self.best_loss = np.inf
self.metrics = {}
self.accuracy = 0
self.f1_score = 0
self.count = 0
def __reset__(self):
self.count = 0
self.loss = 0
self.accuracy = 0
self.f1_score = 0
def __update_batch__(self,loss):
self.loss += loss
self.count += 1
def __update_epoch__(self):
self.loss = self.loss / self.count
self.loss_history.append(self.loss)
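# --- editorial usage sketch (not part of the original module) -----------------
# Minimal usage, assuming the training loop supplies one scalar loss per batch;
# __update_epoch__() turns the accumulated sum into the epoch average.
if __name__ == "__main__":
    rec = Recorder()
    for batch_loss in [0.9, 0.7, 0.5]:       # stand-ins for real batch losses
        rec.__update_batch__(batch_loss)
    rec.__update_epoch__()
    print(rec.loss_history)                  # ~[0.7], the epoch's mean loss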
| 22.965517
| 43
| 0.567568
|
a9b366dddb639bbc774d318518c34224a21a5869
| 1,637
|
py
|
Python
|
bcc/urls.py
|
blkCodeCollctve/bcc-cms
|
25a6f91d603311e755f6104a4960c8b19e1ab986
|
[
"MIT"
] | 1
|
2017-12-28T00:32:25.000Z
|
2017-12-28T00:32:25.000Z
|
bcc/urls.py
|
blkCodeCollctve/bcc-cms
|
25a6f91d603311e755f6104a4960c8b19e1ab986
|
[
"MIT"
] | 1
|
2020-07-20T21:17:43.000Z
|
2020-07-20T21:17:43.000Z
|
bcc/urls.py
|
blkCodeCollctve/bcc-cms
|
25a6f91d603311e755f6104a4960c8b19e1ab986
|
[
"MIT"
] | 1
|
2019-10-01T21:49:22.000Z
|
2019-10-01T21:49:22.000Z
|
"""bcc URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailcore import urls as wagtail_urls
from bcc.views import index
import config
static_dir = settings.LOCAL_STATIC_ROOT if config.DEBUG else settings.STATIC_ROOT
urlpatterns = [
# url(r'^django-admin/', include(admin.site.urls)),
url(r'^admin/', include(wagtailadmin_urls)),
url(r'^documents/', include(wagtaildocs_urls)),
# For anything not caught by a more specific rule above, hand over to
# Wagtail's serving mechanism
url(r'', include(wagtail_urls)),
] + static(settings.STATIC_URL, document_root=static_dir) \
+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 44.243243
| 87
| 0.698228
|
3a0aefe62415327fe0d109f029de9ff96f036b89
| 1,958
|
py
|
Python
|
boards/views.py
|
yardy0x/infomate.club
|
d30a704ff1906bffd2d3d20309dfcc9015dd943c
|
[
"Apache-2.0"
] | 1
|
2020-06-29T16:57:54.000Z
|
2020-06-29T16:57:54.000Z
|
boards/views.py
|
yardy0x/infomate.club
|
d30a704ff1906bffd2d3d20309dfcc9015dd943c
|
[
"Apache-2.0"
] | 8
|
2021-04-08T21:57:00.000Z
|
2022-03-12T00:50:04.000Z
|
boards/views.py
|
yardy0x/infomate.club
|
d30a704ff1906bffd2d3d20309dfcc9015dd943c
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime, timedelta
from django.conf import settings
from django.core.cache import cache
from django.shortcuts import render, get_object_or_404
from django.views.decorators.cache import cache_page
from django.views.decorators.http import last_modified
from auth.helpers import authorized_user
from boards.cache import board_last_modified_at
from boards.models import Board, BoardBlock, BoardFeed
@cache_page(settings.STATIC_PAGE_CACHE_SECONDS)
def index(request):
boards = Board.objects.filter(is_visible=True).all()
return render(request, "index.html", {
"boards": boards
})
@last_modified(board_last_modified_at)
def board(request, board_slug):
board = get_object_or_404(Board, slug=board_slug)
if board.is_private:
me = authorized_user(request)
if not me:
return render(request, "board_no_access.html", {
"board": board
}, status=401)
cached_page = cache.get(f"board_{board.slug}")
if cached_page and board.refreshed_at and board.refreshed_at <= \
datetime.utcnow() - timedelta(seconds=settings.BOARD_CACHE_SECONDS):
return cached_page
blocks = BoardBlock.objects.filter(board=board)
feeds = BoardFeed.objects.filter(board=board)
result = render(request, "board.html", {
"board": board,
"blocks": blocks,
"feeds": feeds,
})
cache.set(f"board_{board.slug}", result, settings.BOARD_CACHE_SECONDS)
return result
@cache_page(settings.STATIC_PAGE_CACHE_SECONDS)
def export(request, board_slug):
board = get_object_or_404(Board, slug=board_slug)
return render(request, "export.html", {
"board": board,
})
@cache_page(settings.STATIC_PAGE_CACHE_SECONDS)
def what(request):
return render(request, "what.html")
@cache_page(settings.STATIC_PAGE_CACHE_SECONDS)
def privacy_policy(request):
return render(request, "docs/privacy_policy.html")
| 29.666667
| 80
| 0.721655
|
c2937321a4461960fbe77ddbaacac035c18d3614
| 417
|
py
|
Python
|
aldryn_search/conf.py
|
jdgoettsch/aldryn-search
|
d9720a33ee6251d29e2ec86014af4eb998403f45
|
[
"BSD-3-Clause"
] | null | null | null |
aldryn_search/conf.py
|
jdgoettsch/aldryn-search
|
d9720a33ee6251d29e2ec86014af4eb998403f45
|
[
"BSD-3-Clause"
] | 1
|
2020-04-09T10:50:31.000Z
|
2020-04-10T08:21:08.000Z
|
aldryn_search/conf.py
|
jdgoettsch/aldryn-search
|
d9720a33ee6251d29e2ec86014af4eb998403f45
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.conf import settings
from appconf import AppConf
class AldrynSearchAppConf(AppConf):
CMS_PAGE = True
DEFAULT_LANGUAGE = settings.LANGUAGE_CODE
INDEX_BASE_CLASS = 'aldryn_search.base.AldrynIndexBase'
LANGUAGE_FROM_ALIAS = 'aldryn_search.utils.language_from_alias'
PAGINATION = 10
REGISTER_APPHOOK = True
class Meta:
prefix = 'ALDRYN_SEARCH'
| 23.166667
| 67
| 0.733813
|
cd93fd94f50ae6d432b278d9e8790c835acbf87c
| 58,382
|
py
|
Python
|
tests/test_coco_format.py
|
jenhaoyang/datumaro
|
add81ddb59502362fa65fa07e5bc4d8c9f61afde
|
[
"MIT"
] | null | null | null |
tests/test_coco_format.py
|
jenhaoyang/datumaro
|
add81ddb59502362fa65fa07e5bc4d8c9f61afde
|
[
"MIT"
] | null | null | null |
tests/test_coco_format.py
|
jenhaoyang/datumaro
|
add81ddb59502362fa65fa07e5bc4d8c9f61afde
|
[
"MIT"
] | 1
|
2021-12-15T22:15:59.000Z
|
2021-12-15T22:15:59.000Z
|
from functools import partial
from itertools import product
from unittest import TestCase
import os
import os.path as osp
import numpy as np
from datumaro.components.annotation import (
AnnotationType, Bbox, Caption, Label, LabelCategories, Mask, Points,
PointsCategories, Polygon,
)
from datumaro.components.dataset import Dataset
from datumaro.components.environment import Environment
from datumaro.components.extractor import DatasetItem
from datumaro.components.media import Image
from datumaro.plugins.coco_format.converter import (
CocoCaptionsConverter, CocoConverter, CocoImageInfoConverter,
CocoInstancesConverter, CocoLabelsConverter, CocoPanopticConverter,
CocoPersonKeypointsConverter, CocoStuffConverter,
)
from datumaro.plugins.coco_format.importer import CocoImporter
from datumaro.util.test_utils import (
TestDir, check_save_and_load, compare_datasets,
)
from .requirements import Requirements, mark_requirement
DUMMY_DATASET_DIR = osp.join(osp.dirname(__file__), 'assets', 'coco_dataset')
class CocoImporterTest(TestCase):
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import_instances(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a', subset='train', image=np.ones((5, 10, 3)),
attributes={'id': 5},
annotations=[
Bbox(2, 2, 3, 1, label=1,
group=1, id=1, attributes={'is_crowd': False})
]
),
DatasetItem(id='b', subset='val', image=np.ones((10, 5, 3)),
attributes={'id': 40},
annotations=[
Polygon([0, 0, 1, 0, 1, 2, 0, 2], label=0,
id=1, group=1, attributes={'is_crowd': False,
'x': 1, 'y': 'hello'}),
Mask(np.array( [[1, 1, 0, 0, 0]] * 10 ), label=1,
id=2, group=2, attributes={'is_crowd': True}),
]
),
], categories=['a', 'b', 'c'])
formats = ['coco', 'coco_instances']
paths = [
('', osp.join(DUMMY_DATASET_DIR, 'coco_instances')),
('train', osp.join(DUMMY_DATASET_DIR, 'coco_instances',
'annotations', 'instances_train.json')),
('val', osp.join(DUMMY_DATASET_DIR, 'coco_instances',
'annotations', 'instances_val.json')),
]
for format, (subset, path) in product(formats, paths):
if subset:
expected = expected_dataset.get_subset(subset)
else:
expected = expected_dataset
with self.subTest(path=path, format=format, subset=subset):
dataset = Dataset.import_from(path, format)
compare_datasets(self, expected, dataset, require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import_instances_with_any_annotation_filename(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a', subset='default', image=np.ones((5, 10, 3)),
attributes={'id': 5},
annotations=[
Bbox(2, 2, 3, 1, label=1,
group=1, id=1, attributes={'is_crowd': False})
]
),
], categories=['a', 'b', 'c'])
format = 'coco_instances'
with TestDir() as test_dir:
dataset_dir = osp.join(test_dir, 'dataset')
expected_dataset.export(dataset_dir, format, save_images=True)
os.rename(osp.join(dataset_dir, 'annotations', 'instances_default.json'),
osp.join(dataset_dir, 'annotations', 'aa_bbbb_cccc.json'))
imported_dataset = Dataset.import_from(dataset_dir, format)
compare_datasets(self, expected_dataset, imported_dataset,
require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import_instances_with_original_cat_ids(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a', subset='train', image=np.ones((5, 10, 3)),
attributes={'id': 5},
annotations=[
Bbox(2, 2, 3, 1, label=2,
group=1, id=1, attributes={'is_crowd': False})
]
),
], categories=['class-0', 'a', 'b', 'class-3', 'c'])
actual_dataset = Dataset.import_from(
osp.join(DUMMY_DATASET_DIR, 'coco_instances',
'annotations', 'instances_train.json'),
'coco_instances',
keep_original_category_ids=True)
compare_datasets(self, expected_dataset, actual_dataset,
require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import_captions(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a', subset='train', image=np.ones((5, 10, 3)),
attributes={'id': 5},
annotations=[
Caption('hello', id=1, group=1),
]),
DatasetItem(id='b', subset='val', image=np.ones((10, 5, 3)),
attributes={'id': 40},
annotations=[
Caption('world', id=1, group=1),
Caption('text', id=2, group=2),
]),
])
formats = ['coco', 'coco_captions']
paths = [
('', osp.join(DUMMY_DATASET_DIR, 'coco_captions')),
('train', osp.join(DUMMY_DATASET_DIR, 'coco_captions',
'annotations', 'captions_train.json')),
('val', osp.join(DUMMY_DATASET_DIR, 'coco_captions',
'annotations', 'captions_val.json')),
]
for format, (subset, path) in product(formats, paths):
if subset:
expected = expected_dataset.get_subset(subset)
else:
expected = expected_dataset
with self.subTest(path=path, format=format, subset=subset):
dataset = Dataset.import_from(path, format)
compare_datasets(self, expected, dataset, require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import_captions_with_any_annotation_filename(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a', subset='default', image=np.ones((5, 10, 3)),
attributes={'id': 5},
annotations=[
Caption('hello', id=1, group=1),
]),
])
format = 'coco_captions'
with TestDir() as test_dir:
dataset_dir = osp.join(test_dir, 'dataset')
expected_dataset.export(dataset_dir, format, save_images=True)
os.rename(osp.join(dataset_dir, 'annotations', 'captions_default.json'),
osp.join(dataset_dir, 'annotations', 'aa_bbbb_cccc.json'))
imported_dataset = Dataset.import_from(dataset_dir, format)
compare_datasets(self, expected_dataset, imported_dataset,
require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import_labels(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a', subset='train', image=np.ones((5, 10, 3)),
attributes={'id': 5},
annotations=[
Label(1, id=1, group=1),
]),
DatasetItem(id='b', subset='val', image=np.ones((10, 5, 3)),
attributes={'id': 40},
annotations=[
Label(0, id=1, group=1),
Label(1, id=2, group=2),
]),
], categories=['a', 'b'])
formats = ['coco', 'coco_labels']
paths = [
('', osp.join(DUMMY_DATASET_DIR, 'coco_labels')),
('train', osp.join(DUMMY_DATASET_DIR, 'coco_labels',
'annotations', 'labels_train.json')),
('val', osp.join(DUMMY_DATASET_DIR, 'coco_labels',
'annotations', 'labels_val.json')),
]
for format, (subset, path) in product(formats, paths):
if subset:
expected = expected_dataset.get_subset(subset)
else:
expected = expected_dataset
with self.subTest(path=path, format=format, subset=subset):
dataset = Dataset.import_from(path, format)
compare_datasets(self, expected, dataset, require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import_labels_with_any_annotation_filename(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a', subset='default', image=np.ones((5, 10, 3)),
attributes={'id': 5},
annotations=[
Label(1, id=1, group=1),
]),
], categories=['a', 'b'])
format = 'coco_labels'
with TestDir() as test_dir:
dataset_dir = osp.join(test_dir, 'dataset')
expected_dataset.export(dataset_dir, format, save_images=True)
os.rename(osp.join(dataset_dir, 'annotations', 'labels_default.json'),
osp.join(dataset_dir, 'annotations', 'aa_bbbb_cccc.json'))
imported_dataset = Dataset.import_from(dataset_dir, format)
compare_datasets(self, expected_dataset, imported_dataset,
require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import_keypoints(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a', subset='train', image=np.ones((5, 10, 3)),
attributes={'id': 5},
annotations=[
Points([0, 0, 0, 2, 4, 1], [0, 1, 2], label=1,
id=1, group=1, attributes={'is_crowd': False}),
Bbox(2, 2, 3, 1, label=1,
id=1, group=1, attributes={'is_crowd': False}),
]),
DatasetItem(id='b', subset='val', image=np.ones((10, 5, 3)),
attributes={'id': 40},
annotations=[
Points([1, 2, 3, 4, 2, 3], label=0,
id=1, group=1, attributes={'is_crowd': False,
'x': 1, 'y': 'hello'}),
Polygon([0, 0, 1, 0, 1, 2, 0, 2], label=0,
id=1, group=1, attributes={'is_crowd': False,
'x': 1, 'y': 'hello'}),
Points([2, 4, 4, 4, 4, 2], label=1,
id=2, group=2, attributes={'is_crowd': True}),
Mask(np.array( [[1, 1, 0, 0, 0]] * 10 ), label=1,
id=2, group=2, attributes={'is_crowd': True}),
]),
], categories={
AnnotationType.label: LabelCategories.from_iterable(['a', 'b']),
AnnotationType.points: PointsCategories.from_iterable(
(i, None, [[0, 1], [1, 2]]) for i in range(2)
),
})
formats = ['coco', 'coco_person_keypoints']
paths = [
('', osp.join(DUMMY_DATASET_DIR, 'coco_person_keypoints')),
('train', osp.join(DUMMY_DATASET_DIR, 'coco_person_keypoints',
'annotations', 'person_keypoints_train.json')),
('val', osp.join(DUMMY_DATASET_DIR, 'coco_person_keypoints',
'annotations', 'person_keypoints_val.json')),
]
for format, (subset, path) in product(formats, paths):
if subset:
expected = expected_dataset.get_subset(subset)
else:
expected = expected_dataset
with self.subTest(path=path, format=format, subset=subset):
dataset = Dataset.import_from(path, format)
compare_datasets(self, expected, dataset, require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import_keypoints_with_any_annotation_filename(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a', subset='default', image=np.ones((5, 10, 3)),
attributes={'id': 5},
annotations=[
Points([0, 0, 0, 2, 4, 1], [0, 1, 2], label=1,
id=1, group=1, attributes={'is_crowd': False}),
Bbox(2, 2, 3, 1, label=1,
id=1, group=1, attributes={'is_crowd': False}),
]),
], categories={
AnnotationType.label: LabelCategories.from_iterable(['a', 'b']),
AnnotationType.points: PointsCategories.from_iterable(
(i, None, [[0, 1], [1, 2]]) for i in range(2)
),
})
format = 'coco_person_keypoints'
with TestDir() as test_dir:
dataset_dir = osp.join(test_dir, 'dataset')
expected_dataset.export(dataset_dir, format, save_images=True)
            os.rename(
                osp.join(dataset_dir, 'annotations',
                    'person_keypoints_default.json'),
                osp.join(dataset_dir, 'annotations', 'aa_bbbb_cccc.json'))
imported_dataset = Dataset.import_from(dataset_dir, format)
compare_datasets(self, expected_dataset, imported_dataset,
require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import_keypoints_with_original_cat_ids(self):
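        # With keep_original_category_ids=True the label indices follow the
        # original COCO category ids, so index 0 becomes a 'class-0' placeholder
        # (see the categories of the expected dataset below).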
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a', subset='train', image=np.ones((5, 10, 3)),
attributes={'id': 5},
annotations=[
Points([0, 0, 0, 2, 4, 1], [0, 1, 2], label=2,
id=1, group=1, attributes={'is_crowd': False}),
Bbox(2, 2, 3, 1, label=2,
id=1, group=1, attributes={'is_crowd': False}),
]),
], categories={
AnnotationType.label: LabelCategories.from_iterable(
['class-0', 'a', 'b']
),
AnnotationType.points: PointsCategories.from_iterable(
[(i, None, [[0, 1], [1, 2]]) for i in range(1, 3)],
),
})
actual_dataset = Dataset.import_from(
osp.join(DUMMY_DATASET_DIR, 'coco_person_keypoints',
'annotations', 'person_keypoints_train.json'),
'coco_person_keypoints',
keep_original_category_ids=True)
compare_datasets(self, expected_dataset, actual_dataset,
require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import_image_info(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a', subset='train', image=np.ones((5, 10, 3)),
attributes={'id': 5}),
DatasetItem(id='b', subset='val', image=np.ones((10, 5, 3)),
attributes={'id': 40})
])
formats = ['coco', 'coco_image_info']
paths = [
('', osp.join(DUMMY_DATASET_DIR, 'coco_image_info')),
('train', osp.join(DUMMY_DATASET_DIR, 'coco_image_info',
'annotations', 'image_info_train.json')),
('val', osp.join(DUMMY_DATASET_DIR, 'coco_image_info',
'annotations', 'image_info_val.json')),
]
for format, (subset, path) in product(formats, paths):
if subset:
expected = expected_dataset.get_subset(subset)
else:
expected = expected_dataset
with self.subTest(path=path, format=format, subset=subset):
dataset = Dataset.import_from(path, format)
compare_datasets(self, expected, dataset, require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import_image_info_with_any_annotation_filename(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a', subset='default', image=np.ones((5, 10, 3)),
attributes={'id': 5}),
])
format = 'coco_image_info'
with TestDir() as test_dir:
dataset_dir = osp.join(test_dir, 'dataset')
expected_dataset.export(dataset_dir, format, save_images=True)
os.rename(osp.join(dataset_dir, 'annotations', 'image_info_default.json'),
osp.join(dataset_dir, 'annotations', 'aa_bbbb_cccc.json'))
imported_dataset = Dataset.import_from(dataset_dir, format)
compare_datasets(self, expected_dataset, imported_dataset,
require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import_panoptic(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a', subset='train', image=np.ones((5, 10, 3)),
attributes={'id': 5},
annotations=[
Mask(np.ones((5, 5)), label=0, id=460551,
group=460551, attributes={'is_crowd': False}),
]),
DatasetItem(id='b', subset='val', image=np.ones((10, 5, 3)),
attributes={'id': 40},
annotations=[
Mask(np.array( [[1, 1, 0, 0, 0]] * 10 ), label=0,
id=7, group=7, attributes={'is_crowd': False}),
Mask(np.array( [[0, 0, 1, 1, 0]] * 10 ), label=1,
id=20, group=20, attributes={'is_crowd': True}),
]),
], categories=['a', 'b'])
formats = ['coco', 'coco_panoptic']
paths = [
('', osp.join(DUMMY_DATASET_DIR, 'coco_panoptic')),
('train', osp.join(DUMMY_DATASET_DIR, 'coco_panoptic',
'annotations', 'panoptic_train.json')),
('val', osp.join(DUMMY_DATASET_DIR, 'coco_panoptic',
'annotations', 'panoptic_val.json')),
]
for format, (subset, path) in product(formats, paths):
if subset:
expected = expected_dataset.get_subset(subset)
else:
expected = expected_dataset
with self.subTest(path=path, format=format, subset=subset):
dataset = Dataset.import_from(path, format)
compare_datasets(self, expected, dataset, require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import_panoptic_with_any_annotation_filename(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a', subset='default', image=np.ones((5, 10, 3)),
attributes={'id': 5},
annotations=[
Mask(np.ones((5, 5)), label=0, id=460551,
group=460551, attributes={'is_crowd': False}),
]),
], categories=['a', 'b'])
format = 'coco_panoptic'
with TestDir() as test_dir:
dataset_dir = osp.join(test_dir, 'dataset')
expected_dataset.export(dataset_dir, format, save_images=True)
os.rename(osp.join(dataset_dir, 'annotations', 'panoptic_default'),
osp.join(dataset_dir, 'annotations', 'aa_bbbb_cccc'))
os.rename(osp.join(dataset_dir, 'annotations', 'panoptic_default.json'),
osp.join(dataset_dir, 'annotations', 'aa_bbbb_cccc.json'))
imported_dataset = Dataset.import_from(dataset_dir, format)
compare_datasets(self, expected_dataset, imported_dataset,
require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import_panoptic_with_original_cat_ids(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a', subset='train', image=np.ones((5, 10, 3)),
attributes={'id': 5},
annotations=[
Mask(np.ones((5, 5)), label=1, id=460551,
group=460551, attributes={'is_crowd': False}),
]),
], categories=['class-0', 'a', 'b'])
actual_dataset = Dataset.import_from(
osp.join(DUMMY_DATASET_DIR, 'coco_panoptic',
'annotations', 'panoptic_train.json'),
'coco_panoptic',
keep_original_category_ids=True)
compare_datasets(self, expected_dataset, actual_dataset,
require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import_stuff(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a', subset='train', image=np.ones((5, 10, 3)),
attributes={'id': 5},
annotations=[
Mask(np.array(
[[0, 0, 1, 1, 0, 1, 1, 0, 0, 0]] * 5
), label=0,
id=7, group=7, attributes={'is_crowd': False}),
]),
DatasetItem(id='b', subset='val', image=np.ones((10, 5, 3)),
attributes={'id': 40},
annotations=[
Mask(np.array( [[1, 1, 0, 0, 0]] * 10 ), label=1,
id=2, group=2, attributes={'is_crowd': False}),
]),
], categories=['a', 'b'])
formats = ['coco', 'coco_stuff']
paths = [
('', osp.join(DUMMY_DATASET_DIR, 'coco_stuff')),
('train', osp.join(DUMMY_DATASET_DIR, 'coco_stuff',
'annotations', 'stuff_train.json')),
('val', osp.join(DUMMY_DATASET_DIR, 'coco_stuff',
'annotations', 'stuff_val.json')),
]
for format, (subset, path) in product(formats, paths):
if subset:
expected = expected_dataset.get_subset(subset)
else:
expected = expected_dataset
with self.subTest(path=path, format=format, subset=subset):
dataset = Dataset.import_from(path, format)
compare_datasets(self, expected, dataset, require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_import_stuff_with_any_annotation_filename(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='a', subset='default', image=np.ones((5, 10, 3)),
attributes={'id': 5},
annotations=[
Mask(np.array(
[[0, 0, 1, 1, 0, 1, 1, 0, 0, 0]] * 5
), label=0,
id=7, group=7, attributes={'is_crowd': False}),
]),
], categories=['a', 'b'])
format = 'coco_stuff'
with TestDir() as test_dir:
dataset_dir = osp.join(test_dir, 'dataset')
expected_dataset.export(dataset_dir, format, save_images=True)
os.rename(osp.join(dataset_dir, 'annotations', 'stuff_default.json'),
osp.join(dataset_dir, 'annotations', 'aa_bbbb_cccc.json'))
imported_dataset = Dataset.import_from(dataset_dir, format)
compare_datasets(self, expected_dataset, imported_dataset,
require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_detect(self):
subdirs = [
'coco',
'coco_captions',
'coco_image_info',
'coco_instances',
'coco_labels',
'coco_panoptic',
'coco_person_keypoints',
'coco_stuff',
]
env = Environment()
for subdir in subdirs:
with self.subTest(subdir=subdir):
dataset_dir = osp.join(DUMMY_DATASET_DIR, subdir)
detected_formats = env.detect_dataset(dataset_dir)
self.assertEqual([CocoImporter.NAME], detected_formats)
class CocoConverterTest(TestCase):
def _test_save_and_load(self, source_dataset, converter, test_dir,
target_dataset=None, importer_args=None, **kwargs):
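        # Round-trip helper: export `source_dataset` with `converter`, re-import
        # the result with the 'coco' importer, and compare it against
        # `target_dataset` (or, if none is given, the source itself).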
return check_save_and_load(self, source_dataset, converter, test_dir,
importer='coco',
target_dataset=target_dataset, importer_args=importer_args, **kwargs)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_captions(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id=1, subset='train',
annotations=[
Caption('hello', id=1, group=1),
Caption('world', id=2, group=2),
], attributes={'id': 1}),
DatasetItem(id=2, subset='train',
annotations=[
Caption('test', id=3, group=3),
], attributes={'id': 2}),
DatasetItem(id=3, subset='val',
annotations=[
Caption('word', id=1, group=1),
], attributes={'id': 1}),
])
with TestDir() as test_dir:
self._test_save_and_load(expected_dataset,
CocoCaptionsConverter.convert, test_dir)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_instances(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, subset='train', image=np.ones((4, 4, 3)),
annotations=[
# Bbox + single polygon
Bbox(0, 1, 2, 2,
label=2, group=1, id=1,
attributes={ 'is_crowd': False }),
Polygon([0, 1, 2, 1, 2, 3, 0, 3],
attributes={ 'is_crowd': False },
label=2, group=1, id=1),
], attributes={'id': 1}),
DatasetItem(id=2, subset='train', image=np.ones((4, 4, 3)),
annotations=[
# Mask + bbox
Mask(np.array([
[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 1, 1],
[0, 0, 0, 0]],
),
attributes={ 'is_crowd': True },
label=4, group=3, id=3),
Bbox(1, 0, 2, 2, label=4, group=3, id=3,
attributes={ 'is_crowd': True }),
], attributes={'id': 2}),
DatasetItem(id=3, subset='val', image=np.ones((4, 4, 3)),
annotations=[
# Bbox + mask
Bbox(0, 1, 2, 2, label=4, group=3, id=3,
attributes={ 'is_crowd': True }),
Mask(np.array([
[0, 0, 0, 0],
[1, 1, 1, 0],
[1, 1, 0, 0],
[0, 0, 0, 0]],
),
attributes={ 'is_crowd': True },
label=4, group=3, id=3),
], attributes={'id': 1}),
], categories=[str(i) for i in range(10)])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, subset='train', image=np.ones((4, 4, 3)),
annotations=[
Polygon([0, 1, 2, 1, 2, 3, 0, 3],
attributes={ 'is_crowd': False },
label=2, group=1, id=1),
], attributes={'id': 1}),
DatasetItem(id=2, subset='train', image=np.ones((4, 4, 3)),
annotations=[
Mask(np.array([
[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 1, 1],
[0, 0, 0, 0]],
),
attributes={ 'is_crowd': True },
label=4, group=3, id=3),
], attributes={'id': 2}),
DatasetItem(id=3, subset='val', image=np.ones((4, 4, 3)),
annotations=[
Mask(np.array([
[0, 0, 0, 0],
[1, 1, 1, 0],
[1, 1, 0, 0],
[0, 0, 0, 0]],
),
attributes={ 'is_crowd': True },
label=4, group=3, id=3),
], attributes={'id': 1})
], categories=[str(i) for i in range(10)])
with TestDir() as test_dir:
self._test_save_and_load(source_dataset,
CocoInstancesConverter.convert, test_dir,
target_dataset=target_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_panoptic(self):
dataset = Dataset.from_iterable([
DatasetItem(id=1, subset='train', image=np.ones((4, 4, 3)),
annotations=[
Mask(image=np.array([
[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 1, 1],
[0, 0, 0, 0]
]),
attributes={ 'is_crowd': False },
label=4, group=3, id=3),
], attributes={'id': 1}),
DatasetItem(id=2, subset='val', image=np.ones((5, 5, 3)),
annotations=[
Mask(image=np.array([
[0, 0, 0, 0, 0],
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]
]),
attributes={ 'is_crowd': False },
label=4, group=3, id=3),
Mask(image=np.array([
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1]
]),
attributes={ 'is_crowd': False },
label=2, group=2, id=2),
], attributes={'id': 2}),
], categories=[str(i) for i in range(10)])
with TestDir() as test_dir:
self._test_save_and_load(dataset,
partial(CocoPanopticConverter.convert, save_images=True),
test_dir, require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_stuff(self):
dataset = Dataset.from_iterable([
DatasetItem(id=1, subset='train', image=np.ones((4, 4, 3)),
annotations=[
Mask(np.array([
[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 1, 1],
[0, 0, 0, 0]],
),
attributes={ 'is_crowd': False },
label=4, group=3, id=3),
], attributes={'id': 2}),
DatasetItem(id=2, subset='val', image=np.ones((4, 4, 3)),
annotations=[
Mask(np.array([
[0, 0, 0, 0],
[1, 1, 1, 0],
[1, 1, 0, 0],
[0, 0, 0, 0]],
),
attributes={ 'is_crowd': False },
label=4, group=3, id=3),
], attributes={'id': 1}),
], categories=[str(i) for i in range(10)])
with TestDir() as test_dir:
self._test_save_and_load(dataset,
CocoStuffConverter.convert, test_dir)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_merge_polygons_on_loading(self):
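        # With the `merge_instance_polygons` import option, the polygons that
        # belong to one instance (same group) are expected to be merged into a
        # single mask when the dataset is loaded back.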
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((6, 10, 3)),
annotations=[
Polygon([0, 0, 4, 0, 4, 4],
label=3, id=4, group=4),
Polygon([5, 0, 9, 0, 5, 5],
label=3, id=4, group=4),
]
),
], categories=[str(i) for i in range(10)])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((6, 10, 3)),
annotations=[
Mask(np.array([
[0, 1, 1, 1, 0, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                        # the rasterized mask keeps only the interior of the
                        # polygons (border pixels are dropped, though not
                        # uniformly along every edge)
),
label=3, id=4, group=4,
attributes={ 'is_crowd': False }),
], attributes={'id': 1}
),
], categories=[str(i) for i in range(10)])
with TestDir() as test_dir:
self._test_save_and_load(source_dataset,
CocoInstancesConverter.convert, test_dir,
importer_args={'merge_instance_polygons': True},
target_dataset=target_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_crop_covered_segments(self):
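        # With crop_covered=True the converter is expected to cut the region
        # covered by the higher z_order polygon out of the underlying mask,
        # as reflected in the target mask below.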
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 5, 3)),
annotations=[
Mask(np.array([
[0, 0, 1, 1, 1],
[0, 0, 1, 1, 1],
[1, 1, 0, 1, 1],
[1, 1, 1, 0, 0],
[1, 1, 1, 0, 0]],
),
label=2, id=1, z_order=0),
Polygon([1, 1, 4, 1, 4, 4, 1, 4],
label=1, id=2, z_order=1),
]
),
], categories=[str(i) for i in range(10)])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 5, 3)),
annotations=[
Mask(np.array([
[0, 0, 1, 1, 1],
[0, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[1, 0, 0, 0, 0],
[1, 1, 1, 0, 0]],
),
attributes={ 'is_crowd': True },
label=2, id=1, group=1),
Polygon([1, 1, 4, 1, 4, 4, 1, 4],
label=1, id=2, group=2,
attributes={ 'is_crowd': False }),
], attributes={'id': 1}
),
], categories=[str(i) for i in range(10)])
with TestDir() as test_dir:
self._test_save_and_load(source_dataset,
partial(CocoInstancesConverter.convert, crop_covered=True),
test_dir, target_dataset=target_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_convert_polygons_to_mask(self):
"""
        <b>Description:</b>
        Ensure that polygon annotations in a dataset can be properly converted
        into a segmentation mask.
        <b>Expected results:</b>
        The segmentation mask converted from the polygon annotations is equal
        to the expected mask.
        <b>Steps:</b>
        1. Prepare a dataset with polygon annotations (source dataset)
        2. Prepare a dataset with the expected segmentation mask (target dataset)
        3. Convert the source dataset to the target, converting the polygon
        annotations to a mask, and verify that the resulting segmentation mask
        is equal to the expected mask.
"""
        # 1. Prepare a dataset with polygon annotations (source dataset)
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((6, 10, 3)),
annotations=[
Polygon([0, 0, 4, 0, 4, 4],
label=3, id=4, group=4),
Polygon([5, 0, 9, 0, 5, 5],
label=3, id=4, group=4),
]
),
], categories=[str(i) for i in range(10)])
        # 2. Prepare a dataset with the expected segmentation mask (target dataset)
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((6, 10, 3)),
annotations=[
Mask(np.array([
[0, 1, 1, 1, 0, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
                        # the rasterized mask keeps only the interior of the
                        # polygons (border pixels are dropped, though not
                        # uniformly along every edge)
),
attributes={ 'is_crowd': True },
label=3, id=4, group=4),
], attributes={'id': 1}
),
], categories=[str(i) for i in range(10)])
        # 3. Convert the source dataset to the target, converting the polygon
        # annotations to a mask, and verify the resulting mask equals the expected one.
with TestDir() as test_dir:
self._test_save_and_load(source_dataset,
partial(CocoInstancesConverter.convert, segmentation_mode='mask'),
test_dir, target_dataset=target_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_convert_masks_to_polygons(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 10, 3)),
annotations=[
Mask(np.array([
[0, 1, 1, 1, 0, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]),
label=3, id=4, group=4),
]
),
], categories=[str(i) for i in range(10)])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 10, 3)),
annotations=[
Polygon(
[1, 0, 3, 2, 3, 0, 1, 0],
label=3, id=4, group=4,
attributes={ 'is_crowd': False }),
Polygon(
[5, 0, 5, 3, 8, 0, 5, 0],
label=3, id=4, group=4,
attributes={ 'is_crowd': False }),
], attributes={'id': 1}
),
], categories=[str(i) for i in range(10)])
with TestDir() as test_dir:
self._test_save_and_load(source_dataset,
partial(CocoInstancesConverter.convert, segmentation_mode='polygons'),
test_dir,
target_dataset=target_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_images(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id=1, subset='train', attributes={'id': 1}),
DatasetItem(id=2, subset='train', attributes={'id': 2}),
DatasetItem(id=2, subset='val', attributes={'id': 2}),
DatasetItem(id=3, subset='val', attributes={'id': 3}),
DatasetItem(id=4, subset='val', attributes={'id': 4}),
DatasetItem(id=5, subset='test', attributes={'id': 1}),
])
with TestDir() as test_dir:
self._test_save_and_load(expected_dataset,
CocoImageInfoConverter.convert, test_dir)
@mark_requirement(Requirements.DATUM_231)
def test_can_save_dataset_with_cjk_categories(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id=1, subset='train', image=np.ones((4, 4, 3)),
annotations=[
Bbox(0, 1, 2, 2,
label=0, group=1, id=1,
attributes={ 'is_crowd': False }),
], attributes={'id': 1}),
DatasetItem(id=2, subset='train', image=np.ones((4, 4, 3)),
annotations=[
Bbox(1, 0, 2, 2, label=1, group=2, id=2,
attributes={ 'is_crowd': False }),
], attributes={'id': 2}),
DatasetItem(id=3, subset='train', image=np.ones((4, 4, 3)),
annotations=[
Bbox(0, 1, 2, 2, label=2, group=3, id=3,
attributes={ 'is_crowd': False }),
], attributes={'id': 3}),
],
categories=[
"고양이", "ネコ", "猫"
]
)
with TestDir() as test_dir:
self._test_save_and_load(expected_dataset,
CocoInstancesConverter.convert, test_dir)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_dataset_with_cyrillic_and_spaces_in_filename(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='кириллица с пробелом', subset='train',
attributes={'id': 1}),
])
with TestDir() as test_dir:
self._test_save_and_load(expected_dataset,
CocoImageInfoConverter.convert, test_dir)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_labels(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id=1, subset='train',
annotations=[
Label(4, id=1, group=1),
Label(9, id=2, group=2),
], attributes={'id': 1}),
], categories=[str(i) for i in range(10)])
with TestDir() as test_dir:
self._test_save_and_load(expected_dataset,
CocoLabelsConverter.convert, test_dir)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_keypoints(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, subset='train', image=np.zeros((5, 5, 3)),
annotations=[
# Full instance annotations: polygon + keypoints
Points([0, 0, 0, 2, 4, 1], [0, 1, 2],
label=3, group=1, id=1),
Polygon([0, 0, 4, 0, 4, 4],
label=3, group=1, id=1),
# Full instance annotations: bbox + keypoints
Points([1, 2, 3, 4, 2, 3], group=2, id=2),
Bbox(1, 2, 2, 2, group=2, id=2),
# Solitary keypoints
Points([1, 2, 0, 2, 4, 1], label=5, id=3),
# Some other solitary annotations (bug #1387)
Polygon([0, 0, 4, 0, 4, 4], label=3, id=4),
# Solitary keypoints with no label
Points([0, 0, 1, 2, 3, 4], [0, 1, 2], id=5),
]),
], categories={
AnnotationType.label: LabelCategories.from_iterable(
str(i) for i in range(10)),
AnnotationType.points: PointsCategories.from_iterable(
(i, None, [[0, 1], [1, 2]]) for i in range(10)
),
})
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, subset='train', image=np.zeros((5, 5, 3)),
annotations=[
Points([0, 0, 0, 2, 4, 1], [0, 1, 2],
label=3, group=1, id=1,
attributes={'is_crowd': False}),
Polygon([0, 0, 4, 0, 4, 4],
label=3, group=1, id=1,
attributes={'is_crowd': False}),
Points([1, 2, 3, 4, 2, 3],
group=2, id=2,
attributes={'is_crowd': False}),
Bbox(1, 2, 2, 2,
group=2, id=2,
attributes={'is_crowd': False}),
Points([1, 2, 0, 2, 4, 1],
label=5, group=3, id=3,
attributes={'is_crowd': False}),
Bbox(0, 1, 4, 1,
label=5, group=3, id=3,
attributes={'is_crowd': False}),
Points([0, 0, 1, 2, 3, 4], [0, 1, 2],
group=5, id=5,
attributes={'is_crowd': False}),
Bbox(1, 2, 2, 2,
group=5, id=5,
attributes={'is_crowd': False}),
], attributes={'id': 1}),
], categories={
AnnotationType.label: LabelCategories.from_iterable(
str(i) for i in range(10)),
AnnotationType.points: PointsCategories.from_iterable(
(i, None, [[0, 1], [1, 2]]) for i in range(10)
),
})
with TestDir() as test_dir:
self._test_save_and_load(source_dataset,
CocoPersonKeypointsConverter.convert, test_dir,
target_dataset=target_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_dataset_with_no_subsets(self):
test_dataset = Dataset.from_iterable([
DatasetItem(id=1, attributes={'id': 1}),
DatasetItem(id=2, attributes={'id': 2}),
])
with TestDir() as test_dir:
self._test_save_and_load(test_dataset,
CocoConverter.convert, test_dir)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_dataset_with_image_info(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=Image(path='1.jpg', size=(10, 15)),
attributes={'id': 1}),
])
with TestDir() as test_dir:
self._test_save_and_load(expected_dataset,
CocoImageInfoConverter.convert, test_dir)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_relative_paths(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='1', image=np.ones((4, 2, 3)),
attributes={'id': 1}),
DatasetItem(id='subdir1/1', image=np.ones((2, 6, 3)),
attributes={'id': 2}),
DatasetItem(id='subdir2/1', image=np.ones((5, 4, 3)),
attributes={'id': 3}),
])
with TestDir() as test_dir:
self._test_save_and_load(expected_dataset,
partial(CocoImageInfoConverter.convert, save_images=True),
test_dir, require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_image_with_arbitrary_extension(self):
expected = Dataset.from_iterable([
DatasetItem(id='q/1', image=Image(path='q/1.JPEG',
data=np.zeros((4, 3, 3))), attributes={'id': 1}),
DatasetItem(id='a/b/c/2', image=Image(path='a/b/c/2.bmp',
data=np.zeros((3, 4, 3))), attributes={'id': 2}),
])
with TestDir() as test_dir:
self._test_save_and_load(expected,
partial(CocoImageInfoConverter.convert, save_images=True),
test_dir, require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_preserve_coco_ids(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id='some/name1', image=np.ones((4, 2, 3)),
attributes={'id': 40}),
])
with TestDir() as test_dir:
self._test_save_and_load(expected_dataset,
partial(CocoImageInfoConverter.convert, save_images=True),
test_dir, require_images=True)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_annotation_attributes(self):
expected_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.ones((4, 2, 3)), annotations=[
Polygon([0, 0, 4, 0, 4, 4], label=5, group=1, id=1,
attributes={'is_crowd': False, 'x': 5, 'y': 'abc'}),
], attributes={'id': 1})
], categories=[str(i) for i in range(10)])
with TestDir() as test_dir:
self._test_save_and_load(expected_dataset,
CocoConverter.convert, test_dir)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_auto_annotation_ids(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=2, image=np.ones((4, 2, 3)), annotations=[
Polygon([0, 0, 4, 0, 4, 4], label=0),
])
], categories=[str(i) for i in range(10)])
target_dataset = Dataset.from_iterable([
DatasetItem(id=2, image=np.ones((4, 2, 3)), annotations=[
Polygon([0, 0, 4, 0, 4, 4], label=0, id=1, group=1,
attributes={'is_crowd': False}),
], attributes={'id': 1})
], categories=[str(i) for i in range(10)])
with TestDir() as test_dir:
self._test_save_and_load(source_dataset,
CocoConverter.convert, test_dir, target_dataset=target_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_subset_can_contain_underscore(self):
source_dataset = Dataset.from_iterable([
DatasetItem(id=2, image=np.ones((4, 2, 3)), subset='subset_1',
annotations=[Polygon([0, 0, 4, 0, 4, 4], label=0, id=1, group=1,
attributes={'is_crowd': False}),
], attributes={'id': 1})
], categories=[str(i) for i in range(10)])
with TestDir() as test_dir:
self._test_save_and_load(source_dataset,
CocoConverter.convert, test_dir)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_reindex(self):
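        # With reindex=True the converter discards the original COCO ids and
        # assigns fresh sequential image and annotation ids on export.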
source_dataset = Dataset.from_iterable([
DatasetItem(id=2, image=np.ones((4, 2, 3)), annotations=[
Polygon([0, 0, 4, 0, 4, 4], label=0, id=5),
], attributes={'id': 22})
], categories=[str(i) for i in range(10)])
target_dataset = Dataset.from_iterable([
DatasetItem(id=2, image=np.ones((4, 2, 3)), annotations=[
Polygon([0, 0, 4, 0, 4, 4], label=0, id=1, group=1,
attributes={'is_crowd': False}),
], attributes={'id': 1})
], categories=[str(i) for i in range(10)])
with TestDir() as test_dir:
self._test_save_and_load(source_dataset,
partial(CocoConverter.convert, reindex=True),
test_dir, target_dataset=target_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_images_in_single_dir(self):
dataset = Dataset.from_iterable([
DatasetItem(id=1, subset='train', image=np.ones((2, 4, 3)),
attributes={'id': 1}),
])
with TestDir() as test_dir:
self._test_save_and_load(dataset,
partial(CocoImageInfoConverter.convert, save_images=True,
merge_images=True),
test_dir, require_images=True)
self.assertTrue(osp.isfile(osp.join(test_dir, 'images', '1.jpg')))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_images_in_separate_dirs(self):
dataset = Dataset.from_iterable([
DatasetItem(id=1, subset='train', image=np.ones((2, 4, 3)),
attributes={'id': 1}),
])
with TestDir() as test_dir:
self._test_save_and_load(dataset,
partial(CocoImageInfoConverter.convert, save_images=True,
merge_images=False),
test_dir, require_images=True)
self.assertTrue(osp.isfile(osp.join(
test_dir, 'images', 'train', '1.jpg')))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_inplace_save_writes_only_updated_data(self):
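        # An in-place patch (put/remove followed by save) should rewrite only
        # the affected subsets: subset 'c' files disappear, subset 'a' gets the
        # new image, and subset 'b' is left untouched.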
expected = Dataset.from_iterable([
DatasetItem(1, subset='a'),
DatasetItem(2, subset='a', image=np.ones((3, 2, 3))),
DatasetItem(2, subset='b'),
])
with TestDir() as path:
dataset = Dataset.from_iterable([
DatasetItem(1, subset='a'),
DatasetItem(2, subset='b'),
DatasetItem(3, subset='c', image=np.ones((2, 2, 3))),
])
dataset.export(path, 'coco', save_images=True)
dataset.put(DatasetItem(2, subset='a', image=np.ones((3, 2, 3))))
dataset.remove(3, 'c')
dataset.save(save_images=True)
self.assertEqual({'image_info_a.json', 'image_info_b.json'},
set(os.listdir(osp.join(path, 'annotations'))))
self.assertTrue(osp.isfile(osp.join(path, 'images', 'a', '2.jpg')))
self.assertFalse(osp.isfile(osp.join(path, 'images', 'c', '3.jpg')))
compare_datasets(self, expected, Dataset.import_from(path, 'coco'),
require_images=True, ignored_attrs={'id'})
    @mark_requirement(Requirements.DATUM_BUG_425)
def test_can_save_and_load_grouped_masks_and_polygons(self):
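        # A mask and a polygon that share one group are expected to be merged
        # into a single crowd mask on export, so only the mask survives the
        # round trip (see the target dataset below).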
source_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 5, 3)),
annotations=[
Mask(np.array([
[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]],
),
label=0, id=0, z_order=0, group=1),
Polygon([1, 1, 1, 3, 3, 3, 3, 1],
label=0, id=1, z_order=0, group=1),
]
),
], categories=['label_1'])
target_dataset = Dataset.from_iterable([
DatasetItem(id=1, image=np.zeros((5, 5, 3)),
annotations=[
Mask(np.array([
[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]],
),
attributes={ 'is_crowd': True },
label=0, id=0, group=1),
], attributes={'id': 1}
),
], categories=['label_1'])
with TestDir() as test_dir:
self._test_save_and_load(source_dataset,
partial(CocoInstancesConverter.convert),
test_dir, target_dataset=target_dataset)
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_panoptic_with_meta_file(self):
dataset = Dataset.from_iterable([
DatasetItem(id=1, subset='train', image=np.ones((4, 4, 3)),
annotations=[
Mask(image=np.array([
[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 1, 1],
[0, 0, 0, 0]
]),
attributes={ 'is_crowd': False },
label=4, group=3, id=3),
], attributes={'id': 1}),
DatasetItem(id=2, subset='val', image=np.ones((5, 5, 3)),
annotations=[
Mask(image=np.array([
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1]
]),
attributes={ 'is_crowd': False },
label=2, group=2, id=2),
], attributes={'id': 2}),
], categories=[str(i) for i in range(10)])
with TestDir() as test_dir:
self._test_save_and_load(dataset,
partial(CocoPanopticConverter.convert, save_images=True,
save_dataset_meta=True),
test_dir, require_images=True)
self.assertTrue(osp.isfile(osp.join(test_dir, 'dataset_meta.json')))
@mark_requirement(Requirements.DATUM_GENERAL_REQ)
def test_can_save_and_load_stuff_with_meta_file(self):
dataset = Dataset.from_iterable([
DatasetItem(id=1, subset='train', image=np.ones((4, 4, 3)),
annotations=[
Mask(np.array([
[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 1, 1],
[0, 0, 0, 0]],
),
attributes={ 'is_crowd': False },
label=4, group=3, id=3),
], attributes={'id': 2}),
DatasetItem(id=2, subset='val', image=np.ones((4, 4, 3)),
annotations=[
Mask(np.array([
[0, 0, 0, 0],
[1, 1, 1, 0],
[1, 1, 0, 0],
[0, 0, 0, 0]],
),
attributes={ 'is_crowd': False },
label=4, group=3, id=3),
], attributes={'id': 1}),
], categories=[str(i) for i in range(10)])
with TestDir() as test_dir:
self._test_save_and_load(dataset,
                partial(CocoStuffConverter.convert, save_images=True,
save_dataset_meta=True),
test_dir, require_images=True)
self.assertTrue(osp.isfile(osp.join(test_dir, 'dataset_meta.json')))
| 42.959529
| 117
| 0.498527
|
28a81663ed9ef0dadf52fcce182b92acdbf7bef0
| 1,480
|
py
|
Python
|
vmware_nsx/shell/admin/plugins/nsxv3/resources/config.py
|
salv-orlando/vmware-nsx
|
6ad0d595aa8099004eb6dd5ff62c7a91b0e11dfd
|
[
"Apache-2.0"
] | null | null | null |
vmware_nsx/shell/admin/plugins/nsxv3/resources/config.py
|
salv-orlando/vmware-nsx
|
6ad0d595aa8099004eb6dd5ff62c7a91b0e11dfd
|
[
"Apache-2.0"
] | null | null | null |
vmware_nsx/shell/admin/plugins/nsxv3/resources/config.py
|
salv-orlando/vmware-nsx
|
6ad0d595aa8099004eb6dd5ff62c7a91b0e11dfd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.callbacks import registry
from oslo_log import log as logging
from vmware_nsx.shell.admin.plugins.common import constants
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils
from vmware_nsx.shell import resources as shell
LOG = logging.getLogger(__name__)
@admin_utils.output_header
@admin_utils.unpack_payload
def validate_configuration(resource, event, trigger, **kwargs):
"""Validate the nsxv3 configuration"""
try:
utils.NsxV3PluginWrapper()
except Exception as e:
LOG.error("Configuration validation failed: %s", e)
else:
LOG.info("Configuration validation succeeded")
registry.subscribe(validate_configuration,
constants.CONFIG,
shell.Operations.VALIDATE.value)
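# The check above is normally driven through the vmware-nsx admin shell; the
# exact command depends on the installed wrapper, but the usual pattern is
# something like (illustrative, not verified against this tree):
#     nsxadmin -r config -o validate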
| 36.097561
| 78
| 0.741216
|
f39c18706ce845aec0ce36d7d16aac0c32d22623
| 1,041
|
py
|
Python
|
list/migrations/0026_auto_20211116_1920.py
|
NiklasMerz/shoppinglist
|
38c494b2a2f80a0c543beaf0d9d9a75870bdbb22
|
[
"MIT"
] | null | null | null |
list/migrations/0026_auto_20211116_1920.py
|
NiklasMerz/shoppinglist
|
38c494b2a2f80a0c543beaf0d9d9a75870bdbb22
|
[
"MIT"
] | 45
|
2021-11-03T20:48:50.000Z
|
2021-12-14T21:22:12.000Z
|
list/migrations/0026_auto_20211116_1920.py
|
NiklasMerz/shoppinglist
|
38c494b2a2f80a0c543beaf0d9d9a75870bdbb22
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.3 on 2021-11-16 19:20
from django.db import migrations, models
import django.db.models.deletion
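# This migration drops the direct LineItem -> Item link, introduces a BrandItem
# model (a branded variant of an Item), and re-links LineItem to BrandItem
# through a new nullable foreign key.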
class Migration(migrations.Migration):
dependencies = [
('list', '0025_remove_lineitem_time'),
]
operations = [
migrations.RemoveField(
model_name='lineitem',
name='item',
),
migrations.CreateModel(
name='BrandItem',
fields=[
('id', models.BigAutoField(primary_key=True, serialize=False)),
('description', models.CharField(max_length=255)),
('item', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='brand_items', to='list.item')),
],
),
migrations.AddField(
model_name='lineitem',
name='brand_item',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='line_items', to='list.branditem'),
),
]
| 32.53125
| 157
| 0.602305
|
e7115ab70839b3be92f80d5f7dbe422256d9979d
| 54,410
|
py
|
Python
|
Lib/site-packages/pymongo/mongo_client.py
|
adzhou/Python27
|
a7113b69d54a04cc780143241c2f1fe81939ad3a
|
[
"bzip2-1.0.6"
] | 1
|
2017-08-07T14:52:02.000Z
|
2017-08-07T14:52:02.000Z
|
Lib/site-packages/pymongo/mongo_client.py
|
adzhou/Python27
|
a7113b69d54a04cc780143241c2f1fe81939ad3a
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/pymongo/mongo_client.py
|
adzhou/Python27
|
a7113b69d54a04cc780143241c2f1fe81939ad3a
|
[
"bzip2-1.0.6"
] | null | null | null |
# Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Tools for connecting to MongoDB.
.. seealso:: :doc:`/examples/high_availability` for examples of connecting
to replica sets or sets of mongos servers.
To get a :class:`~pymongo.database.Database` instance from a
:class:`MongoClient` use either dictionary-style or attribute-style
access:
.. doctest::
>>> from pymongo import MongoClient
>>> c = MongoClient()
>>> c.test_database
Database(MongoClient('localhost', 27017), u'test_database')
>>> c['test-database']
Database(MongoClient('localhost', 27017), u'test-database')
"""
import contextlib
import datetime
import threading
import warnings
import weakref
from collections import defaultdict
from bson.codec_options import DEFAULT_CODEC_OPTIONS
from bson.py3compat import (integer_types,
string_type)
from bson.son import SON
from pymongo import (common,
database,
helpers,
message,
periodic_executor,
uri_parser)
from pymongo.client_options import ClientOptions
from pymongo.cursor_manager import CursorManager
from pymongo.errors import (AutoReconnect,
ConfigurationError,
ConnectionFailure,
InvalidOperation,
InvalidURI,
NetworkTimeout,
NotMasterError,
OperationFailure)
from pymongo.read_preferences import ReadPreference
from pymongo.server_selectors import (writable_preferred_server_selector,
writable_server_selector)
from pymongo.server_type import SERVER_TYPE
from pymongo.topology import Topology
from pymongo.topology_description import TOPOLOGY_TYPE
from pymongo.settings import TopologySettings
from pymongo.write_concern import WriteConcern
class MongoClient(common.BaseObject):
HOST = "localhost"
PORT = 27017
# Define order to retrieve options from ClientOptions for __repr__.
# No host/port; these are retrieved from TopologySettings.
_constructor_args = ('document_class', 'tz_aware', 'connect')
def __init__(
self,
host=None,
port=None,
document_class=dict,
tz_aware=False,
connect=True,
**kwargs):
"""Client for a MongoDB instance, a replica set, or a set of mongoses.
The client object is thread-safe and has connection-pooling built in.
If an operation fails because of a network error,
:class:`~pymongo.errors.ConnectionFailure` is raised and the client
reconnects in the background. Application code should handle this
exception (recognizing that the operation failed) and then continue to
execute.
The `host` parameter can be a full `mongodb URI
<http://dochub.mongodb.org/core/connections>`_, in addition to
a simple hostname. It can also be a list of hostnames or
URIs. Any port specified in the host string(s) will override
the `port` parameter. If multiple mongodb URIs containing
database or auth information are passed, the last database,
        username, and password present will be used. For usernames and
        passwords, reserved characters like ':', '/', '+' and '@' must be
        escaped following RFC 2396.
.. warning:: When using PyMongo in a multiprocessing context, please
read :ref:`multiprocessing` first.
:Parameters:
- `host` (optional): hostname or IP address of a single mongod or
mongos instance to connect to, or a mongodb URI, or a list of
hostnames / mongodb URIs. If `host` is an IPv6 literal
it must be enclosed in '[' and ']' characters following
the RFC2732 URL syntax (e.g. '[::1]' for localhost). Multihomed
and round robin DNS addresses are **not** supported.
- `port` (optional): port number on which to connect
- `document_class` (optional): default class to use for
documents returned from queries on this client
- `tz_aware` (optional): if ``True``,
:class:`~datetime.datetime` instances returned as values
in a document by this :class:`MongoClient` will be timezone
aware (otherwise they will be naive)
- `connect` (optional): if ``True`` (the default), immediately
begin connecting to MongoDB in the background. Otherwise connect
on the first operation.
| **Other optional parameters can be passed as keyword arguments:**
- `maxPoolSize` (optional): The maximum allowable number of
concurrent connections to each connected server. Requests to a
server will block if there are `maxPoolSize` outstanding
connections to the requested server. Defaults to 100. Cannot be 0.
- `minPoolSize` (optional): The minimum required number of concurrent
connections that the pool will maintain to each connected server.
Default is 0.
- `maxIdleTimeMS` (optional): The maximum number of milliseconds that
a connection can remain idle in the pool before being removed and
replaced. Defaults to `None` (no limit).
- `socketTimeoutMS`: (integer or None) Controls how long (in
milliseconds) the driver will wait for a response after sending an
ordinary (non-monitoring) database operation before concluding that
a network error has occurred. Defaults to ``None`` (no timeout).
- `connectTimeoutMS`: (integer or None) Controls how long (in
milliseconds) the driver will wait during server monitoring when
connecting a new socket to a server before concluding the server
is unavailable. Defaults to ``20000`` (20 seconds).
- `serverSelectionTimeoutMS`: (integer) Controls how long (in
milliseconds) the driver will wait to find an available,
appropriate server to carry out a database operation; while it is
waiting, multiple server monitoring operations may be carried out,
each controlled by `connectTimeoutMS`. Defaults to ``30000`` (30
seconds).
- `waitQueueTimeoutMS`: (integer or None) How long (in milliseconds)
a thread will wait for a socket from the pool if the pool has no
free sockets. Defaults to ``None`` (no timeout).
- `waitQueueMultiple`: (integer or None) Multiplied by maxPoolSize
to give the number of threads allowed to wait for a socket at one
time. Defaults to ``None`` (no limit).
- `socketKeepAlive`: (boolean) Whether to send periodic keep-alive
packets on connected sockets. Defaults to ``False`` (do not send
keep-alive packets).
- `heartbeatFrequencyMS`: (optional) The number of milliseconds
between periodic server checks, or None to accept the default
frequency of 10 seconds.
- `event_listeners`: a list or tuple of event listeners. See
:mod:`~pymongo.monitoring` for details.
| **Write Concern options:**
| (Only set if passed. No default values.)
- `w`: (integer or string) If this is a replica set, write operations
will block until they have been replicated to the specified number
or tagged set of servers. `w=<int>` always includes the replica set
primary (e.g. w=3 means write to the primary and wait until
replicated to **two** secondaries). Passing w=0 **disables write
acknowledgement** and all other write concern options.
- `wtimeout`: (integer) Used in conjunction with `w`. Specify a value
in milliseconds to control how long to wait for write propagation
to complete. If replication does not complete in the given
timeframe, a timeout exception is raised.
- `j`: If ``True`` block until write operations have been committed
to the journal. Cannot be used in combination with `fsync`. Prior
to MongoDB 2.6 this option was ignored if the server was running
without journaling. Starting with MongoDB 2.6 write operations will
fail with an exception if this option is used when the server is
running without journaling.
- `fsync`: If ``True`` and the server is running without journaling,
blocks until the server has synced all data files to disk. If the
server is running with journaling, this acts the same as the `j`
option, blocking until write operations have been committed to the
journal. Cannot be used in combination with `j`.
| **Replica set keyword arguments for connecting with a replica set
- either directly or via a mongos:**
- `replicaSet`: (string or None) The name of the replica set to
connect to. The driver will verify that all servers it connects to
match this name. Implies that the hosts specified are a seed list
and the driver should attempt to find all members of the set.
Defaults to ``None``.
- `read_preference`: The read preference for this client.
See :class:`~pymongo.read_preferences.ReadPreference` for all
available read preference options. Defaults to ``PRIMARY``.
| **SSL configuration:**
- `ssl`: If ``True``, create the connection to the server using SSL.
Defaults to ``False``.
- `ssl_certfile`: The certificate file used to identify the local
connection against mongod. Implies ``ssl=True``. Defaults to
``None``.
- `ssl_keyfile`: The private keyfile used to identify the local
connection against mongod. If included with the ``certfile`` then
only the ``ssl_certfile`` is needed. Implies ``ssl=True``.
Defaults to ``None``.
- `ssl_pem_passphrase`: The password or passphrase for decrypting
the private key in ``ssl_certfile`` or ``ssl_keyfile``. Only
necessary if the private key is encrypted. Only supported by python
2.7.9+ (pypy 2.5.1+) and 3.3+. Defaults to ``None``.
- `ssl_cert_reqs`: Specifies whether a certificate is required from
the other side of the connection, and whether it will be validated
if provided. It must be one of the three values ``ssl.CERT_NONE``
(certificates ignored), ``ssl.CERT_REQUIRED`` (certificates
required and validated), or ``ssl.CERT_OPTIONAL`` (the same as
CERT_REQUIRED, unless the server was configured to use anonymous
ciphers). If the value of this parameter is not ``ssl.CERT_NONE``
and a value is not provided for ``ssl_ca_certs`` PyMongo will
attempt to load system provided CA certificates. If the python
version in use does not support loading system CA certificates
then the ``ssl_ca_certs`` parameter must point to a file of CA
certificates. Implies ``ssl=True``. Defaults to
``ssl.CERT_REQUIRED`` if not provided and ``ssl=True``.
- `ssl_ca_certs`: The ca_certs file contains a set of concatenated
"certification authority" certificates, which are used to validate
certificates passed from the other end of the connection.
Implies ``ssl=True``. Defaults to ``None``.
- `ssl_crlfile`: The path to a PEM or DER formatted certificate
revocation list. Only supported by python 2.7.9+ (pypy 2.5.1+)
and 3.4+. Defaults to ``None``.
- `ssl_match_hostname`: If ``True`` (the default), and
`ssl_cert_reqs` is not ``ssl.CERT_NONE``, enables hostname
verification using the :func:`~ssl.match_hostname` function from
python's :mod:`~ssl` module. Think very carefully before setting
this to ``False`` as that could make your application vulnerable to
man-in-the-middle attacks.
| **Read Concern options:**
| (If not set explicitly, this will use the server default)
- `readConcernLevel`: (string) The read concern level specifies the
level of isolation for read operations. For example, a read
operation using a read concern level of ``majority`` will only
return data that has been written to a majority of nodes. If the
level is left unspecified, the server default will be used.
.. mongodoc:: connections
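        **Example** (an illustrative sketch; the host names, replica set
        name, and database name below are placeholders, not defaults)::
            client = MongoClient(
                'mongodb://db1.example.net:27017,db2.example.net:27017',
                replicaSet='rs0', w='majority')
            collection = client['app_db']['events']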
.. versionchanged:: 3.0
:class:`~pymongo.mongo_client.MongoClient` is now the one and only
client class for a standalone server, mongos, or replica set.
It includes the functionality that had been split into
:class:`~pymongo.mongo_client.MongoReplicaSetClient`: it can connect
to a replica set, discover all its members, and monitor the set for
stepdowns, elections, and reconfigs.
The :class:`~pymongo.mongo_client.MongoClient` constructor no
longer blocks while connecting to the server or servers, and it no
longer raises :class:`~pymongo.errors.ConnectionFailure` if they
are unavailable, nor :class:`~pymongo.errors.ConfigurationError`
if the user's credentials are wrong. Instead, the constructor
returns immediately and launches the connection process on
background threads.
Therefore the ``alive`` method is removed since it no longer
provides meaningful information; even if the client is disconnected,
it may discover a server in time to fulfill the next operation.
In PyMongo 2.x, :class:`~pymongo.MongoClient` accepted a list of
standalone MongoDB servers and used the first it could connect to::
MongoClient(['host1.com:27017', 'host2.com:27017'])
A list of multiple standalones is no longer supported; if multiple
servers are listed they must be members of the same replica set, or
mongoses in the same sharded cluster.
The behavior for a list of mongoses is changed from "high
availability" to "load balancing". Before, the client connected to
the lowest-latency mongos in the list, and used it until a network
error prompted it to re-evaluate all mongoses' latencies and
reconnect to one of them. In PyMongo 3, the client monitors its
network latency to all the mongoses continuously, and distributes
operations evenly among those with the lowest latency. See
:ref:`mongos-load-balancing` for more information.
The ``connect`` option is added.
The ``start_request``, ``in_request``, and ``end_request`` methods
are removed, as well as the ``auto_start_request`` option.
The ``copy_database`` method is removed, see the
:doc:`copy_database examples </examples/copydb>` for alternatives.
The :meth:`MongoClient.disconnect` method is removed; it was a
synonym for :meth:`~pymongo.MongoClient.close`.
:class:`~pymongo.mongo_client.MongoClient` no longer returns an
instance of :class:`~pymongo.database.Database` for attribute names
with leading underscores. You must use dict-style lookups instead::
client['__my_database__']
Not::
client.__my_database__
"""
if host is None:
host = self.HOST
if isinstance(host, string_type):
host = [host]
if port is None:
port = self.PORT
if not isinstance(port, int):
raise TypeError("port must be an instance of int")
seeds = set()
username = None
password = None
dbase = None
opts = {}
for entity in host:
if "://" in entity:
if entity.startswith("mongodb://"):
res = uri_parser.parse_uri(entity, port, warn=True)
seeds.update(res["nodelist"])
username = res["username"] or username
password = res["password"] or password
dbase = res["database"] or dbase
opts = res["options"]
else:
idx = entity.find("://")
raise InvalidURI("Invalid URI scheme: "
"%s" % (entity[:idx],))
else:
seeds.update(uri_parser.split_hosts(entity, port))
if not seeds:
raise ConfigurationError("need to specify at least one host")
# _pool_class, _monitor_class, and _condition_class are for deep
# customization of PyMongo, e.g. Motor.
pool_class = kwargs.pop('_pool_class', None)
monitor_class = kwargs.pop('_monitor_class', None)
condition_class = kwargs.pop('_condition_class', None)
keyword_opts = kwargs
keyword_opts['document_class'] = document_class
keyword_opts['tz_aware'] = tz_aware
keyword_opts['connect'] = connect
# Validate all keyword options.
keyword_opts = dict(common.validate(k, v)
for k, v in keyword_opts.items())
opts.update(keyword_opts)
self.__options = options = ClientOptions(
username, password, dbase, opts)
self.__default_database_name = dbase
self.__lock = threading.Lock()
self.__cursor_manager = None
self.__kill_cursors_queue = []
self._event_listeners = options.pool_options.event_listeners
# Cache of existing indexes used by ensure_index ops.
self.__index_cache = {}
self.__index_cache_lock = threading.Lock()
super(MongoClient, self).__init__(options.codec_options,
options.read_preference,
options.write_concern,
options.read_concern)
self.__all_credentials = {}
creds = options.credentials
if creds:
self._cache_credentials(creds.source, creds)
self._topology_settings = TopologySettings(
seeds=seeds,
replica_set_name=options.replica_set_name,
pool_class=pool_class,
pool_options=options.pool_options,
monitor_class=monitor_class,
condition_class=condition_class,
local_threshold_ms=options.local_threshold_ms,
server_selection_timeout=options.server_selection_timeout,
heartbeat_frequency=options.heartbeat_frequency)
self._topology = Topology(self._topology_settings)
if connect:
self._topology.open()
def target():
client = self_ref()
if client is None:
return False # Stop the executor.
MongoClient._process_periodic_tasks(client)
return True
executor = periodic_executor.PeriodicExecutor(
interval=common.KILL_CURSOR_FREQUENCY,
min_interval=0.5,
target=target,
name="pymongo_kill_cursors_thread")
# We strongly reference the executor and it weakly references us via
# this closure. When the client is freed, stop the executor soon.
self_ref = weakref.ref(self, executor.close)
self._kill_cursors_executor = executor
executor.open()
def _cache_credentials(self, source, credentials, connect=False):
"""Save a set of authentication credentials.
The credentials are used to login a socket whenever one is created.
If `connect` is True, verify the credentials on the server first.
"""
# Don't let other threads affect this call's data.
all_credentials = self.__all_credentials.copy()
if source in all_credentials:
# Nothing to do if we already have these credentials.
if credentials == all_credentials[source]:
return
raise OperationFailure('Another user is already authenticated '
'to this database. You must logout first.')
if connect:
server = self._get_topology().select_server(
writable_preferred_server_selector)
# get_socket() logs out of the database if logged in with old
# credentials, and logs in with new ones.
with server.get_socket(all_credentials) as sock_info:
sock_info.authenticate(credentials)
# If several threads run _cache_credentials at once, last one wins.
self.__all_credentials[source] = credentials
def _purge_credentials(self, source):
"""Purge credentials from the authentication cache."""
self.__all_credentials.pop(source, None)
def _cached(self, dbname, coll, index):
"""Test if `index` is cached."""
cache = self.__index_cache
now = datetime.datetime.utcnow()
with self.__index_cache_lock:
return (dbname in cache and
coll in cache[dbname] and
index in cache[dbname][coll] and
now < cache[dbname][coll][index])
def _cache_index(self, dbname, collection, index, cache_for):
"""Add an index to the index cache for ensure_index operations."""
now = datetime.datetime.utcnow()
expire = datetime.timedelta(seconds=cache_for) + now
with self.__index_cache_lock:
            if dbname not in self.__index_cache:
self.__index_cache[dbname] = {}
self.__index_cache[dbname][collection] = {}
self.__index_cache[dbname][collection][index] = expire
elif collection not in self.__index_cache[dbname]:
self.__index_cache[dbname][collection] = {}
self.__index_cache[dbname][collection][index] = expire
else:
self.__index_cache[dbname][collection][index] = expire
def _purge_index(self, database_name,
collection_name=None, index_name=None):
"""Purge an index from the index cache.
If `index_name` is None purge an entire collection.
If `collection_name` is None purge an entire database.
"""
with self.__index_cache_lock:
            if database_name not in self.__index_cache:
return
if collection_name is None:
del self.__index_cache[database_name]
return
            if collection_name not in self.__index_cache[database_name]:
return
if index_name is None:
del self.__index_cache[database_name][collection_name]
return
if index_name in self.__index_cache[database_name][collection_name]:
del self.__index_cache[database_name][collection_name][index_name]
def _server_property(self, attr_name):
"""An attribute of the current server's description.
If the client is not connected, this will block until a connection is
established or raise ServerSelectionTimeoutError if no server is
available.
Not threadsafe if used multiple times in a single method, since
the server may change. In such cases, store a local reference to a
ServerDescription first, then use its properties.
"""
server = self._topology.select_server(
writable_server_selector)
return getattr(server.description, attr_name)
@property
def event_listeners(self):
"""The event listeners registered for this client.
See :mod:`~pymongo.monitoring` for details.
"""
return self._event_listeners.event_listeners
@property
def address(self):
"""(host, port) of the current standalone, primary, or mongos, or None.
Accessing :attr:`address` raises :exc:`~.errors.InvalidOperation` if
the client is load-balancing among mongoses, since there is no single
address. Use :attr:`nodes` instead.
If the client is not connected, this will block until a connection is
established or raise ServerSelectionTimeoutError if no server is
available.
.. versionadded:: 3.0
"""
topology_type = self._topology._description.topology_type
if topology_type == TOPOLOGY_TYPE.Sharded:
raise InvalidOperation(
'Cannot use "address" property when load balancing among'
' mongoses, use "nodes" instead.')
if topology_type not in (TOPOLOGY_TYPE.ReplicaSetWithPrimary,
TOPOLOGY_TYPE.Single):
return None
return self._server_property('address')
@property
def primary(self):
"""The (host, port) of the current primary of the replica set.
Returns ``None`` if this client is not connected to a replica set,
there is no primary, or this client was created without the
`replicaSet` option.
.. versionadded:: 3.0
MongoClient gained this property in version 3.0 when
MongoReplicaSetClient's functionality was merged in.
"""
return self._topology.get_primary()
@property
def secondaries(self):
"""The secondary members known to this client.
A sequence of (host, port) pairs. Empty if this client is not
connected to a replica set, there are no visible secondaries, or this
client was created without the `replicaSet` option.
.. versionadded:: 3.0
MongoClient gained this property in version 3.0 when
MongoReplicaSetClient's functionality was merged in.
"""
return self._topology.get_secondaries()
@property
def arbiters(self):
"""Arbiters in the replica set.
A sequence of (host, port) pairs. Empty if this client is not
connected to a replica set, there are no arbiters, or this client was
created without the `replicaSet` option.
"""
return self._topology.get_arbiters()
@property
def is_primary(self):
"""If this client is connected to a server that can accept writes.
True if the current server is a standalone, mongos, or the primary of
a replica set. If the client is not connected, this will block until a
connection is established or raise ServerSelectionTimeoutError if no
server is available.
"""
return self._server_property('is_writable')
@property
def is_mongos(self):
"""If this client is connected to mongos. If the client is not
connected, this will block until a connection is established or raise
ServerSelectionTimeoutError if no server is available..
"""
return self._server_property('server_type') == SERVER_TYPE.Mongos
@property
def max_pool_size(self):
"""The maximum allowable number of concurrent connections to each
connected server. Requests to a server will block if there are
`maxPoolSize` outstanding connections to the requested server.
Defaults to 100. Cannot be 0.
When a server's pool has reached `max_pool_size`, operations for that
server block waiting for a socket to be returned to the pool. If
``waitQueueTimeoutMS`` is set, a blocked operation will raise
:exc:`~pymongo.errors.ConnectionFailure` after a timeout.
By default ``waitQueueTimeoutMS`` is not set.
"""
return self.__options.pool_options.max_pool_size
@property
def min_pool_size(self):
"""The minimum required number of concurrent connections that the pool
will maintain to each connected server. Default is 0.
"""
return self.__options.pool_options.min_pool_size
@property
def max_idle_time_ms(self):
"""The maximum number of milliseconds that a connection can remain
idle in the pool before being removed and replaced. Defaults to
`None` (no limit).
"""
return self.__options.pool_options.max_idle_time_ms
@property
def nodes(self):
"""Set of all currently connected servers.
.. warning:: When connected to a replica set the value of :attr:`nodes`
can change over time as :class:`MongoClient`'s view of the replica
set changes. :attr:`nodes` can also be an empty set when
:class:`MongoClient` is first instantiated and hasn't yet connected
to any servers, or a network partition causes it to lose connection
to all servers.
"""
description = self._topology.description
return frozenset(s.address for s in description.known_servers)
@property
def max_bson_size(self):
"""The largest BSON object the connected server accepts in bytes.
If the client is not connected, this will block until a connection is
established or raise ServerSelectionTimeoutError if no server is
available.
"""
return self._server_property('max_bson_size')
@property
def max_message_size(self):
"""The largest message the connected server accepts in bytes.
If the client is not connected, this will block until a connection is
established or raise ServerSelectionTimeoutError if no server is
available.
"""
return self._server_property('max_message_size')
@property
def max_write_batch_size(self):
"""The maxWriteBatchSize reported by the server.
If the client is not connected, this will block until a connection is
established or raise ServerSelectionTimeoutError if no server is
available.
Returns a default value when connected to server versions prior to
MongoDB 2.6.
"""
return self._server_property('max_write_batch_size')
@property
def local_threshold_ms(self):
"""The local threshold for this instance."""
return self.__options.local_threshold_ms
@property
def server_selection_timeout(self):
"""The server selection timeout for this instance in seconds."""
return self.__options.server_selection_timeout
def _is_writable(self):
"""Attempt to connect to a writable server, or return False.
"""
topology = self._get_topology() # Starts monitors if necessary.
try:
svr = topology.select_server(writable_server_selector)
# When directly connected to a secondary, arbiter, etc.,
# select_server returns it, whatever the selector. Check
# again if the server is writable.
return svr.description.is_writable
except ConnectionFailure:
return False
def close(self):
"""Disconnect from MongoDB.
Close all sockets in the connection pools and stop the monitor threads.
If this instance is used again it will be automatically re-opened and
the threads restarted.
"""
self._topology.close()
def set_cursor_manager(self, manager_class):
"""DEPRECATED - Set this client's cursor manager.
Raises :class:`TypeError` if `manager_class` is not a subclass of
:class:`~pymongo.cursor_manager.CursorManager`. A cursor manager
handles closing cursors. Different managers can implement different
policies in terms of when to actually kill a cursor that has
been closed.
:Parameters:
- `manager_class`: cursor manager to use
.. versionchanged:: 3.3
Deprecated, for real this time.
.. versionchanged:: 3.0
Undeprecated.
"""
warnings.warn(
"set_cursor_manager is Deprecated",
DeprecationWarning,
stacklevel=2)
manager = manager_class(self)
if not isinstance(manager, CursorManager):
raise TypeError("manager_class must be a subclass of "
"CursorManager")
self.__cursor_manager = manager
def _get_topology(self):
"""Get the internal :class:`~pymongo.topology.Topology` object.
If this client was created with "connect=False", calling _get_topology
launches the connection process in the background.
"""
self._topology.open()
return self._topology
@contextlib.contextmanager
def _get_socket(self, selector):
server = self._get_topology().select_server(selector)
try:
with server.get_socket(self.__all_credentials) as sock_info:
yield sock_info
except NetworkTimeout:
# The socket has been closed. Don't reset the server.
# Server Discovery And Monitoring Spec: "When an application
# operation fails because of any network error besides a socket
# timeout...."
raise
except NotMasterError:
# "When the client sees a "not master" error it MUST replace the
# server's description with type Unknown. It MUST request an
# immediate check of the server."
self._reset_server_and_request_check(server.description.address)
raise
except ConnectionFailure:
# "Client MUST replace the server's description with type Unknown
# ... MUST NOT request an immediate check of the server."
self.__reset_server(server.description.address)
raise
def _socket_for_writes(self):
return self._get_socket(writable_server_selector)
@contextlib.contextmanager
def _socket_for_reads(self, read_preference):
preference = read_preference or ReadPreference.PRIMARY
# Get a socket for a server matching the read preference, and yield
# sock_info, slave_ok. Server Selection Spec: "slaveOK must be sent to
# mongods with topology type Single. If the server type is Mongos,
# follow the rules for passing read preference to mongos, even for
# topology type Single."
# Thread safe: if the type is single it cannot change.
topology = self._get_topology()
single = topology.description.topology_type == TOPOLOGY_TYPE.Single
with self._get_socket(read_preference) as sock_info:
slave_ok = (single and not sock_info.is_mongos) or (
preference != ReadPreference.PRIMARY)
yield sock_info, slave_ok
def _send_message_with_response(self, operation, read_preference=None,
exhaust=False, address=None):
"""Send a message to MongoDB and return a Response.
:Parameters:
- `operation`: a _Query or _GetMore object.
- `read_preference` (optional): A ReadPreference.
- `exhaust` (optional): If True, the socket used stays checked out.
It is returned along with its Pool in the Response.
- `address` (optional): Optional address when sending a message
to a specific server, used for getMore.
"""
with self.__lock:
# If needed, restart kill-cursors thread after a fork.
self._kill_cursors_executor.open()
topology = self._get_topology()
if address:
server = topology.select_server_by_address(address)
if not server:
raise AutoReconnect('server %s:%d no longer available'
% address)
else:
selector = read_preference or writable_server_selector
server = topology.select_server(selector)
# A _Query's slaveOk bit is already set for queries with non-primary
# read preference. If this is a direct connection to a mongod, override
# and *always* set the slaveOk bit. See bullet point 2 in
# server-selection.rst#topology-type-single.
set_slave_ok = (
topology.description.topology_type == TOPOLOGY_TYPE.Single
and server.description.server_type != SERVER_TYPE.Mongos)
return self._reset_on_error(
server,
server.send_message_with_response,
operation,
set_slave_ok,
self.__all_credentials,
self._event_listeners,
exhaust)
def _reset_on_error(self, server, func, *args, **kwargs):
"""Execute an operation. Reset the server on network error.
Returns fn()'s return value on success. On error, clears the server's
pool and marks the server Unknown.
Re-raises any exception thrown by fn().
"""
try:
return func(*args, **kwargs)
except NetworkTimeout:
# The socket has been closed. Don't reset the server.
raise
except ConnectionFailure:
self.__reset_server(server.description.address)
raise
def __reset_server(self, address):
"""Clear our connection pool for a server and mark it Unknown."""
self._topology.reset_server(address)
def _reset_server_and_request_check(self, address):
"""Clear our pool for a server, mark it Unknown, and check it soon."""
self._topology.reset_server_and_request_check(address)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.address == other.address
return NotImplemented
def __ne__(self, other):
return not self == other
def _repr_helper(self):
def option_repr(option, value):
"""Fix options whose __repr__ isn't usable in a constructor."""
if option == 'document_class':
if value is dict:
return 'document_class=dict'
else:
return 'document_class=%s.%s' % (value.__module__,
value.__name__)
if option in common.TIMEOUT_VALIDATORS and value is not None:
return "%s=%s" % (option, int(value * 1000))
return '%s=%r' % (option, value)
# Host first...
options = ['host=%r' % [
'%s:%d' % (host, port)
for host, port in self._topology_settings.seeds]]
# ... then everything in self._constructor_args...
options.extend(
option_repr(key, self.__options._options[key])
for key in self._constructor_args)
# ... then everything else.
options.extend(
option_repr(key, self.__options._options[key])
for key in self.__options._options
if key not in set(self._constructor_args))
return ', '.join(options)
def __repr__(self):
return ("MongoClient(%s)" % (self._repr_helper(),))
def __getattr__(self, name):
"""Get a database by name.
Raises :class:`~pymongo.errors.InvalidName` if an invalid
database name is used.
:Parameters:
- `name`: the name of the database to get
"""
if name.startswith('_'):
raise AttributeError(
"MongoClient has no attribute %r. To access the %s"
" database, use client[%r]." % (name, name, name))
return self.__getitem__(name)
def __getitem__(self, name):
"""Get a database by name.
Raises :class:`~pymongo.errors.InvalidName` if an invalid
database name is used.
:Parameters:
- `name`: the name of the database to get
"""
return database.Database(self, name)
def close_cursor(self, cursor_id, address=None):
"""Send a kill cursors message soon with the given id.
Raises :class:`TypeError` if `cursor_id` is not an instance of
``(int, long)``. What closing the cursor actually means
depends on this client's cursor manager.
This method may be called from a :class:`~pymongo.cursor.Cursor`
destructor during garbage collection, so it isn't safe to take a
lock or do network I/O. Instead, we schedule the cursor to be closed
soon on a background thread.
:Parameters:
- `cursor_id`: id of cursor to close
- `address` (optional): (host, port) pair of the cursor's server.
If it is not provided, the client attempts to close the cursor on
the primary or standalone, or a mongos server.
.. versionchanged:: 3.0
Added ``address`` parameter.
"""
if not isinstance(cursor_id, integer_types):
raise TypeError("cursor_id must be an instance of (int, long)")
if self.__cursor_manager is not None:
self.__cursor_manager.close(cursor_id, address)
else:
self.__kill_cursors_queue.append((address, [cursor_id]))
def kill_cursors(self, cursor_ids, address=None):
"""DEPRECATED - Send a kill cursors message soon with the given ids.
Raises :class:`TypeError` if `cursor_ids` is not an instance of
``list``.
:Parameters:
- `cursor_ids`: list of cursor ids to kill
- `address` (optional): (host, port) pair of the cursor's server.
If it is not provided, the client attempts to close the cursor on
the primary or standalone, or a mongos server.
.. versionchanged:: 3.3
Deprecated.
.. versionchanged:: 3.0
Now accepts an `address` argument. Schedules the cursors to be
closed on a background thread instead of sending the message
immediately.
"""
warnings.warn(
"kill_cursors is deprecated.",
DeprecationWarning,
stacklevel=2)
if not isinstance(cursor_ids, list):
raise TypeError("cursor_ids must be a list")
# "Atomic", needs no lock.
self.__kill_cursors_queue.append((address, cursor_ids))
# This method is run periodically by a background thread.
def _process_periodic_tasks(self):
"""Process any pending kill cursors requests and
maintain connection pool parameters."""
address_to_cursor_ids = defaultdict(list)
# Other threads or the GC may append to the queue concurrently.
while True:
try:
address, cursor_ids = self.__kill_cursors_queue.pop()
except IndexError:
break
address_to_cursor_ids[address].extend(cursor_ids)
        # Don't re-open the topology if it's closed and there are no pending cursors.
if address_to_cursor_ids:
listeners = self._event_listeners
publish = listeners.enabled_for_commands
topology = self._get_topology()
for address, cursor_ids in address_to_cursor_ids.items():
try:
if address:
# address could be a tuple or _CursorAddress, but
# select_server_by_address needs (host, port).
server = topology.select_server_by_address(
tuple(address))
else:
# Application called close_cursor() with no address.
server = topology.select_server(
writable_server_selector)
try:
namespace = address.namespace
db, coll = namespace.split('.', 1)
except AttributeError:
namespace = None
db = coll = "OP_KILL_CURSORS"
spec = SON([('killCursors', coll),
('cursors', cursor_ids)])
with server.get_socket(self.__all_credentials) as sock_info:
if (sock_info.max_wire_version >= 4 and
namespace is not None):
sock_info.command(db, spec)
else:
if publish:
start = datetime.datetime.now()
request_id, msg = message.kill_cursors(cursor_ids)
if publish:
duration = datetime.datetime.now() - start
listeners.publish_command_start(
spec, db, request_id, address)
start = datetime.datetime.now()
try:
sock_info.send_message(msg, 0)
except Exception as exc:
if publish:
dur = ((datetime.datetime.now() - start)
+ duration)
listeners.publish_command_failure(
dur, message._convert_exception(exc),
'killCursors', request_id, address)
raise
if publish:
duration = ((datetime.datetime.now() - start)
+ duration)
# OP_KILL_CURSORS returns no reply, fake one.
reply = {'cursorsUnknown': cursor_ids, 'ok': 1}
listeners.publish_command_success(
duration, reply, 'killCursors', request_id,
address)
except Exception:
helpers._handle_exception()
try:
self._topology.update_pool()
except Exception:
helpers._handle_exception()
def server_info(self):
"""Get information about the MongoDB server we're connected to."""
return self.admin.command("buildinfo",
read_preference=ReadPreference.PRIMARY)
def database_names(self):
"""Get a list of the names of all databases on the connected server."""
return [db["name"] for db in
self._database_default_options('admin').command(
"listDatabases")["databases"]]
def drop_database(self, name_or_database):
"""Drop a database.
Raises :class:`TypeError` if `name_or_database` is not an instance of
:class:`basestring` (:class:`str` in python 3) or
:class:`~pymongo.database.Database`.
:Parameters:
- `name_or_database`: the name of a database to drop, or a
:class:`~pymongo.database.Database` instance representing the
database to drop
"""
name = name_or_database
if isinstance(name, database.Database):
name = name.name
if not isinstance(name, string_type):
raise TypeError("name_or_database must be an instance "
"of %s or a Database" % (string_type.__name__,))
self._purge_index(name)
self[name].command("dropDatabase",
read_preference=ReadPreference.PRIMARY)
def get_default_database(self):
"""Get the database named in the MongoDB connection URI.
>>> uri = 'mongodb://host/my_database'
>>> client = MongoClient(uri)
>>> db = client.get_default_database()
>>> assert db.name == 'my_database'
Useful in scripts where you want to choose which database to use
based only on the URI in a configuration file.
"""
if self.__default_database_name is None:
raise ConfigurationError('No default database defined')
return self[self.__default_database_name]
def get_database(self, name, codec_options=None, read_preference=None,
write_concern=None, read_concern=None):
"""Get a :class:`~pymongo.database.Database` with the given name and
options.
Useful for creating a :class:`~pymongo.database.Database` with
different codec options, read preference, and/or write concern from
this :class:`MongoClient`.
>>> client.read_preference
Primary()
>>> db1 = client.test
>>> db1.read_preference
Primary()
>>> from pymongo import ReadPreference
>>> db2 = client.get_database(
... 'test', read_preference=ReadPreference.SECONDARY)
>>> db2.read_preference
Secondary(tag_sets=None)
:Parameters:
- `name`: The name of the database - a string.
- `codec_options` (optional): An instance of
:class:`~bson.codec_options.CodecOptions`. If ``None`` (the
default) the :attr:`codec_options` of this :class:`MongoClient` is
used.
- `read_preference` (optional): The read preference to use. If
``None`` (the default) the :attr:`read_preference` of this
:class:`MongoClient` is used. See :mod:`~pymongo.read_preferences`
for options.
- `write_concern` (optional): An instance of
:class:`~pymongo.write_concern.WriteConcern`. If ``None`` (the
default) the :attr:`write_concern` of this :class:`MongoClient` is
used.
- `read_concern` (optional): An instance of
:class:`~pymongo.read_concern.ReadConcern`. If ``None`` (the
default) the :attr:`read_concern` of this :class:`MongoClient` is
used.
"""
return database.Database(
self, name, codec_options, read_preference,
write_concern, read_concern)
def _database_default_options(self, name):
"""Get a Database instance with the default settings."""
return self.get_database(
name, codec_options=DEFAULT_CODEC_OPTIONS,
read_preference=ReadPreference.PRIMARY,
write_concern=WriteConcern())
@property
def is_locked(self):
"""Is this server locked? While locked, all write operations
are blocked, although read operations may still be allowed.
Use :meth:`unlock` to unlock.
"""
ops = self._database_default_options('admin').current_op()
return bool(ops.get('fsyncLock', 0))
def fsync(self, **kwargs):
"""Flush all pending writes to datafiles.
:Parameters:
Optional parameters can be passed as keyword arguments:
- `lock`: If True lock the server to disallow writes.
- `async`: If True don't block while synchronizing.
.. warning:: `async` and `lock` can not be used together.
.. warning:: MongoDB does not support the `async` option
on Windows and will raise an exception on that
platform.
"""
self.admin.command("fsync",
read_preference=ReadPreference.PRIMARY, **kwargs)
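    # Editor's note (usage sketch, not part of the original source): fsync(),
    # is_locked and unlock() work together when taking a consistent backup, e.g.
    #     client.fsync(lock=True)     # flush and block writes
    #     assert client.is_locked     # verify the lock took effect
    #     ... copy data files ...
    #     client.unlock()             # release the lock
    # The backup step itself is an assumption; only the method names come from
    # this class.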
def unlock(self):
"""Unlock a previously locked server.
"""
cmd = {"fsyncUnlock": 1}
with self._socket_for_writes() as sock_info:
if sock_info.max_wire_version >= 4:
try:
sock_info.command("admin", cmd)
except OperationFailure as exc:
# Ignore "DB not locked" to replicate old behavior
if exc.code != 125:
raise
else:
helpers._first_batch(sock_info, "admin", "$cmd.sys.unlock",
{}, -1, True, self.codec_options,
ReadPreference.PRIMARY, cmd, self._event_listeners)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def __iter__(self):
return self
def __next__(self):
raise TypeError("'MongoClient' object is not iterable")
next = __next__
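# --- Editor's note: minimal usage sketch, not part of the original module. ---
# It assumes a MongoDB server is reachable at localhost:27017 and a throwaway
# database name "demo_db"; it only exercises public APIs documented above
# (get_default_database, get_database with a read preference, drop_database,
# close).
if __name__ == "__main__":
    from pymongo import MongoClient, ReadPreference

    client = MongoClient("mongodb://localhost:27017/demo_db")
    db = client.get_default_database()            # "demo_db", taken from the URI
    secondary_db = client.get_database(
        "demo_db", read_preference=ReadPreference.SECONDARY_PREFERRED)
    print(client.address, client.max_pool_size)   # blocks until a server is selected
    client.drop_database("demo_db")               # accepts a name or a Database object
    client.close()                                # pools/monitors reopen on next use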
| 43.949919
| 83
| 0.600202
|
40e24f0e11b085370b32c95d9fd620cf2bbff9fc
| 3,418
|
py
|
Python
|
sample.py
|
vittorio-nardone/Image-Captioning-Project
|
fdbcc972e8ff96ff987893f78cf483dd0f32d4b2
|
[
"MIT"
] | null | null | null |
sample.py
|
vittorio-nardone/Image-Captioning-Project
|
fdbcc972e8ff96ff987893f78cf483dd0f32d4b2
|
[
"MIT"
] | null | null | null |
sample.py
|
vittorio-nardone/Image-Captioning-Project
|
fdbcc972e8ff96ff987893f78cf483dd0f32d4b2
|
[
"MIT"
] | null | null | null |
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
import torch
import matplotlib.pyplot as plt
import numpy as np
import argparse
import sys
sys.path.append('/opt/cocoapi/PythonAPI')
import pickle
import os
from torchvision import transforms
from model import EncoderCNN, DecoderRNN
from PIL import Image
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def load_image(image_path, transform=None):
image = Image.open(image_path)
image = image.resize([224, 224], Image.LANCZOS)
if transform is not None:
image = transform(image).unsqueeze(0)
return image
def clean_sentence(output, idx2word):
sentence = ''
for x in output:
word = idx2word[x]
if word == '<end>':
break
elif word == '<start>':
pass
elif word == '.':
sentence += word
else:
sentence += ' ' + word
return sentence.strip()
def main(args):
# Image preprocessing
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406),
(0.229, 0.224, 0.225))])
# Load vocabulary wrapper
with open(args.vocab_path, 'rb') as f:
vocab = pickle.load(f)
# Build models
if (args.net == 'resnet50'):
encoder = EncoderCNN(args.embed_size).eval()
    elif (args.net == 'resnet152'):
        # NOTE: EncoderCNN152 is not imported at the top of this file; this branch
        # assumes model.py also defines an EncoderCNN152 class and will raise a
        # NameError otherwise.
        encoder = EncoderCNN152(args.embed_size).eval()
decoder = DecoderRNN(args.embed_size, args.hidden_size, len(vocab), args.num_layers)
encoder = encoder.to(device)
decoder = decoder.to(device)
# Load the trained model parameters
encoder.load_state_dict(torch.load(args.encoder_path))
decoder.load_state_dict(torch.load(args.decoder_path))
# Prepare an image
image = load_image(args.image, transform)
image_tensor = image.to(device)
    # Generate a caption from the image
feature = encoder(image_tensor)
sampled_ids = decoder.sample(feature)
# Convert word_ids to words
sentence = clean_sentence(sampled_ids, vocab.idx2word)
# Print out generated caption
print("File: '{}' - Caption: '{}'".format(args.image, sentence))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('image', type=str, help='input image for generating caption')
parser.add_argument('--encoder_path', type=str, default='models/encoder-10.pkl', help='path for trained encoder')
parser.add_argument('--decoder_path', type=str, default='models/decoder-10.pkl', help='path for trained decoder')
parser.add_argument('--vocab_path', type=str, default='vocab.pkl', help='path for vocabulary wrapper')
    # Model parameters (should be the same as the parameters in train.py)
parser.add_argument('--net', default='resnet50', const='resnet50', nargs='?', choices=['resnet50', 'resnet152'],
                        help='encoder pretrained network (default "resnet50")')
parser.add_argument('--embed_size', type=int , default=256, help='dimension of word embedding vectors')
parser.add_argument('--hidden_size', type=int , default=512, help='dimension of lstm hidden states')
parser.add_argument('--num_layers', type=int , default=1, help='number of layers in lstm')
args = parser.parse_args()
main(args)
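# --- Editor's note: tiny self-contained check of clean_sentence(), added for ---
# illustration; the idx2word mapping below is made up and is not the project's
# real vocabulary. It shows how <start>/<end> markers are stripped and how the
# final '.' is attached without a leading space.
def _demo_clean_sentence():
    idx2word = {0: '<start>', 1: 'a', 2: 'dog', 3: 'runs', 4: '.', 5: '<end>'}
    assert clean_sentence([0, 1, 2, 3, 4, 5], idx2word) == 'a dog runs.'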
| 33.841584
| 117
| 0.656524
|
bfcc0ce2d896c79b72808a2a4bd98facd79d208c
| 257
|
py
|
Python
|
app/__init__.py
|
HWaruguru/Global-News-Highlight
|
6c4fccf6eab0d4f9244d7f32aeb36bcf0070c2f3
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
HWaruguru/Global-News-Highlight
|
6c4fccf6eab0d4f9244d7f32aeb36bcf0070c2f3
|
[
"MIT"
] | null | null | null |
app/__init__.py
|
HWaruguru/Global-News-Highlight
|
6c4fccf6eab0d4f9244d7f32aeb36bcf0070c2f3
|
[
"MIT"
] | null | null | null |
from flask import Flask
from .config import DevConfig
# Initializing application
app = Flask(__name__, instance_relative_config = True)
# Setting up configuration
app.config.from_object(DevConfig)
app.config.from_pyfile('config.py')
from app import views
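# --- Editor's note: illustrative sketch only; the project's real routes live in ---
# app/views.py (imported above). The endpoint below is hypothetical and merely
# shows how modules use the `app` object configured in this file.
@app.route('/_ping')
def _ping():
    # Hypothetical health-check endpoint, for illustration only.
    return 'OK'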
| 23.363636
| 54
| 0.81323
|
f2068292aea5c2ad94ff02478b26757596355db5
| 345
|
py
|
Python
|
easy/Mars Lander - Episode 1.py
|
izanbf1803/Codingame
|
864c13b664e0609d0eded0fae4a655c8e3881c76
|
[
"MIT"
] | 1
|
2018-02-04T13:40:31.000Z
|
2018-02-04T13:40:31.000Z
|
easy/Mars Lander - Episode 1.py
|
izanbf1803/Codingame-solutions
|
864c13b664e0609d0eded0fae4a655c8e3881c76
|
[
"MIT"
] | null | null | null |
easy/Mars Lander - Episode 1.py
|
izanbf1803/Codingame-solutions
|
864c13b664e0609d0eded0fae4a655c8e3881c76
|
[
"MIT"
] | 2
|
2020-07-28T05:21:00.000Z
|
2021-12-04T01:18:30.000Z
|
import sys
import math
surface_n = int(input()) # the number of points used to draw the surface of Mars.
for i in range(surface_n):
land_x, land_y = [int(j) for j in input().split()]
while True:
x, y, h_speed, v_speed, fuel, rotate, power = [int(i) for i in input().split()]
if v_speed <= -40: print("0 4")
else: print("0 0")
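# Editor's note: the rule above is the simplest control for Episode 1 -- the lander
# crashes if its vertical speed exceeds 40 m/s downward at touchdown, so full thrust
# (power 4) is applied only while falling faster than that, and no thrust otherwise;
# rotation stays at 0 throughout.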
| 28.75
| 83
| 0.637681
|
fee220b770b99c2ba07b97dba49548f7d547a8ae
| 2,281
|
py
|
Python
|
py/testdir_single_jvm/test_exec2_result_race.py
|
gigliovale/h2o
|
be350f3f2c2fb6f135cc07c41f83fd0e4f521ac1
|
[
"Apache-2.0"
] | 882
|
2015-05-22T02:59:21.000Z
|
2022-02-17T05:02:48.000Z
|
py/testdir_single_jvm/test_exec2_result_race.py
|
VonRosenchild/h2o-2
|
be350f3f2c2fb6f135cc07c41f83fd0e4f521ac1
|
[
"Apache-2.0"
] | 1
|
2022-02-22T12:15:02.000Z
|
2022-02-22T12:15:02.000Z
|
py/testdir_single_jvm/test_exec2_result_race.py
|
VonRosenchild/h2o-2
|
be350f3f2c2fb6f135cc07c41f83fd0e4f521ac1
|
[
"Apache-2.0"
] | 392
|
2015-05-22T17:04:11.000Z
|
2022-02-22T09:04:39.000Z
|
import unittest, random, sys, time, os
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_browse as h2b, h2o_exec as h2e
initList = [
'Result.hex = c(0)',
'Result.hex = c(1)',
'Result.hex = c(2)',
'Result.hex = c(3)',
'Result.hex = c(4)',
'Result.hex = c(5)',
'Result.hex = c(6)',
'Result.hex = c(7)',
'Result.hex = c(8)',
'Result.hex = c(9)',
'Result.hex = c(10)',
]
exprList = [
'Result.hex = Result.hex + 1',
]
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init()
@classmethod
def tearDownClass(cls):
# wait while I inspect things
# time.sleep(1500)
h2o.tear_down_cloud()
def test_exec2_result_race(self):
### h2b.browseTheCloud()
lenNodes = len(h2o.nodes)
# zero the list of Results using node[0]
        # FIX! is the zerolist not being seen correctly? is it not initializing to non-zero?
for execExpr in initList:
h2e.exec_expr(h2o.nodes[0], execExpr, resultKey="Result.hex", timeoutSecs=20)
### print "\nexecResult:", execResult
trial = 0
while (trial < 200):
for execExpr in exprList:
# for the first 100 trials: do each expression at node 0,
                # for the second 100 trials: do each expression at a random node, to facilitate key movement
# FIX! there's some problem with the initList not taking if rotated amongst nodes?
if (trial < 100):
nodeX = 0
else:
nodeX = random.randint(0,lenNodes-1)
resultKey = "Result.hex"
execResultInspect, min_value = h2e.exec_expr(h2o.nodes[nodeX], execExpr,
resultKey=resultKey, timeoutSecs=20)
print min_value, execExpr
h2o.verboseprint("min_value: ", min_value, "trial:", trial)
### h2b.browseJsonHistoryAsUrlLastMatch("Inspect")
trial += 1
if __name__ == '__main__':
h2o.unit_main()
| 31.246575
| 106
| 0.543621
|
6e830c26bce24007d61ffa5e9b1021d8a5ab991c
| 1,465
|
py
|
Python
|
blog/views.py
|
bonnygl/my-first-blog
|
52e595dfe37401e6ae6ac4bd89ba6801486a0046
|
[
"Apache-2.0"
] | null | null | null |
blog/views.py
|
bonnygl/my-first-blog
|
52e595dfe37401e6ae6ac4bd89ba6801486a0046
|
[
"Apache-2.0"
] | null | null | null |
blog/views.py
|
bonnygl/my-first-blog
|
52e595dfe37401e6ae6ac4bd89ba6801486a0046
|
[
"Apache-2.0"
] | null | null | null |
from django.utils import timezone
from django.shortcuts import render, get_object_or_404, redirect
from .forms import PostForm
from .models import Post
# Create your views here.
def post_list(request):
posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
return render(request, 'blog/post_list.html', {'posts': posts})
def post_detail(request, pk):
post = get_object_or_404(Post, pk=pk)
return render(request, 'blog/post_detail.html', {'post': post})
def post_new(request):
if request.method == 'POST':
form = PostForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostForm()
return render(request, 'blog/post_edit.html', {'form': form})
def post_edit(request, pk):
post = get_object_or_404(Post, pk=pk)
if request.method == "POST":
form = PostForm(request.POST, instance=post)
if form.is_valid():
post = form.save(commit=False)
post.author = request.user
post.published_date = timezone.now()
post.save()
return redirect('post_detail', pk=post.pk)
else:
form = PostForm(instance=post)
return render(request, 'blog/post_edit.html', {'form': form})
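# --- Editor's note: illustrative URL wiring, added for clarity; in a real project ---
# this list belongs in blog/urls.py rather than in views.py. The concrete paths are
# assumptions; only the pattern names are taken from the redirect() calls above
# (older Django versions would use url() with regexes instead of path()).
from django.urls import path

urlpatterns = [
    path('', post_list, name='post_list'),
    path('post/<int:pk>/', post_detail, name='post_detail'),
    path('post/new/', post_new, name='post_new'),
    path('post/<int:pk>/edit/', post_edit, name='post_edit'),
]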
| 35.731707
| 94
| 0.643003
|
5cd99f75bfda7a662e387ec07deac47aa6faad4e
| 394
|
py
|
Python
|
tests/storage_adapter_tests/integration_tests/json_integration_tests.py
|
ttracx/ChatterBot
|
ec6cb0ae09304ddac7bef9c3dcf9b148b7568d76
|
[
"BSD-3-Clause"
] | 1
|
2016-05-21T21:26:15.000Z
|
2016-05-21T21:26:15.000Z
|
tests/storage_adapter_tests/integration_tests/json_integration_tests.py
|
ttracx/ChatterBot
|
ec6cb0ae09304ddac7bef9c3dcf9b148b7568d76
|
[
"BSD-3-Clause"
] | null | null | null |
tests/storage_adapter_tests/integration_tests/json_integration_tests.py
|
ttracx/ChatterBot
|
ec6cb0ae09304ddac7bef9c3dcf9b148b7568d76
|
[
"BSD-3-Clause"
] | null | null | null |
from tests.base_case import ChatBotTestCase
from .base import StorageIntegrationTests
class JsonStorageIntegrationTests(StorageIntegrationTests, ChatBotTestCase):
def setUp(self):
super(JsonStorageIntegrationTests, self).setUp()
self.chatbot.storage_adapters = []
self.chatbot.add_adapter(
"chatterbot.adapters.storage.JsonDatabaseAdapter"
)
| 28.142857
| 76
| 0.741117
|
2fcbfc48539661b93503421fb7a6a9df610c6395
| 7,204
|
py
|
Python
|
preatrained_model.py
|
ashishkishor/Machine-learning
|
eca60d24ce10820c9df32c050275cb838634b640
|
[
"MIT"
] | null | null | null |
preatrained_model.py
|
ashishkishor/Machine-learning
|
eca60d24ce10820c9df32c050275cb838634b640
|
[
"MIT"
] | null | null | null |
preatrained_model.py
|
ashishkishor/Machine-learning
|
eca60d24ce10820c9df32c050275cb838634b640
|
[
"MIT"
] | null | null | null |
import re # For preprocessing
import pandas as pd # For data handling
from time import time # To time our operations
from collections import defaultdict # For word frequency
import spacy # For preprocessing
df = pd.read_csv('result.csv')
df.head()
df.isnull().sum()
nlp = spacy.load('en_core_web_sm') # Spacy to load the english language
# Linguistic Preprocessing
def clean(sen):
text = [token.lemma_ for token in sen if
            not token.is_stop]  # Remove stop-words and lemmatize the remaining tokens
    if len(text) > 2:  # Keep only sentences with more than two words
return ' '.join(text)
more_clean = (re.sub("[^A-Za-z']+", ' ', str(row)).lower() for row in
              df['text'])  # Strip non-alphabetic characters and lowercase everything
# Calling function and spacy library for preprocessing
text = [clean(doc) for doc in nlp.pipe(more_clean, batch_size=5000, n_threads=-1)]
df_cleansent = pd.DataFrame({'clean': text}) # Clean sentences
df_cleansent.shape
u=df['label']
u.shape
df_cleansent = df_cleansent.join(u) # Adding labels at the back of sentences
df_cleansent = df_cleansent.dropna().drop_duplicates()  # Drop rows with missing values and duplicate rows
df_cleansent.shape
sentences = [row.split() for row in df_cleansent['clean']]
word_freq = defaultdict(int) # Defining the dictionary
for sent in sentences:
for i in sent:
word_freq[i] += 1
len(word_freq)
sorted(word_freq, key=word_freq.get, reverse=True)[:10]
import multiprocessing
cores = multiprocessing.cpu_count()
from gensim.models import Word2Vec
from gensim.models.keyedvectors import KeyedVectors
w2v_model = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True)
#Importing all necessary libraries for our model
from keras.models import Sequential
from keras.layers import *
from keras.preprocessing.sequence import pad_sequences
import sys
import numpy as np
import string
import logging
import random
#Initializing weights
w2v_weights = w2v_model.wv.vectors
VocabSize, EmbeddingSize = w2v_weights.shape  # Vocabulary size and dimension of each word vector
print("The Vocabulary Size is : {} - Embedding Dim: {}".format(VocabSize, EmbeddingSize))
#Function to extract word from word2vec
def word2token(word):
try:
return w2v_model.wv.vocab[word].index
except KeyError:
return 0
# Class to convert sentences into sequences of vocabulary indices, which correspond to the word vectors
MaxSeqLength = 200
class Sent2vec:
def __init__(self, data, seq_length):
self.data = data
self.categories = data.label.unique()
self.seq_length = seq_length
def __iter__(self):
for txt, cat in zip(self.data.iloc[:, 0], self.data.iloc[:, 1]):
words = np.array([word2token(w) for w in txt.split(' ')[:self.seq_length] if w != ''])
yield (words, cat)
new_sent = Sent2vec(df_cleansent, MaxSeqLength)
#Generate the labels
label = {r: m for r, m in zip(new_sent.categories, range(len(new_sent.categories)))}
""" {'Facts': 0,
'Ratio of the decision': 1,
'Ruling by Lower Court': 2,
'Argument': 3,
'Ruling by Present Court': 4,
'Precedent': 5,
'Statute': 6}
"""
setx = []
sety = []
for w, c in new_sent:
setx.append(w)
sety.append(label[c])
# Padding to equalize the vectors
setx = pad_sequences(setx, maxlen=MaxSeqLength, padding='pre', value=0)
sety = np.array(sety)
print(setx.shape)
print(sety.shape)
import matplotlib.pyplot as plt
valPer = 0.15 # Percentage of Validation set
Samples = setx.shape[0]
ValN = int(valPer * Samples) # No.of validation data
TrainN = Samples - ValN # No. of train data
#Randomize distribution
random_i = random.sample(range(Samples), Samples)
TrainX = setx[random_i[:TrainN]]
TrainY = sety[random_i[:TrainN]]
ValX = setx[random_i[TrainN:TrainN+ValN]]
ValY = sety[random_i[TrainN:TrainN+ValN]]
print(TrainX)
print(TrainY)
print(ValX)
print(ValY)
print(random_i)
print("Training sample shapes - X: {} - Y: {}".format(TrainX.shape, TrainY.shape))
print("Validation sample shapes - X: {} - Y: {}".format(ValX.shape, ValY.shape))
#Plotting the distribution for training set
categories, ccount = np.unique(TrainY, return_counts=True)
plt.figure(figsize=(16, 8))
plt.title("Training Set - ""Category Distribution")
plt.xticks(range(len(categories)), label.keys())
plt.bar(categories, ccount, align='center')
plt.show()
#Plotting the distribution for validation set
categories, ccount = np.unique(ValY, return_counts=True)
plt.figure(figsize=(16, 8))
plt.title("Validation Set - Category Distribution")
plt.xticks(range(len(categories)), label.keys())
plt.bar(categories, ccount, align='center')
plt.show()
n_categories = len(categories)
#Keras layer with word2vec embedding
model = Sequential()
model.add(Embedding(input_dim=VocabSize,
output_dim=EmbeddingSize,
weights=[w2v_weights],
input_length=MaxSeqLength,
mask_zero=True,
trainable=False))
#model.add(LSTM(6, return_sequences=True, input_shape=(4, 8)) )
# returns a sequence of vectors of dimension 32
#model.add(Dropout(0.2))
# returns a sequence of vectors of dimension 32
# model.add(LSTM(6))
model.add(Bidirectional(LSTM(100)))
model.add(Dropout(0.4))
model.add(Dense(n_categories, activation='softmax'))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
history = model.fit(TrainX, TrainY, epochs=6, batch_size=8,
validation_data=(ValX, ValY), verbose=1)
#Load the best weights
#model.load_weights('model13.h5')
#
# plt.figure(figsize=(12, 12))
# plt.plot(history.history['loss'])
# plt.plot(history.history['val_loss'])
# plt.title('Loss')
# plt.legend(['train', 'val'], loc='upper left')
# plt.show()
#
# plt.figure(figsize=(12, 12))
# plt.plot(history.history['accuracy'])
# plt.plot(history.history['val_accuracy'])
# plt.title('Accuracy')
# plt.legend(['train', 'val'], loc='upper left')
# plt.show()
#model.save_weights("model13.h5")
#Predicting on validation set
result = model.predict(ValX)
i = 0
from sklearn import metrics
# print(metrics.classification_report(ValY,result))
# result =model.predict(TrainX)
# converting the result into 1 hot vector
prob = [None] * len(result)  # one predicted class index per prediction (was hard-coded to 1158)
x = 0
y = 0
print("loop")
# k=0
index = 0
for i in result:
maxx = -1
y = 0
# if(k>10):
# break
for j in i:
# print(*i)
if maxx < j:
maxx = j
value = j
ind = y
y = y + 1
prob[x] = ind
x = x + 1
prob = np.array(prob)
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
#Printing ConfusionMatrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(ValY, prob)
cm
#Printing Classifcation Report
from sklearn.metrics import classification_report
matrix = classification_report(ValY, prob, labels=[0, 1, 2, 3, 4, 5, 6])
print('Classification report : \n', matrix)
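# --- Editor's note: sanity check added for illustration; the manual loop above that ---
# fills `prob` is simply a per-row argmax over the predicted probabilities. A compact
# equivalent on a toy 2x3 matrix (made-up numbers, not model output):
_toy = np.array([[0.1, 0.7, 0.2], [0.5, 0.3, 0.2]])
assert np.array_equal(np.argmax(_toy, axis=1), [1, 0])  # index of the max probability per row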
| 29.165992
| 111
| 0.674764
|
659eebd891eb979369f2988357ab0b68d48e4384
| 1,828
|
py
|
Python
|
pyeiscp/tools.py
|
klaashoekstra94/python-eiscp
|
80e47603ff5755c2a809ff79eeb9474a7940add3
|
[
"MIT"
] | 1
|
2021-01-01T18:12:19.000Z
|
2021-01-01T18:12:19.000Z
|
pyeiscp/tools.py
|
klaashoekstra94/python-eiscp
|
80e47603ff5755c2a809ff79eeb9474a7940add3
|
[
"MIT"
] | 5
|
2020-07-30T11:33:37.000Z
|
2021-12-18T10:33:46.000Z
|
pyeiscp/tools.py
|
klaashoekstra94/python-eiscp
|
80e47603ff5755c2a809ff79eeb9474a7940add3
|
[
"MIT"
] | 4
|
2020-07-11T07:58:07.000Z
|
2021-03-29T18:32:41.000Z
|
"""Provides a raw console to test module and demonstrate usage."""
import argparse
import asyncio
import logging
import pyeiscp
__all__ = ("console", "monitor")
async def console(loop, log):
"""Connect to receiver and show events as they occur.
Pulls the following arguments from the command line (not method arguments):
:param host:
Hostname or IP Address of the device.
:param port:
TCP port number of the device.
:param verbose:
Show debug logging.
:param messages:
A sequence of one or more messages to send to the device.
"""
parser = argparse.ArgumentParser(description=console.__doc__)
parser.add_argument("--host", default="127.0.0.1", help="IP of AVR")
parser.add_argument("--port", default="60128", help="Port of AVR")
parser.add_argument("--verbose", "-v", action="count")
parser.add_argument("messages", nargs="*")
args = parser.parse_args()
if args.verbose:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(level=level)
def log_callback(message):
"""Receives event callback from eISCP Protocol class."""
zone, command, value = message
log.info("Zone: %s | %s: %s" % (zone, command, value))
def connect_callback():
log.info("Successfully (re)connected to AVR")
host = args.host
port = int(args.port)
conn = await pyeiscp.Connection.create(
host=host, port=port, loop=loop, update_callback=log_callback, connect_callback=connect_callback
)
for message in args.messages:
conn.send(message)
def monitor():
"""Wrapper to call console with a loop."""
log = logging.getLogger(__name__)
loop = asyncio.get_event_loop()
asyncio.ensure_future(console(loop, log))
loop.run_forever()
| 28.5625
| 104
| 0.66302
|
b181657f8c381bc97e85a908c5b859b2f11c933c
| 6,176
|
py
|
Python
|
config.py
|
saydulk/admin4
|
dc99fa7c8a1e0417b131b7627a748e5a9b12c1bb
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
saydulk/admin4
|
dc99fa7c8a1e0417b131b7627a748e5a9b12c1bb
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
saydulk/admin4
|
dc99fa7c8a1e0417b131b7627a748e5a9b12c1bb
|
[
"Apache-2.0"
] | 14
|
2017-01-12T11:13:49.000Z
|
2019-04-19T10:02:50.000Z
|
# The Admin4 Project
# (c) 2013-2014 Andreas Pflug
#
# Licensed under the Apache License,
# see LICENSE.TXT for conditions of usage
import wx
import logger
from wh import StringType, evalAsPython
ignoreStoredPositions=False
class Config(wx.Config):
"""
OSX: ~/Library/Preferences/<name> Preferences
"""
def __init__(self, name):
wx.Config.__init__(self, name, style = wx.CONFIG_USE_LOCAL_FILE)
def getServers(self):
return self.Read("Servers", [])
def Decorate(self, name, obj=None, subname=None):
if subname:
name="%s/%s" % (name, subname)
if obj:
return "%s/%s" % (obj.__module__, name)
else:
return name
def Read(self, name, default="", obj=None, subname=None):
"""
Read(name, default="", obj=None)
Read a config value <name>
config name might be decorated with <obj> module name
"""
val=super(Config, self).Read(self.Decorate(name, obj, subname))
if not val:
return default
if not isinstance(default, StringType):
py=evalAsPython(val)
if py != None:
val=py
else:
logger.debug("Couldn't pythonize '%s'", val)
if val == None:
return default
return val
def Write(self, name, val, obj=None, subname=None):
"""
Write(name, value, obj=None)
Write a config value <name>
config name might be decorated with <obj> module name
"""
super(Config, self).Write(self.Decorate(name, obj, subname), str(val))
self.Flush()
def getName(self, aspect, module, name):
if not isinstance(module, StringType):
if not name:
if hasattr(module, 'name'):
name=module.name
elif hasattr(module, 'resname'):
name=module.resname
module=module.__module__
name="%s/%s:%s" % (module, name, aspect)
return name.replace('.', '/')
def getWinName(self, win):
if isinstance(win, wx.Frame):
cls="Frame"
else:
cls="Dialog"
return self.getName(cls, win.__module__, win.__class__.__name__)
def storeWindowPositions(self, win):
name=self.getWinName(win)
size=win.GetSize()
pos=win.GetPosition()
if win.GetParent():
pos -= win.GetParent().GetPosition()
self.Write("%sPosition" % name, ((size.x, size.y), (pos.x, pos.y)))
if hasattr(win, "manager"):
      perspective=win.manager.SavePerspective()
      self.Write("%sPerspective" % name, perspective)
def GetPerspective(self, win):
if ignoreStoredPositions:
return ""
name=self.getWinName(win)
return self.Read("%sPerspective" % name, "")
def getWindowPositions(self, win):
if ignoreStoredPositions:
return None, None
name=self.getWinName(win)
size, pos=self.Read("%sPosition" % name, (None, None))
xmax = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_X)
ymax = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_Y)
if size and (size[0] < 30 or size[1] < 30):
size=None
if size:
xmax -= size[0]
ymax -= size[1]
else:
xmax -= 30
ymax -= 30
if pos and win.GetParent():
pp = win.GetParent().GetPosition()
pos = (pos[0]+pp.x, pos[1]+pp.y)
if pos and (pos[0] < 0 or pos[0] > xmax or pos[1] < 0 or pos[1] > ymax):
pos=None
return size, pos
def restoreGridPositions(self, grid, module=None, name='Grid'):
if ignoreStoredPositions:
return
if not module:
module=grid
name=self.getName("ColumnWidths", module, name)
colWidths=evalAsPython(self.Read(name, "{}"), {})
for i in range(grid.GetNumberCols()):
colname=grid.GetColLabelValue(i)
width=colWidths.get(colname)
if width:
grid.SetColSize(i, width)
def storeGridPositions(self, grid, module=None, name='Grid'):
if not module:
module=grid
name=self.getName("ColumnWidths", module, name)
colWidths=evalAsPython(self.Read(name, "{}"), {})
for i in range(grid.GetNumberCols()):
colname=grid.GetColLabelValue(i)
width=grid.GetColSize(i)
colWidths[colname] = width
self.Write(name, colWidths)
def restoreListviewPositions(self, listview, module, name=None):
if ignoreStoredPositions:
return
colcount=listview.GetColumnCount()
if colcount > 1:
colWidths=self.Read(self.getName("ColumnWidths", module, name), None)
if not colWidths:
return
if isinstance(colWidths, list):
for col in range(colcount):
if col >= len(colWidths):
return
listview.SetColumnWidth(col, colWidths[col])
elif isinstance(colWidths, dict):
for col in range(colcount):
colname = listview.GetColumn(col).GetText()
w=colWidths.get(colname)
if w != None:
listview.SetColumnWidth(col, w)
else:
logger.debug("Strange ColumnWidths format %s", str(colWidths))
def storeListviewPositions(self, listview, module, name=None):
colcount=listview.GetColumnCount()
if colcount > 1:
colWidths={}
for col in range(colcount):
colname = listview.GetColumn(col).GetText()
colWidths[colname] = listview.GetColumnWidth(col)
self.Write(self.getName("ColumnWidths", module, name), colWidths)
def existsServer(self, dlg, name):
cls=dlg.moduleClass()
servers=self.getServers()
return "%s/%s"%(cls,name) in servers
def storeServerSettings(self, dlg, settings):
cls=dlg.moduleClass()
name="%s/%s" % (cls, settings['name'])
settings['class'] = cls
servers=self.getServers()
if name not in servers:
servers.append(name)
self.Write("Servers", str(servers))
self.Write("Server-%s" % name, settings)
def getServerSettings(self, sname):
settings=self.Read("Server-%s" % sname, {})
return settings
def getHintCfg(self, hint, module):
h=module.__module__.split('.')
return "%s/%s" % ("/".join(h[:-1]), hint)
def GetWantHint(self, hint, module):
return self.Read("Hints", True, None, self.getHintCfg(hint, module))
def SetWantHint(self, hint, module, how):
self.Write("Hints", how, None, self.getHintCfg(hint, module))
| 27.945701
| 76
| 0.630019
|
731b95e19254c98f675b255cf5d54b4f50a935e8
| 1,812
|
py
|
Python
|
tests/covid_health/test_ita.py
|
ggbaro/covid-health-ita
|
267801c3de021078a1ca5d3b93b47515315f0300
|
[
"MIT"
] | 3
|
2020-03-25T22:20:07.000Z
|
2020-03-29T10:01:24.000Z
|
tests/covid_health/test_ita.py
|
ggbaro/covid-health-ita
|
267801c3de021078a1ca5d3b93b47515315f0300
|
[
"MIT"
] | null | null | null |
tests/covid_health/test_ita.py
|
ggbaro/covid-health-ita
|
267801c3de021078a1ca5d3b93b47515315f0300
|
[
"MIT"
] | null | null | null |
import pytest
import pandas as pd
from covid_health.ita import (
prep_istat,
prep_salutegov,
prep_other_data,
prep_pcm_dpc,
)
# ISTAT
@pytest.mark.parametrize(
"figure", ["2019_pop_regions", "2018_pop_regions", "2019_pop_provinces"],
)
def test_istat_geodemo(figure):
result = prep_istat.parse_istat_geodemo(figure)
assert isinstance(result, pd.DataFrame)
assert result.shape[1] > 1
print(result.columns)
def test_parse_istat_daily_deaths():
result = prep_istat.parse_daily_deaths()
assert isinstance(result, tuple)
assert isinstance(result[0], pd.DataFrame)
assert isinstance(result[1], tuple)
assert isinstance(result[1][0], pd.Series)
assert result[0].shape[1] > 1
print(result[0].columns)
# SALUTE GOV
@pytest.mark.parametrize(
"figure",
[
"hospital_beds_by_discipline_hospital",
"asl_expenditure_by_device_2014",
"asl_expenditure_by_device_2015",
"asl_expenditure_by_device_2016",
"asl_expenditure_by_device_2017",
"asl_comuni_pop",
],
)
def test_salutegov(figure):
result = prep_salutegov.parse_dataset(figure, verbose=1)
assert isinstance(result, pd.DataFrame)
assert result.shape[1] > 1
print(result.columns)
def test_salutegov_missingfigure():
with pytest.raises(NotImplementedError):
prep_salutegov.parse_dataset("non-existing-figure", verbose=1)
# DPC
@pytest.mark.parametrize(
"figure", ["dpc-regions", "dpc-province",],
)
def test_dpc(figure):
result = prep_pcm_dpc.parse_covid_data(figure)
assert isinstance(result, pd.DataFrame)
assert result.shape[1] > 1
print(result.columns)
def test_dpc_missingfigure():
with pytest.raises(NotImplementedError):
prep_pcm_dpc.parse_covid_data("non-existing-figure")
| 26.26087
| 77
| 0.715784
|
9a4859a891e2dd68a637983d4e62a91f9baced58
| 3,696
|
py
|
Python
|
tests/export/test_export_variants.py
|
szilvajuhos/scout
|
2f4a03fb3192a57c99fd62be626e8c22051e81af
|
[
"BSD-3-Clause"
] | null | null | null |
tests/export/test_export_variants.py
|
szilvajuhos/scout
|
2f4a03fb3192a57c99fd62be626e8c22051e81af
|
[
"BSD-3-Clause"
] | null | null | null |
tests/export/test_export_variants.py
|
szilvajuhos/scout
|
2f4a03fb3192a57c99fd62be626e8c22051e81af
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from scout.export.variant import export_mt_variants, export_verified_variants
from scout.constants.variants_export import MT_EXPORT_HEADER, VERIFIED_VARIANTS_HEADER
from scout.constants import CALLERS
def test_export_mt_variants(case_obj, real_populated_database):
"""Test the function that creates lines with MT variants to be exported"""
adapter = real_populated_database
case_id = case_obj['_id']
assert case_id
# load MT variants for this case
nr_loaded = adapter.load_variants(case_obj=case_obj,
category='snv', chrom='MT', start=1, end=16500)
assert nr_loaded > 0
mt_variants = list(adapter.variant_collection.find({'chromosome':'MT'}))
assert len(mt_variants) == nr_loaded # it's all MT variants, but double-checking it
# Assert that there is at least one sample to create the excel file for
samples = case_obj.get('individuals')
assert samples
# test function that exports variant lines
for sample in samples:
sample_lines = export_mt_variants(variants=mt_variants, sample_id=sample['individual_id'])
        # check that rows to write to excel correspond to number of variants
assert len(sample_lines) == len(mt_variants)
        # check that cols to write to excel correspond to fields of excel header
assert len(sample_lines[0]) == len(MT_EXPORT_HEADER)
def test_export_verified_variants(case_obj, real_populated_database, variant_objs):
"""Test the function that creates lines with verified variants to be exported"""
adapter = real_populated_database
case_id = case_obj['_id']
# load variants to populated_database
assert sum(1 for i in adapter.variants(case_id=case_id, nr_of_variants=-1)) == 0
nr_loaded = adapter.load_variants(case_obj=case_obj)
assert nr_loaded > 0
valid_status = ['False positive', 'True positive', 'False positive'] # for 3 vars
test_vars = list(adapter.variant_collection.find().limit(3))
assert len(test_vars) == 3
# Make sure that no variant is set as validated:
assert sum(1 for i in adapter.variant_collection.find({'validation':{'$exists':True}})) == 0
# Set test variant as validated
for i in range(3):
# Set validation status of a variant
adapter.variant_collection.find_one_and_update(
{'_id' : test_vars[i]['_id'] },
{'$set' : {'validation' : valid_status[i] } }
)
# insert validation events
adapter.event_collection.insert_one({
'verb' : 'validate',
'institute' : test_vars[i]['institute'],
'variant_id' : test_vars[i]['variant_id'],
'case' : case_id
})
# There should be 3 validated variants now
assert sum(1 for i in adapter.variant_collection.find({'validation':{'$exists':True}})) == 3
# Call function to get aggregated data (variant + case data):
cust = case_obj['owner']
aggregated_vars = adapter.verified(cust)
assert len(aggregated_vars) == 3 # same number of variants is returned
unique_callers = set()
for var_type, var_callers in CALLERS.items():
for caller in var_callers:
unique_callers.add(caller.get('id'))
n_individuals = len(case_obj['individuals'])
# Call function that creates document lines
document_lines = export_verified_variants(aggregated_vars, unique_callers)
assert len(document_lines) == 3 * n_individuals # one line per variant and individual
for line in document_lines:
# Make sure that document cells will be the same as in document header
assert len(line) == len(VERIFIED_VARIANTS_HEADER + list(unique_callers))
| 40.615385
| 98
| 0.695617
|
db2426aba32becf712bed9e2d75ac7bdc121613a
| 5,207
|
py
|
Python
|
model/atturesnext_feat_nlocal.py
|
z1021190674/GMAUResNeXt_RS
|
a8a7444bf30e509cefc01b3be4b0587d367cda2e
|
[
"MIT"
] | 1
|
2022-03-23T11:54:33.000Z
|
2022-03-23T11:54:33.000Z
|
model/atturesnext_feat_nlocal.py
|
z1021190674/GMAUResNeXt_RS
|
a8a7444bf30e509cefc01b3be4b0587d367cda2e
|
[
"MIT"
] | null | null | null |
model/atturesnext_feat_nlocal.py
|
z1021190674/GMAUResNeXt_RS
|
a8a7444bf30e509cefc01b3be4b0587d367cda2e
|
[
"MIT"
] | null | null | null |
"""
uresnext_nlocal with global attention
"""
from model.block import *
import torch.nn as nn
import torch.nn.functional as F
import torch
import torchvision.models as models
class AttUResNeXt_feat_nlocal(nn.Module):
"""Decoder part of the UNet
Parameters:
n_classes -- number of the classes of the given dataset
Tips:
set align_corners = True for better performance of semantic segmentation (https://github.com/pytorch/vision/issues/1708)
"""
def __init__(self, args, n_classes=6):
super().__init__()
self.n_classes = n_classes
# self.att_params = torch.nn.ParameterList([torch.nn.Parameter(torch.ones(1, dtype=torch.float32))
# for i in range(4)])
self.a1 = torch.nn.Parameter(torch.tensor(1,dtype=torch.float32))
self.a2 = torch.nn.Parameter(torch.tensor(1,dtype=torch.float32))
self.a3 = torch.nn.Parameter(torch.tensor(1,dtype=torch.float32))
self.a4 = torch.nn.Parameter(torch.tensor(1,dtype=torch.float32))
self.gap = nn.AdaptiveAvgPool2d((1, 1))
### encoder ###
resnext = models.resnext101_32x8d(pretrained=args.is_pretrained)
self.firstconv = resnext.conv1
self.firstbn = resnext.bn1
self.firstrelu = resnext.relu
self.firstmaxpool = resnext.maxpool
self.encoder1 = resnext.layer1
self.encoder2 = resnext.layer2
self.encoder3 = resnext.layer3
self.encoder4 = resnext.layer4
### decoder ###
# level 1
        self.nlocal1 = NLBlockND(2048, inter_channels=1024)  # half the inter channels for computational efficiency; used as the first block in forward()
self.conv1 = nn.Conv2d(2048, 1024, 3, padding='same')
self.attblock1 = AttBlock_v2(2048 + 2048, 2048)
# level 2
self.nlocal2 = NLBlockND(1024, inter_channels=512)
self.dconv1 = DoubleConv(2048, 1024)
self.attblock2 = AttBlock_v2(2048 + 1024, 1024)
self.conv2 = nn.Conv2d(1024, 512, 3, padding='same')
# level 3
self.nlocal3 = NLBlockND(512, inter_channels=256)
self.dconv2 = DoubleConv(1024, 512)
self.attblock3 = AttBlock_v2(2048 + 512, 512)
self.conv3 = nn.Conv2d(512, 256, 3, padding='same')
# level 4
self.dconv3 = DoubleConv(512, 256)
self.attblock4 = AttBlock_v2(2048 + 256, 256)
self.conv4 = nn.Conv2d(256, 64, 3, padding='same')
# level 5
self.dconv4 = DoubleConv(128, 64)
# level 6
self.dconv5 = DoubleConv(64, 64)
self.conv5 = nn.Conv2d(64, self.n_classes, 3, padding='same')
def forward(self, img):
### encoder ###
x1 = self.firstconv(img)
x1 = self.firstbn(x1)
e0 = self.firstrelu(x1)
e1 = self.firstmaxpool(e0)
e1 = self.encoder1(e1)
e2 = self.encoder2(e1)
e3 = self.encoder3(e2)
e4 = self.encoder4(e3)
### decoder ###
x = self.nlocal1(e4)
context = self.gap(x)
# level 1
att1 = self.attblock1(x, context)
# level 2
# interpolation -- mini-batch x channels x [optional depth] x [optional height] x width.
x = F.interpolate(x, size=(16,16), mode='bilinear', align_corners=True)
x = self.conv1(x)
x = torch.cat((e3, x), dim=1)
x = self.dconv1(x)
x = self.nlocal2(x)
att2 = self.attblock2(x, context)
# level 3
x = F.interpolate(x, size=(32,32), mode='bilinear', align_corners=True)
x = self.conv2(x)
x = torch.cat((e2, x), dim=1)
x = self.dconv2(x)
x = self.nlocal3(x)
att3 = self.attblock3(x, context)
# level 4
x = F.interpolate(x, size=(64,64), mode='bilinear', align_corners=True)
x = self.conv3(x)
x = torch.cat((e1, x), dim=1)
x = self.dconv3(x)
att4 = self.attblock4(x, context)
# level 5
# att_sum = (self.att_params[0]*att1 + self.att_params[1]*att2
# + self.att_params[2]*att3 + self.att_params[3]*att4) / 4.0 #weighted attention
att_sum = (self.a1*att1 + self.a2*att2 + self.a3*att3 + self.a4*att4) / 4.0 #weighted attention
x = F.interpolate(x, size=(128,128), mode='bilinear', align_corners=True)
x = self.conv4(x)
x = torch.cat((e0, x), dim=1)
x = self.dconv4(x)
x = att_sum * x
# level 6
x = F.interpolate(x, size=(256,256), mode='bilinear', align_corners=True)
x = self.dconv5(x)
x = self.conv5(x)
x = F.log_softmax(x, dim=1)
return x
if __name__ == '__main__':
### import for test ###
backbone = models.resnext101_32x8d(pretrained=False)
# print(backbone)
    ### usage ###
    # save the graph of the atturesnext
    from types import SimpleNamespace
    # hypothetical minimal stand-in for the argparse namespace the constructor expects;
    # is_pretrained is the only attribute read in __init__
    net = AttUResNeXt_feat_nlocal(SimpleNamespace(is_pretrained=False))
# from torchsummary import summary
# summary(backbone.cuda(), (3, 256, 256))
# data = torch.rand(1, 3, 256, 256)
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter(r'logs/atturesnext_feat')
writer.add_graph(net, torch.rand(1,3,256,256))
writer.close()
pass
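    # Sanity-check sketch (an illustration; assumes the custom blocks imported from
    # model.block preserve the spatial sizes implied by their names): a 256x256 RGB
    # batch should come out as per-pixel log-probabilities over the 6 classes.
    with torch.no_grad():
        out = net(torch.rand(2, 3, 256, 256))
    print(out.shape)                    # expected: torch.Size([2, 6, 256, 256])
    print(out.exp().sum(dim=1).mean())  # forward() ends in log_softmax, so this is ~1.0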
| 36.159722
| 128
| 0.593048
|
605d02b525c617770c14eee3d85eb57eaaee717f
| 723
|
py
|
Python
|
yeast8_FBA_low_high_glucose_fluxes/merge_fluxes_yeast8.py
|
NCBI-Codeathons/Integrating-multi-omics-data-in-yeast
|
b8be2470bfecc7c2d69debfca15881aab0124c5a
|
[
"MIT"
] | 3
|
2020-03-31T11:22:37.000Z
|
2021-03-20T09:56:23.000Z
|
yeast8_FBA_low_high_glucose_fluxes/merge_fluxes_yeast8.py
|
NCBI-Codeathons/Integrating-multi-omics-data-in-yeast
|
b8be2470bfecc7c2d69debfca15881aab0124c5a
|
[
"MIT"
] | null | null | null |
yeast8_FBA_low_high_glucose_fluxes/merge_fluxes_yeast8.py
|
NCBI-Codeathons/Integrating-multi-omics-data-in-yeast
|
b8be2470bfecc7c2d69debfca15881aab0124c5a
|
[
"MIT"
] | 2
|
2020-03-11T13:20:04.000Z
|
2020-03-13T20:38:15.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 13 12:47:52 2020
@author: samur
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import scipy.io
import os.path
from numpy import linalg as LA
import pandas as pd
from os import listdir
from os.path import isfile, join
import re
import csv
from copy import deepcopy
yeast8=pd.read_excel('Yeast8.xlsx')
#list(yeast8.columns)
highflux=pd.read_table('flux_profile_yeast8_highglucose.txt',index_col=None,header=None)
lowflux=pd.read_table('flux_profile_yeast8_lowglucose.txt',index_col=None,header=None)
yeast8['lowglucoseflux']=lowflux
yeast8['highglucoseflux']=highflux
yeast8.to_csv('yeast8_glucose_fluxes.csv')
| 21.909091
| 89
| 0.753804
|
36b9bfe4f04af50f48511e8cb6ff412abbb1acdf
| 1,134
|
py
|
Python
|
3_1_4_keypadpiano.py
|
hajin-kim/Python-EV3
|
4d590772ee82697f43dea5a878275a917f13acc5
|
[
"MIT"
] | null | null | null |
3_1_4_keypadpiano.py
|
hajin-kim/Python-EV3
|
4d590772ee82697f43dea5a878275a917f13acc5
|
[
"MIT"
] | null | null | null |
3_1_4_keypadpiano.py
|
hajin-kim/Python-EV3
|
4d590772ee82697f43dea5a878275a917f13acc5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env pybricks-micropython
from pybricks.hubs import EV3Brick
from pybricks.ev3devices import (Motor, TouchSensor, ColorSensor,
InfraredSensor, UltrasonicSensor, GyroSensor)
from pybricks.parameters import Port, Stop, Direction, Button, Color
from pybricks.tools import wait, StopWatch, DataLog
from pybricks.robotics import DriveBase
from pybricks.media.ev3dev import SoundFile, ImageFile
# This program requires LEGO EV3 MicroPython v2.0 or higher.
# Click "Open user guide" on the EV3 extension tab for more information.
# Create your objects here.
ev3 = EV3Brick()
# Write your program here.
ev3.speaker.set_volume(25) # in percentage
ev3.speaker.beep()
while True:
wait(10)
if Button.LEFT in ev3.buttons.pressed():
ev3.speaker.beep(440, 20)
if Button.RIGHT in ev3.buttons.pressed():
ev3.speaker.beep(494, 20)
if Button.UP in ev3.buttons.pressed():
ev3.speaker.beep(554, 20)
if Button.CENTER in ev3.buttons.pressed():
ev3.speaker.beep(587, 20)
if Button.DOWN in ev3.buttons.pressed():
ev3.speaker.beep(659, 20)
| 27.658537
| 78
| 0.708995
|
148688603f2d4b75f1a1e3114a97f197120d5a64
| 3,514
|
py
|
Python
|
reactivex/scheduler/mainloop/pygamescheduler.py
|
christiansandberg/RxPY
|
036027d2858ea6c9d45839c863bd791e5bb50c36
|
[
"MIT"
] | null | null | null |
reactivex/scheduler/mainloop/pygamescheduler.py
|
christiansandberg/RxPY
|
036027d2858ea6c9d45839c863bd791e5bb50c36
|
[
"MIT"
] | null | null | null |
reactivex/scheduler/mainloop/pygamescheduler.py
|
christiansandberg/RxPY
|
036027d2858ea6c9d45839c863bd791e5bb50c36
|
[
"MIT"
] | null | null | null |
import logging
import threading
from typing import Any, Optional, TypeVar
from reactivex import abc, typing
from reactivex.internal import PriorityQueue
from reactivex.internal.constants import DELTA_ZERO
from ..periodicscheduler import PeriodicScheduler
from ..scheduleditem import ScheduledItem
_TState = TypeVar("_TState")
log = logging.getLogger("Rx")
class PyGameScheduler(PeriodicScheduler):
"""A scheduler that schedules works for PyGame.
Note that this class expects the caller to invoke run() repeatedly.
http://www.pygame.org/docs/ref/time.html
http://www.pygame.org/docs/ref/event.html"""
def __init__(self, pygame: Any):
"""Create a new PyGameScheduler.
Args:
pygame: The PyGame module to use; typically, you would get this by
import pygame
"""
super().__init__()
self._pygame = pygame # TODO not used, refactor to actually use pygame?
self._lock = threading.Lock()
self._queue: PriorityQueue[ScheduledItem] = PriorityQueue()
def schedule(
self, action: typing.ScheduledAction[_TState], state: Optional[_TState] = None
) -> abc.DisposableBase:
"""Schedules an action to be executed.
Args:
action: Action to be executed.
state: [Optional] state to be given to the action function.
Returns:
The disposable object used to cancel the scheduled action
(best effort).
"""
log.debug("PyGameScheduler.schedule(state=%s)", state)
return self.schedule_absolute(self.now, action, state=state)
def schedule_relative(
self,
duetime: typing.RelativeTime,
action: typing.ScheduledAction[_TState],
state: Optional[_TState] = None,
) -> abc.DisposableBase:
"""Schedules an action to be executed after duetime.
Args:
duetime: Relative time after which to execute the action.
action: Action to be executed.
state: [Optional] state to be given to the action function.
Returns:
The disposable object used to cancel the scheduled action
(best effort).
"""
duetime = max(DELTA_ZERO, self.to_timedelta(duetime))
return self.schedule_absolute(self.now + duetime, action, state=state)
def schedule_absolute(
self,
duetime: typing.AbsoluteTime,
action: typing.ScheduledAction[_TState],
state: Optional[_TState] = None,
) -> abc.DisposableBase:
"""Schedules an action to be executed at duetime.
Args:
duetime: Absolute time at which to execute the action.
action: Action to be executed.
state: [Optional] state to be given to the action function.
Returns:
The disposable object used to cancel the scheduled action
(best effort).
"""
duetime = self.to_datetime(duetime)
si: ScheduledItem = ScheduledItem(self, state, action, duetime)
with self._lock:
self._queue.enqueue(si)
return si.disposable
def run(self) -> None:
while self._queue:
with self._lock:
item: ScheduledItem = self._queue.peek()
diff = item.duetime - self.now
if diff > DELTA_ZERO:
break
item = self._queue.dequeue()
if not item.is_cancelled():
item.invoke()
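# Usage sketch (assumes pygame is installed; values are illustrative). The scheduler
# does nothing on its own: the host game loop must keep calling run() -- e.g. once per
# frame -- so that due items execute on the main thread.
#
#     import pygame
#     import reactivex
#     from reactivex.scheduler.mainloop import PyGameScheduler
#
#     pygame.init()
#     scheduler = PyGameScheduler(pygame)
#     reactivex.timer(1.0).subscribe(lambda _: print("tick"), scheduler=scheduler)
#
#     clock = pygame.time.Clock()
#     running = True
#     while running:
#         for event in pygame.event.get():
#             running = event.type != pygame.QUIT
#         scheduler.run()   # execute any scheduled items whose duetime has passed
#         clock.tick(60)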
| 30.824561
| 86
| 0.623506
|
21c1930d8f0c2baa48c618d813be9aba5edc67fa
| 1,555
|
py
|
Python
|
metaworld/policies/sawyer_drawer_open_v1_policy.py
|
Simon0xzx/metaworld
|
2d441eed70b6f5cb1f35883b0517c4bd2812268c
|
[
"MIT"
] | 681
|
2019-09-09T19:34:37.000Z
|
2022-03-31T12:17:58.000Z
|
metaworld/policies/sawyer_drawer_open_v1_policy.py
|
Simon0xzx/metaworld
|
2d441eed70b6f5cb1f35883b0517c4bd2812268c
|
[
"MIT"
] | 212
|
2019-09-18T14:43:44.000Z
|
2022-03-27T22:21:00.000Z
|
metaworld/policies/sawyer_drawer_open_v1_policy.py
|
Simon0xzx/metaworld
|
2d441eed70b6f5cb1f35883b0517c4bd2812268c
|
[
"MIT"
] | 157
|
2019-09-12T05:06:05.000Z
|
2022-03-29T14:47:24.000Z
|
import numpy as np
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
class SawyerDrawerOpenV1Policy(Policy):
@staticmethod
@assert_fully_parsed
def _parse_obs(obs):
return {
'hand_pos': obs[:3],
'drwr_pos': obs[3:6],
'unused_info': obs[6:],
}
def get_action(self, obs):
o_d = self._parse_obs(obs)
action = Action({
'delta_pos': np.arange(3),
'grab_effort': 3
})
# NOTE this policy looks different from the others because it must
# modify its p constant part-way through the task
pos_curr = o_d['hand_pos']
pos_drwr = o_d['drwr_pos']
# align end effector's Z axis with drawer handle's Z axis
if np.linalg.norm(pos_curr[:2] - pos_drwr[:2]) > 0.06:
to_pos = pos_drwr + np.array([0., 0., 0.3])
action['delta_pos'] = move(o_d['hand_pos'], to_pos, p=4.)
# drop down to touch drawer handle
elif abs(pos_curr[2] - pos_drwr[2]) > 0.04:
to_pos = pos_drwr
action['delta_pos'] = move(o_d['hand_pos'], to_pos, p=4.)
# push toward a point just behind the drawer handle
# also increase p value to apply more force
else:
to_pos = pos_drwr + np.array([0., -0.06, 0.])
action['delta_pos'] = move(o_d['hand_pos'], to_pos, p=50.)
# keep gripper open
action['grab_effort'] = -1.
return action.array
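# Usage sketch (observation values are hypothetical). The policy consumes the raw
# observation laid out as hand_pos (3) + drwr_pos (3) + unused_info and returns a
# 4-dim action array [dx, dy, dz, grab_effort]:
#
#     policy = SawyerDrawerOpenV1Policy()
#     obs = np.zeros(9)
#     obs[0:3] = [0.0, 0.4, 0.2]    # current end-effector position (made up)
#     obs[3:6] = [0.0, 0.7, 0.05]   # drawer handle position (made up)
#     act = policy.get_action(obs)  # np.ndarray of shape (4,); gripper kept open (-1)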
| 31.734694
| 74
| 0.578778
|
ad2f7440bd142e92bea6cf4f3b56e01632d58852
| 1,972
|
py
|
Python
|
ibmsecurity/isam/aac/api_protection/grants_user.py
|
zone-zero/ibmsecurity
|
7d3e38104b67e1b267e18a44845cb756a5302c3d
|
[
"Apache-2.0"
] | 46
|
2017-03-21T21:08:59.000Z
|
2022-02-20T22:03:46.000Z
|
ibmsecurity/isam/aac/api_protection/grants_user.py
|
zone-zero/ibmsecurity
|
7d3e38104b67e1b267e18a44845cb756a5302c3d
|
[
"Apache-2.0"
] | 201
|
2017-03-21T21:25:52.000Z
|
2022-03-30T21:38:20.000Z
|
ibmsecurity/isam/aac/api_protection/grants_user.py
|
zone-zero/ibmsecurity
|
7d3e38104b67e1b267e18a44845cb756a5302c3d
|
[
"Apache-2.0"
] | 91
|
2017-03-22T16:25:36.000Z
|
2022-02-04T04:36:29.000Z
|
import logging
logger = logging.getLogger(__name__)
def get(isamAppliance, userid, check_mode=False, force=False):
"""
Get grants by userid
"""
return isamAppliance.invoke_get("Get grants by userid",
"/iam/access/v8/grants/userIds/{0}".format(userid))
def get_recent(isamAppliance, userid, timestamp, token_type='refresh_token', check_mode=False, force=False):
"""
Get grants by userid and tokens more recent than given timestamp, also pass back any other tokens found
token_type will check refresh tokens and can be changed or ignored by passing None
other tokens could include recent access tokens (not refresh tokens)
"""
ret_obj = get(isamAppliance=isamAppliance, userid=userid)
recent_tokens = []
other_tokens = []
for attrbs in ret_obj['data']:
for tok in attrbs['tokens']:
if tok['dateCreated'] > timestamp and (tok['subType'] == token_type or token_type is None):
recent_tokens.append(tok)
else:
other_tokens.append(tok)
new_ret_obj = isamAppliance.create_return_object()
new_ret_obj['data']['recent'] = recent_tokens
new_ret_obj['data']['other'] = other_tokens
return new_ret_obj
def delete(isamAppliance, userid, check_mode=False, force=False):
"""
Delete grants by userid
"""
if force is True or _check(isamAppliance, userid) is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_delete("Delete grants by userid",
"/iam/access/v8/grants/userIds/{0}".format(userid))
return isamAppliance.create_return_object()
def _check(isamAppliance, userid):
try:
ret_obj = get(isamAppliance, userid)
if len(ret_obj['data']) > 0:
return True
    except Exception:
        # treat lookup failures as "user has no grants"
        pass
return False
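# Worked sketch with a hypothetical stub appliance (not part of ibmsecurity), showing
# how get_recent() splits a user's tokens on dateCreated and subType:
#
#     class FakeAppliance:
#         def invoke_get(self, description, uri):
#             return {'data': [{'tokens': [
#                 {'dateCreated': 1700000000, 'subType': 'refresh_token'},
#                 {'dateCreated': 1600000000, 'subType': 'refresh_token'},
#             ]}]}
#         def create_return_object(self, changed=False):
#             return {'changed': changed, 'data': {}}
#
#     result = get_recent(FakeAppliance(), 'jdoe', timestamp=1650000000)
#     # result['data']['recent'] -> [the token created at 1700000000]
#     # result['data']['other']  -> [the token created at 1600000000]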
| 31.301587
| 108
| 0.646552
|
bb44dd855b4a8e946ebc40f5b58db5f95d87ccb7
| 329
|
py
|
Python
|
networks/admin.py
|
KeoH/orchestrapi
|
575e66a86c42b5c249fd943bb5f40c8c310139aa
|
[
"MIT"
] | 1
|
2021-07-05T19:37:37.000Z
|
2021-07-05T19:37:37.000Z
|
networks/admin.py
|
KeoH/orchestrapi
|
575e66a86c42b5c249fd943bb5f40c8c310139aa
|
[
"MIT"
] | 6
|
2020-06-05T19:30:52.000Z
|
2021-07-05T19:28:53.000Z
|
networks/admin.py
|
KeoH/orchestrapi
|
575e66a86c42b5c249fd943bb5f40c8c310139aa
|
[
"MIT"
] | 1
|
2020-05-15T23:58:24.000Z
|
2020-05-15T23:58:24.000Z
|
from django.contrib import admin
from .models import NetworkBridge
class NetworkBridgeAdmin(admin.ModelAdmin):
list_display = ['slug', 'name', 'network_id', '_num_of_containers']
def _num_of_containers(self, obj):
return len(obj.get_containers_list())
admin.site.register(NetworkBridge, NetworkBridgeAdmin)
| 23.5
| 71
| 0.759878
|
1dc794d33fd362f3326cb2457d2a822da4e6ab39
| 891
|
py
|
Python
|
DaniMundo/blog/admin.py
|
vlajna95/DaniMundo_semaphore
|
36e39eaff07fd09ab2e1792fc13debf3f8f971ae
|
[
"CC0-1.0"
] | null | null | null |
DaniMundo/blog/admin.py
|
vlajna95/DaniMundo_semaphore
|
36e39eaff07fd09ab2e1792fc13debf3f8f971ae
|
[
"CC0-1.0"
] | null | null | null |
DaniMundo/blog/admin.py
|
vlajna95/DaniMundo_semaphore
|
36e39eaff07fd09ab2e1792fc13debf3f8f971ae
|
[
"CC0-1.0"
] | null | null | null |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .forms import CustomUserCreationForm, CustomUserChangeForm
from .models import CustomUser, Article
@admin.register(CustomUser)
class CustomUserAdmin(UserAdmin):
model = CustomUser
add_form = CustomUserCreationForm
form = CustomUserChangeForm
# prepopulated_fields = {"slug": ("username", )}
list_display = ["first_name", "last_name", "username", "email", "date_joined", "last_login"]
list_filter = ["date_joined", "last_login"]
search_fields = ["first_name", "last_name", "username", "email"]
@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("title", )}
list_display = ["title", "summary", "status", "date_created", "date_updated"]
list_filter = ["author", "status", "date_created", "date_updated"]
search_fields = ["title", "summary", "body"]
| 37.125
| 93
| 0.744108
|
1ad96cc2f7e3fbf227e17dbd352d0b1df51f4b90
| 3,977
|
py
|
Python
|
chinese_whispers/chinese_whispers.py
|
3wille/chinese-whispers-python
|
e7257a69561858ef8ae278a595f76bbad2227d6a
|
[
"MIT"
] | null | null | null |
chinese_whispers/chinese_whispers.py
|
3wille/chinese-whispers-python
|
e7257a69561858ef8ae278a595f76bbad2227d6a
|
[
"MIT"
] | null | null | null |
chinese_whispers/chinese_whispers.py
|
3wille/chinese-whispers-python
|
e7257a69561858ef8ae278a595f76bbad2227d6a
|
[
"MIT"
] | null | null | null |
import random
import sys
from collections import defaultdict
from math import log2
from operator import itemgetter
if sys.version_info[:2] >= (3, 5):
from typing import Any, Callable, Sequence, Tuple, ItemsView, Union, Dict, Optional, Set
from networkx.classes import Graph
def top_weighting(G, node, neighbor):
# type: (Graph, Any, Any) -> float
"""A weight is the edge weight."""
return G[node][neighbor].get('weight', 1.)
def lin_weighting(G, node, neighbor):
# type: (Graph, Any, Any) -> float
"""A weight is the edge weight divided to the node degree."""
return G[node][neighbor].get('weight', 1.) / G.degree[neighbor]
def log_weighting(G, node, neighbor):
# type: (Graph, Any, Any) -> float
"""A weight is the edge weight divided to the log2 of node degree."""
return G[node][neighbor].get('weight', 1.) / log2(G.degree[neighbor] + 1)
"""Shortcuts for the node weighting functions."""
WEIGHTING = {
'top': top_weighting,
'lin': lin_weighting,
'log': log_weighting
} # type: Dict[str, Callable[[Graph, Any, Any], float]]
def chinese_whispers(G, weighting='top', iterations=20, seed=None, label_key='label'):
# type: (Graph, Union[str, Callable[[Graph, Any, Any], float]], int, Optional[int], str) -> Graph
"""Perform clustering of nodes in a graph G using the 'weighting' method.
Three weighing schemas are available:
top
Just use the edge weights from the input graph.
lin
Normalize an edge weight by the degree of the related node.
log
Normalize an edge weight by the logarithm of the related node degree.
It is possible to specify the maximum number of iterations as well as the random seed to use."""
if isinstance(weighting, str):
weighting_func = WEIGHTING[weighting]
else:
weighting_func = weighting
    if seed is not None:
rng = random.Random(seed)
shuffle_func = rng.shuffle
choice_func = rng.choice
else:
shuffle_func = random.shuffle
choice_func = random.choice
for i, node in enumerate(G):
G.nodes[node][label_key] = i + 1
nodes = list(G)
for i in range(iterations):
changes = False
shuffle_func(nodes)
for node in nodes:
previous = G.nodes[node][label_key]
if G[node]:
scores = score(G, node, weighting_func, label_key)
G.nodes[node][label_key] = random_argmax(scores.items(), choice_func=choice_func)
changes = changes or previous != G.nodes[node][label_key]
if not changes:
break
return G
def score(G, node, weighting_func, label_key):
# type: (Graph, Any, Callable[[Graph, Any, Any], float], str) -> Dict[int, float]
"""Compute label scores in the given node neighborhood."""
scores = defaultdict(float) # type: Dict[int, float]
if node not in G:
return scores
for neighbor in G[node]:
scores[G.nodes[neighbor][label_key]] += weighting_func(G, node, neighbor)
return scores
def random_argmax(items, choice_func=random.choice):
# type: (Union[Sequence[Tuple[Any, float]], ItemsView[Any, float]], Callable[[Sequence[Any]], Any]) -> Optional[int]
"""An argmax function that breaks the ties randomly."""
if not items:
# https://github.com/python/mypy/issues/1003
return None
_, maximum = max(items, key=itemgetter(1))
keys = [k for k, v in items if v == maximum]
return choice_func(keys)
def aggregate_clusters(G, label_key='label'):
# type: (Graph, str) -> Dict[int, Set[Any]]
"""Produce a dictionary with the keys being cluster IDs and the values being sets of cluster elements."""
clusters = {} # type: Dict[int, Set[Any]]
for node in G:
label = G.nodes[node][label_key]
if label not in clusters:
clusters[label] = {node}
else:
clusters[label].add(node)
return clusters
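# Usage sketch (assumes networkx is installed):
#
#     import networkx as nx
#
#     G = nx.karate_club_graph()
#     chinese_whispers(G, weighting='top', seed=1337)       # writes G.nodes[n]['label']
#     for label, nodes in sorted(aggregate_clusters(G).items()):
#         print(label, sorted(nodes))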
| 28.818841
| 120
| 0.641438
|
d784b20db927c1098c92f89a39de29996269b94f
| 142
|
py
|
Python
|
patent/patent/utils/__init__.py
|
ld-xy/Classified-prediction-of-Medical-images
|
62431f72ab8698821f9955319db165f53f907e85
|
[
"Apache-2.0"
] | null | null | null |
patent/patent/utils/__init__.py
|
ld-xy/Classified-prediction-of-Medical-images
|
62431f72ab8698821f9955319db165f53f907e85
|
[
"Apache-2.0"
] | null | null | null |
patent/patent/utils/__init__.py
|
ld-xy/Classified-prediction-of-Medical-images
|
62431f72ab8698821f9955319db165f53f907e85
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2021/7/17 14:24
# @Author : LD!!
# @FileName: __init__.py
# @Software: PyCharm
# @QQ :1614030192
| 17.75
| 28
| 0.56338
|
ab6063c03edc216d0802883c0357b70576cf69fb
| 4,017
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/community/kubevirt/plugins/modules/kubevirt_preset.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/community/kubevirt/plugins/modules/kubevirt_preset.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/community/kubevirt/plugins/modules/kubevirt_preset.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: kubevirt_preset
short_description: Manage KubeVirt virtual machine presets
description:
- Use Openshift Python SDK to manage the state of KubeVirt virtual machine presets.
author: KubeVirt Team (@kubevirt)
options:
state:
description:
- Create or delete virtual machine presets.
default: "present"
choices:
- present
- absent
type: str
name:
description:
- Name of the virtual machine preset.
required: true
type: str
namespace:
description:
- Namespace where the virtual machine preset exists.
required: true
type: str
selector:
description:
- "Selector is a label query over a set of virtual machine preset."
type: dict
extends_documentation_fragment:
- community.kubernetes.k8s_auth_options
- community.kubevirt.kubevirt_vm_options
- community.kubevirt.kubevirt_common_options
requirements:
- python >= 2.7
- openshift >= 0.8.2
'''
EXAMPLES = '''
- name: Create virtual machine preset 'vmi-preset-small'
community.kubevirt.kubevirt_preset:
state: present
name: vmi-preset-small
namespace: vms
memory: 64M
selector:
matchLabels:
kubevirt.io/vmPreset: vmi-preset-small
- name: Remove virtual machine preset 'vmi-preset-small'
community.kubevirt.kubevirt_preset:
state: absent
name: vmi-preset-small
namespace: vms
'''
RETURN = '''
kubevirt_preset:
description:
- The virtual machine preset managed by the user.
- "This dictionary contains all values returned by the KubeVirt API all options
are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachineinstancepreset)"
returned: success
type: complex
contains: {}
'''
import copy
import traceback
from ansible_collections.community.kubernetes.plugins.module_utils.common import AUTH_ARG_SPEC
from ansible_collections.community.kubevirt.plugins.module_utils.kubevirt import (
virtdict,
KubeVirtRawModule,
VM_COMMON_ARG_SPEC
)
KIND = 'VirtualMachineInstancePreset'
VMP_ARG_SPEC = {
'selector': {'type': 'dict'},
}
class KubeVirtVMPreset(KubeVirtRawModule):
@property
def argspec(self):
""" argspec property builder """
argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
argument_spec.update(VM_COMMON_ARG_SPEC)
argument_spec.update(VMP_ARG_SPEC)
return argument_spec
def execute_module(self):
# Parse parameters specific for this module:
definition = virtdict()
selector = self.params.get('selector')
if selector:
definition['spec']['selector'] = selector
# FIXME: Devices must be set, but we don't yet support any
# attributes there, remove when we do:
definition['spec']['domain']['devices'] = dict()
# defaults for template
defaults = {'disks': [], 'volumes': [], 'interfaces': [], 'networks': []}
        # Execute the CRUD of VM:
dummy, definition = self.construct_vm_definition(KIND, definition, definition, defaults)
result_crud = self.execute_crud(KIND, definition)
changed = result_crud['changed']
result = result_crud.pop('result')
# Return from the module:
self.exit_json(**{
'changed': changed,
'kubevirt_preset': result,
'result': result_crud,
})
def main():
module = KubeVirtVMPreset()
try:
module.execute_module()
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| 25.916129
| 120
| 0.66119
|
b31f244781021b0e996ba86ec22bec3ad107fe46
| 8,092
|
py
|
Python
|
library/nsxt_uplink_profiles.py
|
mbovo/ansible-for-nsxt
|
dc057d876e1f0de4bf4caa244577b07b96d2a517
|
[
"BSD-2-Clause"
] | null | null | null |
library/nsxt_uplink_profiles.py
|
mbovo/ansible-for-nsxt
|
dc057d876e1f0de4bf4caa244577b07b96d2a517
|
[
"BSD-2-Clause"
] | null | null | null |
library/nsxt_uplink_profiles.py
|
mbovo/ansible-for-nsxt
|
dc057d876e1f0de4bf4caa244577b07b96d2a517
|
[
"BSD-2-Clause"
] | 1
|
2019-02-01T08:58:40.000Z
|
2019-02-01T08:58:40.000Z
|
#!/usr/bin/env python
#
# Copyright 2018 VMware, Inc.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''TODO
author: Rahul Raghuvanshi
'''
EXAMPLES = '''
- nsxt_uplink_profiles:
hostname: "10.192.167.137"
username: "admin"
password: "Admin!23Admin"
validate_certs: False
'''
RETURN = '''# '''
import json, time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, request
from ansible.module_utils._text import to_native
def get_profile_params(args=None):
args_to_remove = ['state', 'username', 'password', 'port', 'hostname', 'validate_certs']
for key in args_to_remove:
args.pop(key, None)
for key, value in args.copy().items():
        if value is None:
args.pop(key, None)
return args
def get_host_switch_profiles(module, manager_url, mgr_username, mgr_password, validate_certs):
try:
(rc, resp) = request(manager_url+ '/host-switch-profiles', headers=dict(Accept='application/json'),
url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True)
except Exception as err:
module.fail_json(msg='Error accessing host profiles. Error [%s]' % (to_native(err)))
return resp
def get_uplink_profile_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, display_name):
host_switch_profiles = get_host_switch_profiles(module, manager_url, mgr_username, mgr_password, validate_certs)
for host_switch_profile in host_switch_profiles['results']:
if host_switch_profile.__contains__('display_name') and host_switch_profile['display_name'] == display_name:
return host_switch_profile
return None
def check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, profile_params):
existing_profile = get_uplink_profile_from_display_name(module, manager_url, mgr_username, mgr_password, validate_certs, profile_params['display_name'])
if existing_profile is None:
return False
if existing_profile.__contains__('mtu') and profile_params.__contains__('mtu') and \
existing_profile['mtu'] != profile_params['mtu']:
return True
if existing_profile.__contains__('transport_vlan') and profile_params.__contains__('transport_vlan') and \
existing_profile['transport_vlan'] != profile_params['transport_vlan']:
return True
return False
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(display_name=dict(required=True, type='str'),
mtu=dict(required=False, type='int'),
teaming=dict(required=True, type='dict',
name=dict(required=True, type='str'),
policy=dict(required=True, type='str'),
standby_list=dict(required=False, type='list'),
active_list=dict(required=True, type='list')),
transport_vlan=dict(required=False, type='int'),
named_teamings=dict(required=False, type='list'),
lags=dict(required=False, type='list'),
resource_type=dict(required=True, type='str', choices=['UplinkHostSwitchProfile']),
state=dict(required=True, choices=['present', 'absent']))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
profile_params = get_profile_params(module.params.copy())
state = module.params['state']
mgr_hostname = module.params['hostname']
mgr_username = module.params['username']
mgr_password = module.params['password']
validate_certs = module.params['validate_certs']
display_name = module.params['display_name']
manager_url = 'https://{}/api/v1'.format(mgr_hostname)
host_switch_profile_dict = get_uplink_profile_from_display_name (module, manager_url, mgr_username, mgr_password, validate_certs, display_name)
host_switch_profile_id, revision = None, None
if host_switch_profile_dict:
host_switch_profile_id = host_switch_profile_dict['id']
revision = host_switch_profile_dict['_revision']
if state == 'present':
headers = dict(Accept="application/json")
headers['Content-Type'] = 'application/json'
updated = check_for_update(module, manager_url, mgr_username, mgr_password, validate_certs, profile_params)
if not updated:
# add the block
if module.check_mode:
module.exit_json(changed=True, debug_out=str(json.dumps(profile_params)), id='12345')
request_data = json.dumps(profile_params)
try:
if host_switch_profile_id:
module.exit_json(changed=False, id=host_switch_profile_id, message="Uplink profile with display_name %s already exist."% module.params['display_name'])
(rc, resp) = request(manager_url+ '/host-switch-profiles', data=request_data, headers=headers, method='POST',
url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True)
except Exception as err:
module.fail_json(msg="Failed to add host profile. Request body [%s]. Error[%s]." % (request_data, to_native(err)))
module.exit_json(changed=True, id=resp["id"], body= str(resp), message="host profile with display name %s created." % module.params['display_name'])
else:
if module.check_mode:
module.exit_json(changed=True, debug_out=str(json.dumps(profile_params)), id=host_switch_profile_id)
profile_params['_revision'] = revision # update current revision
request_data = json.dumps(profile_params)
id = host_switch_profile_id
try:
(rc, resp) = request(manager_url+ '/host-switch-profiles/%s' % id, data=request_data, headers=headers, method='PUT',
url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs, ignore_errors=True)
except Exception as err:
module.fail_json(msg="Failed to update host profile with id %s. Request body [%s]. Error[%s]." % (id, request_data, to_native(err)))
module.exit_json(changed=True, id=resp["id"], body= str(resp), message="host profile with id %s updated." % id)
elif state == 'absent':
# delete the array
id = host_switch_profile_id
if id is None:
module.exit_json(changed=False, msg='No host switch profile exist with display name %s' % display_name)
if module.check_mode:
module.exit_json(changed=True, debug_out=str(json.dumps(profile_params)), id=id)
try:
(rc, resp) = request(manager_url + "/host-switch-profiles/%s" % id, method='DELETE',
url_username=mgr_username, url_password=mgr_password, validate_certs=validate_certs)
except Exception as err:
module.fail_json(msg="Failed to delete host profile with id %s. Error[%s]." % (id, to_native(err)))
module.exit_json(changed=True, object_name=id, message="host profile with id %s deleted." % id)
if __name__ == '__main__':
main()
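# Illustrative task sketch (all values hypothetical) covering the required options in the
# argument_spec above; the shape of the active_list entries follows the NSX-T
# UplinkHostSwitchProfile API and is an assumption, not something this module enforces:
#
#     - nsxt_uplink_profiles:
#         hostname: "10.192.167.137"
#         username: "admin"
#         password: "Admin!23Admin"
#         validate_certs: False
#         display_name: "uplinkProfile1"
#         resource_type: "UplinkHostSwitchProfile"
#         mtu: 1600
#         transport_vlan: 0
#         teaming:
#           name: "teaming-1"
#           policy: "FAILOVER_ORDER"
#           active_list:
#             - uplink_name: "uplink-1"
#               uplink_type: "PNIC"
#         state: "present"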
| 49.341463
| 165
| 0.700692
|
5c4eb065634013ed5319dba055bda7285af57ae4
| 5,947
|
py
|
Python
|
joulescope/parameters_v1.py
|
rnestler/pyjoulescope
|
b9eff73d2236e05d5c3631dbd112c1ef54854005
|
[
"Apache-2.0"
] | null | null | null |
joulescope/parameters_v1.py
|
rnestler/pyjoulescope
|
b9eff73d2236e05d5c3631dbd112c1ef54854005
|
[
"Apache-2.0"
] | null | null | null |
joulescope/parameters_v1.py
|
rnestler/pyjoulescope
|
b9eff73d2236e05d5c3631dbd112c1ef54854005
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Define the JouleScope parameters available to the application.
"""
from .parameter import Parameter
# list of [param_name, permission, path, default, [(value_name, value), ...]]
# * permission is either 'r' (read-only) or 'rw' (read-write)
# * path is the USB (request, index) for the transaction
# * default is the name for the default value
PARAMETERS = [
Parameter(
'sensor_power',
'rw',
'setting',
'on',
[
('off', 0),
('on', 1),
]
),
Parameter(
'source',
'rw',
'setting',
'off',
[
('off', 0x00),
('raw', 0xC0, ['on']),
('pattern_usb', 0x09),
('pattern_control', 0x0A),
('pattern_sensor', 0xAF),
],
),
Parameter(
'i_range',
'rw',
'setting',
'off',
[
('auto', 0x80, ['on']),
('10 A', 0x01, ['0', 0]),
('2 A', 0x02, ['1', 1]),
('180 mA', 0x04, ['2', 2]),
('18 mA', 0x08, ['3', 3]),
('1.8 mA', 0x10, ['4', 4]),
('180 µA', 0x20, ['5', 5]),
('18 µA', 0x40, ['6', 6]),
('off', 0x00),
],
),
Parameter(
'v_range',
'rw',
'setting',
'15V',
[
('15V', 0, ['low', 0]),
('5V', 1, ['high', 1]),
],
),
Parameter(
'ovr_to_lsb',
'rw',
'setting',
'off',
[
('off', 0),
('on', 1),
],
),
Parameter(
'trigger_source',
'rw',
'extio',
'auto',
[
('auto', 0),
('gpi0', 2),
('gpi1', 3),
],
),
Parameter(
'current_lsb',
'rw',
'extio',
'normal',
[
('normal', 0),
('gpi0', 2),
('gpi1', 3),
],
),
Parameter(
'voltage_lsb',
'rw',
'extio',
'normal',
[
('normal', 0),
('gpi0', 2),
('gpi1', 3),
],
),
Parameter(
'gpo0',
'rw',
'extio',
'0',
[
('0', 0, [0]),
('1', 1, [1]),
('start_pulse', 2),
('sample_toggle', 3),
],
),
Parameter(
'gpo1',
'rw',
'extio',
'0',
[
('0', 0, [0]),
('1', 1, [1]),
],
),
Parameter(
'io_voltage',
'rw',
'extio',
'3.3V',
[
('1.8V', 1800),
('2.1V', 2100),
('2.5V', 2500),
('2.7V', 2700),
('3.0V', 3000),
('3.3V', 3300),
('5.0V', 5000),
],
),
Parameter(
'control_test_mode',
'rw',
'setting',
'normal',
[
('normal', 0x03),
('usb', 0x81),
('fpga', 0x82),
('both', 0x83), # also set 'source' to 'pattern_sensor'
],
),
Parameter(
'transfer_length',
'rw',
None, # when stream is configured
'256',
[('%d' % (2**x), (2**x)) for x in range(0, 9)],
'packets',
),
Parameter(
'transfer_outstanding',
'rw',
None, # when stream is configured
'8',
[('%d' % (2**x), (2**x)) for x in range(0, 4)],
),
Parameter(
'current_ranging', # virtual parameter!
'rw',
'current_ranging',
None,
None, # parse into current_ranging fields: type, samples_pre, samples_window, samples_post
),
Parameter(
'current_ranging_type',
'rw',
'current_ranging',
'mean',
[
('off', 'off'),
('mean', 'mean'),
('NaN', 'nan', ['nan'])
],
),
Parameter(
'current_ranging_samples_pre',
'rw',
'current_ranging',
'0',
[(str(d), d, [d]) for d in range(9)]
),
Parameter(
'current_ranging_samples_window',
'rw',
'current_ranging',
'n',
[('n', 'n')] + [(str(d), d, [d]) for d in range(13)]
),
Parameter(
'current_ranging_samples_post',
'rw',
'current_ranging',
'0',
[(str(d), d, [d]) for d in range(9)]
),
]
def _lookup_construct(x):
fwd = {}
rev = {}
for p in x:
d_fwd = {}
d_rev = {}
fwd[p.name] = d_fwd
rev[p.name] = d_rev
for value_name, value, aliases in p.values:
d_fwd[value_name] = value
d_rev[value] = value_name
for alias in aliases:
d_fwd[alias] = value
return fwd, rev
PARAMETERS_DICT = dict((p.name, p) for p in PARAMETERS)
_TO_VALUE, _TO_NAME = _lookup_construct(PARAMETERS)
def name_to_value(param_name, value_name):
return _TO_VALUE[param_name][value_name]
def value_to_name(param_name, value):
return _TO_NAME[param_name][value]
PARAMETERS_DEFAULTS = {
'auto': {
'source': 'on',
'i_range': 'auto',
'v_range': '15V',
},
'off': {
'source': 'off',
},
'ignore': {},
}
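# Usage sketch (values taken from the tables above):
#
#     name_to_value('i_range', 'auto')   # -> 0x80
#     name_to_value('i_range', 'on')     # -> 0x80, via the ['on'] alias
#     value_to_name('v_range', 1)        # -> '5V'
#     PARAMETERS_DEFAULTS['auto']        # -> {'source': 'on', 'i_range': 'auto', 'v_range': '15V'}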
| 22.107807
| 99
| 0.420548
|
c26ff3478a601ba2802359ecb516719ae4f99315
| 17,202
|
py
|
Python
|
intersight/model/chassis_config_result_entry.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 5
|
2021-12-16T15:13:32.000Z
|
2022-03-29T16:09:54.000Z
|
intersight/model/chassis_config_result_entry.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 4
|
2022-01-25T19:05:51.000Z
|
2022-03-29T20:18:37.000Z
|
intersight/model/chassis_config_result_entry.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 2
|
2020-07-07T15:01:08.000Z
|
2022-01-31T04:27:35.000Z
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.chassis_config_result_entry_all_of import ChassisConfigResultEntryAllOf
from intersight.model.chassis_config_result_relationship import ChassisConfigResultRelationship
from intersight.model.display_names import DisplayNames
from intersight.model.mo_base_mo_relationship import MoBaseMoRelationship
from intersight.model.mo_tag import MoTag
from intersight.model.mo_version_context import MoVersionContext
from intersight.model.policy_abstract_config_result_entry import PolicyAbstractConfigResultEntry
from intersight.model.policy_config_result_context import PolicyConfigResultContext
globals()['ChassisConfigResultEntryAllOf'] = ChassisConfigResultEntryAllOf
globals()['ChassisConfigResultRelationship'] = ChassisConfigResultRelationship
globals()['DisplayNames'] = DisplayNames
globals()['MoBaseMoRelationship'] = MoBaseMoRelationship
globals()['MoTag'] = MoTag
globals()['MoVersionContext'] = MoVersionContext
globals()['PolicyAbstractConfigResultEntry'] = PolicyAbstractConfigResultEntry
globals()['PolicyConfigResultContext'] = PolicyConfigResultContext
class ChassisConfigResultEntry(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'CHASSIS.CONFIGRESULTENTRY': "chassis.ConfigResultEntry",
},
('object_type',): {
'CHASSIS.CONFIGRESULTENTRY': "chassis.ConfigResultEntry",
},
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_id': (str,), # noqa: E501
'object_type': (str,), # noqa: E501
'config_result': (ChassisConfigResultRelationship,), # noqa: E501
'account_moid': (str,), # noqa: E501
'create_time': (datetime,), # noqa: E501
'domain_group_moid': (str,), # noqa: E501
'mod_time': (datetime,), # noqa: E501
'moid': (str,), # noqa: E501
'owners': ([str], none_type,), # noqa: E501
'shared_scope': (str,), # noqa: E501
'tags': ([MoTag], none_type,), # noqa: E501
'version_context': (MoVersionContext,), # noqa: E501
'ancestors': ([MoBaseMoRelationship], none_type,), # noqa: E501
'parent': (MoBaseMoRelationship,), # noqa: E501
'permission_resources': ([MoBaseMoRelationship], none_type,), # noqa: E501
'display_names': (DisplayNames,), # noqa: E501
'completed_time': (str,), # noqa: E501
'context': (PolicyConfigResultContext,), # noqa: E501
'message': (str,), # noqa: E501
'owner_id': (str,), # noqa: E501
'state': (str,), # noqa: E501
'type': (str,), # noqa: E501
}
@cached_property
def discriminator():
val = {
}
if not val:
return None
return {'class_id': val}
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
'config_result': 'ConfigResult', # noqa: E501
'account_moid': 'AccountMoid', # noqa: E501
'create_time': 'CreateTime', # noqa: E501
'domain_group_moid': 'DomainGroupMoid', # noqa: E501
'mod_time': 'ModTime', # noqa: E501
'moid': 'Moid', # noqa: E501
'owners': 'Owners', # noqa: E501
'shared_scope': 'SharedScope', # noqa: E501
'tags': 'Tags', # noqa: E501
'version_context': 'VersionContext', # noqa: E501
'ancestors': 'Ancestors', # noqa: E501
'parent': 'Parent', # noqa: E501
'permission_resources': 'PermissionResources', # noqa: E501
'display_names': 'DisplayNames', # noqa: E501
'completed_time': 'CompletedTime', # noqa: E501
'context': 'Context', # noqa: E501
'message': 'Message', # noqa: E501
'owner_id': 'OwnerId', # noqa: E501
'state': 'State', # noqa: E501
'type': 'Type', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ChassisConfigResultEntry - a model defined in OpenAPI
Args:
Keyword Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "chassis.ConfigResultEntry", must be one of ["chassis.ConfigResultEntry", ] # noqa: E501
object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.. defaults to "chassis.ConfigResultEntry", must be one of ["chassis.ConfigResultEntry", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
config_result (ChassisConfigResultRelationship): [optional] # noqa: E501
account_moid (str): The Account ID for this managed object.. [optional] # noqa: E501
create_time (datetime): The time when this managed object was created.. [optional] # noqa: E501
domain_group_moid (str): The DomainGroup ID for this managed object.. [optional] # noqa: E501
mod_time (datetime): The time when this managed object was last modified.. [optional] # noqa: E501
moid (str): The unique identifier of this Managed Object instance.. [optional] # noqa: E501
owners ([str], none_type): [optional] # noqa: E501
shared_scope (str): Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.. [optional] # noqa: E501
tags ([MoTag], none_type): [optional] # noqa: E501
version_context (MoVersionContext): [optional] # noqa: E501
ancestors ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501
parent (MoBaseMoRelationship): [optional] # noqa: E501
permission_resources ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501
display_names (DisplayNames): [optional] # noqa: E501
completed_time (str): The completed time of the task in installer.. [optional] # noqa: E501
context (PolicyConfigResultContext): [optional] # noqa: E501
message (str): Localized message based on the locale setting of the user's context.. [optional] # noqa: E501
owner_id (str): The identifier of the object that owns the result message. The owner ID is used to correlate a given result entry to a task or entity. For example, a config result entry that describes the result of a workflow task may have the task's instance ID as the owner.. [optional] # noqa: E501
state (str): Values -- Ok, Ok-with-warning, Errored.. [optional] # noqa: E501
type (str): Indicates if the result is reported during the logical model validation/resource allocation phase. or the configuration applying phase. Values -- validation, config.. [optional] # noqa: E501
"""
class_id = kwargs.get('class_id', "chassis.ConfigResultEntry")
object_type = kwargs.get('object_type', "chassis.ConfigResultEntry")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'class_id': class_id,
'object_type': object_type,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error beause the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
ChassisConfigResultEntryAllOf,
PolicyAbstractConfigResultEntry,
],
'oneOf': [
],
}
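# Usage sketch (keyword names come from openapi_types above; values are illustrative):
#
#     entry = ChassisConfigResultEntry(state="Ok", message="Config applied", type="config")
#     entry.class_id     # -> "chassis.ConfigResultEntry" (discriminator default)
#     entry.object_type  # -> "chassis.ConfigResultEntry"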
| 54.609524
| 1,678
| 0.642948
|
3b50c1598a6c31aa66440678551a57cd125e9207
| 22,468
|
py
|
Python
|
tests/scripts/thread-cert/config.py
|
kkasperczyk-no/sdk-openthread
|
385e19da1ae15f27872c2543b97276a42f102ead
|
[
"BSD-3-Clause"
] | null | null | null |
tests/scripts/thread-cert/config.py
|
kkasperczyk-no/sdk-openthread
|
385e19da1ae15f27872c2543b97276a42f102ead
|
[
"BSD-3-Clause"
] | null | null | null |
tests/scripts/thread-cert/config.py
|
kkasperczyk-no/sdk-openthread
|
385e19da1ae15f27872c2543b97276a42f102ead
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import os
from enum import Enum
import coap
import dtls
import ipv6
import lowpan
import mesh_cop
import message
import mle
import net_crypto
import network_data
import network_diag
import network_layer
import simulator
import sniffer
from tlvs_parsing import SubTlvsFactory
# This extended address will generate the MESH_LOCAL_PREFIX
EXTENDED_PANID = '000db80000000000'
MESH_LOCAL_PREFIX = 'fd00:db8::/64'
MESH_LOCAL_PREFIX_REGEX_PATTERN = '^fd00:0?db8:0{0,4}:0{0,4}'
ROUTING_LOCATOR = '64/:0:ff:fe00:/16'
ROUTING_LOCATOR_REGEX_PATTERN = r'.*:(0)?:0{0,2}ff:fe00:\w{1,4}$'
LINK_LOCAL = 'fe80:/112'
LINK_LOCAL_REGEX_PATTERN = '^fe80:.*'
ALOC_FLAG_REGEX_PATTERN = '.*:fc..$'
LINK_LOCAL_All_THREAD_NODES_MULTICAST_ADDRESS = 'ff32:40:fd00:db8:0:0:0:1'
REALM_LOCAL_All_THREAD_NODES_MULTICAST_ADDRESS = 'ff33:40:fd00:db8:0:0:0:1'
REALM_LOCAL_ALL_NODES_ADDRESS = 'ff03::1'
REALM_LOCAL_ALL_ROUTERS_ADDRESS = 'ff03::2'
LINK_LOCAL_ALL_NODES_ADDRESS = 'ff02::1'
LINK_LOCAL_ALL_ROUTERS_ADDRESS = 'ff02::2'
DOMAIN_PREFIX = 'fd00:7d03:7d03:7d03::/64'
DOMAIN_PREFIX_REGEX_PATTERN = '^fd00:7d03:7d03:7d03:'
DOMAIN_PREFIX_ALTER = 'fd00:7d04:7d04:7d04::/64'
PORT_OFFSET = int(os.getenv('PORT_OFFSET', '0'))
BACKBONE_PREFIX = f'{0x9100 + PORT_OFFSET:04x}::/64'
BACKBONE_PREFIX_REGEX_PATTERN = f'^{0x9100 + PORT_OFFSET:04x}:'
BACKBONE_DOCKER_NETWORK_NAME = f'backbone{PORT_OFFSET}'
BACKBONE_IFNAME = 'eth0'
OTBR_DOCKER_IMAGE = os.getenv('OTBR_DOCKER_IMAGE', 'otbr-ot12-backbone-ci')
OTBR_DOCKER_NAME_PREFIX = f'otbr_{PORT_OFFSET}_'
ALL_NETWORK_BBRS_ADDRESS = 'ff32:40:fd00:db8:0:0:0:3'
ALL_DOMAIN_BBRS_ADDRESS = 'ff32:40:fd00:7d03:7d03:7d03:0:3'
ALL_DOMAIN_BBRS_ADDRESS_ALTER = 'ff32:40:fd00:7d04:7d04:7d04:0:3'
ONLINK_GUA_PREFIX = '2021::/64'
# Any address starts with 'fd' are considered on-link address.
ONLINK_PREFIX_REGEX_PATTERN = '^fd'
DEFAULT_MASTER_KEY = bytearray([
0x00,
0x11,
0x22,
0x33,
0x44,
0x55,
0x66,
0x77,
0x88,
0x99,
0xaa,
0xbb,
0xcc,
0xdd,
0xee,
0xff,
])
class ADDRESS_TYPE(Enum):
LINK_LOCAL = 'LINK_LOCAL'
GLOBAL = 'GLOBAL'
RLOC = 'RLOC'
ALOC = 'ALOC'
ML_EID = 'ML_EID'
DUA = 'DUA'
BACKBONE_GUA = 'BACKBONE_GUA'
OMR = 'OMR'
ONLINK_ULA = 'ONLINK_ULA'
ONLINK_GUA = 'ONLINK_GUA'
RSSI = {
'LINK_QULITY_0': -100,
'LINK_QULITY_1': -95,
'LINK_QULITY_2': -85,
'LINK_QULITY_3': -65,
}
SNIFFER_ID = int(os.getenv('SNIFFER_ID', 34))
PANID = 0xface
MAX_NEIGHBOR_AGE = 100
INFINITE_COST_TIMEOUT = 90
MAX_ADVERTISEMENT_INTERVAL = 32
MLE_END_DEVICE_TIMEOUT = 100
AQ_TIMEOUT = 3
ADDRESS_QUERY_INITIAL_RETRY_DELAY = 15
DEFAULT_CHILD_TIMEOUT = 6
VIRTUAL_TIME = int(os.getenv('VIRTUAL_TIME', 0))
PARENT_AGGREGATIOIN_DELAY = 5
DUA_DAD_DELAY = 5
LEADER_NOTIFY_SED_BY_CHILD_UPDATE_REQUEST = True
THREAD_VERSION_1_1 = 2
THREAD_VERSION_1_2 = 3
def create_default_network_data_prefix_sub_tlvs_factories():
return {
network_data.TlvType.HAS_ROUTE:
network_data.HasRouteFactory(routes_factory=network_data.RoutesFactory(
route_factory=network_data.RouteFactory())),
network_data.TlvType.BORDER_ROUTER:
network_data.BorderRouterFactory(),
network_data.TlvType.LOWPAN_ID:
network_data.LowpanIdFactory(),
}
def create_default_network_data_prefix_sub_tlvs_factory():
return network_data.PrefixSubTlvsFactory(
sub_tlvs_factories=create_default_network_data_prefix_sub_tlvs_factories())
def create_default_network_data_service_sub_tlvs_factories():
return {network_data.TlvType.SERVER: network_data.ServerFactory()}
def create_default_network_data_service_sub_tlvs_factory():
return network_data.ServiceSubTlvsFactory(
sub_tlvs_factories=create_default_network_data_service_sub_tlvs_factories())
def create_default_network_data_commissioning_data_sub_tlvs_factories():
return {
mesh_cop.TlvType.CHANNEL: mesh_cop.ChannelFactory(),
mesh_cop.TlvType.STEERING_DATA: mesh_cop.SteeringDataFactory(),
mesh_cop.TlvType.BORDER_AGENT_LOCATOR: mesh_cop.BorderAgentLocatorFactory(),
mesh_cop.TlvType.COMMISSIONER_SESSION_ID: mesh_cop.CommissionerSessionIdFactory(),
mesh_cop.TlvType.COMMISSIONER_UDP_PORT: mesh_cop.CommissionerUdpPortFactory(),
}
def create_default_network_data_commissioning_data_sub_tlvs_factory():
return network_data.CommissioningDataSubTlvsFactory(
sub_tlvs_factories=create_default_network_data_commissioning_data_sub_tlvs_factories())
def create_default_network_data_tlvs_factories():
return {
network_data.TlvType.PREFIX:
network_data.PrefixFactory(sub_tlvs_factory=create_default_network_data_prefix_sub_tlvs_factory()),
network_data.TlvType.SERVICE:
network_data.ServiceFactory(sub_tlvs_factory=create_default_network_data_service_sub_tlvs_factory()),
network_data.TlvType.COMMISSIONING:
network_data.CommissioningDataFactory(
sub_tlvs_factory=create_default_network_data_commissioning_data_sub_tlvs_factory()),
}
def create_default_network_data_tlvs_factory():
return network_data.NetworkDataTlvsFactory(sub_tlvs_factories=create_default_network_data_tlvs_factories())
def create_default_mle_tlv_route64_factory():
return mle.Route64Factory(link_quality_and_route_data_factory=mle.LinkQualityAndRouteDataFactory())
def create_default_mle_tlv_network_data_factory():
return mle.NetworkDataFactory(network_data_tlvs_factory=create_default_network_data_tlvs_factory())
def create_default_mle_tlv_address_registration_factory():
return mle.AddressRegistrationFactory(
addr_compressed_factory=mle.AddressCompressedFactory(),
addr_full_factory=mle.AddressFullFactory(),
)
def create_default_mle_tlv_thread_discovery_factory():
return mle.ThreadDiscoveryFactory(thread_discovery_tlvs_factory=create_default_thread_discovery_tlvs_factory())
def create_default_thread_discovery_tlvs_factory():
return mesh_cop.ThreadDiscoveryTlvsFactory(sub_tlvs_factories=create_default_thread_discovery_sub_tlvs_factories())
def create_default_thread_discovery_sub_tlvs_factories():
return {
mesh_cop.TlvType.DISCOVERY_REQUEST: mesh_cop.DiscoveryRequestFactory(),
mesh_cop.TlvType.DISCOVERY_RESPONSE: mesh_cop.DiscoveryResponseFactory(),
mesh_cop.TlvType.EXTENDED_PANID: mesh_cop.ExtendedPanidFactory(),
mesh_cop.TlvType.NETWORK_NAME: mesh_cop.NetworkNameFactory(),
mesh_cop.TlvType.STEERING_DATA: mesh_cop.SteeringDataFactory(),
mesh_cop.TlvType.JOINER_UDP_PORT: mesh_cop.JoinerUdpPortFactory(),
mesh_cop.TlvType.COMMISSIONER_UDP_PORT: mesh_cop.CommissionerUdpPortFactory(),
}
def create_default_mle_tlvs_factories():
return {
mle.TlvType.SOURCE_ADDRESS: mle.SourceAddressFactory(),
mle.TlvType.MODE: mle.ModeFactory(),
mle.TlvType.TIMEOUT: mle.TimeoutFactory(),
mle.TlvType.CHALLENGE: mle.ChallengeFactory(),
mle.TlvType.RESPONSE: mle.ResponseFactory(),
mle.TlvType.LINK_LAYER_FRAME_COUNTER: mle.LinkLayerFrameCounterFactory(),
mle.TlvType.MLE_FRAME_COUNTER: mle.MleFrameCounterFactory(),
mle.TlvType.ROUTE64: create_default_mle_tlv_route64_factory(),
mle.TlvType.ADDRESS16: mle.Address16Factory(),
mle.TlvType.LEADER_DATA: mle.LeaderDataFactory(),
mle.TlvType.NETWORK_DATA: create_default_mle_tlv_network_data_factory(),
mle.TlvType.TLV_REQUEST: mle.TlvRequestFactory(),
mle.TlvType.SCAN_MASK: mle.ScanMaskFactory(),
mle.TlvType.CONNECTIVITY: mle.ConnectivityFactory(),
mle.TlvType.LINK_MARGIN: mle.LinkMarginFactory(),
mle.TlvType.STATUS: mle.StatusFactory(),
mle.TlvType.VERSION: mle.VersionFactory(),
mle.TlvType.ADDRESS_REGISTRATION: create_default_mle_tlv_address_registration_factory(),
mle.TlvType.CHANNEL: mle.ChannelFactory(),
mle.TlvType.PANID: mle.PanIdFactory(),
mle.TlvType.ACTIVE_TIMESTAMP: mle.ActiveTimestampFactory(),
mle.TlvType.PENDING_TIMESTAMP: mle.PendingTimestampFactory(),
mle.TlvType.CSL_CHANNEL: mle.CslChannelFactory(),
mle.TlvType.CSL_SYNCHRONIZED_TIMEOUT: mle.CslSynchronizedTimeoutFactory(),
mle.TlvType.ACTIVE_OPERATIONAL_DATASET: mle.ActiveOperationalDatasetFactory(),
mle.TlvType.PENDING_OPERATIONAL_DATASET: mle.PendingOperationalDatasetFactory(),
mle.TlvType.TIME_REQUEST: mle.TimeRequestFactory(),
mle.TlvType.TIME_PARAMETER: mle.TimeParameterFactory(),
mle.TlvType.THREAD_DISCOVERY: create_default_mle_tlv_thread_discovery_factory(),
mle.TlvType.LINK_METRICS_QUERY: mle.LinkMetricsQueryFactory(),
mle.TlvType.LINK_METRICS_MANAGEMENT: mle.LinkMetricsManagementFactory(),
mle.TlvType.LINK_METRICS_REPORT: mle.LinkMetricsReportFactory(),
mle.TlvType.LINK_PROBE: mle.LinkProbeFactory(),
}
def create_default_mle_crypto_engine(master_key):
return net_crypto.CryptoEngine(crypto_material_creator=net_crypto.MleCryptoMaterialCreator(master_key))
def create_default_mle_message_factory(master_key):
return mle.MleMessageFactory(
aux_sec_hdr_factory=net_crypto.AuxiliarySecurityHeaderFactory(),
mle_command_factory=mle.MleCommandFactory(tlvs_factories=create_default_mle_tlvs_factories()),
crypto_engine=create_default_mle_crypto_engine(master_key),
)
def create_default_network_tlvs_factories():
return {
network_layer.TlvType.TARGET_EID:
network_layer.TargetEidFactory(),
network_layer.TlvType.MAC_EXTENDED_ADDRESS:
network_layer.MacExtendedAddressFactory(),
network_layer.TlvType.RLOC16:
network_layer.Rloc16Factory(),
network_layer.TlvType.ML_EID:
network_layer.MlEidFactory(),
network_layer.TlvType.STATUS:
network_layer.StatusFactory(),
network_layer.TlvType.TIME_SINCE_LAST_TRANSACTION:
network_layer.TimeSinceLastTransactionFactory(),
network_layer.TlvType.ROUTER_MASK:
network_layer.RouterMaskFactory(),
network_layer.TlvType.ND_OPTION:
network_layer.NdOptionFactory(),
network_layer.TlvType.ND_DATA:
network_layer.NdDataFactory(),
network_layer.TlvType.THREAD_NETWORK_DATA:
network_layer.ThreadNetworkDataFactory(create_default_network_data_tlvs_factory()),
network_layer.TlvType.XTAL_ACCURACY:
network_layer.XtalAccuracyFactory(),
        # Routing information is distributed in a Thread network via the MLE Routing TLV,
        # which is in fact the MLE Route64 TLV.  Thread Specification v1.1, Chapter 5.20.
network_layer.TlvType.MLE_ROUTING:
create_default_mle_tlv_route64_factory(),
network_layer.TlvType.IPv6_ADDRESSES:
network_layer.IPv6AddressesFactory(),
}
def create_default_network_tlvs_factory():
    return SubTlvsFactory(sub_tlvs_factories=create_default_network_tlvs_factories())
def create_default_mesh_cop_tlvs_factories():
return {
mesh_cop.TlvType.CHANNEL: mesh_cop.ChannelFactory(),
mesh_cop.TlvType.PAN_ID: mesh_cop.PanidFactory(),
mesh_cop.TlvType.EXTENDED_PANID: mesh_cop.ExtendedPanidFactory(),
mesh_cop.TlvType.NETWORK_NAME: mesh_cop.NetworkNameFactory(),
mesh_cop.TlvType.PSKC: mesh_cop.PSKcFactory(),
mesh_cop.TlvType.NETWORK_MASTER_KEY: mesh_cop.NetworkMasterKeyFactory(),
mesh_cop.TlvType.NETWORK_KEY_SEQUENCE_COUNTER: mesh_cop.NetworkKeySequenceCounterFactory(),
mesh_cop.TlvType.NETWORK_MESH_LOCAL_PREFIX: mesh_cop.NetworkMeshLocalPrefixFactory(),
mesh_cop.TlvType.STEERING_DATA: mesh_cop.SteeringDataFactory(),
mesh_cop.TlvType.BORDER_AGENT_LOCATOR: mesh_cop.BorderAgentLocatorFactory(),
mesh_cop.TlvType.COMMISSIONER_ID: mesh_cop.CommissionerIdFactory(),
mesh_cop.TlvType.COMMISSIONER_SESSION_ID: mesh_cop.CommissionerSessionIdFactory(),
mesh_cop.TlvType.SECURITY_POLICY: mesh_cop.SecurityPolicyFactory(),
mesh_cop.TlvType.GET: mesh_cop.GetFactory(),
mesh_cop.TlvType.ACTIVE_TIMESTAMP: mesh_cop.ActiveTimestampFactory(),
mesh_cop.TlvType.COMMISSIONER_UDP_PORT: mesh_cop.CommissionerUdpPortFactory(),
mesh_cop.TlvType.STATE: mesh_cop.StateFactory(),
mesh_cop.TlvType.JOINER_DTLS_ENCAPSULATION: mesh_cop.JoinerDtlsEncapsulationFactory(),
mesh_cop.TlvType.JOINER_UDP_PORT: mesh_cop.JoinerUdpPortFactory(),
mesh_cop.TlvType.JOINER_IID: mesh_cop.JoinerIIDFactory(),
mesh_cop.TlvType.JOINER_ROUTER_LOCATOR: mesh_cop.JoinerRouterLocatorFactory(),
mesh_cop.TlvType.JOINER_ROUTER_KEK: mesh_cop.JoinerRouterKEKFactory(),
mesh_cop.TlvType.PROVISIONING_URL: mesh_cop.ProvisioningUrlFactory(),
mesh_cop.TlvType.VENDOR_NAME: mesh_cop.VendorNameFactory(),
mesh_cop.TlvType.VENDOR_MODEL: mesh_cop.VendorModelFactory(),
mesh_cop.TlvType.VENDOR_SW_VERSION: mesh_cop.VendorSWVersionFactory(),
mesh_cop.TlvType.VENDOR_DATA: mesh_cop.VendorDataFactory(),
mesh_cop.TlvType.VENDOR_STACK_VERSION: mesh_cop.VendorStackVersionFactory(),
mesh_cop.TlvType.UDP_ENCAPSULATION: mesh_cop.UdpEncapsulationFactory(),
mesh_cop.TlvType.IPV6_ADDRESS: mesh_cop.Ipv6AddressFactory(),
mesh_cop.TlvType.PENDING_TIMESTAMP: mesh_cop.PendingTimestampFactory(),
mesh_cop.TlvType.DELAY_TIMER: mesh_cop.DelayTimerFactory(),
mesh_cop.TlvType.CHANNEL_MASK: mesh_cop.ChannelMaskFactory(),
mesh_cop.TlvType.COUNT: mesh_cop.CountFactory(),
mesh_cop.TlvType.PERIOD: mesh_cop.PeriodFactory(),
mesh_cop.TlvType.SCAN_DURATION: mesh_cop.ScanDurationFactory(),
mesh_cop.TlvType.ENERGY_LIST: mesh_cop.EnergyListFactory(),
}
def create_default_mesh_cop_tlvs_factory():
return SubTlvsFactory(sub_tlvs_factories=create_default_mesh_cop_tlvs_factories())
def create_default_network_diag_tlv_factories():
return {
network_diag.TlvType.EXT_ADDRESS: network_layer.MacExtendedAddressFactory(),
network_diag.TlvType.ADDRESS16: mle.Address16Factory(),
network_diag.TlvType.MODE: mle.ModeFactory(),
network_diag.TlvType.POLLING_PERIOD: mle.TimeoutFactory(),
network_diag.TlvType.CONNECTIVITY: mle.ConnectivityFactory(),
network_diag.TlvType.ROUTE64: create_default_mle_tlv_route64_factory(),
network_diag.TlvType.LEADER_DATA: mle.LeaderDataFactory(),
network_diag.TlvType.NETWORK_DATA: create_default_mle_tlv_network_data_factory(),
network_diag.TlvType.IPV6_ADDRESS_LIST: network_diag.Ipv6AddressListFactory(),
network_diag.TlvType.MAC_COUNTERS: network_diag.MacCountersFactory(),
network_diag.TlvType.BATTERY_LEVEL: network_diag.BatteryLevelFactory(),
network_diag.TlvType.SUPPLY_VOLTAGE: network_diag.SupplyVoltageFactory(),
network_diag.TlvType.CHILD_TABLE: network_diag.ChildTableFactory(),
network_diag.TlvType.CHANNEL_PAGES: network_diag.ChannelPagesFactory(),
network_diag.TlvType.TYPE_LIST: network_diag.TypeListFactory(),
network_diag.TlvType.MAX_CHILD_TIMEOUT: network_diag.MaxChildTimeoutFactory()
}
def create_default_network_diag_tlvs_factory():
return SubTlvsFactory(sub_tlvs_factories=create_default_network_diag_tlv_factories())
def create_default_uri_path_based_payload_factories():
network_layer_tlvs_factory = create_default_network_tlvs_factory()
mesh_cop_tlvs_factory = create_default_mesh_cop_tlvs_factory()
network_diag_tlvs_factory = create_default_network_diag_tlvs_factory()
return {
'/a/as': network_layer_tlvs_factory,
'/a/aq': network_layer_tlvs_factory,
'/a/ar': network_layer_tlvs_factory,
'/a/ae': network_layer_tlvs_factory,
'/a/an': network_layer_tlvs_factory,
'/a/sd': network_layer_tlvs_factory,
'/c/lp': mesh_cop_tlvs_factory,
'/c/cs': mesh_cop_tlvs_factory,
'/d/da': network_diag_tlvs_factory,
'/d/dg': network_diag_tlvs_factory,
'/d/dq': network_diag_tlvs_factory,
'/d/dr': network_diag_tlvs_factory,
'/n/mr': network_layer_tlvs_factory,
'/n/dr': network_layer_tlvs_factory,
'/n/dn': network_layer_tlvs_factory,
}
def create_default_coap_message_factory():
return coap.CoapMessageFactory(
options_factory=coap.CoapOptionsFactory(),
uri_path_based_payload_factories=create_default_uri_path_based_payload_factories(),
message_id_to_uri_path_binder=coap.CoapMessageIdToUriPathBinder(),
)
def create_default_ipv6_hop_by_hop_options_factories():
return {109: ipv6.MPLOptionFactory()}
def create_default_ipv6_hop_by_hop_options_factory():
return ipv6.HopByHopOptionsFactory(options_factories=create_default_ipv6_hop_by_hop_options_factories())
def create_default_based_on_src_dst_ports_udp_payload_factory(master_key):
mle_message_factory = create_default_mle_message_factory(master_key)
coap_message_factory = create_default_coap_message_factory()
dtls_message_factory = create_default_dtls_message_factory()
return ipv6.UdpBasedOnSrcDstPortsPayloadFactory(
src_dst_port_based_payload_factories={
19788: mle_message_factory,
61631: coap_message_factory,
1000: dtls_message_factory,
5683: coap_message_factory,
5684: dtls_message_factory,
})
def create_default_dtls_message_factory():
return dtls.MessageFactory()
def create_default_ipv6_icmp_body_factories():
return {
ipv6.ICMP_DESTINATION_UNREACHABLE: ipv6.ICMPv6DestinationUnreachableFactory(),
ipv6.ICMP_ECHO_REQUEST: ipv6.ICMPv6EchoBodyFactory(),
ipv6.ICMP_ECHO_RESPONSE: ipv6.ICMPv6EchoBodyFactory(),
'default': ipv6.BytesPayloadFactory(),
}
def create_default_ipv6_upper_layer_factories(master_key):
return {
ipv6.IPV6_NEXT_HEADER_UDP:
ipv6.UDPDatagramFactory(
udp_header_factory=ipv6.UDPHeaderFactory(),
udp_payload_factory=create_default_based_on_src_dst_ports_udp_payload_factory(master_key),
),
ipv6.IPV6_NEXT_HEADER_ICMP:
ipv6.ICMPv6Factory(body_factories=create_default_ipv6_icmp_body_factories()),
}
def create_default_lowpan_extension_headers_factories():
return {
ipv6.IPV6_NEXT_HEADER_HOP_BY_HOP:
lowpan.LowpanHopByHopFactory(hop_by_hop_options_factory=create_default_ipv6_hop_by_hop_options_factory())
}
def create_default_ipv6_extension_headers_factories():
return {
ipv6.IPV6_NEXT_HEADER_HOP_BY_HOP:
ipv6.HopByHopFactory(hop_by_hop_options_factory=create_default_ipv6_hop_by_hop_options_factory())
}
def create_default_ipv6_packet_factory(master_key):
return ipv6.IPv6PacketFactory(
ehf=create_default_ipv6_extension_headers_factories(),
ulpf=create_default_ipv6_upper_layer_factories(master_key),
)
def create_default_lowpan_decompressor(context_manager):
return lowpan.LowpanDecompressor(
lowpan_ip_header_factory=lowpan.LowpanIpv6HeaderFactory(context_manager=context_manager),
lowpan_extension_headers_factory=lowpan.LowpanExtensionHeadersFactory(
ext_headers_factories=create_default_lowpan_extension_headers_factories()),
lowpan_udp_header_factory=lowpan.LowpanUdpHeaderFactory(),
)
def create_default_thread_context_manager():
context_manager = lowpan.ContextManager()
context_manager[0] = lowpan.Context(MESH_LOCAL_PREFIX)
return context_manager
def create_default_lowpan_parser(context_manager, master_key=DEFAULT_MASTER_KEY):
return lowpan.LowpanParser(
lowpan_mesh_header_factory=lowpan.LowpanMeshHeaderFactory(),
lowpan_decompressor=create_default_lowpan_decompressor(context_manager),
lowpan_fragements_buffers_manager=lowpan.LowpanFragmentsBuffersManager(),
ipv6_packet_factory=create_default_ipv6_packet_factory(master_key),
)
def create_default_thread_message_factory(master_key=DEFAULT_MASTER_KEY):
context_manager = create_default_thread_context_manager()
lowpan_parser = create_default_lowpan_parser(context_manager, master_key)
return message.MessageFactory(lowpan_parser=lowpan_parser)
def create_default_thread_sniffer(use_message_factory=True):
return sniffer.Sniffer(create_default_thread_message_factory() if use_message_factory else None)
def create_default_simulator(use_message_factory=True):
if VIRTUAL_TIME:
return simulator.VirtualTime(use_message_factory=use_message_factory)
return simulator.RealTime(use_message_factory=use_message_factory)
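# A minimal usage sketch (assuming this module is imported as the test
# configuration; the demo below is illustrative and only constructs the
# default decoding pipeline):
if __name__ == '__main__':
    # Compose the default 802.15.4 -> 6LoWPAN -> IPv6 -> MLE/CoAP decoder
    # using DEFAULT_MASTER_KEY defined at the top of this module.
    factory = create_default_thread_message_factory()
    print(factory)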
| 41.684601
| 119
| 0.768649
|
35161c1e0197c0641e9475aa6c2510ded0a4ac7c
| 3,938
|
py
|
Python
|
templates/gd32-common/scripts/board_h_modify.py
|
flyghost/OneOS-V2.1.0
|
6fedab0558c07fe679d63ba1eb8ee9992c044d86
|
[
"Apache-2.0"
] | null | null | null |
templates/gd32-common/scripts/board_h_modify.py
|
flyghost/OneOS-V2.1.0
|
6fedab0558c07fe679d63ba1eb8ee9992c044d86
|
[
"Apache-2.0"
] | null | null | null |
templates/gd32-common/scripts/board_h_modify.py
|
flyghost/OneOS-V2.1.0
|
6fedab0558c07fe679d63ba1eb8ee9992c044d86
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import shutil
import flash_size_table
import sram_size_table
import sector_size_table
def modify(series,model,soc):
old_path = os.getcwd()
source_path = os.path.dirname(os.path.realpath(__file__))
os.chdir(source_path+'/../target/board')
flash_size = flash_size_table.get_size_kb(soc)
bootloader_size = sector_size_table.get_partition_size(series, 'bootloader')
if (bootloader_size == 0):
if flash_size > 64:
bootloader_size = 32
else:
bootloader_size = 16
cfg_size = sector_size_table.get_partition_size(series, 'cfg')
download_size = sector_size_table.get_partition_size(series, 'download')
bank0_size = sector_size_table.get_bank0_size_array(series)
addr_idx = ''
with open('board.h', 'r') as f:
with open('board.new', 'w') as g:
for line in f.readlines():
if 'GD32_FLASH_END_ADDRESS' in line:
g.write(line)
elif 'GD32_SRAM1_END' in line:
g.write(line)
elif 'GD32_SRAM2_END' in line:
g.write(line)
elif '#include <gd32f' in line:
g.write('#include <%s.h>\n' % series.lower())
elif '#define SOC_MODEL' in line:
g.write('#define SOC_MODEL "%s"\n' % soc.upper())
elif '#ifdef OS_USE_BOOTLOADER' in line:
addr_idx = 'bootloader'
g.write(line)
elif '#else' in line:
if (addr_idx == 'bootloader'):
addr_idx = 'no_bootloader'
g.write(line)
elif '#endif' in line:
if (addr_idx == 'bootloader'):
addr_idx = ''
g.write(line)
elif '#define GD32_APP_ADDR' in line:
if addr_idx == 'bootloader':
app_addr = 0x08000000 + bootloader_size * 1024 + cfg_size * 1024
else:
app_addr = 0x08000000
g.write("#define GD32_APP_ADDR 0x%08x\n" % app_addr)
elif '#define GD32_APP_SIZE' in line:
if addr_idx == 'bootloader':
if flash_size <= bank0_size:
app_size = flash_size - bootloader_size - cfg_size
else:
app_size = bank0_size - bootloader_size - cfg_size
app_size = app_size * 1024
else:
app_size = flash_size * 1024
g.write("#define GD32_APP_SIZE 0x%08x\n" % app_size)
elif 'GD32_FLASH_START_ADRESS' in line:
g.write('#define GD32_FLASH_START_ADRESS ((uint32_t)0x%s)\n' % flash_size_table.get_startaddr(soc))
elif 'GD32_FLASH_SIZE' in line:
g.write('#define GD32_FLASH_SIZE (%d * 1024)\n' % flash_size_table.get_size_kb(soc))
elif 'GD32_SRAM1_START' in line:
g.write('#define GD32_SRAM1_START (0x%s)\n' % sram_size_table.get_startaddr(soc, 'sram_1'))
elif 'GD32_SRAM1_SIZE' in line:
g.write('#define GD32_SRAM1_SIZE (%d * 1024)\n' % sram_size_table.get_size_kb(soc, 'sram_1'))
elif 'GD32_SRAM2_START' in line:
g.write('#define GD32_SRAM2_START (0x%s)\n' % sram_size_table.get_startaddr(soc, 'sram_2'))
elif 'GD32_SRAM2_SIZE' in line:
g.write('#define GD32_SRAM2_SIZE (%d * 1024)\n' % sram_size_table.get_size_kb(soc, 'sram_2'))
else:
g.write(line)
shutil.move('board.new', 'board.h')
os.chdir(old_path)
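# A minimal usage sketch (the series/model/soc strings below are illustrative
# assumptions, not values taken from this repository):
#
#   import board_h_modify
#   board_h_modify.modify('GD32F30x', 'GD32F303RC', 'GD32F303RCT6')
#
# which rewrites ../target/board/board.h in place with the flash and SRAM
# layout looked up for that SoC.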
| 48.02439
| 120
| 0.517522
|
887d65a108cd69d860b8de96d1b18ad172953ade
| 20,276
|
py
|
Python
|
12AX_task/lsnn/spiking_models.py
|
IGITUGraz/Spike-Frequency-Adaptation-Supports-Network-Computations
|
58a33584b34b477cfb1d3614460f66f94344fb9c
|
[
"MIT"
] | 1
|
2021-07-17T10:34:15.000Z
|
2021-07-17T10:34:15.000Z
|
12AX_task/lsnn/spiking_models.py
|
IGITUGraz/Spike-Frequency-Adaptation-Supports-Network-Computations
|
58a33584b34b477cfb1d3614460f66f94344fb9c
|
[
"MIT"
] | null | null | null |
12AX_task/lsnn/spiking_models.py
|
IGITUGraz/Spike-Frequency-Adaptation-Supports-Network-Computations
|
58a33584b34b477cfb1d3614460f66f94344fb9c
|
[
"MIT"
] | null | null | null |
"""
Copyright (C) 2019 the LSNN team, TU Graz
"""
from distutils.version import LooseVersion
import datetime
from collections import OrderedDict
from collections import namedtuple
import numpy as np
import numpy.random as rd
import tensorflow as tf
from tensorflow.python.framework.ops import Tensor
if LooseVersion(tf.__version__) >= LooseVersion("1.11"):
from tensorflow.python.ops.variables import Variable, RefVariable
else:
print("Using tensorflow version older then 1.11 -> skipping RefVariable storing")
from tensorflow.python.ops.variables import Variable
from lsnn.toolbox.rewiring_tools import weight_sampler
from lsnn.toolbox.tensorflow_einsums.einsum_re_written import einsum_bi_ijk_to_bjk
from lsnn.toolbox.tensorflow_utils import tf_roll
from time import time
Cell = tf.contrib.rnn.BasicRNNCell
def map_to_named_tuple(S, f):
state_dict = S._asdict()
new_state_dict = OrderedDict({})
for k, v in state_dict.items():
new_state_dict[k] = f(v)
new_named_tuple = S.__class__(**new_state_dict)
return new_named_tuple
def placeholder_container_for_rnn_state(cell_state_size, dtype, batch_size, name='TupleStateHolder'):
with tf.name_scope(name):
default_dict = cell_state_size._asdict()
placeholder_dict = OrderedDict({})
for k, v in default_dict.items():
if np.shape(v) == ():
v = [v]
shape = np.concatenate([[batch_size], v])
placeholder_dict[k] = tf.placeholder(shape=shape, dtype=dtype, name=k)
placeholder_tuple = cell_state_size.__class__(**placeholder_dict)
return placeholder_tuple
def placeholder_container_from_example(state_example, name='TupleStateHolder'):
with tf.name_scope(name):
default_dict = state_example._asdict()
placeholder_dict = OrderedDict({})
for k, v in default_dict.items():
placeholder_dict[k] = tf.placeholder(shape=v.shape, dtype=v.dtype, name=k)
placeholder_tuple = state_example.__class__(**placeholder_dict)
return placeholder_tuple
def feed_dict_with_placeholder_container(dict_to_update, state_holder, state_value, batch_selection=None):
if state_value is None:
return dict_to_update
assert state_holder.__class__ == state_value.__class__, 'Should have the same class, got {} and {}'.format(
state_holder.__class__, state_value.__class__)
for k, v in state_value._asdict().items():
if batch_selection is None:
dict_to_update.update({state_holder._asdict()[k]: v})
else:
dict_to_update.update({state_holder._asdict()[k]: v[batch_selection]})
return dict_to_update
#################################
# Rewrite the spike function without hacks
#################################
@tf.custom_gradient
def SpikeFunction(v_scaled, dampening_factor):
z_ = tf.greater(v_scaled, 0.)
z_ = tf.cast(z_, dtype=tf.float32)
def grad(dy):
dE_dz = dy
dz_dv_scaled = tf.maximum(1 - tf.abs(v_scaled), 0)
dz_dv_scaled *= dampening_factor
dE_dv_scaled = dE_dz * dz_dv_scaled
return [dE_dv_scaled,
tf.zeros_like(dampening_factor)]
return tf.identity(z_, name="SpikeFunction"), grad
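# Note: SpikeFunction implements a straight-through surrogate gradient.  The
# forward pass is a hard threshold on the scaled membrane potential
# v_scaled = (v - thr) / thr, while the backward pass replaces the step
# function's derivative with the triangular function max(1 - |v_scaled|, 0)
# scaled by dampening_factor, so errors can propagate through the spikes.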
def weight_matrix_with_delay_dimension(w, d, n_delay):
"""
Generate the tensor of shape n_in x n_out x n_delay that represents the synaptic weights with the right delays.
:param w: synaptic weight value, float tensor of shape (n_in x n_out)
:param d: delay number, int tensor of shape (n_in x n_out)
:param n_delay: number of possible delays
    :return: float tensor of shape (n_in x n_out x n_delay)
"""
with tf.name_scope('WeightDelayer'):
w_d_list = []
for kd in range(n_delay):
mask = tf.equal(d, kd)
w_d = tf.where(condition=mask, x=w, y=tf.zeros_like(w))
w_d_list.append(w_d)
delay_axis = len(d.shape)
WD = tf.stack(w_d_list, axis=delay_axis)
return WD
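# Worked example (illustrative): with n_in = n_out = 2, n_delay = 2,
#   w = [[0.5, 1.0], [2.0, 3.0]] and d = [[0, 1], [1, 0]],
# the result WD has shape (2, 2, 2); WD[:, :, 0] keeps only the delay-0
# weights [[0.5, 0], [0, 3.0]] and WD[:, :, 1] the delay-1 weights
# [[0, 1.0], [2.0, 0]].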
# PSP on output layer
def exp_convolve(tensor, decay): # tensor shape (trial, time, neuron)
with tf.name_scope('ExpConvolve'):
assert tensor.dtype in [tf.float16, tf.float32, tf.float64]
tensor_time_major = tf.transpose(tensor, perm=[1, 0, 2])
initializer = tf.zeros_like(tensor_time_major[0])
filtered_tensor = tf.scan(lambda a, x: a * decay + (1 - decay) * x, tensor_time_major, initializer=initializer)
filtered_tensor = tf.transpose(filtered_tensor, perm=[1, 0, 2])
return filtered_tensor
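# Note: exp_convolve applies a first-order low-pass filter along the time axis,
#   filtered[:, t] = decay * filtered[:, t - 1] + (1 - decay) * tensor[:, t],
# which serves as a simple post-synaptic potential (PSP) model for the output.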
LIFStateTuple = namedtuple('LIFStateTuple', ('v', 'z', 'i_future_buffer', 'z_buffer'))
def tf_cell_to_savable_dict(cell, sess, supplement={}):
"""
    Useful function to return a python/numpy object from one of the tensorflow cell objects defined here.
    The idea is simply that Variables and Tensors given as attributes of the object will be replaced by their numpy values evaluated on the current tensorflow session.
    :param cell: tensorflow cell object
    :param sess: tensorflow session
    :param supplement: optional dict of additional entries to store alongside the cell attributes
:return:
"""
dict_to_save = {}
dict_to_save['cell_type'] = str(cell.__class__)
time_stamp = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
dict_to_save['time_stamp'] = time_stamp
dict_to_save.update(supplement)
tftypes = [Variable, Tensor]
if LooseVersion(tf.__version__) >= LooseVersion("1.11"):
tftypes.append(RefVariable)
for k, v in cell.__dict__.items():
if k == 'self':
pass
elif type(v) in tftypes:
dict_to_save[k] = sess.run(v)
elif type(v) in [bool, int, float, np.int64, np.ndarray]:
dict_to_save[k] = v
else:
print('WARNING: attribute of key {} and value {} has type {}, recoding it as string.'.format(k, v, type(v)))
dict_to_save[k] = str(v)
return dict_to_save
class LIF(Cell):
def __init__(self, n_in, n_rec, tau=20., thr=0.03,
dt=1., n_refractory=0, dtype=tf.float32, n_delay=1, rewiring_connectivity=-1,
in_neuron_sign=None, rec_neuron_sign=None,
dampening_factor=0.3,
injected_noise_current=0.0,
V0=1., eprop=False):
"""
Tensorflow cell object that simulates a LIF neuron with an approximation of the spike derivatives.
:param n_in: number of input neurons
:param n_rec: number of recurrent neurons
:param tau: membrane time constant
:param thr: threshold voltage
:param dt: time step of the simulation
:param n_refractory: number of refractory time steps
:param dtype: data type of the cell tensors
        :param n_delay: number of synaptic delays; the delay range goes from 1 to n_delay time steps
        :param dampening_factor: scaling of the surrogate spike derivative used during backpropagation
"""
if np.isscalar(tau): tau = tf.ones(n_rec, dtype=dtype) * np.mean(tau)
if np.isscalar(thr): thr = tf.ones(n_rec, dtype=dtype) * np.mean(thr)
tau = tf.cast(tau, dtype=dtype)
dt = tf.cast(dt, dtype=dtype)
print('Injected noise current per neuron: ', injected_noise_current)
self.dampening_factor = dampening_factor
# Parameters
self.n_delay = n_delay
self.n_refractory = n_refractory
self.dt = dt
self.n_in = n_in
self.n_rec = n_rec
self.data_type = dtype
self._num_units = self.n_rec
self.tau = tf.Variable(tau, dtype=dtype, name="Tau", trainable=False)
self._decay = tf.exp(-dt / tau)
self.thr = tf.Variable(thr, dtype=dtype, name="Threshold", trainable=False)
self.V0 = V0
self.eprop = eprop
self.injected_noise_current = injected_noise_current
self.rewiring_connectivity = rewiring_connectivity
self.in_neuron_sign = in_neuron_sign
self.rec_neuron_sign = rec_neuron_sign
with tf.variable_scope('InputWeights'):
# Input weights
if 0 < rewiring_connectivity < 1:
self.w_in_val, self.w_in_sign, self.w_in_var, _ = weight_sampler(n_in, n_rec, rewiring_connectivity,
neuron_sign=in_neuron_sign)
else:
self.w_in_var = tf.Variable(rd.randn(n_in, n_rec) / np.sqrt(n_in), dtype=dtype, name="InputWeight")
self.w_in_val = self.w_in_var
self.w_in_val = self.V0 * self.w_in_val
self.w_in_delay = tf.Variable(rd.randint(self.n_delay, size=n_in * n_rec).reshape(n_in, n_rec),
dtype=tf.int64, name="InDelays", trainable=False)
self.W_in = weight_matrix_with_delay_dimension(self.w_in_val, self.w_in_delay, self.n_delay)
with tf.variable_scope('RecWeights'):
if 0 < rewiring_connectivity < 1:
self.w_rec_val, self.w_rec_sign, self.w_rec_var, _ = weight_sampler(n_rec, n_rec,
rewiring_connectivity,
neuron_sign=rec_neuron_sign)
else:
if rec_neuron_sign is not None or in_neuron_sign is not None:
raise NotImplementedError('Neuron sign requested but this is only implemented with rewiring')
self.w_rec_var = Variable(rd.randn(n_rec, n_rec) / np.sqrt(n_rec), dtype=dtype,
name='RecurrentWeight')
self.w_rec_val = self.w_rec_var
recurrent_disconnect_mask = np.diag(np.ones(n_rec, dtype=bool))
self.w_rec_val = self.w_rec_val * self.V0
self.w_rec_val = tf.where(recurrent_disconnect_mask, tf.zeros_like(self.w_rec_val),
self.w_rec_val) # Disconnect autotapse
self.w_rec_delay = tf.Variable(rd.randint(self.n_delay, size=n_rec * n_rec).reshape(n_rec, n_rec),
dtype=tf.int64, name="RecDelays", trainable=False)
self.W_rec = weight_matrix_with_delay_dimension(self.w_rec_val, self.w_rec_delay, self.n_delay)
@property
def state_size(self):
return LIFStateTuple(v=self.n_rec,
z=self.n_rec,
i_future_buffer=(self.n_rec, self.n_delay),
z_buffer=(self.n_rec, self.n_refractory))
@property
def output_size(self):
return self.n_rec
def zero_state(self, batch_size, dtype, n_rec=None):
if n_rec is None: n_rec = self.n_rec
v0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
z0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
i_buff0 = tf.zeros(shape=(batch_size, n_rec, self.n_delay), dtype=dtype)
z_buff0 = tf.zeros(shape=(batch_size, n_rec, self.n_refractory), dtype=dtype)
return LIFStateTuple(
v=v0,
z=z0,
i_future_buffer=i_buff0,
z_buffer=z_buff0
)
def __call__(self, inputs, state, scope=None, dtype=tf.float32):
i_future_buffer = state.i_future_buffer + einsum_bi_ijk_to_bjk(inputs, self.W_in) + einsum_bi_ijk_to_bjk(
state.z, self.W_rec)
new_v, new_z = self.LIF_dynamic(
v=state.v,
z=state.z,
z_buffer=state.z_buffer,
i_future_buffer=i_future_buffer)
if self.eprop:
new_z = tf.stop_gradient(new_z)
new_z_buffer = tf_roll(state.z_buffer, new_z, axis=2)
new_i_future_buffer = tf_roll(i_future_buffer, axis=2)
new_state = LIFStateTuple(v=new_v,
z=new_z,
i_future_buffer=new_i_future_buffer,
z_buffer=new_z_buffer)
return new_z, new_state
def LIF_dynamic(self, v, z, z_buffer, i_future_buffer, thr=None, decay=None, n_refractory=None, add_current=0.):
"""
Function that generate the next spike and voltage tensor for given cell state.
:param v
:param z
:param z_buffer:
:param i_future_buffer:
:param thr:
:param decay:
:param n_refractory:
:param add_current:
:return:
"""
        if np.any(self.injected_noise_current > 0):
add_current = tf.random_normal(shape=z.shape, stddev=self.injected_noise_current)
with tf.name_scope('LIFdynamic'):
if thr is None: thr = self.thr
if decay is None: decay = self._decay
if n_refractory is None: n_refractory = self.n_refractory
i_t = i_future_buffer[:, :, 0] + add_current
I_reset = z * thr * self.dt
new_v = decay * v + (1 - decay) * i_t - I_reset
# Spike generation
v_scaled = (v - thr) / thr
# new_z = differentiable_spikes(v_scaled=v_scaled)
new_z = SpikeFunction(v_scaled, self.dampening_factor)
if n_refractory > 0:
is_ref = tf.greater(tf.reduce_max(z_buffer[:, :, -n_refractory:], axis=2), 0)
new_z = tf.where(is_ref, tf.zeros_like(new_z), new_z)
new_z = new_z * 1 / self.dt
return new_v, new_z
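# A minimal usage sketch (assuming the standard dynamic_rnn driver; sizes are
# illustrative):
#
#   cell = LIF(n_in=40, n_rec=100, tau=20., thr=0.03)
#   init_state = cell.zero_state(batch_size, tf.float32)
#   spikes, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=init_state, dtype=tf.float32)
#
# where `inputs` has shape (batch, time, n_in) and `spikes` collects the binary
# outputs z with shape (batch, time, n_rec).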
ALIFStateTuple = namedtuple('ALIFState', (
'z',
'v',
'b',
'i_future_buffer',
'z_buffer'))
class ALIF(LIF):
def __init__(self, n_in, n_rec, tau=20, thr=0.01,
dt=1., n_refractory=0, dtype=tf.float32, n_delay=1,
tau_adaptation=200., beta=1.6,
rewiring_connectivity=-1, dampening_factor=0.3,
in_neuron_sign=None, rec_neuron_sign=None, injected_noise_current=0.0,
V0=1., eprop=False):
"""
Tensorflow cell object that simulates a LIF neuron with an approximation of the spike derivatives.
:param n_in: number of input neurons
:param n_rec: number of recurrent neurons
:param tau: membrane time constant
:param thr: threshold voltage
:param dt: time step of the simulation
:param n_refractory: number of refractory time steps
:param dtype: data type of the cell tensors
        :param n_delay: number of synaptic delays; the delay range goes from 1 to n_delay time steps
        :param tau_adaptation: adaptation time constant for the threshold voltage
        :param beta: amplitude of the threshold adaptation
        :param rewiring_connectivity: fraction of non-zero synapses in the weight matrices (at initialization)
        :param in_neuron_sign: vector of +1, -1 to specify input neuron signs
        :param rec_neuron_sign: same for recurrent neurons
        :param injected_noise_current: amplitude of the injected current noise
        :param V0: to choose the voltage unit, specify the value of V0 = 1 Volt in the desired unit (e.g. V0=1000 to express voltages in millivolts)
"""
super(ALIF, self).__init__(n_in=n_in, n_rec=n_rec, tau=tau, thr=thr, dt=dt, n_refractory=n_refractory,
dtype=dtype, n_delay=n_delay,
rewiring_connectivity=rewiring_connectivity,
dampening_factor=dampening_factor, in_neuron_sign=in_neuron_sign,
rec_neuron_sign=rec_neuron_sign,
injected_noise_current=injected_noise_current,
V0=V0, eprop=eprop)
if tau_adaptation is None: raise ValueError("alpha parameter for adaptive bias must be set")
if beta is None: raise ValueError("beta parameter for adaptive bias must be set")
self.tau_adaptation = tf.Variable(tau_adaptation, dtype=dtype, name="TauAdaptation", trainable=False)
self.beta = tf.Variable(beta, dtype=dtype, name="Beta", trainable=False)
self.decay_b = np.exp(-dt / tau_adaptation)
@property
def output_size(self):
return [self.n_rec, self.n_rec, self.n_rec]
@property
def state_size(self):
return ALIFStateTuple(v=self.n_rec,
z=self.n_rec,
b=self.n_rec,
i_future_buffer=(self.n_rec, self.n_delay),
z_buffer=(self.n_rec, self.n_refractory))
def zero_state(self, batch_size, dtype, n_rec=None):
if n_rec is None: n_rec = self.n_rec
v0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
z0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
b0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
i_buff0 = tf.zeros(shape=(batch_size, n_rec, self.n_delay), dtype=dtype)
z_buff0 = tf.zeros(shape=(batch_size, n_rec, self.n_refractory), dtype=dtype)
return ALIFStateTuple(
v=v0,
z=z0,
b=b0,
i_future_buffer=i_buff0,
z_buffer=z_buff0
)
def __call__(self, inputs, state, scope=None, dtype=tf.float32):
with tf.name_scope('ALIFcall'):
i_future_buffer = state.i_future_buffer + einsum_bi_ijk_to_bjk(inputs, self.W_in) + einsum_bi_ijk_to_bjk(
state.z, self.W_rec)
new_b = self.decay_b * state.b + (1. - self.decay_b) * state.z
thr = self.thr + new_b * self.beta * self.V0
new_v, new_z = self.LIF_dynamic(
v=state.v,
z=state.z,
z_buffer=state.z_buffer,
i_future_buffer=i_future_buffer,
decay=self._decay,
thr=thr)
new_z_buffer = tf_roll(state.z_buffer, new_z, axis=2)
new_i_future_buffer = tf_roll(i_future_buffer, axis=2)
new_state = ALIFStateTuple(v=new_v,
z=new_z,
b=new_b,
i_future_buffer=new_i_future_buffer,
z_buffer=new_z_buffer)
return [new_z, new_v, thr], new_state
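# Note: ALIF adds spike-frequency adaptation on top of LIF.  The adaptation
# variable evolves as b[t + 1] = decay_b * b[t] + (1 - decay_b) * z[t], and the
# effective firing threshold used above is thr + beta * b * V0, so neurons that
# fired recently become harder to fire again.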
def static_rnn_with_gradient(cell, inputs, state, loss_function, T, verbose=True):
batch_size = tf.shape(inputs)[0]
thr_list = []
state_list = []
z_list = []
v_list = []
if verbose: print('Building forward Graph...', end=' ')
t0 = time()
for t in range(T):
outputs, state = cell(inputs[:, t, :], state)
z, v, thr = outputs
z_list.append(z)
v_list.append(v)
thr_list.append(thr)
state_list.append(state)
zs = tf.stack(z_list, axis=1)
vs = tf.stack(v_list, axis=1)
thrs = tf.stack(thr_list, axis=1)
loss = loss_function(zs)
de_dz_partial = tf.gradients(loss, zs)[0]
if de_dz_partial is None:
de_dz_partial = tf.zeros_like(zs)
print('Warning: Partial de_dz is None')
print('Done in {:.2f}s'.format(time() - t0))
def namedtuple_to_list(state):
return list(state._asdict().values())
zero_state_as_list = cell.zero_state(batch_size, tf.float32)
de_dstate = namedtuple_to_list(cell.zero_state(batch_size, dtype=tf.float32))
g_list = []
if verbose: print('Building backward Graph...', end=' ')
t0 = time()
for t in np.arange(T)[::-1]:
# gradient from next state
if t < T - 1:
state = namedtuple_to_list(state_list[t])
next_state = namedtuple_to_list(state_list[t + 1])
de_dstate = tf.gradients(ys=next_state, xs=state, grad_ys=de_dstate)
for k_var, de_dvar in enumerate(de_dstate):
if de_dvar is None:
de_dstate[k_var] = tf.zeros_like(zero_state_as_list[k_var])
print('Warning: var {} at time {} is None'.format(k_var, t))
# add the partial derivative due to current error
de_dstate[0] = de_dstate[0] + de_dz_partial[:, t]
g_list.append(de_dstate[0])
g_list = list(reversed(g_list))
gs = tf.stack(g_list, axis=1)
print('Done in {:.2f}s'.format(time() - t0))
return zs, vs, thrs, gs, state_list[-1]
| 38.328922
| 166
| 0.61378
|
cca4902ab57a5ad835a70205a55e0ef97ac1c0a6
| 1,601
|
py
|
Python
|
spacy/lang/ja/syntax_iterators.py
|
g4brielvs/spaCy
|
cca8651fc8133172ebaa9d9fc438ed1fbf34fb33
|
[
"BSD-3-Clause",
"MIT"
] | 4
|
2021-08-11T05:46:23.000Z
|
2021-09-11T05:16:57.000Z
|
spacy/lang/ja/syntax_iterators.py
|
g4brielvs/spaCy
|
cca8651fc8133172ebaa9d9fc438ed1fbf34fb33
|
[
"BSD-3-Clause",
"MIT"
] | 1
|
2021-03-01T19:01:37.000Z
|
2021-03-01T19:01:37.000Z
|
spacy/lang/ja/syntax_iterators.py
|
g4brielvs/spaCy
|
cca8651fc8133172ebaa9d9fc438ed1fbf34fb33
|
[
"BSD-3-Clause",
"MIT"
] | 2
|
2021-01-26T17:29:02.000Z
|
2021-03-13T08:54:53.000Z
|
from typing import Union, Iterator
from ...symbols import NOUN, PROPN, PRON, VERB
from ...tokens import Doc, Span
# TODO: this can probably be pruned a bit
# fmt: off
labels = ["nsubj", "nmod", "ddoclike", "nsubjpass", "pcomp", "pdoclike", "doclike", "obl", "dative", "appos", "attr", "ROOT"]
# fmt: on
def noun_chunks(doclike: Union[Doc, Span]) -> Iterator[Span]:
"""Detect base noun phrases from a dependency parse. Works on Doc and Span."""
doc = doclike.doc # Ensure works on both Doc and Span.
np_deps = [doc.vocab.strings.add(label) for label in labels]
doc.vocab.strings.add("conj")
np_label = doc.vocab.strings.add("NP")
seen = set()
for i, word in enumerate(doclike):
if word.pos not in (NOUN, PROPN, PRON):
continue
# Prevent nested chunks from being produced
if word.i in seen:
continue
if word.dep in np_deps:
unseen = [w.i for w in word.subtree if w.i not in seen]
if not unseen:
continue
# this takes care of particles etc.
seen.update(j.i for j in word.subtree)
# This avoids duplicating embedded clauses
seen.update(range(word.i + 1))
# if the head of this is a verb, mark that and rights seen
# Don't do the subtree as that can hide other phrases
if word.head.pos == VERB:
seen.add(word.head.i)
seen.update(w.i for w in word.head.rights)
yield unseen[0], word.i + 1, np_label
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
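# A minimal usage sketch (assuming a trained Japanese pipeline is installed;
# the model name below is an assumption):
#
#   import spacy
#   nlp = spacy.load("ja_core_news_sm")
#   doc = nlp("私は東京で寿司を食べた")
#   print([chunk.text for chunk in doc.noun_chunks])
#
# spaCy resolves doc.noun_chunks through SYNTAX_ITERATORS, which is how the
# iterator above is picked up for Japanese.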
| 37.232558
| 125
| 0.603998
|
3047c699deae3c076ccc09ee33730faf41208b7d
| 10,372
|
py
|
Python
|
src/pytypes/onefuzztypes/enums.py
|
muhammadmeisam/onefuzz
|
a025d194547d39307bf804ebee0fc0e206759916
|
[
"MIT"
] | null | null | null |
src/pytypes/onefuzztypes/enums.py
|
muhammadmeisam/onefuzz
|
a025d194547d39307bf804ebee0fc0e206759916
|
[
"MIT"
] | null | null | null |
src/pytypes/onefuzztypes/enums.py
|
muhammadmeisam/onefuzz
|
a025d194547d39307bf804ebee0fc0e206759916
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from enum import Enum
from typing import List
class OS(Enum):
windows = "windows"
linux = "linux"
class DashboardEvent(Enum):
heartbeat = "heartbeat"
new_file = "new_file"
repro_state = "repro_state"
task_state = "task_state"
job_state = "job_state"
proxy_state = "proxy_state"
pool_state = "pool_state"
node_state = "node_state"
scaleset_state = "scaleset_state"
class TelemetryEvent(Enum):
task = "task"
state_changed = "state_changed"
@classmethod
def can_share(cls) -> List["TelemetryEvent"]:
"""only these events will be shared to the central telemetry"""
return [cls.task, cls.state_changed]
class TelemetryData(Enum):
component_type = "component_type"
current_state = "current_state"
job_id = "job_id"
task_id = "task_id"
task_type = "task_type"
vm_id = "vm_id"
@classmethod
def can_share(cls) -> List["TelemetryData"]:
"""only these types of data will be shared to the central telemetry"""
return [cls.current_state, cls.vm_id, cls.job_id, cls.task_id, cls.task_type]
class TaskFeature(Enum):
input_queue_from_container = "input_queue_from_container"
supervisor_exe = "supervisor_exe"
supervisor_env = "supervisor_env"
supervisor_options = "supervisor_options"
supervisor_input_marker = "supervisor_input_marker"
stats_file = "stats_file"
stats_format = "stats_format"
target_exe = "target_exe"
target_exe_optional = "target_exe_optional"
target_env = "target_env"
target_options = "target_options"
analyzer_exe = "analyzer_exe"
analyzer_env = "analyzer_env"
analyzer_options = "analyzer_options"
rename_output = "rename_output"
target_options_merge = "target_options_merge"
target_workers = "target_workers"
generator_exe = "generator_exe"
generator_env = "generator_env"
generator_options = "generator_options"
wait_for_files = "wait_for_files"
target_timeout = "target_timeout"
check_asan_log = "check_asan_log"
check_debugger = "check_debugger"
check_retry_count = "check_retry_count"
ensemble_sync_delay = "ensemble_sync_delay"
preserve_existing_outputs = "preserve_existing_outputs"
check_fuzzer_help = "check_fuzzer_help"
expect_crash_on_failure = "expect_crash_on_failure"
report_list = "report_list"
minimized_stack_depth = "minimized_stack_depth"
# Permissions for an Azure Blob Storage Container.
#
# See: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-container # noqa: E501
class ContainerPermission(Enum):
Read = "Read"
Write = "Write"
List = "List"
Delete = "Delete"
class JobState(Enum):
init = "init"
enabled = "enabled"
stopping = "stopping"
stopped = "stopped"
@classmethod
def available(cls) -> List["JobState"]:
"""set of states that indicate if tasks can be added to it"""
return [x for x in cls if x not in [cls.stopping, cls.stopped]]
@classmethod
def needs_work(cls) -> List["JobState"]:
"""
set of states that indicate work is needed during eventing
"""
return [cls.init, cls.stopping]
@classmethod
def shutting_down(cls) -> List["JobState"]:
return [cls.stopping, cls.stopped]
class TaskState(Enum):
init = "init"
waiting = "waiting"
scheduled = "scheduled"
setting_up = "setting_up"
running = "running"
stopping = "stopping"
stopped = "stopped"
wait_job = "wait_job"
@classmethod
def has_started(cls) -> List["TaskState"]:
return [cls.running, cls.stopping, cls.stopped]
@classmethod
def needs_work(cls) -> List["TaskState"]:
"""
set of states that indicate work is needed during eventing
"""
return [cls.init, cls.stopping]
@classmethod
def available(cls) -> List["TaskState"]:
"""set of states that indicate if the task isn't stopping"""
return [x for x in cls if x not in [TaskState.stopping, TaskState.stopped]]
@classmethod
def shutting_down(cls) -> List["TaskState"]:
return [TaskState.stopping, TaskState.stopped]
class TaskType(Enum):
libfuzzer_fuzz = "libfuzzer_fuzz"
libfuzzer_coverage = "libfuzzer_coverage"
libfuzzer_crash_report = "libfuzzer_crash_report"
libfuzzer_merge = "libfuzzer_merge"
libfuzzer_regression = "libfuzzer_regression"
generic_analysis = "generic_analysis"
generic_supervisor = "generic_supervisor"
generic_merge = "generic_merge"
generic_generator = "generic_generator"
generic_crash_report = "generic_crash_report"
generic_regression = "generic_regression"
class VmState(Enum):
init = "init"
extensions_launch = "extensions_launch"
extensions_failed = "extensions_failed"
vm_allocation_failed = "vm_allocation_failed"
running = "running"
stopping = "stopping"
stopped = "stopped"
@classmethod
def needs_work(cls) -> List["VmState"]:
"""
set of states that indicate work is needed during eventing
"""
return [cls.init, cls.extensions_launch, cls.stopping]
@classmethod
def available(cls) -> List["VmState"]:
"""set of states that indicate if the repro vm isn't stopping"""
return [x for x in cls if x not in [cls.stopping, cls.stopped]]
class UpdateType(Enum):
Task = "Task"
Job = "Job"
Repro = "Repro"
Proxy = "Proxy"
Pool = "Pool"
Node = "Node"
Scaleset = "Scaleset"
TaskScheduler = "TaskScheduler"
class Compare(Enum):
Equal = "Equal"
AtLeast = "AtLeast"
AtMost = "AtMost"
class ContainerType(Enum):
analysis = "analysis"
coverage = "coverage"
crashes = "crashes"
inputs = "inputs"
no_repro = "no_repro"
readonly_inputs = "readonly_inputs"
reports = "reports"
setup = "setup"
tools = "tools"
unique_inputs = "unique_inputs"
unique_reports = "unique_reports"
regression_reports = "regression_reports"
@classmethod
def reset_defaults(cls) -> List["ContainerType"]:
return [
cls.analysis,
cls.coverage,
cls.crashes,
cls.inputs,
cls.no_repro,
cls.readonly_inputs,
cls.reports,
cls.setup,
cls.unique_inputs,
cls.unique_reports,
cls.regression_reports,
]
@classmethod
def user_config(cls) -> List["ContainerType"]:
return [cls.setup, cls.inputs, cls.readonly_inputs]
class StatsFormat(Enum):
AFL = "AFL"
class ErrorCode(Enum):
INVALID_REQUEST = 450
INVALID_PERMISSION = 451
MISSING_EULA_AGREEMENT = 452
INVALID_JOB = 453
INVALID_TASK = 453
UNABLE_TO_ADD_TASK_TO_JOB = 454
INVALID_CONTAINER = 455
UNABLE_TO_RESIZE = 456
UNAUTHORIZED = 457
UNABLE_TO_USE_STOPPED_JOB = 458
UNABLE_TO_CHANGE_JOB_DURATION = 459
UNABLE_TO_CREATE_NETWORK = 460
VM_CREATE_FAILED = 461
MISSING_NOTIFICATION = 462
INVALID_IMAGE = 463
UNABLE_TO_CREATE = 464
UNABLE_TO_PORT_FORWARD = 465
UNABLE_TO_FIND = 467
TASK_FAILED = 468
INVALID_NODE = 469
NOTIFICATION_FAILURE = 470
UNABLE_TO_UPDATE = 471
PROXY_FAILED = 472
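# Note: INVALID_JOB and INVALID_TASK above share the value 453, so Python's
# Enum machinery makes ErrorCode.INVALID_TASK an alias of ErrorCode.INVALID_JOB.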
class HeartbeatType(Enum):
MachineAlive = "MachineAlive"
TaskAlive = "TaskAlive"
class PoolType(Enum):
managed = "managed"
unmanaged = "unmanaged"
class PoolState(Enum):
init = "init"
running = "running"
shutdown = "shutdown"
halt = "halt"
@classmethod
def needs_work(cls) -> List["PoolState"]:
"""
set of states that indicate work is needed during eventing
"""
return [cls.init, cls.shutdown, cls.halt]
@classmethod
def available(cls) -> List["PoolState"]:
"""set of states that indicate if it's available for work"""
return [cls.running]
class ScalesetState(Enum):
init = "init"
setup = "setup"
resize = "resize"
running = "running"
shutdown = "shutdown"
halt = "halt"
creation_failed = "creation_failed"
@classmethod
def needs_work(cls) -> List["ScalesetState"]:
"""
set of states that indicate work is needed during eventing
"""
return [cls.init, cls.setup, cls.resize, cls.shutdown, cls.halt]
@classmethod
def available(cls) -> List["ScalesetState"]:
"""set of states that indicate if it's available for work"""
unavailable = [cls.shutdown, cls.halt, cls.creation_failed]
return [x for x in cls if x not in unavailable]
@classmethod
def modifying(cls) -> List["ScalesetState"]:
"""set of states that indicate scaleset is resizing"""
return [
cls.halt,
cls.init,
cls.setup,
]
class Architecture(Enum):
x86_64 = "x86_64"
class NodeTaskState(Enum):
init = "init"
setting_up = "setting_up"
running = "running"
class AgentMode(Enum):
fuzz = "fuzz"
repro = "repro"
proxy = "proxy"
class NodeState(Enum):
init = "init"
free = "free"
setting_up = "setting_up"
rebooting = "rebooting"
ready = "ready"
busy = "busy"
done = "done"
shutdown = "shutdown"
halt = "halt"
@classmethod
def needs_work(cls) -> List["NodeState"]:
return [cls.done, cls.shutdown, cls.halt]
@classmethod
def ready_for_reset(cls) -> List["NodeState"]:
# If Node is in one of these states, ignore updates
# from the agent.
return [cls.done, cls.shutdown, cls.halt]
class GithubIssueState(Enum):
open = "open"
closed = "closed"
class GithubIssueSearchMatch(Enum):
title = "title"
body = "body"
class TaskDebugFlag(Enum):
keep_node_on_failure = "keep_node_on_failure"
keep_node_on_completion = "keep_node_on_completion"
class WebhookMessageState(Enum):
queued = "queued"
retrying = "retrying"
succeeded = "succeeded"
failed = "failed"
class UserFieldOperation(Enum):
add = "add"
replace = "replace"
class UserFieldType(Enum):
Bool = "Bool"
Int = "Int"
Str = "Str"
DictStr = "DictStr"
ListStr = "ListStr"
| 25.994987
| 125
| 0.655033
|
35bf432dc0d46d4bf55e5a86c13902bb143b22bb
| 1,689
|
py
|
Python
|
4-es-cluster-on-demand-creation-and-restore/register-snapshot-repo-read-only.py
|
muellerc/es-workshop
|
8d0a630fa3a06501f73e1aa72d4bf8125b6d164a
|
[
"MIT-0"
] | null | null | null |
4-es-cluster-on-demand-creation-and-restore/register-snapshot-repo-read-only.py
|
muellerc/es-workshop
|
8d0a630fa3a06501f73e1aa72d4bf8125b6d164a
|
[
"MIT-0"
] | null | null | null |
4-es-cluster-on-demand-creation-and-restore/register-snapshot-repo-read-only.py
|
muellerc/es-workshop
|
8d0a630fa3a06501f73e1aa72d4bf8125b6d164a
|
[
"MIT-0"
] | null | null | null |
import boto3
import requests
from requests_aws4auth import AWS4Auth
host = 'https://vpc-elastic-search-domain-olp-kr3zgrug73z73lst4c675du3uq.eu-central-1.es.amazonaws.com/' # include https:// and trailing /
region = 'eu-central-1' # e.g. us-west-1
service = 'es'
credentials = boto3.Session().get_credentials()
awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token)
# Register repository
path = '_snapshot/logs-xyz-index-snapshot-repo' # the Elasticsearch API endpoint
url = host + path
payload = {
"type": "s3",
"settings": {
"readonly": "true",
"bucket": "cmr-logs-xyz-index-snapshot-repo",
"region": "eu-central-1",
"role_arn": "arn:aws:iam::689573718314:role/ElasticsearchCuratorIndexSnapshotRole"
}
}
headers = {"Content-Type": "application/json"}
r = requests.put(url, auth=awsauth, json=payload, headers=headers)
print(r.status_code)
print(r.text)
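# Optionally verify the registration by listing snapshots in the repository
# (a minimal sketch; the repository name matches the one registered above)
#
# path = '_snapshot/logs-xyz-index-snapshot-repo/_all'
# url = host + path
#
# r = requests.get(url, auth=awsauth)
#
# print(r.text)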
# # Take snapshot
#
# path = '_snapshot/my-snapshot-repo/my-snapshot'
# url = host + path
#
# r = requests.put(url, auth=awsauth)
#
# print(r.text)
#
# # Delete index
#
# path = 'my-index'
# url = host + path
#
# r = requests.delete(url, auth=awsauth)
#
# print(r.text)
#
# # Restore snapshots (all indices)
#
# path = '_snapshot/my-snapshot-repo/my-snapshot/_restore'
# url = host + path
#
# r = requests.post(url, auth=awsauth)
#
# print(r.text)
#
# # Restore snapshot (one index)
#
# path = '_snapshot/my-snapshot-repo/my-snapshot/_restore'
# url = host + path
#
# payload = {"indices": "my-index"}
#
# headers = {"Content-Type": "application/json"}
#
# r = requests.post(url, auth=awsauth, json=payload, headers=headers)
#
# print(r.text)
| 23.788732
| 138
| 0.69627
|
0cb949164f54f44824a1f0ce27b08bd3c3458cd7
| 588
|
py
|
Python
|
setup.py
|
jackee777/pybabelfy
|
55c266a32564c003d35d129da4723fb74860e1d4
|
[
"MIT"
] | 11
|
2016-11-03T10:09:14.000Z
|
2021-01-06T12:03:09.000Z
|
setup.py
|
jackee777/pybabelfy
|
55c266a32564c003d35d129da4723fb74860e1d4
|
[
"MIT"
] | 2
|
2015-12-02T02:18:43.000Z
|
2021-05-23T16:11:11.000Z
|
setup.py
|
jackee777/pybabelfy
|
55c266a32564c003d35d129da4723fb74860e1d4
|
[
"MIT"
] | 5
|
2016-11-07T14:51:21.000Z
|
2020-12-21T11:04:48.000Z
|
'''
Created on 02/12/2015
@author: david
'''
from distutils.core import setup
setup(
name = 'pybabelfy',
packages = ['pybabelfy'], # this must be the same as the name above
version = '0.1',
description = 'A Python interface for Babelfy',
author = 'David Vilares',
author_email = 'david.vilares89@gmail.com',
url = 'https://github.com/aghie/pybabelfy', # use the URL to the github repo
download_url = 'https://github.com/aghie/pybabelfy/tarball/0.1', # I'll explain this in a second
keywords = ['babelfy', 'wrapper', 'api'], # arbitrary keywords
classifiers = [],
)
| 28
| 98
| 0.680272
|
4689fbf1d0fb8c459c29f814e501a80c813dffb7
| 1,653
|
py
|
Python
|
applications/init/models/_init.py
|
himelpdas/pastebeam_web_server
|
607fe5aaab571fadd99e100fdd774fc953384cf1
|
[
"BSD-3-Clause"
] | null | null | null |
applications/init/models/_init.py
|
himelpdas/pastebeam_web_server
|
607fe5aaab571fadd99e100fdd774fc953384cf1
|
[
"BSD-3-Clause"
] | null | null | null |
applications/init/models/_init.py
|
himelpdas/pastebeam_web_server
|
607fe5aaab571fadd99e100fdd774fc953384cf1
|
[
"BSD-3-Clause"
] | null | null | null |
from pymongo import MongoClient
_client = MongoClient()
_database = _client.test_database
MONGO_ACCOUNTS = _database.auth_user
import bson.json_util as json
from bson.binary import Binary
from Crypto.PublicKey import RSA
from Crypto import Random
from Crypto.Protocol.KDF import PBKDF2
from Crypto.Hash import HMAC,SHA512
class SecureRSAKeyPair(object):
random_gen = Random.new().read
def __init__(self, password, pbkdf2 = False, strength = 2048):
self.password = password
self.key = RSA.generate(strength, self.random_gen)
self.public_key = None
self.private_key = None
self.passphrase = None
self.salt = None
if pbkdf2:
            self.salt = self.random_gen(8)  # never reuse this salt to derive another key
self.createPassphrase()
self.generate()
def createPassphrase(self):
"""
        Ensure the key derivation hash function differs from web2py's, or else the derived key will match the one stored on the server.
        dkLen must be 24 because the private key is encrypted with 3DES in CBC mode.
"""
self.passphrase = PBKDF2(self.password, self.salt, dkLen=24, count=1000, prf=lambda p, s: HMAC.new(p, s, SHA512).digest()).encode("hex")
def generate(self):
self.private_key = self.key.exportKey(passphrase = self.passphrase or self.password)
self.public_key = self.key.publickey().exportKey()
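# A minimal usage sketch (the password below is an illustrative assumption):
#
#   pair = SecureRSAKeyPair("a strong password", pbkdf2=True)
#   pair.public_key    # PEM-encoded public key, safe to publish
#   pair.private_key   # private key encrypted under the PBKDF2-derived passphrase
#   pair.salt          # must be stored to re-derive the passphrase later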
#MUST BE KEPT OUTSIDE CALLBACK FUNCTION!
import zmq
publisher_context = zmq.Context()
publisher_socket = publisher_context.socket(zmq.PUB)
publisher_socket.connect("tcp://localhost:%s" % 8882)
| 39.357143
| 145
| 0.690865
|
30c44702dcca19c7acf63ec5d5e96d9b9f512b35
| 9,924
|
py
|
Python
|
contrib/spendfrom/spendfrom.py
|
DiplexSoftfork/Skyrcoin
|
379ea3f22e8c75168230289be51ee24a7f1c1590
|
[
"MIT"
] | null | null | null |
contrib/spendfrom/spendfrom.py
|
DiplexSoftfork/Skyrcoin
|
379ea3f22e8c75168230289be51ee24a7f1c1590
|
[
"MIT"
] | null | null | null |
contrib/spendfrom/spendfrom.py
|
DiplexSoftfork/Skyrcoin
|
379ea3f22e8c75168230289be51ee24a7f1c1590
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Use the raw transactions API to spend PIVs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a pivxd or pivx-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the pivx data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/PIVX/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "PIVX")
return os.path.expanduser("~/.pivx")
def read_bitcoin_config(dbdir):
"""Read the pivx.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "pivx.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a pivx JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 26890 if testnet else 15890
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the pivxd we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(pivxd):
info = pivxd.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
pivxd.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = pivxd.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(pivxd):
address_summary = dict()
address_to_account = dict()
for info in pivxd.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = pivxd.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = pivxd.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-pivx-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
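# Worked example (illustrative): with unspent outputs of 0.4, 0.3 and 0.5 PIV
# and needed = 0.6, the greedy loop above selects the first two outputs
# (0.7 in total) and returns them together with a change amount of 0.1.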
def create_tx(pivxd, fromaddresses, toaddress, amount, fee):
all_coins = list_available(pivxd)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to pivxd.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = pivxd.createrawtransaction(inputs, outputs)
signed_rawtx = pivxd.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(pivxd, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = pivxd.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(pivxd, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = pivxd.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(pivxd, txinfo)
total_out = compute_amount_out(txinfo)
        actual_fee = total_in - total_out
        if actual_fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(actual_fee))
        tx_size = len(txdata_hex)//2
        kb = tx_size//1000 # integer division rounds down
        if kb > 1 and actual_fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and actual_fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get PIVs from")
parser.add_option("--to", dest="to", default=None,
help="address to get send PIVs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of pivx.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
pivxd = connect_JSON(config)
if options.amount is None:
address_summary = list_available(pivxd)
        for address, info in address_summary.items():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
        while not unlock_wallet(pivxd):
pass # Keep asking for passphrase until they get it right
txdata = create_tx(pivxd, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(pivxd, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = pivxd.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| 37.029851
| 111
| 0.628476
|
54380f3e8020bf093053f117e058a82681002633
| 1,527
|
py
|
Python
|
Zeroer.py
|
kwfredeen/Expression-Zeroer
|
31e3db180b8877c168ec7cf12c2468df6086102e
|
[
"MIT"
] | null | null | null |
Zeroer.py
|
kwfredeen/Expression-Zeroer
|
31e3db180b8877c168ec7cf12c2468df6086102e
|
[
"MIT"
] | null | null | null |
Zeroer.py
|
kwfredeen/Expression-Zeroer
|
31e3db180b8877c168ec7cf12c2468df6086102e
|
[
"MIT"
] | null | null | null |
#MIT License
#Copyright (c) 2021 Ki Fredeen
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
print('Welcome! use "/quit" or "/q" to quit')
while True:
print('Please input expression starting with "=":')
expression = input()
if expression == '/quit' or expression == '/q':
print('Thanks!')
quit()
    if not expression.startswith("="):
print('Expression must start with "="!')
continue
expression = expression.removeprefix('=')
print('=IF(' + expression + ' < 0, 0, ' + expression + ')')
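    # Example (illustrative): entering "=A1-B1" prints "=IF(A1-B1 < 0, 0, A1-B1)",
    # i.e. the original expression clamped at zero when pasted back into a spreadsheet.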
| 38.175
| 79
| 0.722331
|
97b9c9ccf016cb7f926aab5d8cf7d051d62f8b34
| 785
|
py
|
Python
|
version.py
|
grycap/ec4docker
|
3895ae379446b6aed59af43a4841dacac4b2ca49
|
[
"Apache-2.0"
] | 6
|
2016-02-28T08:23:40.000Z
|
2021-04-05T12:49:17.000Z
|
version.py
|
dealfonso/ec4docker
|
3895ae379446b6aed59af43a4841dacac4b2ca49
|
[
"Apache-2.0"
] | 2
|
2016-07-25T02:04:17.000Z
|
2016-10-14T15:32:13.000Z
|
version.py
|
dealfonso/ec4docker
|
3895ae379446b6aed59af43a4841dacac4b2ca49
|
[
"Apache-2.0"
] | 4
|
2016-03-10T10:35:07.000Z
|
2018-07-24T15:57:59.000Z
|
# coding: utf-8
#
# EC4Docker - Elastic Cluster for Docker
# https://github.com/grycap/ec4docker
#
# Copyright (C) GRyCAP - I3M - UPV
# Developed by Carlos A. caralla@upv.es
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
VERSION="0.10.3"
def get():
global VERSION
return VERSION
| 31.4
| 74
| 0.741401
|
62f9ff390cdce54e94ce5fbceb2bbeb7e40fdaac
| 8,078
|
py
|
Python
|
python/genconfigs.py
|
reap2sow1/general-purpose-scripts
|
4a83c2ad5b230145db346c5e1dc5c5a00f2aa02d
|
[
"MIT"
] | null | null | null |
python/genconfigs.py
|
reap2sow1/general-purpose-scripts
|
4a83c2ad5b230145db346c5e1dc5c5a00f2aa02d
|
[
"MIT"
] | null | null | null |
python/genconfigs.py
|
reap2sow1/general-purpose-scripts
|
4a83c2ad5b230145db346c5e1dc5c5a00f2aa02d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Generates/interaces common configurations for scripts in the below repo.
This command line tool is used to generate the configurations used by the
scripts in the general-purpose-scripts repo. The configurations are bare and
are expected to be modified by the end user to take full advantage of using
all the scripts.
"""
# Standard Library Imports
import argparse
import json
import os
import pathlib
import subprocess
import sys
import traceback
# Third Party Imports
import appdirs
# Local Application Imports
from pylib.argparse import CustomRawDescriptionHelpFormatter
# constants and other program configurations
_PROGRAM_NAME = os.path.basename(os.path.abspath(__file__))
_arg_parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=lambda prog: CustomRawDescriptionHelpFormatter(
prog, max_help_position=35
),
allow_abbrev=False,
)
GPS_CONFIG_FILE = "gps.json"
GPS_CONFIG_FILE_DIR_PATH = appdirs.user_config_dir(_PROGRAM_NAME)
GPS_CONFIG_FILE_PATH = os.path.join(GPS_CONFIG_FILE_DIR_PATH, GPS_CONFIG_FILE)
COMMON_CONFIGURATION_PREFIX = "_" # _FOO
# configuration keys
GITHUB_USERNAME_KEY = "github_username"
GITHUB_API_TOKEN_KEY = "github_api_token"
SCRIPT_CONFIGS_KEY = "script_configs_keys"
WEBHOSTED_GIT_ACCOUNT_URL_KEY = "webhosted_git_account_url"
FORKED_REPOS_TO_UPSTREAM_URLS_KEY = "forked_repos_to_upstream_urls"
OWNER_KEY = "owner"
# scripts
DISABLE_GITHUB_ACTIONS = "disable_github_actions.py"
UPDATE_REMOTE_FORKS = "update_remote_forks.py"
# positional/optional argument labels
# used at the command line and to reference values of arguments
EXPORT_SHORT_OPTION = "e"
EXPORT_LONG_OPTION = "export"
SHOW_PATH_SHORT_OPTION = "s"
SHOW_PATH_LONG_OPTION = "show_path"
SHOW_PATH_CLI_NAME = SHOW_PATH_LONG_OPTION.replace("_", "-")
# positional/optional argument default values
_EXPORT_OPTION_DEFAULT_VALUE = True
DEFAULT_GENCONFIGS_CONFIGS = {
GITHUB_USERNAME_KEY: "",
GITHUB_API_TOKEN_KEY: "",
SCRIPT_CONFIGS_KEY: {
UPDATE_REMOTE_FORKS: {
WEBHOSTED_GIT_ACCOUNT_URL_KEY: "",
FORKED_REPOS_TO_UPSTREAM_URLS_KEY: {
"awesome-python": "https://github.com/vinta/awesome-python"
},
},
DISABLE_GITHUB_ACTIONS: {OWNER_KEY: ""},
},
}
def _get_grandparents_pid():
"""Return the grandparent's process id.
Returns
-------
str
The grandparent's process id.
"""
return subprocess.run(
["ps", "--pid", str(os.getppid()), "-o", "ppid", "--no-headers"],
capture_output=True,
encoding="utf-8",
).stdout.strip()
def _get_parent_program_name(file):
"""Determine the parent program's name.
Parameters
----------
file : str
File/program name that is the parent. Intended for backwards
compatibility with non-supported operating systems.
Returns
-------
str
File/program name that is the parent.
"""
mappings = {"python": 1, "python3": 1}
if file != _EXPORT_OPTION_DEFAULT_VALUE:
return file
else:
# Grandparent's pid is needed because of the shims that are
# called prior to the actual script.
completed_process = subprocess.run(
[
"ps",
"--pid",
str(_get_grandparents_pid()),
"-o",
"command",
"--no-headers",
],
capture_output=True,
encoding="utf-8",
)
process_cmd = completed_process.stdout.strip().split()
fil_index = mappings.get(
os.path.basename(process_cmd[0]),
0,
)
return os.path.basename(process_cmd[fil_index])
def retrieve_cmd_args():
"""Retrieve command arguments from the command line.
Returns
-------
Namespace
An object that holds attributes pulled from the command line.
Raises
------
SystemExit
If user input is not considered valid when parsing arguments.
"""
try:
_arg_parser.add_argument(
f"-{EXPORT_SHORT_OPTION}",
f"--{EXPORT_LONG_OPTION}",
nargs="?",
const=_EXPORT_OPTION_DEFAULT_VALUE,
help=(
"creates the intermediate configuration file tailored to "
"a script"
),
metavar="SCRIPT_NAME",
)
_arg_parser.add_argument(
f"-{SHOW_PATH_SHORT_OPTION}",
f"--{SHOW_PATH_CLI_NAME}",
action="store_true",
help=("shows the to-be path for the generated configuration file"),
)
args = vars(_arg_parser.parse_args())
return args
except SystemExit:
sys.exit(1)
def _flatten_genconfigs_configs(script_name, config_datastructure):
"""Minimize the depth of the configuration datastructure passed in.
Script configurations are added to the root node. Which may at most
have two child nodes. From there, the script configurations tree
is removed.
Parameters
----------
script_name : str
The script name whose configurations are appended to the
root of the configuration datastructure.
config_datastructure : dict
The program configuration file contents.
"""
    # LBYL ok, as currently I expect most scripts will just use common
# configurations, with few using the common configuration prefix.
script_configs = (
config_datastructure[SCRIPT_CONFIGS_KEY][script_name]
if script_name in config_datastructure[SCRIPT_CONFIGS_KEY]
else tuple()
)
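    # Keys carrying the common-configuration prefix (e.g. "_github_username") whose unprefixed
    # counterpart already exists at the root are skipped below, so the root-level (common)
    # value remains the one consumers see.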
for config in script_configs:
if (
COMMON_CONFIGURATION_PREFIX in config
and config[ # noqa: W503
len(
COMMON_CONFIGURATION_PREFIX
) : # noqa: E203,E501 black adds padding for complex slice expressions https://github.com/psf/black/issues/446
]
in config_datastructure
):
continue
# Surface the key:value pair, in preparation to flatten the
# data structure.
config_datastructure[config] = script_configs[config]
del config_datastructure[SCRIPT_CONFIGS_KEY]
def _load_configs():
"""Load the program configuration file.
Returns
-------
dict
The program configuration file contents.
bool
If the config file could be loaded.
"""
try:
with open(GPS_CONFIG_FILE_PATH, "r") as file:
return json.load(file)
except FileNotFoundError:
return False
def main(args):
"""Start the main program execution."""
try:
if args[SHOW_PATH_LONG_OPTION]:
print(GPS_CONFIG_FILE_PATH)
elif args[EXPORT_LONG_OPTION]:
configs = _load_configs()
if not configs:
raise FileNotFoundError(1, "_", GPS_CONFIG_FILE_PATH)
script_name = _get_parent_program_name(args[EXPORT_LONG_OPTION])
_flatten_genconfigs_configs(script_name, configs)
print(json.dumps(configs, indent=4))
else:
pathlib.Path(GPS_CONFIG_FILE_DIR_PATH).mkdir(
parents=True, exist_ok=True
)
with open(GPS_CONFIG_FILE_PATH, "w") as file:
json.dump(DEFAULT_GENCONFIGS_CONFIGS, file, indent=4)
sys.exit(0)
except FileNotFoundError as except_obj:
print(
f"{_PROGRAM_NAME}: could not find file: {except_obj.filename}",
file=sys.stderr,
)
sys.exit(1)
except Exception as except_obj:
traceback.print_exception(
type(except_obj),
except_obj,
except_obj.__traceback__,
file=sys.stderr,
)
print(
f"{_PROGRAM_NAME}: an unknown error occurred, see the above!",
file=sys.stderr,
)
sys.exit(1)
if __name__ == "__main__":
args = retrieve_cmd_args()
main(args)
sys.exit(0)
| 28.747331
| 127
| 0.642238
|
99f6869e782fd99454a92733e5cff7564a933697
| 17,734
|
py
|
Python
|
ibllib/pipes/ephys_preprocessing.py
|
nbonacchi/ibllib
|
9066c00a8e9a65a1d209144a2ac54d0b87bec0b3
|
[
"MIT"
] | null | null | null |
ibllib/pipes/ephys_preprocessing.py
|
nbonacchi/ibllib
|
9066c00a8e9a65a1d209144a2ac54d0b87bec0b3
|
[
"MIT"
] | null | null | null |
ibllib/pipes/ephys_preprocessing.py
|
nbonacchi/ibllib
|
9066c00a8e9a65a1d209144a2ac54d0b87bec0b3
|
[
"MIT"
] | null | null | null |
import logging
import re
import shutil
import subprocess
from collections import OrderedDict
import traceback
from pathlib import Path
import numpy as np
import pandas as pd
import mtscomp
import alf.io
from ibllib.ephys import ephysqc, spikes, sync_probes
from ibllib.io import ffmpeg, spikeglx
from ibllib.io.video import label_from_path
from ibllib.io.extractors import ephys_fpga, ephys_passive, camera
from ibllib.pipes import tasks
from ibllib.pipes.training_preprocessing import TrainingRegisterRaw as EphysRegisterRaw
from ibllib.qc.task_extractors import TaskQCExtractor
from ibllib.qc.task_metrics import TaskQC
from ibllib.qc.camera import run_all_qc as run_camera_qc
from ibllib.dsp import rms
from oneibl.one import OneOffline
_logger = logging.getLogger("ibllib")
# level 0
class EphysPulses(tasks.Task):
"""
Extract Pulses from raw electrophysiology data into numpy arrays
Perform the probes synchronisation with nidq (3B) or main probe (3A)
"""
cpu = 2
    io_charge = 30  # this job reads raw ap files
priority = 90 # a lot of jobs depend on this one
level = 0 # this job doesn't depend on anything
def _run(self, overwrite=False):
# outputs numpy
syncs, out_files = ephys_fpga.extract_sync(self.session_path, overwrite=overwrite)
for out_file in out_files:
_logger.info(f"extracted pulses for {out_file}")
status, sync_files = sync_probes.sync(self.session_path)
return out_files + sync_files
class RawEphysQC(tasks.Task):
"""
Computes raw electrophysiology QC
"""
cpu = 2
    io_charge = 30  # this job reads raw ap files
    priority = 10
level = 0 # this job doesn't depend on anything
def _run(self, overwrite=False):
qc_files = ephysqc.raw_qc_session(self.session_path, overwrite=overwrite)
return qc_files
class EphysAudio(tasks.Task):
"""
    Compresses the raw audio recording to FLAC
    """
    cpu = 2
    priority = 10
    level = 0  # this job doesn't depend on anything
def _run(self, overwrite=False):
command = "ffmpeg -i {file_in} -y -nostdin -c:a flac -nostats {file_out}"
file_in = next(self.session_path.rglob("_iblrig_micData.raw.wav"), None)
if file_in is None:
return
file_out = file_in.with_suffix(".flac")
status, output_file = ffmpeg.compress(file_in=file_in, file_out=file_out, command=command)
return [output_file]
class SpikeSorting_KS2_Matlab(tasks.Task):
"""
    Runs the KS2 (Matlab) spike sorting on the raw electrophysiology data
    """
    gpu = 1
    io_charge = 70  # this job reads raw ap files
    priority = 60
    level = 1  # depends on level 0 tasks
@staticmethod
def _sample2v(ap_file):
md = spikeglx.read_meta_data(ap_file.with_suffix(".meta"))
s2v = spikeglx._conversion_sample2v_from_meta(md)
return s2v["ap"][0]
@staticmethod
def _fetch_ks2_commit_hash():
command2run = "git --git-dir ~/Documents/MATLAB/Kilosort2/.git rev-parse --verify HEAD"
process = subprocess.Popen(
command2run, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
info, error = process.communicate()
if process.returncode != 0:
_logger.error(
f"Can't fetch matlab ks2 commit hash, will still attempt to run \n"
f"Error: {error.decode('utf-8')}"
)
return ""
return info.decode("utf-8").strip()
def _run_ks2(self, ap_file):
"""
Runs the ks2 matlab spike sorting for one probe dataset
the spike sorting output can either be with the probe (<1.5.5) or in the
session_path/spike_sorters/ks2_matlab/probeXX folder
:return: path of the folder containing ks2 spike sorting output
"""
label = ap_file.parts[-2]
if ap_file.parent.joinpath("spike_sorting_ks2.log").exists():
_logger.info(f"Already ran: spike_sorting_ks2.log found for {ap_file}, skipping.")
return ap_file.parent
ks2_dir = self.session_path.joinpath("spike_sorters", "ks2_matlab", label)
if ks2_dir.joinpath("spike_sorting_ks2.log").exists():
_logger.info(f"Already ran: spike_sorting_ks2.log found in {ks2_dir}, skipping.")
return ks2_dir
# get the scratch drive from the shell script
SHELL_SCRIPT = Path.home().joinpath(
"Documents/PYTHON/iblscripts/deploy/serverpc/kilosort2/task_ks2_matlab.sh"
)
with open(SHELL_SCRIPT) as fid:
lines = fid.readlines()
line = [line for line in lines if line.startswith("SCRATCH_DRIVE=")][0]
m = re.search(r"\=(.*?)(\#|\n)", line)[0]
scratch_drive = Path(m[1:-1].strip())
assert scratch_drive.exists()
# clean up and create directory, this also checks write permissions
# scratch dir has the following shape: ks2m/ZM_3003_2020-07-29_001_probe00
# first makes sure the tmp dir is clean
shutil.rmtree(scratch_drive.joinpath("ks2m"), ignore_errors=True)
scratch_dir = scratch_drive.joinpath(
"ks2m", "_".join(list(self.session_path.parts[-3:]) + [label])
)
if scratch_dir.exists():
shutil.rmtree(scratch_dir, ignore_errors=True)
scratch_dir.mkdir(parents=True, exist_ok=True)
# decompresses using mtscomp
tmp_ap_file = scratch_dir.joinpath(ap_file.name).with_suffix(".bin")
mtscomp.decompress(cdata=ap_file, out=tmp_ap_file)
# run matlab spike sorting: with R2019a, it would be much easier to run with
# -batch option as matlab errors are redirected to stderr automatically
command2run = f"{SHELL_SCRIPT} {scratch_dir}"
_logger.info(command2run)
process = subprocess.Popen(
command2run,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
executable="/bin/bash",
)
info, error = process.communicate()
info_str = info.decode("utf-8").strip()
if process.returncode != 0:
raise RuntimeError(error.decode("utf-8"))
elif "run_ks2_ibl.m failed" in info_str:
raise RuntimeError("Matlab error ks2 log below:")
_logger.info(info_str)
# clean up and copy: output to session/spike_sorters/ks2_matlab/probeXX (ks2_dir)
tmp_ap_file.unlink() # remove the uncompressed temp binary file
scratch_dir.joinpath("temp_wh.dat").unlink() # remove the memmapped pre-processed file
shutil.move(scratch_dir, ks2_dir)
self.version = self._fetch_ks2_commit_hash()
return ks2_dir
def _run(self, overwrite=False):
"""
Multiple steps. For each probe:
- Runs ks2 (skips if it already ran)
- synchronize the spike sorting
- output the probe description files
:param overwrite:
:return: list of files to be registered on database
"""
efiles = spikeglx.glob_ephys_files(self.session_path)
ap_files = [(ef.get("ap"), ef.get("label")) for ef in efiles if "ap" in ef.keys()]
out_files = []
for ap_file, label in ap_files:
try:
ks2_dir = self._run_ks2(ap_file) # runs ks2, skips if it already ran
probe_out_path = self.session_path.joinpath("alf", label)
probe_out_path.mkdir(parents=True, exist_ok=True)
spikes.ks2_to_alf(
ks2_dir,
bin_path=ap_file.parent,
out_path=probe_out_path,
bin_file=ap_file,
ampfactor=self._sample2v(ap_file),
)
out, _ = spikes.sync_spike_sorting(ap_file=ap_file, out_path=probe_out_path)
out_files.extend(out)
# convert ks2_output into tar file and also register
# Make this in case spike sorting is in old raw_ephys_data folders, for new
# sessions it should already exist
tar_dir = self.session_path.joinpath('spike_sorters', 'ks2_matlab', label)
tar_dir.mkdir(parents=True, exist_ok=True)
out = spikes.ks2_to_tar(ks2_dir, tar_dir)
out_files.extend(out)
except BaseException:
_logger.error(traceback.format_exc())
self.status = -1
continue
probe_files = spikes.probes_description(self.session_path, one=self.one)
return out_files + probe_files
class EphysVideoCompress(tasks.Task):
priority = 40
level = 1
def _run(self, **kwargs):
# avi to mp4 compression
command = ('ffmpeg -i {file_in} -y -nostdin -codec:v libx264 -preset slow -crf 17 '
'-loglevel 0 -codec:a copy {file_out}')
output_files = ffmpeg.iblrig_video_compression(self.session_path, command)
if len(output_files) == 0:
_logger.info('No compressed videos found; skipping timestamp extraction')
return
labels = [label_from_path(x) for x in output_files]
# Video timestamps extraction
data, files = camera.extract_all(self.session_path, save=True, labels=labels)
output_files.extend(files)
# Video QC
run_camera_qc(self.session_path, update=True, one=self.one, cameras=labels)
return output_files
# level 1
class EphysTrials(tasks.Task):
priority = 90
level = 1
def _behaviour_criterion(self):
"""
Computes and update the behaviour criterion on Alyx
"""
import alf.io
from brainbox.behavior import training
trials = alf.io.load_object(self.session_path.joinpath("alf"), "trials")
good_enough = training.criterion_delay(
n_trials=trials["intervals"].shape[0],
perf_easy=training.compute_performance_easy(trials),
)
eid = self.one.eid_from_path(self.session_path)
self.one.alyx.json_field_update(
"sessions", eid, "extended_qc", {"behavior": int(good_enough)}
)
def _run(self):
dsets, out_files = ephys_fpga.extract_all(self.session_path, save=True)
if self.one is None or isinstance(self.one, OneOffline):
return out_files
self._behaviour_criterion()
# Run the task QC
qc = TaskQC(self.session_path, one=self.one, log=_logger)
qc.extractor = TaskQCExtractor(self.session_path, lazy=True, one=qc.one)
# Extract extra datasets required for QC
qc.extractor.data = dsets
qc.extractor.extract_data()
# Aggregate and update Alyx QC fields
qc.run(update=True)
return out_files
class EphysCellsQc(tasks.Task):
priority = 90
level = 3
def _compute_cell_qc(self, folder_probe):
"""
Computes the cell QC given an extracted probe alf path
:param folder_probe: folder
:return:
"""
# compute the straight qc
_logger.info(f"Computing cluster qc for {folder_probe}")
spikes = alf.io.load_object(folder_probe, 'spikes')
clusters = alf.io.load_object(folder_probe, 'clusters')
df_units, drift = ephysqc.spike_sorting_metrics(
spikes.times, spikes.clusters, spikes.amps, spikes.depths,
cluster_ids=np.arange(clusters.channels.size))
# if the ks2 labels file exist, load them and add the column
file_labels = folder_probe.joinpath('cluster_KSLabel.tsv')
if file_labels.exists():
ks2_labels = pd.read_csv(file_labels, sep='\t')
ks2_labels.rename(columns={'KSLabel': 'ks2_label'}, inplace=True)
df_units = pd.concat(
[df_units, ks2_labels['ks2_label'].reindex(df_units.index)], axis=1)
# save as parquet file
df_units.to_parquet(folder_probe.joinpath("clusters.metrics.pqt"))
return folder_probe.joinpath("clusters.metrics.pqt"), df_units, drift
def _label_probe_qc(self, folder_probe, df_units, drift):
"""
Labels the json field of the alyx corresponding probe insertion
:param folder_probe:
:param df_units:
:param drift:
:return:
"""
eid = self.one.eid_from_path(self.session_path)
pdict = self.one.alyx.rest('insertions', 'list', session=eid, name=folder_probe.parts[-1])
if len(pdict) != 1:
return
isok = df_units['label'] == 1
qcdict = {'n_units': int(df_units.shape[0]),
'n_units_qc_pass': int(np.sum(isok)),
'firing_rate_max': np.max(df_units['firing_rate'][isok]),
'firing_rate_median': np.median(df_units['firing_rate'][isok]),
'amplitude_max_uV': np.max(df_units['amp_max'][isok]) * 1e6,
                  'amplitude_median_uV': np.median(df_units['amp_median'][isok]) * 1e6,
'drift_rms_um': rms(drift['drift_um']),
}
file_wm = folder_probe.joinpath('_kilosort_whitening.matrix.npy')
if file_wm.exists():
wm = np.load(file_wm)
qcdict['whitening_matrix_conditioning'] = np.linalg.cond(wm)
# groom qc dict (this function will eventually go directly into the json field update)
for k in qcdict:
if isinstance(qcdict[k], np.int64):
qcdict[k] = int(qcdict[k])
elif isinstance(qcdict[k], float):
qcdict[k] = np.round(qcdict[k], 2)
self.one.alyx.json_field_update("insertions", pdict[0]["id"], "json", qcdict)
def _run(self):
"""
Post spike-sorting quality control at the cluster level.
Outputs a QC table in the clusters ALF object and labels corresponding probes in Alyx
"""
files_spikes = Path(self.session_path).joinpath('alf').rglob('spikes.times.npy')
folder_probes = [f.parent for f in files_spikes]
out_files = []
for folder_probe in folder_probes:
try:
qc_file, df_units, drift = self._compute_cell_qc(folder_probe)
out_files.append(qc_file)
self._label_probe_qc(folder_probe, df_units, drift)
except BaseException:
_logger.error(traceback.format_exc())
self.status = -1
continue
return out_files
class EphysMtscomp(tasks.Task):
priority = 50 # ideally after spike sorting
level = 0
def _run(self):
"""
Compress ephys files looking for `compress_ephys.flag` within the probes folder
Original bin file will be removed
The registration flag created contains targeted file names at the root of the session
"""
out_files = []
ephys_files = spikeglx.glob_ephys_files(self.session_path)
ephys_files += spikeglx.glob_ephys_files(self.session_path, ext="ch")
ephys_files += spikeglx.glob_ephys_files(self.session_path, ext="meta")
for ef in ephys_files:
for typ in ["ap", "lf", "nidq"]:
bin_file = ef.get(typ)
if not bin_file:
continue
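                # Only raw ".bin" files take the branch below (suffix.find("bin") == 1);
                # ".cbin", ".ch" and ".meta" files fall through to the else and are kept as-is.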
if bin_file.suffix.find("bin") == 1:
sr = spikeglx.Reader(bin_file)
if sr.is_mtscomp:
out_files.append(bin_file)
else:
_logger.info(f"Compressing binary file {bin_file}")
out_files.append(sr.compress_file(keep_original=False))
out_files.append(bin_file.with_suffix('.ch'))
else:
out_files.append(bin_file)
return out_files
class EphysDLC(tasks.Task):
gpu = 1
cpu = 4
io_charge = 90
level = 2
def _run(self):
"""empty placeholder for job creation only"""
pass
class EphysPassive(tasks.Task):
cpu = 1
io_charge = 90
level = 1
def _run(self):
"""returns a list of pathlib.Paths. """
data, paths = ephys_passive.PassiveChoiceWorld(self.session_path).extract(save=True)
if any([x is None for x in paths]):
self.status = -1
# Register?
return paths
class EphysExtractionPipeline(tasks.Pipeline):
label = __name__
def __init__(self, session_path=None, **kwargs):
super(EphysExtractionPipeline, self).__init__(session_path, **kwargs)
tasks = OrderedDict()
self.session_path = session_path
# level 0
tasks["EphysRegisterRaw"] = EphysRegisterRaw(self.session_path)
tasks["EphysPulses"] = EphysPulses(self.session_path)
tasks["EphysRawQC"] = RawEphysQC(self.session_path)
tasks["EphysAudio"] = EphysAudio(self.session_path)
tasks["EphysMtscomp"] = EphysMtscomp(self.session_path)
# level 1
tasks["SpikeSorting"] = SpikeSorting_KS2_Matlab(
self.session_path, parents=[tasks["EphysMtscomp"], tasks["EphysPulses"]])
tasks["EphysVideoCompress"] = EphysVideoCompress(
self.session_path, parents=[tasks["EphysPulses"]])
tasks["EphysTrials"] = EphysTrials(self.session_path, parents=[tasks["EphysPulses"]])
tasks["EphysPassive"] = EphysPassive(self.session_path, parents=[tasks["EphysPulses"]])
# level 2
tasks["EphysCellsQc"] = EphysCellsQc(self.session_path, parents=[tasks["SpikeSorting"]])
tasks["EphysDLC"] = EphysDLC(self.session_path, parents=[tasks["EphysVideoCompress"]])
self.tasks = tasks
| 38.975824
| 98
| 0.628961
|
e3806c313dbdaaba517fc1ed2285f9b1fe55aa72
| 3,141
|
py
|
Python
|
tests/api_tests.py
|
HusseinKabbout/qwc-document-service
|
c8d856390006e0f5ecc0d28b0d53da55e5505381
|
[
"MIT"
] | null | null | null |
tests/api_tests.py
|
HusseinKabbout/qwc-document-service
|
c8d856390006e0f5ecc0d28b0d53da55e5505381
|
[
"MIT"
] | null | null | null |
tests/api_tests.py
|
HusseinKabbout/qwc-document-service
|
c8d856390006e0f5ecc0d28b0d53da55e5505381
|
[
"MIT"
] | 1
|
2020-04-24T11:36:26.000Z
|
2020-04-24T11:36:26.000Z
|
import os
import unittest
from urllib.parse import urlparse, parse_qs, urlencode
from flask import Response, json
from flask.testing import FlaskClient
from flask_jwt_extended import JWTManager, create_access_token
import server
class ApiTestCase(unittest.TestCase):
"""Test case for server API"""
def setUp(self):
server.app.testing = True
self.app = FlaskClient(server.app, Response)
JWTManager(server.app)
def tearDown(self):
pass
def jwtHeader(self):
with server.app.test_request_context():
access_token = create_access_token('test')
return {'Authorization': 'Bearer {}'.format(access_token)}
def test_getdocument_pdf(self):
params = {
"format": "pdf",
"MaxOrderID": "10800"
}
response = self.app.get('/demo?' + urlencode(params), headers=self.jwtHeader())
self.assertEqual(200, response.status_code, "Status code is not OK")
self.assertTrue(isinstance(response.data, bytes), "Response is not a valid PDF")
def test_getdocument_html(self):
params = {
"format": "html",
"MaxOrderID": "10800"
}
response = self.app.get('/demo?' + urlencode(params), headers=self.jwtHeader())
success = False
try:
html = response.data.decode("utf-8")
success = html.startswith("<html")
except Exception as e:
print(e)
success = False
self.assertEqual(200, response.status_code, "Status code is not OK")
self.assertTrue(success, "Response is not a valid HTML")
def test_getdocument_csv(self):
params = {
"format": "csv",
"MaxOrderID": "10800"
}
response = self.app.get('/demo?' + urlencode(params), headers=self.jwtHeader())
success = False
try:
csv = response.data.decode("utf-8")
success = True
except Exception as e:
print(e)
success = False
self.assertEqual(200, response.status_code, "Status code is not OK")
self.assertTrue(success, "Response is not a valid CSV")
def test_getdocument_xls(self):
params = {
"format": "xls",
"MaxOrderID": "10800"
}
response = self.app.get('/demo?' + urlencode(params), headers=self.jwtHeader())
self.assertEqual(200, response.status_code, "Status code is not OK")
self.assertTrue(isinstance(response.data, bytes), "Response is not a valid XLS")
def test_getdocument_xlsx(self):
params = {
"format": "xlsx",
"MaxOrderID": "10800"
}
response = self.app.get('/demo?' + urlencode(params), headers=self.jwtHeader())
self.assertEqual(200, response.status_code, "Status code is not OK")
self.assertTrue(isinstance(response.data, bytes), "Response is not a valid XLSX")
def test_getdocument_404(self):
response = self.app.get('/test', headers=self.jwtHeader())
self.assertEqual(404, response.status_code, "Status code is not OK")
| 33.414894
| 89
| 0.60936
|
a9648592d04d912b40f962ad10880dbd22394bc7
| 3,796
|
py
|
Python
|
math/ilp/ilp4.py
|
thammegowda/algos
|
a23f6129c2f031c86314a489c4a8a26f9c7130bf
|
[
"Apache-2.0"
] | 3
|
2015-10-25T06:25:01.000Z
|
2017-02-03T01:51:49.000Z
|
math/ilp/ilp4.py
|
thammegowda/algos
|
a23f6129c2f031c86314a489c4a8a26f9c7130bf
|
[
"Apache-2.0"
] | null | null | null |
math/ilp/ilp4.py
|
thammegowda/algos
|
a23f6129c2f031c86314a489c4a8a26f9c7130bf
|
[
"Apache-2.0"
] | 1
|
2016-08-17T16:37:48.000Z
|
2016-08-17T16:37:48.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Author : Thamme Gowda
Date : December 6, 2017
Taken from http://people.brunel.ac.uk/~mastjjb/jeb/or/moreip.html
"""
"""
A project manager in a company is considering a portfolio of 10 large project investments. These investments differ in the estimated long-run profit (net present value) they will generate as well as in the amount of capital required.
Let Pj and Cj denote the estimated profit and capital required (both given in units of millions of £) for investment opportunity j (j=1,...,10) respectively. The total amount of capital available for these investments is Q (in units of millions of £)
Investment opportunities 3 and 4 are mutually exclusive and so are 5 and 6. Furthermore, neither 5 nor 6 can be undertaken unless either 3 or 4 is undertaken. At least two and at most four investment opportunities have to be undertaken from the set {1,2,7,8,9,10}.
The project manager wishes to select the combination of capital investments that will maximise the total estimated long-run profit subject to the restrictions described above.
Formulate this problem using an integer programming model and comment on the difficulties of solving this model.
"""
"""
Lets pick values
P = [10, 5, 3, 2, 4, 6, 9, 5, 3, 2]
C = [20, 40, 10, 7, 6, 9, 8, 5, 5, 4]
Q = 120
Variables: x_i \in {0, 1} for \i= 1,2...10
Constraints:
\sum{i \in 1,...10} x_i * c_i <= 120
x_3 + x_4 <= 1
x_5 + x_6 <= 1
x_5 <= x_3 + x_4
x_6 <= x_3 + x_4
x_1 + x_2 + x_7 + x_8 + x_9 + x_10 >= 2
x_1 + x_2 + x_7 + x_8 + x_9 + x_10 <= 4
Objective:
Maximize : \sum{i \in 1...10} p_i * x_i
In standard Form:
(1) all constraints are equalities => good for expressing it as a matrix multiplication
(2) only binary variables => because my quantum variables are binary!
(3) Minimization objective => because the quantum annealer minimizes the objetive function
Constraints:
\sum{i \in 1,...10} x_i * c_i + s1 + 2 s2 + 4 s3 + 8 s4 + 16 s5 + 32 s6 + 64 s7 = 120
x3 + x4 + s8 = 1
x5 + x6 + s9 = 1
-x3 - x4 + x5 + s10 + s11 = 0 because -x3 - x4 + x5 <= 0
-x3 - x4 + x6 + s12 + s13 = 0
x1 + x2 + x7 + x8 + x9 + x10 - s14 - s15 - s16 - s17 = 2
x1 + x2 + x7 + x8 + x9 + x10 + s18 + s19 + s20 + s21 = 4
"""
from gurobipy import *
try:
# Create a new model
m = Model("01-ilp-std")
Q = 120
P = [10, 5, 3, 2, 4, 6, 9, 5, 3, 2]
C = [20, 40, 10, 7, 6, 9, 8, 5, 5, 4]
X = [m.addVar(vtype=GRB.BINARY, name="x%d" % (i+1)) for i in range(len(P))]
S = [m.addVar(vtype=GRB.BINARY, name="s%d" % (i+1)) for i in range(21)] # 21 slack variables are needed
# Set objective
#m.setObjective(sum(p * x for p, x in zip(P, X)), GRB.MAXIMIZE)
m.setObjective(-sum(p * x for p, x in zip(P, X)), GRB.MINIMIZE)
m.addConstr(sum(c*x for c,x in zip(C, X)) +
S[0] + 2*S[1] + 4*S[2] + 8*S[3] + 16*S[4] + 32*S[5] + 64*S[6], GRB.EQUAL, Q, "c6")
# Add constraints
# note index starts from 0, different than the above description
m.addConstr(X[2] + X[3] + S[7], GRB.EQUAL, 1, "c0")
m.addConstr(X[4] + X[5] + S[8], GRB.EQUAL, 1, "c1")
m.addConstr(-X[2] - X[3] + X[4] + S[9] + S[10], GRB.EQUAL, 0, "c2")
m.addConstr(-X[2] - X[3] + X[5] + S[11] + S[12], GRB.EQUAL, 0, "c3")
m.addConstr(X[0] + X[1] + X[6] + X[7] + X[8] + X[9] - S[13] - S[14] - S[15] - S[16], GRB.EQUAL, 2, "c4")
m.addConstr(X[0] + X[1] + X[6] + X[7] + X[8] + X[9] + S[17] + S[18] + S[19] + S[20], GRB.EQUAL, 4, "c5")
m.optimize()
for v in m.getVars():
print('%s %g' % (v.varName, v.x))
print('Obj: %g' % m.objVal)
except GurobiError as e:
print('Error code ' + str(e.errno) + ": " + str(e))
except AttributeError:
print('Encountered an attribute error')
| 40.382979
| 264
| 0.60432
|
2e020aef874347b492ea5766f5391bdff0b457eb
| 18,319
|
py
|
Python
|
tests/test_app_routers_ledger_{token_address}_details_data_GET.py
|
BoostryJP/ibet-Prime
|
924e7f8da4f8feea0a572e8b5532e09bcdf2dc99
|
[
"Apache-2.0"
] | 2
|
2021-08-19T12:35:25.000Z
|
2022-02-16T04:13:38.000Z
|
tests/test_app_routers_ledger_{token_address}_details_data_GET.py
|
BoostryJP/ibet-Prime
|
924e7f8da4f8feea0a572e8b5532e09bcdf2dc99
|
[
"Apache-2.0"
] | 46
|
2021-09-02T03:22:05.000Z
|
2022-03-31T09:20:00.000Z
|
tests/test_app_routers_ledger_{token_address}_details_data_GET.py
|
BoostryJP/ibet-Prime
|
924e7f8da4f8feea0a572e8b5532e09bcdf2dc99
|
[
"Apache-2.0"
] | 1
|
2021-11-17T23:18:27.000Z
|
2021-11-17T23:18:27.000Z
|
"""
Copyright BOOSTRY Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
SPDX-License-Identifier: Apache-2.0
"""
from datetime import datetime
from app.model.db import (
Token,
TokenType,
LedgerDetailsData
)
from tests.account_config import config_eth_account
class TestAppRoutersLedgerTokenAddressDetailsDataGET:
# target API endpoint
base_url = "/ledger/{token_address}/details_data"
###########################################################################
# Normal Case
###########################################################################
# <Normal_1_1>
# set issuer-address
def test_normal_1_1(self, client, db):
user = config_eth_account("user1")
issuer_address = user["address"]
token_address = "0xABCdeF1234567890abcdEf123456789000000000"
# prepare data
_token = Token()
_token.type = TokenType.IBET_STRAIGHT_BOND
_token.tx_hash = ""
_token.issuer_address = issuer_address
_token.token_address = token_address
_token.abi = {}
db.add(_token)
_details_data_1_1 = LedgerDetailsData()
_details_data_1_1.token_address = token_address
_details_data_1_1.data_id = "data_id_1"
_details_data_1_1.data_created = datetime.strptime("2022/01/01 15:20:30.000001",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/02
db.add(_details_data_1_1)
_details_data_2_1 = LedgerDetailsData()
_details_data_2_1.token_address = token_address
_details_data_2_1.data_id = "data_id_2"
_details_data_2_1.data_created = datetime.strptime("2022/01/02 00:20:30.000001",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/02
db.add(_details_data_2_1)
_details_data_2_2 = LedgerDetailsData()
_details_data_2_2.token_address = token_address
_details_data_2_2.data_id = "data_id_2"
_details_data_2_2.data_created = datetime.strptime("2022/01/02 00:20:30.000002",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/02
db.add(_details_data_2_2)
_details_data_3_1 = LedgerDetailsData()
_details_data_3_1.token_address = token_address
_details_data_3_1.data_id = "data_id_3"
_details_data_3_1.data_created = datetime.strptime("2022/01/02 15:20:30.000010",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/03
db.add(_details_data_3_1)
_details_data_3_2 = LedgerDetailsData()
_details_data_3_2.token_address = token_address
_details_data_3_2.data_id = "data_id_3"
_details_data_3_2.data_created = datetime.strptime("2022/01/02 15:20:30.000009",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/03
db.add(_details_data_3_2)
_details_data_3_3 = LedgerDetailsData()
_details_data_3_3.token_address = token_address
_details_data_3_3.data_id = "data_id_3"
_details_data_3_3.data_created = datetime.strptime("2022/01/02 15:20:30.000008",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/03
db.add(_details_data_3_3)
_details_data_4_1 = LedgerDetailsData()
_details_data_4_1.token_address = token_address
_details_data_4_1.data_id = "data_id_4"
_details_data_4_1.data_created = datetime.strptime("2022/01/03 00:20:30.000001",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/03
db.add(_details_data_4_1)
# Not Target
_details_data_5_1 = LedgerDetailsData()
_details_data_5_1.token_address = "test"
_details_data_5_1.data_id = "dummy"
db.add(_details_data_5_1)
resp = client.get(
self.base_url.format(token_address=token_address),
headers={
"issuer-address": issuer_address,
}
)
# assertion
assert resp.status_code == 200
assert resp.json() == {
"result_set": {
"count": 4,
"offset": None,
"limit": None,
"total": 4
},
"details_data": [
{
"data_id": "data_id_1",
"count": 1,
"created": "2022-01-02T00:20:30.000001+09:00",
},
{
"data_id": "data_id_2",
"count": 2,
"created": "2022-01-02T09:20:30.000002+09:00",
},
{
"data_id": "data_id_3",
"count": 3,
"created": "2022-01-03T00:20:30.000010+09:00",
},
{
"data_id": "data_id_4",
"count": 1,
"created": "2022-01-03T09:20:30.000001+09:00",
},
]
}
# <Normal_1_2>
# set issuer-address
def test_normal_1_2(self, client, db):
user = config_eth_account("user1")
issuer_address = user["address"]
token_address = "0xABCdeF1234567890abcdEf123456789000000000"
# prepare data
_token = Token()
_token.type = TokenType.IBET_STRAIGHT_BOND
_token.tx_hash = ""
_token.issuer_address = issuer_address
_token.token_address = token_address
_token.abi = {}
db.add(_token)
_details_data_1_1 = LedgerDetailsData()
_details_data_1_1.token_address = token_address
_details_data_1_1.data_id = "data_id_1"
_details_data_1_1.data_created = datetime.strptime("2022/01/01 15:20:30.000001",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/02
db.add(_details_data_1_1)
_details_data_2_1 = LedgerDetailsData()
_details_data_2_1.token_address = token_address
_details_data_2_1.data_id = "data_id_2"
_details_data_2_1.data_created = datetime.strptime("2022/01/02 00:20:30.000001",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/02
db.add(_details_data_2_1)
_details_data_2_2 = LedgerDetailsData()
_details_data_2_2.token_address = token_address
_details_data_2_2.data_id = "data_id_2"
_details_data_2_2.data_created = datetime.strptime("2022/01/02 00:20:30.000002",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/02
db.add(_details_data_2_2)
_details_data_3_1 = LedgerDetailsData()
_details_data_3_1.token_address = token_address
_details_data_3_1.data_id = "data_id_3"
_details_data_3_1.data_created = datetime.strptime("2022/01/02 15:20:30.000010",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/03
db.add(_details_data_3_1)
_details_data_3_2 = LedgerDetailsData()
_details_data_3_2.token_address = token_address
_details_data_3_2.data_id = "data_id_3"
_details_data_3_2.data_created = datetime.strptime("2022/01/02 15:20:30.000009",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/03
db.add(_details_data_3_2)
_details_data_3_3 = LedgerDetailsData()
_details_data_3_3.token_address = token_address
_details_data_3_3.data_id = "data_id_3"
_details_data_3_3.data_created = datetime.strptime("2022/01/02 15:20:30.000008",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/03
db.add(_details_data_3_3)
_details_data_4_1 = LedgerDetailsData()
_details_data_4_1.token_address = token_address
_details_data_4_1.data_id = "data_id_4"
_details_data_4_1.data_created = datetime.strptime("2022/01/03 00:20:30.000001",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/03
db.add(_details_data_4_1)
# Not Target
_details_data_5_1 = LedgerDetailsData()
_details_data_5_1.token_address = "test"
_details_data_5_1.data_id = "dummy"
db.add(_details_data_5_1)
resp = client.get(
self.base_url.format(token_address=token_address),
)
# assertion
assert resp.status_code == 200
assert resp.json() == {
"result_set": {
"count": 4,
"offset": None,
"limit": None,
"total": 4
},
"details_data": [
{
"data_id": "data_id_1",
"count": 1,
"created": "2022-01-02T00:20:30.000001+09:00",
},
{
"data_id": "data_id_2",
"count": 2,
"created": "2022-01-02T09:20:30.000002+09:00",
},
{
"data_id": "data_id_3",
"count": 3,
"created": "2022-01-03T00:20:30.000010+09:00",
},
{
"data_id": "data_id_4",
"count": 1,
"created": "2022-01-03T09:20:30.000001+09:00",
},
]
}
# <Normal_2>
# limit-offset
def test_normal_2(self, client, db):
user = config_eth_account("user1")
issuer_address = user["address"]
token_address = "0xABCdeF1234567890abcdEf123456789000000000"
# prepare data
_token = Token()
_token.type = TokenType.IBET_STRAIGHT_BOND
_token.tx_hash = ""
_token.issuer_address = issuer_address
_token.token_address = token_address
_token.abi = {}
db.add(_token)
_details_data_1_1 = LedgerDetailsData()
_details_data_1_1.token_address = token_address
_details_data_1_1.data_id = "data_id_1"
_details_data_1_1.data_created = datetime.strptime("2022/01/01 15:20:30.000001",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/02
db.add(_details_data_1_1)
_details_data_2_1 = LedgerDetailsData()
_details_data_2_1.token_address = token_address
_details_data_2_1.data_id = "data_id_2"
_details_data_2_1.data_created = datetime.strptime("2022/01/02 00:20:30.000001",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/02
db.add(_details_data_2_1)
_details_data_2_2 = LedgerDetailsData()
_details_data_2_2.token_address = token_address
_details_data_2_2.data_id = "data_id_2"
_details_data_2_2.data_created = datetime.strptime("2022/01/02 00:20:30.000002",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/02
db.add(_details_data_2_2)
_details_data_3_1 = LedgerDetailsData()
_details_data_3_1.token_address = token_address
_details_data_3_1.data_id = "data_id_3"
_details_data_3_1.data_created = datetime.strptime("2022/01/02 15:20:30.000010",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/03
db.add(_details_data_3_1)
_details_data_3_2 = LedgerDetailsData()
_details_data_3_2.token_address = token_address
_details_data_3_2.data_id = "data_id_3"
_details_data_3_2.data_created = datetime.strptime("2022/01/02 15:20:30.000009",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/03
db.add(_details_data_3_2)
_details_data_3_3 = LedgerDetailsData()
_details_data_3_3.token_address = token_address
_details_data_3_3.data_id = "data_id_3"
_details_data_3_3.data_created = datetime.strptime("2022/01/02 15:20:30.000008",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/03
db.add(_details_data_3_3)
_details_data_4_1 = LedgerDetailsData()
_details_data_4_1.token_address = token_address
_details_data_4_1.data_id = "data_id_4"
_details_data_4_1.data_created = datetime.strptime("2022/01/03 00:20:30.000001",
'%Y/%m/%d %H:%M:%S.%f') # JST 2022/01/03
db.add(_details_data_4_1)
# Not Target
_details_data_5_1 = LedgerDetailsData()
_details_data_5_1.token_address = "test"
_details_data_5_1.data_id = "dummy"
db.add(_details_data_5_1)
resp = client.get(
self.base_url.format(token_address=token_address),
params={
"offset": 1,
"limit": 2
},
headers={
"issuer-address": issuer_address,
}
)
# assertion
assert resp.status_code == 200
assert resp.json() == {
"result_set": {
"count": 4,
"offset": 1,
"limit": 2,
"total": 4
},
"details_data": [
{
"data_id": "data_id_2",
"count": 2,
"created": "2022-01-02T09:20:30.000002+09:00",
},
{
"data_id": "data_id_3",
"count": 3,
"created": "2022-01-03T00:20:30.000010+09:00",
},
]
}
###########################################################################
# Error Case
###########################################################################
# <Error_1>
# Parameter Error(issuer-address)
def test_error_1(self, client, db):
token_address = "0xABCdeF1234567890abcdEf123456789000000000"
# request target API
resp = client.get(
self.base_url.format(token_address=token_address),
headers={
"issuer-address": "test",
}
)
# assertion
assert resp.status_code == 422
assert resp.json() == {
"meta": {
"code": 1,
"title": "RequestValidationError"
},
"detail": [
{
"loc": ["header", "issuer-address"],
"msg": "issuer-address is not a valid address",
"type": "value_error"
}
]
}
# <Error_2_1>
# Token Not Found
# set issuer-address
def test_error_2_1(self, client, db):
user = config_eth_account("user1")
issuer_address = user["address"]
token_address = "0xABCdeF1234567890abcdEf123456789000000000"
# prepare data
_token = Token()
_token.type = TokenType.IBET_STRAIGHT_BOND
_token.tx_hash = ""
_token.issuer_address = "0x1234567890123456789012345678901234567899" # not target
_token.token_address = token_address
_token.abi = {}
_token.token_status = 2
db.add(_token)
# request target API
resp = client.get(
self.base_url.format(token_address=token_address),
params={
"offset": 2,
"limit": 3,
},
headers={
"issuer-address": issuer_address,
}
)
# assertion
assert resp.status_code == 404
assert resp.json() == {
"meta": {
"code": 1,
"title": "NotFound"
},
"detail": "token does not exist"
}
# <Error_2_2>
# Token Not Found
# unset issuer-address
def test_error_2_2(self, client, db):
token_address = "0xABCdeF1234567890abcdEf123456789000000000"
# request target API
resp = client.get(
self.base_url.format(token_address=token_address),
params={
"offset": 2,
"limit": 3,
},
)
# assertion
assert resp.status_code == 404
assert resp.json() == {
"meta": {
"code": 1,
"title": "NotFound"
},
"detail": "token does not exist"
}
# <Error_3>
# Processing Token
def test_error_3(self, client, db):
user = config_eth_account("user1")
issuer_address = user["address"]
token_address = "0xABCdeF1234567890abcdEf123456789000000000"
# prepare data
_token = Token()
_token.type = TokenType.IBET_STRAIGHT_BOND
_token.tx_hash = ""
_token.issuer_address = issuer_address
_token.token_address = token_address
_token.abi = {}
_token.token_status = 0
db.add(_token)
# request target API
resp = client.get(
self.base_url.format(token_address=token_address),
params={
"offset": 2,
"limit": 3,
},
headers={
"issuer-address": issuer_address,
}
)
# assertion
assert resp.status_code == 400
assert resp.json() == {
"meta": {
"code": 1,
"title": "InvalidParameterError"
},
"detail": "wait for a while as the token is being processed"
}
| 36.859155
| 100
| 0.520443
|
4c8763ecfe25602716aa03996eed70692ab53229
| 1,876
|
py
|
Python
|
src/gutils/per_cell_export.py
|
GavinHuttley/gutils
|
d8b2b51b56d36adbbe56cac7f5e2df6d75cc61dc
|
[
"BSD-3-Clause"
] | null | null | null |
src/gutils/per_cell_export.py
|
GavinHuttley/gutils
|
d8b2b51b56d36adbbe56cac7f5e2df6d75cc61dc
|
[
"BSD-3-Clause"
] | 1
|
2020-08-18T01:01:56.000Z
|
2020-08-25T23:41:32.000Z
|
src/gutils/per_cell_export.py
|
GavinHuttley/gutils
|
d8b2b51b56d36adbbe56cac7f5e2df6d75cc61dc
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
"""Export grades for each cell to json"""
import json
import sys
from nbgrader.api import MissingEntry
from nbgrader.plugins import ExportPlugin
class PerCellExporter(ExportPlugin):
"""
    Export plugin for nbgrader which outputs per-cell grades to a json formatted file.
"""
def export(self, gradebook):
grades = {}
# It's awful, but I think this nested for loop is the only way to proceed
for assignment in gradebook.assignments:
grades[assignment.name] = {}
for notebook in assignment.notebooks:
grades[assignment.name][notebook.name] = {}
for grade_cell in notebook.grade_cells:
grades[assignment.name][notebook.name][grade_cell.name] = {}
for student in gradebook.students:
try:
grade = gradebook.find_grade(
grade_cell.name,
notebook.name,
assignment.name,
student.id,
)
grades[assignment.name][notebook.name][grade_cell.name][
student.id
] = grade.score
except MissingEntry:
grades[assignment.name][notebook.name][grade_cell.name][
student.id
] = None
if self.to:
out_file = self.to
else:
out_file = "grades.json"
with open(out_file, "w") as f:
json.dump(grades, f, indent=4)
def main():
sys.exit(
"Usage: $ nbgrader export --exporter=gutils.per_cell_export.PerCellExporter"
)
if __name__ == "__main__":
main()
| 30.258065
| 85
| 0.504797
|
049b2ba18bfcade5eee2cd139b23d7b52a8a1175
| 10,455
|
py
|
Python
|
tests/test_mciipm.py
|
adelosa/cardutil
|
fa31223aaac1f0749d50368bb639a311d98e279a
|
[
"MIT"
] | null | null | null |
tests/test_mciipm.py
|
adelosa/cardutil
|
fa31223aaac1f0749d50368bb639a311d98e279a
|
[
"MIT"
] | 1
|
2022-03-25T20:15:24.000Z
|
2022-03-30T09:20:34.000Z
|
tests/test_mciipm.py
|
adelosa/cardutil
|
fa31223aaac1f0749d50368bb639a311d98e279a
|
[
"MIT"
] | null | null | null |
import io
import unittest
from cardutil.mciipm import (
VbsWriter, VbsReader, IpmReader, IpmWriter, Block1014, Unblock1014, block_1014, unblock_1014, vbs_list_to_bytes,
vbs_bytes_to_list, IpmParamReader)
from tests import message_ascii_raw, message_ebcdic_raw, print_stream
class MciIpmTestCase(unittest.TestCase):
def test_real_message_example_ascii(self):
# create the input ipm file bytes -- test_file
message_list = [message_ascii_raw for _ in range(15)]
with io.BytesIO() as in_data:
with VbsWriter(in_data, blocked=True) as writer:
writer.write_many(message_list)
print_stream(in_data, "VBS in data")
# read vbs test file
reader = IpmReader(in_data, blocked=True)
results = list(reader)
self.assertEqual(len(results), len(message_list))
def test_real_message_example_ebcdic(self):
# write 1014 blocked test file
message_list = [message_ebcdic_raw for _ in range(15)]
with io.BytesIO() as in_data:
with VbsWriter(in_data, blocked=True) as writer:
writer.write_many(message_list)
print_stream(in_data, "1014 blocked in data")
# read blocked test file
reader = IpmReader(in_data, encoding='cp500', blocked=True)
results = list(reader)
print(results)
self.assertEqual(len(results), len(message_list))
def test_ipmwriter_vbs_file(self):
record = {'MTI': '1111', 'DE2': '8888999988889999'}
records = [record for _ in range(5)]
with io.BytesIO() as out_data:
writer = IpmWriter(out_data)
for record in records:
writer.write(record)
writer.close()
print_stream(out_data, 'VBS output file')
reader = IpmReader(out_data)
results = list(reader)
print(results)
self.assertEqual(results, records)
def test_ipm_reader_with_config(self):
record = {'MTI': '1111', 'DE2': '8888999988889999'}
# the following config applies the PAN masking formatter
bit_config = {"2": {"field_name": "PAN", "field_type": "LLVAR", "field_length": 0, "field_processor": "PAN"}}
records = [record]
with io.BytesIO() as out_data:
with IpmWriter(out_data) as writer:
writer.write_many(records)
print_stream(out_data, 'VBS output file')
reader = IpmReader(out_data, iso_config=bit_config)
results = list(reader)
print(results)
self.assertEqual(results, [{'MTI': '1111', 'DE2': '888899******9999'}])
def test_ipmwriter_blocked_file(self):
record = {'MTI': '1111', 'DE2': '8888999988889999'}
records = [record for _ in range(5)]
with io.BytesIO() as out_data:
with IpmWriter(out_data, blocked=True) as writer:
writer.write_many(records)
print_stream(out_data, 'VBS output file')
reader = IpmReader(out_data, blocked=True)
results = list(reader)
print(results)
self.assertEqual(results, records)
def test_vbsreader_vbs_file(self):
# create the input file bytes -- test_file
records = [b'12345678901234567890' for _ in range(5)]
with io.BytesIO() as in_data:
# write vbs test file
writer = VbsWriter(in_data)
            for record in records:
                writer.write(record)
            writer.close()
print_stream(in_data, "VBS in data")
reader = VbsReader(in_data)
results = list(reader)
print(results)
self.assertEqual(results, records)
def test_vbsreader_vbs_file_missing_0_len(self):
"""
The reader can handle VBS files that don't have final 0 length record
"""
# create the input file bytes -- test_file
records = [b'12345678901234567890' for _ in range(5)]
with io.BytesIO() as in_data:
# write vbs test file
writer = VbsWriter(in_data)
# don't call close method which writes the zero length record
for record in records:
writer.write(record)
print_stream(in_data, "VBS in data")
reader = VbsReader(in_data)
results = list(reader)
print(results)
self.assertEqual(results, records)
def test_vbsreader_blocked_file(self):
# create the input file bytes -- test_file
records = [b'12345678901234567890' for _ in range(5)]
with io.BytesIO() as in_data:
# write vbs test file
writer = VbsWriter(in_data, blocked=True)
            for record in records:
                writer.write(record)
            writer.close()
print_stream(in_data, "Blocked vbs data")
reader = VbsReader(in_data, blocked=True)
results = list(reader)
print(results)
self.assertEqual(results, records)
def test_file_blocker_compare(self):
"""
Checks that the Block1014 class works the same as the
:return:
"""
out_unblocked = io.BytesIO()
message_list = [message_ascii_raw for _ in range(10)]
writer = VbsWriter(out_unblocked)
for message in message_list:
writer.write(message)
writer.close()
out_blocked = io.BytesIO()
block_1014(out_unblocked, out_blocked)
out_blocked.seek(0)
blocked1 = out_blocked.read()
print(blocked1)
out_blocked2 = io.BytesIO()
writer = VbsWriter(out_blocked2, blocked=True)
for message in message_list:
writer.write(message)
writer.close()
out_blocked2.seek(0)
blocked2 = out_blocked2.read()
print(blocked2)
self.assertEqual(blocked1, blocked2)
out_blocked.seek(0)
out = io.BytesIO()
unblock_1014(out_blocked, out)
print_stream(out, "unblocked data")
def test_unblock1014_exceptions(self):
# create correct blocked
message_list = [message_ascii_raw for _ in range(10)]
out_blocked = io.BytesIO()
writer = VbsWriter(out_blocked, blocked=True)
for message in message_list:
writer.write(message)
writer.close()
out_blocked.seek(0)
blocked = out_blocked.read()
print(blocked)
# remove byte from end of file -- invalid file size
out_blocked_missing_data = io.BytesIO(blocked[:-2])
out_blocked_missing_data.seek(0)
out = io.BytesIO()
with self.assertRaises(ValueError):
unblock_1014(out_blocked_missing_data, out)
# bad pad chars
out_blocked_bad_fill = io.BytesIO(blocked[:-2] + b'$$')
out_blocked_bad_fill.seek(0)
out = io.BytesIO()
with self.assertRaises(ValueError):
unblock_1014(out_blocked_bad_fill, out)
def test_write_read_large_records(self):
"""
Checks that the Block1014 class handles large records (greater than 1014 bytes per record)
"""
blocked = io.BytesIO()
message_list = [b'*' * 2000 for _ in range(5)]
writer = VbsWriter(blocked, blocked=True)
for message in message_list:
writer.write(message)
writer.close()
print_stream(blocked, 'blocked')
reader = VbsReader(blocked, blocked=True)
for count, rec in enumerate(reader):
self.assertLess(count, 5)
self.assertEqual(rec, b'*' * 2000)
def test_block1014_file_obj(self):
"""
check that can access the underlying file object
:return:
"""
my_file = io.BytesIO()
my_file_block = Block1014(my_file)
self.assertEqual(my_file_block.tell(), 0)
self.assertEqual(my_file_block.undefined_func, None)
my_file_block.close()
def test_vbsreader_file_obj(self):
"""
check that can access the underlying file object
:return:
"""
my_file = io.BytesIO()
vbs = VbsReader(my_file)
self.assertEqual(vbs.tell(), 0)
self.assertEqual(vbs.undefined_func, None)
def test_vbswriter_file_obj(self):
"""
        Check that attribute access is delegated to the underlying file object.
"""
my_file = io.BytesIO()
vbs = VbsWriter(my_file)
self.assertEqual(vbs.tell(), 0)
self.assertEqual(vbs.undefined_func, None)
def test_unblock1014_file_obj(self):
"""
        Check that attribute access is delegated to the underlying file object.
"""
my_file = io.BytesIO()
my_file_block = Unblock1014(my_file)
self.assertEqual(my_file_block.tell(), 0)
self.assertEqual(my_file_block.undefined_func, None)
my_file_block.read()
def test_vbs_list_to_bytes_to_list(self):
test_bytes_list = [b'aaa', b'bbb', b'ccc']
vbs_data = vbs_list_to_bytes(test_bytes_list)
print(vbs_data)
self.assertEqual(vbs_data, b'\x00\x00\x00\x03aaa\x00\x00\x00\x03bbb\x00\x00\x00\x03ccc\x00\x00\x00\x00')
vbs_list = vbs_bytes_to_list(vbs_data)
print(vbs_list)
self.assertEqual(vbs_list, test_bytes_list)
def test_ipm_param_reader(self):
param_file_data = [
b'2011101414AIP0000T1IP0000T1 TABLE LIST ' + 188 * b'.' + b'001',
b'2014101414AIP0000T1IP0040T1 ACCOUNT RANGE TABLE ' + 188 * b'.' + b'036',
b'TRAILER RECORD IP0000T1 00000218 ',
b'........xxx....', # dummy record
b'1711114A0365116545113000000000MCC5116545113999999999MCC020000000152710084563AUS036CMCC NNYMCC N0000000362'
b'0000000000000000000000000000 000000NN 000000NNNN0NUNN0N N ',
]
with io.BytesIO() as test_param_stream:
with VbsWriter(test_param_stream, blocked=True) as test_param_vbs:
test_param_vbs.write_many(param_file_data)
test_param_stream.seek(0)
reader = IpmParamReader(test_param_stream, table_id='IP0040T1')
for record in reader:
print(record)
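# A minimal usage sketch, separate from the unittest cases above. It assumes the
# io module and the VbsReader/VbsWriter classes are already imported at the top
# of this module, as the tests require. It shows the plain write/close/read
# round trip for a blocked (1014-byte) VBS stream.
def _demo_vbs_roundtrip(records=(b'record one', b'record two')):
    stream = io.BytesIO()
    writer = VbsWriter(stream, blocked=True)
    for record in records:
        writer.write(record)
    writer.close()  # writes the terminating zero-length record and pads the last block
    stream.seek(0)
    return list(VbsReader(stream, blocked=True))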
if __name__ == '__main__':
unittest.main()
| 33.834951
| 120
| 0.605452
|
4ecf68480532236cf0b75ecb4ca258417749bd60
| 19,089
|
py
|
Python
|
app.py
|
RekhaBhale/face-recognition
|
e26d6d0889a1e124dd9b59425f20e94fe687f55d
|
[
"BSL-1.0"
] | null | null | null |
app.py
|
RekhaBhale/face-recognition
|
e26d6d0889a1e124dd9b59425f20e94fe687f55d
|
[
"BSL-1.0"
] | 1
|
2019-04-27T12:00:55.000Z
|
2019-07-19T08:17:16.000Z
|
app.py
|
RekhaBhale/face-recognition
|
e26d6d0889a1e124dd9b59425f20e94fe687f55d
|
[
"BSL-1.0"
] | null | null | null |
from flask import Flask, json, Response, request, render_template
from werkzeug.utils import secure_filename
from os import path, getcwd
import time
from db import Database
from face import Face
import db as dbHandler
from md5 import test
import sqlite3
import json
app = Flask(__name__)
#1.configuration settings
app.config['file_allowed'] = ['image/png', 'image/jpeg']
app.config['storage'] = path.join(getcwd(), 'storage')
app.db = Database()
app.face = Face(app)
def success_handle(output, status=200, mimetype='application/json'):
return Response(output, status=status, mimetype=mimetype)
def error_handle(error_message, status=500, mimetype='application/json'):
return Response(json.dumps({"error": {"message": error_message}}), status=status, mimetype=mimetype)
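# Illustrative sketch only (not wired into the routes below): the handlers in this
# file repeat the same sqlite3 pattern -- connect, set a Row factory, run a
# parameterised query, fetch all rows. The hypothetical helper below captures
# that pattern in one place.
def query_db(sql, args=()):
    con = sqlite3.connect("database.db")
    con.row_factory = sqlite3.Row  # rows can be accessed by column name
    try:
        cur = con.cursor()
        cur.execute(sql, args)  # parameterised to avoid SQL injection
        return cur.fetchall()
    finally:
        con.close()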
#default page
@app.route('/')
def index():
return render_template('home.html')
#login page
@app.route('/login', methods=['POST','GET'])
def login():
if request.method == 'POST':
username = request.form['username']
password = request.form['password']
demo = test(str(password))
print (demo)
print (password)
#password.encode("ascii")
#demo = [input(password).encode("ascii")]
#print (demo)
#for message in demo:
# result = md5_to_hex(md5(message))
#print (password)
#print (result)
con = sqlite3.connect("database.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("SELECT * from login WHERE username=? AND password=?", (username, demo))
results = cur.fetchall()
for row in results:
type = row[2]
if type == "admin":
return admin_home()
elif type == "police":
return police_home()
elif type == "company":
return company_home()
else:
print("error")
return render_template('home.html')
#2 police_db
@app.route('/addrec', methods=['POST', 'GET'])
def addrec():
if request.method == 'POST':
name_of_police_stn = request.form['name_of_police_stn']
police_stn_no = request.form['police_stn_no']
region1 = request.form['region1']
address_ps = request.form['address_ps']
ps_phone1 = request.form['ps_phone1']
head_officer = request.form['head_officer']
head_id = request.form['head_id']
head_aadhar = request.form['head_aadhar']
head_pan = request.form['head_pan']
head_email = request.form['head_email']
head_mob_no = request.form['head_mob_no']
head_user_id1 = request.form['head_user_id1']
head_pass1 = request.form['head_pass1']
head_pass21 = request.form['head_pass21']
pass_police = test(str(head_pass1))
print (pass_police)
#print (head_pan)
app.db.insertUser(name_of_police_stn, police_stn_no, region1, address_ps, ps_phone1, head_officer, head_id, head_aadhar, head_pan, head_email, head_mob_no, head_user_id1, pass_police, head_pass21)
return render_template('home.html')
else:
return render_template('home.html')
@app.route('/admin_user2det')
def show():
con = sqlite3.connect("database.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("select * from police_reg")
rows = cur.fetchall();
return render_template("admin_user2det.html", rows=rows)
#3,com
@app.route('/addrec1', methods=['POST', 'GET'])
def addrec1():
if request.method == 'POST':
name_of_comp = request.form['name_of_comp']
reg_no = request.form['unique_reg_no']
region= request.form['region']
address_comp = request.form['address_comp']
ps_phone = request.form['ps_phone']
hr_name = request.form['hr_name']
emp_id = request.form['emp_id']
hr_aadhar = request.form['hr_aadhar']
hr_pan = request.form['hr_pan']
hr_email = request.form['hr_email']
hr_mob_no = request.form['hr_mob_no']
head_user_id = request.form['head_user_id']
head_pass = request.form['head_pass']
head_pass2 = request.form['head_pass2']
pass_comp = test(str(head_pass))
print (pass_comp)
app.db.insertUser1('INSERT INTO company_reg (name_of_comp, reg_no, region, address_comp,ps_phone, hr_name, emp_id, hr_aadhar, hr_pan, hr_email, hr_mob_no, head_user_id, head_pass, head_pass2) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)',name_of_comp, reg_no, region, address_comp, ps_phone, hr_name, emp_id, hr_aadhar, hr_pan, hr_email, hr_mob_no, head_user_id, pass_comp, head_pass2)
#app.db.insertUser(name_of_comp, reg_no, region, address_comp, ps_phone, hr_name, emp_id, hr_aadhar, hr_pan, hr_email, hr_mob_no, head_user_id, head_pass, head_pass2)
return render_template('home.html')
else:
return render_template('home.html')
@app.route('/admin_user3det')
def list():
con = sqlite3.connect("database.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("select * from company_reg")
rows = cur.fetchall();
return render_template("admin_user3det.html", rows=rows)
@app.route('/admin_home')
def admin_home():
con = sqlite3.connect("database.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("select * from company_reg where status='false' ")
row1 = cur.fetchall()
cur.execute("select * from police_reg where status='false' ")
row2 = cur.fetchall()
return render_template("admin_home.html", row1=row1, row2=row2)
@app.route('/police_home')
def police_home():
con = sqlite3.connect("database.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("select * from users ORDER BY id DESC")
rows = cur.fetchall();
return render_template("police_home.html", rows=rows)
#4.face wala db
def get_user_by_id(user_id):
user = {}
results = app.db.select(
'SELECT users.id, users.name, users.created, faces.id, faces.user_id, faces.filename,faces.created FROM users LEFT JOIN faces ON faces.user_id = users.id WHERE users.id = ?',
[user_id])
index = 0
for row in results:
# print(row)
face = {
"id": row[3],
"user_id": row[4],
"filename": row[5],
"created": row[6],
}
if index == 0:
user = {
"id": row[0],
"name": row[1],
"created": row[2],
"faces": [],
}
if row[3]:
user["faces"].append(face)
index = index + 1
if 'id' in user:
return user
return None
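# Sketch of the value returned by get_user_by_id() above, shown with made-up data:
# a single user dict carrying the face rows produced by the LEFT JOIN.
_EXAMPLE_USER_SHAPE = {
    "id": 1,
    "name": "John Doe",
    "created": 1555555555,
    "faces": [
        {"id": 7, "user_id": 1, "filename": "john.png", "created": 1555555555},
    ],
}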
@app.route('/delete_user_by_id', methods=['POST', 'GET'])
def delete_user_by_id():
if request.method == 'POST':
user_id = request.form['user_id']
app.db.delete('DELETE FROM users WHERE users.id = ?', [user_id])
# also delete all faces with user id
app.db.delete('DELETE FROM faces WHERE faces.user_id = ?', [user_id])
return render_template('layout_police.html')
else:
return render_template('layout_police.html')
#5. Actual working part
@app.route('/api', methods=['GET'])
def homepage():
output = json.dumps({"api": '1.0'})
return success_handle(output)
@app.route('/api/train', methods=['POST'])
def train():
output = json.dumps({"success": True})
if 'file' not in request.files:
print ("Face image is required")
return error_handle("Face image is required.")
else:
print("File request", request.files)
file = request.files['file']
if file.mimetype not in app.config['file_allowed']:
print("File extension is not allowed")
return error_handle("We are only allow upload file with *.png , *.jpg")
else:
# get name in form data
name = request.form['name']
contact = request.form['contact']
address = request.form['address']
aadhar = request.form['aadhar']
crime = request.form['crime']
act = request.form['act']
gender = request.form['gender']
dob = request.form['dob']
#print("Information of that face", dob)
print("File is allowed and will be saved in ", app.config['storage'])
filename = secure_filename(file.filename)
trained_storage = path.join(app.config['storage'], 'trained')
file.save(path.join(trained_storage, filename))
# let start save file to our storage
# save to our sqlite database.db
created = int(time.time())
user_id = app.db.insert('INSERT INTO users(name, contact,address,aadhar,crime,act,gender,dob, created) values(?,?,?,?,?,?,?,?,?)', [name, contact, address,aadhar,crime,act,gender,dob, created])
if user_id:
print("User saved in data", name, user_id)
# user has been save with user_id and now we need save faces table as well
face_id = app.db.insert('INSERT INTO faces(user_id, filename, created) values(?,?,?)',
[user_id, filename, created])
if face_id:
print("cool face has been saved")
face_data = {"id": face_id, "filename": filename, "created": created}
return_output = json.dumps({"id": user_id, "name": name, "face": [face_data]})
return success_handle(return_output)
else:
print("An error saving face image.")
return error_handle("n error saving face image.")
else:
print("Something happend")
return error_handle("An error inserting new user")
print("Request is contain image")
return success_handle(output)
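# Client-side sketch (assumptions: the external `requests` package is available and
# the app is running locally on Flask's default port 5000). It posts a face image
# together with the form fields that train() above reads. All field values below
# are made up; the app itself does not use this helper.
def _example_train_request(image_path):
    import requests  # external dependency, needed only for this sketch
    fields = {
        "name": "John Doe", "contact": "0000000000", "address": "example address",
        "aadhar": "000000000000", "crime": "none", "act": "none",
        "gender": "M", "dob": "1990-01-01",
    }
    with open(image_path, "rb") as fh:
        resp = requests.post("http://127.0.0.1:5000/api/train",
                             data=fields, files={"file": fh})
    return resp.json()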
# route for user profile
@app.route('/api/users/<int:user_id>', methods=['GET', 'DELETE'])
def user_profile(user_id):
if request.method == 'GET':
user = get_user_by_id(user_id)
if user:
return success_handle(json.dumps(user), 200)
else:
return error_handle("User not found", 404)
if request.method == 'DELETE':
delete_user_by_id(user_id)
return success_handle(json.dumps({"deleted": True}))
#criminal profile
@app.route('/view/<id>', methods=['POST', 'GET'])
def view(id):
print (id)
con = sqlite3.connect("database.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("SELECT * from users WHERE id=?", [id])
results = cur.fetchall()
con.commit()
return render_template('criminal_profile.html', rows=results)
def get_users_data(user_id):
results = app.db.select('SELECT * from users WHERE id=?',[user_id])
for row in results:
print(row)
return results
# router for recognize a unknown face
@app.route('/api/recognize', methods=['POST','GET'])
def recognize():
if 'file' not in request.files:
return error_handle("Image is required")
else:
file = request.files['file']
        # validate the file extension
if file.mimetype not in app.config['file_allowed']:
return error_handle("File extension is not allowed")
else:
filename = secure_filename(file.filename)
unknown_storage = path.join(app.config["storage"], 'unknown')
file_path = path.join(unknown_storage, filename)
file.save(file_path)
user_id = app.face.recognize(filename)
get_users_data(user_id)
#result = app.db.select('SELECT * from users WHERE id=?',[user_id])
#print (result)
if user_id:
user = get_user_by_id(user_id)
print(user)
print(type(user))
message = {"message": "yach nav".format(user["name"]),
"user": user}
#print (user)
#message = {"message": "Hey we found {0} matched with your face image",
# "user": user}
#view(user_id)
return success_handle(json.dumps(message))
else:
return error_handle("Sorry we can not found any people matched with your face image, try another image")
@app.route('/logout')
def logout():
return render_template('home.html')
@app.route('/home')
def home():
return render_template('home.html')
# admin window
#@app.route('/admin_home')
#def admin_home():
# return render_template('admin_home.html')
@app.route('/admin_user2det')
def admin_user2det():
return render_template('admin_user2det.html')
@app.route('/admin_user3det')
def admin_user3det():
return render_template('admin_user3det.html')
@app.route('/notification')
def notification():
return render_template('notification.html')
# police user window
@app.route('/police_add')
def police_add():
return render_template('police_add.html')
@app.route('/police_delete')
def police_delete():
return render_template('police_delete.html')
@app.route('/police_update')
def police_update():
return render_template('police_update.html')
# company user window
@app.route('/company_home', methods=['GET','POST'])
def company_home():
return render_template('company_home.html')
# registration details
@app.route('/reg2')
def reg2():
return render_template('reg2.html')
@app.route('/reg1')
def reg1():
return render_template('reg1.html')
#police delete
@app.route('/pro', methods=['POST','GET'])
def pro():
if request.method == 'POST':
id = request.form['id']
print (id)
con = sqlite3.connect("database.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
print ("conn")
cur.execute("SELECT * from users WHERE id=?", [id])
print ("query")
results = cur.fetchall()
return render_template('police_delete.html', rows=results)
else:
print("error")
return render_template('police_home.html')
@app.route('/delete/<id>')
def base1(id):
print(id)
con = sqlite3.connect("database.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("DELETE from users WHERE id=?", [id])
con.commit()
return police_home()
#update police
@app.route('/pro1', methods=['POST','GET'])
def pro1():
if request.method == 'POST':
id = request.form['id']
con = sqlite3.connect("database.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("SELECT * from users WHERE id=?", [id])
results = cur.fetchall()
return render_template('police_update.html', rows=results)
else:
print("error")
return render_template('police_home.html')
@app.route('/update1/<id>')
def update1(id):
print(id)
con = sqlite3.connect("database.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("SELECT * from users WHERE id=?", [id])
results = cur.fetchall()
con.commit()
print("done")
return render_template('update.html', rows=results)
#update form
@app.route('/upd', methods=['POST','GET'])
def upd():
print ("...")
if request.method == 'POST':
print ("a")
con = sqlite3.connect("database.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
id = request.form['id']
print (id)
name = request.form['name']
dob = request.form['dob']
print (name)
gender = request.form['gender']
contact = request.form['contact']
address = request.form['address']
aadhar = request.form['aadhar']
crime = request.form['crime']
act = request.form['act']
print ("b")
cur.execute("UPDATE users set name=?, dob=? WHERE id=?", [name, dob, id])
print ("k")
con.commit()
return police_home()
else:
return police_home()
#accept reject logic for company user
@app.route('/accept/<head_user_id>', methods=['POST','GET'])
def accept(head_user_id):
print (head_user_id)
con = sqlite3.connect("database.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("UPDATE company_reg set status='true' where head_user_id=?", [head_user_id])
print ("up")
#con.commit()
#cur = con.cursor()
cur.execute("SELECT * from company_reg where head_user_id=?", [head_user_id])
results = cur.fetchall()
print ("one")
for row in results:
password = row[12]
type = row[15]
#con.commit()
print (type)
print (password)
#cur = con.cursor()
cur.execute("INSERT INTO login (username,password,type) values(?,?,?)", [head_user_id, password, type])
con.commit()
return admin_home()
@app.route('/reject/<head_user_id>', methods=['POST', 'GET'])
def reject(head_user_id):
print(head_user_id)
con = sqlite3.connect("database.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("DELETE from company_reg WHERE head_user_id=?", [head_user_id])
print ("delete succ")
con.commit()
return admin_home()
#accept reject logic for police user
@app.route('/acc/<head_user_id1>', methods=['POST', 'GET'])
def paccept(head_user_id1):
print (head_user_id1)
con = sqlite3.connect("database.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("UPDATE police_reg set status='true' where head_user_id1=?", [head_user_id1])
print ("up")
#con.commit()
#cur = con.cursor()
cur.execute("SELECT * from police_reg where head_user_id1=?", [head_user_id1])
results = cur.fetchall()
print ("one")
for row in results:
password = row[12]
type = row[14]
#con.commit()
print (type)
print (password)
#cur = con.cursor()
cur.execute("INSERT INTO login (username,password,type) values(?,?,?)", [head_user_id1, password, type])
con.commit()
return admin_home()
@app.route('/rej/<head_user_id1>', methods=['POST', 'GET'])
def pr(head_user_id1):
print(head_user_id1)
con = sqlite3.connect("database.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
cur.execute("DELETE from police_reg WHERE head_user_id1=?", [head_user_id1])
print ("delete succ")
con.commit()
return admin_home()
#company home criminal profile
@ app.route('/crimesearch', methods=['POST', 'GET'])
def crimesearch():
if request.method == 'POST':
name = request.form['name']
print (name)
con = sqlite3.connect("database.db")
con.row_factory = sqlite3.Row
cur = con.cursor()
print ("conn")
cur.execute("SELECT * from users WHERE name=?", [name])
print ("query")
results = cur.fetchall()
return render_template('criminal_profile.html', rows=results)
else:
print("error")
return render_template('police_home.html')
#@app.route('/cripro', methods=['POST', 'GET'])
#def cripro():
# Run the app
app.run()
| 32.354237
| 385
| 0.616166
|
09061d6ab2e2a7e4c8a1bb88d943ba9ef2db2d5d
| 2,204
|
py
|
Python
|
src/testcase/GN_APP/case/GN_APP_REGISTER/GN_APP_REGISTER_018.py
|
maiyajj/AutoTest_script-Appium_Connect
|
f9c2c42c281a9e2f984acb4a72dda0694b053f22
|
[
"Apache-2.0"
] | 28
|
2017-11-10T00:19:16.000Z
|
2022-02-19T16:42:05.000Z
|
src/testcase/GN_APP/case/GN_APP_REGISTER/GN_APP_REGISTER_018.py
|
maiyajj/AutoTest_script-Appium_Connect
|
f9c2c42c281a9e2f984acb4a72dda0694b053f22
|
[
"Apache-2.0"
] | null | null | null |
src/testcase/GN_APP/case/GN_APP_REGISTER/GN_APP_REGISTER_018.py
|
maiyajj/AutoTest_script-Appium_Connect
|
f9c2c42c281a9e2f984acb4a72dda0694b053f22
|
[
"Apache-2.0"
] | 23
|
2017-08-22T06:12:19.000Z
|
2021-09-18T05:45:41.000Z
|
# coding=utf-8
from src.testcase.GN_APP.WidgetOperation import *
class GNAPPRegister18(WidgetOperation):
@case_run(True)
def run(self):
        self.case_module = u"Registration"  # module this test case belongs to
        self.case_title = u'Registration page - register again with an already-registered username'  # test case name
        self.zentao_id = "1768"  # ZenTao ID
    # test case actions
def case(self):
self.widget_click(self.page["login_page"]["to_register"],
self.page["register_page"]["title"])
user_name = self.widget_click(self.page["register_page"]["username"],
self.page["register_page"]["title"])
        # send the data
data = self.user["user_name"]
data = bytearray.fromhex(str(data)).decode('utf-8').replace(" ", "")
user_name.clear()
self.ac.send_keys(user_name, data, self.driver)
        self.debug.info(u'[APP_INPUT] ["registered username"] input success')
time.sleep(0.5)
self.show_pwd(self.wait_widget(self.page["register_page"]["check_box"]))
pwd = self.widget_click(self.page["register_page"]["password"],
self.page["register_page"]["title"])
data = "123456"
pwd.clear()
self.ac.send_keys(pwd, data, self.driver)
        self.debug.info(u'[APP_INPUT] ["password"] input success')
time.sleep(0.5)
check_code = self.widget_click(self.page["register_page"]["check_code"],
self.page["register_page"]["title"])
data = "123456"
check_code.clear()
self.ac.send_keys(check_code, data, self.driver)
        self.debug.info(u'[APP_INPUT] ["registration verification code"] input success')
time.sleep(0.5)
widget_px = self.ac.get_location(self.wait_widget(self.page["register_page"]["register_button"]))
self.driver.tap([widget_px["centre"]])
self.debug.info(u'[APP_CLICK] operate_widget success')
while True:
try:
self.wait_widget(self.page["loading_popup"]["title"], 0.5, 0.1)
except TimeoutException:
break
        # take a screenshot to capture the device toast message
ScreenShot(self.device_info, self.zentao_id, self.basename, self.debug)
self.case_over("screen")
| 36.131148
| 105
| 0.582123
|
4aa769f20d8e5d5dccd15270621f5062f03445bc
| 22,323
|
py
|
Python
|
home/Lib/telnetlib.py
|
jbalint/spark
|
caccf1cd9122dd4a7dc0f26a57ee4a649056aa6f
|
[
"CNRI-Jython"
] | 1
|
2015-05-21T20:00:12.000Z
|
2015-05-21T20:00:12.000Z
|
home/Lib/telnetlib.py
|
jbalint/spark
|
caccf1cd9122dd4a7dc0f26a57ee4a649056aa6f
|
[
"CNRI-Jython"
] | null | null | null |
home/Lib/telnetlib.py
|
jbalint/spark
|
caccf1cd9122dd4a7dc0f26a57ee4a649056aa6f
|
[
"CNRI-Jython"
] | null | null | null |
"""TELNET client class.
Based on RFC 854: TELNET Protocol Specification, by J. Postel and
J. Reynolds
Example:
>>> from telnetlib import Telnet
>>> tn = Telnet('www.python.org', 79) # connect to finger port
>>> tn.write('guido\r\n')
>>> print tn.read_all()
Login Name TTY Idle When Where
guido Guido van Rossum pts/2 <Dec 2 11:10> snag.cnri.reston..
>>>
Note that read_some() won't read until eof -- it just reads some data
-- but it guarantees to read at least one byte unless EOF is hit.
It is possible to pass a Telnet object to select.select() in order to
wait until more data is available. Note that in this case,
read_eager() may return '' even if there was data on the socket,
because the protocol negotiation may have eaten the data. This is why
EOFError is needed in some cases to distinguish between "no data" and
"connection closed" (since the socket also appears ready for reading
when it is closed).
To do:
- option negotiation
- timeout should be intrinsic to the connection object instead of an
option on one of the read calls only
"""
# Imported modules
import sys
import socket
import os
if os.name == 'java':
from select import cpython_compatible_select as select
else:
from select import select
del os
__all__ = ["Telnet"]
# Tunable parameters
DEBUGLEVEL = 0
# Telnet protocol defaults
TELNET_PORT = 23
# Telnet protocol characters (don't change)
IAC = chr(255) # "Interpret As Command"
DONT = chr(254)
DO = chr(253)
WONT = chr(252)
WILL = chr(251)
theNULL = chr(0)
SE = chr(240) # Subnegotiation End
NOP = chr(241) # No Operation
DM = chr(242) # Data Mark
BRK = chr(243) # Break
IP = chr(244) # Interrupt process
AO = chr(245) # Abort output
AYT = chr(246) # Are You There
EC = chr(247) # Erase Character
EL = chr(248) # Erase Line
GA = chr(249) # Go Ahead
SB = chr(250) # Subnegotiation Begin
# Telnet protocol options code (don't change)
# These ones all come from arpa/telnet.h
BINARY = chr(0) # 8-bit data path
ECHO = chr(1) # echo
RCP = chr(2) # prepare to reconnect
SGA = chr(3) # suppress go ahead
NAMS = chr(4) # approximate message size
STATUS = chr(5) # give status
TM = chr(6) # timing mark
RCTE = chr(7) # remote controlled transmission and echo
NAOL = chr(8) # negotiate about output line width
NAOP = chr(9) # negotiate about output page size
NAOCRD = chr(10) # negotiate about CR disposition
NAOHTS = chr(11) # negotiate about horizontal tabstops
NAOHTD = chr(12) # negotiate about horizontal tab disposition
NAOFFD = chr(13) # negotiate about formfeed disposition
NAOVTS = chr(14) # negotiate about vertical tab stops
NAOVTD = chr(15) # negotiate about vertical tab disposition
NAOLFD = chr(16) # negotiate about output LF disposition
XASCII = chr(17) # extended ascii character set
LOGOUT = chr(18) # force logout
BM = chr(19) # byte macro
DET = chr(20) # data entry terminal
SUPDUP = chr(21) # supdup protocol
SUPDUPOUTPUT = chr(22) # supdup output
SNDLOC = chr(23) # send location
TTYPE = chr(24) # terminal type
EOR = chr(25) # end of record
TUID = chr(26) # TACACS user identification
OUTMRK = chr(27) # output marking
TTYLOC = chr(28) # terminal location number
VT3270REGIME = chr(29) # 3270 regime
X3PAD = chr(30) # X.3 PAD
NAWS = chr(31) # window size
TSPEED = chr(32) # terminal speed
LFLOW = chr(33) # remote flow control
LINEMODE = chr(34) # Linemode option
XDISPLOC = chr(35) # X Display Location
OLD_ENVIRON = chr(36) # Old - Environment variables
AUTHENTICATION = chr(37) # Authenticate
ENCRYPT = chr(38) # Encryption option
NEW_ENVIRON = chr(39) # New - Environment variables
# the following ones come from
# http://www.iana.org/assignments/telnet-options
# Unfortunately, that document does not assign identifiers
# to all of them, so we are making them up
TN3270E = chr(40) # TN3270E
XAUTH = chr(41) # XAUTH
CHARSET = chr(42) # CHARSET
RSP = chr(43) # Telnet Remote Serial Port
COM_PORT_OPTION = chr(44) # Com Port Control Option
SUPPRESS_LOCAL_ECHO = chr(45) # Telnet Suppress Local Echo
TLS = chr(46) # Telnet Start TLS
KERMIT = chr(47) # KERMIT
SEND_URL = chr(48) # SEND-URL
FORWARD_X = chr(49) # FORWARD_X
PRAGMA_LOGON = chr(138) # TELOPT PRAGMA LOGON
SSPI_LOGON = chr(139) # TELOPT SSPI LOGON
PRAGMA_HEARTBEAT = chr(140) # TELOPT PRAGMA HEARTBEAT
EXOPL = chr(255) # Extended-Options-List
NOOPT = chr(0)
class Telnet:
"""Telnet interface class.
An instance of this class represents a connection to a telnet
server. The instance is initially not connected; the open()
method must be used to establish a connection. Alternatively, the
host name and optional port number can be passed to the
constructor, too.
Don't try to reopen an already connected instance.
This class has many read_*() methods. Note that some of them
raise EOFError when the end of the connection is read, because
they can return an empty string for other reasons. See the
individual doc strings.
read_until(expected, [timeout])
Read until the expected string has been seen, or a timeout is
hit (default is no timeout); may block.
read_all()
Read all data until EOF; may block.
read_some()
Read at least one byte or EOF; may block.
read_very_eager()
Read all data available already queued or on the socket,
without blocking.
read_eager()
Read either data already queued or some data available on the
socket, without blocking.
read_lazy()
Read all data in the raw queue (processing it first), without
doing any socket I/O.
read_very_lazy()
Reads all data in the cooked queue, without doing any socket
I/O.
read_sb_data()
Reads available data between SB ... SE sequence. Don't block.
set_option_negotiation_callback(callback)
Each time a telnet option is read on the input flow, this callback
(if set) is called with the following parameters :
callback(telnet socket, command, option)
option will be chr(0) when there is no option.
No other action is done afterwards by telnetlib.
"""
def __init__(self, host=None, port=0):
"""Constructor.
When called without arguments, create an unconnected instance.
With a hostname argument, it connects the instance; a port
number is optional.
"""
self.debuglevel = DEBUGLEVEL
self.host = host
self.port = port
self.sock = None
self.rawq = ''
self.irawq = 0
self.cookedq = ''
self.eof = 0
self.iacseq = '' # Buffer for IAC sequence.
self.sb = 0 # flag for SB and SE sequence.
self.sbdataq = ''
self.option_callback = None
if host is not None:
self.open(host, port)
def open(self, host, port=0):
"""Connect to a host.
The optional second argument is the port number, which
defaults to the standard telnet port (23).
Don't try to reopen an already connected instance.
"""
self.eof = 0
if not port:
port = TELNET_PORT
self.host = host
self.port = port
msg = "getaddrinfo returns an empty list"
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
self.sock = socket.socket(af, socktype, proto)
self.sock.connect(sa)
except socket.error, msg:
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
def __del__(self):
"""Destructor -- close the connection."""
self.close()
def msg(self, msg, *args):
"""Print a debug message, when the debug level is > 0.
If extra arguments are present, they are substituted in the
message using the standard string formatting operator.
"""
if self.debuglevel > 0:
print 'Telnet(%s,%d):' % (self.host, self.port),
if args:
print msg % args
else:
print msg
def set_debuglevel(self, debuglevel):
"""Set the debug level.
The higher it is, the more debug output you get (on sys.stdout).
"""
self.debuglevel = debuglevel
def close(self):
"""Close the connection."""
if self.sock:
self.sock.close()
self.sock = 0
self.eof = 1
self.iacseq = ''
self.sb = 0
def get_socket(self):
"""Return the socket object used internally."""
return self.sock
def fileno(self):
"""Return the fileno() of the socket object used internally."""
return self.sock.fileno()
def write(self, buffer):
"""Write a string to the socket, doubling any IAC characters.
Can block if the connection is blocked. May raise
socket.error if the connection is closed.
"""
if IAC in buffer:
buffer = buffer.replace(IAC, IAC+IAC)
self.msg("send %s", `buffer`)
self.sock.sendall(buffer)
def read_until(self, match, timeout=None):
"""Read until a given string is encountered or until timeout.
When no match is found, return whatever is available instead,
possibly the empty string. Raise EOFError if the connection
is closed and no cooked data is available.
"""
n = len(match)
self.process_rawq()
i = self.cookedq.find(match)
if i >= 0:
i = i+n
buf = self.cookedq[:i]
self.cookedq = self.cookedq[i:]
return buf
s_reply = ([self], [], [])
s_args = s_reply
if timeout is not None:
s_args = s_args + (timeout,)
while not self.eof and select(*s_args) == s_reply:
i = max(0, len(self.cookedq)-n)
self.fill_rawq()
self.process_rawq()
i = self.cookedq.find(match, i)
if i >= 0:
i = i+n
buf = self.cookedq[:i]
self.cookedq = self.cookedq[i:]
return buf
return self.read_very_lazy()
def read_all(self):
"""Read all data until EOF; block until connection closed."""
self.process_rawq()
while not self.eof:
self.fill_rawq()
self.process_rawq()
buf = self.cookedq
self.cookedq = ''
return buf
def read_some(self):
"""Read at least one byte of cooked data unless EOF is hit.
Return '' if EOF is hit. Block if no data is immediately
available.
"""
self.process_rawq()
while not self.cookedq and not self.eof:
self.fill_rawq()
self.process_rawq()
buf = self.cookedq
self.cookedq = ''
return buf
def read_very_eager(self):
"""Read everything that's possible without blocking in I/O (eager).
Raise EOFError if connection closed and no cooked data
available. Return '' if no cooked data available otherwise.
Don't block unless in the midst of an IAC sequence.
"""
self.process_rawq()
while not self.eof and self.sock_avail():
self.fill_rawq()
self.process_rawq()
return self.read_very_lazy()
def read_eager(self):
"""Read readily available data.
Raise EOFError if connection closed and no cooked data
available. Return '' if no cooked data available otherwise.
Don't block unless in the midst of an IAC sequence.
"""
self.process_rawq()
while not self.cookedq and not self.eof and self.sock_avail():
self.fill_rawq()
self.process_rawq()
return self.read_very_lazy()
def read_lazy(self):
"""Process and return data that's already in the queues (lazy).
Raise EOFError if connection closed and no data available.
Return '' if no cooked data available otherwise. Don't block
unless in the midst of an IAC sequence.
"""
self.process_rawq()
return self.read_very_lazy()
def read_very_lazy(self):
"""Return any data available in the cooked queue (very lazy).
Raise EOFError if connection closed and no data available.
Return '' if no cooked data available otherwise. Don't block.
"""
buf = self.cookedq
self.cookedq = ''
if not buf and self.eof and not self.rawq:
raise EOFError, 'telnet connection closed'
return buf
def read_sb_data(self):
"""Return any data available in the SB ... SE queue.
Return '' if no SB ... SE available. Should only be called
after seeing a SB or SE command. When a new SB command is
found, old unread SB data will be discarded. Don't block.
"""
buf = self.sbdataq
self.sbdataq = ''
return buf
def set_option_negotiation_callback(self, callback):
"""Provide a callback function called after each receipt of a telnet option."""
self.option_callback = callback
def process_rawq(self):
"""Transfer from raw queue to cooked queue.
Set self.eof when connection is closed. Don't block unless in
the midst of an IAC sequence.
"""
buf = ['', '']
try:
while self.rawq:
c = self.rawq_getchar()
if not self.iacseq:
if c == theNULL:
continue
if c == "\021":
continue
if c != IAC:
buf[self.sb] = buf[self.sb] + c
continue
else:
self.iacseq += c
elif len(self.iacseq) == 1:
'IAC: IAC CMD [OPTION only for WILL/WONT/DO/DONT]'
if c in (DO, DONT, WILL, WONT):
self.iacseq += c
continue
self.iacseq = ''
if c == IAC:
buf[self.sb] = buf[self.sb] + c
else:
if c == SB: # SB ... SE start.
self.sb = 1
self.sbdataq = ''
elif c == SE:
self.sb = 0
self.sbdataq = self.sbdataq + buf[1]
buf[1] = ''
if self.option_callback:
# Callback is supposed to look into
# the sbdataq
self.option_callback(self.sock, c, NOOPT)
else:
# We can't offer automatic processing of
# suboptions. Alas, we should not get any
# unless we did a WILL/DO before.
self.msg('IAC %d not recognized' % ord(c))
elif len(self.iacseq) == 2:
cmd = self.iacseq[1]
self.iacseq = ''
opt = c
if cmd in (DO, DONT):
self.msg('IAC %s %d',
cmd == DO and 'DO' or 'DONT', ord(opt))
if self.option_callback:
self.option_callback(self.sock, cmd, opt)
else:
self.sock.sendall(IAC + WONT + opt)
elif cmd in (WILL, WONT):
self.msg('IAC %s %d',
cmd == WILL and 'WILL' or 'WONT', ord(opt))
if self.option_callback:
self.option_callback(self.sock, cmd, opt)
else:
self.sock.sendall(IAC + DONT + opt)
except EOFError: # raised by self.rawq_getchar()
self.iacseq = '' # Reset on EOF
self.sb = 0
pass
self.cookedq = self.cookedq + buf[0]
self.sbdataq = self.sbdataq + buf[1]
def rawq_getchar(self):
"""Get next char from raw queue.
Block if no data is immediately available. Raise EOFError
when connection is closed.
"""
if not self.rawq:
self.fill_rawq()
if self.eof:
raise EOFError
c = self.rawq[self.irawq]
self.irawq = self.irawq + 1
if self.irawq >= len(self.rawq):
self.rawq = ''
self.irawq = 0
return c
def fill_rawq(self):
"""Fill raw queue from exactly one recv() system call.
Block if no data is immediately available. Set self.eof when
connection is closed.
"""
if self.irawq >= len(self.rawq):
self.rawq = ''
self.irawq = 0
# The buffer size should be fairly small so as to avoid quadratic
# behavior in process_rawq() above
buf = self.sock.recv(50)
self.msg("recv %s", `buf`)
self.eof = (not buf)
self.rawq = self.rawq + buf
def sock_avail(self):
"""Test whether data is available on the socket."""
return select([self], [], [], 0) == ([self], [], [])
def interact(self):
"""Interaction function, emulates a very dumb telnet client."""
if sys.platform == "win32":
self.mt_interact()
return
while 1:
rfd, wfd, xfd = select([self, sys.stdin], [], [])
if self in rfd:
try:
text = self.read_eager()
except EOFError:
print '*** Connection closed by remote host ***'
break
if text:
sys.stdout.write(text)
sys.stdout.flush()
if sys.stdin in rfd:
line = sys.stdin.readline()
if not line:
break
self.write(line)
def mt_interact(self):
"""Multithreaded version of interact()."""
import thread
thread.start_new_thread(self.listener, ())
while 1:
line = sys.stdin.readline()
if not line:
break
self.write(line)
def listener(self):
"""Helper for mt_interact() -- this executes in the other thread."""
while 1:
try:
data = self.read_eager()
except EOFError:
print '*** Connection closed by remote host ***'
return
if data:
sys.stdout.write(data)
else:
sys.stdout.flush()
def expect(self, list, timeout=None):
"""Read until one from a list of a regular expressions matches.
The first argument is a list of regular expressions, either
compiled (re.RegexObject instances) or uncompiled (strings).
The optional second argument is a timeout, in seconds; default
is no timeout.
Return a tuple of three items: the index in the list of the
first regular expression that matches; the match object
returned; and the text read up till and including the match.
If EOF is read and no text was read, raise EOFError.
Otherwise, when nothing matches, return (-1, None, text) where
text is the text received so far (may be the empty string if a
timeout happened).
If a regular expression ends with a greedy match (e.g. '.*')
or if more than one expression can match the same input, the
        results are non-deterministic, and may depend on the I/O timing.
"""
re = None
list = list[:]
indices = range(len(list))
for i in indices:
if not hasattr(list[i], "search"):
if not re: import re
list[i] = re.compile(list[i])
while 1:
self.process_rawq()
for i in indices:
m = list[i].search(self.cookedq)
if m:
e = m.end()
text = self.cookedq[:e]
self.cookedq = self.cookedq[e:]
return (i, m, text)
if self.eof:
break
if timeout is not None:
r, w, x = select([self.fileno()], [], [], timeout)
if not r:
break
self.fill_rawq()
text = self.read_very_lazy()
if not text and self.eof:
raise EOFError
return (-1, None, text)
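# Usage sketch, kept separate from the test() helper below: it connects to a
# hypothetical host, waits for a login prompt with read_until(), sends credentials,
# then uses expect() to wait for either a shell prompt or a failure message.
# Host name, credentials and prompt strings are illustrative only.
def _example_login(host='telnet.example.com', user='guest', password='guest'):
    tn = Telnet(host, TELNET_PORT)
    tn.read_until('login: ', 10)        # block for up to 10 seconds
    tn.write(user + '\r\n')
    tn.read_until('Password: ', 10)
    tn.write(password + '\r\n')
    index, match, text = tn.expect([r'\$ ', 'Login incorrect'], 10)
    tn.close()
    return index == 0                   # True if the shell prompt was seen first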
def test():
"""Test program for telnetlib.
Usage: python telnetlib.py [-d] ... [host [port]]
Default host is localhost; default port is 23.
"""
debuglevel = 0
while sys.argv[1:] and sys.argv[1] == '-d':
debuglevel = debuglevel+1
del sys.argv[1]
host = 'localhost'
if sys.argv[1:]:
host = sys.argv[1]
port = 0
if sys.argv[2:]:
portstr = sys.argv[2]
try:
port = int(portstr)
except ValueError:
port = socket.getservbyname(portstr, 'tcp')
tn = Telnet()
tn.set_debuglevel(debuglevel)
tn.open(host, port)
tn.interact()
tn.close()
if __name__ == '__main__':
test()
| 33.669683
| 88
| 0.547597
|
de39fb2a79b370301e6ed75fc1bb2ca829d3ee96
| 1,852
|
py
|
Python
|
internal/notes/builtin-SAVE/packages/codar-cheetah/package.py
|
HPCToolkit/hpctest
|
5ff4455582bf39e75530a31badcf6142081b386b
|
[
"BSD-3-Clause"
] | 1
|
2019-01-17T20:07:19.000Z
|
2019-01-17T20:07:19.000Z
|
internal/notes/builtin-SAVE/packages/codar-cheetah/package.py
|
HPCToolkit/hpctest
|
5ff4455582bf39e75530a31badcf6142081b386b
|
[
"BSD-3-Clause"
] | null | null | null |
internal/notes/builtin-SAVE/packages/codar-cheetah/package.py
|
HPCToolkit/hpctest
|
5ff4455582bf39e75530a31badcf6142081b386b
|
[
"BSD-3-Clause"
] | 2
|
2019-08-06T18:13:57.000Z
|
2021-11-05T18:19:49.000Z
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
from distutils.dir_util import copy_tree
class CodarCheetah(Package):
"""CODAR Cheetah:
The CODAR Experiment Harness for Exascale science applications.
"""
homepage = "https://github.com/CODARcode/cheetah"
url = "https://github.com/CODARcode/cheetah/archive/v0.1.tar.gz"
version('0.1', '6918021f74fa7a2f1de26c0bb31a63ef')
version('develop', git='https://github.com/CODARcode/cheetah.git',
branch='master')
depends_on('python@3:', type=('build', 'run'))
depends_on('savanna')
def install(self, spec, prefix):
copy_tree('.', prefix)
| 40.26087
| 78
| 0.672786
|
c796bb4672bc2aabc708ca87c504edf92974e7ba
| 1,687
|
py
|
Python
|
free_style/app/views.py
|
yudongqiu/GomokuWeb
|
c7d887733250f4b69efd04edf72a22bbd8257206
|
[
"MIT"
] | 5
|
2017-05-23T10:48:14.000Z
|
2019-03-24T09:53:45.000Z
|
free_style/app/views.py
|
yudongqiu/GomokuWeb
|
c7d887733250f4b69efd04edf72a22bbd8257206
|
[
"MIT"
] | null | null | null |
free_style/app/views.py
|
yudongqiu/GomokuWeb
|
c7d887733250f4b69efd04edf72a22bbd8257206
|
[
"MIT"
] | 4
|
2017-08-04T09:01:17.000Z
|
2018-07-18T10:30:04.000Z
|
from flask import render_template, request, jsonify
from app import app
import gomoku_web
@app.route('/')
@app.route('/index')
def index():
return render_template('index.html')
@app.route('/_start', methods=['GET'])
def start():
game.reset()
p1 = request.args.get('p1', 'You', type=str)
p2 = request.args.get('p2', 'AI', type=str)
lv = request.args.get('lv', 5, type=int)
game.players = []
for player_name in [p1,p2]:
if player_name.startswith('AI'):
p = gomoku_web.Player(player_name,ai_script=player_name,level=lv)
else:
p = gomoku_web.Player(player_name)
game.players.append(p)
game.print_board()
return 'Success'
@app.route('/_player_set', methods=['GET'])
def player_set():
position = request.args.get('position','')
stone = tuple(int(i) for i in position.split(','))
action = (stone[0]+1, stone[1]+1) # we start from 1 in the game engine
next_action, winner = game.web_play(action)
if isinstance(next_action, tuple):
stone = (next_action[0]-1, next_action[1]-1)
else:
stone = None
return jsonify(next_move=stone, winner=winner)
@app.route('/_get_first_move')
def get_first_move():
next_action = game.ai_first_move()
if isinstance(next_action, tuple):
stone = (next_action[0]-1, next_action[1]-1)
else:
print("Error getting AI first move!")
stone = None
return jsonify(next_move=stone)
@app.route('/_reset', methods=['GET'])
def reset():
game.reset()
return 'Success'
@app.route('/_undo', methods=['GET'])
def undo():
game.undo()
return 'Success'
game = gomoku_web.Gomoku_Web(board_size=15)
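# Small sketch of the coordinate convention used above: the browser indexes board
# squares from 0, while the gomoku_web engine starts from 1, so every move is
# shifted by one in each direction when it crosses that boundary.
def _to_engine(stone):
    """Convert a 0-based (row, col) pair from the browser into a 1-based engine action."""
    return (stone[0] + 1, stone[1] + 1)
def _to_web(action):
    """Convert a 1-based engine action back into the 0-based pair sent to the browser."""
    return (action[0] - 1, action[1] - 1)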
| 28.59322
| 77
| 0.64019
|
3ca0070b6a8c4c0a032bd1f3c69c71e88f56eaf3
| 88,378
|
py
|
Python
|
airflow/jobs.py
|
beefjerky/cautious-invention
|
6cb785c8a97829833db5293ad3458dd16fcbf68c
|
[
"Apache-2.0"
] | 4
|
2017-06-25T14:09:31.000Z
|
2020-11-20T09:51:24.000Z
|
airflow/jobs.py
|
beefjerky/new_airflow
|
6cb785c8a97829833db5293ad3458dd16fcbf68c
|
[
"Apache-2.0"
] | null | null | null |
airflow/jobs.py
|
beefjerky/new_airflow
|
6cb785c8a97829833db5293ad3458dd16fcbf68c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from past.builtins import basestring
from collections import defaultdict, Counter
from datetime import datetime
import getpass
import logging
import socket
import multiprocessing
import os
import signal
import six
import sys
import threading
import time
from time import sleep
import psutil
from sqlalchemy import Column, Integer, String, DateTime, func, Index, or_
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm.session import make_transient
from tabulate import tabulate
from airflow import executors, models, settings
from airflow import configuration as conf
from airflow.exceptions import AirflowException
from airflow.models import DagRun
from airflow.settings import Stats
from airflow.task_runner import get_task_runner
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, RUN_DEPS
from airflow.utils.state import State
from airflow.utils.db import provide_session, pessimistic_connection_handling
from airflow.utils.dag_processing import (AbstractDagFileProcessor,
DagFileProcessorManager,
SimpleDag,
SimpleDagBag,
list_py_file_paths)
from airflow.utils.email import send_email
from airflow.utils.logging import LoggingMixin
from airflow.utils import asciiart
Base = models.Base
ID_LEN = models.ID_LEN
class BaseJob(Base, LoggingMixin):
"""
Abstract class to be derived for jobs. Jobs are processing items with state
and duration that aren't task instances. For instance a BackfillJob is
    a collection of task instance runs, but should have its own state, start
and end time.
"""
__tablename__ = "job"
id = Column(Integer, primary_key=True)
dag_id = Column(String(ID_LEN),)
state = Column(String(20))
job_type = Column(String(30))
start_date = Column(DateTime())
end_date = Column(DateTime())
latest_heartbeat = Column(DateTime())
executor_class = Column(String(500))
hostname = Column(String(500))
unixname = Column(String(1000))
__mapper_args__ = {
'polymorphic_on': job_type,
'polymorphic_identity': 'BaseJob'
}
__table_args__ = (
Index('job_type_heart', job_type, latest_heartbeat),
)
def __init__(
self,
executor=executors.DEFAULT_EXECUTOR,
heartrate=conf.getfloat('scheduler', 'JOB_HEARTBEAT_SEC'),
*args, **kwargs):
self.hostname = socket.getfqdn()
self.executor = executor
self.executor_class = executor.__class__.__name__
self.start_date = datetime.now()
self.latest_heartbeat = datetime.now()
self.heartrate = heartrate
self.unixname = getpass.getuser()
super(BaseJob, self).__init__(*args, **kwargs)
def is_alive(self):
return (
(datetime.now() - self.latest_heartbeat).seconds <
(conf.getint('scheduler', 'JOB_HEARTBEAT_SEC') * 2.1)
)
def kill(self):
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.end_date = datetime.now()
try:
self.on_kill()
except:
self.logger.error('on_kill() method failed')
session.merge(job)
session.commit()
session.close()
raise AirflowException("Job shut down externally.")
def on_kill(self):
'''
Will be called when an external kill command is received
'''
pass
def heartbeat_callback(self, session=None):
pass
def heartbeat(self):
'''
Heartbeats update the job's entry in the database with a timestamp
        for the latest_heartbeat and allow the job to be killed
        externally. This makes it possible to monitor, at the system level,
        which jobs are actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
'''
session = settings.Session()
job = session.query(BaseJob).filter_by(id=self.id).one()
make_transient(job)
session.commit()
session.close()
if job.state == State.SHUTDOWN:
self.kill()
# Figure out how long to sleep for
sleep_for = 0
if job.latest_heartbeat:
sleep_for = max(
0,
self.heartrate - (datetime.now() - job.latest_heartbeat).total_seconds())
# Don't keep session open while sleeping as it leaves a connection open
session.close()
sleep(sleep_for)
# Update last heartbeat time
session = settings.Session()
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.latest_heartbeat = datetime.now()
session.merge(job)
session.commit()
self.heartbeat_callback(session=session)
session.close()
self.logger.debug('[heart] Boom.')
def run(self):
Stats.incr(self.__class__.__name__.lower() + '_start', 1, 1)
# Adding an entry in the DB
session = settings.Session()
self.state = State.RUNNING
session.add(self)
session.commit()
id_ = self.id
make_transient(self)
self.id = id_
# Run
self._execute()
# Marking the success in the DB
self.end_date = datetime.now()
self.state = State.SUCCESS
session.merge(self)
session.commit()
session.close()
Stats.incr(self.__class__.__name__.lower() + '_end', 1, 1)
def _execute(self):
raise NotImplementedError("This method needs to be overridden")
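# Illustrative sketch only (not used by the scheduler): a concrete job subclasses
# BaseJob, registers a polymorphic identity so rows in the `job` table can be told
# apart, and implements _execute(); run() and heartbeat() are inherited. The class
# name and body below are hypothetical.
class _ExampleNoopJob(BaseJob):
    __mapper_args__ = {
        'polymorphic_identity': '_ExampleNoopJob'
    }
    def _execute(self):
        # A real job would do its work here and call self.heartbeat() periodically.
        self.logger.info("noop job executed")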
class DagFileProcessor(AbstractDagFileProcessor):
"""Helps call SchedulerJob.process_file() in a separate process."""
    # Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(self, file_path, pickle_dags, dag_id_white_list, log_file):
"""
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: unicode
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
        :param dag_id_white_list: If specified, only look at these DAG ID's
        :type dag_id_white_list: list[unicode]
:param log_file: the path to the file where log lines should be output
:type log_file: unicode
"""
self._file_path = file_path
self._log_file = log_file
# Queue that's used to pass results from the child process.
self._result_queue = multiprocessing.Queue()
        # The process that was launched to process the given file.
self._process = None
self._dag_id_white_list = dag_id_white_list
self._pickle_dags = pickle_dags
# The result of Scheduler.process_file(file_path).
self._result = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time = None
        # This ID is used to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessor.class_creation_counter
DagFileProcessor.class_creation_counter += 1
@property
def file_path(self):
return self._file_path
@property
def log_file(self):
return self._log_file
@staticmethod
def _launch_process(result_queue,
file_path,
pickle_dags,
dag_id_white_list,
thread_name,
log_file):
"""
Launch a process to process the given file.
:param result_queue: the queue to use for passing back the result
:type result_queue: multiprocessing.Queue
:param file_path: the file to process
:type file_path: unicode
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_id_white_list: if specified, only examine DAG ID's that are
in this list
:type dag_id_white_list: list[unicode]
:param thread_name: the name to use for the process that is launched
:type thread_name: unicode
:param log_file: the logging output for the process should be directed
to this file
:type log_file: unicode
:return: the process that was launched
:rtype: multiprocessing.Process
"""
def helper():
# This helper runs in the newly created process
# Re-direct stdout and stderr to a separate log file. Otherwise,
# the main log becomes too hard to read. No buffering to enable
# responsive file tailing
parent_dir, _ = os.path.split(log_file)
# Create the parent directory for the log file if necessary.
if not os.path.isdir(parent_dir):
os.makedirs(parent_dir)
f = open(log_file, "a")
original_stdout = sys.stdout
original_stderr = sys.stderr
sys.stdout = f
sys.stderr = f
try:
# Re-configure logging to use the new output streams
log_format = settings.LOG_FORMAT_WITH_THREAD_NAME
settings.configure_logging(log_format=log_format)
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
logging.info("Started process (PID=%s) to work on %s",
os.getpid(),
file_path)
scheduler_job = SchedulerJob(dag_ids=dag_id_white_list)
result = scheduler_job.process_file(file_path,
pickle_dags)
result_queue.put(result)
end_time = time.time()
logging.info("Processing %s took %.3f seconds",
file_path,
end_time - start_time)
except:
# Log exceptions through the logging framework.
logging.exception("Got an exception! Propagating...")
raise
finally:
sys.stdout = original_stdout
sys.stderr = original_stderr
f.close()
p = multiprocessing.Process(target=helper,
args=(),
name="{}-Process".format(thread_name))
p.start()
return p
def start(self):
"""
Launch the process and start processing the DAG.
"""
self._process = DagFileProcessor._launch_process(
self._result_queue,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id),
self.log_file)
self._start_time = datetime.now()
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None:
raise AirflowException("Tried to call stop before starting!")
# The queue will likely get corrupted, so remove the reference
self._result_queue = None
self._process.terminate()
# Arbitrarily wait 5s for the process to die
self._process.join(5)
if sigkill and self._process.is_alive():
logging.warn("Killing PID %s", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
@property
def pid(self):
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if not self._result_queue.empty():
self._result = self._result_queue.get_nowait()
self._done = True
logging.debug("Waiting for %s", self._process)
self._process.join()
return True
# Potential error case when process dies
if not self._process.is_alive():
self._done = True
# Get the object from the queue or else join() can hang.
if not self._result_queue.empty():
self._result = self._result_queue.get_nowait()
logging.debug("Waiting for %s", self._process)
self._process.join()
return True
return False
@property
def result(self):
"""
:return: result of running SchedulerJob.process_file()
:rtype: SimpleDag
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self):
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
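# Sketch of the processor life cycle exposed by the properties above (illustration
# only; DagFileProcessorManager drives real instances). The file and log paths are
# hypothetical.
def _example_process_one_file(file_path='/tmp/example_dag.py',
                              log_file='/tmp/example_dag_processor.log'):
    processor = DagFileProcessor(file_path,
                                 pickle_dags=False,
                                 dag_id_white_list=[],
                                 log_file=log_file)
    processor.start()           # forks a child process to parse the file
    while not processor.done:   # poll until the child reports a result or exits
        time.sleep(0.1)
    return processor.result    # whatever SchedulerJob.process_file() returned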
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
def __init__(
self,
dag_id=None,
dag_ids=None,
subdir=models.DAGS_FOLDER,
num_runs=-1,
file_process_interval=conf.getint('scheduler',
'min_file_process_interval'),
processor_poll_interval=1.0,
run_duration=None,
do_pickle=False,
*args, **kwargs):
"""
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: unicode
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[unicode]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: unicode
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited within the run_duration.
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:param run_duration: how long to run (in seconds) before exiting
:type run_duration: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
# for BaseJob compatibility
self.dag_id = dag_id
self.dag_ids = [dag_id] if dag_id else []
if dag_ids:
self.dag_ids.extend(dag_ids)
self.subdir = subdir
self.num_runs = num_runs
self.run_duration = run_duration
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super(SchedulerJob, self).__init__(*args, **kwargs)
self.heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
self.max_threads = min(conf.getint('scheduler', 'max_threads'), multiprocessing.cpu_count())
self.using_sqlite = False
if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
if self.max_threads > 1:
self.logger.error("Cannot use more than 1 thread when using sqlite. Setting max_threads to 1")
self.max_threads = 1
self.using_sqlite = True
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler',
'dag_dir_list_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler',
'print_stats_interval')
# Parse and schedule each file no faster than this interval. Default
# to 3 minutes.
self.file_process_interval = file_process_interval
# Directory where log files for the processes that scheduled the DAGs reside
self.child_process_log_directory = conf.get('scheduler',
'child_process_log_directory')
if run_duration is None:
self.run_duration = conf.getint('scheduler',
'run_duration')
@provide_session
def manage_slas(self, dag, session=None):
"""
Finds all tasks that have SLAs defined and sends alert emails where
needed. New SLA misses are also recorded in the database.
We assume that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any([ti.sla for ti in dag.tasks]):
self.logger.info("Skipping SLA check for {} because "
"no tasks in DAG have SLAs".format(dag))
return
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(TI.state == State.SUCCESS)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = datetime.now()
SlaMiss = models.SlaMiss
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if task.sla:
dttm = dag.following_schedule(dttm)
while dttm < datetime.now():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < datetime.now():
session.merge(models.SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
.filter(or_(SlaMiss.email_sent == False,
SlaMiss.notification_sent == False))
.filter(SlaMiss.dag_id == dag.dag_id)
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(TI.state != State.SUCCESS)
.filter(TI.execution_date.in_(sla_dates))
.filter(TI.dag_id == dag.dag_id)
.all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.logger.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas, blocking_tis)
notification_sent = True
email_content = """\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n</code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}\n{bug}</code></pre>
""".format(bug=asciiart.bug, **locals())
emails = []
for t in dag.tasks:
if t.email:
if isinstance(t.email, basestring):
task_emails = [t.email]
elif isinstance(t.email, (list, tuple)):
task_emails = list(t.email)
else:
# guard against reusing task_emails from a previous loop iteration
task_emails = []
for email in task_emails:
if email not in emails:
emails.append(email)
if emails and len(slas):
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
session.close()
@staticmethod
@provide_session
def clear_nonexistent_import_errors(session, known_file_paths):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param known_file_paths: The list of existing files that are parsed for DAGs
:type known_file_paths: list[unicode]
"""
session.query(models.ImportError).filter(
~models.ImportError.filename.in_(known_file_paths)
).delete(synchronize_session='fetch')
session.commit()
@staticmethod
def update_import_errors(session, dagbag):
"""
For the DAGs in the given DagBag, record any associated import errors and clears
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: models.Dagbag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(models.ImportError).filter(
models.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in six.iteritems(dagbag.import_errors):
session.add(models.ImportError(
filename=filename,
stacktrace=stacktrace))
session.commit()
@provide_session
def create_dag_run(self, dag, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval
Returns DagRun if one is scheduled. Otherwise returns None.
"""
if dag.schedule_interval:
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < datetime.now() - dag.dagrun_timeout):
dr.state = State.FAILED
dr.end_date = datetime.now()
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False,
# add % as a wildcard for the like query
DagRun.run_id.like(DagRun.ID_PREFIX + '%')
))
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
# don't do scheduler catchup for DAGs that don't have dag.catchup = True
if not dag.catchup:
# The logic is that we move start_date up until
# one period before, so that datetime.now() is AFTER
# the period end, and the job can be created...
now = datetime.now()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now:
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.logger.debug("Next run date based on tasks {}"
.format(next_run_date))
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# make sure backfills are also considered
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.logger.debug("Dag start date: {}. Next run date: {}"
.format(dag.start_date, next_run_date))
# don't ever schedule in the future
if next_run_date > datetime.now():
return
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return
if next_run_date and period_end and period_end <= datetime.now():
next_run = dag.create_dagrun(
run_id='scheduled__' + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=datetime.now(),
state=State.RUNNING,
external_trigger=False
)
return next_run
def _process_task_instances(self, dag, queue):
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
session = settings.Session()
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
self.logger.info("Examining DAG run {}".format(run))
# don't consider runs that are executed in the future
if run.execution_date > datetime.now():
self.logger.error("Execution date is in future: {}"
.format(run.execution_date))
continue
if len(active_dag_runs) >= dag.max_active_runs:
self.logger.info("Active dag runs > max_active_run.")
continue
# skip backfill dagruns for now as long as they are not really scheduled
if run.is_backfill:
continue
# todo: run.dag is transient but needs to be set
run.dag = dag
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
run.update_state(session=session)
if run.state == State.RUNNING:
make_transient(run)
active_dag_runs.append(run)
for run in active_dag_runs:
self.logger.debug("Examining active DAG run {}".format(run))
# this needs a fresh session; sometimes TIs get detached otherwise
tis = run.get_task_instances(state=(State.NONE,
State.UP_FOR_RETRY))
# this loop is quite slow as it uses are_dependencies_met for
# every task (in ti.is_runnable). This is also called in
# update_state above which has already checked these tasks
for ti in tis:
task = dag.get_task(ti.task_id)
# fixme: ti.task is transient but needs to be set
ti.task = task
# future: remove adhoc
if task.adhoc:
continue
if ti.are_dependencies_met(
dep_context=DepContext(flag_upstream_failed=True),
session=session):
self.logger.debug('Queuing task: {}'.format(ti))
queue.append(ti.key)
session.close()
@provide_session
def _change_state_for_tis_without_dagrun(self,
simple_dag_bag,
old_states,
new_state,
session=None):
"""
For all DAG IDs in the SimpleDagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
exists but is not in the running state. This normally should not
happen, but it can if the state of DagRuns are changed manually.
:param old_states: examine TaskInstances in this state
:type old_state: list[State]
:param new_state: set TaskInstances to this state
:type new_state: State
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag and with states in the old_state will be examined
:type simple_dag_bag: SimpleDagBag
"""
task_instances_to_change = (
session
.query(models.TaskInstance)
.filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids))
.filter(models.TaskInstance.state.in_(old_states))
.with_for_update()
.all()
)
""":type: list[TaskInstance]"""
for task_instance in task_instances_to_change:
dag_runs = DagRun.find(dag_id=task_instance.dag_id,
execution_date=task_instance.execution_date,
)
if len(dag_runs) == 0:
self.logger.warn("DagRun for %s %s does not exist",
task_instance.dag_id,
task_instance.execution_date)
continue
# There should only be one DAG run. Add some logging info if this
# is not the case for later debugging.
if len(dag_runs) > 1:
self.logger.warn("Multiple DagRuns found for {} {}: {}"
.format(task_instance.dag_id,
task_instance.execution_date,
dag_runs))
if not any(dag_run.state == State.RUNNING for dag_run in dag_runs):
self.logger.warn("Setting {} to state={} as it does not have "
"a DagRun in the {} state"
.format(task_instance,
new_state,
State.RUNNING))
task_instance.state = new_state
session.merge(task_instance)
session.commit()
@provide_session
def _execute_task_instances(self,
simple_dag_bag,
states,
session=None):
"""
Fetches task instances from ORM in the specified states, figures
out pool limits, and sends them to the executor for execution.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: SimpleDagBag
:param executor: the executor that runs task instances
:type executor: BaseExecutor
:param states: Execute TaskInstances in these states
:type states: Tuple[State]
:return: None
"""
# Get all the queued task instances from associated with scheduled
# DagRuns.
TI = models.TaskInstance
task_instances_to_examine = (
session
.query(TI)
.filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
.filter(TI.state.in_(states))
.all()
)
# Put one task instance on each line
if len(task_instances_to_examine) == 0:
self.logger.info("No tasks to send to the executor")
return
task_instance_str = "\n\t".join(
["{}".format(x) for x in task_instances_to_examine])
self.logger.info("Tasks up for execution:\n\t{}".format(task_instance_str))
# Get the pool settings
pools = {p.pool: p for p in session.query(models.Pool).all()}
pool_to_task_instances = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
for pool, task_instances in pool_to_task_instances.items():
if not pool:
# Arbitrary:
# If queued outside of a pool, trigger no more than
# non_pooled_task_slot_count per run
open_slots = conf.getint('core', 'non_pooled_task_slot_count')
else:
open_slots = pools[pool].open_slots(session=session)
num_queued = len(task_instances)
self.logger.info("Figuring out tasks to run in Pool(name={pool}) "
"with {open_slots} open slots and {num_queued} "
"task instances in queue".format(**locals()))
if open_slots <= 0:
continue
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
# DAG IDs with running tasks that equal the concurrency limit of the dag
dag_id_to_running_task_count = {}
for task_instance in priority_sorted_task_instances:
if open_slots <= 0:
self.logger.info("No more slots free")
# Can't schedule any more since there are no more open slots.
break
if self.executor.has_task(task_instance):
self.logger.debug("Not handling task {} as the executor reports it is running"
.format(task_instance.key))
continue
if simple_dag_bag.get_dag(task_instance.dag_id).is_paused:
self.logger.info("Not executing queued {} since {} is paused"
.format(task_instance, task_instance.dag_id))
continue
# todo: remove this logic when backfills will be part of the scheduler
dag_run = task_instance.get_dagrun()
if dag_run and dag_run.is_backfill:
continue
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
if dag_id not in dag_id_to_running_task_count:
dag_id_to_running_task_count[dag_id] = \
DagRun.get_running_tasks(
session,
dag_id,
simple_dag_bag.get_dag(dag_id).task_ids)
current_task_concurrency = dag_id_to_running_task_count[dag_id]
task_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
self.logger.info("DAG {} has {}/{} running tasks"
.format(dag_id,
current_task_concurrency,
task_concurrency_limit))
if current_task_concurrency > task_concurrency_limit:
self.logger.info("Not executing {} since the number "
"of tasks running from DAG {} is >= to the "
"DAG's task concurrency limit of {}"
.format(task_instance,
dag_id,
task_concurrency_limit))
continue
command = " ".join(TI.generate_command(
task_instance.dag_id,
task_instance.task_id,
task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=task_instance.pool,
file_path=simple_dag_bag.get_dag(task_instance.dag_id).full_filepath,
pickle_id=simple_dag_bag.get_dag(task_instance.dag_id).pickle_id))
priority = task_instance.priority_weight
queue = task_instance.queue
self.logger.info("Sending to executor {} with priority {} and queue {}"
.format(task_instance.key, priority, queue))
# Set the state to queued
task_instance.refresh_from_db(lock_for_update=True, session=session)
if task_instance.state not in states:
self.logger.info("Task {} was set to {} outside this scheduler."
.format(task_instance.key, task_instance.state))
session.commit()
continue
self.logger.info("Setting state of {} to {}".format(
task_instance.key, State.QUEUED))
task_instance.state = State.QUEUED
task_instance.queued_dttm = (datetime.now()
if not task_instance.queued_dttm
else task_instance.queued_dttm)
session.merge(task_instance)
session.commit()
# These attributes will be lost after the object expires, so save them.
task_id_ = task_instance.task_id
dag_id_ = task_instance.dag_id
execution_date_ = task_instance.execution_date
make_transient(task_instance)
task_instance.task_id = task_id_
task_instance.dag_id = dag_id_
task_instance.execution_date = execution_date_
self.executor.queue_command(
task_instance,
command,
priority=priority,
queue=queue)
open_slots -= 1
def _process_dags(self, dagbag, dags, tis_out):
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs.
:param dagbag: a collection of DAGs to process
:type dagbag: models.DagBag
:param dags: the DAGs from the DagBag to process
:type dags: DAG
:param tis_out: A queue to add generated TaskInstance objects
:type tis_out: multiprocessing.Queue[TaskInstance]
:return: None
"""
for dag in dags:
dag_id = dag.dag_id
dag = dagbag.get_dag(dag_id)
if not dag:
self.logger.error("DAG ID {} was not found in the DagBag"
.format(dag_id))
continue
if dag.is_paused:
self.logger.info("Not processing DAG {} since it's paused"
.format(dag_id))
continue
self.logger.info("Processing {}".format(dag.dag_id))
dag_run = self.create_dag_run(dag)
if dag_run:
self.logger.info("Created {}".format(dag_run))
self._process_task_instances(dag, tis_out)
self.manage_slas(dag)
models.DagStat.clean_dirty([d.dag_id for d in dags])
def _process_executor_events(self):
"""
Respond to executor events.
:param executor: the executor that's running the task instances
:type executor: BaseExecutor
:return: None
"""
for key, executor_state in list(self.executor.get_event_buffer().items()):
dag_id, task_id, execution_date = key
self.logger.info("Executor reports {}.{} execution_date={} as {}"
.format(dag_id,
task_id,
execution_date,
executor_state))
def _log_file_processing_stats(self,
known_file_paths,
processor_manager):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:param processor_manager: manager for the file processors
:type processor_manager: DagFileProcessorManager
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path",
"PID",
"Runtime",
"Last Runtime",
"Last Run"]
rows = []
for file_path in known_file_paths:
last_runtime = processor_manager.get_last_runtime(file_path)
processor_pid = processor_manager.get_pid(file_path)
processor_start_time = processor_manager.get_start_time(file_path)
runtime = ((datetime.now() - processor_start_time).total_seconds()
if processor_start_time else None)
last_run = processor_manager.get_last_finish_time(file_path)
rows.append((file_path,
processor_pid,
runtime,
last_runtime,
last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
rows = sorted(rows, key=lambda x: x[3] or 0.0)
formatted_rows = []
for file_path, pid, runtime, last_runtime, last_run in rows:
formatted_rows.append((file_path,
pid,
"{:.2f}s".format(runtime)
if runtime else None,
"{:.2f}s".format(last_runtime)
if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S")
if last_run else None))
log_str = ("\n" +
"=" * 80 +
"\n" +
"DAG File Processing Stats\n\n" +
tabulate(formatted_rows, headers=headers) +
"\n" +
"=" * 80)
self.logger.info(log_str)
@provide_session
def _reset_state_for_orphaned_tasks(self, dag_run, session=None):
"""
This function checks for a DagRun if there are any tasks
that have a scheduled state but are not known by the
executor. If it finds those it will reset the state to None
so they will get picked up again.
"""
queued_tis = self.executor.queued_tasks
# also consider running as the state might not have changed in the db yet
running = self.executor.running
tis = list()
tis.extend(dag_run.get_task_instances(state=State.SCHEDULED, session=session))
tis.extend(dag_run.get_task_instances(state=State.QUEUED, session=session))
for ti in tis:
if ti.key not in queued_tis and ti.key not in running:
self.logger.debug("Rescheduling orphaned task {}".format(ti))
ti.state = State.NONE
session.commit()
def _execute(self):
self.logger.info("Starting the scheduler")
pessimistic_connection_handling()
logging.basicConfig(level=logging.DEBUG)
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = False
if self.do_pickle and self.executor.__class__ not in \
(executors.LocalExecutor, executors.SequentialExecutor):
pickle_dags = True
# Use multiple processes to parse and generate tasks for the
# DAGs in parallel. By processing them in separate processes,
# we can get parallelism and isolation from potentially harmful
# user code.
self.logger.info("Processing files using up to {} processes at a time "
.format(self.max_threads))
self.logger.info("Running execute loop for {} seconds"
.format(self.run_duration))
self.logger.info("Processing each file at most {} times"
.format(self.num_runs))
self.logger.info("Process each file at most once every {} seconds"
.format(self.file_process_interval))
self.logger.info("Checking for new files in {} every {} seconds"
.format(self.subdir, self.dag_dir_list_interval))
# Build up a list of Python files that could contain DAGs
self.logger.info("Searching for files in {}".format(self.subdir))
known_file_paths = list_py_file_paths(self.subdir)
self.logger.info("There are {} files in {}"
.format(len(known_file_paths), self.subdir))
def processor_factory(file_path, log_file_path):
return DagFileProcessor(file_path,
pickle_dags,
self.dag_ids,
log_file_path)
processor_manager = DagFileProcessorManager(self.subdir,
known_file_paths,
self.max_threads,
self.file_process_interval,
self.child_process_log_directory,
self.num_runs,
processor_factory)
try:
self._execute_helper(processor_manager)
finally:
self.logger.info("Exited execute loop")
# Kill all child processes on exit since we don't want to leave
# them as orphaned.
pids_to_kill = processor_manager.get_all_pids()
if len(pids_to_kill) > 0:
# First try SIGTERM
this_process = psutil.Process(os.getpid())
# Only check child processes to ensure that we don't have a case
# where we kill the wrong process because a child process died
# but the PID got reused.
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
for child in child_processes:
self.logger.info("Terminating child PID: {}".format(child.pid))
child.terminate()
timeout = 5
self.logger.info("Waiting up to {}s for processes to exit..."
.format(timeout))
try:
psutil.wait_procs(child_processes, timeout)
except psutil.TimeoutExpired:
self.logger.debug("Ran out of time while waiting for "
"processes to exit")
# Then SIGKILL
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
if len(child_processes) > 0:
for child in child_processes:
self.logger.info("Killing child PID: {}".format(child.pid))
child.kill()
child.wait()
def _execute_helper(self, processor_manager):
"""
:param processor_manager: manager to use
:type processor_manager: DagFileProcessorManager
:return: None
"""
self.executor.start()
session = settings.Session()
self.logger.info("Resetting state for orphaned tasks")
# grab orphaned tasks and make sure to reset their state
active_runs = DagRun.find(
state=State.RUNNING,
external_trigger=False,
session=session
)
for dr in active_runs:
self.logger.info("Resetting {} {}".format(dr.dag_id,
dr.execution_date))
self._reset_state_for_orphaned_tasks(dr, session=session)
session.close()
execute_start_time = datetime.now()
# Last time stats were printed
last_stat_print_time = datetime(2000, 1, 1)
# Last time that self.heartbeat() was called.
last_self_heartbeat_time = datetime.now()
# Last time that the DAG dir was traversed to look for files
last_dag_dir_refresh_time = datetime.now()
# Use this value initially
known_file_paths = processor_manager.file_paths
# For the execute duration, parse and schedule DAGs
while (datetime.now() - execute_start_time).total_seconds() < \
self.run_duration or self.run_duration < 0:
self.logger.debug("Starting Loop...")
loop_start_time = time.time()
# Traverse the DAG directory for Python files containing DAGs
# periodically
elapsed_time_since_refresh = (datetime.now() -
last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.logger.info("Searching for files in {}".format(self.subdir))
known_file_paths = list_py_file_paths(self.subdir)
last_dag_dir_refresh_time = datetime.now()
self.logger.info("There are {} files in {}"
.format(len(known_file_paths), self.subdir))
processor_manager.set_file_paths(known_file_paths)
self.logger.debug("Removing old import errors")
self.clear_nonexistent_import_errors(known_file_paths=known_file_paths)
# Kick of new processes and collect results from finished ones
self.logger.info("Heartbeating the process manager")
simple_dags = processor_manager.heartbeat()
if self.using_sqlite:
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.logger.debug("Waiting for processors to finish since we're "
"using sqlite")
processor_manager.wait_until_finished()
# Send tasks for execution if available
if len(simple_dags) > 0:
simple_dag_bag = SimpleDagBag(simple_dags)
# Handle cases where a DAG run state is set (perhaps manually) to
# a non-running state. Handle task instances that belong to
# DAG runs in those states
# If a task instance is up for retry but the corresponding DAG run
# isn't running, mark the task instance as FAILED so we don't try
# to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.UP_FOR_RETRY],
State.FAILED)
# If a task instance is scheduled or queued, but the corresponding
# DAG run isn't running, set the state to NONE so we don't try to
# re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.QUEUED,
State.SCHEDULED],
State.NONE)
self._execute_task_instances(simple_dag_bag,
(State.SCHEDULED,))
# Call heartbeats
self.logger.info("Heartbeating the executor")
self.executor.heartbeat()
# Process events from the executor
self._process_executor_events()
# Heartbeat the scheduler periodically
time_since_last_heartbeat = (datetime.now() -
last_self_heartbeat_time).total_seconds()
if time_since_last_heartbeat > self.heartrate:
self.logger.info("Heartbeating the scheduler")
self.heartbeat()
last_self_heartbeat_time = datetime.now()
# Occasionally print out stats about how fast the files are getting processed
if ((datetime.now() - last_stat_print_time).total_seconds() >
self.print_stats_interval):
if len(known_file_paths) > 0:
self._log_file_processing_stats(known_file_paths,
processor_manager)
last_stat_print_time = datetime.now()
loop_end_time = time.time()
self.logger.debug("Ran scheduling loop in {:.2f}s"
.format(loop_end_time - loop_start_time))
self.logger.debug("Sleeping for {:.2f}s"
.format(self._processor_poll_interval))
time.sleep(self._processor_poll_interval)
# Exit early for a test mode
if processor_manager.max_runs_reached():
self.logger.info("Exiting loop as all files have been processed "
"{} times".format(self.num_runs))
break
# Stop any processors
processor_manager.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
all_files_processed = True
for file_path in known_file_paths:
if processor_manager.get_last_finish_time(file_path) is None:
all_files_processed = False
break
if all_files_processed:
self.logger.info("Deactivating DAGs that haven't been touched since {}"
.format(execute_start_time.isoformat()))
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove()
@provide_session
def process_file(self, file_path, pickle_dags=False, session=None):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param file_path: the path to the Python file that should be executed
:type file_path: unicode
:param pickle_dags: whether serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:return: a list of SimpleDags made from the Dags found in the file
:rtype: list[SimpleDag]
"""
self.logger.info("Processing file {} for tasks to queue".format(file_path))
# As DAGs are parsed from this file, they will be converted into SimpleDags
simple_dags = []
try:
dagbag = models.DagBag(file_path)
except Exception:
self.logger.exception("Failed at reloading the DAG file {}".format(file_path))
Stats.incr('dag_file_refresh_error', 1, 1)
return []
if len(dagbag.dags) > 0:
self.logger.info("DAG(s) {} retrieved from {}"
.format(dagbag.dags.keys(),
file_path))
else:
self.logger.warn("No viable dags retrieved from {}".format(file_path))
self.update_import_errors(session, dagbag)
return []
# Save individual DAGs in the ORM and update DagModel.last_scheduled_time
sync_time = datetime.now()
for dag in dagbag.dags.values():
models.DAG.sync_to_db(dag, dag.owner, sync_time)
paused_dag_ids = [dag.dag_id for dag in dagbag.dags.values()
if dag.is_paused]
# Pickle the DAGs (if necessary) and put them into a SimpleDag
for dag_id in dagbag.dags:
dag = dagbag.get_dag(dag_id)
pickle_id = None
if pickle_dags:
pickle_id = dag.pickle(session).id
task_ids = [task.task_id for task in dag.tasks]
# Only return DAGs that are not paused
if dag_id not in paused_dag_ids:
simple_dags.append(SimpleDag(dag.dag_id,
task_ids,
dag.full_filepath,
dag.concurrency,
dag.is_paused,
pickle_id))
if len(self.dag_ids) > 0:
dags = [dag for dag in dagbag.dags.values()
if dag.dag_id in self.dag_ids and
dag.dag_id not in paused_dag_ids]
else:
dags = [dag for dag in dagbag.dags.values()
if not dag.parent_dag and
dag.dag_id not in paused_dag_ids]
# Not using multiprocessing.Queue() since it's no longer a separate
# process and due to some unusual behavior. (empty() incorrectly
# returns true?)
ti_keys_to_schedule = []
self._process_dags(dagbag, dags, ti_keys_to_schedule)
for ti_key in ti_keys_to_schedule:
dag = dagbag.dags[ti_key[0]]
task = dag.get_task(ti_key[1])
ti = models.TaskInstance(task, ti_key[2])
ti.refresh_from_db(session=session, lock_for_update=True)
# We can defer the task dependency checks to the workers themselves
# since they can be expensive to run in the scheduler.
dep_context = DepContext(deps=QUEUE_DEPS, ignore_task_deps=True)
# Only schedule tasks that have their dependencies met, e.g. to avoid
# a task that recently got its state changed to RUNNING from somewhere
# other than the scheduler from getting its state overwritten.
# TODO(aoen): It's not great that we have to check all the task instance
# dependencies twice; once to get the task scheduled, and again to actually
# run the task. We should try to come up with a way to only check them once.
if ti.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True):
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
self.logger.info("Creating / updating {} in ORM".format(ti))
session.merge(ti)
session.commit()
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception:
self.logger.exception("Error logging import errors!")
try:
dagbag.kill_zombies()
except Exception:
self.logger.exception("Error killing zombies!")
return simple_dags
@provide_session
def heartbeat_callback(self, session=None):
Stats.gauge('scheduler_heartbeat', 1, 1)
class BackfillJob(BaseJob):
"""
A backfill job consists of a dag or subdag for a specific time range. It
triggers a set of task instance runs, in the right order, and lasts for
as long as it takes for the set of task instances to be completed.
"""
ID_PREFIX = 'backfill_'
ID_FORMAT_PREFIX = ID_PREFIX + '{0}'
__mapper_args__ = {
'polymorphic_identity': 'BackfillJob'
}
def __init__(
self,
dag,
start_date=None,
end_date=None,
mark_success=False,
include_adhoc=False,
donot_pickle=False,
ignore_first_depends_on_past=False,
ignore_task_deps=False,
pool=None,
*args, **kwargs):
self.dag = dag
self.dag_id = dag.dag_id
self.bf_start_date = start_date
self.bf_end_date = end_date
self.mark_success = mark_success
self.include_adhoc = include_adhoc
self.donot_pickle = donot_pickle
self.ignore_first_depends_on_past = ignore_first_depends_on_past
self.ignore_task_deps = ignore_task_deps
self.pool = pool
super(BackfillJob, self).__init__(*args, **kwargs)
def _execute(self):
"""
Runs a dag for a specified date range.
"""
session = settings.Session()
DagRun = models.DagRun
# consider max_active_runs but ignore when running subdags
# "parent.child" as a dag_id is by convention a subdag
if self.dag.schedule_interval and "." not in self.dag.dag_id:
active_runs = DagRun.find(
dag_id=self.dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs
if len(active_runs) >= self.dag.max_active_runs:
self.logger.info("Dag {} has reached maximum amount of {} dag runs"
.format(self.dag.dag_id, self.dag.max_active_runs))
return
start_date = self.bf_start_date
end_date = self.bf_end_date
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle = models.DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
executor_fails = Counter()
# Build a list of all instances to run
tasks_to_run = {}
failed = set()
succeeded = set()
started = set()
skipped = set()
not_ready = set()
deadlocked = set()
# create dag runs
dr_start_date = start_date or min([t.start_date for t in self.dag.tasks])
next_run_date = self.dag.normalize_schedule(dr_start_date)
end_date = end_date or datetime.now()
active_dag_runs = []
while next_run_date and next_run_date <= end_date:
run_id = BackfillJob.ID_FORMAT_PREFIX.format(next_run_date.isoformat())
# check if we are scheduling on top of an already existing dag_run
# we could find a "scheduled" run instead of a "backfill"
run = DagRun.find(dag_id=self.dag.dag_id,
execution_date=next_run_date,
session=session)
if not run:
run = self.dag.create_dagrun(
run_id=run_id,
execution_date=next_run_date,
start_date=datetime.now(),
state=State.RUNNING,
external_trigger=False,
session=session,
)
else:
run = run[0]
# set required transient field
run.dag = self.dag
# explicitly mark as running, since we may be filling gaps
run.state = State.RUNNING
run.verify_integrity(session=session)
# for some reason, if we don't refresh, the reference to the run is lost
run.refresh_from_db()
make_transient(run)
active_dag_runs.append(run)
next_run_date = self.dag.following_schedule(next_run_date)
run_count = 0
for run in active_dag_runs:
logging.info("Checking run {}".format(run))
run_count = run_count + 1
def get_task_instances_for_dag_run(dag_run):
# this needs a fresh session; sometimes TIs get detached otherwise
# could be more fine-grained (excluding success or skipped)
tasks = {}
for ti in dag_run.get_task_instances():
tasks[ti.key] = ti
return tasks
# Triggering what is ready to get triggered
while not deadlocked:
tasks_to_run = get_task_instances_for_dag_run(run)
self.logger.debug("Clearing out not_ready list")
not_ready.clear()
for key, ti in list(tasks_to_run.items()):
task = self.dag.get_task(ti.task_id)
ti.task = task
ignore_depends_on_past = (
self.ignore_first_depends_on_past and
ti.execution_date == (start_date or ti.start_date))
self.logger.debug("Task instance to run {} state {}"
.format(ti, ti.state))
# The task was already marked successful or skipped by a
# different Job. Don't rerun it.
if ti.state == State.SUCCESS:
succeeded.add(key)
self.logger.debug("Task instance {} succeeded. "
"Don't rerun.".format(ti))
tasks_to_run.pop(key)
continue
elif ti.state == State.SKIPPED:
skipped.add(key)
self.logger.debug("Task instance {} skipped. "
"Don't rerun.".format(ti))
tasks_to_run.pop(key)
continue
elif ti.state == State.FAILED:
self.logger.error("Task instance {} failed".format(ti))
failed.add(key)
tasks_to_run.pop(key)
continue
backfill_context = DepContext(
deps=RUN_DEPS,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=self.ignore_task_deps,
flag_upstream_failed=True)
# Is the task runnable? -- then run it
if ti.are_dependencies_met(
dep_context=backfill_context,
session=session,
verbose=True):
self.logger.debug('Sending {} to executor'.format(ti))
if ti.state == State.NONE:
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
executor.queue_task_instance(
ti,
mark_success=self.mark_success,
pickle_id=pickle_id,
ignore_task_deps=self.ignore_task_deps,
ignore_depends_on_past=ignore_depends_on_past,
pool=self.pool)
started.add(key)
# Mark the task as not ready to run
elif ti.state in (State.NONE, State.UPSTREAM_FAILED):
self.logger.debug('Adding {} to not_ready'.format(ti))
not_ready.add(key)
session.commit()
self.heartbeat()
executor.heartbeat()
# If the set of tasks that aren't ready ever equals the set of
# tasks to run, then the backfill is deadlocked
if not_ready and not_ready == set(tasks_to_run):
self.logger.warn("Deadlock discovered for tasks_to_run={}"
.format(tasks_to_run.values()))
deadlocked.update(tasks_to_run.values())
tasks_to_run.clear()
# Reacting to events
for key, state in list(executor.get_event_buffer().items()):
if key not in tasks_to_run:
self.logger.warn("{} state {} not in tasks_to_run={}"
.format(key, state,
tasks_to_run.values()))
continue
ti = tasks_to_run[key]
ti.refresh_from_db()
logging.info("Executor state: {} task {}".format(state, ti))
# executor reports failure
if state == State.FAILED:
# task reports running
if ti.state == State.RUNNING:
msg = (
'Executor reports that task instance {} failed '
'although the task says it is running.'.format(ti))
self.logger.error(msg)
ti.handle_failure(msg)
tasks_to_run.pop(key)
# task reports skipped
elif ti.state == State.SKIPPED:
self.logger.error("Skipping {} ".format(ti))
skipped.add(key)
tasks_to_run.pop(key)
# anything else is a failure
else:
self.logger.error("Task instance {} failed".format(ti))
failed.add(key)
tasks_to_run.pop(key)
# executor reports success
elif state == State.SUCCESS:
# task reports success
if ti.state == State.SUCCESS:
self.logger.info(
'Task instance {} succeeded'.format(ti))
succeeded.add(key)
tasks_to_run.pop(key)
# task reports failure
elif ti.state == State.FAILED:
self.logger.error("Task instance {} failed".format(ti))
failed.add(key)
tasks_to_run.pop(key)
# task reports skipped
elif ti.state == State.SKIPPED:
self.logger.info("Task instance {} skipped".format(ti))
skipped.add(key)
tasks_to_run.pop(key)
# this probably won't ever be triggered
elif ti in not_ready:
self.logger.info(
"{} wasn't expected to run, but it did".format(ti))
# executor reports success but task does not - this is weird
elif ti.state not in (
State.SCHEDULED,
State.QUEUED,
State.UP_FOR_RETRY):
self.logger.error(
"The airflow run command failed "
"at reporting an error. This should not occur "
"in normal circumstances. Task state is '{}',"
"reported state is '{}'. TI is {}"
"".format(ti.state, state, ti))
# if the executor fails 3 or more times, stop trying to
# run the task
executor_fails[key] += 1
if executor_fails[key] >= 3:
msg = (
'The airflow run command failed to report an '
'error for task {} three or more times. The '
'task is being marked as failed. This is very '
'unusual and probably means that an error is '
'taking place before the task even '
'starts.'.format(key))
self.logger.error(msg)
ti.handle_failure(msg)
tasks_to_run.pop(key)
msg = ' | '.join([
"[backfill progress]",
"dag run {6} of {7}",
"tasks waiting: {0}",
"succeeded: {1}",
"kicked_off: {2}",
"failed: {3}",
"skipped: {4}",
"deadlocked: {5}"
]).format(
len(tasks_to_run),
len(succeeded),
len(started),
len(failed),
len(skipped),
len(deadlocked),
run_count,
len(active_dag_runs))
self.logger.info(msg)
self.logger.debug("Finished dag run loop iteration. "
"Remaining tasks {}"
.format(tasks_to_run.values()))
if len(tasks_to_run) == 0:
break
# update dag run state
run.update_state(session=session)
if run.dag.is_paused:
models.DagStat.clean_dirty([run.dag_id], session=session)
executor.end()
session.commit()
session.close()
err = ''
if failed:
err += (
"---------------------------------------------------\n"
"Some task instances failed:\n{}\n".format(failed))
if deadlocked:
err += (
'---------------------------------------------------\n'
'BackfillJob is deadlocked.')
deadlocked_depends_on_past = any(
t.are_dependencies_met(
dep_context=DepContext(ignore_depends_on_past=False),
session=session,
verbose=True) !=
t.are_dependencies_met(
dep_context=DepContext(ignore_depends_on_past=True),
session=session,
verbose=True)
for t in deadlocked)
if deadlocked_depends_on_past:
err += (
'Some of the deadlocked tasks were unable to run because '
'of "depends_on_past" relationships. Try running the '
'backfill with the option '
'"ignore_first_depends_on_past=True" or passing "-I" at '
'the command line.')
err += ' These tasks have succeeded:\n{}\n'.format(succeeded)
err += ' These tasks have started:\n{}\n'.format(started)
err += ' These tasks have failed:\n{}\n'.format(failed)
err += ' These tasks are skipped:\n{}\n'.format(skipped)
err += ' These tasks are deadlocked:\n{}\n'.format(deadlocked)
if err:
raise AirflowException(err)
self.logger.info("Backfill done. Exiting.")
class LocalTaskJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'LocalTaskJob'
}
def __init__(
self,
task_instance,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
mark_success=False,
pickle_id=None,
pool=None,
*args, **kwargs):
self.task_instance = task_instance
self.ignore_all_deps = ignore_all_deps
self.ignore_depends_on_past = ignore_depends_on_past
self.ignore_task_deps = ignore_task_deps
self.ignore_ti_state = ignore_ti_state
self.pool = pool
self.pickle_id = pickle_id
self.mark_success = mark_success
# terminating state is used so that a job doesn't try to
# terminate multiple times
self.terminating = False
# Keeps track of the fact that the task instance has been observed
# as running at least once
self.was_running = False
super(LocalTaskJob, self).__init__(*args, **kwargs)
def _execute(self):
self.task_runner = get_task_runner(self)
try:
self.task_runner.start()
ti = self.task_instance
session = settings.Session()
if self.task_runner.process:
ti.pid = self.task_runner.process.pid
ti.hostname = socket.getfqdn()
session.merge(ti)
session.commit()
session.close()
last_heartbeat_time = time.time()
heartbeat_time_limit = conf.getint('scheduler',
'scheduler_zombie_task_threshold')
while True:
# Monitor the task to see if it's done
return_code = self.task_runner.return_code()
if return_code is not None:
self.logger.info("Task exited with return code {}"
.format(return_code))
return
# Periodically heartbeat so that the scheduler doesn't think this
# is a zombie
try:
self.heartbeat()
last_heartbeat_time = time.time()
except OperationalError:
Stats.incr('local_task_job_heartbeat_failure', 1, 1)
self.logger.exception("Exception while trying to heartbeat! "
"Sleeping for {}s".format(self.heartrate))
time.sleep(self.heartrate)
# If it's been too long since we've heartbeat, then it's possible that
# the scheduler rescheduled this task, so kill launched processes.
time_since_last_heartbeat = time.time() - last_heartbeat_time
if time_since_last_heartbeat > heartbeat_time_limit:
Stats.incr('local_task_job_prolonged_heartbeat_failure', 1, 1)
self.logger.error("Heartbeat time limited exceeded!")
raise AirflowException("Time since last heartbeat({:.2f}s) "
"exceeded limit ({}s)."
.format(time_since_last_heartbeat,
heartbeat_time_limit))
finally:
self.on_kill()
def on_kill(self):
self.task_runner.terminate()
self.task_runner.on_finish()
@provide_session
def heartbeat_callback(self, session=None):
"""Self destruct task if state has been moved away from running externally"""
if self.terminating:
# task is already terminating, let it breathe
return
self.task_instance.refresh_from_db()
ti = self.task_instance
if ti.state == State.RUNNING:
self.was_running = True
fqdn = socket.getfqdn()
if not (fqdn == ti.hostname and
self.task_runner.process.pid == ti.pid):
logging.warning("Recorded hostname and pid of {ti.hostname} "
"and {ti.pid} do not match this instance's "
"which are {fqdn} and "
"{self.task_runner.process.pid}. "
"Taking the poison pill. So long."
.format(**locals()))
raise AirflowException("Another worker/process is running this job")
elif (self.was_running
and self.task_runner.return_code() is None
and hasattr(self.task_runner, 'process')):
logging.warning(
"State of this instance has been externally set to "
"{}. Taking the poison pill. So long.".format(ti.state))
self.task_runner.terminate()
self.terminating = True
| 41.628827 | 110 | 0.546867 |
bd8f8072a351a5aca4bd8f2665a4d834505c5597 | 264 | py | Python | pypytorch/functions/basic/add.py | dark-ai/pypytorch | d28e0f858ad7c33a14e4bb71dc68ae56ba97c5cf | ["MIT"] | 10 | 2019-08-13T10:29:14.000Z | 2022-02-21T01:57:33.000Z | pypytorch/functions/basic/add.py | dark-ai/pypytorch | d28e0f858ad7c33a14e4bb71dc68ae56ba97c5cf | ["MIT"] | 1 | 2019-10-25T02:26:45.000Z | 2019-10-25T11:15:10.000Z | pypytorch/functions/basic/add.py | dark-ai/pypytorch | d28e0f858ad7c33a14e4bb71dc68ae56ba97c5cf | ["MIT"] | 2 | 2019-08-17T00:48:37.000Z | 2019-10-24T09:22:37.000Z |
# -*- coding: utf-8 -*-
from pypytorch.functions.function import Function
class Add(Function):
def forward(self, a, b):
return a + b
def backward_0(self, grad):
return grad
def backward_1(self, grad):
return grad
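# Note added for clarity (the wiring below is an assumption about pypytorch, not
# shown in this file): Add follows the usual autograd Function pattern -- forward
# computes a + b, and backward_0/backward_1 pass the upstream gradient through
# unchanged for each input, since d(a + b)/da = d(a + b)/db = 1.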
| 15.529412 | 49 | 0.590909 |
eb457d367b51ad96569e754d389f7d0bafbd57d2 | 4,093 | py | Python | httpx_scan.py | ismailbozkurt/httpx_scan | 31f0ba83a4a4d1c1beb3ae14fbbf6380b6965dc6 | ["MIT"] | null | null | null | httpx_scan.py | ismailbozkurt/httpx_scan | 31f0ba83a4a4d1c1beb3ae14fbbf6380b6965dc6 | ["MIT"] | null | null | null | httpx_scan.py | ismailbozkurt/httpx_scan | 31f0ba83a4a4d1c1beb3ae14fbbf6380b6965dc6 | ["MIT"] | null | null | null |
from time import sleep
import docker
client = docker.from_env()
def check_image_exist(image_tag):
try:
updated_tag = image_tag + ":latest"
image_list = client.images.list()
if len(image_list) != 0:
for image in image_list:
# image.tags can be empty for untagged images, so check membership instead
# of indexing image.tags[0], which would raise IndexError
if updated_tag in image.tags:
return True
return False
except Exception as err:
raise err
def build_image(dockerfile_path, dockerfile_name, image_tag):
try:
print("build executed")
client.images.build(path=dockerfile_path, dockerfile=dockerfile_name, tag=image_tag, forcerm=True)
return True
except Exception as err:
print(err)
return False
def force_installation_dockers(image_tag_list):
for image_dict in image_tag_list:
if check_image_exist(image_dict["image_tag"]) is False:
print(image_dict["image_tag"])
while True:
if build_image(image_dict["path"], image_dict["dockerfile"], image_dict["image_tag"]):
print("build successfully on {0}".format(image_dict["image_tag"]))
break
else:
print("on_sleep")
sleep(45)
else:
print("image exist installation skipped")
return True
return True
def httpx_exec(local_client, hosts_file, image_tag):
try:
resp = local_client.containers.run(image_tag,
["-l", "/dev/shm/{0}".format(hosts_file),
"-sc", "-td", "-ct", "-server", "-rt", "-title", "-method", "-ip", "-cname",
"-probe",
"-rate-limit", "{0}".format(3),
"-tls-grab", "-tls-probe", "-csp-probe",
"-vhost", "-p", "8080,10000,20000,2222,7080,9009,7443,2087,2096,8443,4100,2082,2083,2086,9999,2052,9001,9002,7000,7001,8082,8084,8085,8010,9000,2078,2080,2079,2053,2095,4000,5280,8888,9443,5800,631,8000,8008,8087,84,85,86,88,10125,9003,7071,8383,7547,3434,10443,8089,3004,81,4567,7081,82,444,1935,3000,9998,4433,4431,4443,83,90,8001,8099,80,300,443,591,593,832,981,1010,1311,2480,3128,3333,4243,4711,4712,4993,5000,5104,5108,6543,7396,7474,8014,8042,8069,8081,8088,8090,8091,8118,8123,8172,8222,8243,8280,8281,8333,8500,8834,8880,8983,9043,9060,9080,9090,9091,9200,9800,9981,12443,16080,18091,18092,20720,28017,6060,8080,10000,20000,2222,7080,9009,7443,2087,2096,8443,4100,2082,2083,2086,9999,2052,9001,9002,7000,7001,8082,8084,8085,8010,9000,2078,2080,2079,2053,2095,4000,5280,8888,9443,5800,631,8000,8008,8087,84,85,86,88,10125,9003,7071,8383,7547,3434,10443,8089,3004,81,4567,7081,82,444,1935,3000,9998,4433,4431,4443,83,90,8001,8099,80,300,443,591,593,832,981,1010,1311,2480,3128,3333,4243,4711,4712,4993,5000,5104,5108,6543,7396,7474,8014,8042,8069,8081,8088,8090,8091,8118,8123,8172,8222,8243,8280,8281,8333,8500,8834,8880,8983,9043,9060,9080,9090,9091,9200,9800,9981,12443,16080,18091,18092,20720,28017,6060",
"-json",
# "-pa", "-nc",
"-nfs", "-retries", "{0}".format(10), "-timeout", "{0}".format(10)
],
volumes={
'/tmp/httpx_scan': {
'bind': '/dev/shm', 'mode': 'rw'}},
auto_remove=True)
print(resp)
with open("out_httpx.txt", "w") as f:
f.write(resp.decode("utf-8"))
return resp
except Exception as err:
raise err
if __name__ == '__main__':
image_tag_list = [
{'path': '.',
"dockerfile": "Dockerfile.httpx",
'image_tag': 'httpx'}]
result = force_installation_dockers(image_tag_list)
if result:
httpx_exec(client, "subdomain.lst", "httpx")
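# Summary of the flow above (comment added for clarity): force_installation_dockers()
# builds the "httpx" image from Dockerfile.httpx if it is missing, retrying every 45s
# on failure; httpx_exec() then runs the container against /tmp/httpx_scan/subdomain.lst
# (mounted at /dev/shm) and writes the JSON-lines output to out_httpx.txt.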
| 49.313253 | 1,254 | 0.564867 |
6fc82cd32b72157e94e60ab7c7610ee0f30442be | 472 | py | Python | desafios/desafio043.py | genisyskernel/cursoemvideo-python | dec301e33933388c886fe78010f38adfb24dae82 | ["MIT"] | 1 | 2020-10-26T04:33:14.000Z | 2020-10-26T04:33:14.000Z | desafios/desafio043.py | genisyskernel/cursoemvideo-python | dec301e33933388c886fe78010f38adfb24dae82 | ["MIT"] | null | null | null | desafios/desafio043.py | genisyskernel/cursoemvideo-python | dec301e33933388c886fe78010f38adfb24dae82 | ["MIT"] | null | null | null |
altura = float(input("Informe sua altura: "))  # height in metres ("Enter your height")
peso = float(input("Informe seu peso: "))  # weight in kilograms ("Enter your weight")
imc = peso / (altura ** 2)  # body mass index (BMI)
if(imc <= 18.5):
situacao = "ABAIXO DO PESO"  # underweight
elif(imc <= 25):  # was `18.5 >= imc <= 25`, which could never be reached after the first branch
situacao = "PESO IDEAL"  # ideal weight
elif(imc <= 30):
situacao = "SOBREPESO"  # overweight
elif(imc <= 40):
situacao = "OBESIDADE"  # obese
else:
situacao = "OBESIDADE MORBIDA"  # morbidly obese
print("Sua altura: {0:.2f} metros.\nSeu peso: {1:.2f} kilos.\nIMC ( {2:.2f} ): {3}!".format(altura, peso, imc, situacao))
| 24.842105 | 121 | 0.603814 |