Dataset schema (one row per source file):

| column | type | values / lengths |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1-1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3-239 |
| max_stars_repo_name | string | length 5-130 |
| max_stars_repo_head_hexsha | string | length 40-78 |
| max_stars_repo_licenses | list | length 1-10 |
| max_stars_count | int64, nullable | 1-191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 3-239 |
| max_issues_repo_name | string | length 5-130 |
| max_issues_repo_head_hexsha | string | length 40-78 |
| max_issues_repo_licenses | list | length 1-10 |
| max_issues_count | int64, nullable | 1-67k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 3-239 |
| max_forks_repo_name | string | length 5-130 |
| max_forks_repo_head_hexsha | string | length 40-78 |
| max_forks_repo_licenses | list | length 1-10 |
| max_forks_count | int64, nullable | 1-105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 1-1.03M |
| avg_line_length | float64 | 1-958k |
| max_line_length | int64 | 1-1.03M |
| alphanum_fraction | float64 | 0-1 |
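The 29 columns above are the per-file metadata plus the file `content` itself. As a rough illustration of how rows with this schema could be sliced, here is a minimal sketch; the Parquet shard name and the choice of the `datasets` loader are assumptions, not something stated on this page:

```python
# Minimal inspection sketch (assumed local shard name "data.parquet").
from datasets import load_dataset

ds = load_dataset("parquet", data_files="data.parquet", split="train")

# Keep permissively licensed Python files that have at least one star.
subset = ds.filter(
    lambda row: "Apache-2.0" in row["max_stars_repo_licenses"]
    and (row["max_stars_count"] or 0) >= 1
)

# Print a few (repo, path, size) triples from the filtered subset.
for row in subset.select(range(min(3, len(subset)))):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```

Each record below is shown as its metadata, followed by the file content and the per-file statistics.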

hexsha: 794f3b6767a7884ddb0641a9e96d5abeb932d3b7 | size: 1,320 | ext: py | lang: Python
repo path: CadVlan/manage.py | repo name: marcusgc/GloboNetworkAPI-WebUI | head hexsha: 1172f14028f9c116d71df7489eda770446b131d2 | licenses: ["Apache-2.0"] (identical across the max_stars/max_issues/max_forks column groups)
max_stars_count: 17 (2015-05-19T20:03:45.000Z to 2022-03-24T06:19:47.000Z) | max_issues_count: 41 (2015-01-27T18:36:07.000Z to 2021-06-10T20:34:03.000Z) | max_forks_count: 19 (2016-09-12T07:35:42.000Z to 2022-01-28T23:46:11.000Z)
content:
# -*- coding:utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
from django.core.management import execute_manager
import imp
try:
imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write(
"Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
sys.exit(1)
import settings
if __name__ == "__main__":
execute_manager(settings)
avg_line_length: 38.823529 | max_line_length: 203 | alphanum_fraction: 0.751515

hexsha: 794f3bb1a554d6189ce477a8af7cd89806f561c1 | size: 8,992 | ext: py | lang: Python
repo path: sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations/_available_service_aliases_operations.py | repo name: rsdoherty/azure-sdk-for-python | head hexsha: 6bba5326677468e6660845a703686327178bb7b1 | licenses: ["MIT"] (identical across the max_stars/max_issues/max_forks column groups)
max_stars_count: 3 (2020-06-23T02:25:27.000Z to 2021-09-07T18:48:11.000Z) | max_issues_count: 510 (2019-07-17T16:11:19.000Z to 2021-08-02T08:38:32.000Z) | max_forks_count: 5 (2019-09-04T12:51:37.000Z to 2020-09-16T07:28:40.000Z)
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AvailableServiceAliasesOperations:
"""AvailableServiceAliasesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location: str,
**kwargs
) -> AsyncIterable["_models.AvailableServiceAliasesResult"]:
"""Gets all available service aliases for this subscription in this region.
:param location: The location.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailableServiceAliasesResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.AvailableServiceAliasesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailableServiceAliasesResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AvailableServiceAliasesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/availableServiceAliases'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
location: str,
**kwargs
) -> AsyncIterable["_models.AvailableServiceAliasesResult"]:
"""Gets all available service aliases for this resource group in this region.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param location: The location.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AvailableServiceAliasesResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.AvailableServiceAliasesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailableServiceAliasesResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AvailableServiceAliasesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/availableServiceAliases'} # type: ignore
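# Illustrative consumption sketch, not part of the generated file. Per the class
# docstring, this operations group is reached through a service client attribute;
# the client object, attribute name and result fields below are assumptions:
#     async for alias in network_client.available_service_aliases.list(location="westus"):
#         print(alias.name)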
avg_line_length: 48.085561 | max_line_length: 204 | alphanum_fraction: 0.655916

hexsha: 794f3cf569829887ba32360a4dc7737bff1519e8 | size: 7,698 | ext: py | lang: Python
repo path: auto_ks/kalman_smoother.py | repo name: opendoor-labs/auto_ks | head hexsha: e60bcc639ee7ee312fdb6c2d0b907e10dec09d49 | licenses: ["Apache-2.0"] (identical across the max_stars/max_issues/max_forks column groups)
max_stars_count: 35 (2019-10-19T00:30:05.000Z to 2022-03-31T23:32:05.000Z) | max_issues_count: 4 (2019-10-23T10:20:15.000Z to 2021-01-20T13:10:40.000Z) | max_forks_count: 8 (2020-01-09T09:19:29.000Z to 2022-01-19T05:09:05.000Z)
content:
import numpy as np
import numpy.random as npr
from scipy import sparse
from scipy.sparse import linalg as splinalg
import IPython as ipy
import time
import numbers
class KalmanSmootherParameters:
def __init__(self, A, W_neg_sqrt, C, V_neg_sqrt):
self.A = A
self.W_neg_sqrt = W_neg_sqrt
self.C = C
self.V_neg_sqrt = V_neg_sqrt
def __add__(self, y):
if isinstance(y, KalmanSmootherParameters):
return KalmanSmootherParameters(self.A + y.A, self.W_neg_sqrt + y.W_neg_sqrt, self.C + y.C, self.V_neg_sqrt + y.V_neg_sqrt)
elif isinstance(y, KalmanSmootherParameterDerivatives):
return KalmanSmootherParameters(self.A + y.DA, self.W_neg_sqrt + y.DW_neg_sqrt, self.C + y.DC, self.V_neg_sqrt + y.DV_neg_sqrt)
else:
return NotImplementedError
def __sub__(self, y):
return self.__add__(-1.0 * y)
def __mul__(self, a):
assert isinstance(a, numbers.Number)
return KalmanSmootherParameters(self.A * a, self.W_neg_sqrt * a, self.C * a, self.V_neg_sqrt * a)
__radd__ = __add__
__rmul__ = __mul__
class KalmanSmootherParameterDerivatives:
def __init__(self, DA, DW_neg_sqrt, DC, DV_neg_sqrt):
self.DA = DA
self.DW_neg_sqrt = DW_neg_sqrt
self.DC = DC
self.DV_neg_sqrt = DV_neg_sqrt
def __mul__(self, a):
assert isinstance(a, numbers.Number)
return KalmanSmootherParameters(self.DA * a, self.DW_neg_sqrt * a, self.DC * a, self.DV_neg_sqrt * a)
__rmul__ = __mul__
def get_D(A, W_neg_sqrt, C, V_neg_sqrt, n, p, T, lam):
temp1 = sparse.kron(sparse.eye(T - 1),
W_neg_sqrt)
temp2 = sparse.kron(sparse.eye(T - 1), -W_neg_sqrt @ A)
D_11 = sparse.hstack([sparse.csc_matrix(((T - 1) * n, n)), temp1]) + sparse.hstack([
temp2, sparse.csc_matrix(((T - 1) * n, n))])
D_12 = sparse.csc_matrix(((T - 1) * n, T * p))
D_21 = sparse.kron(sparse.eye(T), -V_neg_sqrt @ C)
D_22 = sparse.kron(sparse.eye(T), V_neg_sqrt)
return sparse.bmat([
[D_11, D_12],
[D_21, D_22],
[lam * sparse.eye(T * n), None],
[None, lam * sparse.eye(T * p)]
])
def kalman_smoother(kalman_smoother_parameters, y, K, lam):
"""
minimize ||Dz||^2
subject to Bz=c
Args:
        - kalman_smoother_parameters: KalmanSmootherParameters object.
- y: T x p output trajectory
- K: T x p boolean output mask
- lam: float, scale of Tikhonov regularization
Returns:
- xhat: state trajectory
- yhat: output trajectory
- DT: function that computes derivative
"""
T, p = y.shape
assert y.ndim == 2
assert type(y) is np.ndarray
np.testing.assert_array_equal(y.shape, (T, p))
solve, DT = _kalman_smoother(kalman_smoother_parameters, K, lam)
xhat, yhat, z = solve(y)
def DT1(dxhat=np.zeros(xhat.shape), dyhat=np.zeros(yhat.shape)):
return DT(z, dxhat=dxhat, dyhat=dyhat)
return xhat, yhat, DT1
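# Illustrative usage sketch (not part of the original module). Shapes follow the
# assertions in _kalman_smoother below: A and W_neg_sqrt are n x n, C is p x n,
# V_neg_sqrt is p x p, and y and K are T x p. Values here are placeholders.
#     T, n, p = 50, 2, 1
#     params = KalmanSmootherParameters(
#         A=np.eye(n), W_neg_sqrt=np.eye(n), C=np.ones((p, n)), V_neg_sqrt=np.eye(p))
#     y = npr.randn(T, p)
#     K = np.ones((T, p), dtype=bool)
#     xhat, yhat, DT = kalman_smoother(params, y, K, lam=1e-6)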
def _kalman_smoother(kalman_smoother_parameters, K, lam):
"""
minimize ||Dz||^2
subject to Bz=c
Args:
        - kalman_smoother_parameters: KalmanSmootherParameters object.
- K: T x p boolean output mask
- lam: float, scale of Tikhonov regularization
Returns:
- solve: a method that takes one argument: y, and smooths it
- DT: function that computes derivative
"""
A = kalman_smoother_parameters.A
W_neg_sqrt = kalman_smoother_parameters.W_neg_sqrt
C = kalman_smoother_parameters.C
V_neg_sqrt = kalman_smoother_parameters.V_neg_sqrt
T, _ = K.shape
n, _ = A.shape
p = V_neg_sqrt.shape[0]
z_size = (n + p) * T
# First we form the least squares coefficient matrix D
D = get_D(A, W_neg_sqrt, C, V_neg_sqrt, n, p, T, lam)
D_full = get_D(np.ones(A.shape), np.ones(
W_neg_sqrt.shape), np.ones(C.shape), np.ones(V_neg_sqrt.shape), n, p, T, 1)
D_rows, D_cols = D_full.nonzero()
del D_full
assert type(A) is np.ndarray or type(A) is np.matrix
assert type(W_neg_sqrt) is np.ndarray or type(W_neg_sqrt) is np.matrix
assert type(C) is np.ndarray or type(C) is np.matrix
assert type(V_neg_sqrt) is np.ndarray or type(V_neg_sqrt) is np.matrix
assert type(K) is np.ndarray
assert isinstance(lam, numbers.Number)
assert A.ndim == 2
np.testing.assert_array_equal(A.shape, (n, n))
np.testing.assert_array_equal(A.shape, (n, n))
np.testing.assert_array_equal(W_neg_sqrt.shape, (n, n))
np.testing.assert_array_equal(C.shape, (p, n))
np.testing.assert_array_equal(V_neg_sqrt.shape, (p, p))
np.testing.assert_array_equal(K.shape, (T, p))
# Next we form the coefficients of the equality constraint
rows, cols = K.nonzero()
c_size = K.sum()
S = sparse.csc_matrix((np.ones(c_size), (np.arange(
c_size), rows * p + cols)), shape=(c_size, T * p))
B = sparse.bmat([
[sparse.csc_matrix((c_size, T * n)), S]
])
# Next we form the KKT matrix
M = sparse.bmat([
[None, D.T, B.T],
[D, -sparse.eye(D.shape[0]), None],
[B, None, None]
], format='csc')
# And factorize it
solve = splinalg.factorized(M)
def smooth(y):
c = y[K]
# And solve for z
rhs = np.concatenate([np.zeros(z_size), np.zeros(D.shape[0]), c])
sol = solve(rhs)
z = sol[:z_size]
xhat = z[:T * n].reshape(T, n, order='C')
yhat = z[T * n:T * (n + p)].reshape(T, p, order='C')
return xhat, yhat, z
# This function implements the derivative
def DT(z, dxhat=np.zeros((T, n)), dyhat=np.zeros((T, p))):
"""
Args:
- dxhat: T x n output trajectory
- dyhat: T x p output trajectory
"""
g = np.concatenate(
[dxhat.flatten(order='C'), dyhat.flatten(order='C'), np.zeros(D.shape[0]), np.zeros(c_size)])
dsol = -solve(g)[:z_size]
values = (D @ z)[D_rows] * dsol[D_cols] + (D @ dsol)[D_rows] * z[D_cols]
dD = sparse.csc_matrix((values, (D_rows, D_cols)), shape=D.shape)
DA = np.zeros(A.shape)
DW_neg_sqrt = np.zeros(W_neg_sqrt.shape)
DC = np.zeros(C.shape)
DV_neg_sqrt = np.zeros(V_neg_sqrt.shape)
summer_T_left = sparse.kron(np.ones((1, T)), sparse.eye(p))
summer_T_right = sparse.kron(np.ones((T, 1)), sparse.eye(p))
summer_T_right_n = sparse.kron(np.ones((T, 1)), sparse.eye(n))
summer_T1_left = sparse.kron(np.ones((1, T - 1)), sparse.eye(n))
summer_T1_right = sparse.kron(np.ones((T - 1, 1)), sparse.eye(n))
inside = -summer_T_left @ dD[(T - 1) * n:(T - 1) * n + T * p, :T * n] @ summer_T_right_n
DV_neg_sqrt += summer_T_left @ dD[(T - 1) * n:(T - 1) * n + T * p, T * n:T * n + T * p] @ summer_T_right
DV_neg_sqrt += inside @ C.T
DC += V_neg_sqrt.T @ inside
masked = dD[:(T - 1) * n, :(T - 1) * n]
masked = masked.multiply(sparse.kron(
sparse.eye(T - 1), np.ones((n, n))))
inside = -summer_T1_left @ masked @ summer_T1_right
DA += W_neg_sqrt.T @ inside
DW_neg_sqrt += inside @ A.T
masked = dD[:(T - 1) * n, n:T * n]
masked = masked.multiply(sparse.kron(
sparse.eye(T - 1), np.ones((n, n))))
DW_neg_sqrt += summer_T1_left @ masked @ summer_T1_right
DA = np.array(DA)
DW_neg_sqrt = np.array(DW_neg_sqrt)
DC = np.array(DC)
DV_neg_sqrt = np.array(DV_neg_sqrt)
return KalmanSmootherParameterDerivatives(DA, DW_neg_sqrt, DC, DV_neg_sqrt)
return smooth, DT
avg_line_length: 35.311927 | max_line_length: 140 | alphanum_fraction: 0.607301

hexsha: 794f3d622fb1c021f919cc62e483a6cb3f8f0b9a | size: 4,981 | ext: py | lang: Python
repo path: VART/samples/inception_v1_mt_py/inception_v1.py | repo name: qianglin-xlnx/Vitis-AI | head hexsha: ae1e8f9db31a1980e0b7bb86baeb898c4fe0da26 | licenses: ["Apache-2.0"] (identical across the max_stars/max_issues/max_forks column groups)
max_stars_count: 3 (2020-10-29T15:00:30.000Z to 2021-10-21T08:09:34.000Z) | max_issues_count: 20 (2020-10-31T03:19:03.000Z to 2020-11-02T18:59:49.000Z) | max_forks_count: 9 (2020-10-14T02:04:10.000Z to 2020-12-01T08:23:02.000Z)
content:
'''
Copyright 2019 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from ctypes import *
import cv2
import numpy as np
import runner
import os
import input_fn
import math
import threading
import time
import sys
'''
Calculate softmax
data: data to be calculated
size: data size
return: softmax result
'''
def CPUCalcSoftmax(data,size):
sum=0.0
result = [0 for i in range(size)]
for i in range(size):
result[i] = math.exp(data[i])
sum +=result[i]
for i in range(size):
result[i] /=sum
return result
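# Worked example (illustrative): CPUCalcSoftmax([1.0, 2.0, 3.0], 3) returns three
# values proportional to e^1, e^2 and e^3 that sum to 1 (about 0.09, 0.245, 0.665).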
def get_script_directory():
path = os.getcwd()
return path
'''
Get top-k results according to their probability
datain: data result of softmax
filePath: file which records the class label information
'''
def TopK(datain,size,filePath):
cnt=[i for i in range(size) ]
pair=zip(datain,cnt)
pair=sorted(pair,reverse=True)
softmax_new,cnt_new=zip(*pair)
fp=open(filePath, "r")
data1=fp.readlines()
fp.close()
for i in range(5):
flag=0
for line in data1:
if flag==cnt_new[i]:
print("Top[%d] %f %s" %(i, (softmax_new[i]),(line.strip)("\n")))
flag=flag+1
SCRIPT_DIR = get_script_directory()
calib_image_dir = SCRIPT_DIR + "/../images/"
IMAGE_WIDTH = 224
IMAGE_HEIGHT = 224
global threadnum
threadnum = 0
'''
run inception_v1 with batch
dpu: dpu runner
img: imagelist to be run
cnt: threadnum
'''
def runInceptionV1(dpu,img,cnt):
"""get tensor"""
inputTensors = dpu.get_input_tensors()
outputTensors = dpu.get_output_tensors()
tensorformat = dpu.get_tensor_format()
if tensorformat == dpu.TensorFormat.NCHW:
outputHeight = outputTensors[0].dims[2]
outputWidth = outputTensors[0].dims[3]
outputChannel = outputTensors[0].dims[1]
elif tensorformat == dpu.TensorFormat.NHWC:
outputHeight = outputTensors[0].dims[1]
outputWidth = outputTensors[0].dims[2]
outputChannel = outputTensors[0].dims[3]
else:
exit("Format error")
outputSize = outputHeight*outputWidth*outputChannel
softmax = np.empty(outputSize)
count = 0
n_of_images = len(img);
batchSize = inputTensors[0].dims[0]
while count < cnt:
runSize = batchSize
shapeIn = (runSize,) + tuple([inputTensors[0].dims[i] for i in range(inputTensors[0].ndims)][1:])
"""prepare batch input/output """
outputData = []
inputData = []
outputData.append(np.empty((runSize,outputHeight,outputWidth,outputChannel), dtype = np.float32, order = 'C'))
inputData.append(np.empty((shapeIn), dtype = np.float32, order = 'C'))
"""init input image to input buffer """
for j in range(runSize):
imageRun = inputData[0]
imageRun[j,...] = img[(count+j)%n_of_images].reshape(inputTensors[0].dims[1],inputTensors[0].dims[2],inputTensors[0].dims[3])
"""run with batch """
job_id = dpu.execute_async(inputData,outputData)
dpu.wait(job_id)
for j in range(len(outputData)):
outputData[j] = outputData[j].reshape(runSize, outputSize)
"""softmax calculate with batch """
for j in range(runSize):
softmax = CPUCalcSoftmax(outputData[0][j], outputSize)
count = count + runSize
def main(argv):
global threadnum
"""create runner """
listimage=os.listdir(calib_image_dir)
threadAll = []
threadnum = int(argv[1])
i = 0
global runTotall
runTotall = len(listimage)
all_dpu_runners = [];
for i in range(int(threadnum)):
all_dpu_runners.append(runner.Runner(argv[2])[0])
"""image list to be run """
img = []
for i in range(runTotall):
path = os.path.join(calib_image_dir,listimage[i])
image = cv2.imread(path)
img.append(input_fn.preprocess_fn(image))
cnt = 1200
"""run with batch """
time1 = time.time()
for i in range(int(threadnum)):
t1 = threading.Thread(target=runInceptionV1, args=(all_dpu_runners[i], img, cnt))
threadAll.append(t1)
for x in threadAll:
x.start()
for x in threadAll:
x.join()
time2 = time.time()
total = cnt * int(threadnum)
timetotal = time2 - time1
fps = float(total / timetotal)
print("%.2f FPS" %fps)
if __name__ == "__main__":
if len(sys.argv) != 3:
print("please input thread number and json file path.")
else :
main(sys.argv)
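# Example invocation (illustrative; argument values are placeholders, matching the
# usage message in main above):
#     python3 inception_v1.py <thread_number> <json_file_path>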
avg_line_length: 27.983146 | max_line_length: 137 | alphanum_fraction: 0.647661

hexsha: 794f3d8c7116926d9e4ac307ceae237fc72886a3 | size: 11,841 | ext: py | lang: Python
repo path: packages/pytea/pytest/benchmarks/transformers/missing_idx/src/transformers/tokenization_camembert.py | repo name: lego0901/pytea | head hexsha: 8ede650def2e68f4610ba816451d8b9e28f09f76 | licenses: ["MIT"] (identical across the max_stars/max_issues/max_forks column groups)
max_stars_count: 96 (2021-06-16T09:06:52.000Z to 2022-03-26T09:56:32.000Z) | max_issues_count: 16 (2021-07-01T05:34:48.000Z to 2022-03-28T09:40:15.000Z) | max_forks_count: 24 (2021-06-19T15:58:31.000Z to 2022-03-14T09:17:19.000Z)
content:
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
""" Tokenization classes for Camembert model."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from .tokenization_utils import PreTrainedTokenizer
from .utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"camembert-base": 512,
}
SHARED_MODEL_IDENTIFIERS = [
# Load with
# `tokenizer = AutoTokenizer.from_pretrained("username/pretrained_model")`
"Musixmatch/umberto-commoncrawl-cased-v1",
"Musixmatch/umberto-wikipedia-uncased-v1",
]
SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
"""
Adapted from :class:`~transformers.RobertaTokenizer` and :class:`~transformers.XLNetTokenizer`. Construct a
CamemBERT tokenizer. Based on `SentencePiece <https://github.com/google/sentencepiece>`__.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.
Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
`SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a `.spm` extension) that
contains the vocabulary necessary to instantiate a tokenizer.
bos_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the beginning of
sequence. The token used is the :obj:`cls_token`.
eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The end of sequence token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the end of
sequence. The token used is the :obj:`sep_token`.
sep_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
cls_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
mask_token (:obj:`str`, `optional`, defaults to :obj:`"<mask>"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
additional_special_tokens (:obj:`List[str]`, `optional`, defaults to :obj:`["<s>NOTUSED", "</s>NOTUSED"]`):
Additional special tokens used by the tokenizer.
Attributes: sp_model (:obj:`SentencePieceProcessor`): The `SentencePiece` processor that is used for every
conversion (string, tokens and IDs).
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["attention_mask"]
def __init__(
self,
vocab_file,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
**kwargs
):
super().__init__(
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
additional_special_tokens=additional_special_tokens,
**kwargs,
)
self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(str(vocab_file))
self.vocab_file = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>).
self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
self.fairseq_offset = len(self.fairseq_tokens_to_ids)
self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
        adding special tokens. A CamemBERT sequence has the following format:
- single sequence: ``<s> X </s>``
- pair of sequences: ``<s> A </s></s> B </s>``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
"""
Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` method.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
:obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. CamemBERT, like
RoBERTa, does not make use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of zeros.
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
def vocab_size(self):
return len(self.fairseq_tokens_to_ids) + len(self.sp_model)
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _tokenize(self, text):
return self.sp_model.EncodeAsPieces(text)
def _convert_token_to_id(self, token):
""" Converts a token (str) in an id using the vocab. """
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(token) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(token)
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def __getstate__(self):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d):
self.__dict__ = d
self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
return out_string
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
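# Illustrative usage sketch (not part of the original file; the vocab path is a placeholder):
#     tokenizer = CamembertTokenizer(vocab_file="sentencepiece.bpe.model")
#     pieces = tokenizer._tokenize("Le camembert est excellent")
#     ids = [tokenizer._convert_token_to_id(p) for p in pieces]
#     model_input = tokenizer.build_inputs_with_special_tokens(ids)  # <s> ... </s>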
avg_line_length: 43.533088 | max_line_length: 119 | alphanum_fraction: 0.648763

hexsha: 794f3d96a93aa8a49c8bbc2f2459859cea86e987 | size: 2,030 | ext: py | lang: Python
repo path: p2p/agent_manager.py | repo name: fipu-lab/p2p_bn | head hexsha: f2c67766f030de28fed82b11188f391d338bbe12 | licenses: ["MIT"] (identical across the max_stars/max_issues/max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
content:
from p2p.p2p_utils import *
import time
def init_agents(agent_class, train_clients, val_clients, test_clients, batch_size, model_pars=None, agent_pars=None):
start_time = time.time()
model_default = {"model_v": 1, "lr": 0.001, "decay": 0, "default_weights": False}
model_pars = model_default if model_pars is None else {**model_default, **model_pars}
agent_pars = agent_pars or {}
num_agents = len(train_clients)
print("{}: {} agents, batch size: {}, model_pars: {}, agent_pars: {}".format(
agent_class.__name__, num_agents, batch_size, model_pars, agent_pars))
clear_def_weights_cache()
pbar = tqdm(total=num_agents, position=0, leave=False, desc='Init agents')
devices = environ.get_devices()
agents = []
for agent_id, (train, val, test) in enumerate(zip(train_clients, val_clients, test_clients)):
device = resolve_agent_device(agents, None, devices)
with tf.device(device or 'CPU'):
clear_session()
agent_pars['train'] = train
agent_pars['val'] = val
agent_pars['test'] = test
agent_pars['batch_size'] = batch_size
agent_pars['model'] = create_model(**model_pars)
a = agent_class(**agent_pars)
a.device = device
a.id = agent_id
agents.append(a)
update_pb(pbar, agents, 1, start_time)
pbar.close()
print("Init agents: {} minutes".format(round((time.time() - start_time) / 60)))
# environ.save_env_vars()
return agents
def load_agents(agent_class, train_clients, val_clients, test_clients, batch_size, first_agent_id):
environ.set_agent_id(first_agent_id)
agents = []
for train, val, test in zip(train_clients, val_clients, test_clients):
a = agent_class(train=train,
val=val,
test=test,
batch_size=batch_size,
model=None,
)
agents.append(a)
return agents
avg_line_length: 38.301887 | max_line_length: 117 | alphanum_fraction: 0.620197

hexsha: 794f3dfdb8c420519c9f289ce116f2ff1ebba48f | size: 76,587 | ext: py | lang: Python
repo path: flopy/utils/reference.py | repo name: ConnectedSystems/flopy | head hexsha: cf3334437be74ba780c12ff2aa2b69f3ffbf8644 | licenses: ["CC0-1.0", "BSD-3-Clause"] (identical across the max_stars/max_issues/max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes null)
content:
"""
Module spatial referencing for flopy model objects
"""
import json
import numpy as np
import os
import warnings
from collections import OrderedDict
class SpatialReference(object):
"""
a class to locate a structured model grid in x-y space
Parameters
----------
delr : numpy ndarray
the model discretization delr vector
(An array of spacings along a row)
delc : numpy ndarray
the model discretization delc vector
(An array of spacings along a column)
lenuni : int
the length units flag from the discretization package
(default 2)
xul : float
the x coordinate of the upper left corner of the grid
Enter either xul and yul or xll and yll.
yul : float
the y coordinate of the upper left corner of the grid
Enter either xul and yul or xll and yll.
xll : float
the x coordinate of the lower left corner of the grid
Enter either xul and yul or xll and yll.
yll : float
the y coordinate of the lower left corner of the grid
Enter either xul and yul or xll and yll.
rotation : float
the counter-clockwise rotation (in degrees) of the grid
proj4_str: str
a PROJ4 string that identifies the grid in space. warning: case
sensitive!
units : string
Units for the grid. Must be either feet or meters
epsg : int
EPSG code that identifies the grid in space. Can be used in lieu of
proj4. PROJ4 attribute will auto-populate if there is an internet
connection(via get_proj4 method).
See https://www.epsg-registry.org/ or spatialreference.org
length_multiplier : float
multiplier to convert model units to spatial reference units.
delr and delc above will be multiplied by this value. (default=1.)
Attributes
----------
xedge : ndarray
array of column edges
yedge : ndarray
array of row edges
xgrid : ndarray
numpy meshgrid of xedges
ygrid : ndarray
numpy meshgrid of yedges
xcenter : ndarray
array of column centers
ycenter : ndarray
array of row centers
xcentergrid : ndarray
numpy meshgrid of column centers
ycentergrid : ndarray
numpy meshgrid of row centers
vertices : 1D array
1D array of cell vertices for whole grid in C-style (row-major) order
(same as np.ravel())
Notes
-----
xul and yul can be explicitly (re)set after SpatialReference
instantiation, but only before any of the other attributes and methods are
accessed
"""
xul, yul = None, None
xll, yll = None, None
rotation = 0.
length_multiplier = 1.
origin_loc = 'ul' # or ll
defaults = {"xul": None, "yul": None, "rotation": 0.,
"proj4_str": None,
"units": None, "lenuni": 2,
"length_multiplier": None,
"source": 'defaults'}
lenuni_values = {'undefined': 0,
'feet': 1,
'meters': 2,
'centimeters': 3}
lenuni_text = {v: k for k, v in lenuni_values.items()}
def __init__(self, delr=np.array([]), delc=np.array([]), lenuni=2,
xul=None, yul=None, xll=None, yll=None, rotation=0.0,
proj4_str=None, epsg=None, prj=None, units=None,
length_multiplier=None):
warnings.warn("SpatialReference has been deprecated. Use StructuredGrid"
" instead.",
category=DeprecationWarning)
for delrc in [delr, delc]:
if isinstance(delrc, float) or isinstance(delrc, int):
msg = ('delr and delcs must be an array or sequences equal in '
'length to the number of rows/columns.')
raise TypeError(msg)
self.delc = np.atleast_1d(np.array(delc)).astype(
np.float64) # * length_multiplier
self.delr = np.atleast_1d(np.array(delr)).astype(
np.float64) # * length_multiplier
if self.delr.sum() == 0 or self.delc.sum() == 0:
if xll is None or yll is None:
msg = ('Warning: no grid spacing or lower-left corner '
'supplied. Setting the offset with xul, yul requires '
'arguments for delr and delc. Origin will be set to '
'zero.')
print(msg)
xll, yll = 0, 0
xul, yul = None, None
self._lenuni = lenuni
self._proj4_str = proj4_str
self._epsg = epsg
if epsg is not None:
self._proj4_str = getproj4(self._epsg)
self.prj = prj
self._wkt = None
self.crs = crs(prj=prj, epsg=epsg)
self.supported_units = ["feet", "meters"]
self._units = units
self._length_multiplier = length_multiplier
self._reset()
self.set_spatialreference(xul, yul, xll, yll, rotation)
@property
def xll(self):
if self.origin_loc == 'll':
xll = self._xll if self._xll is not None else 0.
elif self.origin_loc == 'ul':
# calculate coords for lower left corner
xll = self._xul - (np.sin(self.theta) * self.yedge[0] *
self.length_multiplier)
return xll
@property
def yll(self):
if self.origin_loc == 'll':
yll = self._yll if self._yll is not None else 0.
elif self.origin_loc == 'ul':
# calculate coords for lower left corner
yll = self._yul - (np.cos(self.theta) * self.yedge[0] *
self.length_multiplier)
return yll
@property
def xul(self):
if self.origin_loc == 'll':
# calculate coords for upper left corner
xul = self._xll + (np.sin(self.theta) * self.yedge[0] *
self.length_multiplier)
if self.origin_loc == 'ul':
# calculate coords for lower left corner
xul = self._xul if self._xul is not None else 0.
return xul
@property
def yul(self):
if self.origin_loc == 'll':
# calculate coords for upper left corner
yul = self._yll + (np.cos(self.theta) * self.yedge[0] *
self.length_multiplier)
if self.origin_loc == 'ul':
# calculate coords for lower left corner
yul = self._yul if self._yul is not None else 0.
return yul
@property
def proj4_str(self):
proj4_str = None
if self._proj4_str is not None:
if "epsg" in self._proj4_str.lower():
if "init" not in self._proj4_str.lower():
proj4_str = "+init=" + self._proj4_str
else:
proj4_str = self._proj4_str
# set the epsg if proj4 specifies it
tmp = [i for i in self._proj4_str.split() if
'epsg' in i.lower()]
self._epsg = int(tmp[0].split(':')[1])
else:
proj4_str = self._proj4_str
elif self.epsg is not None:
proj4_str = '+init=epsg:{}'.format(self.epsg)
return proj4_str
@property
def epsg(self):
# don't reset the proj4 string here
# because proj4 attribute may already be populated
# (with more details than getproj4 would return)
# instead reset proj4 when epsg is set
# (on init or setattr)
return self._epsg
@property
def wkt(self):
if self._wkt is None:
if self.prj is not None:
with open(self.prj) as src:
wkt = src.read()
elif self.epsg is not None:
wkt = getprj(self.epsg)
else:
return None
return wkt
else:
return self._wkt
@property
def lenuni(self):
return self._lenuni
def _parse_units_from_proj4(self):
units = None
try:
# need this because preserve_units doesn't seem to be
# working for complex proj4 strings. So if an
# epsg code was passed, we have no choice, but if a
# proj4 string was passed, we can just parse it
if "EPSG" in self.proj4_str.upper():
import pyproj
crs = pyproj.Proj(self.proj4_str,
preserve_units=True,
errcheck=True)
proj_str = crs.srs
else:
proj_str = self.proj4_str
# http://proj4.org/parameters.html#units
# from proj4 source code
# "us-ft", "0.304800609601219", "U.S. Surveyor's Foot",
# "ft", "0.3048", "International Foot",
if "units=m" in proj_str:
units = "meters"
elif "units=ft" in proj_str or \
"units=us-ft" in proj_str or \
"to_meters:0.3048" in proj_str:
units = "feet"
return units
except:
pass
@property
def units(self):
if self._units is not None:
units = self._units.lower()
else:
units = self._parse_units_from_proj4()
if units is None:
# print("warning: assuming SpatialReference units are meters")
units = 'meters'
assert units in self.supported_units
return units
@property
def length_multiplier(self):
"""Attempt to identify multiplier for converting from
model units to sr units, defaulting to 1."""
lm = None
if self._length_multiplier is not None:
lm = self._length_multiplier
else:
if self.model_length_units == 'feet':
if self.units == 'meters':
lm = 0.3048
elif self.units == 'feet':
lm = 1.
elif self.model_length_units == 'meters':
if self.units == 'feet':
lm = 1 / .3048
elif self.units == 'meters':
lm = 1.
elif self.model_length_units == 'centimeters':
if self.units == 'meters':
lm = 1 / 100.
elif self.units == 'feet':
lm = 1 / 30.48
else: # model units unspecified; default to 1
lm = 1.
return lm
@property
def model_length_units(self):
return self.lenuni_text[self.lenuni]
@property
def bounds(self):
"""Return bounding box in shapely order."""
xmin, xmax, ymin, ymax = self.get_extent()
return xmin, ymin, xmax, ymax
@staticmethod
def load(namefile=None, reffile='usgs.model.reference'):
"""Attempts to load spatial reference information from
the following files (in order):
1) usgs.model.reference
2) NAM file (header comment)
3) SpatialReference.default dictionary
"""
reffile = os.path.join(os.path.split(namefile)[0], reffile)
d = SpatialReference.read_usgs_model_reference_file(reffile)
if d is not None:
return d
d = SpatialReference.attribs_from_namfile_header(namefile)
if d is not None:
return d
else:
return SpatialReference.defaults
@staticmethod
def attribs_from_namfile_header(namefile):
# check for reference info in the nam file header
d = SpatialReference.defaults.copy()
d['source'] = 'namfile'
if namefile is None:
return None
header = []
with open(namefile, 'r') as f:
for line in f:
if not line.startswith('#'):
break
header.extend(line.strip().replace('#', '').split(';'))
for item in header:
if "xul" in item.lower():
try:
d['xul'] = float(item.split(':')[1])
except:
pass
elif "yul" in item.lower():
try:
d['yul'] = float(item.split(':')[1])
except:
pass
elif "rotation" in item.lower():
try:
d['rotation'] = float(item.split(':')[1])
except:
pass
elif "proj4_str" in item.lower():
try:
proj4_str = ':'.join(item.split(':')[1:]).strip()
if proj4_str.lower() == 'none':
proj4_str = None
d['proj4_str'] = proj4_str
except:
pass
elif "start" in item.lower():
try:
d['start_datetime'] = item.split(':')[1].strip()
except:
pass
# spatial reference length units
elif "units" in item.lower():
d['units'] = item.split(':')[1].strip()
# model length units
elif "lenuni" in item.lower():
d['lenuni'] = int(item.split(':')[1].strip())
# multiplier for converting from model length units to sr length units
elif "length_multiplier" in item.lower():
d['length_multiplier'] = float(item.split(':')[1].strip())
return d
@staticmethod
def read_usgs_model_reference_file(reffile='usgs.model.reference'):
"""read spatial reference info from the usgs.model.reference file
https://water.usgs.gov/ogw/policy/gw-model/modelers-setup.html"""
ITMUNI = {0: "undefined", 1: "seconds", 2: "minutes", 3: "hours",
4: "days",
5: "years"}
itmuni_values = {v: k for k, v in ITMUNI.items()}
d = SpatialReference.defaults.copy()
d['source'] = 'usgs.model.reference'
d.pop(
'proj4_str') # discard default to avoid confusion with epsg code if entered
if os.path.exists(reffile):
with open(reffile) as input:
for line in input:
if len(line) > 1:
if line.strip()[0] != '#':
info = line.strip().split('#')[0].split()
if len(info) > 1:
d[info[0].lower()] = ' '.join(info[1:])
d['xul'] = float(d['xul'])
d['yul'] = float(d['yul'])
d['rotation'] = float(d['rotation'])
# convert the model.reference text to a lenuni value
# (these are the model length units)
if 'length_units' in d.keys():
d['lenuni'] = SpatialReference.lenuni_values[d['length_units']]
if 'time_units' in d.keys():
d['itmuni'] = itmuni_values[d['time_units']]
if 'start_date' in d.keys():
start_datetime = d.pop('start_date')
if 'start_time' in d.keys():
start_datetime += ' {}'.format(d.pop('start_time'))
d['start_datetime'] = start_datetime
if 'epsg' in d.keys():
try:
d['epsg'] = int(d['epsg'])
except Exception as e:
raise Exception(
"error reading epsg code from file:\n" + str(e))
# this prioritizes epsg over proj4 if both are given
# (otherwise 'proj4' entry will be dropped below)
elif 'proj4' in d.keys():
d['proj4_str'] = d['proj4']
# drop any other items that aren't used in sr class
d = {k: v for k, v in d.items() if
k.lower() in SpatialReference.defaults.keys()
or k.lower() in {'epsg', 'start_datetime', 'itmuni',
'source'}}
return d
else:
return None
def __setattr__(self, key, value):
reset = True
if key == "delr":
super(SpatialReference, self). \
__setattr__("delr", np.atleast_1d(np.array(value)))
elif key == "delc":
super(SpatialReference, self). \
__setattr__("delc", np.atleast_1d(np.array(value)))
elif key == "xul":
super(SpatialReference, self). \
__setattr__("_xul", float(value))
self.origin_loc = 'ul'
elif key == "yul":
super(SpatialReference, self). \
__setattr__("_yul", float(value))
self.origin_loc = 'ul'
elif key == "xll":
super(SpatialReference, self). \
__setattr__("_xll", float(value))
self.origin_loc = 'll'
elif key == "yll":
super(SpatialReference, self). \
__setattr__("_yll", float(value))
self.origin_loc = 'll'
elif key == "length_multiplier":
super(SpatialReference, self). \
__setattr__("_length_multiplier", float(value))
# self.set_origin(xul=self.xul, yul=self.yul, xll=self.xll,
# yll=self.yll)
elif key == "rotation":
super(SpatialReference, self). \
__setattr__("rotation", float(value))
# self.set_origin(xul=self.xul, yul=self.yul, xll=self.xll,
# yll=self.yll)
elif key == "lenuni":
super(SpatialReference, self). \
__setattr__("_lenuni", int(value))
# self.set_origin(xul=self.xul, yul=self.yul, xll=self.xll,
# yll=self.yll)
elif key == "units":
value = value.lower()
assert value in self.supported_units
super(SpatialReference, self). \
__setattr__("_units", value)
elif key == "proj4_str":
super(SpatialReference, self). \
__setattr__("_proj4_str", value)
# reset the units and epsg
units = self._parse_units_from_proj4()
if units is not None:
self._units = units
self._epsg = None
elif key == "epsg":
super(SpatialReference, self). \
__setattr__("_epsg", value)
# reset the units and proj4
self._units = None
self._proj4_str = getproj4(self._epsg)
self.crs = crs(epsg=value)
elif key == "prj":
super(SpatialReference, self). \
__setattr__("prj", value)
# translation to proj4 strings in crs class not robust yet
# leave units and proj4 alone for now.
self.crs = crs(prj=value, epsg=self.epsg)
else:
super(SpatialReference, self).__setattr__(key, value)
reset = False
if reset:
self._reset()
def reset(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
return
def _reset(self):
self._xgrid = None
self._ygrid = None
self._ycentergrid = None
self._xcentergrid = None
self._vertices = None
return
@property
def nrow(self):
return self.delc.shape[0]
@property
def ncol(self):
return self.delr.shape[0]
def __eq__(self, other):
if not isinstance(other, SpatialReference):
return False
if other.xul != self.xul:
return False
if other.yul != self.yul:
return False
if other.rotation != self.rotation:
return False
if other.proj4_str != self.proj4_str:
return False
return True
@classmethod
def from_namfile(cls, namefile):
attribs = SpatialReference.attribs_from_namfile_header(namefile)
try:
attribs.pop("start_datetime")
except:
pass
return SpatialReference(**attribs)
@classmethod
def from_gridspec(cls, gridspec_file, lenuni=0):
f = open(gridspec_file, 'r')
raw = f.readline().strip().split()
nrow = int(raw[0])
ncol = int(raw[1])
raw = f.readline().strip().split()
xul, yul, rot = float(raw[0]), float(raw[1]), float(raw[2])
delr = []
j = 0
while j < ncol:
raw = f.readline().strip().split()
for r in raw:
if '*' in r:
rraw = r.split('*')
for n in range(int(rraw[0])):
delr.append(float(rraw[1]))
j += 1
else:
delr.append(float(r))
j += 1
delc = []
i = 0
while i < nrow:
raw = f.readline().strip().split()
for r in raw:
if '*' in r:
rraw = r.split('*')
for n in range(int(rraw[0])):
delc.append(float(rraw[1]))
i += 1
else:
delc.append(float(r))
i += 1
f.close()
return cls(np.array(delr), np.array(delc),
lenuni, xul=xul, yul=yul, rotation=rot)
@property
def attribute_dict(self):
return {"xul": self.xul, "yul": self.yul, "rotation": self.rotation,
"proj4_str": self.proj4_str}
def set_spatialreference(self, xul=None, yul=None, xll=None, yll=None,
rotation=0.0):
"""
set spatial reference - can be called from model instance
"""
if xul is not None and xll is not None:
msg = ('Both xul and xll entered. Please enter either xul, yul or '
'xll, yll.')
raise ValueError(msg)
if yul is not None and yll is not None:
msg = ('Both yul and yll entered. Please enter either xul, yul or '
'xll, yll.')
raise ValueError(msg)
# set the origin priority based on the left corner specified
# (the other left corner will be calculated). If none are specified
# then default to upper left
if xul is None and yul is None and xll is None and yll is None:
self.origin_loc = 'ul'
xul = 0.
yul = self.delc.sum()
elif xll is not None:
self.origin_loc = 'll'
else:
self.origin_loc = 'ul'
self.rotation = rotation
self._xll = xll if xll is not None else 0.
self._yll = yll if yll is not None else 0.
self._xul = xul if xul is not None else 0.
self._yul = yul if yul is not None else 0.
# self.set_origin(xul, yul, xll, yll)
return
def __repr__(self):
s = "xul:{0:<.10G}; yul:{1:<.10G}; rotation:{2:<G}; ". \
format(self.xul, self.yul, self.rotation)
s += "proj4_str:{0}; ".format(self.proj4_str)
s += "units:{0}; ".format(self.units)
s += "lenuni:{0}; ".format(self.lenuni)
s += "length_multiplier:{}".format(self.length_multiplier)
return s
def set_origin(self, xul=None, yul=None, xll=None, yll=None):
if self.origin_loc == 'll':
# calculate coords for upper left corner
self._xll = xll if xll is not None else 0.
self.yll = yll if yll is not None else 0.
self.xul = self._xll + (np.sin(self.theta) * self.yedge[0] *
self.length_multiplier)
self.yul = self.yll + (np.cos(self.theta) * self.yedge[0] *
self.length_multiplier)
if self.origin_loc == 'ul':
# calculate coords for lower left corner
self.xul = xul if xul is not None else 0.
self.yul = yul if yul is not None else 0.
self._xll = self.xul - (np.sin(self.theta) * self.yedge[0] *
self.length_multiplier)
self.yll = self.yul - (np.cos(self.theta) * self.yedge[0] *
self.length_multiplier)
self._reset()
return
@property
def theta(self):
return -self.rotation * np.pi / 180.
@property
def xedge(self):
return self.get_xedge_array()
@property
def yedge(self):
return self.get_yedge_array()
@property
def xgrid(self):
if self._xgrid is None:
self._set_xygrid()
return self._xgrid
@property
def ygrid(self):
if self._ygrid is None:
self._set_xygrid()
return self._ygrid
@property
def xcenter(self):
return self.get_xcenter_array()
@property
def ycenter(self):
return self.get_ycenter_array()
@property
def ycentergrid(self):
if self._ycentergrid is None:
self._set_xycentergrid()
return self._ycentergrid
@property
def xcentergrid(self):
if self._xcentergrid is None:
self._set_xycentergrid()
return self._xcentergrid
def _set_xycentergrid(self):
self._xcentergrid, self._ycentergrid = np.meshgrid(self.xcenter,
self.ycenter)
self._xcentergrid, self._ycentergrid = self.transform(
self._xcentergrid,
self._ycentergrid)
def _set_xygrid(self):
self._xgrid, self._ygrid = np.meshgrid(self.xedge, self.yedge)
self._xgrid, self._ygrid = self.transform(self._xgrid, self._ygrid)
@staticmethod
def rotate(x, y, theta, xorigin=0., yorigin=0.):
"""
Given x and y array-like values calculate the rotation about an
arbitrary origin and then return the rotated coordinates. theta is in
degrees.
"""
# jwhite changed on Oct 11 2016 - rotation is now positive CCW
# theta = -theta * np.pi / 180.
theta = theta * np.pi / 180.
xrot = xorigin + np.cos(theta) * (x - xorigin) - np.sin(theta) * \
(y - yorigin)
yrot = yorigin + np.sin(theta) * (x - xorigin) + np.cos(theta) * \
(y - yorigin)
return xrot, yrot
def transform(self, x, y, inverse=False):
"""
Given x and y array-like values, apply rotation, scale and offset,
to convert them from model coordinates to real-world coordinates.
"""
if isinstance(x, list):
x = np.array(x)
y = np.array(y)
if not np.isscalar(x):
x, y = x.copy(), y.copy()
if not inverse:
x *= self.length_multiplier
y *= self.length_multiplier
x += self.xll
y += self.yll
x, y = SpatialReference.rotate(x, y, theta=self.rotation,
xorigin=self.xll, yorigin=self.yll)
else:
x, y = SpatialReference.rotate(x, y, -self.rotation,
self.xll, self.yll)
x -= self.xll
y -= self.yll
x /= self.length_multiplier
y /= self.length_multiplier
return x, y
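    # Illustrative usage note (not part of the original flopy source): the
    # forward transform scales model coordinates by length_multiplier, offsets
    # them by (xll, yll), and rotates CCW about the lower-left corner, so a
    # doctest-style sketch (values assumed purely for illustration) is:
    #
    #     >>> sr.transform(0., 0.)               # model origin -> (sr.xll, sr.yll)
    #     >>> x, y = sr.transform(100., 50.)     # model -> real-world coordinates
    #     >>> sr.transform(x, y, inverse=True)   # -> back to (100., 50.)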
def get_extent(self):
"""
Get the extent of the rotated and offset grid
Return (xmin, xmax, ymin, ymax)
"""
x0 = self.xedge[0]
x1 = self.xedge[-1]
y0 = self.yedge[0]
y1 = self.yedge[-1]
# upper left point
x0r, y0r = self.transform(x0, y0)
# upper right point
x1r, y1r = self.transform(x1, y0)
# lower right point
x2r, y2r = self.transform(x1, y1)
# lower left point
x3r, y3r = self.transform(x0, y1)
xmin = min(x0r, x1r, x2r, x3r)
xmax = max(x0r, x1r, x2r, x3r)
ymin = min(y0r, y1r, y2r, y3r)
ymax = max(y0r, y1r, y2r, y3r)
return (xmin, xmax, ymin, ymax)
def get_grid_lines(self):
"""
Get the grid lines as a list
"""
xmin = self.xedge[0]
xmax = self.xedge[-1]
ymin = self.yedge[-1]
ymax = self.yedge[0]
lines = []
# Vertical lines
for j in range(self.ncol + 1):
x0 = self.xedge[j]
x1 = x0
y0 = ymin
y1 = ymax
x0r, y0r = self.transform(x0, y0)
x1r, y1r = self.transform(x1, y1)
lines.append([(x0r, y0r), (x1r, y1r)])
# horizontal lines
for i in range(self.nrow + 1):
x0 = xmin
x1 = xmax
y0 = self.yedge[i]
y1 = y0
x0r, y0r = self.transform(x0, y0)
x1r, y1r = self.transform(x1, y1)
lines.append([(x0r, y0r), (x1r, y1r)])
return lines
def get_grid_line_collection(self, **kwargs):
"""
Get a LineCollection of the grid
"""
from flopy.plot import ModelMap
map = ModelMap(sr=self)
lc = map.plot_grid(**kwargs)
return lc
def get_xcenter_array(self):
"""
Return a numpy one-dimensional float array that has the cell center x
coordinate for every column in the grid in model space - not offset or rotated.
"""
x = np.add.accumulate(self.delr) - 0.5 * self.delr
return x
def get_ycenter_array(self):
"""
        Return a numpy one-dimensional float array that has the cell center y
        coordinate for every row in the grid in model space - not offset or rotated.
"""
Ly = np.add.reduce(self.delc)
y = Ly - (np.add.accumulate(self.delc) - 0.5 *
self.delc)
return y
def get_xedge_array(self):
"""
Return a numpy one-dimensional float array that has the cell edge x
coordinates for every column in the grid in model space - not offset
or rotated. Array is of size (ncol + 1)
"""
xedge = np.concatenate(([0.], np.add.accumulate(self.delr)))
return xedge
def get_yedge_array(self):
"""
Return a numpy one-dimensional float array that has the cell edge y
coordinates for every row in the grid in model space - not offset or
rotated. Array is of size (nrow + 1)
"""
length_y = np.add.reduce(self.delc)
yedge = np.concatenate(([length_y], length_y -
np.add.accumulate(self.delc)))
return yedge
def write_gridSpec(self, filename):
""" write a PEST-style grid specification file
"""
f = open(filename, 'w')
f.write(
"{0:10d} {1:10d}\n".format(self.delc.shape[0], self.delr.shape[0]))
f.write("{0:15.6E} {1:15.6E} {2:15.6E}\n".format(
self.xul * self.length_multiplier,
self.yul * self.length_multiplier,
self.rotation))
for r in self.delr:
f.write("{0:15.6E} ".format(r))
f.write('\n')
for c in self.delc:
f.write("{0:15.6E} ".format(c))
f.write('\n')
return
def write_shapefile(self, filename='grid.shp', epsg=None, prj=None):
"""Write a shapefile of the grid with just the row and column attributes"""
from ..export.shapefile_utils import write_grid_shapefile2
if epsg is None and prj is None:
epsg = self.epsg
write_grid_shapefile2(filename, self, array_dict={}, nan_val=-1.0e9,
epsg=epsg, prj=prj)
def get_vertices(self, i, j):
"""Get vertices for a single cell or sequence if i, j locations."""
pts = []
xgrid, ygrid = self.xgrid, self.ygrid
pts.append([xgrid[i, j], ygrid[i, j]])
pts.append([xgrid[i + 1, j], ygrid[i + 1, j]])
pts.append([xgrid[i + 1, j + 1], ygrid[i + 1, j + 1]])
pts.append([xgrid[i, j + 1], ygrid[i, j + 1]])
pts.append([xgrid[i, j], ygrid[i, j]])
if np.isscalar(i):
return pts
else:
vrts = np.array(pts).transpose([2, 0, 1])
return [v.tolist() for v in vrts]
def get_rc(self, x, y):
return self.get_ij(x, y)
def get_ij(self, x, y):
"""Return the row and column of a point or sequence of points
in real-world coordinates.
Parameters
----------
x : scalar or sequence of x coordinates
y : scalar or sequence of y coordinates
Returns
-------
i : row or sequence of rows (zero-based)
j : column or sequence of columns (zero-based)
"""
if np.isscalar(x):
c = (np.abs(self.xcentergrid[0] - x)).argmin()
r = (np.abs(self.ycentergrid[:, 0] - y)).argmin()
else:
xcp = np.array([self.xcentergrid[0]] * (len(x)))
ycp = np.array([self.ycentergrid[:, 0]] * (len(x)))
c = (np.abs(xcp.transpose() - x)).argmin(axis=0)
r = (np.abs(ycp.transpose() - y)).argmin(axis=0)
return r, c
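    # Illustrative usage note (assumed, not from the original source): get_ij
    # inverts the coordinate transform by picking the nearest cell centers, so
    # a round trip through the center grids recovers the indices, e.g.
    #
    #     >>> i, j = sr.get_ij(sr.xcentergrid[2, 3], sr.ycentergrid[2, 3])
    #     >>> (i, j)   # -> (2, 3), zero-based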
def get_grid_map_plotter(self, **kwargs):
"""
Create a QuadMesh plotting object for this grid
Returns
-------
quadmesh : matplotlib.collections.QuadMesh
"""
from matplotlib.collections import QuadMesh
verts = np.vstack((self.xgrid.flatten(), self.ygrid.flatten())).T
qm = QuadMesh(self.ncol, self.nrow, verts)
return qm
def plot_array(self, a, ax=None, **kwargs):
"""
Create a QuadMesh plot of the specified array using pcolormesh
Parameters
----------
a : np.ndarray
Returns
-------
quadmesh : matplotlib.collections.QuadMesh
"""
import matplotlib.pyplot as plt
if ax is None:
ax = plt.gca()
qm = ax.pcolormesh(self.xgrid, self.ygrid, a, **kwargs)
return qm
def export_array(self, filename, a, nodata=-9999,
fieldname='value',
**kwargs):
"""Write a numpy array to Arc Ascii grid
or shapefile with the model reference.
Parameters
----------
filename : str
Path of output file. Export format is determined by
file extension.
'.asc' Arc Ascii grid
'.tif' GeoTIFF (requires rasterio package)
'.shp' Shapefile
a : 2D numpy.ndarray
Array to export
nodata : scalar
Value to assign to np.nan entries (default -9999)
fieldname : str
Attribute field name for array values (shapefile export only).
(default 'values')
kwargs:
keyword arguments to np.savetxt (ascii)
rasterio.open (GeoTIFF)
or flopy.export.shapefile_utils.write_grid_shapefile2
Notes
-----
        Rotated grids will either be unrotated prior to export,
using scipy.ndimage.rotate (Arc Ascii format) or rotation will be
included in their transform property (GeoTiff format). In either case
the pixels will be displayed in the (unrotated) projected geographic coordinate system,
so the pixels will no longer align exactly with the model grid
(as displayed from a shapefile, for example). A key difference between
Arc Ascii and GeoTiff (besides disk usage) is that the
unrotated Arc Ascii will have a different grid size, whereas the GeoTiff
will have the same number of rows and pixels as the original.
"""
if filename.lower().endswith(".asc"):
            if len(np.unique(self.delr)) != 1 or \
                    len(np.unique(self.delc)) != 1 or \
                    self.delr[0] != self.delc[0]:
raise ValueError('Arc ascii arrays require a uniform grid.')
xll, yll = self.xll, self.yll
cellsize = self.delr[0] * self.length_multiplier
fmt = kwargs.get('fmt', '%.18e')
a = a.copy()
a[np.isnan(a)] = nodata
if self.rotation != 0:
try:
from scipy.ndimage import rotate
a = rotate(a, self.rotation, cval=nodata)
height_rot, width_rot = a.shape
xmin, ymin, xmax, ymax = self.bounds
dx = (xmax - xmin) / width_rot
dy = (ymax - ymin) / height_rot
cellsize = np.max((dx, dy))
# cellsize = np.cos(np.radians(self.rotation)) * cellsize
xll, yll = xmin, ymin
except ImportError:
print('scipy package required to export rotated grid.')
pass
filename = '.'.join(
filename.split('.')[:-1]) + '.asc' # enforce .asc ending
nrow, ncol = a.shape
a[np.isnan(a)] = nodata
txt = 'ncols {:d}\n'.format(ncol)
txt += 'nrows {:d}\n'.format(nrow)
txt += 'xllcorner {:f}\n'.format(xll)
txt += 'yllcorner {:f}\n'.format(yll)
txt += 'cellsize {}\n'.format(cellsize)
            # ensure that the nodata value is written with the same format
            # as the data values
txt += 'NODATA_value {}\n'.format(fmt) % (nodata)
with open(filename, 'w') as output:
output.write(txt)
with open(filename, 'ab') as output:
np.savetxt(output, a, **kwargs)
print('wrote {}'.format(filename))
elif filename.lower().endswith(".tif"):
            if len(np.unique(self.delr)) != 1 or \
                    len(np.unique(self.delc)) != 1 or \
                    self.delr[0] != self.delc[0]:
                raise ValueError('GeoTIFF export requires a uniform grid.')
try:
import rasterio
from rasterio import Affine
except:
print('GeoTIFF export requires the rasterio package.')
return
dxdy = self.delc[0] * self.length_multiplier
trans = Affine.translation(self.xul, self.yul) * \
Affine.angrot(self.rotation) * \
Affine.scale(dxdy, -dxdy)
# third dimension is the number of bands
a = a.copy()
if len(a.shape) == 2:
a = np.reshape(a, (1, a.shape[0], a.shape[1]))
if a.dtype.name == 'int64':
a = a.astype('int32')
dtype = rasterio.int32
elif a.dtype.name == 'int32':
dtype = rasterio.int32
elif a.dtype.name == 'float64':
dtype = rasterio.float64
elif a.dtype.name == 'float32':
dtype = rasterio.float32
else:
msg = 'ERROR: invalid dtype "{}"'.format(a.dtype.name)
raise TypeError(msg)
meta = {'count': a.shape[0],
'width': a.shape[2],
'height': a.shape[1],
'nodata': nodata,
'dtype': dtype,
'driver': 'GTiff',
'crs': self.proj4_str,
'transform': trans
}
meta.update(kwargs)
with rasterio.open(filename, 'w', **meta) as dst:
dst.write(a)
print('wrote {}'.format(filename))
elif filename.lower().endswith(".shp"):
from ..export.shapefile_utils import write_grid_shapefile2
epsg = kwargs.get('epsg', None)
prj = kwargs.get('prj', None)
if epsg is None and prj is None:
epsg = self.epsg
write_grid_shapefile2(filename, self, array_dict={fieldname: a},
nan_val=nodata,
epsg=epsg, prj=prj)
def export_contours(self, filename, contours,
fieldname='level', epsg=None, prj=None,
**kwargs):
"""Convert matplotlib contour plot object to shapefile.
Parameters
----------
filename : str
path of output shapefile
contours : matplotlib.contour.QuadContourSet or list of them
(object returned by matplotlib.pyplot.contour)
epsg : int
EPSG code. See https://www.epsg-registry.org/ or spatialreference.org
prj : str
Existing projection file to be used with new shapefile.
**kwargs : key-word arguments to flopy.export.shapefile_utils.recarray2shp
Returns
-------
df : dataframe of shapefile contents
"""
from flopy.utils.geometry import LineString
from flopy.export.shapefile_utils import recarray2shp
if not isinstance(contours, list):
contours = [contours]
if epsg is None:
epsg = self._epsg
if prj is None:
prj = self.proj4_str
geoms = []
level = []
for ctr in contours:
levels = ctr.levels
for i, c in enumerate(ctr.collections):
paths = c.get_paths()
geoms += [LineString(p.vertices) for p in paths]
level += list(np.ones(len(paths)) * levels[i])
# convert the dictionary to a recarray
ra = np.array(level,
dtype=[(fieldname, float)]).view(np.recarray)
recarray2shp(ra, geoms, filename, epsg, prj, **kwargs)
def export_array_contours(self, filename, a,
fieldname='level',
interval=None,
levels=None,
maxlevels=1000,
epsg=None,
prj=None,
**kwargs):
"""Contour an array using matplotlib; write shapefile of contours.
Parameters
----------
filename : str
Path of output file with '.shp' extension.
a : 2D numpy array
Array to contour
epsg : int
EPSG code. See https://www.epsg-registry.org/ or spatialreference.org
prj : str
Existing projection file to be used with new shapefile.
**kwargs : key-word arguments to flopy.export.shapefile_utils.recarray2shp
"""
import matplotlib.pyplot as plt
if epsg is None:
epsg = self._epsg
if prj is None:
prj = self.proj4_str
if interval is not None:
min = np.nanmin(a)
max = np.nanmax(a)
nlevels = np.round(np.abs(max - min) / interval, 2)
msg = '{:.0f} levels at interval of {} > maxlevels={}'.format(
nlevels,
interval,
maxlevels)
assert nlevels < maxlevels, msg
levels = np.arange(min, max, interval)
fig, ax = plt.subplots()
ctr = self.contour_array(ax, a, levels=levels)
self.export_contours(filename, ctr, fieldname, epsg, prj, **kwargs)
plt.close()
def contour_array(self, ax, a, **kwargs):
"""
Create a QuadMesh plot of the specified array using pcolormesh
Parameters
----------
ax : matplotlib.axes.Axes
ax to add the contours
a : np.ndarray
array to contour
Returns
-------
contour_set : ContourSet
"""
from flopy.plot import ModelMap
kwargs['ax'] = ax
map = ModelMap(sr=self)
contour_set = map.contour_array(a=a, **kwargs)
return contour_set
@property
def vertices(self):
"""Returns a list of vertices for"""
if self._vertices is None:
self._set_vertices()
return self._vertices
def _set_vertices(self):
"""populate vertices for the whole grid"""
jj, ii = np.meshgrid(range(self.ncol), range(self.nrow))
jj, ii = jj.ravel(), ii.ravel()
self._vertices = self.get_vertices(ii, jj)
# vrts = np.array(self.get_vertices(ii, jj)).transpose([2, 0, 1])
# self._vertices = [v.tolist() for v in vrts] # conversion to lists
"""
code above is 3x faster
xgrid, ygrid = self.xgrid, self.ygrid
ij = list(map(list, zip(xgrid[:-1, :-1].ravel(), ygrid[:-1, :-1].ravel())))
i1j = map(list, zip(xgrid[1:, :-1].ravel(), ygrid[1:, :-1].ravel()))
i1j1 = map(list, zip(xgrid[1:, 1:].ravel(), ygrid[1:, 1:].ravel()))
ij1 = map(list, zip(xgrid[:-1, 1:].ravel(), ygrid[:-1, 1:].ravel()))
self._vertices = np.array(map(list, zip(ij, i1j, i1j1, ij1, ij)))
"""
def interpolate(self, a, xi, method='nearest'):
"""
Use the griddata method to interpolate values from an array onto the
points defined in xi. For any values outside of the grid, use
'nearest' to find a value for them.
Parameters
----------
a : numpy.ndarray
array to interpolate from. It must be of size nrow, ncol
xi : numpy.ndarray
array containing x and y point coordinates of size (npts, 2). xi
also works with broadcasting so that if a is a 2d array, then
xi can be passed in as (xgrid, ygrid).
method : {'linear', 'nearest', 'cubic'}
method to use for interpolation (default is 'nearest')
Returns
-------
b : numpy.ndarray
array of size (npts)
"""
try:
from scipy.interpolate import griddata
except:
print('scipy not installed\ntry pip install scipy')
return None
# Create a 2d array of points for the grid centers
points = np.empty((self.ncol * self.nrow, 2))
points[:, 0] = self.xcentergrid.flatten()
points[:, 1] = self.ycentergrid.flatten()
# Use the griddata function to interpolate to the xi points
b = griddata(points, a.flatten(), xi, method=method, fill_value=np.nan)
# if method is linear or cubic, then replace nan's with a value
# interpolated using nearest
if method != 'nearest':
bn = griddata(points, a.flatten(), xi, method='nearest')
idx = np.isnan(b)
b[idx] = bn[idx]
return b
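    # Illustrative usage note (assumed, not from the original source):
    # interpolate() samples a (nrow, ncol) array at arbitrary real-world
    # points and falls back to nearest-neighbor values where linear/cubic
    # interpolation returns NaN, e.g.
    #
    #     >>> pts = np.array([[x0, y0], [x1, y1]])      # hypothetical points
    #     >>> vals = sr.interpolate(head, pts, method='linear')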
def get_2d_vertex_connectivity(self):
"""
Create the cell 2d vertices array and the iverts index array. These
are the same form as the ones used to instantiate an unstructured
spatial reference.
Returns
-------
verts : ndarray
array of x and y coordinates for the grid vertices
iverts : list
a list with a list of vertex indices for each cell in clockwise
order starting with the upper left corner
"""
x = self.xgrid.flatten()
y = self.ygrid.flatten()
nrowvert = self.nrow + 1
ncolvert = self.ncol + 1
npoints = nrowvert * ncolvert
verts = np.empty((npoints, 2), dtype=np.float)
verts[:, 0] = x
verts[:, 1] = y
iverts = []
for i in range(self.nrow):
for j in range(self.ncol):
iv1 = i * ncolvert + j # upper left point number
iv2 = iv1 + 1
iv4 = (i + 1) * ncolvert + j
iv3 = iv4 + 1
iverts.append([iv1, iv2, iv3, iv4])
return verts, iverts
def get_3d_shared_vertex_connectivity(self, nlay, botm, ibound=None):
# get the x and y points for the grid
x = self.xgrid.flatten()
y = self.ygrid.flatten()
# set the size of the vertex grid
nrowvert = self.nrow + 1
ncolvert = self.ncol + 1
nlayvert = nlay + 1
nrvncv = nrowvert * ncolvert
npoints = nrvncv * nlayvert
# create and fill a 3d points array for the grid
verts = np.empty((npoints, 3), dtype=np.float)
verts[:, 0] = np.tile(x, nlayvert)
verts[:, 1] = np.tile(y, nlayvert)
istart = 0
istop = nrvncv
for k in range(nlay + 1):
verts[istart:istop, 2] = self.interpolate(botm[k],
verts[istart:istop, :2],
method='linear')
istart = istop
istop = istart + nrvncv
# create the list of points comprising each cell. points must be
        # listed in a specific way according to vtk requirements.
iverts = []
for k in range(nlay):
koffset = k * nrvncv
for i in range(self.nrow):
for j in range(self.ncol):
if ibound is not None:
if ibound[k, i, j] == 0:
continue
iv1 = i * ncolvert + j + koffset
iv2 = iv1 + 1
iv4 = (i + 1) * ncolvert + j + koffset
iv3 = iv4 + 1
iverts.append([iv4 + nrvncv, iv3 + nrvncv,
iv1 + nrvncv, iv2 + nrvncv,
iv4, iv3, iv1, iv2])
# renumber and reduce the vertices if ibound_filter
if ibound is not None:
# go through the vertex list and mark vertices that are used
ivertrenum = np.zeros(npoints, dtype=np.int)
for vlist in iverts:
for iv in vlist:
# mark vertices that are actually used
ivertrenum[iv] = 1
# renumber vertices that are used, skip those that are not
inum = 0
for i in range(npoints):
if ivertrenum[i] > 0:
inum += 1
ivertrenum[i] = inum
ivertrenum -= 1
# reassign the vertex list using the new vertex numbers
iverts2 = []
for vlist in iverts:
vlist2 = []
for iv in vlist:
vlist2.append(ivertrenum[iv])
iverts2.append(vlist2)
iverts = iverts2
idx = np.where(ivertrenum >= 0)
verts = verts[idx]
return verts, iverts
def get_3d_vertex_connectivity(self, nlay, top, bot, ibound=None):
if ibound is None:
ncells = nlay * self.nrow * self.ncol
ibound = np.ones((nlay, self.nrow, self.ncol), dtype=np.int)
else:
ncells = (ibound != 0).sum()
npoints = ncells * 8
verts = np.empty((npoints, 3), dtype=np.float)
iverts = []
ipoint = 0
for k in range(nlay):
for i in range(self.nrow):
for j in range(self.ncol):
if ibound[k, i, j] == 0:
continue
ivert = []
pts = self.get_vertices(i, j)
pt0, pt1, pt2, pt3, pt0 = pts
z = bot[k, i, j]
verts[ipoint, 0:2] = np.array(pt1)
verts[ipoint, 2] = z
ivert.append(ipoint)
ipoint += 1
verts[ipoint, 0:2] = np.array(pt2)
verts[ipoint, 2] = z
ivert.append(ipoint)
ipoint += 1
verts[ipoint, 0:2] = np.array(pt0)
verts[ipoint, 2] = z
ivert.append(ipoint)
ipoint += 1
verts[ipoint, 0:2] = np.array(pt3)
verts[ipoint, 2] = z
ivert.append(ipoint)
ipoint += 1
z = top[k, i, j]
verts[ipoint, 0:2] = np.array(pt1)
verts[ipoint, 2] = z
ivert.append(ipoint)
ipoint += 1
verts[ipoint, 0:2] = np.array(pt2)
verts[ipoint, 2] = z
ivert.append(ipoint)
ipoint += 1
verts[ipoint, 0:2] = np.array(pt0)
verts[ipoint, 2] = z
ivert.append(ipoint)
ipoint += 1
verts[ipoint, 0:2] = np.array(pt3)
verts[ipoint, 2] = z
ivert.append(ipoint)
ipoint += 1
iverts.append(ivert)
return verts, iverts
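# --- Illustrative example (not part of the original flopy module) -----------
# A minimal, self-contained sketch of the rotation convention used by
# SpatialReference.rotate(): a positive angle is a counter-clockwise rotation
# in degrees about an arbitrary origin. The function below is never called at
# import time and its numeric values are assumptions chosen for illustration.
def _example_rotate_convention():
    """Rotate the point (1, 0) by 90 degrees CCW about the origin."""
    xrot, yrot = SpatialReference.rotate(1., 0., theta=90.,
                                         xorigin=0., yorigin=0.)
    # xrot is ~0.0 and yrot is ~1.0, confirming the CCW convention
    return xrot, yrot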
class SpatialReferenceUnstructured(SpatialReference):
"""
a class to locate an unstructured model grid in x-y space
Parameters
----------
verts : ndarray
2d array of x and y points.
iverts : list of lists
should be of len(ncells) with a list of vertex numbers for each cell
ncpl : ndarray
array containing the number of cells per layer. ncpl.sum() must be
equal to the total number of cells in the grid.
layered : boolean
        flag to indicate that the grid is layered. In this case, the vertices
        define the grid for a single layer, and all layers use this same grid.
        In this case the ncpl value for each layer must equal len(iverts).
        If not layered, then verts and iverts are specified for all cells and
        all layers in the grid. In this case, ncpl.sum() must equal
len(iverts).
lenuni : int
the length units flag from the discretization package
proj4_str: str
a PROJ4 string that identifies the grid in space. warning: case
sensitive!
units : string
Units for the grid. Must be either feet or meters
epsg : int
EPSG code that identifies the grid in space. Can be used in lieu of
proj4. PROJ4 attribute will auto-populate if there is an internet
connection(via get_proj4 method).
See https://www.epsg-registry.org/ or spatialreference.org
length_multiplier : float
multiplier to convert model units to spatial reference units.
delr and delc above will be multiplied by this value. (default=1.)
Attributes
----------
xcenter : ndarray
array of x cell centers
ycenter : ndarray
array of y cell centers
Notes
-----
"""
def __init__(self, xc, yc, verts, iverts, ncpl, layered=True, lenuni=1,
proj4_str="EPSG:4326", epsg=None, units=None,
length_multiplier=1.):
warnings.warn("SpatialReferenceUnstructured has been deprecated. "
"Use VertexGrid instead.",
category=DeprecationWarning)
self.xc = xc
self.yc = yc
self.verts = verts
self.iverts = iverts
self.ncpl = ncpl
self.layered = layered
self._lenuni = lenuni
self._proj4_str = proj4_str
self._epsg = epsg
if epsg is not None:
self._proj4_str = getproj4(epsg)
self.supported_units = ["feet", "meters"]
self._units = units
self._length_multiplier = length_multiplier
# set defaults
self._xul = 0.
self._yul = 0.
self.rotation = 0.
if self.layered:
assert all([n == len(iverts) for n in ncpl])
assert self.xc.shape[0] == self.ncpl[0]
assert self.yc.shape[0] == self.ncpl[0]
else:
msg = ('Length of iverts must equal ncpl.sum '
'({} {})'.format(len(iverts), ncpl))
assert len(iverts) == ncpl.sum(), msg
assert self.xc.shape[0] == self.ncpl.sum()
assert self.yc.shape[0] == self.ncpl.sum()
return
@property
def grid_type(self):
return "unstructured"
def write_shapefile(self, filename='grid.shp'):
"""
Write shapefile of the grid
Parameters
----------
filename : string
filename for shapefile
Returns
-------
"""
raise NotImplementedError()
return
def write_gridSpec(self, filename):
"""
Write a PEST-style grid specification file
Parameters
----------
filename : string
filename for grid specification file
Returns
-------
"""
raise NotImplementedError()
return
@classmethod
def from_gridspec(cls, fname):
"""
        Create a new SpatialReferenceUnstructured grid from a PEST
grid specification file
Parameters
----------
fname : string
File name for grid specification file
Returns
-------
sru : flopy.utils.reference.SpatialReferenceUnstructured
"""
raise NotImplementedError()
return
@classmethod
def from_argus_export(cls, fname, nlay=1):
"""
Create a new SpatialReferenceUnstructured grid from an Argus One
Trimesh file
Parameters
----------
fname : string
File name
nlay : int
Number of layers to create
Returns
-------
sru : flopy.utils.reference.SpatialReferenceUnstructured
"""
from ..utils.geometry import get_polygon_centroid
f = open(fname, 'r')
line = f.readline()
ll = line.split()
ncells, nverts = ll[0:2]
ncells = int(ncells)
nverts = int(nverts)
verts = np.empty((nverts, 2), dtype=np.float)
xc = np.empty((ncells), dtype=np.float)
yc = np.empty((ncells), dtype=np.float)
# read the vertices
f.readline()
for ivert in range(nverts):
line = f.readline()
ll = line.split()
c, iv, x, y = ll[0:4]
verts[ivert, 0] = x
verts[ivert, 1] = y
# read the cell information and create iverts, xc, and yc
iverts = []
for icell in range(ncells):
line = f.readline()
ll = line.split()
ivlist = []
for ic in ll[2:5]:
ivlist.append(int(ic) - 1)
if ivlist[0] != ivlist[-1]:
ivlist.append(ivlist[0])
iverts.append(ivlist)
xc[icell], yc[icell] = get_polygon_centroid(verts[ivlist, :])
# close file and return spatial reference
f.close()
return cls(xc, yc, verts, iverts, np.array(nlay * [len(iverts)]))
def __setattr__(self, key, value):
super(SpatialReference, self).__setattr__(key, value)
return
def get_extent(self):
"""
Get the extent of the grid
Returns
-------
extent : tuple
min and max grid coordinates
"""
xmin = self.verts[:, 0].min()
xmax = self.verts[:, 0].max()
ymin = self.verts[:, 1].min()
ymax = self.verts[:, 1].max()
return (xmin, xmax, ymin, ymax)
def get_xcenter_array(self):
"""
Return a numpy one-dimensional float array that has the cell center x
coordinate for every cell in the grid in model space - not offset or
rotated.
"""
return self.xc
def get_ycenter_array(self):
"""
        Return a numpy one-dimensional float array that has the cell center y
        coordinate for every cell in the grid in model space - not offset or
        rotated.
"""
return self.yc
def plot_array(self, a, ax=None):
"""
        Create a patch plot of the specified array using plot_cvfd
        Parameters
        ----------
        a : np.ndarray
        Returns
        -------
        patch_collection : matplotlib.collections.PatchCollection
"""
from ..plot import plotutil
patch_collection = plotutil.plot_cvfd(self.verts, self.iverts, a=a,
ax=ax)
return patch_collection
def get_grid_line_collection(self, **kwargs):
"""
Get a patch collection of the grid
"""
from ..plot import plotutil
edgecolor = kwargs.pop('colors')
pc = plotutil.cvfd_to_patch_collection(self.verts, self.iverts)
pc.set(facecolor='none')
pc.set(edgecolor=edgecolor)
return pc
def contour_array(self, ax, a, **kwargs):
"""
        Contour the specified array using tricontour
Parameters
----------
ax : matplotlib.axes.Axes
ax to add the contours
a : np.ndarray
array to contour
Returns
-------
contour_set : ContourSet
"""
contour_set = ax.tricontour(self.xcenter, self.ycenter,
a, **kwargs)
return contour_set
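# --- Illustrative example (not part of the original flopy module) -----------
# A minimal sketch of how an unstructured reference might be assembled for a
# two-triangle grid covering the unit square. All coordinates below are
# assumed values for illustration only; the function is never called at
# import time.
def _example_unstructured_reference():
    verts = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
    iverts = [[0, 1, 2], [0, 2, 3]]        # two triangles sharing an edge
    xc = np.array([2. / 3., 1. / 3.])      # approximate cell centers
    yc = np.array([1. / 3., 2. / 3.])
    ncpl = np.array([2])                   # one layer with two cells
    sru = SpatialReferenceUnstructured(xc, yc, verts, iverts, ncpl)
    return sru.get_extent()                # -> (0.0, 1.0, 0.0, 1.0)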
class TemporalReference(object):
"""For now, just a container to hold start time and time units files
outside of DIS package."""
defaults = {'itmuni': 4,
'start_datetime': '01-01-1970'}
itmuni_values = {'undefined': 0,
'seconds': 1,
'minutes': 2,
'hours': 3,
'days': 4,
'years': 5}
itmuni_text = {v: k for k, v in itmuni_values.items()}
def __init__(self, itmuni=4, start_datetime=None):
self.itmuni = itmuni
self.start_datetime = start_datetime
@property
def model_time_units(self):
return self.itmuni_text[self.itmuni]
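# --- Illustrative example (not part of the original flopy module) -----------
# TemporalReference is only a small container; a hedged usage sketch with
# assumed values (never called at import time):
def _example_temporal_reference():
    tr = TemporalReference(itmuni=4, start_datetime='01-01-2000')
    return tr.model_time_units             # -> 'days'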
class epsgRef:
"""Sets up a local database of text representations of coordinate reference
systems, keyed by EPSG code.
The database is epsgref.json, located in the user's data directory. If
optional 'appdirs' package is available, this is in the platform-dependent
user directory, otherwise in the user's 'HOME/.flopy' directory.
"""
def __init__(self):
warnings.warn(
"epsgRef has been deprecated.", category=DeprecationWarning)
try:
from appdirs import user_data_dir
except ImportError:
user_data_dir = None
if user_data_dir:
datadir = user_data_dir('flopy')
else:
# if appdirs is not installed, use user's home directory
datadir = os.path.join(os.path.expanduser('~'), '.flopy')
if not os.path.isdir(datadir):
os.makedirs(datadir)
dbname = 'epsgref.json'
self.location = os.path.join(datadir, dbname)
def to_dict(self):
"""Returns dict with EPSG code integer key, and WKT CRS text"""
data = OrderedDict()
if os.path.exists(self.location):
with open(self.location, 'r') as f:
loaded_data = json.load(f, object_pairs_hook=OrderedDict)
# convert JSON key from str to EPSG integer
for key, value in loaded_data.items():
try:
data[int(key)] = value
except ValueError:
data[key] = value
return data
def _write(self, data):
with open(self.location, 'w') as f:
json.dump(data, f, indent=0)
f.write('\n')
def reset(self, verbose=True):
if os.path.exists(self.location):
os.remove(self.location)
if verbose:
print('Resetting {}'.format(self.location))
def add(self, epsg, prj):
"""add an epsg code to epsgref.json"""
data = self.to_dict()
data[epsg] = prj
self._write(data)
def get(self, epsg):
"""returns prj from a epsg code, otherwise None if not found"""
data = self.to_dict()
return data.get(epsg)
def remove(self, epsg):
"""removes an epsg entry from epsgref.json"""
data = self.to_dict()
if epsg in data:
del data[epsg]
self._write(data)
@staticmethod
def show():
ep = epsgRef()
prj = ep.to_dict()
for k, v in prj.items():
print('{}:\n{}\n'.format(k, v))
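# --- Illustrative example (not part of the original flopy module) -----------
# epsgRef caches CRS text keyed by EPSG code in a local epsgref.json file. The
# sketch below assumes write access to the user data directory and uses a
# placeholder WKT string; it is never called at import time.
def _example_epsgref_cache():
    ep = epsgRef()
    ep.add(26915, 'PROJCS["NAD83 / UTM zone 15N", ...]')   # placeholder text
    wkt = ep.get(26915)
    ep.remove(26915)
    return wkt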
class crs(object):
"""Container to parse and store coordinate reference system parameters,
and translate between different formats."""
def __init__(self, prj=None, esri_wkt=None, epsg=None):
self.wktstr = None
if prj is not None:
with open(prj) as input:
self.wktstr = input.read()
elif esri_wkt is not None:
self.wktstr = esri_wkt
elif epsg is not None:
wktstr = getprj(epsg)
if wktstr is not None:
self.wktstr = wktstr
if self.wktstr is not None:
self.parse_wkt()
@property
def crs(self):
"""Dict mapping crs attributes to proj4 parameters"""
proj = None
if self.projcs is not None:
# projection
if 'mercator' in self.projcs.lower():
if 'transverse' in self.projcs.lower() or \
'tm' in self.projcs.lower():
proj = 'tmerc'
else:
proj = 'merc'
elif 'utm' in self.projcs.lower() and \
'zone' in self.projcs.lower():
proj = 'utm'
elif 'stateplane' in self.projcs.lower():
proj = 'lcc'
            elif 'lambert' in self.projcs.lower() and \
                    'conformal' in self.projcs.lower() and \
                    'conic' in self.projcs.lower():
proj = 'lcc'
elif 'albers' in self.projcs.lower():
proj = 'aea'
elif self.projcs is None and self.geogcs is not None:
proj = 'longlat'
# datum
        if 'nad' in self.datum.lower() or \
                ('north' in self.datum.lower() and
                 'america' in self.datum.lower()):
datum = 'nad'
if '83' in self.datum.lower():
datum += '83'
elif '27' in self.datum.lower():
datum += '27'
elif '84' in self.datum.lower():
datum = 'wgs84'
# ellipse
if '1866' in self.spheroid_name:
ellps = 'clrk66'
elif 'grs' in self.spheroid_name.lower():
ellps = 'grs80'
elif 'wgs' in self.spheroid_name.lower():
ellps = 'wgs84'
# prime meridian
pm = self.primem[0].lower()
return {'proj': proj,
'datum': datum,
'ellps': ellps,
'a': self.semi_major_axis,
'rf': self.inverse_flattening,
'lat_0': self.latitude_of_origin,
'lat_1': self.standard_parallel_1,
'lat_2': self.standard_parallel_2,
'lon_0': self.central_meridian,
'k_0': self.scale_factor,
'x_0': self.false_easting,
'y_0': self.false_northing,
'units': self.projcs_unit,
'zone': self.utm_zone}
@property
def grid_mapping_attribs(self):
"""Map parameters for CF Grid Mappings
        http://cfconventions.org/cf-conventions/cf-conventions.html,
Appendix F: Grid Mappings
"""
if self.wktstr is not None:
sp = [p for p in [self.standard_parallel_1,
self.standard_parallel_2]
if p is not None]
sp = sp if len(sp) > 0 else None
proj = self.crs['proj']
names = {'aea': 'albers_conical_equal_area',
'aeqd': 'azimuthal_equidistant',
'laea': 'lambert_azimuthal_equal_area',
'longlat': 'latitude_longitude',
'lcc': 'lambert_conformal_conic',
'merc': 'mercator',
'tmerc': 'transverse_mercator',
'utm': 'transverse_mercator'}
attribs = {'grid_mapping_name': names[proj],
'semi_major_axis': self.crs['a'],
'inverse_flattening': self.crs['rf'],
'standard_parallel': sp,
'longitude_of_central_meridian': self.crs['lon_0'],
'latitude_of_projection_origin': self.crs['lat_0'],
'scale_factor_at_projection_origin': self.crs['k_0'],
'false_easting': self.crs['x_0'],
'false_northing': self.crs['y_0']}
return {k: v for k, v in attribs.items() if v is not None}
@property
def proj4(self):
"""Not implemented yet"""
return None
def parse_wkt(self):
self.projcs = self._gettxt('PROJCS["', '"')
self.utm_zone = None
if self.projcs is not None and 'utm' in self.projcs.lower():
self.utm_zone = self.projcs[-3:].lower().strip('n').strip('s')
self.geogcs = self._gettxt('GEOGCS["', '"')
self.datum = self._gettxt('DATUM["', '"')
tmp = self._getgcsparam('SPHEROID')
self.spheroid_name = tmp.pop(0)
self.semi_major_axis = tmp.pop(0)
self.inverse_flattening = tmp.pop(0)
self.primem = self._getgcsparam('PRIMEM')
self.gcs_unit = self._getgcsparam('UNIT')
self.projection = self._gettxt('PROJECTION["', '"')
self.latitude_of_origin = self._getvalue('latitude_of_origin')
self.central_meridian = self._getvalue('central_meridian')
self.standard_parallel_1 = self._getvalue('standard_parallel_1')
self.standard_parallel_2 = self._getvalue('standard_parallel_2')
self.scale_factor = self._getvalue('scale_factor')
self.false_easting = self._getvalue('false_easting')
self.false_northing = self._getvalue('false_northing')
self.projcs_unit = self._getprojcs_unit()
def _gettxt(self, s1, s2):
s = self.wktstr.lower()
strt = s.find(s1.lower())
if strt >= 0: # -1 indicates not found
strt += len(s1)
end = s[strt:].find(s2.lower()) + strt
return self.wktstr[strt:end]
def _getvalue(self, k):
s = self.wktstr.lower()
strt = s.find(k.lower())
if strt >= 0:
strt += len(k)
end = s[strt:].find(']') + strt
try:
return float(self.wktstr[strt:end].split(',')[1])
except:
pass
def _getgcsparam(self, txt):
nvalues = 3 if txt.lower() == 'spheroid' else 2
tmp = self._gettxt('{}["'.format(txt), ']')
if tmp is not None:
tmp = tmp.replace('"', '').split(',')
name = tmp[0:1]
values = list(map(float, tmp[1:nvalues]))
return name + values
else:
return [None] * nvalues
def _getprojcs_unit(self):
if self.projcs is not None:
tmp = self.wktstr.lower().split('unit["')[-1]
uname, ufactor = tmp.strip().strip(']').split('",')[0:2]
ufactor = float(ufactor.split(']')[0].split()[0].split(',')[0])
return uname, ufactor
return None, None
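# --- Illustrative example (not part of the original flopy module) -----------
# A minimal sketch of parsing an ESRI WKT string with the crs container above.
# The WKT below is a truncated, assumed example that only exercises the fields
# read by parse_wkt(); the function is never called at import time.
def _example_parse_wkt():
    wkt = ('PROJCS["NAD_1983_UTM_Zone_15N",'
           'GEOGCS["GCS_North_American_1983",'
           'DATUM["D_North_American_1983",'
           'SPHEROID["GRS_1980",6378137.0,298.257222101]],'
           'PRIMEM["Greenwich",0.0],'
           'UNIT["Degree",0.0174532925199433]],'
           'PROJECTION["Transverse_Mercator"],'
           'PARAMETER["false_easting",500000.0],'
           'PARAMETER["central_meridian",-93.0],'
           'PARAMETER["scale_factor",0.9996],'
           'PARAMETER["latitude_of_origin",0.0],'
           'UNIT["Meter",1.0]]')
    c = crs(esri_wkt=wkt)
    return c.projcs, c.utm_zone, c.datum   # -> name, '15', datum string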
def getprj(epsg, addlocalreference=True, text='esriwkt'):
"""
Gets projection file (.prj) text for given epsg code from
spatialreference.org
Parameters
----------
epsg : int
epsg code for coordinate system
addlocalreference : boolean
adds the projection file text associated with epsg to a local
database, epsgref.json, located in the user's data directory.
References
----------
https://www.epsg-registry.org/
Returns
-------
prj : str
text for a projection (*.prj) file.
"""
warnings.warn("SpatialReference has been deprecated. Use StructuredGrid "
"instead.", category=DeprecationWarning)
epsgfile = epsgRef()
wktstr = epsgfile.get(epsg)
if wktstr is None:
wktstr = get_spatialreference(epsg, text=text)
if addlocalreference and wktstr is not None:
epsgfile.add(epsg, wktstr)
return wktstr
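# --- Illustrative example (not part of the original flopy module) -----------
# getprj() checks the local epsgref.json cache first and only then queries
# spatialreference.org, so repeated lookups avoid network round trips. A
# hedged sketch (requires an internet connection on the first call; never
# called at import time):
def _example_getprj_cache():
    wkt = getprj(4326)        # fetched once, then served from the local cache
    return wkt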
def get_spatialreference(epsg, text='esriwkt'):
"""Gets text for given epsg code and text format from spatialreference.org
Fetches the reference text using the url:
http://spatialreference.org/ref/epsg/<epsg code>/<text>/
See: https://www.epsg-registry.org/
Parameters
----------
epsg : int
epsg code for coordinate system
text : str
string added to url
Returns
-------
    text : str
        reference text for the given epsg code and format, or None if the
        lookup failed
"""
from flopy.utils.flopy_io import get_url_text
warnings.warn("SpatialReference has been deprecated. Use StructuredGrid "
"instead.", category=DeprecationWarning)
epsg_categories = ['epsg', 'esri']
for cat in epsg_categories:
url = "http://spatialreference.org/ref/{2}/{0}/{1}/".format(epsg,
text,
cat)
result = get_url_text(url)
if result is not None:
break
if result is not None:
return result.replace("\n", "")
elif result is None and text != 'epsg':
for cat in epsg_categories:
error_msg = 'No internet connection or epsg code {0} ' \
'not found at http://spatialreference.org/ref/{2}/{0}/{1}'.format(
epsg,
text,
cat)
print(error_msg)
elif text == 'epsg': # epsg code not listed on spatialreference.org may still work with pyproj
return '+init=epsg:{}'.format(epsg)
def getproj4(epsg):
"""Gets projection file (.prj) text for given epsg code from
spatialreference.org. See: https://www.epsg-registry.org/
Parameters
----------
epsg : int
epsg code for coordinate system
Returns
-------
prj : str
text for a projection (*.prj) file.
"""
warnings.warn("SpatialReference has been deprecated. Use StructuredGrid "
"instead.", category=DeprecationWarning)
return get_spatialreference(epsg, text='proj4')
| 35.147774
| 100
| 0.508298
|
794f3f0658eb1e6d2431528fbba0d21c691d55b1
| 20,910
|
py
|
Python
|
matrix.py
|
olivercalder/linear-algebra
|
3435373912968018810622bbf032dad87c1f8072
|
[
"MIT"
] | 1
|
2019-04-19T15:06:45.000Z
|
2019-04-19T15:06:45.000Z
|
matrix.py
|
olivercalder/linear-algebra
|
3435373912968018810622bbf032dad87c1f8072
|
[
"MIT"
] | null | null | null |
matrix.py
|
olivercalder/linear-algebra
|
3435373912968018810622bbf032dad87c1f8072
|
[
"MIT"
] | null | null | null |
class Matrix:
def __init__(self, matrix=None, width=None):
if matrix == None:
self.empty()
elif width != None:
height = matrix
self.empty(height, width)
else:
self.height = len(matrix)
self.width = len(matrix[0])
self.matrix = matrix
def empty(self, height=0, width=0):
self.height = height
self.width = width
self.matrix = []
for h in range(self.height):
self.matrix.append([])
for w in range(self.width):
self.matrix[h].append(None)
return self
def zero(self, height=None, width=None):
if height != None:
self.height = height
self.width = width
if width == None:
self.width = self.height
self.matrix = []
for h in range(self.height):
self.matrix.append([])
for w in range(self.width):
self.matrix[h].append(0)
return self
def identity(self, order, order2=None):
if order2 != None and order2 != order:
return None
else:
self.zero(order, order)
for i in range(1, order + 1):
self.set(i, i, 1)
return self
def __repr__(self):
return 'Matrix({})'.format(self.matrix)
def __str__(self):
return self.get_matrix_string()
def __lt__(self, other):
if self.det() < other.det():
return True
else:
return False
def __le__(self, other):
if self == other:
return True
elif self < other:
return True
else:
return False
def __eq__(self, other):
if type(self) != type(other):
return False
elif self.width != other.width or self.height != other.height:
return False
else:
equal = True
for row in range(self.height):
for column in range(self.width):
if self.matrix[row][column] != other.matrix[row][column]:
equal = False
return equal
def __gt__(self, other):
if self.det() > other.det():
return True
else:
return False
def __ge__(self, other):
if self == other:
return True
elif self > other:
return True
else:
return False
# def __getattr__(self, name): Implement the following for things like:
# pass A.R1 = -1*A.R1 + 3*A.R2
# def __getattribute(self, name):
# pass
# def __setattr__(self, name, value):
# pass
# def __get__(self, instance, owner):
# pass
# def __set__(self, instance, owner):
# pass
def __len__(self):
return self.height * self.width
def __getitem__(self, item):
if type(item) == type(tuple([])):
return self.get(item[0], item[1])
elif type(item) == type('Rx'):
if item[0].upper() == 'R':
return self.get_row(int(item[1:]))
elif item[0].upper() == 'C':
return self.get_column(int(item[1:]))
elif type(item) == type(0):
row = ((item - 1) // self.width) + 1
column = ((item - 1) % self.width) + 1
return self.get(row, column)
def __setitem__(self, item, value):
if type(item) == type(tuple([])):
self.set(item[0], item[1], value)
elif type(item) == type('Rx'):
if item[0].upper() == 'R':
self.set_row(int(item[1:]), value)
            elif item[0].upper() == 'C':
                self.set_column(int(item[1:]), value)
        elif type(item) == type(0):
            row = ((item - 1) // self.width) + 1
            column = ((item - 1) % self.width) + 1
            self.set(row, column, value)
else:
return None
return value
# def __iter__(self):
# pass
def __add__(self, other):
if self.order() != other.order():
return None
else:
new_matrix = self.copy()
for row in range(1, self.height + 1):
for column in range(1, self.width + 1):
new_value = self.get(row, column) + other.get(row, column)
if abs(new_value) < 10**-13:
new_value = 0
if abs(new_value) == 0:
new_value = 0
new_matrix.set(row, column, new_value)
return new_matrix
def __radd__(self, other):
return NotImplemented
def __iadd__(self, other):
self = self + other
return self
def __sub__(self, other):
if self.order() != other.order():
return None
else:
new_matrix = self.copy()
for row in range(1, self.height + 1):
for column in range(1, self.width + 1):
new_value = self.get(row, column) - other.get(row, column)
if abs(new_value) < 10**-13:
new_value = 0
if abs(new_value) == 0:
new_value = 0
new_matrix.set(row, column, new_value)
return new_matrix
def __rsub__(self, other):
return NotImplemented
def __isub__(self, other):
self = self - other
return self
def __mul__(self, other):
if type(other) == type(0) or type(other) == type(0.0):
new_matrix = self.copy()
for row in range(1, self.height + 1):
for column in range(1, self.width + 1):
new_matrix.set(row, column, self.get(row, column) * other)
if abs(new_matrix.get(row, column)) == 0:
new_matrix.set(row, column, 0)
return new_matrix
elif self.order()[1] != other.order()[0]:
return None
elif type(other) == type(self):
new_matrix = Matrix(self.height, other.width)
for i in range(1, self.height + 1):
for j in range(1, other.width + 1):
total = 0
for k in range(1, other.height + 1):
total += self.get(i, k) * other.get(k, j)
new_matrix.set(i, j, total)
return new_matrix
def __rmul__(self, other):
# multiplying matrix by a scalar int or float
new_matrix = self.copy()
for row in range(1, self.height + 1):
for column in range(1, self.width + 1):
new_value = self.get(row, column) * other
if abs(new_value) == 0:
new_value = 0
new_matrix.set(row, column, new_value)
return new_matrix
def __imul__(self, other):
self = self * other
return self
def __matmul__(self, other):
return NotImplemented
# cross product
def __rmatmul__(self, other):
return NotImplemented
def __imatmul__(self, other):
return NotImplemented
# cross product
def __truediv__(self, other):
new_matrix = self.copy()
for row in range(1, self.height + 1):
for column in range(1, self.width + 1):
new_value = self.get(row, column) / other
if abs(new_value) == 0:
new_value = 0
new_matrix.set(row, column, new_value)
return new_matrix
def __rtruediv__(self, other):
return NotImplemented
def __itruediv__(self, other):
self = self / other
return self
def __floordiv__(self, other):
new_matrix = self.copy()
for row in range(1, self.height + 1):
for column in range(1, self.width + 1):
new_value = self.get(row, column) // other
if abs(new_value) == 0:
new_value = 0
new_matrix.set(row, column, new_value)
return new_matrix
def __rfloordiv__(self, other):
return NotImplemented
def __ifloordiv__(self, other):
self = self / other
return self
def __mod__(self, other):
new_matrix = self.copy()
for row in range(1, self.height + 1):
for column in range(1, self.width + 1):
new_value = self.get(row, column) % other
if abs(new_value) == 0:
new_value = 0
new_matrix.set(row, column, new_value)
return new_matrix
def __rmod__(self, other):
return NotImplemented
def __imod__(self, other):
for row in range(1, self.height + 1):
for column in range(1, self.width + 1):
self = self % other
return self
def __divmod__(self, other):
return NotImplemented
def __rdivmod__(self, other):
return NotImplemented
def __idivmod__(self, other):
return NotImplemented
def __pow__(self, other):
if other < 0:
new_matrix = self.inverse()
else:
new_matrix = self.copy()
multiplicand = new_matrix.copy()
for i in range(1, int(abs(other))):
new_matrix *= multiplicand
return new_matrix
def __rpow__(self, other):
return NotImplemented
def __ipow__(self, other):
self = self**other
return self
def __lshift__(self, other):
return NotImplemented
def __rlshift__(self, other):
return NotImplemented
def __ilshift__(self, other):
return NotImplemented
def __rshift__(self, other):
return NotImplemented
def __rrshift__(self, other):
return NotImplemented
def __irshift__(self, other):
return NotImplemented
def __and__(self, other):
return NotImplemented
def __rand__(self, other):
return NotImplemented
def __iand__(self, other):
return NotImplemented
def __xor__(self, other):
return NotImplemented
def __rxor__(self, other):
return NotImplemented
def __ixor__(self, other):
return NotImplemented
def __or__(self, other):
return NotImplemented
def __ror__(self, other):
return NotImplemented
def __ior__(self, other):
return NotImplemented
def __neg__(self):
return -1 * self
def __abs__(self):
if self.height == 1 or self.width == 1:
sum_of_squares = 0
for row in range(1, self.height + 1):
for column in range(1, self.width + 1):
sum_of_squares += (self.get(row, column))**2
return sum_of_squares**(1/2)
else:
return self.det()
def __invert__(self):
return self.inverse()
def order(self):
return tuple([self.height, self.width])
def is_square(self):
return self.order()[0] == self.order()[1]
def set(self, row, column, number):
row = row - 1
column = column - 1
self.matrix[row][column] = number
def get(self, row, column):
row = row - 1
column = column - 1
return self.matrix[row][column]
def get_row_list(self, row):
row_data = []
for i in range(1, self.width + 1):
row_data.append(self.get(row, i))
return row_data
def get_column_list(self, column):
column_data = []
for i in range(1, self.height + 1):
column_data.append(self.get(i, column))
return column_data
def get_row(self, row):
return Matrix([self.get_row_list(row)])
def get_column(self, column):
return Matrix([self.get_column_list(column)]).flip()
def get_submatrix(self, upperleft, lowerright):
new_matrix = []
for j in range(upperleft[0], lowerright[0] + 1):
new_row = []
for k in range(upperleft[1], lowerright[1] + 1):
new_row.append(self.get(j, k))
new_matrix.append(new_row)
return Matrix(new_matrix)
def set_row(self, row, data):
if type(data) == type(list()):
if self.width != 0 and self.width != len(data):
print('Error: Cannot set row. Length does not match.')
return None
else:
for i in range(1, self.width + 1):
self.set(row, i, data[i - 1])
elif type(data) == type(Matrix()):
if self.width != 0 and self.width != data.width:
if self.width == data.height and data.width == 1:
data = data.flip()
else:
print('Error: Cannot set row. Size does not match.')
return None
if data.height == 1:
for i in range(1, self.width + 1):
self.set(row, i, data.get(1, i))
else:
print('Error: Cannot set row. Type does not match.')
return None
def set_column(self, column, data):
if type(data) == type(list()):
if self.height != 0 and self.height != len(data):
print('Error: Cannot set column. Length does not match.')
return None
else:
for i in range(1, self.height + 1):
self.set(i, column, data[i - 1])
elif type(data) == type(Matrix()):
if self.height != 0 and self.height != data.height:
if self.height == data.width and data.height == 1:
data = data.flip()
else:
print('Error: Cannot set column. Size does not match.')
return None
if data.width == 1:
for i in range(1, self.height + 1):
self.set(i, column, data.get(i, 1))
else:
print('Error: Cannot set column. Type does not match.')
return None
def add_row(self, data):
if (self.order() == (0, 0)) or (type(data) == type(list()) and self.width == len(data)) or (type(data) == type(Matrix()) and self.width == data.width):
self.height += 1
self.matrix.append([])
if self.width == 0:
self.width = len(data)
for i in range(1, self.width + 1):
self.matrix[self.height - 1].append(None)
self.set_row(self.height, data)
return self
else:
print('Error: Cannot add row. Length or type does not match.')
return None
def add_column(self, data):
if (self.order() == (0, 0)) or (type(data) == type(list()) and self.height == len(data)) or (type(data) == type(Matrix()) and self.height == data.height):
self.width += 1
if self.height == 0:
self.height = len(data)
for i in range(1, self.height + 1):
self.matrix.append([])
for i in range(1, self.height + 1):
self.matrix[i - 1].append(None)
self.set_column(self.width, data)
return self
else:
print('Error: Cannot add column. Length or type does not match.')
return None
def minor(self, i, j):
ij_minor = Matrix()
for r in range(1, self.height + 1):
if r != i:
new_row = []
for c in range(1, self.width + 1):
if c != j:
new_row.append(self[r,c])
ij_minor.add_row(new_row)
return ij_minor
def det(self):
A = self
if A.height != A.width:
return None
elif A.height == 1 and A.width == 1:
return A[1,1]
else:
determinant = 0
for j in range(1, A.width + 1):
if A[1,j] != 0:
determinant += (-1)**(j+1) * A[1,j] * A.minor(1,j).det()
return determinant
def inverse(self):
if self.order()[0] != self.order()[1]:
print('Error: Cannot invert. Must be nxn matrix.')
return None
elif self.det() == 0:
print('Error: Cannot invert. Determinant = 0.')
return None
else:
A = self.copy()
degree = A.order()[0]
Aaug = A.conjoin(Matrix().identity(degree))
Aaug = Aaug.rref()
Ainv = Aaug.get_submatrix((1, 1+degree), (degree, 2*degree))
            zero = Matrix().zero(1, degree)
            for row in range(1, degree + 1):
                if Aaug.get_submatrix((row, 1), (row, degree)) == zero and \
                        Aaug.get_submatrix((row, degree + 1), (row, 2 * degree)) != zero:
                    print('Error: Cannot invert. No solution to rref(A|I).')
                    return None
return Ainv
def copy(self):
A = Matrix()
for i in range(1, self.height + 1):
A.add_row(self.get_row(i))
return A
def flip(self):
A = Matrix().empty(self.width, self.height)
for i in range(1, A.height + 1):
for j in range(1, A.width + 1):
A.set(i, j, self.get(j, i))
return A
def conjoin(self, other):
A = self.copy()
for i in range(1, other.width + 1):
A.add_column(other.get_column(i))
return A
    def R(self, row): # deprecated in favor of A['R1'], but still better when referring to rows using variables
        return self.get_row(row)
def set_R(self, row, matrix): # deprecated in favor of A['R1'], but still better when referring to rows using variables
self.set_row(row, matrix)
def swap_R(self, row1, row2):
tmp = self.get_row(row1)
self.set_row(row1, self.get_row(row2))
self.set_row(row2, tmp)
def rref(self):
A = self.copy()
n = 1
m = 1
while n <= A.height and m <= A.width:
i = n
while i <= A.height and A.get(i, m) == 0:
i += 1
if i > A.height:
m += 1 # Shifts the start index over one column, but does not shift down a row
else:
A.swap_R(n, i)
A['R{}'.format(n)] /= A[n,m] # Old method: A.set_R(n, 1/A.get(n, m) * A.R(n))
for j in range(1, A.height + 1):
if j != n and A.get(j, m) != 0:
A['R{}'.format(j)] -= A[j,m] * A['R{}'.format(n)] # Old method: A.set_R(j, A.R(j) - A.get(j, m) * A.R(n))
m += 1 # Shifts the start index over one column
n += 1 # Shifts the start index down one row
return A
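    # Illustrative usage note (not part of the original code): because rref()
    # reduces an augmented matrix, a linear system A x = b can be solved by
    # conjoining b as a column and reading back the last column, e.g.
    #
    #     >>> Ab = A.conjoin(b)                  # b given as a column Matrix
    #     >>> x = Ab.rref().get_column(Ab.width)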
def get_row_string(self, row):
row_string = '( '
for column in range(1, self.width + 1):
row_string = row_string + '{:^5.4g}'.format(self.get(row, column)) + ' '
row_string = row_string + ')'
return row_string
def get_column_string(self, column):
column_string = ''
for row in range(1, self.height + 1):
row_string = '( ' + '{:^5.4g}'.format(self.get(row, column)) + ' )'
column_string = column_string + row_string + '\n'
        column_string = column_string.rstrip('\n')
return column_string
def get_matrix_string(self):
matrix_string = ''
for row in range(1, self.height + 1):
matrix_string += self.get_row_string(row) + '\n'
        matrix_string = matrix_string.rstrip('\n')
return matrix_string
def print_row(self, row):
row_string = self.get_row_string(row)
print(row_string)
def print_column(self, column):
column_string = self.get_column_string(column)
print(column_string)
def print_matrix(self):
matrix_string = self.get_matrix_string()
print(matrix_string)
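# --- Illustrative example (not part of the original module) -----------------
# A minimal sketch showing that the rref()-based inverse satisfies
# A * A**-1 == I. The values are assumed purely for illustration and the
# function is not called anywhere.
def _example_inverse_roundtrip():
    A = Matrix([[2, 1],
                [5, 3]])
    I = Matrix().identity(2)
    return A * A.inverse() == I   # -> True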
def test():
test = Matrix([
[ 1, 2, 2, -5, 6],
[-1, -2, -1, 1, -1],
[ 4, 8, 5, -8, 9],
[ 3, 6, 1, 5, -7]
])
print(test)
print('Slot (3, 3):', test[3, 3])
test = test.rref()
print('rref:')
print(test)
print()
print('New Matrix A:')
A = Matrix([
[22, 13, 8, 3],
[-16, -3, -2, -2],
[8, 9, 7, 2],
[5, 4, 3, 1]
])
print(A)
print('rref(A):')
print(A.rref())
print('A^-1:')
print(A**-1)
print('A[3,3]:')
print(A[3,3])
print("\nA['R2']:")
print(A['R2'])
print('A[5]:')
print(A[5])
print("\nA['R2'] = A['R2'] + 2 * A['R3']")
print('A after operation:')
A['R2'] = A['R2'] + 2 * A['R3']
print(A)
if __name__ == '__main__':
test()
| 31.826484
| 162
| 0.504687
|
794f3f660ee63d027a31a77a117e65b069e9068b
| 4,336
|
py
|
Python
|
segmenters/nlp/NLTKSentencizer/tests/test_nltksentencizer.py
|
Harshdeep1996/jina-hub
|
880ff719715b89969860c70219d26a931a031d10
|
[
"Apache-2.0"
] | 106
|
2020-04-28T10:24:08.000Z
|
2022-03-15T02:30:27.000Z
|
segmenters/nlp/NLTKSentencizer/tests/test_nltksentencizer.py
|
mezig351/jina-hub
|
3ced90575467ebb4bc070ed43a189271370b4e70
|
[
"Apache-2.0"
] | 6,808
|
2020-05-01T04:13:43.000Z
|
2021-06-23T08:04:02.000Z
|
segmenters/nlp/NLTKSentencizer/tests/test_nltksentencizer.py
|
mezig351/jina-hub
|
3ced90575467ebb4bc070ed43a189271370b4e70
|
[
"Apache-2.0"
] | 86
|
2020-04-29T09:50:29.000Z
|
2022-01-25T05:42:44.000Z
|
import numpy as np
import pytest
from .. import NLTKSentencizer
@pytest.mark.parametrize(
'language, expected_len, expected_first_chunk, expected_second_chunk, text',
[
(
None,
2,
'Today is a good day.',
"Can't wait for tomorrow!",
"Today is a good day. Can't wait for tomorrow!",
),
(
'french',
2,
"Aujourd'hui est un bon jour.",
'Je ne peux pas attendre demain!',
"Aujourd'hui est un bon jour. Je ne peux pas attendre demain!",
),
(
'german',
2,
'Heute ist ein guter Tag.',
'Ich kann nicht auf morgen warten!',
'Heute ist ein guter Tag. Ich kann nicht auf morgen warten!',
),
(
'italian',
2,
'Oggi è una buona giornata.',
"Non vedo l'ora che arrivi domani!",
"Oggi è una buona giornata. Non vedo l'ora che arrivi domani!",
),
(
'russian',
2,
'Сегодня хороший день.',
'Не могу дождаться завтра!',
'Сегодня хороший день. Не могу дождаться завтра!',
),
(
'greek',
2,
'Σήμερα είναι μια καλή μέρα.',
'Δεν μπορώ να περιμένω αύριο!',
'Σήμερα είναι μια καλή μέρα. Δεν μπορώ να περιμένω αύριο!',
),
(
'norwegian',
2,
'I dag er en god dag.',
'Gleder meg ikke til i morgen!',
'I dag er en god dag. Gleder meg ikke til i morgen!',
),
],
)
def test_nltksentencizer(
language, expected_len, expected_first_chunk, expected_second_chunk, text
):
"""
Test multiple scenarios with various languages
"""
if language:
segmenter = NLTKSentencizer(language)
else:
# default language is English
segmenter = NLTKSentencizer()
docs_chunks = segmenter.segment(np.stack([text, text]))
assert len(docs_chunks) == 2
for chunks in docs_chunks:
assert len(chunks) == expected_len
assert chunks[0]['text'] == expected_first_chunk
assert chunks[1]['text'] == expected_second_chunk
def test_locations():
"""Test simple logics regarding the ``location`` key of sentences returned by the sentencizer"""
segmenter = NLTKSentencizer()
text = (
"This is a sentence. Here's another sentence. One more sentence? Aaand, yes, one more! \n"
"Lastly, this one is the last sentence."
)
docs_chunks = segmenter.segment(np.stack([text, text]))
for chunks in docs_chunks:
# first sentence should start at the first index or later
assert chunks[0]['location'][0] >= 0
# last sentence can not end at an index greater than the length of text
assert chunks[-1]['location'][-1] <= len(text)
        # sentences beginning and ending indices cannot overlap
for i in range(1, len(chunks)):
assert chunks[i]['location'][0] > chunks[i - 1]['location'][-1]
def test_nltk_sentencizer_unsupported_language():
"""Unsupported and/or mis-spelt languages must raise a LookUp error"""
with pytest.raises(LookupError):
NLTKSentencizer('eng')
with pytest.raises(LookupError):
NLTKSentencizer('abcd')
def test_offset():
"""Test that last offset is the same as the length of sentences minus one"""
segmenter = NLTKSentencizer()
text = ' This , text is... . Amazing !!'
docs_chunks = segmenter.segment(np.stack([text, text]))
for chunks in docs_chunks:
assert len(chunks) - 1 == chunks[-1]['offset']
@pytest.mark.parametrize(
'text',
[
("T.C. Sağlık Bakalığı\'nın son açıklamasına göre, koronavirüs daha bitmedi."),
('Doç. Dr. Özkonuk, açıklama yaptı.'),
('3 No.lu madde onaylandı.'),
],
)
def test_turkish_abbreviations(text):
"""Check that Turkish sentences that include dots in abbreviations do not separate on those"""
segmenter = NLTKSentencizer(language='turkish')
# turkish abbreviations include dot, and they should not be segmented
docs_chunks = segmenter.segment(np.stack([text, text]))
for chunks in docs_chunks:
assert len(chunks) == 1
| 33.099237
| 101
| 0.586024
|
794f405b8ad86de7e275da375758a8fb4948ec86
| 15,146
|
py
|
Python
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs_/adjacency_sid/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 64
|
2016-10-20T15:47:18.000Z
|
2021-11-11T11:57:32.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs_/adjacency_sid/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 126
|
2016-10-05T10:36:14.000Z
|
2019-05-15T08:43:23.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs_/adjacency_sid/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 63
|
2016-11-07T15:23:08.000Z
|
2021-09-22T14:41:16.000Z
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import sid
class adjacency_sid(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isn/neighbors/neighbor/subTLVs/subTLVs/adjacency-sid. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines segment routing extensions for prefixes.
"""
__slots__ = ("_path_helper", "_extmethods", "__sid")
_yang_name = "adjacency-sid"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__sid = YANGDynClass(
base=YANGListType(
False,
sid.sid,
yang_name="sid",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="sid",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"mt-isn",
"neighbors",
"neighbor",
"subTLVs",
"subTLVs",
"adjacency-sid",
]
def _get_sid(self):
"""
Getter method for sid, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/adjacency_sid/sid (list)
YANG Description: Adjacency Segment-IDs List. An IGP-Adjacency Segment is an IGP
segment attached to a unidirectional adjacency or a set of
unidirectional adjacencies. By default, an IGP-Adjacency Segment is
local to the node which advertises it.
"""
return self.__sid
def _set_sid(self, v, load=False):
"""
Setter method for sid, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/adjacency_sid/sid (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_sid is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sid() directly.
YANG Description: Adjacency Segment-IDs List. An IGP-Adjacency Segment is an IGP
segment attached to a unidirectional adjacency or a set of
unidirectional adjacencies. By default, an IGP-Adjacency Segment is
local to the node which advertises it.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
False,
sid.sid,
yang_name="sid",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="sid",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """sid must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType(False,sid.sid, yang_name="sid", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='False', extensions=None), is_container='list', yang_name="sid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
}
)
self.__sid = t
if hasattr(self, "_set"):
self._set()
def _unset_sid(self):
self.__sid = YANGDynClass(
base=YANGListType(
False,
sid.sid,
yang_name="sid",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="sid",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
sid = __builtin__.property(_get_sid)
_pyangbind_elements = OrderedDict([("sid", sid)])
from . import sid
class adjacency_sid(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isn/neighbors/neighbor/subTLVs/subTLVs/adjacency-sid. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines segment routing extensions for prefixes.
"""
__slots__ = ("_path_helper", "_extmethods", "__sid")
_yang_name = "adjacency-sid"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__sid = YANGDynClass(
base=YANGListType(
False,
sid.sid,
yang_name="sid",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="sid",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"mt-isn",
"neighbors",
"neighbor",
"subTLVs",
"subTLVs",
"adjacency-sid",
]
def _get_sid(self):
"""
Getter method for sid, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/adjacency_sid/sid (list)
YANG Description: Adjacency Segment-IDs List. An IGP-Adjacency Segment is an IGP
segment attached to a unidirectional adjacency or a set of
unidirectional adjacencies. By default, an IGP-Adjacency Segment is
local to the node which advertises it.
"""
return self.__sid
def _set_sid(self, v, load=False):
"""
Setter method for sid, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/adjacency_sid/sid (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_sid is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sid() directly.
YANG Description: Adjacency Segment-IDs List. An IGP-Adjacency Segment is an IGP
segment attached to a unidirectional adjacency or a set of
unidirectional adjacencies. By default, an IGP-Adjacency Segment is
local to the node which advertises it.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
False,
sid.sid,
yang_name="sid",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="sid",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """sid must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType(False,sid.sid, yang_name="sid", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='False', extensions=None), is_container='list', yang_name="sid", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
}
)
self.__sid = t
if hasattr(self, "_set"):
self._set()
def _unset_sid(self):
self.__sid = YANGDynClass(
base=YANGListType(
False,
sid.sid,
yang_name="sid",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="sid",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
sid = __builtin__.property(_get_sid)
_pyangbind_elements = OrderedDict([("sid", sid)])
| 37.865
| 517
| 0.573749
|
794f410ae5b6548f0ba8abcfe70bc7987ce3495b
| 6,149
|
py
|
Python
|
test/test_s3.py
|
cmc333333/parsons
|
50804a3627117797570f1e9233c9bbad583f7831
|
[
"Apache-2.0"
] | null | null | null |
test/test_s3.py
|
cmc333333/parsons
|
50804a3627117797570f1e9233c9bbad583f7831
|
[
"Apache-2.0"
] | 2
|
2021-11-24T19:39:57.000Z
|
2022-01-03T23:03:35.000Z
|
test/test_s3.py
|
cmc333333/parsons
|
50804a3627117797570f1e9233c9bbad583f7831
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import os
from datetime import datetime
import pytz
from parsons.aws.s3 import S3
from parsons.etl.table import Table
import urllib
import time
from test.utils import assert_matching_tables
# Requires a s3 credentials stored in aws config or env variable
# to run properly.
@unittest.skipIf(not os.environ.get('LIVE_TEST'), 'Skipping because not running live test')
class TestS3(unittest.TestCase):
def setUp(self):
self.s3 = S3()
self.s3.aws.session.get_credentials()
# Create a new bucket
self.test_bucket = os.environ['S3_TEMP_BUCKET']
        # Try to avoid random errors from the bucket not being found yet
self.s3.create_bucket(self.test_bucket)
self.test_key = 'test.csv'
self.tbl = Table([{'first': 'Bob', 'last': 'Smith'}])
csv_path = self.tbl.to_csv()
self.test_key_2 = 'test2.csv'
self.tbl_2 = Table([{'first': 'Jack', 'last': 'Smith'}])
csv_path_2 = self.tbl_2.to_csv()
# Sometimes it runs into issues putting the file
retry = 1
while retry <= 5:
try:
# Put a test file in the bucket
self.s3.put_file(self.test_bucket, self.test_key, csv_path)
self.s3.put_file(self.test_bucket, self.test_key_2, csv_path_2)
break
except Exception:
print('Retrying putting file in bucket...')
retry += 1
def tearDown(self):
for k in self.s3.list_keys(self.test_bucket):
self.s3.remove_file(self.test_bucket, k)
def test_list_buckets(self):
# Also tests that create_bucket works (part of setup)
buckets = self.s3.list_buckets()
self.assertTrue(self.test_bucket in buckets)
def test_bucket_exists(self):
# Test that a bucket that exists returns True
self.assertTrue(self.s3.bucket_exists(self.test_bucket))
# Test that a bucket that doesn't exist returns False
self.assertFalse(self.s3.bucket_exists('idontexist_bucket'))
def test_list_keys(self):
# Put a file in the bucket
csv_path = self.tbl.to_csv()
key = 'test/test.csv'
self.s3.put_file(self.test_bucket, key, csv_path)
# Test that basic bucket list works
keys = self.s3.list_keys(self.test_bucket, prefix='test/test')
self.assertTrue(key in keys)
# Test that prefix filter works -- when there
keys = self.s3.list_keys(self.test_bucket, prefix='test')
self.assertTrue(key in keys)
# Test that prefix filter works -- when not there
keys = self.s3.list_keys(self.test_bucket, prefix='nope')
self.assertFalse(key in keys)
def test_key_exists(self):
csv_path = self.tbl.to_csv()
key = 'test/test.csv'
self.s3.put_file(self.test_bucket, key, csv_path)
# Test that returns True if key exists
self.assertTrue(self.s3.key_exists(self.test_bucket, key))
        # Test that returns False if key does not exist
self.assertFalse(self.s3.key_exists(self.test_bucket, 'akey'))
def test_list_keys_suffix(self):
# Put a file in the bucket
csv_path = self.tbl.to_csv()
key_1 = 'test/test.csv'
key_2 = 'test/test.gz'
self.s3.put_file(self.test_bucket, key_1, csv_path)
self.s3.put_file(self.test_bucket, key_2, csv_path)
keys = self.s3.list_keys(self.test_bucket, suffix='csv')
self.assertTrue(key_1 in keys)
self.assertFalse(key_2 in keys)
keys = self.s3.list_keys(self.test_bucket, suffix='gz')
self.assertFalse(key_1 in keys)
self.assertTrue(key_2 in keys)
def test_list_keys_date_modified(self):
# Set current utc timestamp with timezone
        current_utc = datetime.now(pytz.utc)
# Ensure the files created before now are included
keys = self.s3.list_keys(self.test_bucket, date_modified_before=current_utc)
self.assertEqual(len(keys), 2)
# Ensure the files created after now are not included
keys = self.s3.list_keys(self.test_bucket, date_modified_after=current_utc)
self.assertEqual(len(keys), 0)
def test_put_and_get_file(self):
# put_file is part of setup, so just testing getting it here
path = self.s3.get_file(self.test_bucket, self.test_key)
result_tbl = Table.from_csv(path)
assert_matching_tables(self.tbl, result_tbl)
def test_get_url(self):
# Test that you can download from URL
url = self.s3.get_url(self.test_bucket, self.test_key)
csv_table = Table.from_csv(url)
assert_matching_tables(self.tbl, csv_table)
# Test that the url expires
url_short = self.s3.get_url(self.test_bucket, self.test_key, expires_in=1)
time.sleep(2)
with self.assertRaises(urllib.error.HTTPError) as cm:
Table.from_csv(url_short)
self.assertEqual(cm.exception.code, 403)
def test_transfer_bucket(self):
# Create a destination bucket
# TODO maybe pull this from an env var as well
destination_bucket = f"{self.test_bucket}-test"
self.s3.create_bucket(destination_bucket)
# Copy
self.s3.transfer_bucket(self.test_bucket, self.test_key, destination_bucket)
# Test that object made it
path = self.s3.get_file(destination_bucket, self.test_key)
result_tbl = Table.from_csv(path)
assert_matching_tables(self.tbl, result_tbl)
# Test that original still exists in original bucket
self.assertTrue(self.s3.key_exists(self.test_bucket, self.test_key))
# Transfer and delete original
self.s3.transfer_bucket(
self.test_bucket, self.test_key_2, destination_bucket,
None, None, None, None, None, False, True)
path_2 = self.s3.get_file(destination_bucket, self.test_key_2)
result_tbl_2 = Table.from_csv(path_2)
assert_matching_tables(self.tbl_2, result_tbl_2)
self.assertFalse(self.s3.key_exists(self.test_bucket, self.test_key_2))
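# A minimal, non-test sketch of the parsons S3 calls exercised above
# (put_file, list_keys, get_file). The bucket name is a placeholder and valid
# AWS credentials are assumed to be available in the environment.
def _example_s3_roundtrip(bucket_name='my-example-bucket'):
    s3 = S3()
    tbl = Table([{'first': 'Ada', 'last': 'Lovelace'}])
    s3.put_file(bucket_name, 'people.csv', tbl.to_csv())
    assert 'people.csv' in s3.list_keys(bucket_name, suffix='csv')
    return Table.from_csv(s3.get_file(bucket_name, 'people.csv'))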
| 34.740113
| 91
| 0.658156
|
794f418fce6662d759ec12f1f20937ecf883c31a
| 11,096
|
py
|
Python
|
BB/bbObjects/bbInventory.py
|
Morgenkroete/GOF2BountyBot
|
b4fe3d765b764ab169284ce0869a810825013389
|
[
"MIT"
] | 6
|
2020-06-09T16:36:52.000Z
|
2021-02-02T17:53:44.000Z
|
BB/bbObjects/bbInventory.py
|
Morgenkroete/GOF2BountyBot
|
b4fe3d765b764ab169284ce0869a810825013389
|
[
"MIT"
] | 138
|
2020-08-02T11:20:34.000Z
|
2020-12-15T15:55:11.000Z
|
BB/bbObjects/bbInventory.py
|
Morgenkroete/GOF2BountyBot
|
b4fe3d765b764ab169284ce0869a810825013389
|
[
"MIT"
] | 6
|
2020-07-05T05:32:16.000Z
|
2020-11-01T21:58:31.000Z
|
from __future__ import annotations
from . import bbInventoryListing
from ..baseClasses import bbSerializable
class bbInventory(bbSerializable.bbSerializable):
"""A database of bbInventoryListings.
Aside from the use of bbInventoryListing for the purpose of item quantities, this class is type unaware.
:var items: The actual item listings
:vartype items: dict[object, bbInventoryListing]
:var keys: The item types stored
    :vartype keys: list[object]
:var totalItems: The total number of items stored; the sum of all item quantities
:vartype totalItems: int
:var numKeys: The number of item types stored; the length of self.keys
:vartype numKeys: int
"""
def __init__(self):
# The actual item listings
self.items = {}
# The item types stored
self.keys = []
# The total number of items stored; the sum of all item quantities
self.totalItems = 0
# The number of item types stored; the length of self.keys
self.numKeys = 0
def addItem(self, item : object, quantity : int = 1):
"""Add one or more of an item to the inventory.
If at least one of item is already in the inventory, that item's bbInventoryListing count will be incremented.
Otherwise, a new bbInventoryListing is created for item.
:param object item: The item to add to the inventory
:param int quantity: Integer amount of item to add to the inventory. Must be at least 1. (Default 1)
:raise ValueError: If quantity is less than 1
"""
        if quantity < 1:
raise ValueError("Quantity must be at least 1")
# increment totalItems tracker
self.totalItems += quantity
# increment count for existing bbItemListing
if item in self.items:
self.items[item].count += quantity
# Add a new bbItemListing if one does not exist
else:
self.items[item] = bbInventoryListing.bbInventoryListing(item, quantity)
# Update keys and numKeys trackers
self.keys.append(item)
self.numKeys += 1
def _addListing(self, newListing : bbInventoryListing.bbInventoryListing):
"""Add an inventory listing to the inventory, including item and acount.
If at least one of item is already in the inventory, that item's bbInventoryListing count will be incremented.
Otherwise, a reference to the given bbInventoryListing is stored. The listing is not copied and remains mutable.
:param bbInventoryListing newListing: The inventory listing to add to the inventory
"""
# update total items count
self.totalItems += newListing.count
# if item is already stored, increment its listing count
if newListing.item in self.items:
self.items[newListing.item].count += newListing.count
# otherwise, store a reference to the given listing
else:
self.items[newListing.item] = newListing
self.keys.append(newListing.item)
# update keys counter
self.numKeys += 1
def removeItem(self, item : object, quantity : int = 1):
"""Remove one or more of an item from the inventory.
If the amount of item stored in the inventory is now zero, the bbInventoryListing is removed from the inventory.
At least quantity of item must already be stored in the inventory.
:param object item: The item to remove from the inventory
:param int quantity: Integer amount of item to remove from the inventory. Must be between 1 and the amount of item currently stored, both inclusive. (Default 1)
:raise ValueError: When attempting to remove more of an item than is in the inventory
"""
# Ensure enough of item is stored to remove quantity of it
if item in self.items and self.items[item].count >= quantity:
# Update item's count and inventory's totalItems tracker
self.items[item].count -= quantity
self.totalItems -= quantity
# remove the bbItemListing if it is now empty
if self.items[item].count == 0:
# update the keys and numKeys trackers
for i in range(len(self.keys)):
if self.keys[i] is item:
self.keys.pop(i)
break
# self.keys.remove(item)
self.numKeys -= 1
del self.items[item]
else:
raise ValueError("Attempted to remove " + str(quantity) + " " + str(item) + "(s) when " + (str(self.items[item].count) if item in self.items else "0") + " are in inventory")
def numPages(self, itemsPerPage : int) -> int:
"""Get the number of pages of items in the inventory, for a given max number of items per page
        E.g., where 3 keys are in the inventory: numPages(1) gives 3. numPages(2) gives 2.
:param int itemsPerPage: The maximum number of items per page
:return: The number of pages required to list all items in the inventory
:rtype: int
"""
return int(self.numKeys/itemsPerPage) + (0 if self.numKeys % itemsPerPage == 0 else 1)
def getPage(self, pageNum : int, itemsPerPage : int) -> list:
"""Get a list of the bbItemListings on the requested page.
pageNum is 1 index-based; the first page is 1.
pageNum must be between 1 and numPages(itemsPerPage).
:param int pageNum: The number of the page to fetch
:param int itemsPerPage: The max number of items that can be contained in a single page
:return: A list containing the bbInventoryListings contained in the requested inventory page
:rtype: list[bbInventoryListings]
:raise IndexError: When attempting to get a page out of range of this inventory
"""
# Validate the requested pageNum
if pageNum < 1 or pageNum > self.numPages(itemsPerPage):
raise IndexError("pageNum out of range. min=1 max=" + str(self.numPages(itemsPerPage)))
page = []
# Splice self.keys around the first and last indices in the requested page
for item in self.keys[(pageNum - 1) * itemsPerPage: min(pageNum * itemsPerPage, self.numKeys)]:
# Add the bbItemListings for each of the page's keys to the results list
page.append(self.items[item])
return page
def stores(self, item) -> bool:
"""Decide whether a given item is stored in this inventory.
:param object item: The item to check for membership
:return: True if at least one of item is in this inventory, False otherwise
:rtype: bool
"""
return item in self.keys
def numStored(self, item) -> int:
"""Get the amount stored of a given item.
:param object item: The item to count
:return: Integer count of number of items in this inventory. 0 if it is not stored in this inventory.
:rtype: int
"""
return self.items[item].count if self.stores(item) else 0
def isEmpty(self) -> bool:
"""Decide whether or not this bbInventory currently stores any items.
:return: True if no items are stored, False if at least one item is stored currently
:rtype: bool
"""
return self.totalItems == 0
def clear(self):
"""Remove all items from the inventory.
"""
self.items = {}
self.keys = []
self.totalItems = 0
self.numKeys = 0
def __getitem__(self, key : int) -> bbInventoryListing:
"""Override [subscript] operator for reading values.
Currently returns the bbInventoryListing for the item at position key in self.keys.
:param int key: The index of the key to dereference
:return: The bbInventoryListing for the item at the requested index
:rtype: bbInventoryListing
:raise KeyError: When the given index is in range of the inventory, but the key at the requested position in the keys array does not exist in the items dictionary
:raise IndexError: When given an index that isn't an int, or the given index is out of range
:raise ValueError: When the inventory is empty
"""
if bool(self.keys):
if key in range(len(self.keys)):
if self.keys[key] in self.items:
return self.items[self.keys[key]]
raise KeyError("Failed get of key number " + str(key) + " - " + str(self.keys[key]) + ". Key does not exist in inventory.")
raise IndexError("Key of incorrect type or out of range: "+ str(key) + ". Valid range: 0 - " + str(len(self.keys)-1))
raise ValueError("Attempted to fetch key " + str(key) + ", but keys list is empty")
def __setitem__(self, key, value):
"""Disallow assignment through the [subscript] operator.
:param key: ignored
:param value: ignored
:raise NotImplementedError: Always.
"""
raise NotImplementedError("Cannot use [subscript] assignment for class bbInventory. use addItem/removeItem instead.")
# self.items[self.keys[key]] = value
def __contains__(self, item) -> bool:
"""Override the 'in' operator.
:param object item: The object to test for membership
"""
return item in self.keys
def toDict(self, **kwargs) -> dict:
data = super().toDict(**kwargs)
data["items"] = []
for listing in self.items.values():
data["items"].append(listing.toDict(**kwargs))
return data
@classmethod
def fromDict(cls, invDict, **kwargs) -> bbInventory:
newInv = bbInventory()
if "items" in invDict:
for listingDict in invDict["items"]:
newInv._addListing(bbInventoryListing.bbInventoryListing.fromDict(listingDict))
return newInv
class TypeRestrictedInventory(bbInventory):
"""An inventory where the item listings are guaranteed to be of a given type.
:var itemType: The type by which listings are restricted
:vartype itemType: type
"""
def __init__(self, itemType : type):
super().__init__()
self.itemType = itemType
def addItem(self, item: object, quantity : int = 1):
if not isinstance(item, self.itemType):
raise TypeError("Given item does not match this inventory's item type restriction. Expected '" + self.itemType.__name__ + "', given '" + type(item).__name__ + "'")
super().addItem(item, quantity=quantity)
def _addListing(self, newListing: bbInventoryListing.bbInventoryListing):
if not isinstance(newListing.item, self.itemType):
raise TypeError("Given item does not match this inventory's item type restriction. Expected '" + self.itemType.__name__ + "', given '" + type(newListing.item).__name__ + "'")
super()._addListing(newListing)
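# A minimal usage sketch for bbInventory, exercising only the methods defined
# above; the stored items are plain strings purely for illustration.
def _exampleInventoryUsage():
    inv = bbInventory()
    inv.addItem("fuel cell", quantity=3)
    inv.addItem("laser")
    assert inv.numStored("fuel cell") == 3
    assert "laser" in inv
    inv.removeItem("fuel cell", quantity=2)
    # Two item types fit on a single page of up to ten listings
    return inv.getPage(1, 10)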
| 41.871698
| 186
| 0.636626
|
794f41f95922746903a16356f5f30c648cf9d11d
| 225
|
py
|
Python
|
main.py
|
Rodrigo-Oliveira1211/Projeto_Email
|
bed2a0bc73dce51182be11ee158008a32c33666f
|
[
"MIT"
] | null | null | null |
main.py
|
Rodrigo-Oliveira1211/Projeto_Email
|
bed2a0bc73dce51182be11ee158008a32c33666f
|
[
"MIT"
] | null | null | null |
main.py
|
Rodrigo-Oliveira1211/Projeto_Email
|
bed2a0bc73dce51182be11ee158008a32c33666f
|
[
"MIT"
] | null | null | null |
from enviar_arquivo import EmailBot
if __name__ == "__main__":
login = open("Arquivos/login.txt", "r").readlines()
email = login[0]
senha = login[1]
enviar = EmailBot(email, senha)
enviar.enviar_email()
| 22.5
| 55
| 0.662222
|
794f4208a8c86ae8d0d8fc3873b1e3e29df80066
| 121
|
py
|
Python
|
src/nrnpython/examples/test1.py
|
niltonlk/nrn
|
464541abbf72fe58de77b16bf0e1df425a280b89
|
[
"BSD-3-Clause"
] | 203
|
2018-05-03T11:02:11.000Z
|
2022-03-31T14:18:31.000Z
|
src/nrnpython/examples/test1.py
|
niltonlk/nrn
|
464541abbf72fe58de77b16bf0e1df425a280b89
|
[
"BSD-3-Clause"
] | 1,228
|
2018-04-25T09:00:48.000Z
|
2022-03-31T21:42:21.000Z
|
src/nrnpython/examples/test1.py
|
niltonlk/nrn
|
464541abbf72fe58de77b16bf0e1df425a280b89
|
[
"BSD-3-Clause"
] | 134
|
2018-04-23T09:14:13.000Z
|
2022-03-16T08:57:11.000Z
|
from neuron import h
pc = h.ParallelContext()
print "NEURON thinks I am %d of %d\n" % (pc.id(), pc.nhost())
pc.done()
| 15.125
| 61
| 0.636364
|
794f42857a502f22d3907a4f71fafdce07e24a0a
| 17,826
|
py
|
Python
|
adaptive/tests/test_learners.py
|
jhoofwijk/adaptive
|
ebb4879abcfba57de2808a1df5c01db8a1508a5d
|
[
"BSD-3-Clause"
] | null | null | null |
adaptive/tests/test_learners.py
|
jhoofwijk/adaptive
|
ebb4879abcfba57de2808a1df5c01db8a1508a5d
|
[
"BSD-3-Clause"
] | null | null | null |
adaptive/tests/test_learners.py
|
jhoofwijk/adaptive
|
ebb4879abcfba57de2808a1df5c01db8a1508a5d
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import collections
import functools as ft
import inspect
import itertools as it
import math
import operator
import os
import random
import shutil
import tempfile
import numpy as np
import pytest
import scipy.spatial
import adaptive
from adaptive.learner import (AverageLearner, BalancingLearner, DataSaver,
IntegratorLearner, Learner1D, Learner2D,
LearnerND)
from adaptive.runner import simple
try:
import skopt
from adaptive.learner import SKOptLearner
except ModuleNotFoundError:
SKOptLearner = None
LOSS_FUNCTIONS = {
Learner1D: ('loss_per_interval', (
adaptive.learner.learner1D.default_loss,
adaptive.learner.learner1D.uniform_loss,
adaptive.learner.learner1D.curvature_loss_function(),
)),
Learner2D: ('loss_per_triangle', (
adaptive.learner.learner2D.default_loss,
adaptive.learner.learner2D.uniform_loss,
adaptive.learner.learner2D.minimize_triangle_surface_loss,
adaptive.learner.learner2D.resolution_loss_function(),
)),
LearnerND: ('loss_per_simplex', (
adaptive.learner.learnerND.default_loss,
adaptive.learner.learnerND.std_loss,
adaptive.learner.learnerND.uniform_loss,
)),
}
def generate_random_parametrization(f):
"""Return a realization of 'f' with parameters bound to random values.
Parameters
----------
f : callable
All parameters but the first must be annotated with a callable
that, when called with no arguments, produces a value of the
appropriate type for the parameter in question.
"""
_, *params = inspect.signature(f).parameters.items()
if any(not callable(v.annotation) for (p, v) in params):
raise TypeError('All parameters to {} must be annotated with functions.'
.format(f.__name__))
realization = {p: v.annotation() for (p, v) in params}
return ft.partial(f, **realization)
def uniform(a, b):
return lambda: random.uniform(a, b)
# Library of functions and associated learners.
learner_function_combos = collections.defaultdict(list)
def learn_with(learner_type, **init_kwargs):
def _(f):
learner_function_combos[learner_type].append((f, init_kwargs))
return f
return _
def xfail(learner):
return pytest.mark.xfail, learner
def maybe_skip(learner):
return (pytest.mark.skip, learner) if learner is None else learner
# All parameters except the first must be annotated with a callable that
# returns a random value for that parameter.
@learn_with(Learner1D, bounds=(-1, 1))
def quadratic(x, m: uniform(0, 10), b: uniform(0, 1)):
return m * x**2 + b
@learn_with(Learner1D, bounds=(-1, 1))
def linear_with_peak(x, d: uniform(-1, 1)):
a = 0.01
return x + a**2 / (a**2 + (x - d)**2)
@learn_with(LearnerND, bounds=((-1, 1), (-1, 1)))
@learn_with(Learner2D, bounds=((-1, 1), (-1, 1)))
def ring_of_fire(xy, d: uniform(0.2, 1)):
a = 0.2
x, y = xy
return x + math.exp(-(x**2 + y**2 - d**2)**2 / a**4)
@learn_with(LearnerND, bounds=((-1, 1), (-1, 1), (-1, 1)))
def sphere_of_fire(xyz, d: uniform(0.2, 1)):
a = 0.2
x, y, z = xyz
return x + math.exp(-(x**2 + y**2 + z**2 - d**2)**2 / a**4) + z**2
@learn_with(AverageLearner, rtol=1)
def gaussian(n):
return random.gauss(0, 1)
# Decorators for tests.
# Create a sequence of learner parameters by adding all
# possible loss functions to an existing parameter set.
def add_loss_to_params(learner_type, existing_params):
if learner_type not in LOSS_FUNCTIONS:
return [existing_params]
loss_param, loss_functions = LOSS_FUNCTIONS[learner_type]
loss_params = [{loss_param: f} for f in loss_functions]
return [dict(**existing_params, **lp) for lp in loss_params]
def run_with(*learner_types, with_all_loss_functions=True):
pars = []
for l in learner_types:
has_marker = isinstance(l, tuple)
if has_marker:
marker, l = l
for f, k in learner_function_combos[l]:
ks = add_loss_to_params(l, k) if with_all_loss_functions else [k]
for k in ks:
# Check if learner was marked with our `xfail` decorator
# XXX: doesn't work when feeding kwargs to xfail.
if has_marker:
pars.append(pytest.param(l, f, dict(k),
marks=[marker]))
else:
pars.append((l, f, dict(k)))
return pytest.mark.parametrize('learner_type, f, learner_kwargs', pars)
def ask_randomly(learner, rounds, points):
n_rounds = random.randrange(*rounds)
n_points = [random.randrange(*points) for _ in range(n_rounds)]
xs = []
ls = []
for n in n_points:
x, l = learner.ask(n)
xs.extend(x)
ls.extend(l)
return xs, ls
# Tests
@run_with(Learner1D)
def test_uniform_sampling1D(learner_type, f, learner_kwargs):
"""Points are sampled uniformly if no data is provided.
Non-uniform sampling implies that we think we know something about
the function, which we do not in the absence of data.
"""
f = generate_random_parametrization(f)
learner = learner_type(f, **learner_kwargs)
points, _ = ask_randomly(learner, (10, 20), (10, 20))
points.sort()
ivals = np.diff(sorted(points))
assert max(ivals) / min(ivals) < 2 + 1e-8
@pytest.mark.xfail
@run_with(Learner2D, LearnerND)
def test_uniform_sampling2D(learner_type, f, learner_kwargs):
"""Points are sampled uniformly if no data is provided.
Non-uniform sampling implies that we think we know something about
the function, which we do not in the absence of data.
"""
f = generate_random_parametrization(f)
learner = learner_type(f, **learner_kwargs)
points, _ = ask_randomly(learner, (70, 100), (10, 20))
tree = scipy.spatial.cKDTree(points)
# regular grid
n = math.sqrt(len(points))
xbounds, ybounds = learner_kwargs['bounds']
r = math.sqrt((ybounds[1] - ybounds[0]) / (xbounds[1] - xbounds[0]))
xs, dx = np.linspace(*xbounds, int(n / r), retstep=True)
ys, dy = np.linspace(*ybounds, int(n * r), retstep=True)
distances, neighbors = tree.query(list(it.product(xs, ys)), k=1)
assert max(distances) < math.sqrt(dx**2 + dy**2)
@pytest.mark.parametrize('learner_type, bounds', [
(Learner1D, (-1, 1)),
(Learner2D, [(-1, 1), (-1, 1)]),
(LearnerND, [(-1, 1), (-1, 1), (-1, 1)]),
])
def test_learner_accepts_lists(learner_type, bounds):
def f(x):
return [0, 1]
learner = learner_type(f, bounds=bounds)
simple(learner, goal=lambda l: l.npoints > 10)
@run_with(Learner1D, Learner2D, LearnerND)
def test_adding_existing_data_is_idempotent(learner_type, f, learner_kwargs):
"""Adding already existing data is an idempotent operation.
Either it is idempotent, or it is an error.
This is the only sane behaviour.
"""
f = generate_random_parametrization(f)
learner = learner_type(f, **learner_kwargs)
control = learner_type(f, **learner_kwargs)
if learner_type is Learner1D:
learner._recompute_losses_factor = 1
control._recompute_losses_factor = 1
N = random.randint(10, 30)
control.ask(N)
xs, _ = learner.ask(N)
points = [(x, f(x)) for x in xs]
for p in points:
control.tell(*p)
learner.tell(*p)
random.shuffle(points)
for p in points:
learner.tell(*p)
M = random.randint(10, 30)
pls = zip(*learner.ask(M))
cpls = zip(*control.ask(M))
# Point ordering is not defined, so compare as sets
assert set(pls) == set(cpls)
# XXX: This *should* pass (https://github.com/python-adaptive/adaptive/issues/55)
# but we xfail it now, as Learner2D will be deprecated anyway
@run_with(Learner1D, xfail(Learner2D), LearnerND, AverageLearner)
def test_adding_non_chosen_data(learner_type, f, learner_kwargs):
"""Adding data for a point that was not returned by 'ask'."""
# XXX: learner, control and bounds are not defined
f = generate_random_parametrization(f)
learner = learner_type(f, **learner_kwargs)
control = learner_type(f, **learner_kwargs)
if learner_type is Learner2D:
# If the stack_size is bigger then the number of points added,
# ask will return a point from the _stack.
learner.stack_size = 1
control.stack_size = 1
N = random.randint(10, 30)
xs, _ = control.ask(N)
ys = [f(x) for x in xs]
for x, y in zip(xs, ys):
control.tell(x, y)
learner.tell(x, y)
M = random.randint(10, 30)
pls = zip(*learner.ask(M))
cpls = zip(*control.ask(M))
# Point ordering within a single call to 'ask'
# is not guaranteed to be the same by the API.
assert set(pls) == set(cpls)
@run_with(Learner1D, xfail(Learner2D), xfail(LearnerND), AverageLearner)
def test_point_adding_order_is_irrelevant(learner_type, f, learner_kwargs):
"""The order of calls to 'tell' between calls to 'ask'
is arbitrary.
This test will fail for the Learner2D because
`interpolate.interpnd.estimate_gradients_2d_global` will give different
outputs based on the order of the triangles and values in
(ip.tri, ip.values). Therefore the _stack will contain different points.
"""
f = generate_random_parametrization(f)
learner = learner_type(f, **learner_kwargs)
control = learner_type(f, **learner_kwargs)
if learner_type is Learner1D:
learner._recompute_losses_factor = 1
control._recompute_losses_factor = 1
N = random.randint(10, 30)
control.ask(N)
xs, _ = learner.ask(N)
points = [(x, f(x)) for x in xs]
for p in points:
control.tell(*p)
random.shuffle(points)
for p in points:
learner.tell(*p)
M = random.randint(10, 30)
pls = zip(*learner.ask(M))
cpls = zip(*control.ask(M))
# Point ordering within a single call to 'ask'
# is not guaranteed to be the same by the API.
# We compare the sorted points instead of set, because the points
# should only be identical up to machine precision.
np.testing.assert_almost_equal(sorted(pls), sorted(cpls))
# XXX: the Learner2D fails with ~50% chance
# see https://github.com/python-adaptive/adaptive/issues/55
@run_with(Learner1D, xfail(Learner2D), LearnerND, AverageLearner)
def test_expected_loss_improvement_is_less_than_total_loss(learner_type, f, learner_kwargs):
"""The estimated loss improvement can never be greater than the total loss."""
f = generate_random_parametrization(f)
learner = learner_type(f, **learner_kwargs)
N = random.randint(50, 100)
xs, loss_improvements = learner.ask(N)
for x in xs:
learner.tell(x, f(x))
M = random.randint(50, 100)
_, loss_improvements = learner.ask(M)
if learner_type is Learner2D:
assert (sum(loss_improvements)
< sum(learner.loss_per_triangle(learner.ip())))
elif learner_type is Learner1D:
assert sum(loss_improvements) < sum(learner.losses.values())
elif learner_type is AverageLearner:
assert sum(loss_improvements) < learner.loss()
# XXX: This *should* pass (https://github.com/python-adaptive/adaptive/issues/55)
# but we xfail it now, as Learner2D will be deprecated anyway
@run_with(Learner1D, xfail(Learner2D), LearnerND)
def test_learner_performance_is_invariant_under_scaling(learner_type, f, learner_kwargs):
"""Learners behave identically under transformations that leave
the loss invariant.
This is a statement that the learner makes decisions based solely
on the loss function.
"""
# for now we just scale X and Y by random factors
f = generate_random_parametrization(f)
control_kwargs = dict(learner_kwargs)
control = learner_type(f, **control_kwargs)
xscale = 1000 * random.random()
yscale = 1000 * random.random()
l_kwargs = dict(learner_kwargs)
l_kwargs['bounds'] = xscale * np.array(l_kwargs['bounds'])
learner = learner_type(lambda x: yscale * f(np.array(x) / xscale),
**l_kwargs)
if learner_type in [Learner1D, LearnerND]:
learner._recompute_losses_factor = 1
control._recompute_losses_factor = 1
npoints = random.randrange(300, 500)
if learner_type is LearnerND:
# Because the LearnerND is slow
npoints //= 10
for n in range(npoints):
cxs, _ = control.ask(1)
xs, _ = learner.ask(1)
control.tell_many(cxs, [control.function(x) for x in cxs])
learner.tell_many(xs, [learner.function(x) for x in xs])
# Check whether the points returned are the same
xs_unscaled = np.array(xs) / xscale
assert np.allclose(xs_unscaled, cxs)
# Check if the losses are close
assert math.isclose(learner.loss(), control.loss(), rel_tol=1e-10)
@run_with(Learner1D, Learner2D, LearnerND, AverageLearner,
with_all_loss_functions=False)
def test_balancing_learner(learner_type, f, learner_kwargs):
"""Test if the BalancingLearner works with the different types of learners."""
learners = [learner_type(generate_random_parametrization(f), **learner_kwargs)
for i in range(4)]
learner = BalancingLearner(learners)
# Emulate parallel execution
stash = []
for i in range(100):
n = random.randint(1, 10)
m = random.randint(0, n)
xs, _ = learner.ask(n, tell_pending=False)
# Save 'm' random points out of `xs` for later
random.shuffle(xs)
for _ in range(m):
stash.append(xs.pop())
for x in xs:
learner.tell(x, learner.function(x))
# Evaluate and add 'm' random points from `stash`
random.shuffle(stash)
for _ in range(m):
x = stash.pop()
learner.tell(x, learner.function(x))
assert all(l.npoints > 10 for l in learner.learners), [l.npoints for l in learner.learners]
@run_with(Learner1D, Learner2D, LearnerND, AverageLearner,
maybe_skip(SKOptLearner), IntegratorLearner,
with_all_loss_functions=False)
def test_saving(learner_type, f, learner_kwargs):
f = generate_random_parametrization(f)
learner = learner_type(f, **learner_kwargs)
control = learner_type(f, **learner_kwargs)
if learner_type is Learner1D:
learner._recompute_losses_factor = 1
control._recompute_losses_factor = 1
simple(learner, lambda l: l.npoints > 100)
fd, path = tempfile.mkstemp()
try:
learner.save(path)
control.load(path)
np.testing.assert_almost_equal(learner.loss(), control.loss())
# Try if the control is runnable
simple(control, lambda l: l.npoints > 200)
finally:
os.remove(path)
@run_with(Learner1D, Learner2D, LearnerND, AverageLearner,
maybe_skip(SKOptLearner), IntegratorLearner,
with_all_loss_functions=False)
def test_saving_of_balancing_learner(learner_type, f, learner_kwargs):
f = generate_random_parametrization(f)
learner = BalancingLearner([learner_type(f, **learner_kwargs)])
control = BalancingLearner([learner_type(f, **learner_kwargs)])
if learner_type is Learner1D:
for l, c in zip(learner.learners, control.learners):
l._recompute_losses_factor = 1
c._recompute_losses_factor = 1
simple(learner, lambda l: l.learners[0].npoints > 100)
folder = tempfile.mkdtemp()
def fname(learner):
return folder + 'test'
try:
learner.save(fname=fname)
control.load(fname=fname)
np.testing.assert_almost_equal(learner.loss(), control.loss())
# Try if the control is runnable
simple(control, lambda l: l.learners[0].npoints > 200)
finally:
shutil.rmtree(folder)
@run_with(Learner1D, Learner2D, LearnerND, AverageLearner,
maybe_skip(SKOptLearner), IntegratorLearner,
with_all_loss_functions=False)
def test_saving_with_datasaver(learner_type, f, learner_kwargs):
f = generate_random_parametrization(f)
g = lambda x: {'y': f(x), 't': random.random()}
arg_picker = operator.itemgetter('y')
learner = DataSaver(learner_type(g, **learner_kwargs), arg_picker)
control = DataSaver(learner_type(g, **learner_kwargs), arg_picker)
if learner_type is Learner1D:
learner.learner._recompute_losses_factor = 1
control.learner._recompute_losses_factor = 1
simple(learner, lambda l: l.npoints > 100)
fd, path = tempfile.mkstemp()
try:
learner.save(path)
control.load(path)
np.testing.assert_almost_equal(learner.loss(), control.loss())
assert learner.extra_data == control.extra_data
# Try if the control is runnable
simple(control, lambda l: l.npoints > 200)
finally:
os.remove(path)
@pytest.mark.xfail
@run_with(Learner1D, Learner2D, LearnerND)
def test_convergence_for_arbitrary_ordering(learner_type, f, learner_kwargs):
"""Learners that are learning the same function should converge
to the same result "eventually" if given the same data, regardless
of the order in which that data is given.
"""
# XXX: not sure how to implement this. Can we say anything at all about
# the scaling of the loss with the number of points?
raise NotImplementedError()
@pytest.mark.xfail
@run_with(Learner1D, Learner2D, LearnerND)
def test_learner_subdomain(learner_type, f, learner_kwargs):
"""Learners that never receive data outside of a subdomain should
perform 'similarly' to learners defined on that subdomain only."""
# XXX: not sure how to implement this. How do we measure "performance"?
raise NotImplementedError()
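# A small standalone sketch of the pattern used throughout this module: bind
# random values to a function's annotated parameters, then drive the learner
# with the blocking `simple` runner. The quadratic target and the npoints goal
# are arbitrary choices for illustration.
def _example_random_parametrization_run():
    f = generate_random_parametrization(quadratic)
    learner = Learner1D(f, bounds=(-1, 1))
    simple(learner, goal=lambda l: l.npoints > 50)
    return learner.loss()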
| 32.469945
| 95
| 0.670537
|
794f43cc67b41fed1de2140a1f5017c1325c610d
| 2,035
|
py
|
Python
|
nilmtk/stats/tests/test_dropoutrate.py
|
nilmtkMridul/nilmtk
|
b4eede4f2e8f55c0f072cc08da1b47d433c07445
|
[
"Apache-2.0"
] | 1
|
2021-11-04T05:10:58.000Z
|
2021-11-04T05:10:58.000Z
|
nilmtk/stats/tests/test_dropoutrate.py
|
nilmtkMridul/nilmtk
|
b4eede4f2e8f55c0f072cc08da1b47d433c07445
|
[
"Apache-2.0"
] | null | null | null |
nilmtk/stats/tests/test_dropoutrate.py
|
nilmtkMridul/nilmtk
|
b4eede4f2e8f55c0f072cc08da1b47d433c07445
|
[
"Apache-2.0"
] | 1
|
2019-10-12T16:02:05.000Z
|
2019-10-12T16:02:05.000Z
|
#!/usr/bin/python
from __future__ import print_function, division
import unittest
from os.path import join
import numpy as np
import pandas as pd
from datetime import timedelta
from .. import DropoutRate
from ..goodsectionsresults import GoodSectionsResults
from ..totalenergy import _energy_for_power_series
from ... import TimeFrame, ElecMeter, HDFDataStore, MeterGroup
from ...elecmeter import ElecMeterID
from ...consts import JOULES_PER_KWH
from ...tests.testingtools import data_dir
METER_ID = ElecMeterID(instance=1, building=1, dataset='REDD')
METER_ID2 = ElecMeterID(instance=2, building=1, dataset='REDD')
class TestLocateGoodSections(unittest.TestCase):
@classmethod
def setUpClass(cls):
filename = join(data_dir(), 'energy_complex.h5')
cls.datastore = HDFDataStore(filename)
ElecMeter.load_meter_devices(cls.datastore)
cls.meter_meta = cls.datastore.load_metadata('building1')['elec_meters'][METER_ID.instance]
@classmethod
def tearDownClass(cls):
cls.datastore.close()
def test_pipeline(self):
meter = ElecMeter(store=self.datastore, metadata=self.meter_meta,
meter_id=METER_ID)
source_node = meter.get_source_node()
dropout_rate = DropoutRate(source_node)
dropout_rate.run()
# TODO: remove prints and actually test value of dropout rate.
print(dropout_rate.results)
        print(next(meter.power_series()))
# Now test metergroup
meter2 = ElecMeter(store=self.datastore, metadata=self.meter_meta,
meter_id=METER_ID2)
metergroup = MeterGroup([meter, meter2])
dr = metergroup.dropout_rate(ignore_gaps=False)
print("dr =", dr) # dr = 0.861386138614
# Test a second time to make sure cache works
dr_from_cache = metergroup.dropout_rate(ignore_gaps=False)
self.assertEqual(dr, dr_from_cache)
metergroup.clear_cache()
if __name__ == '__main__':
unittest.main()
| 35.086207
| 99
| 0.697789
|
794f4573fec2e5ea20db0d30f81b27e4466cd9ab
| 29,683
|
py
|
Python
|
BaseTools/Source/Python/Common/StringUtils.py
|
changeworld/mu_basecore
|
1a883ec85d8d2f49663c76e1a1bc5068333f5508
|
[
"BSD-2-Clause"
] | null | null | null |
BaseTools/Source/Python/Common/StringUtils.py
|
changeworld/mu_basecore
|
1a883ec85d8d2f49663c76e1a1bc5068333f5508
|
[
"BSD-2-Clause"
] | null | null | null |
BaseTools/Source/Python/Common/StringUtils.py
|
changeworld/mu_basecore
|
1a883ec85d8d2f49663c76e1a1bc5068333f5508
|
[
"BSD-2-Clause"
] | 1
|
2021-11-01T19:33:11.000Z
|
2021-11-01T19:33:11.000Z
|
## @file
# This file is used to define common string related functions used in parsing process
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import re
from . import DataType
import Common.LongFilePathOs as os
import string
from . import EdkLogger as EdkLogger
from . import GlobalData
from .BuildToolError import *
from CommonDataClass.Exceptions import *
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
gHexVerPatt = re.compile('0x[a-f0-9]{4}[a-f0-9]{4}$', re.IGNORECASE)
gHumanReadableVerPatt = re.compile(r'([1-9][0-9]*|0)\.[0-9]{1,2}$')
## GetSplitValueList
#
# Get a value list from a string with multiple values split with SplitTag
# The default SplitTag is DataType.TAB_VALUE_SPLIT
# 'AAA|BBB|CCC' -> ['AAA', 'BBB', 'CCC']
#
# @param String: The input string to be split
# @param SplitTag: The split key, default is DataType.TAB_VALUE_SPLIT
# @param MaxSplit: The max number of split values, default is -1
#
# @retval list() A list of the split values
#
def GetSplitValueList(String, SplitTag=DataType.TAB_VALUE_SPLIT, MaxSplit= -1):
ValueList = []
Last = 0
Escaped = False
InSingleQuoteString = False
InDoubleQuoteString = False
InParenthesis = 0
for Index in range(0, len(String)):
Char = String[Index]
if not Escaped:
# Found a splitter not in a string, split it
            if not InSingleQuoteString and not InDoubleQuoteString and InParenthesis == 0 and Char == SplitTag:
ValueList.append(String[Last:Index].strip())
Last = Index + 1
if MaxSplit > 0 and len(ValueList) >= MaxSplit:
break
if Char == '\\' and (InSingleQuoteString or InDoubleQuoteString):
Escaped = True
elif Char == '"' and not InSingleQuoteString:
if not InDoubleQuoteString:
InDoubleQuoteString = True
else:
InDoubleQuoteString = False
elif Char == "'" and not InDoubleQuoteString:
if not InSingleQuoteString:
InSingleQuoteString = True
else:
InSingleQuoteString = False
elif Char == '(':
InParenthesis = InParenthesis + 1
elif Char == ')':
InParenthesis = InParenthesis - 1
else:
Escaped = False
if Last < len(String):
ValueList.append(String[Last:].strip())
elif Last == len(String):
ValueList.append('')
return ValueList
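# Illustrative behaviour beyond the basic case above (inputs chosen purely for
# demonstration):
#   GetSplitValueList('A|(B|C)|D')          -> ['A', '(B|C)', 'D']
#   GetSplitValueList('A|B|C', MaxSplit=1)  -> ['A', 'B|C']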
## GetSplitList
#
# Get a value list from a string with multiple values split with SplitString
# The default SplitTag is DataType.TAB_VALUE_SPLIT
# 'AAA|BBB|CCC' -> ['AAA', 'BBB', 'CCC']
#
# @param String: The input string to be split
# @param SplitStr: The split key, default is DataType.TAB_VALUE_SPLIT
# @param MaxSplit: The max number of split values, default is -1
#
# @retval list() A list of the split values
#
def GetSplitList(String, SplitStr=DataType.TAB_VALUE_SPLIT, MaxSplit= -1):
return list(map(lambda l: l.strip(), String.split(SplitStr, MaxSplit)))
## MergeArches
#
# Find a key's all arches in dict, add the new arch to the list
# If not exist any arch, set the arch directly
#
# @param Dict: The input value for Dict
# @param Key: The input value for Key
# @param Arch: The Arch to be added or merged
#
def MergeArches(Dict, Key, Arch):
if Key in Dict:
Dict[Key].append(Arch)
else:
Dict[Key] = Arch.split()
## GenDefines
#
# Parse a string with format "DEFINE <VarName> = <PATH>"
# Generate a map Defines[VarName] = PATH
# Return False if invalid format
#
# @param String: String with DEFINE statement
# @param Arch: Supported Arch
# @param Defines: DEFINE statement to be parsed
#
# @retval 0 DEFINE statement found, and valid
# @retval -1 DEFINE statement found, but not valid
# @retval 1 DEFINE statement not found
#
def GenDefines(String, Arch, Defines):
if String.find(DataType.TAB_DEFINE + ' ') > -1:
List = String.replace(DataType.TAB_DEFINE + ' ', '').split(DataType.TAB_EQUAL_SPLIT)
if len(List) == 2:
Defines[(CleanString(List[0]), Arch)] = CleanString(List[1])
return 0
else:
return -1
return 1
## GenInclude
#
# Parse a string with format "!include <Filename>"
# Return the file path
# Return False if invalid format or NOT FOUND
#
# @param String: String with INCLUDE statement
# @param IncludeFiles: INCLUDE statement to be parsed
# @param Arch: Supported Arch
#
# @retval True
# @retval False
#
def GenInclude(String, IncludeFiles, Arch):
if String.upper().find(DataType.TAB_INCLUDE.upper() + ' ') > -1:
IncludeFile = CleanString(String[String.upper().find(DataType.TAB_INCLUDE.upper() + ' ') + len(DataType.TAB_INCLUDE + ' ') : ])
MergeArches(IncludeFiles, IncludeFile, Arch)
return True
else:
return False
## GetLibraryClassesWithModuleType
#
# Get Library Class definition when no module type defined
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
# @retval True Get library classes successfully
#
def GetLibraryClassesWithModuleType(Lines, Key, KeyValues, CommentCharacter):
newKey = SplitModuleType(Key)
Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
LineList = Lines.splitlines()
for Line in LineList:
Line = CleanString(Line, CommentCharacter)
if Line != '' and Line[0] != CommentCharacter:
KeyValues.append([CleanString(Line, CommentCharacter), newKey[1]])
return True
## GetDynamics
#
# Get Dynamic Pcds
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
# @retval True Get Dynamic Pcds successfully
#
def GetDynamics(Lines, Key, KeyValues, CommentCharacter):
#
# Get SkuId Name List
#
SkuIdNameList = SplitModuleType(Key)
Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
LineList = Lines.splitlines()
for Line in LineList:
Line = CleanString(Line, CommentCharacter)
if Line != '' and Line[0] != CommentCharacter:
KeyValues.append([CleanString(Line, CommentCharacter), SkuIdNameList[1]])
return True
## SplitModuleType
#
# Split ModuleType out of the section definition to get the key
# [LibraryClass.Arch.ModuleType|ModuleType|ModuleType] -> [ 'LibraryClass.Arch', ['ModuleType', 'ModuleType', 'ModuleType'] ]
#
# @param Key: String to be parsed
#
# @retval ReturnValue A list for module types
#
def SplitModuleType(Key):
KeyList = Key.split(DataType.TAB_SPLIT)
#
# Fill in for arch
#
KeyList.append('')
#
# Fill in for moduletype
#
KeyList.append('')
ReturnValue = []
KeyValue = KeyList[0]
if KeyList[1] != '':
KeyValue = KeyValue + DataType.TAB_SPLIT + KeyList[1]
ReturnValue.append(KeyValue)
ReturnValue.append(GetSplitValueList(KeyList[2]))
return ReturnValue
## Replace macro in strings list
#
# This method replace macros used in a given string list. The macros are
# given in a dictionary.
#
# @param StringList StringList to be processed
# @param MacroDefinitions The macro definitions in the form of dictionary
# @param SelfReplacement To decide whether replace un-defined macro to ''
#
# @retval NewList A new string list whose macros are replaced
#
def ReplaceMacros(StringList, MacroDefinitions={}, SelfReplacement=False):
NewList = []
for String in StringList:
if isinstance(String, type('')):
NewList.append(ReplaceMacro(String, MacroDefinitions, SelfReplacement))
else:
NewList.append(String)
return NewList
## Replace macro in string
#
# This method replace macros used in given string. The macros are given in a
# dictionary.
#
# @param String String to be processed
# @param MacroDefinitions The macro definitions in the form of dictionary
# @param SelfReplacement To decide whether replace un-defined macro to ''
#
# @retval string The string whose macros are replaced
#
def ReplaceMacro(String, MacroDefinitions={}, SelfReplacement=False, RaiseError=False):
LastString = String
while String and MacroDefinitions:
MacroUsed = GlobalData.gMacroRefPattern.findall(String)
# no macro found in String, stop replacing
if len(MacroUsed) == 0:
break
for Macro in MacroUsed:
if Macro not in MacroDefinitions:
if RaiseError:
raise SymbolNotFound("%s not defined" % Macro)
if SelfReplacement:
String = String.replace("$(%s)" % Macro, '')
continue
if "$(%s)" % Macro not in MacroDefinitions[Macro]:
String = String.replace("$(%s)" % Macro, MacroDefinitions[Macro])
# in case there's macro not defined
if String == LastString:
break
LastString = String
return String
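# Illustrative behaviour (macro name and paths chosen purely for demonstration):
#   ReplaceMacro('$(WORKSPACE)/Build', {'WORKSPACE': '/edk2'}) -> '/edk2/Build'
# With SelfReplacement=True, a reference to a macro that is missing from
# MacroDefinitions is replaced with the empty string instead of being kept.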
## NormPath
#
# Create a normal path
# And replace DEFINE in the path
#
# @param Path: The input value for Path to be converted
# @param Defines: A set for DEFINE statement
#
# @retval Path Formatted path
#
def NormPath(Path, Defines={}):
IsRelativePath = False
if Path:
if Path[0] == '.':
IsRelativePath = True
#
# Replace with Define
#
if Defines:
Path = ReplaceMacro(Path, Defines)
#
# To local path format
#
Path = os.path.normpath(Path)
if Path.startswith(GlobalData.gWorkspace) and not Path.startswith(GlobalData.gBuildDirectory) and not os.path.exists(Path):
Path = Path[len (GlobalData.gWorkspace):]
if Path[0] == os.path.sep:
Path = Path[1:]
Path = mws.join(GlobalData.gWorkspace, Path)
if IsRelativePath and Path[0] != '.':
Path = os.path.join('.', Path)
return Path
## CleanString
#
# Remove comments in a string
# Remove spaces
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content, default is DataType.TAB_COMMENT_SPLIT
#
# @retval Path Formatted path
#
def CleanString(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False, BuildOption=False):
#
# remove whitespace
#
Line = Line.strip();
#
# Replace Edk's comment character
#
if AllowCppStyleComment:
Line = Line.replace(DataType.TAB_COMMENT_EDK_SPLIT, CommentCharacter)
#
# remove comments, but we should escape comment character in string
#
InDoubleQuoteString = False
InSingleQuoteString = False
CommentInString = False
for Index in range(0, len(Line)):
if Line[Index] == '"' and not InSingleQuoteString:
InDoubleQuoteString = not InDoubleQuoteString
elif Line[Index] == "'" and not InDoubleQuoteString:
InSingleQuoteString = not InSingleQuoteString
elif Line[Index] == CommentCharacter and (InSingleQuoteString or InDoubleQuoteString):
CommentInString = True
elif Line[Index] == CommentCharacter and not (InSingleQuoteString or InDoubleQuoteString):
Line = Line[0: Index]
break
if CommentInString and BuildOption:
Line = Line.replace('"', '')
ChIndex = Line.find('#')
while ChIndex >= 0:
if GlobalData.gIsWindows:
if ChIndex == 0 or Line[ChIndex - 1] != '^':
Line = Line[0:ChIndex] + '^' + Line[ChIndex:]
ChIndex = Line.find('#', ChIndex + 2)
else:
ChIndex = Line.find('#', ChIndex + 1)
else:
if ChIndex == 0 or Line[ChIndex - 1] != '\\':
Line = Line[0:ChIndex] + '\\' + Line[ChIndex:]
ChIndex = Line.find('#', ChIndex + 2)
else:
ChIndex = Line.find('#', ChIndex + 1)
#
# remove whitespace again
#
Line = Line.strip();
return Line
## CleanString2
#
# Split statement with comments in a string
# Remove spaces
#
# @param Line: The string to be cleaned
# @param CommentCharacter: Comment char, used to ignore comment content, default is DataType.TAB_COMMENT_SPLIT
#
# @retval (Line, Comment) The statement part and the comment part of the line
#
def CleanString2(Line, CommentCharacter=DataType.TAB_COMMENT_SPLIT, AllowCppStyleComment=False):
#
# remove whitespace
#
    Line = Line.strip()
#
# Replace Edk's comment character
#
if AllowCppStyleComment:
Line = Line.replace(DataType.TAB_COMMENT_EDK_SPLIT, CommentCharacter)
#
# separate comments and statements, but we should escape comment character in string
#
InDoubleQuoteString = False
InSingleQuoteString = False
CommentInString = False
Comment = ''
for Index in range(0, len(Line)):
if Line[Index] == '"' and not InSingleQuoteString:
InDoubleQuoteString = not InDoubleQuoteString
elif Line[Index] == "'" and not InDoubleQuoteString:
InSingleQuoteString = not InSingleQuoteString
elif Line[Index] == CommentCharacter and (InDoubleQuoteString or InSingleQuoteString):
CommentInString = True
elif Line[Index] == CommentCharacter and not (InDoubleQuoteString or InSingleQuoteString):
Comment = Line[Index:].strip()
Line = Line[0:Index].strip()
break
return Line, Comment
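# Illustrative usage (editor's sketch, not part of the original source):
#   CleanString2('TokenName|TRUE  # feature flag')   # -> ('TokenName|TRUE', '# feature flag')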
## GetMultipleValuesOfKeyFromLines
#
# Parse multiple strings to clean comment and spaces
# The result is saved to KeyValues
#
# @param Lines: The content to be parsed
# @param Key: Reserved
# @param KeyValues: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
#
# @retval True Successfully executed
#
def GetMultipleValuesOfKeyFromLines(Lines, Key, KeyValues, CommentCharacter):
Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
LineList = Lines.split('\n')
for Line in LineList:
Line = CleanString(Line, CommentCharacter)
if Line != '' and Line[0] != CommentCharacter:
KeyValues.append(Line)
return True
## GetDefineValue
#
# Parse a DEFINE statement to get defined value
# DEFINE Key Value
#
# @param String: The content to be parsed
# @param Key: The key of DEFINE statement
# @param CommentCharacter: Comment char, used to ignore comment content
#
# @retval string The defined value
#
def GetDefineValue(String, Key, CommentCharacter):
String = CleanString(String)
return String[String.find(Key + ' ') + len(Key + ' ') : ]
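# Illustrative usage (editor's sketch, not part of the original source): the
# returned value is everything after "<Key> " on the cleaned line:
#   GetDefineValue('DEFINE GCC_FLAGS -Os # size', 'DEFINE', '#')   # -> 'GCC_FLAGS -Os'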
## GetHexVerValue
#
# Get a Hex Version Value
#
# @param VerString: The version string to be parsed
#
#
# @retval: If VerString is incorrectly formatted, return "None" which will break the build.
# If VerString is correctly formatted, return a Hex value of the Version Number (0xmmmmnnnn)
# where mmmm is the major number and nnnn is the adjusted minor number.
#
def GetHexVerValue(VerString):
VerString = CleanString(VerString)
if gHumanReadableVerPatt.match(VerString):
ValueList = VerString.split('.')
Major = ValueList[0]
Minor = ValueList[1]
if len(Minor) == 1:
Minor += '0'
        DeciValue = (int(Major) << 16) + int(Minor)
return "0x%08x" % DeciValue
elif gHexVerPatt.match(VerString):
return VerString
else:
return None
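# Illustrative usage (editor's sketch, not part of the original source); it
# assumes gHumanReadableVerPatt matches "major.minor" strings and gHexVerPatt
# matches "0x..." values:
#   GetHexVerValue('1.10')        # -> '0x0001000a'
#   GetHexVerValue('1.1')         # -> '0x0001000a' (single-digit minor is padded to '10')
#   GetHexVerValue('0x00010009')  # -> '0x00010009' (already hex, passed through)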
## GetSingleValueOfKeyFromLines
#
# Parse multiple strings as below to get value of each definition line
# Key1 = Value1
# Key2 = Value2
# The result is saved to Dictionary
#
# @param Lines: The content to be parsed
# @param Dictionary: To store data after parsing
# @param CommentCharacter: Comment char, used to ignore comment content
# @param KeySplitCharacter: Key split char, between key name and key value. Key1 = Value1, '=' is the key split char
# @param ValueSplitFlag: Value split flag, used to decide whether a key has multiple values
# @param ValueSplitCharacter: Value split char, used to split multiple values. Key1 = Value1|Value2, '|' is the value split char
#
# @retval True Successfully executed
#
def GetSingleValueOfKeyFromLines(Lines, Dictionary, CommentCharacter, KeySplitCharacter, ValueSplitFlag, ValueSplitCharacter):
Lines = Lines.split('\n')
Keys = []
Value = ''
DefineValues = ['']
SpecValues = ['']
for Line in Lines:
#
# Handle DEFINE and SPEC
#
if Line.find(DataType.TAB_INF_DEFINES_DEFINE + ' ') > -1:
if '' in DefineValues:
DefineValues.remove('')
DefineValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_DEFINE, CommentCharacter))
continue
if Line.find(DataType.TAB_INF_DEFINES_SPEC + ' ') > -1:
if '' in SpecValues:
SpecValues.remove('')
SpecValues.append(GetDefineValue(Line, DataType.TAB_INF_DEFINES_SPEC, CommentCharacter))
continue
#
# Handle Others
#
LineList = Line.split(KeySplitCharacter, 1)
if len(LineList) >= 2:
Key = LineList[0].split()
if len(Key) == 1 and Key[0][0] != CommentCharacter:
#
# Remove comments and white spaces
#
LineList[1] = CleanString(LineList[1], CommentCharacter)
if ValueSplitFlag:
                    # str.strip: the Python 2 string.strip helper no longer exists in Python 3
                    Value = list(map(str.strip, LineList[1].split(ValueSplitCharacter)))
else:
Value = CleanString(LineList[1], CommentCharacter).splitlines()
if Key[0] in Dictionary:
if Key[0] not in Keys:
Dictionary[Key[0]] = Value
Keys.append(Key[0])
else:
Dictionary[Key[0]].extend(Value)
else:
Dictionary[DataType.TAB_INF_DEFINES_MACRO][Key[0]] = Value[0]
if DefineValues == []:
DefineValues = ['']
if SpecValues == []:
SpecValues = ['']
Dictionary[DataType.TAB_INF_DEFINES_DEFINE] = DefineValues
Dictionary[DataType.TAB_INF_DEFINES_SPEC] = SpecValues
return True
## PreCheck
#
# Do pre-check for a file before it is parsed
# Check $()
# Check []
#
# @param FileName: Used for error report
# @param FileContent: File content to be parsed
# @param SupSectionTag: Used for error report
#
def PreCheck(FileName, FileContent, SupSectionTag):
LineNo = 0
IsFailed = False
NewFileContent = ''
for Line in FileContent.splitlines():
LineNo = LineNo + 1
#
# Clean current line
#
Line = CleanString(Line)
#
# Remove commented line
#
if Line.find(DataType.TAB_COMMA_SPLIT) == 0:
Line = ''
#
# Check $()
#
if Line.find('$') > -1:
if Line.find('$(') < 0 or Line.find(')') < 0:
EdkLogger.error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=EdkLogger.IsRaiseError)
#
# Check []
#
if Line.find('[') > -1 or Line.find(']') > -1:
#
# Only get one '[' or one ']'
#
if not (Line.find('[') > -1 and Line.find(']') > -1):
EdkLogger.error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=EdkLogger.IsRaiseError)
#
# Regenerate FileContent
#
NewFileContent = NewFileContent + Line + '\n'
if IsFailed:
EdkLogger.error("Parser", FORMAT_INVALID, Line=LineNo, File=FileName, RaiseError=EdkLogger.IsRaiseError)
return NewFileContent
## CheckFileType
#
# Check if the Filename includes the extension ExtName
# Return True if it does
# Raise an error message if it does not
#
# @param CheckFilename: Name of the file to be checked
# @param ExtName: Ext name of the file to be checked
# @param ContainerFilename: The container file which describes the file to be checked, used for error report
# @param SectionName: Used for error report
# @param Line: The line in container file which defines the file to be checked
#
# @retval True The file type is correct
#
def CheckFileType(CheckFilename, ExtName, ContainerFilename, SectionName, Line, LineNo= -1):
if CheckFilename != '' and CheckFilename is not None:
(Root, Ext) = os.path.splitext(CheckFilename)
if Ext.upper() != ExtName.upper():
ContainerFile = open(ContainerFilename, 'r').read()
if LineNo == -1:
LineNo = GetLineNo(ContainerFile, Line)
ErrorMsg = "Invalid %s. '%s' is found, but '%s' file is needed" % (SectionName, CheckFilename, ExtName)
EdkLogger.error("Parser", PARSER_ERROR, ErrorMsg, Line=LineNo,
File=ContainerFilename, RaiseError=EdkLogger.IsRaiseError)
return True
## CheckFileExist
#
# Check if the file exists
# Return True if it exists
# Raise an error message if it does not exist
#
# @param CheckFilename: Name of the file to be checked
# @param WorkspaceDir: Current workspace dir
# @param ContainerFilename: The container file which describes the file to be checked, used for error report
# @param SectionName: Used for error report
# @param Line: The line in container file which defines the file to be checked
#
# @retval The file full path if the file exists
#
def CheckFileExist(WorkspaceDir, CheckFilename, ContainerFilename, SectionName, Line, LineNo= -1):
CheckFile = ''
if CheckFilename != '' and CheckFilename is not None:
CheckFile = WorkspaceFile(WorkspaceDir, CheckFilename)
if not os.path.isfile(CheckFile):
ContainerFile = open(ContainerFilename, 'r').read()
if LineNo == -1:
LineNo = GetLineNo(ContainerFile, Line)
ErrorMsg = "Can't find file '%s' defined in section '%s'" % (CheckFile, SectionName)
EdkLogger.error("Parser", PARSER_ERROR, ErrorMsg,
File=ContainerFilename, Line=LineNo, RaiseError=EdkLogger.IsRaiseError)
return CheckFile
## GetLineNo
#
# Find the index of a line in a file
#
# @param FileContent: Search scope
# @param Line: Search key
#
# @retval int Index of the line
# @retval -1 The line is not found
#
def GetLineNo(FileContent, Line, IsIgnoreComment=True):
LineList = FileContent.splitlines()
for Index in range(len(LineList)):
if LineList[Index].find(Line) > -1:
#
# Ignore statement in comment
#
if IsIgnoreComment:
if LineList[Index].strip()[0] == DataType.TAB_COMMENT_SPLIT:
continue
return Index + 1
return -1
## RaiseParserError
#
# Raise a parser error
#
# @param Line: String which has error
# @param Section: Used for error report
# @param File: File which has the string
# @param Format: Correct format
#
def RaiseParserError(Line, Section, File, Format='', LineNo= -1):
if LineNo == -1:
LineNo = GetLineNo(open(os.path.normpath(File), 'r').read(), Line)
ErrorMsg = "Invalid statement '%s' is found in section '%s'" % (Line, Section)
if Format != '':
Format = "Correct format is " + Format
EdkLogger.error("Parser", PARSER_ERROR, ErrorMsg, File=File, Line=LineNo, ExtraData=Format, RaiseError=EdkLogger.IsRaiseError)
## WorkspaceFile
#
# Return a full path with workspace dir
#
# @param WorkspaceDir: Workspace dir
# @param Filename: Relative file name
#
# @retval string A full path
#
def WorkspaceFile(WorkspaceDir, Filename):
return mws.join(NormPath(WorkspaceDir), NormPath(Filename))
## Split string
#
# Revmove '"' which startswith and endswith string
#
# @param String: The string need to be splited
#
# @retval String: The string after removed '""'
#
def SplitString(String):
if String.startswith('\"'):
String = String[1:]
if String.endswith('\"'):
String = String[:-1]
return String
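# Illustrative usage (editor's sketch, not part of the original source):
#   SplitString('"MyPlatform"')   # -> 'MyPlatform'
#   SplitString('NoQuotes')       # -> 'NoQuotes'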
## Convert To Sql String
#
# 1. Replace "'" with "''" in each item of StringList
#
# @param StringList: A list for strings to be converted
#
def ConvertToSqlString(StringList):
return list(map(lambda s: s.replace("'", "''"), StringList))
## Convert To Sql String
#
# 1. Replace "'" with "''" in the String
#
# @param String: A String to be converted
#
def ConvertToSqlString2(String):
return String.replace("'", "''")
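# Illustrative usage (editor's sketch, not part of the original source):
#   ConvertToSqlString(["it's", "ok"])   # -> ["it''s", "ok"]
#   ConvertToSqlString2("it's")          # -> "it''s"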
#
# Remove comment block
#
def RemoveBlockComment(Lines):
IsFindBlockComment = False
IsFindBlockCode = False
ReservedLine = ''
NewLines = []
for Line in Lines:
Line = Line.strip()
#
# Remove comment block
#
if Line.find(DataType.TAB_COMMENT_EDK_START) > -1:
ReservedLine = GetSplitList(Line, DataType.TAB_COMMENT_EDK_START, 1)[0]
IsFindBlockComment = True
if Line.find(DataType.TAB_COMMENT_EDK_END) > -1:
Line = ReservedLine + GetSplitList(Line, DataType.TAB_COMMENT_EDK_END, 1)[1]
ReservedLine = ''
IsFindBlockComment = False
if IsFindBlockComment:
NewLines.append('')
continue
NewLines.append(Line)
return NewLines
#
# Get String of a List
#
def GetStringOfList(List, Split=' '):
    if not isinstance(List, list):
return List
Str = ''
for Item in List:
Str = Str + Item + Split
return Str.strip()
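# Illustrative usage (editor's sketch, not part of the original source); note
# that only trailing whitespace is stripped, not a custom separator:
#   GetStringOfList(['IA32', 'X64'])        # -> 'IA32 X64'
#   GetStringOfList(['IA32', 'X64'], '|')   # -> 'IA32|X64|'
#   GetStringOfList('already-a-string')     # -> 'already-a-string'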
#
# Get HelpTextList from HelpTextClassList
#
def GetHelpTextList(HelpTextClassList):
List = []
if HelpTextClassList:
for HelpText in HelpTextClassList:
if HelpText.String.endswith('\n'):
HelpText.String = HelpText.String[0: len(HelpText.String) - len('\n')]
List.extend(HelpText.String.split('\n'))
return List
def StringToArray(String):
if String.startswith('L"'):
if String == "L\"\"":
return "{0x00,0x00}"
else:
return "{%s,0x00,0x00}" % ",".join("0x%02x,0x00" % ord(C) for C in String[2:-1])
elif String.startswith('"'):
if String == "\"\"":
return "{0x00,0x00}"
else:
StringLen = len(String[1:-1])
if StringLen % 2:
return "{%s,0x00}" % ",".join("0x%02x" % ord(C) for C in String[1:-1])
else:
return "{%s,0x00,0x00}" % ",".join("0x%02x" % ord(C) for C in String[1:-1])
elif String.startswith('{'):
return "{%s}" % ",".join(C.strip() for C in String[1:-1].split(','))
else:
if len(String.split()) % 2:
return '{%s,0}' % ','.join(String.split())
else:
return '{%s,0,0}' % ','.join(String.split())
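# Illustrative usage (editor's sketch, not part of the original source):
#   StringToArray('"AB"')          # -> '{0x41,0x42,0x00,0x00}'
#   StringToArray('L"A"')          # -> '{0x41,0x00,0x00,0x00}'
#   StringToArray('{0x01, 0x02}')  # -> '{0x01,0x02}'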
def StringArrayLength(String):
if String.startswith('L"'):
return (len(String) - 3 + 1) * 2
elif String.startswith('"'):
return (len(String) - 2 + 1)
else:
return len(String.split()) + 1
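# Illustrative usage (editor's sketch, not part of the original source):
#   StringArrayLength('L"A"')       # -> 4
#   StringArrayLength('"AB"')       # -> 3
#   StringArrayLength('0x01 0x02')  # -> 3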
def RemoveDupOption(OptionString, Which="/I", Against=None):
OptionList = OptionString.split()
ValueList = []
if Against:
ValueList += Against
for Index in range(len(OptionList)):
Opt = OptionList[Index]
if not Opt.startswith(Which):
continue
if len(Opt) > len(Which):
Val = Opt[len(Which):]
else:
Val = ""
if Val in ValueList:
OptionList[Index] = ""
else:
ValueList.append(Val)
return " ".join(OptionList)
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
pass
| 33.923429
| 136
| 0.610215
|
794f4641bd91757768993b249a3e6594c0e0a49c
| 74
|
py
|
Python
|
linkable/__init__.py
|
meyt/linkable-py
|
13b2e523fd8e0ed36affac49bb8ad89361fc25c7
|
[
"MIT"
] | 1
|
2019-05-19T20:24:30.000Z
|
2019-05-19T20:24:30.000Z
|
linkable/__init__.py
|
meyt/linkable-py
|
13b2e523fd8e0ed36affac49bb8ad89361fc25c7
|
[
"MIT"
] | null | null | null |
linkable/__init__.py
|
meyt/linkable-py
|
13b2e523fd8e0ed36affac49bb8ad89361fc25c7
|
[
"MIT"
] | null | null | null |
from linkable.parser import Linkable, LinkableList
__version__ = '0.2.3'
| 18.5
| 50
| 0.783784
|
794f46d7e595933fb12cb60ac72ef59b4e3ba224
| 525
|
py
|
Python
|
main.py
|
waltatgit/Machine-Learning-Engineering-with-MLflow
|
42887869d9528356572c104392122cac5cdbf62c
|
[
"MIT"
] | null | null | null |
main.py
|
waltatgit/Machine-Learning-Engineering-with-MLflow
|
42887869d9528356572c104392122cac5cdbf62c
|
[
"MIT"
] | null | null | null |
main.py
|
waltatgit/Machine-Learning-Engineering-with-MLflow
|
42887869d9528356572c104392122cac5cdbf62c
|
[
"MIT"
] | null | null | null |
# This is a sample Python script.
# Press Ctrl+R to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
def print_hi(name):
# Use a breakpoint in the code line below to debug your script.
print(f'Hi, {name}') # Press Meta+F8 to toggle the breakpoint.
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
print_hi('PyCharm')
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| 30.882353
| 98
| 0.721905
|
794f46df8528b081606db63b52d6af494f941adc
| 4,007
|
py
|
Python
|
cifar/dyresA_resnet.py
|
Nyquixt/DyConv
|
255193068424aaa83352bee258d34cb8b32b6ee6
|
[
"MIT"
] | null | null | null |
cifar/dyresA_resnet.py
|
Nyquixt/DyConv
|
255193068424aaa83352bee258d34cb8b32b6ee6
|
[
"MIT"
] | null | null | null |
cifar/dyresA_resnet.py
|
Nyquixt/DyConv
|
255193068424aaa83352bee258d34cb8b32b6ee6
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from convs.dyres_conv import *
from convs.condconv import *
__all__ = ['DyResA_ResNet18']
class DyRes_BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_channels, channels, stride=1, num_experts=3):
super().__init__()
self.conv1 = DyResConv(in_channels, channels, kernel_size=3, stride=stride, padding=1,
num_experts=num_experts, mode='A')
self.bn1 = nn.BatchNorm2d(channels)
self.conv2 = DyResConv(channels, channels, kernel_size=3, stride=1, padding=1,
num_experts=num_experts, mode='A')
self.bn2 = nn.BatchNorm2d(channels)
self.shortcut = nn.Sequential()
if stride != 1 or in_channels != channels:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, self.expansion*channels,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*channels)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
# Addition
out += self.shortcut(x)
out = F.relu(out)
return out
class CondConv_BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_channels, channels, stride=1, num_experts=3):
super().__init__()
self.conv1 = CondConv(in_channels, channels, kernel_size=3, stride=stride, padding=1, num_experts=num_experts)
self.bn1 = nn.BatchNorm2d(channels)
self.conv2 = CondConv(channels, channels, kernel_size=3, stride=1, padding=1, num_experts=num_experts)
self.bn2 = nn.BatchNorm2d(channels)
self.shortcut = nn.Sequential()
if stride != 1 or in_channels != channels:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, self.expansion*channels,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*channels)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
# Addition
out += self.shortcut(x)
out = F.relu(out)
return out
class DyResA_ResNet(nn.Module):
def __init__(self, block1, block2, num_blocks, num_classes=100, num_experts=3):
super().__init__()
self.in_channels = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block1, 64, num_blocks[0], stride=1, num_experts=num_experts)
self.layer2 = self._make_layer(block1, 128, num_blocks[1], stride=2, num_experts=num_experts)
self.layer3 = self._make_layer(block2, 256, num_blocks[2], stride=2, num_experts=num_experts)
self.layer4 = self._make_layer(block2, 512, num_blocks[3], stride=2, num_experts=num_experts)
self.linear = nn.Linear(512*block2.expansion, num_classes)
def _make_layer(self, block, channels, num_blocks, stride, num_experts):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_channels, channels, stride, num_experts))
self.in_channels = channels * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def DyResA_ResNet18(num_experts=3):
return DyResA_ResNet(DyRes_BasicBlock, CondConv_BasicBlock, [2, 2, 2, 2], num_experts=num_experts)
def test():
x = torch.randn(128, 3, 32, 32)
net1 = DyResA_ResNet18()
    y1 = net1(x)
    print(y1.size())
# test()
| 38.528846
| 118
| 0.622411
|
794f479582ad2fefd4b4453660611cf4f1a3c977
| 2,991
|
py
|
Python
|
startup/gui/shaderPresets.py
|
pier-robot/gaffer
|
9267f2ba3822b14430d8a283c745261110b0f570
|
[
"BSD-3-Clause"
] | null | null | null |
startup/gui/shaderPresets.py
|
pier-robot/gaffer
|
9267f2ba3822b14430d8a283c745261110b0f570
|
[
"BSD-3-Clause"
] | null | null | null |
startup/gui/shaderPresets.py
|
pier-robot/gaffer
|
9267f2ba3822b14430d8a283c745261110b0f570
|
[
"BSD-3-Clause"
] | null | null | null |
##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import IECore
import Gaffer
import GafferScene
def __registerShaderPresets( presets ) :
for name, value in presets :
Gaffer.Metadata.registerValue( GafferScene.ShaderTweaks, "shader", "preset:" + name, value )
Gaffer.Metadata.registerValue( GafferScene.ShaderQuery, "shader", "preset:" + name, value )
with IECore.IgnoredExceptions( ImportError ) :
import GafferArnold
__registerShaderPresets( [
( "Arnold Surface", "ai:surface" ),
( "Arnold Displacement", "ai:disp_map" ),
( "Arnold Light", "ai:light" ),
( "Arnold Gobo", "ai:lightFilter:gobo" ),
( "Arnold Decay", "ai:lightFilter:light_decay" ),
( "Arnold Barndoor", "ai:lightFilter:barndoor" ),
( "Arnold Blocker", "ai:lightFilter:filter" )
] )
if os.environ.get( "GAFFERAPPLESEED_HIDE_UI", "" ) != "1" :
with IECore.IgnoredExceptions( ImportError ) :
import GafferAppleseed
__registerShaderPresets( [
( "Appleseed Light", "as:light" ),
] )
with IECore.IgnoredExceptions( ImportError ) :
import GafferOSL
__registerShaderPresets( [
( "OSL Surface", "osl:surface" ),
( "OSL Light", "osl:light" ),
] )
__registerShaderPresets( [ ( "OpenGL Surface", "gl:surface" ) ] )
| 33.606742
| 94
| 0.684052
|
794f48799abfcef4d38226162cee134ac47219e4
| 256,774
|
py
|
Python
|
tensorflow/python/framework/ops.py
|
anonymous-313/tensorflow
|
b82785818b6b020d62340eaaece32b9c75858185
|
[
"Apache-2.0"
] | 4
|
2021-02-04T16:54:54.000Z
|
2021-03-02T18:04:54.000Z
|
tensorflow/python/framework/ops.py
|
anonymous-313/tensorflow
|
b82785818b6b020d62340eaaece32b9c75858185
|
[
"Apache-2.0"
] | 1
|
2021-03-30T21:23:47.000Z
|
2021-03-30T21:23:47.000Z
|
tensorflow/python/framework/ops.py
|
anonymous-313/tensorflow
|
b82785818b6b020d62340eaaece32b9c75858185
|
[
"Apache-2.0"
] | 1
|
2019-09-27T09:03:41.000Z
|
2019-09-27T09:03:41.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions used to construct graphs."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import sys
import threading
import types
import numpy as np
import six
from six.moves import map # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import function_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import versions_pb2
from tensorflow.core.protobuf import config_pb2
# pywrap_tensorflow must be imported first to avoid protobuf issues.
# (b/143110113)
# pylint: disable=invalid-import-order,g-bad-import-order,unused-import
from tensorflow.python import pywrap_tensorflow
from tensorflow.python import pywrap_tfe
# pylint: enable=invalid-import-order,g-bad-import-order,unused-import
from tensorflow.python import tf2
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.eager import context
from tensorflow.python.eager import core
from tensorflow.python.eager import monitoring
from tensorflow.python.eager import tape
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import registry
from tensorflow.python.framework import tensor_conversion_registry
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import traceable_stack
from tensorflow.python.framework import versions
from tensorflow.python.ops import control_flow_util
from tensorflow.python.platform import app
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.profiler import trace
from tensorflow.python.types import core as core_tf_types
from tensorflow.python.types import internal
from tensorflow.python.util import compat
from tensorflow.python.util import decorator_utils
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import function_utils
from tensorflow.python.util import lock_util
from tensorflow.python.util import memory
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_stack
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.deprecation import deprecated_args
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import kwarg_only
from tensorflow.python.util.tf_export import tf_export
ag_ctx = LazyLoader(
"ag_ctx", globals(),
"tensorflow.python.autograph.core.ag_ctx")
# Temporary global switches determining if we should enable the work-in-progress
# calls to the C API. These will be removed once all functionality is supported.
_USE_C_API = True
_USE_C_SHAPES = True
_api_usage_gauge = monitoring.BoolGauge(
"/tensorflow/api/ops_eager_execution",
"Whether ops.enable_eager_execution() is called.")
_tensor_equality_api_usage_gauge = monitoring.BoolGauge(
"/tensorflow/api/enable_tensor_equality",
"Whether ops.enable_tensor_equality() is called.")
_control_flow_api_gauge = monitoring.BoolGauge(
"/tensorflow/api/enable_control_flow_v2",
"Whether enable_control_flow_v2() is called.")
_tf_function_api_guage = monitoring.BoolGauge(
"/tensorflow/api/tf_function",
"Whether tf.function() is used.")
# pylint: disable=protected-access
_DTYPES_INTERN_TABLE = dtypes._INTERN_TABLE
# pylint: enable=protected-access
def tensor_id(tensor):
"""Returns a unique identifier for this Tensor."""
return tensor._id # pylint: disable=protected-access
class _UserDeviceSpec(object):
"""Store user-specified device and provide computation of merged device."""
def __init__(self, device_name_or_function):
self._device_name_or_function = device_name_or_function
self.display_name = str(self._device_name_or_function)
self.function = device_name_or_function
self.raw_string = None
if isinstance(device_name_or_function, pydev.MergeDevice):
self.is_null_merge = device_name_or_function.is_null_merge
elif callable(device_name_or_function):
self.is_null_merge = False
dev_func = self._device_name_or_function
func_name = function_utils.get_func_name(dev_func)
func_code = function_utils.get_func_code(dev_func)
if func_code:
fname = func_code.co_filename
lineno = func_code.co_firstlineno
else:
fname = "unknown"
lineno = -1
self.display_name = "%s<%s, %d>" % (func_name, fname, lineno)
elif device_name_or_function is None:
# NOTE(taylorrobie): This MUST be False. None signals a break in the
# device stack, so `is_null_merge` must be False for such a case to
# allow callers to safely skip over null merges without missing a None.
self.is_null_merge = False
else:
self.raw_string = device_name_or_function
self.function = pydev.merge_device(device_name_or_function)
self.is_null_merge = self.function.is_null_merge
# We perform this check in __init__ because it is of non-trivial cost,
# and self.string_merge is typically called many times.
self.fast_string_merge = isinstance(self.function, pydev.MergeDevice)
def string_merge(self, node_def):
if self.fast_string_merge:
return self.function.shortcut_string_merge(node_def)
return compat.as_str(_device_string(self.function(node_def)))
class NullContextmanager(object):
def __init__(self, *args, **kwargs):
pass
def __enter__(self):
pass
def __exit__(self, type_arg, value_arg, traceback_arg):
return False # False values do not suppress exceptions
def _override_helper(clazz_object, operator, func):
"""Overrides (string) operator on Tensors to call func.
Args:
clazz_object: the class to override for; either Tensor or SparseTensor.
operator: the string name of the operator to override.
func: the function that replaces the overridden operator.
Raises:
ValueError: If operator is not allowed to be overwritten.
"""
if operator not in Tensor.OVERLOADABLE_OPERATORS:
raise ValueError("Overriding %s is disallowed" % operator)
setattr(clazz_object, operator, func)
def _as_graph_element(obj):
"""Convert `obj` to a graph element if possible, otherwise return `None`.
Args:
obj: Object to convert.
Returns:
The result of `obj._as_graph_element()` if that method is available;
otherwise `None`.
"""
conv_fn = getattr(obj, "_as_graph_element", None)
if conv_fn and callable(conv_fn):
return conv_fn()
return None
# Deprecated - do not use.
# This API to avoid breaking estimator and tensorflow-mesh which depend on this
# internal API. The stub should be safe to use after TF 2.3 is released.
def is_dense_tensor_like(t):
return isinstance(t, core_tf_types.Tensor)
def uid():
"""A unique (within this program execution) integer."""
return pywrap_tfe.TFE_Py_UID()
def numpy_text(tensor, is_repr=False):
"""Human readable representation of a tensor's numpy value."""
if tensor.dtype.is_numpy_compatible:
# pylint: disable=protected-access
text = repr(tensor._numpy()) if is_repr else str(tensor._numpy())
# pylint: enable=protected-access
else:
text = "<unprintable>"
if "\n" in text:
text = "\n" + text
return text
@tf_export(v1=["enable_tensor_equality"])
def enable_tensor_equality():
"""Compare Tensors with element-wise comparison and thus be unhashable.
  Comparing tensors element-wise allows comparisons such as
tf.Variable(1.0) == 1.0. Element-wise equality implies that tensors are
unhashable. Thus tensors can no longer be directly used in sets or as a key in
a dictionary.
"""
_tensor_equality_api_usage_gauge.get_cell().set(True)
Tensor._USE_EQUALITY = True # pylint: disable=protected-access
@tf_export(v1=["disable_tensor_equality"])
def disable_tensor_equality():
"""Compare Tensors by their id and be hashable.
This is a legacy behaviour of TensorFlow and is highly discouraged.
"""
_tensor_equality_api_usage_gauge.get_cell().set(False)
Tensor._USE_EQUALITY = False # pylint: disable=protected-access
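# Illustrative behavior (editor's note, not part of the original source): with
# tensor equality enabled (the TF2 default), `==` compares element-wise and
# eager tensors are unhashable, so they cannot be used directly as dict/set keys:
#   tf.constant([1, 2]) == tf.constant([1, 2])   # -> boolean tensor [True, True]
#   {tf.constant(1)}                             # raises TypeError; use tensor.ref() instead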
# TODO(mdan): This object should subclass Symbol, not just Tensor.
@tf_export("Tensor", "experimental.numpy.ndarray", v1=["Tensor"])
class Tensor(internal.NativeObject, core_tf_types.Tensor):
"""A tensor is a multidimensional array of elements represented by a
`tf.Tensor` object. All elements are of a single known data type.
When writing a TensorFlow program, the main object that is
manipulated and passed around is the `tf.Tensor`.
A `tf.Tensor` has the following properties:
* a single data type (float32, int32, or string, for example)
* a shape
TensorFlow supports eager execution and graph execution. In eager
execution, operations are evaluated immediately. In graph
execution, a computational graph is constructed for later
evaluation.
TensorFlow defaults to eager execution. In the example below, the
matrix multiplication results are calculated immediately.
>>> # Compute some values using a Tensor
>>> c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
>>> d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
>>> e = tf.matmul(c, d)
>>> print(e)
tf.Tensor(
[[1. 3.]
[3. 7.]], shape=(2, 2), dtype=float32)
Note that during eager execution, you may discover your `Tensors` are actually
of type `EagerTensor`. This is an internal detail, but it does give you
access to a useful function, `numpy`:
>>> type(e)
<class '...ops.EagerTensor'>
>>> print(e.numpy())
[[1. 3.]
[3. 7.]]
In TensorFlow, `tf.function`s are a common way to define graph execution.
A Tensor's shape (that is, the rank of the Tensor and the size of
each dimension) may not always be fully known. In `tf.function`
definitions, the shape may only be partially known.
Most operations produce tensors of fully-known shapes if the shapes of their
inputs are also fully known, but in some cases it's only possible to find the
shape of a tensor at execution time.
A number of specialized tensors are available: see `tf.Variable`,
`tf.constant`, `tf.placeholder`, `tf.sparse.SparseTensor`, and
`tf.RaggedTensor`.
For more on Tensors, see the [guide](https://tensorflow.org/guide/tensor).
"""
# List of Python operators that we allow to override.
OVERLOADABLE_OPERATORS = {
# Binary.
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__div__",
"__rdiv__",
"__truediv__",
"__rtruediv__",
"__floordiv__",
"__rfloordiv__",
"__mod__",
"__rmod__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
"__ne__",
"__eq__",
"__and__",
"__rand__",
"__or__",
"__ror__",
"__xor__",
"__rxor__",
"__getitem__",
"__pow__",
"__rpow__",
# Unary.
"__invert__",
"__neg__",
"__abs__",
"__matmul__",
"__rmatmul__"
}
# Whether to allow hashing or numpy-style equality
_USE_EQUALITY = tf2.enabled()
def __init__(self, op, value_index, dtype):
"""Creates a new `Tensor`.
Args:
op: An `Operation`. `Operation` that computes this tensor.
value_index: An `int`. Index of the operation's endpoint that produces
this tensor.
dtype: A `DType`. Type of elements stored in this tensor.
Raises:
TypeError: If the op is not an `Operation`.
"""
if not isinstance(op, Operation):
raise TypeError("op needs to be an Operation: %s" % (op,))
self._op = op
self._value_index = value_index
self._dtype = dtypes.as_dtype(dtype)
# This will be set by self._as_tf_output().
self._tf_output = None
# This will be set by self.shape().
self._shape_val = None
# List of operations that use this Tensor as input. We maintain this list
# to easily navigate a computation graph.
self._consumers = []
self._id = uid()
self._name = None
def __getattr__(self, name):
if name in {"T", "astype", "ravel", "transpose", "reshape", "clip", "size",
"tolist", "data"}:
# TODO(wangpeng): Export the enable_numpy_behavior knob
raise AttributeError("""
If you are looking for numpy-related methods, please run the following:
import tensorflow.python.ops.numpy_ops.np_config
np_config.enable_numpy_behavior()""")
self.__getattribute__(name)
@staticmethod
def _create_with_tf_output(op, value_index, dtype, tf_output):
ret = Tensor(op, value_index, dtype)
ret._tf_output = tf_output
return ret
@property
def op(self):
"""The `Operation` that produces this tensor as an output."""
return self._op
@property
def dtype(self):
"""The `DType` of elements in this tensor."""
return self._dtype
@property
def graph(self):
"""The `Graph` that contains this tensor."""
return self._op.graph
@property
def name(self):
"""The string name of this tensor."""
if self._name is None:
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
self._name = "%s:%d" % (self._op.name, self._value_index)
return self._name
@property
def device(self):
"""The name of the device on which this tensor will be produced, or None."""
return self._op.device
@property
def shape(self):
"""Returns a `tf.TensorShape` that represents the shape of this tensor.
>>> t = tf.constant([1,2,3,4,5])
>>> t.shape
TensorShape([5])
`tf.Tensor.shape` is equivalent to `tf.Tensor.get_shape()`.
In a `tf.function` or when building a model using
`tf.keras.Input`, they return the build-time shape of the
tensor, which may be partially unknown.
A `tf.TensorShape` is not a tensor. Use `tf.shape(t)` to get a tensor
containing the shape, calculated at runtime.
See `tf.Tensor.get_shape()`, and `tf.TensorShape` for details and examples.
"""
if self._shape_val is None:
self._shape_val = self._c_api_shape()
return self._shape_val
def _c_api_shape(self):
"""Returns the TensorShape of this tensor according to the C API."""
c_graph = self._op._graph._c_graph # pylint: disable=protected-access
shape_vec, unknown_shape = pywrap_tf_session.TF_GraphGetTensorShapeHelper(
c_graph, self._as_tf_output())
if unknown_shape:
return tensor_shape.unknown_shape()
else:
shape_vec = [None if d == -1 else d for d in shape_vec]
return tensor_shape.TensorShape(shape_vec)
@property
def _shape(self):
logging.warning("Tensor._shape is private, use Tensor.shape "
"instead. Tensor._shape will eventually be removed.")
return self.shape
@_shape.setter
def _shape(self, value):
raise ValueError(
"Tensor._shape cannot be assigned, use Tensor.set_shape instead.")
def _disallow_when_autograph_disabled(self, task):
raise errors.OperatorNotAllowedInGraphError(
"{} is not allowed: AutoGraph is disabled in this function."
" Try decorating it directly with @tf.function.".format(task))
def _disallow_when_autograph_enabled(self, task):
raise errors.OperatorNotAllowedInGraphError(
"{} is not allowed: AutoGraph did convert this function. This might"
" indicate you are trying to use an unsupported feature.".format(task))
def _disallow_in_graph_mode(self, task):
raise errors.OperatorNotAllowedInGraphError(
"{} is not allowed in Graph execution. Use Eager execution or decorate"
" this function with @tf.function.".format(task))
def _disallow_bool_casting(self):
if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
self._disallow_when_autograph_disabled(
"using a `tf.Tensor` as a Python `bool`")
elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:
self._disallow_when_autograph_enabled(
"using a `tf.Tensor` as a Python `bool`")
else:
# Default: V1-style Graph execution.
self._disallow_in_graph_mode("using a `tf.Tensor` as a Python `bool`")
def _disallow_iteration(self):
if ag_ctx.control_status_ctx().status == ag_ctx.Status.DISABLED:
self._disallow_when_autograph_disabled("iterating over `tf.Tensor`")
elif ag_ctx.control_status_ctx().status == ag_ctx.Status.ENABLED:
self._disallow_when_autograph_enabled("iterating over `tf.Tensor`")
else:
# Default: V1-style Graph execution.
self._disallow_in_graph_mode("iterating over `tf.Tensor`")
def __iter__(self):
if not context.executing_eagerly():
self._disallow_iteration()
shape = self._shape_tuple()
if shape is None:
raise TypeError("Cannot iterate over a tensor with unknown shape.")
if not shape:
raise TypeError("Cannot iterate over a scalar tensor.")
if shape[0] is None:
raise TypeError(
"Cannot iterate over a tensor with unknown first dimension.")
return _TensorIterator(self, shape[0])
def _shape_as_list(self):
if self.shape.ndims is not None:
return [dim.value for dim in self.shape.dims]
else:
return None
def _shape_tuple(self):
shape = self._shape_as_list()
if shape is None:
return None
return tuple(shape)
def _rank(self):
"""Integer rank of this Tensor, if known, else None.
Returns:
Integer rank or None
"""
return self.shape.ndims
def get_shape(self):
"""Returns a `tf.TensorShape` that represents the shape of this tensor.
In eager execution the shape is always fully-known.
>>> a = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
>>> print(a.shape)
(2, 3)
`tf.Tensor.get_shape()` is equivalent to `tf.Tensor.shape`.
When executing in a `tf.function` or building a model using
`tf.keras.Input`, `Tensor.shape` may return a partial shape (including
`None` for unknown dimensions). See `tf.TensorShape` for more details.
>>> inputs = tf.keras.Input(shape = [10])
>>> # Unknown batch size
>>> print(inputs.shape)
(None, 10)
The shape is computed using shape inference functions that are
registered for each `tf.Operation`.
The returned `tf.TensorShape` is determined at *build* time, without
executing the underlying kernel. It is not a `tf.Tensor`. If you need a
shape *tensor*, either convert the `tf.TensorShape` to a `tf.constant`, or
use the `tf.shape(tensor)` function, which returns the tensor's shape at
*execution* time.
This is useful for debugging and providing early errors. For
example, when tracing a `tf.function`, no ops are being executed, shapes
may be unknown (See the [Concrete Functions
Guide](https://www.tensorflow.org/guide/concrete_function) for details).
>>> @tf.function
... def my_matmul(a, b):
... result = a@b
... # the `print` executes during tracing.
... print("Result shape: ", result.shape)
... return result
The shape inference functions propagate shapes to the extent possible:
>>> f = my_matmul.get_concrete_function(
... tf.TensorSpec([None,3]),
... tf.TensorSpec([3,5]))
Result shape: (None, 5)
    Tracing may fail if a shape mismatch can be detected:
>>> cf = my_matmul.get_concrete_function(
... tf.TensorSpec([None,3]),
... tf.TensorSpec([4,5]))
Traceback (most recent call last):
...
ValueError: Dimensions must be equal, but are 3 and 4 for 'matmul' (op:
'MatMul') with input shapes: [?,3], [4,5].
In some cases, the inferred shape may have unknown dimensions. If
the caller has additional information about the values of these
dimensions, `tf.ensure_shape` or `Tensor.set_shape()` can be used to augment
the inferred shape.
>>> @tf.function
... def my_fun(a):
... a = tf.ensure_shape(a, [5, 5])
... # the `print` executes during tracing.
... print("Result shape: ", a.shape)
... return a
>>> cf = my_fun.get_concrete_function(
... tf.TensorSpec([None, None]))
Result shape: (5, 5)
Returns:
A `tf.TensorShape` representing the shape of this tensor.
"""
return self.shape
def set_shape(self, shape):
"""Updates the shape of this tensor.
Note: It is recommended to use `tf.ensure_shape` instead of
`Tensor.set_shape`, because `tf.ensure_shape` provides better checking for
programming errors and can create guarantees for compiler
optimization.
With eager execution this operates as a shape assertion.
Here the shapes match:
>>> t = tf.constant([[1,2,3]])
>>> t.set_shape([1, 3])
Passing a `None` in the new shape allows any value for that axis:
>>> t.set_shape([1,None])
An error is raised if an incompatible shape is passed.
>>> t.set_shape([1,5])
Traceback (most recent call last):
...
ValueError: Tensor's shape (1, 3) is not compatible with supplied
shape [1, 5]
When executing in a `tf.function`, or building a model using
`tf.keras.Input`, `Tensor.set_shape` will *merge* the given `shape` with
the current shape of this tensor, and set the tensor's shape to the
merged value (see `tf.TensorShape.merge_with` for details):
>>> t = tf.keras.Input(shape=[None, None, 3])
>>> print(t.shape)
(None, None, None, 3)
Dimensions set to `None` are not updated:
>>> t.set_shape([None, 224, 224, None])
>>> print(t.shape)
(None, 224, 224, 3)
The main use case for this is to provide additional shape information
that cannot be inferred from the graph alone.
For example if you know all the images in a dataset have shape [28,28,3] you
can set it with `tf.set_shape`:
>>> @tf.function
... def load_image(filename):
... raw = tf.io.read_file(filename)
... image = tf.image.decode_png(raw, channels=3)
... # the `print` executes during tracing.
... print("Initial shape: ", image.shape)
... image.set_shape([28, 28, 3])
... print("Final shape: ", image.shape)
... return image
Trace the function, see the [Concrete Functions
Guide](https://www.tensorflow.org/guide/concrete_function) for details.
>>> cf = load_image.get_concrete_function(
... tf.TensorSpec([], dtype=tf.string))
Initial shape: (None, None, 3)
Final shape: (28, 28, 3)
Similarly the `tf.io.parse_tensor` function could return a tensor with
any shape, even the `tf.rank` is unknown. If you know that all your
serialized tensors will be 2d, set it with `set_shape`:
>>> @tf.function
... def my_parse(string_tensor):
... result = tf.io.parse_tensor(string_tensor, out_type=tf.float32)
... # the `print` executes during tracing.
... print("Initial shape: ", result.shape)
... result.set_shape([None, None])
... print("Final shape: ", result.shape)
... return result
Trace the function
>>> concrete_parse = my_parse.get_concrete_function(
... tf.TensorSpec([], dtype=tf.string))
Initial shape: <unknown>
Final shape: (None, None)
Make sure it works:
>>> t = tf.ones([5,3], dtype=tf.float32)
>>> serialized = tf.io.serialize_tensor(t)
>>> print(serialized.dtype)
<dtype: 'string'>
>>> print(serialized.shape)
()
>>> t2 = concrete_parse(serialized)
>>> print(t2.shape)
(5, 3)
Caution: `set_shape` ensures that the applied shape is compatible with
the existing shape, but it does not check at runtime. Setting
incorrect shapes can result in inconsistencies between the
statically-known graph and the runtime value of tensors. For runtime
validation of the shape, use `tf.ensure_shape` instead. It also modifies
the `shape` of the tensor.
>>> # Serialize a rank-3 tensor
>>> t = tf.ones([5,5,5], dtype=tf.float32)
>>> serialized = tf.io.serialize_tensor(t)
>>> # The function still runs, even though it `set_shape([None,None])`
>>> t2 = concrete_parse(serialized)
>>> print(t2.shape)
(5, 5, 5)
Args:
shape: A `TensorShape` representing the shape of this tensor, a
`TensorShapeProto`, a list, a tuple, or None.
Raises:
ValueError: If `shape` is not compatible with the current shape of
this tensor.
"""
# Reset cached shape.
self._shape_val = None
# We want set_shape to be reflected in the C API graph for when we run it.
if not isinstance(shape, tensor_shape.TensorShape):
shape = tensor_shape.TensorShape(shape)
dim_list = []
if shape.dims is None:
unknown_shape = True
else:
unknown_shape = False
for dim in shape.dims:
if dim.value is None:
dim_list.append(-1)
else:
dim_list.append(dim.value)
try:
pywrap_tf_session.TF_GraphSetTensorShape_wrapper(
self._op._graph._c_graph, # pylint: disable=protected-access
self._as_tf_output(),
dim_list,
unknown_shape)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
@property
def value_index(self):
"""The index of this tensor in the outputs of its `Operation`."""
return self._value_index
def consumers(self):
"""Returns a list of `Operation`s that consume this tensor.
Returns:
A list of `Operation`s.
"""
consumer_names = pywrap_tf_session.TF_OperationOutputConsumers_wrapper(
self._as_tf_output())
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(name)
for name in consumer_names
]
# pylint: enable=protected-access
def _as_node_def_input(self):
"""Return a value to use for the NodeDef "input" attribute.
The returned string can be used in a NodeDef "input" attribute
to indicate that the NodeDef uses this Tensor as input.
Raises:
ValueError: if this Tensor's Operation does not have a name.
Returns:
a string.
"""
if not self._op.name:
raise ValueError("Operation was not named: %s" % self._op)
if self._value_index == 0:
return self._op.name
else:
return "%s:%d" % (self._op.name, self._value_index)
def _as_tf_output(self):
# pylint: disable=protected-access
# NOTE: Beyond preventing unnecessary (re-)allocation, the cached object
# also guarantees that a dictionary of tf_output objects will retain a
# deterministic (yet unsorted) order which prevents memory blowup in the
# cache of executor(s) stored for every session.
if self._tf_output is None:
self._tf_output = c_api_util.tf_output(self.op._c_op, self.value_index)
return self._tf_output
# pylint: enable=protected-access
def __str__(self):
return "Tensor(\"%s\"%s%s%s)" % (
self.name,
(", shape=%s" %
self.get_shape()) if self.get_shape().ndims is not None else "",
(", dtype=%s" % self._dtype.name) if self._dtype else "",
(", device=%s" % self.device) if self.device else "")
def __repr__(self):
return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(),
self._dtype.name)
def __hash__(self):
g = getattr(self, "graph", None)
if (Tensor._USE_EQUALITY and executing_eagerly_outside_functions() and
(g is None or g.building_function)):
raise TypeError("Tensor is unhashable. "
"Instead, use tensor.ref() as the key.")
else:
return id(self)
def __copy__(self):
# TODO(b/77597810): get rid of Tensor copies.
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
# NOTE(mrry): This enables the Tensor's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Tensor class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Tensors interact
# with ndarrays.
__array_priority__ = 100
def __array__(self):
raise NotImplementedError(
"Cannot convert a symbolic Tensor ({}) to a numpy array."
" This error may indicate that you're trying to pass a Tensor to"
" a NumPy call, which is not supported".format(self.name))
def __len__(self):
raise TypeError("len is not well defined for symbolic Tensors. ({}) "
"Please call `x.shape` rather than `len(x)` for "
"shape information.".format(self.name))
# TODO(mdan): This convoluted machinery is hard to maintain. Clean up.
@staticmethod
def _override_operator(operator, func):
_override_helper(Tensor, operator, func)
def __bool__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This overload raises a `TypeError` when the user inadvertently
treats a `Tensor` as a boolean (most commonly in an `if` or `while`
statement), in code that was not converted by AutoGraph. For example:
```python
if tf.constant(True): # Will raise.
# ...
if tf.constant(5) < tf.constant(7): # Will raise.
# ...
```
Raises:
`TypeError`.
"""
self._disallow_bool_casting()
def __nonzero__(self):
"""Dummy method to prevent a tensor from being used as a Python `bool`.
This is the Python 2.x counterpart to `__bool__()` above.
Raises:
`TypeError`.
"""
self._disallow_bool_casting()
def eval(self, feed_dict=None, session=None):
"""Evaluates this tensor in a `Session`.
Note: If you are not using `compat.v1` libraries, you should not need this,
(or `feed_dict` or `Session`). In eager execution (or within `tf.function`)
you do not need to call `eval`.
Calling this method will execute all preceding operations that
produce the inputs needed for the operation that produces this
tensor.
*N.B.* Before invoking `Tensor.eval()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this tensor. If
none, the default session will be used.
Returns:
A numpy array corresponding to the value of this tensor.
"""
return _eval_using_default_session(self, feed_dict, self.graph, session)
@deprecation.deprecated(None, "Use ref() instead.")
def experimental_ref(self):
return self.ref()
def ref(self):
# tf.Variable also has the same ref() API. If you update the
# documentation here, please update tf.Variable.ref() as well.
"""Returns a hashable reference object to this Tensor.
The primary use case for this API is to put tensors in a set/dictionary.
We can't put tensors in a set/dictionary as `tensor.__hash__()` is no longer
available starting Tensorflow 2.0.
The following will raise an exception starting 2.0
>>> x = tf.constant(5)
>>> y = tf.constant(10)
>>> z = tf.constant(10)
>>> tensor_set = {x, y, z}
Traceback (most recent call last):
...
TypeError: Tensor is unhashable. Instead, use tensor.ref() as the key.
>>> tensor_dict = {x: 'five', y: 'ten'}
Traceback (most recent call last):
...
TypeError: Tensor is unhashable. Instead, use tensor.ref() as the key.
Instead, we can use `tensor.ref()`.
>>> tensor_set = {x.ref(), y.ref(), z.ref()}
>>> x.ref() in tensor_set
True
>>> tensor_dict = {x.ref(): 'five', y.ref(): 'ten', z.ref(): 'ten'}
>>> tensor_dict[y.ref()]
'ten'
Also, the reference object provides `.deref()` function that returns the
original Tensor.
>>> x = tf.constant(5)
>>> x.ref().deref()
<tf.Tensor: shape=(), dtype=int32, numpy=5>
"""
return object_identity.Reference(self)
# TODO(agarwal): consider getting rid of this.
# TODO(mdan): This object should not subclass ops.Tensor.
class _EagerTensorBase(Tensor):
"""Base class for EagerTensor."""
# __complex__, __int__, __float__ and __index__ may copy the tensor to CPU and
# only work for scalars; values are cast as per numpy.
def __complex__(self):
return complex(self._numpy())
def __int__(self):
return int(self._numpy())
def __long__(self):
return long(self._numpy())
def __float__(self):
return float(self._numpy())
def __index__(self):
return self._numpy().__index__()
def __bool__(self):
return bool(self._numpy())
__nonzero__ = __bool__
def __format__(self, format_spec):
return self._numpy().__format__(format_spec)
def __reduce__(self):
return convert_to_tensor, (self._numpy(),)
def __copy__(self):
# Eager Tensors are immutable so it's safe to return themselves as a copy.
return self
def __deepcopy__(self, memo):
# Eager Tensors are immutable so it's safe to return themselves as a copy.
del memo
return self
def __str__(self):
return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self), self.shape,
self.dtype.name)
def __repr__(self):
return "<tf.Tensor: shape=%s, dtype=%s, numpy=%s>" % (
self.shape, self.dtype.name, numpy_text(self, is_repr=True))
def __len__(self):
"""Returns the length of the first dimension in the Tensor."""
if not self.shape.ndims:
raise TypeError("Scalar tensor has no `len()`")
# pylint: disable=protected-access
try:
return self._shape_tuple()[0]
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
def __array__(self):
return self._numpy()
def _numpy_internal(self):
raise NotImplementedError()
def _numpy(self):
try:
return self._numpy_internal()
except core._NotOkStatusException as e: # pylint: disable=protected-access
six.raise_from(core._status_to_exception(e.code, e.message), None) # pylint: disable=protected-access
@property
def dtype(self):
# Note: using the intern table directly here as this is
# performance-sensitive in some models.
return dtypes._INTERN_TABLE[self._datatype_enum()] # pylint: disable=protected-access
def numpy(self):
"""Copy of the contents of this Tensor into a NumPy array or scalar.
Unlike NumPy arrays, Tensors are immutable, so this method has to copy
the contents to ensure safety. Use `memoryview` to get a readonly
view of the contents without doing a copy:
>>> t = tf.constant([42])
>>> np.array(memoryview(t))
array([42], dtype=int32)
Note that `memoryview` is only zero-copy for Tensors on CPU. If a Tensor
is on GPU, it will have to be transferred to CPU first in order for
`memoryview` to work.
Returns:
A NumPy array of the same shape and dtype or a NumPy scalar, if this
Tensor has rank 0.
Raises:
ValueError: If the dtype of this Tensor does not have a compatible
NumPy dtype.
"""
# TODO(slebedev): Consider avoiding a copy for non-CPU or remote tensors.
maybe_arr = self._numpy() # pylint: disable=protected-access
return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr
@property
def backing_device(self):
"""Returns the name of the device holding this tensor's memory.
`.backing_device` is usually the same as `.device`, which returns
the device on which the kernel of the operation that produced this tensor
ran. However, some operations can produce tensors on a different device
(e.g., an operation that executes on the GPU but produces output tensors
in host memory).
"""
raise NotImplementedError()
def _datatype_enum(self):
raise NotImplementedError()
def _shape_tuple(self):
"""The shape of this Tensor, as a tuple.
This is more performant than tuple(shape().as_list()) as it avoids
two list and one object creation. Marked private for now as from an API
perspective, it would be better to have a single performant way of
getting a shape rather than exposing shape() and shape_tuple()
(and heaven forbid, shape_list() etc. as well!). Punting on that for now,
but ideally one would work things out and remove the need for this method.
Returns:
tuple with the shape.
"""
raise NotImplementedError()
def _rank(self):
"""Integer rank of this Tensor.
Unlike regular Tensors, the rank is always known for EagerTensors.
This is more performant than len(self._shape_tuple())
Returns:
Integer rank
"""
raise NotImplementedError()
def _num_elements(self):
"""Number of elements of this Tensor.
Unlike regular Tensors, the number of elements is always known for
EagerTensors.
This is more performant than tensor.shape.num_elements
Returns:
The number of elements in the tensor, as an integer.
"""
raise NotImplementedError()
def _copy_to_device(self, device_name): # pylint: disable=redefined-outer-name
raise NotImplementedError()
@staticmethod
def _override_operator(name, func):
setattr(_EagerTensorBase, name, func)
def _copy_nograd(self, ctx=None, device_name=None):
"""Copies tensor to dest device, but doesn't record the operation."""
# Creates a new tensor on the dest device.
if ctx is None:
ctx = context.context()
if device_name is None:
device_name = ctx.device_name
# pylint: disable=protected-access
try:
ctx.ensure_initialized()
new_tensor = self._copy_to_device(device_name)
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
return new_tensor
def _copy(self, ctx=None, device_name=None):
"""Copies tensor to dest device."""
new_tensor = self._copy_nograd(ctx, device_name)
# Record the copy on tape and define backprop copy as well.
if context.executing_eagerly():
self_device = self.device
def grad_fun(dresult):
return [
dresult._copy(device_name=self_device)
if hasattr(dresult, "_copy") else dresult
]
tape.record_operation("_copy", [new_tensor], [self], grad_fun)
return new_tensor
# pylint: enable=protected-access
@property
def shape(self):
if self._tensor_shape is None: # pylint: disable=access-member-before-definition
# pylint: disable=protected-access
try:
# `_tensor_shape` is declared and defined in the definition of
# `EagerTensor`, in C.
self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple())
except core._NotOkStatusException as e:
six.raise_from(core._status_to_exception(e.code, e.message), None)
return self._tensor_shape
def get_shape(self):
"""Alias of Tensor.shape."""
return self.shape
def _shape_as_list(self):
"""The shape of the tensor as a list."""
return list(self._shape_tuple())
@property
def ndim(self):
"""Returns the number of Tensor dimensions."""
return self.shape.ndims
@deprecation.deprecated(None, "Use tf.identity instead.")
def cpu(self):
"""A copy of this Tensor with contents backed by host memory."""
return self._copy(context.context(), "CPU:0")
@deprecation.deprecated(None, "Use tf.identity instead.")
def gpu(self, gpu_index=0):
"""A copy of this Tensor with contents backed by memory on the GPU.
Args:
gpu_index: Identifies the GPU on which to place the contents of the returned
Tensor.
Returns:
A GPU-memory backed Tensor object initialized with the same contents
as this Tensor.
"""
return self._copy(context.context(), "GPU:" + str(gpu_index))
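# Illustrative usage sketch (comments only, mirroring the doctest style used in
# the docstrings above; assumes the public `tf` namespace is available). The
# deprecation notices on `cpu()`/`gpu()` point at `tf.identity` under an
# explicit device scope as the replacement:
#
#   >>> t = tf.constant([1.0, 2.0])
#   >>> with tf.device("CPU:0"):
#   ...   t_cpu = tf.identity(t)  # same contents, backed by host memory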
def set_shape(self, shape):
if not self.shape.is_compatible_with(shape):
raise ValueError(
"Tensor's shape %s is not compatible with supplied shape %s" %
(self.shape, shape))
# Methods not supported / implemented for Eager Tensors.
@property
def op(self):
raise AttributeError(
"Tensor.op is meaningless when eager execution is enabled.")
@property
def graph(self):
raise AttributeError(
"Tensor.graph is meaningless when eager execution is enabled.")
@property
def name(self):
raise AttributeError(
"Tensor.name is meaningless when eager execution is enabled.")
@property
def value_index(self):
raise AttributeError(
"Tensor.value_index is meaningless when eager execution is enabled.")
def consumers(self):
raise NotImplementedError(
"Tensor.consumers is meaningless when eager execution is enabled.")
def _add_consumer(self, consumer):
raise NotImplementedError(
"_add_consumer not supported when eager execution is enabled.")
def _as_node_def_input(self):
raise NotImplementedError(
"_as_node_def_input not supported when eager execution is enabled.")
def _as_tf_output(self):
raise NotImplementedError(
"_as_tf_output not supported when eager execution is enabled.")
def eval(self, feed_dict=None, session=None):
raise NotImplementedError(
"eval is not supported when eager execution is enabled, "
"is .numpy() what you're looking for?")
# This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and
# registers it with the current module.
# It is exposed as an __internal__ api for now (b/171081052), though we
# expect it to be eventually covered by tf Tensor types and typing.
EagerTensor = tf_export("__internal__.EagerTensor", v1=[])(
pywrap_tfe.TFE_Py_InitEagerTensor(_EagerTensorBase))
@tf_export(v1=["convert_to_tensor"])
@dispatch.add_dispatch_support
def convert_to_tensor_v1_with_dispatch(
value,
dtype=None,
name=None,
preferred_dtype=None,
dtype_hint=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
```python
import numpy as np
def my_func(arg):
arg = tf.convert_to_tensor(arg, dtype=tf.float32)
return tf.matmul(arg, arg) + arg
# The following calls are equivalent.
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
```
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, this function raises an error.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of `value`.
name: Optional name to use if a new `Tensor` is created.
preferred_dtype: Optional element type for the returned tensor, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
dtype_hint: same meaning as preferred_dtype, and overrides it.
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
"""
return convert_to_tensor_v1(value, dtype=dtype, name=name,
preferred_dtype=preferred_dtype,
dtype_hint=dtype_hint)
def convert_to_tensor_v1(value,
dtype=None,
name=None,
preferred_dtype=None,
dtype_hint=None):
"""Converts the given `value` to a `Tensor` (with the TF1 API)."""
preferred_dtype = deprecation.deprecated_argument_lookup(
"dtype_hint", dtype_hint, "preferred_dtype", preferred_dtype)
return convert_to_tensor_v2(value, dtype, preferred_dtype, name)
@tf_export("convert_to_tensor", v1=[])
@dispatch.add_dispatch_support
def convert_to_tensor_v2_with_dispatch(
value, dtype=None, dtype_hint=None, name=None):
"""Converts the given `value` to a `Tensor`.
This function converts Python objects of various types to `Tensor`
objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars.
For example:
>>> import numpy as np
>>> def my_func(arg):
... arg = tf.convert_to_tensor(arg, dtype=tf.float32)
... return arg
>>> # The following calls are equivalent.
...
>>> value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
>>> print(value_1)
tf.Tensor(
[[1. 2.]
[3. 4.]], shape=(2, 2), dtype=float32)
>>> value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
>>> print(value_2)
tf.Tensor(
[[1. 2.]
[3. 4.]], shape=(2, 2), dtype=float32)
>>> value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
>>> print(value_3)
tf.Tensor(
[[1. 2.]
[3. 4.]], shape=(2, 2), dtype=float32)
This function can be useful when composing a new operation in Python
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
and scalars in addition to `Tensor` objects.
Note: This function diverges from default Numpy behavior for `float` and
`string` types when `None` is present in a Python list or scalar. Rather
than silently converting `None` values, this function raises an error.
Args:
value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the type
is inferred from the type of `value`.
dtype_hint: Optional element type for the returned tensor, used when dtype
is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so dtype_hint can be used as a soft preference.
If the conversion to `dtype_hint` is not possible, this argument has no
effect.
name: Optional name to use if a new `Tensor` is created.
Returns:
A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value` to `dtype`.
RuntimeError: If a registered conversion function returns an invalid value.
ValueError: If the `value` is a tensor not of given `dtype` in graph mode.
"""
return convert_to_tensor_v2(
value, dtype=dtype, dtype_hint=dtype_hint, name=name)
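# Illustrative sketch (comments only; assumes the public `tf` namespace):
# `dtype_hint` is only a soft preference, so conversion silently falls back to
# the inferred dtype when the hint cannot be honored.
#
#   >>> tf.convert_to_tensor(1, dtype_hint=tf.float32).dtype      # hint honored
#   tf.float32
#   >>> tf.convert_to_tensor("hello", dtype_hint=tf.int32).dtype  # hint impossible, no error
#   tf.string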
def convert_to_tensor_v2(value, dtype=None, dtype_hint=None, name=None):
"""Converts the given `value` to a `Tensor`."""
return convert_to_tensor(
value=value,
dtype=dtype,
name=name,
preferred_dtype=dtype_hint,
as_ref=False)
def _error_prefix(name):
return "" if name is None else "%s: " % name
def pack_eager_tensors(tensors, ctx=None):
"""Pack multiple `EagerTensor`s of the same dtype and shape.
Args:
tensors: a list of EagerTensors to pack.
ctx: context.context().
Returns:
A packed EagerTensor.
"""
if not isinstance(tensors, list):
raise TypeError("tensors must be a list: %s" % tensors)
if not tensors:
raise ValueError("Empty tensors is unexpected for packing.")
dtype = tensors[0].dtype
shape = tensors[0].shape
handle_data = tensors[0]._handle_data # pylint: disable=protected-access
is_resource = dtype == dtypes.resource
for i in range(len(tensors)):
t = tensors[i]
if not isinstance(t, EagerTensor):
raise TypeError("tensors must be a list of EagerTensors: %s" % t)
if t.dtype != dtype:
raise ValueError(
"All tensors being packed should have the same dtype %s, "
"but the %d-th tensor is of dtype %s" % (dtype, i, t.dtype))
if t.shape != shape:
raise ValueError(
"All tensors being packed should have the same shape %s, "
"but the %d-th tensor is of shape %s" % (shape, i, t.shape))
# pylint: disable=protected-access
if is_resource and t._handle_data != handle_data:
raise ValueError(
"All tensors being packed should have the same handle data %s, "
"but the %d-th tensor is of handle data %s" %
(handle_data, i, t._handle_data))
# pylint: enable=protected-access
if ctx is None:
ctx = context.context()
# Propagate handle data for resource variables
packed_tensor = ctx.pack_eager_tensors(tensors)
if handle_data is not None:
packed_tensor._handle_data = handle_data # pylint: disable=protected-access
def grad_fun(_):
raise ValueError(
"Gradients through pack_eager_tensors are not supported yet.")
tape.record_operation("pack_eager_tensors", [packed_tensor], tensors,
grad_fun)
return packed_tensor
@trace.trace_wrapper("convert_to_tensor")
def convert_to_tensor(value,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None,
dtype_hint=None,
ctx=None,
accepted_result_types=(Tensor,)):
"""Implementation of the public convert_to_tensor."""
# TODO(b/142518781): Fix all call-sites and remove redundant arg
preferred_dtype = preferred_dtype or dtype_hint
if isinstance(value, EagerTensor):
if ctx is None:
ctx = context.context()
if not ctx.executing_eagerly():
graph = get_default_graph()
if not graph.building_function:
raise RuntimeError("Attempting to capture an EagerTensor without "
"building a function.")
return graph.capture(value, name=name)
if dtype is not None:
dtype = dtypes.as_dtype(dtype)
if isinstance(value, Tensor):
if dtype is not None and not dtype.is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtype.name, value.dtype.name, value))
return value
if preferred_dtype is not None:
preferred_dtype = dtypes.as_dtype(preferred_dtype)
# See below for the reason why it's `type(value)` and not just `value`.
# https://docs.python.org/3.8/reference/datamodel.html#special-lookup
overload = getattr(type(value), "__tf_tensor__", None)
if overload is not None:
return overload(value, dtype, name)
for base_type, conversion_func in tensor_conversion_registry.get(type(value)):
# If dtype is None but preferred_dtype is not None, we try to
# cast to preferred_dtype first.
ret = None
if dtype is None and preferred_dtype is not None:
try:
ret = conversion_func(
value, dtype=preferred_dtype, name=name, as_ref=as_ref)
except (TypeError, ValueError):
# Could not coerce the conversion to use the preferred dtype.
pass
else:
if (ret is not NotImplemented and
ret.dtype.base_dtype != preferred_dtype.base_dtype):
raise TypeError("convert_to_tensor did not convert to "
"the preferred dtype: %s vs %s " %
(ret.dtype.base_dtype, preferred_dtype.base_dtype))
if ret is None:
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
if ret is NotImplemented:
continue
if not isinstance(ret, accepted_result_types):
raise RuntimeError(
"%sConversion function %r for type %s returned non-Tensor: %r" %
(_error_prefix(name), conversion_func, base_type, ret))
if dtype and not dtype.is_compatible_with(ret.dtype):
raise RuntimeError(
"%sConversion function %r for type %s returned incompatible "
"dtype: requested = %s, actual = %s" %
(_error_prefix(name), conversion_func, base_type, dtype.name,
ret.dtype.name))
return ret
raise TypeError("%sCannot convert %r with type %s to Tensor: "
"no conversion function registered." %
(_error_prefix(name), value, type(value)))
internal_convert_to_tensor = convert_to_tensor
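# Illustrative sketch (comments only): the `__tf_tensor__` lookup above lets a
# user-defined class control its own conversion. `Box` is a made-up name used
# only for this example.
#
#   >>> class Box:
#   ...   def __init__(self, value):
#   ...     self.value = value
#   ...   def __tf_tensor__(self, dtype=None, name=None):
#   ...     return convert_to_tensor(self.value, dtype=dtype, name=name)
#   >>> convert_to_tensor(Box([1, 2, 3])).shape
#   TensorShape([3])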
def internal_convert_n_to_tensor(values,
dtype=None,
name=None,
as_ref=False,
preferred_dtype=None,
ctx=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
preferred_dtype: Optional element type for the returned tensors, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
ctx: The value of context.context().
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections_abc.Sequence):
raise TypeError("values must be a sequence.")
ret = []
if ctx is None:
ctx = context.context()
for i, value in enumerate(values):
n = None if name is None else "%s_%d" % (name, i)
ret.append(
convert_to_tensor(
value,
dtype=dtype,
name=n,
as_ref=as_ref,
preferred_dtype=preferred_dtype,
ctx=ctx))
return ret
def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None):
"""Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
preferred_dtype: Optional element type for the returned tensors, used when
dtype is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so preferred_dtype can be used as a soft
preference. If the conversion to `preferred_dtype` is not possible, this
argument has no effect.
Returns:
A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor(
values=values,
dtype=dtype,
name=name,
preferred_dtype=preferred_dtype,
as_ref=False)
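# Illustrative sketch (comments only): element-wise conversion of a Python
# list; when a graph is being built, `name` (if given) is suffixed per element
# as "name_0", "name_1", ...
#
#   >>> ts = convert_n_to_tensor([1.0, [2.0, 3.0]])
#   >>> [t.shape for t in ts]
#   [TensorShape([]), TensorShape([2])]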
def convert_to_tensor_or_composite(value, dtype=None, name=None):
"""Converts the given object to a `Tensor` or `CompositeTensor`.
If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it
is converted to a `Tensor` using `convert_to_tensor()`.
Args:
value: A `CompositeTensor` or an object that can be consumed by
`convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`CompositeTensor`.
name: (Optional.) A name to use if a new `Tensor` is created.
Returns:
A `Tensor` or `CompositeTensor`, based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
return internal_convert_to_tensor_or_composite(
value=value, dtype=dtype, name=name, as_ref=False)
def internal_convert_to_tensor_or_composite(value,
dtype=None,
name=None,
as_ref=False):
"""Converts the given object to a `Tensor` or `CompositeTensor`.
If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it
is converted to a `Tensor` using `convert_to_tensor()`.
Args:
value: A `CompositeTensor`, or an object that can be consumed by
`convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor` or
`CompositeTensor`.
name: (Optional.) A name to use if a new `Tensor` is created.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A `Tensor` or `CompositeTensor`, based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
"""
if isinstance(value, composite_tensor.CompositeTensor):
value_dtype = getattr(value, "dtype", None)
if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value_dtype):
raise ValueError(
"Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
(dtypes.as_dtype(dtype).name, value.dtype.name, str(value)))
return value
else:
return convert_to_tensor(
value,
dtype=dtype,
name=name,
as_ref=as_ref,
accepted_result_types=(Tensor, composite_tensor.CompositeTensor))
def internal_convert_n_to_tensor_or_composite(values,
dtype=None,
name=None,
as_ref=False):
"""Converts `values` to a list of `Tensor` or `CompositeTensor` objects.
Any `CompositeTensor` objects in `values` are returned unmodified.
Args:
values: A list of `None`, `CompositeTensor`, or objects that can be consumed
by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor`s or
`CompositeTensor`s.
name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
Returns:
A list of `Tensor`, `CompositeTensor`, and/or `None` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
if not isinstance(values, collections_abc.Sequence):
raise TypeError("values must be a sequence.")
ret = []
for i, value in enumerate(values):
if value is None:
ret.append(value)
else:
n = None if name is None else "%s_%d" % (name, i)
ret.append(
internal_convert_to_tensor_or_composite(
value, dtype=dtype, name=n, as_ref=as_ref))
return ret
def convert_n_to_tensor_or_composite(values, dtype=None, name=None):
"""Converts `values` to a list of `Tensor` or `CompositeTensor` objects.
Any `CompositeTensor` objects in `values` are returned unmodified.
Args:
values: A list of `None`, `CompositeTensor`, or objects that can be
consumed by `convert_to_tensor()`.
dtype: (Optional.) The required `DType` of the returned `Tensor`s or
`CompositeTensor`s.
name: (Optional.) A name prefix to use when a new `Tensor` is created, in
which case element `i` will be given the name `name + '_' + i`.
Returns:
A list of `Tensor` and/or `CompositeTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
`values`.
RuntimeError: If a registered conversion function returns an invalid
value.
"""
return internal_convert_n_to_tensor_or_composite(
values=values, dtype=dtype, name=name, as_ref=False)
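# Illustrative sketch (comments only; assumes the public `tf` namespace):
# `CompositeTensor`s such as `tf.RaggedTensor` pass through unchanged, while
# plain Python values are converted.
#
#   >>> rt = tf.ragged.constant([[1, 2], [3]])
#   >>> out = convert_n_to_tensor_or_composite([rt, [4, 5]])
#   >>> out[0] is rt
#   True
#   >>> out[1].dtype
#   tf.int32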
def _device_string(dev_spec):
if pydev.is_device_spec(dev_spec):
return dev_spec.to_string()
else:
return dev_spec
def _NodeDef(op_type, name, attrs=None):
"""Create a NodeDef proto.
Args:
op_type: Value for the "op" attribute of the NodeDef proto.
name: Value for the "name" attribute of the NodeDef proto.
attrs: Dictionary where the key is the attribute name (a string)
and the value is the respective "attr" attribute of the NodeDef proto (an
AttrValue).
Returns:
A node_def_pb2.NodeDef protocol buffer.
"""
node_def = node_def_pb2.NodeDef(op=compat.as_bytes(op_type),
name=compat.as_bytes(name))
if attrs:
for k, v in six.iteritems(attrs):
node_def.attr[k].CopyFrom(v)
return node_def
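# Illustrative sketch (comments only): building a NodeDef proto for a node with
# a single integer-valued attr ("N" is used here purely as an example attr).
#
#   >>> nd = _NodeDef("Pack", "my_pack",
#   ...               attrs={"N": attr_value_pb2.AttrValue(i=2)})
#   >>> nd.op, nd.name
#   ('Pack', 'my_pack')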
# Copied from core/framework/node_def_util.cc
# TODO(mrry,josh11b): Consolidate this validation in C++ code.
_VALID_OP_NAME_REGEX = re.compile(r"^[A-Za-z0-9.][A-Za-z0-9_.\\/>-]*$")
_VALID_SCOPE_NAME_REGEX = re.compile(r"^[A-Za-z0-9_.\\/>-]*$")
@tf_export("__internal__.create_c_op", v1=[])
def _create_c_op(graph, node_def, inputs, control_inputs, op_def=None):
"""Creates a TF_Operation.
Args:
graph: a `Graph`.
node_def: `node_def_pb2.NodeDef` for the operation to create.
inputs: A flattened list of `Tensor`s. This function handles grouping
tensors into lists as per attributes in the `node_def`.
control_inputs: A list of `Operation`s to set as control dependencies.
op_def: Optional. `op_def_pb2.OpDef` for the operation to create. If not
specified, is looked up from the `graph` using `node_def.op`.
Returns:
A wrapped TF_Operation*.
"""
if op_def is None:
op_def = graph._get_op_def(node_def.op) # pylint: disable=protected-access
# TODO(skyewm): op_def_library.apply_op() flattens the incoming inputs.
# Refactor so we don't have to do this here.
inputs = _reconstruct_sequence_inputs(op_def, inputs, node_def.attr)
# pylint: disable=protected-access
op_desc = pywrap_tf_session.TF_NewOperation(graph._c_graph,
compat.as_str(node_def.op),
compat.as_str(node_def.name))
if node_def.device:
pywrap_tf_session.TF_SetDevice(op_desc, compat.as_str(node_def.device))
# Add inputs
for op_input in inputs:
if isinstance(op_input, (list, tuple)):
pywrap_tf_session.TF_AddInputList(op_desc,
[t._as_tf_output() for t in op_input])
else:
pywrap_tf_session.TF_AddInput(op_desc, op_input._as_tf_output())
# Add control inputs
for control_input in control_inputs:
pywrap_tf_session.TF_AddControlInput(op_desc, control_input._c_op)
# pylint: enable=protected-access
# Add attrs
for name, attr_value in node_def.attr.items():
serialized = attr_value.SerializeToString()
# TODO(skyewm): this creates and deletes a new TF_Status for every attr.
# It might be worth creating a convenient way to re-use the same status.
pywrap_tf_session.TF_SetAttrValueProto(op_desc, compat.as_str(name),
serialized)
try:
c_op = pywrap_tf_session.TF_FinishOperation(op_desc)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
return c_op
@tf_export("Operation")
class Operation(object):
"""Represents a graph node that performs computation on tensors.
An `Operation` is a node in a `tf.Graph` that takes zero or more `Tensor`
objects as input, and produces zero or more `Tensor` objects as output.
Objects of type `Operation` are created by calling a Python op constructor
(such as `tf.matmul`) within a `tf.function` or under a `tf.Graph.as_default`
context manager.
For example, within a `tf.function`, `c = tf.matmul(a, b)` creates an
`Operation` of type "MatMul" that takes tensors `a` and `b` as input, and
produces `c` as output.
If a `tf.compat.v1.Session` is used, an `Operation` of a `tf.Graph` can be
executed by passing it to `tf.Session.run`. `op.run()` is a shortcut for
calling `tf.compat.v1.get_default_session().run(op)`.
"""
def __init__(self,
node_def,
g,
inputs=None,
output_types=None,
control_inputs=None,
input_types=None,
original_op=None,
op_def=None):
r"""Creates an `Operation`.
NOTE: This constructor validates the name of the `Operation` (passed
as `node_def.name`). Valid `Operation` names match the following
regular expression:
[A-Za-z0-9.][A-Za-z0-9_.\\/>-]*
Args:
node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`. Used for
attributes of `node_def_pb2.NodeDef`, typically `name`, `op`, and
`device`. The `input` attribute is irrelevant here as it will be
computed when generating the model.
g: `Graph`. The parent graph.
inputs: list of `Tensor` objects. The inputs to this `Operation`.
output_types: list of `DType` objects. List of the types of the `Tensors`
computed by this operation. The length of this list indicates the
number of output endpoints of the `Operation`.
control_inputs: list of operations or tensors from which to have a control
dependency.
input_types: List of `DType` objects representing the types of the tensors
accepted by the `Operation`. By default uses `[x.dtype.base_dtype for x
in inputs]`. Operations that expect reference-typed inputs must specify
these explicitly.
original_op: Optional. Used to associate the new `Operation` with an
existing `Operation` (for example, a replica with the op that was
replicated).
op_def: Optional. The `op_def_pb2.OpDef` proto that describes the op type
that this `Operation` represents.
Raises:
TypeError: if control inputs are not Operations or Tensors,
or if `node_def` is not a `NodeDef`,
or if `g` is not a `Graph`,
or if `inputs` are not tensors,
or if `inputs` and `input_types` are incompatible.
ValueError: if the `node_def` name is not valid.
"""
# For internal use only: `node_def` can be set to a TF_Operation to create
# an Operation for that op. This is useful for creating Operations for ops
# indirectly created by C API methods, e.g. the ops created by
# TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields
# should be None.
if isinstance(node_def, node_def_pb2.NodeDef):
if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0:
raise ValueError(
"Cannot create a tensor proto whose content is larger than 2GB.")
if not _VALID_OP_NAME_REGEX.match(node_def.name):
raise ValueError("'%s' is not a valid node name" % node_def.name)
c_op = None
elif type(node_def).__name__ == "TF_Operation":
assert inputs is None
assert output_types is None
assert control_inputs is None
assert input_types is None
assert original_op is None
assert op_def is None
c_op = node_def
else:
raise TypeError("node_def needs to be a NodeDef: %s" % (node_def,))
if not isinstance(g, Graph):
raise TypeError("g needs to be a Graph: %s" % (g,))
self._graph = g
if inputs is None:
inputs = []
elif not isinstance(inputs, list):
raise TypeError("inputs needs to be a list of Tensors: %s" % (inputs,))
for a in inputs:
if not isinstance(a, Tensor):
raise TypeError("input needs to be a Tensor: %s" % (a,))
if input_types is None:
input_types = [i.dtype.base_dtype for i in inputs]
else:
if not all(
x.is_compatible_with(i.dtype) for i, x in zip(inputs, input_types)):
raise TypeError("In op '%s', input types (%s) are not compatible "
"with expected types (%s)" %
(node_def.name, [i.dtype for i in inputs], input_types))
# Build the list of control inputs.
control_input_ops = []
if control_inputs:
for c in control_inputs:
control_op = None
if isinstance(c, Operation):
control_op = c
elif isinstance(c, (Tensor, IndexedSlices)):
control_op = c.op
else:
raise TypeError("Control input must be an Operation, "
"a Tensor, or IndexedSlices: %s" % c)
control_input_ops.append(control_op)
# This will be set by self.inputs.
self._inputs_val = None
# pylint: disable=protected-access
self._original_op = original_op
# List of _UserDevSpecs holding code location of device context manager
invocations and the user's original argument to them.
self._device_code_locations = None
# Dict mapping op name to file and line information for op colocation
# context managers.
self._colocation_code_locations = None
self._control_flow_context = self.graph._get_control_flow_context()
# Gradient function for this op. There are three ways to specify the gradient
# function, and the first available one is used, in the following order.
# 1. self._gradient_function
# 2. Gradient name registered by "_gradient_op_type" attribute.
# 3. Gradient name registered by op.type.
self._gradient_function = None
# Initialize self._c_op.
if c_op:
self._c_op = c_op
op_def = g._get_op_def(pywrap_tf_session.TF_OperationOpType(c_op))
name = self.name
else:
if op_def is None:
op_def = self._graph._get_op_def(node_def.op)
self._c_op = _create_c_op(self._graph, node_def, inputs,
control_input_ops, op_def)
name = compat.as_str(node_def.name)
self._traceback = tf_stack.extract_stack_for_node(self._c_op)
# pylint: enable=protected-access
self._is_stateful = op_def.is_stateful
# Initialize self._outputs.
num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op)
self._outputs = []
for i in range(num_outputs):
tf_output = c_api_util.tf_output(self._c_op, i)
output_type = pywrap_tf_session.TF_OperationOutputType(tf_output)
tensor = Tensor._create_with_tf_output(self, i, output_type, tf_output) # pylint: disable=protected-access
self._outputs.append(tensor)
self._id_value = self._graph._add_op(self, name) # pylint: disable=protected-access
if not c_op:
self._control_flow_post_processing(input_tensors=inputs)
def _control_flow_post_processing(self, input_tensors=None):
"""Add this op to its control flow context.
This may add new ops and change this op's inputs. self.inputs must be
available before calling this method.
Args:
input_tensors: (Optional.) A list of `Tensors` corresponding to the inputs
of this op, which should be equivalent to `self.inputs`. Pass this
argument to avoid evaluating `self.inputs` unnecessarily.
"""
if input_tensors is None:
input_tensors = self.inputs
for input_tensor in input_tensors:
control_flow_util.CheckInputFromValidContext(self, input_tensor.op)
if self._control_flow_context is not None:
self._control_flow_context.AddOp(self)
def colocation_groups(self):
"""Returns the list of colocation groups of the op."""
default_colocation_group = [compat.as_bytes("loc:@%s" % self.name)]
try:
class_attr = self.get_attr("_class")
except ValueError:
# This op has no explicit colocation group, so it is itself its
# own root of a colocation group.
return default_colocation_group
attr_groups = [
class_name for class_name in class_attr
if class_name.startswith(b"loc:@")
]
# If there are no colocation groups in the explicit _class field,
# return the default colocation group.
return attr_groups if attr_groups else default_colocation_group
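# Illustrative sketch (comments only; assumes the public `tf` namespace): with
# an explicit colocation scope the "_class" attr carries the group; otherwise
# the op is the root of its own group.
#
#   >>> with tf.Graph().as_default():
#   ...   a = tf.constant(1.0, name="a")
#   ...   with tf.compat.v1.colocate_with(a):
#   ...     b = tf.constant(2.0, name="b")
#   >>> b.op.colocation_groups()
#   [b'loc:@a']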
def values(self):
"""DEPRECATED: Use outputs."""
return tuple(self.outputs)
def _get_control_flow_context(self):
"""Returns the control flow context of this op.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context of this op.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
@property
def name(self):
"""The full name of this operation."""
return pywrap_tf_session.TF_OperationName(self._c_op)
@property
def _id(self):
"""The unique integer id of this operation."""
return self._id_value
@property
def device(self):
"""The name of the device to which this op has been assigned, if any.
Returns:
The string name of the device to which this op has been
assigned, or an empty string if it has not been assigned to a
device.
"""
return pywrap_tf_session.TF_OperationDevice(self._c_op)
@property
def _device_assignments(self):
"""Code locations for device context managers active at op creation.
This property will return a list of traceable_stack.TraceableObject
instances where .obj is a string representing the assigned device
(or information about the function that would be applied to this op
to compute the desired device) and the filename and lineno members
record the location of the relevant device context manager.
For example, suppose file_a contained these lines:
file_a.py:
15: with tf.device('/gpu:0'):
16: node_b = tf.constant(4, name='NODE_B')
Then a TraceableObject t_obj representing the device context manager
would have these member values:
t_obj.obj -> '/gpu:0'
t_obj.filename = 'file_a.py'
t_obj.lineno = 15
and node_b.op._device_assignments would return the list [t_obj].
Returns:
[str: traceable_stack.TraceableObject, ...] as per this method's
description, above.
"""
return self._device_code_locations or []
@property
def _colocation_dict(self):
"""Code locations for colocation context managers active at op creation.
This property will return a dictionary for which the keys are nodes with
which this Operation is colocated, and for which the values are
traceable_stack.TraceableObject instances. The TraceableObject instances
record the location of the relevant colocation context manager but have the
"obj" field set to None to prevent leaking private data.
For example, suppose file_a contained these lines:
file_a.py:
14: node_a = tf.constant(3, name='NODE_A')
15: with tf.compat.v1.colocate_with(node_a):
16: node_b = tf.constant(4, name='NODE_B')
Then a TraceableObject t_obj representing the colocation context manager
would have these member values:
t_obj.obj -> None
t_obj.filename = 'file_a.py'
t_obj.lineno = 15
and node_b.op._colocation_dict would return the dictionary
{ 'NODE_A': t_obj }
Returns:
{str: traceable_stack.TraceableObject} as per this method's description,
above.
"""
locations_dict = self._colocation_code_locations or {}
return locations_dict.copy()
@property
def _output_types(self):
"""List this operation's output types.
Returns:
List of the types of the Tensors computed by this operation.
Each element in the list is an integer whose value is one of
the TF_DataType enums defined in pywrap_tf_session.h
The length of this list indicates the number of output endpoints
of the operation.
"""
num_outputs = pywrap_tf_session.TF_OperationNumOutputs(self._c_op)
output_types = [
int(pywrap_tf_session.TF_OperationOutputType(self._tf_output(i)))
for i in xrange(num_outputs)
]
return output_types
def _tf_output(self, output_idx):
"""Create and return a new TF_Output for output_idx'th output of this op."""
tf_output = pywrap_tf_session.TF_Output()
tf_output.oper = self._c_op
tf_output.index = output_idx
return tf_output
def _tf_input(self, input_idx):
"""Create and return a new TF_Input for input_idx'th input of this op."""
tf_input = pywrap_tf_session.TF_Input()
tf_input.oper = self._c_op
tf_input.index = input_idx
return tf_input
def _set_device(self, device): # pylint: disable=redefined-outer-name
"""Set the device of this operation.
Args:
device: string or device. The device to set.
"""
self._set_device_from_string(compat.as_str(_device_string(device)))
def _set_device_from_string(self, device_str):
"""Fast path to set device if the type is known to be a string.
This function is called frequently enough during graph construction that
there are non-trivial performance gains if the caller can guarantee that
the specified device is already a string.
Args:
device_str: A string specifying where to place this op.
"""
pywrap_tf_session.SetRequestedDevice(
self._graph._c_graph, # pylint: disable=protected-access
self._c_op, # pylint: disable=protected-access
device_str)
def _update_input(self, index, tensor):
"""Update the input to this operation at the given index.
NOTE: This is for TF internal use only. Please don't use it.
Args:
index: the index of the input to update.
tensor: the Tensor to be used as the input at the given index.
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
# Reset cached inputs.
self._inputs_val = None
pywrap_tf_session.UpdateEdge(
self._graph._c_graph, # pylint: disable=protected-access
tensor._as_tf_output(), # pylint: disable=protected-access
self._tf_input(index))
def _add_while_inputs(self, tensors):
"""See AddWhileInputHack in python_api.h.
NOTE: This is for TF internal use only. Please don't use it.
Args:
tensors: list of Tensors
Raises:
TypeError: if tensor is not a Tensor,
or if input tensor type is not convertible to dtype.
ValueError: if the Tensor is from a different graph.
"""
for tensor in tensors:
if not isinstance(tensor, Tensor):
raise TypeError("tensor must be a Tensor: %s" % tensor)
_assert_same_graph(self, tensor)
# Reset cached inputs.
self._inputs_val = None
pywrap_tf_session.AddWhileInputHack(
self._graph._c_graph, # pylint: disable=protected-access
tensor._as_tf_output(), # pylint: disable=protected-access
self._c_op)
def _add_control_inputs(self, ops):
"""Add a list of new control inputs to this operation.
Args:
ops: the list of Operations to add as control input.
Raises:
TypeError: if ops is not a list of Operations.
ValueError: if any op in ops is from a different graph.
"""
for op in ops:
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
pywrap_tf_session.AddControlInput(
self._graph._c_graph, # pylint: disable=protected-access
self._c_op, # pylint: disable=protected-access
op._c_op) # pylint: disable=protected-access
def _add_control_input(self, op):
"""Add a new control input to this operation.
Args:
op: the Operation to add as control input.
Raises:
TypeError: if op is not an Operation.
ValueError: if op is from a different graph.
"""
if not isinstance(op, Operation):
raise TypeError("op must be an Operation: %s" % op)
pywrap_tf_session.AddControlInput(
self._graph._c_graph, # pylint: disable=protected-access
self._c_op, # pylint: disable=protected-access
op._c_op) # pylint: disable=protected-access
def _remove_all_control_inputs(self):
"""Removes any control inputs to this operation."""
pywrap_tf_session.RemoveAllControlInputs(self._graph._c_graph, self._c_op) # pylint: disable=protected-access
def _add_outputs(self, types, shapes):
"""Adds new Tensors to self.outputs.
Note: this is generally unsafe to use. This is used in certain situations in
conjunction with _set_type_list_attr.
Args:
types: list of DTypes
shapes: list of TensorShapes
"""
assert len(types) == len(shapes)
orig_num_outputs = len(self.outputs)
for i in range(len(types)):
t = Tensor(self, orig_num_outputs + i, types[i])
self._outputs.append(t)
t.set_shape(shapes[i])
def __str__(self):
return str(self.node_def)
def __repr__(self):
return "<tf.Operation '%s' type=%s>" % (self.name, self.type)
def __tf_tensor__(self, dtype=None, name=None):
"""Raises a helpful error."""
raise TypeError("can't convert Operation '{}' to Tensor".format(self.name))
@property
def outputs(self):
"""The list of `Tensor` objects representing the outputs of this op."""
return self._outputs
@property
def inputs(self):
"""The sequence of `Tensor` objects representing the data inputs of this op."""
if self._inputs_val is None:
# pylint: disable=protected-access
self._inputs_val = tuple(
map(self.graph._get_tensor_by_tf_output,
pywrap_tf_session.GetOperationInputs(self._c_op)))
# pylint: enable=protected-access
return self._inputs_val
@property
def _input_types(self):
num_inputs = pywrap_tf_session.TF_OperationNumInputs(self._c_op)
input_types = [
dtypes.as_dtype(
pywrap_tf_session.TF_OperationInputType(self._tf_input(i)))
for i in xrange(num_inputs)
]
return input_types
@property
def control_inputs(self):
"""The `Operation` objects on which this op has a control dependency.
Before this op is executed, TensorFlow will ensure that the
operations in `self.control_inputs` have finished executing. This
mechanism can be used to run ops sequentially for performance
reasons, or to ensure that the side effects of an op are observed
in the correct order.
Returns:
A list of `Operation` objects.
"""
control_c_ops = pywrap_tf_session.TF_OperationGetControlInputs_wrapper(
self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(
pywrap_tf_session.TF_OperationName(c_op)) for c_op in control_c_ops
]
# pylint: enable=protected-access
@property
def _control_outputs(self):
"""The `Operation` objects which have a control dependency on this op.
Before any of the ops in self._control_outputs can execute, TensorFlow will
ensure that self has finished executing.
Returns:
A list of `Operation` objects.
"""
control_c_ops = pywrap_tf_session.TF_OperationGetControlOutputs_wrapper(
self._c_op)
# pylint: disable=protected-access
return [
self.graph._get_operation_by_name_unsafe(
pywrap_tf_session.TF_OperationName(c_op)) for c_op in control_c_ops
]
# pylint: enable=protected-access
@property
def type(self):
"""The type of the op (e.g. `"MatMul"`)."""
return pywrap_tf_session.TF_OperationOpType(self._c_op)
@property
def graph(self):
"""The `Graph` that contains this operation."""
return self._graph
@property
def node_def(self):
# pylint: disable=line-too-long
"""Returns the `NodeDef` representation of this operation.
Returns:
A
[`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
pywrap_tf_session.TF_OperationToNodeDef(self._c_op, buf)
data = pywrap_tf_session.TF_GetBuffer(buf)
node_def = node_def_pb2.NodeDef()
node_def.ParseFromString(compat.as_bytes(data))
return node_def
@property
def op_def(self):
# pylint: disable=line-too-long
"""Returns the `OpDef` proto that represents the type of this op.
Returns:
An
[`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto)
protocol buffer.
"""
# pylint: enable=line-too-long
return self._graph._get_op_def(self.type)
@property
def traceback(self):
"""Returns the call stack from when this operation was constructed."""
return self._traceback
def _set_attr(self, attr_name, attr_value):
"""Private method used to set an attribute in the node_def."""
buf = pywrap_tf_session.TF_NewBufferFromString(
compat.as_bytes(attr_value.SerializeToString()))
try:
self._set_attr_with_buf(attr_name, buf)
finally:
pywrap_tf_session.TF_DeleteBuffer(buf)
def _set_attr_with_buf(self, attr_name, attr_buf):
"""Set an attr in the node_def with a pre-allocated buffer."""
# pylint: disable=protected-access
pywrap_tf_session.SetAttr(self._graph._c_graph, self._c_op, attr_name,
attr_buf)
# pylint: enable=protected-access
def _set_func_attr(self, attr_name, func_name):
"""Private method used to set a function attribute in the node_def."""
func = attr_value_pb2.NameAttrList(name=func_name)
self._set_attr(attr_name, attr_value_pb2.AttrValue(func=func))
def _set_func_list_attr(self, attr_name, func_names):
"""Private method used to set a list(function) attribute in the node_def."""
funcs = [attr_value_pb2.NameAttrList(name=func_name)
for func_name in func_names]
funcs_list = attr_value_pb2.AttrValue.ListValue(func=funcs)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=funcs_list))
def _set_type_list_attr(self, attr_name, types):
"""Private method used to set a list(type) attribute in the node_def."""
if not types:
return
if isinstance(types[0], dtypes.DType):
types = [dt.as_datatype_enum for dt in types]
types_list = attr_value_pb2.AttrValue.ListValue(type=types)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=types_list))
def _set_shape_list_attr(self, attr_name, shapes):
"""Private method used to set a list(shape) attribute in the node_def."""
shapes = [s.as_proto() for s in shapes]
shapes_list = attr_value_pb2.AttrValue.ListValue(shape=shapes)
self._set_attr(attr_name, attr_value_pb2.AttrValue(list=shapes_list))
def _clear_attr(self, attr_name):
"""Private method used to clear an attribute in the node_def."""
# pylint: disable=protected-access
pywrap_tf_session.ClearAttr(self._graph._c_graph, self._c_op, attr_name)
# pylint: enable=protected-access
def get_attr(self, name):
"""Returns the value of the attr of this op with the given `name`.
Args:
name: The name of the attr to fetch.
Returns:
The value of the attr, as a Python object.
Raises:
ValueError: If this op does not have an attr with the given `name`.
"""
fields = ("s", "i", "f", "b", "type", "shape", "tensor", "func")
try:
with c_api_util.tf_buffer() as buf:
pywrap_tf_session.TF_OperationGetAttrValueProto(self._c_op, name, buf)
data = pywrap_tf_session.TF_GetBuffer(buf)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
x = attr_value_pb2.AttrValue()
x.ParseFromString(data)
oneof_value = x.WhichOneof("value")
if oneof_value is None:
return []
if oneof_value == "list":
for f in fields:
if getattr(x.list, f):
if f == "type":
return [dtypes.as_dtype(t) for t in x.list.type]
else:
return list(getattr(x.list, f))
return []
if oneof_value == "type":
return dtypes.as_dtype(x.type)
assert oneof_value in fields, "Unsupported field type in " + str(x)
return getattr(x, oneof_value)
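# Illustrative sketch (comments only; assumes the public `tf` namespace):
# reading attrs off a graph op. For a Const node, "dtype" comes back as a
# `tf.DType`:
#
#   >>> with tf.Graph().as_default():
#   ...   c = tf.constant([1, 2], name="c")
#   >>> c.op.get_attr("dtype")
#   tf.int32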
def _get_attr_type(self, name):
"""Returns the `DType` value of the attr of this op with the given `name`."""
try:
dtype_enum = pywrap_tf_session.TF_OperationGetAttrType(self._c_op, name)
return _DTYPES_INTERN_TABLE[dtype_enum]
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
def _get_attr_bool(self, name):
"""Returns the `bool` value of the attr of this op with the given `name`."""
try:
return pywrap_tf_session.TF_OperationGetAttrBool(self._c_op, name)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
def _get_attr_int(self, name):
"""Returns the `int` value of the attr of this op with the given `name`."""
try:
return pywrap_tf_session.TF_OperationGetAttrInt(self._c_op, name)
except errors.InvalidArgumentError as e:
# Convert to ValueError for backwards compatibility.
raise ValueError(str(e))
def run(self, feed_dict=None, session=None):
"""Runs this operation in a `Session`.
Calling this method will execute all preceding operations that
produce the inputs needed for this operation.
*N.B.* Before invoking `Operation.run()`, its graph must have been
launched in a session, and either a default session must be
available, or `session` must be specified explicitly.
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values. See
`tf.Session.run` for a description of the valid feed values.
session: (Optional.) The `Session` to be used to run to this operation. If
none, the default session will be used.
"""
_run_using_default_session(self, feed_dict, self.graph, session)
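# Illustrative sketch (comments only; assumes the public `tf` namespace):
# inspecting an Operation created under a graph context.
#
#   >>> g = tf.Graph()
#   >>> with g.as_default():
#   ...   c = tf.matmul(tf.ones([2, 2]), tf.ones([2, 2]))
#   >>> c.op.type
#   'MatMul'
#   >>> len(c.op.inputs), len(c.op.outputs)
#   (2, 1)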
_gradient_registry = registry.Registry("gradient")
@tf_export("RegisterGradient")
class RegisterGradient(object):
"""A decorator for registering the gradient function for an op type.
This decorator is only used when defining a new op type. For an op
with `m` inputs and `n` outputs, the gradient function is a function
that takes the original `Operation` and `n` `Tensor` objects
(representing the gradients with respect to each output of the op),
and returns `m` `Tensor` objects (representing the partial gradients
with respect to each input of the op).
For example, assuming that operations of type `"Sub"` take two
inputs `x` and `y`, and return a single output `x - y`, the
following gradient function would be registered:
```python
@tf.RegisterGradient("Sub")
def _sub_grad(unused_op, grad):
return grad, tf.negative(grad)
```
The decorator argument `op_type` is the string type of an
operation. This corresponds to the `OpDef.name` field for the proto
that defines the operation.
"""
__slots__ = ["_op_type"]
def __init__(self, op_type):
"""Creates a new decorator with `op_type` as the Operation type.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f):
"""Registers the function `f` as gradient function for `op_type`."""
_gradient_registry.register(f, self._op_type)
return f
@deprecation.deprecated_endpoints("NotDifferentiable", "NoGradient")
@tf_export("no_gradient", v1=["no_gradient", "NotDifferentiable", "NoGradient"])
def no_gradient(op_type):
"""Specifies that ops of type `op_type` are not differentiable.
This function should *not* be used for operations that have a
well-defined gradient that is not yet implemented.
This function is only used when defining a new op type. It may be
used for ops such as `tf.size()` that are not differentiable. For
example:
```python
tf.no_gradient("Size")
```
The gradient computed for 'op_type' will then propagate zeros.
For ops that have a well-defined gradient but are not yet implemented,
no declaration should be made, and an error *must* be thrown if
an attempt to request its gradient is made.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not a string.
"""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string")
_gradient_registry.register(None, op_type)
# Aliases for the old names, will be eventually removed.
NoGradient = no_gradient
NotDifferentiable = no_gradient
def get_gradient_function(op):
"""Returns the function that computes gradients for "op"."""
if not op.inputs:
return None
gradient_function = op._gradient_function # pylint: disable=protected-access
if gradient_function:
return gradient_function
try:
op_type = op.get_attr("_gradient_op_type")
except ValueError:
op_type = op.type
return _gradient_registry.lookup(op_type)
def set_shape_and_handle_data_for_outputs(_):
"""No op. TODO(b/74620627): Remove this."""
pass
class OpStats(object):
"""A holder for statistics about an operator.
This class holds information about the resource requirements for an op,
including the size of its weight parameters on-disk and how many FLOPS it
requires to execute forward inference.
If you define a new operation, you can create a function that will return a
set of information about its usage of the CPU and disk space when serialized.
The function itself takes a Graph object (set up so you can call methods like
get_tensor_by_name to help calculate the results) and a NodeDef argument.
"""
__slots__ = ["_statistic_type", "_value"]
def __init__(self, statistic_type, value=None):
"""Sets up the initial placeholders for the statistics."""
self.statistic_type = statistic_type
self.value = value
@property
def statistic_type(self):
return self._statistic_type
@statistic_type.setter
def statistic_type(self, statistic_type):
self._statistic_type = statistic_type
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def __iadd__(self, other):
if other.statistic_type != self.statistic_type:
raise ValueError("Can't add an OpStat of type %s to one of %s." %
(self.statistic_type, other.statistic_type))
if self.value is None:
self.value = other.value
elif other.value is not None:
self._value += other.value
return self
_stats_registry = registry.Registry("statistical functions")
class RegisterStatistics(object):
"""A decorator for registering the statistics function for an op type.
This decorator can be defined for an op type so that it gives a
report on the resources used by an instance of an operator, in the
form of an OpStats object.
Well-known types of statistics include these so far:
- flops: When running a graph, the bulk of the computation happens doing
numerical calculations like matrix multiplications. This type allows a node
to return how many floating-point operations it takes to complete. The
total number of FLOPs for a graph is a good guide to its expected latency.
You can add your own statistics just by picking a new type string, registering
functions for the ops you care about, and then calling get_stats_for_node_def.
If a statistic for an op is registered multiple times, a KeyError will be
raised.
Since the statistics are counted on a per-op basis, they are not suitable for
model parameters (capacity), which are expected to be counted only once even
when shared by multiple ops (e.g. an RNN).
For example, you can define a new metric called doohickey for a Foo operation
by placing this in your code:
```python
@ops.RegisterStatistics("Foo", "doohickey")
def _calc_foo_bojangles(unused_graph, unused_node_def):
return ops.OpStats("doohickey", 20)
```
Then in client code you can retrieve the value by making this call:
```python
doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey")
```
If the NodeDef is for an op with a registered doohickey function, you'll get
back the calculated amount in doohickey.value, or None if it's not defined.
"""
__slots__ = ["_op_type", "_statistic_type"]
def __init__(self, op_type, statistic_type):
"""Saves the `op_type` as the `Operation` type."""
if not isinstance(op_type, six.string_types):
raise TypeError("op_type must be a string.")
if "," in op_type:
raise TypeError("op_type must not contain a comma.")
self._op_type = op_type
if not isinstance(statistic_type, six.string_types):
raise TypeError("statistic_type must be a string.")
if "," in statistic_type:
raise TypeError("statistic_type must not contain a comma.")
self._statistic_type = statistic_type
def __call__(self, f):
"""Registers "f" as the statistics function for "op_type"."""
_stats_registry.register(f, self._op_type + "," + self._statistic_type)
return f
def get_stats_for_node_def(graph, node, statistic_type):
"""Looks up the node's statistics function in the registry and calls it.
This function takes a Graph object and a NodeDef from a GraphDef, and if
there's an associated statistics method, calls it and returns a result. If no
function has been registered for the particular node type, it returns an empty
statistics object.
Args:
graph: A Graph object that's been set up with the node's graph.
node: A NodeDef describing the operator.
statistic_type: A string identifying the statistic we're interested in.
Returns:
An OpStats object containing information about resource usage.
"""
try:
stats_func = _stats_registry.lookup(node.op + "," + statistic_type)
result = stats_func(graph, node)
except LookupError:
result = OpStats(statistic_type)
return result
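# Illustrative sketch (comments only; assumes the public `tf` namespace): when
# no statistics function is registered for the (op type, statistic) pair, an
# empty OpStats with value=None is returned.
#
#   >>> with tf.Graph().as_default() as g:
#   ...   c = tf.matmul(tf.ones([2, 2]), tf.ones([2, 2]))
#   >>> stats = get_stats_for_node_def(g, c.op.node_def, "doohickey")
#   >>> stats.statistic_type, stats.value
#   ('doohickey', None)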
def name_from_scope_name(name):
"""Returns the name of an op given the name of its scope.
Args:
name: the name of the scope.
Returns:
the name of the op (equal to scope name minus any trailing slash).
"""
return name[:-1] if (name and name[-1] == "/") else name
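# Illustrative sketch (comments only):
#
#   >>> name_from_scope_name("foo/bar/")
#   'foo/bar'
#   >>> name_from_scope_name("foo/bar")
#   'foo/bar'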
_MUTATION_LOCK_GROUP = 0
_SESSION_RUN_LOCK_GROUP = 1
@tf_export("Graph")
class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
Graphs are used by `tf.function`s to represent the function's computations.
Each graph contains a set of `tf.Operation` objects, which represent units of
computation; and `tf.Tensor` objects, which represent the units of data that
flow between operations.
### Using graphs directly (deprecated)
A `tf.Graph` can be constructed and used directly without a `tf.function`, as
was required in TensorFlow 1, but this is deprecated and it is recommended to
use a `tf.function` instead. If a graph is directly used, other deprecated
TensorFlow 1 classes are also required to execute the graph, such as a
`tf.compat.v1.Session`.
A default graph can be registered with the `tf.Graph.as_default` context
manager. Then, operations will be added to the graph instead of being executed
eagerly. For example:
```python
g = tf.Graph()
with g.as_default():
# Define operations and tensors in `g`.
c = tf.constant(30.0)
assert c.graph is g
```
`tf.compat.v1.get_default_graph()` can be used to obtain the default graph.
Important note: This class *is not* thread-safe for graph construction. All
operations should be created from a single thread, or external
synchronization must be provided. Unless otherwise specified, all methods
are not thread-safe.
A `Graph` instance supports an arbitrary number of "collections"
that are identified by name. For convenience when building a large
graph, collections can store groups of related objects: for
example, the `tf.Variable` uses a collection (named
`tf.GraphKeys.GLOBAL_VARIABLES`) for
all variables that are created during the construction of a graph. The caller
may define additional collections by specifying a new name.
"""
def __init__(self):
"""Creates a new, empty Graph."""
# Protects core state that can be returned via public accessors.
# Thread-safety is provided on a best-effort basis to support buggy
# programs, and is not guaranteed by the public `tf.Graph` API.
#
# NOTE(mrry): This does not protect the various stacks. A warning will
# be reported if these are used from multiple threads.
self._lock = threading.RLock()
# The group lock synchronizes Session.run calls with methods that create
# and mutate ops (e.g. Graph.create_op()). This synchronization is
# necessary because it's illegal to modify an operation after it's been run.
# The group lock allows any number of threads to mutate ops at the same time
# but if any modification is going on, all Session.run calls have to wait.
# Similarly, if one or more Session.run calls are going on, all mutate ops
# have to wait until all Session.run calls have finished.
self._group_lock = lock_util.GroupLock(num_groups=2)
self._nodes_by_id = {} # GUARDED_BY(self._lock)
self._next_id_counter = 0 # GUARDED_BY(self._lock)
self._nodes_by_name = {} # GUARDED_BY(self._lock)
self._version = 0 # GUARDED_BY(self._lock)
# Maps a name used in the graph to the next id to use for that name.
self._names_in_use = {}
self._stack_state_is_thread_local = False
self._thread_local = threading.local()
# Functions that will be applied to choose a device if none is specified.
# In TF2.x or after switch_to_thread_local(),
# self._thread_local._device_function_stack is used instead.
self._graph_device_function_stack = traceable_stack.TraceableStack()
# Default original_op applied to new ops.
self._default_original_op = None
# Current control flow context. It could be either CondContext or
# WhileContext defined in ops/control_flow_ops.py
self._control_flow_context = None
# A new node will depend of the union of all of the nodes in the stack.
# In TF2.x or after switch_to_thread_local(),
# self._thread_local._control_dependencies_stack is used instead.
self._graph_control_dependencies_stack = []
# Arbitrary collections of objects.
self._collections = {}
# The graph-level random seed
self._seed = None
# A dictionary of attributes that should be applied to all ops.
self._attr_scope_map = {}
# A map from op type to the kernel label that should be used.
self._op_to_kernel_label_map = {}
# A map from op type to an alternative op type that should be used when
# computing gradients.
self._gradient_override_map = {}
# A map from op type to a gradient function that should be used instead.
self._gradient_function_map = {}
# True if the graph is considered "finalized". In that case no
# new operations can be added.
self._finalized = False
# Functions defined in the graph
self._functions = collections.OrderedDict()
# Default GraphDef versions
self._graph_def_versions = versions_pb2.VersionDef(
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER)
self._building_function = False
# Stack of colocate_with ops. In TF2.x or after switch_to_thread_local(),
# self._thread_local._colocation_stack is used instead.
self._graph_colocation_stack = traceable_stack.TraceableStack()
# Set of tensors that are dangerous to feed!
self._unfeedable_tensors = object_identity.ObjectIdentitySet()
# Set of operations that are dangerous to fetch!
self._unfetchable_ops = set()
# A map of tensor handle placeholder to tensor dtype.
self._handle_feeders = {}
# A map from tensor handle to its read op.
self._handle_readers = {}
# A map from tensor handle to its move op.
self._handle_movers = {}
# A map from tensor handle to its delete op.
self._handle_deleters = {}
# Allow optimizers and other objects to pseudo-uniquely key graphs (this key
# will be shared when defining function graphs, for example, so optimizers
# being called inside function definitions behave as if they were seeing the
# actual outside graph).
self._graph_key = "grap-key-%d/" % (uid(),)
# A string with the last reduction method passed to
# losses.compute_weighted_loss(), or None. This is required only for
# backward compatibility with Estimator and optimizer V1 use cases.
self._last_loss_reduction = None
# Flag that is used to indicate whether loss has been scaled by optimizer.
# If this flag has been set, then estimator uses it to scale loss back
# before reporting. This is required only for backward compatibility with
# Estimator and optimizer V1 use cases.
self._is_loss_scaled_by_optimizer = False
self._container = ""
# Set to True if this graph is being built in an
# AutomaticControlDependencies context.
self._add_control_dependencies = False
# Cache for OpDef protobufs retrieved via the C API.
self._op_def_cache = {}
# Cache for constant results of `broadcast_gradient_args()`. The keys are
# tuples of fully-defined shapes: (x_shape_tuple, y_shape_tuple), and the
# values are tuples of reduction indices: (rx, ry).
self._bcast_grad_args_cache = {}
# Cache for constant results of `reduced_shape()`. The keys are pairs of
# tuples: (input_shape_tuple, reduction_indices_tuple), and the values
# are pairs of tuples: (output_shape_kept_dims, tile_scaling).
self._reduced_shape_cache = {}
# TODO(skyewm): fold as much of the above as possible into the C
# implementation
self._scoped_c_graph = c_api_util.ScopedTFGraph()
# The C API requires all ops to have shape functions. Disable this
# requirement (many custom ops do not have shape functions, and we don't
# want to break these existing cases).
pywrap_tf_session.SetRequireShapeInferenceFns(self._c_graph, False)
if tf2.enabled():
self.switch_to_thread_local()
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@tf_contextlib.contextmanager
def _variable_creator_scope(self, creator, priority=100):
"""Scope which defines a variable creation function.
Args:
creator: A callable taking `next_creator` and `kwargs`. See the
`tf.variable_creator_scope` docstring.
priority: Creators with a higher `priority` are called first. Within the
same priority, creators are called inner-to-outer.
Yields:
`_variable_creator_scope` is a context manager with a side effect, but
doesn't return a value.
Raises:
RuntimeError: If variable creator scopes are not properly nested.
"""
# This step keeps a reference to the existing stack, and it also initializes
# self._thread_local._variable_creator_stack if it doesn't exist yet.
old = self._variable_creator_stack
new = list(old)
new.append((priority, creator))
# Sorting is stable, so we'll put higher-priority creators later in the list
# but otherwise maintain registration order.
new.sort(key=lambda item: item[0])
self._thread_local._variable_creator_stack = new # pylint: disable=protected-access
try:
yield
finally:
if self._thread_local._variable_creator_stack is not new: # pylint: disable=protected-access
raise RuntimeError(
"Exiting variable_creator_scope without proper nesting.")
self._thread_local._variable_creator_stack = old # pylint: disable=protected-access
# Note: this method is private because the API of tf.Graph() is public and
# frozen, and this functionality is still not ready for public visibility.
@property
def _variable_creator_stack(self):
if not hasattr(self._thread_local, "_variable_creator_stack"):
self._thread_local._variable_creator_stack = [] # pylint: disable=protected-access
# This previously returned a copy of the stack instead of the stack itself,
# to guard against accidental mutation. Consider, however, code that wants
# to save and restore the variable creator stack:
# def f():
# original_stack = graph._variable_creator_stack
# graph._variable_creator_stack = new_stack
# ... # Some code
# graph._variable_creator_stack = original_stack
#
# And let's say you have some code that calls this function with some
# variable_creator:
# def g():
# with variable_scope.variable_creator_scope(creator):
# f()
# When exiting the variable creator scope, it would see a different stack
# object than it expected, leading to an "Exiting variable_creator_scope
# without proper nesting" error.
return self._thread_local._variable_creator_stack # pylint: disable=protected-access
@_variable_creator_stack.setter
def _variable_creator_stack(self, variable_creator_stack):
self._thread_local._variable_creator_stack = variable_creator_stack # pylint: disable=protected-access
def _check_not_finalized(self):
"""Check if the graph is finalized.
Raises:
RuntimeError: If the graph is finalized.
"""
if self._finalized:
raise RuntimeError("Graph is finalized and cannot be modified.")
def _add_op(self, op, op_name):
"""Adds 'op' to the graph and returns the unique ID for the added Operation.
Args:
op: the Operation to add.
op_name: the name of the Operation.
Returns:
An integer that is a unique ID for the added Operation.
"""
self._check_not_finalized()
with self._lock:
self._next_id_counter += 1
op_id = self._next_id_counter
self._nodes_by_id[op_id] = op
self._nodes_by_name[op_name] = op
self._version = max(self._version, op_id)
return op_id
@property
def _c_graph(self):
if self._scoped_c_graph:
return self._scoped_c_graph.graph
return None
@property
def version(self):
"""Returns a version number that increases as ops are added to the graph.
Note that this is unrelated to the
`tf.Graph.graph_def_versions`.
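For example, a minimal sketch:
```python
g = tf.Graph()
version_before = g.version
with g.as_default():
  tf.constant(1.0)
assert g.version > version_before
```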
Returns:
An integer version that increases as ops are added to the graph.
"""
if self._finalized:
return self._version
with self._lock:
return self._version
@property
def graph_def_versions(self):
# pylint: disable=line-too-long
"""The GraphDef version information of this graph.
For details on the meaning of each version, see
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto).
Returns:
A `VersionDef`.
"""
# pylint: enable=line-too-long
with c_api_util.tf_buffer() as buf:
pywrap_tf_session.TF_GraphVersions(self._c_graph, buf)
data = pywrap_tf_session.TF_GetBuffer(buf)
version_def = versions_pb2.VersionDef()
version_def.ParseFromString(compat.as_bytes(data))
return version_def
@property
def seed(self):
"""The graph-level random seed of this graph."""
return self._seed
@seed.setter
def seed(self, seed):
self._seed = seed
@property
def finalized(self):
"""True if this graph has been finalized."""
return self._finalized
def finalize(self):
"""Finalizes this graph, making it read-only.
After calling `g.finalize()`, no new operations can be added to
`g`. This method is used to ensure that no operations are added
to a graph when it is shared between multiple threads, for example
when using a `tf.compat.v1.train.QueueRunner`.
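For example, a minimal sketch:
```python
g = tf.Graph()
with g.as_default():
  tf.constant(1.0)
g.finalize()
# Any further attempt to add an op to `g` now raises a RuntimeError
# ("Graph is finalized and cannot be modified.").
```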
"""
self._finalized = True
def _unsafe_unfinalize(self):
"""Opposite of `finalize`.
Internal interface.
NOTE: Unfinalizing a graph could have negative impact on performance,
especially in a multi-threaded environment. Unfinalizing a graph
when it is in use by a Session may lead to undefined behavior. Ensure
that all sessions using a graph are closed before calling this method.
"""
self._finalized = False
def _get_control_flow_context(self):
"""Returns the current control flow context.
Returns:
A context object.
"""
return self._control_flow_context
def _set_control_flow_context(self, ctx):
"""Sets the current control flow context.
Args:
ctx: a context object.
"""
self._control_flow_context = ctx
def _copy_functions_to_graph_def(self, graph_def, starting_bytesize):
"""If this graph contains functions, copy them to `graph_def`."""
bytesize = starting_bytesize
for f in self._functions.values():
bytesize += f.definition.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
graph_def.library.function.extend([f.definition])
if f.grad_func_name:
grad_def = function_pb2.GradientDef()
grad_def.function_name = f.name
grad_def.gradient_func = f.grad_func_name
graph_def.library.gradient.extend([grad_def])
def _as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using `tf.import_graph_def`) or used with the
[C++ Session API](../../../../api_docs/cc/index.md).
This method is thread-safe.
Args:
from_version: Optional. If this is set, returns a `GraphDef` containing
only the nodes that were added to this graph since its `version`
property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each node with
the inferred shapes of each of its outputs.
Returns:
A tuple containing a
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer, and the version of the graph to which that
`GraphDef` corresponds.
Raises:
ValueError: If the `graph_def` would be too large.
"""
# pylint: enable=line-too-long
with self._lock:
with c_api_util.tf_buffer() as buf:
pywrap_tf_session.TF_GraphToGraphDef(self._c_graph, buf)
data = pywrap_tf_session.TF_GetBuffer(buf)
graph = graph_pb2.GraphDef()
graph.ParseFromString(compat.as_bytes(data))
# Strip the experimental library field iff it's empty.
if not graph.library.function:
graph.ClearField("library")
if add_shapes:
for node in graph.node:
op = self._nodes_by_name[node.name]
if op.outputs:
node.attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in op.outputs])
for function_def in graph.library.function:
defined_function = self._functions[function_def.signature.name]
try:
func_graph = defined_function.graph
except AttributeError:
# _DefinedFunction doesn't have a graph, _EagerDefinedFunction
# does. Both rely on ops.py, so we can't really isinstance check
# them.
continue
input_shapes = function_def.attr["_input_shapes"]
try:
func_graph_inputs = func_graph.inputs
except AttributeError:
continue
# TODO(b/141471245): Fix the inconsistency when inputs of func graph
# are appended during gradient computation of while/cond.
for input_tensor, arg_def in zip(func_graph_inputs,
function_def.signature.input_arg):
input_shapes.list.shape.add().CopyFrom(
input_tensor.get_shape().as_proto())
if input_tensor.dtype == dtypes.resource:
_copy_handle_data_to_arg_def(input_tensor, arg_def)
for output_tensor, arg_def in zip(func_graph.outputs,
function_def.signature.output_arg):
if output_tensor.dtype == dtypes.resource:
_copy_handle_data_to_arg_def(output_tensor, arg_def)
for node in function_def.node_def:
try:
op = func_graph.get_operation_by_name(node.name)
except KeyError:
continue
outputs = op.outputs
if op.type == "StatefulPartitionedCall":
# Filter out any extra outputs (possibly added by function
# backpropagation rewriting).
num_outputs = len(node.attr["Tout"].list.type)
outputs = outputs[:num_outputs]
node.attr["_output_shapes"].list.shape.extend(
[output.get_shape().as_proto() for output in outputs])
return graph, self._version
def as_graph_def(self, from_version=None, add_shapes=False):
# pylint: disable=line-too-long
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
(using `tf.import_graph_def`) or used with the
[C++ Session API](../../api_docs/cc/index.md).
This method is thread-safe.
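For example, a minimal sketch:
```python
g = tf.Graph()
with g.as_default():
  tf.constant(1.0, name="one")
graph_def = g.as_graph_def(add_shapes=True)
print([node.name for node in graph_def.node])  # e.g. ["one"]
```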
Args:
from_version: Optional. If this is set, returns a `GraphDef` containing
only the nodes that were added to this graph since its `version`
property had the given value.
add_shapes: If true, adds an "_output_shapes" list attr to each node with
the inferred shapes of each of its outputs.
Returns:
A
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer.
Raises:
ValueError: If the `graph_def` would be too large.
"""
# pylint: enable=line-too-long
result, _ = self._as_graph_def(from_version, add_shapes)
return result
def _is_function(self, name):
"""Tests whether 'name' is registered in this graph's function library.
Args:
name: string op name.
Returns:
bool indicating whether or not 'name' is registered in function library.
"""
return compat.as_str(name) in self._functions
def _get_function(self, name):
"""Returns the function definition for 'name'.
Args:
name: string function name.
Returns:
The function def proto.
"""
return self._functions.get(compat.as_str(name), None)
def _add_function(self, function):
"""Adds a function to the graph.
After the function has been added, you can call to the function by
passing the function name in place of an op name to
`Graph.create_op()`.
Args:
function: A `_DefinedFunction` object.
Raises:
ValueError: if another function is defined with the same name.
"""
self._check_not_finalized()
name = function.name
# Sanity checks on gradient definition.
if (function.grad_func_name is not None) and (function.python_grad_func is
not None):
raise ValueError("Gradient defined twice for function %s" % name)
# Add function to graph
# pylint: disable=protected-access
gradient = (
function._grad_func._c_func.func if function._grad_func else None)
pywrap_tf_session.TF_GraphCopyFunction(self._c_graph, function._c_func.func,
gradient)
# pylint: enable=protected-access
self._functions[compat.as_str(name)] = function
# Need a new-enough consumer to support the functions we add to the graph.
if self._graph_def_versions.min_consumer < 12:
self._graph_def_versions.min_consumer = 12
@property
def building_function(self):
"""Returns True iff this graph represents a function."""
return self._building_function
# Helper functions to create operations.
@deprecated_args(None,
"Shapes are always computed; don't use the compute_shapes "
"as it has no effect.", "compute_shapes")
def create_op(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_shapes=True,
compute_device=True):
"""Creates an `Operation` in this graph.
This is a low-level interface for creating an `Operation`. Most
programs will not call this method directly, and instead use the
Python op constructors, such as `tf.constant()`, which add ops to
the default graph.
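For illustration, a minimal sketch that builds an "AddV2" op by its raw op type
name (most code should prefer the regular op constructors):
```python
g = tf.Graph()
with g.as_default():
  x = tf.constant(1.0)
  y = tf.constant(2.0)
  op = g.create_op("AddV2", [x, y], name="my_add")
assert op.type == "AddV2"
assert op.outputs[0].dtype == tf.float32
```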
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: (Optional) A list of `DType` objects that will be the types of the
tensors that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of the
tensors that the operation consumes. By default, uses the base `DType`
of each input in `inputs`. Operations that expect reference-typed inputs
must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always
computed).
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Raises:
TypeError: if any of the inputs is not a `Tensor`.
ValueError: if colocation conflicts with existing device assignment.
Returns:
An `Operation` object.
"""
del compute_shapes
for idx, a in enumerate(inputs):
if not isinstance(a, Tensor):
raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
return self._create_op_internal(op_type, inputs, dtypes, input_types, name,
attrs, op_def, compute_device)
def _create_op_internal(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_device=True):
"""Creates an `Operation` in this graph.
Implements `Graph.create_op()` without the overhead of the deprecation
wrapper.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: (Optional) A list of `DType` objects that will be the types of the
tensors that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of the
tensors that the operation consumes. By default, uses the base `DType`
of each input in `inputs`. Operations that expect reference-typed inputs
must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Raises:
ValueError: if colocation conflicts with existing device assignment.
Returns:
An `Operation` object.
"""
self._check_not_finalized()
if name is None:
name = op_type
# If a name ends with a '/' it is a "name scope" and we use it as-is,
# after removing the trailing '/'.
if name and name[-1] == "/":
name = name_from_scope_name(name)
else:
name = self.unique_name(name)
node_def = _NodeDef(op_type, name, attrs)
input_ops = set(t.op for t in inputs)
control_inputs = self._control_dependencies_for_inputs(input_ops)
# _create_op_helper mutates the new Operation. `_mutation_lock` ensures a
# Session.run call cannot occur between creating and mutating the op.
with self._mutation_lock():
ret = Operation(
node_def,
self,
inputs=inputs,
output_types=dtypes,
control_inputs=control_inputs,
input_types=input_types,
original_op=self._default_original_op,
op_def=op_def)
self._create_op_helper(ret, compute_device=compute_device)
return ret
def _create_op_from_tf_operation(self, c_op, compute_device=True):
"""Creates an `Operation` in this graph from the supplied TF_Operation.
This method is like create_op() except the new Operation is constructed
using `c_op`. The returned Operation will have `c_op` as its _c_op
field. This is used to create Operation objects around TF_Operations created
indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile).
This function does not call Operation._control_flow_post_processing or
Graph._control_dependencies_for_inputs (since the inputs may not be
available yet). The caller is responsible for calling these methods.
Args:
c_op: a wrapped TF_Operation
compute_device: (Optional.) If True, device functions will be executed to
compute the device property of the Operation.
Returns:
An `Operation` object.
"""
self._check_not_finalized()
ret = Operation(c_op, self)
# If a name_scope was created with ret.name but no nodes were created in it,
# the name will still appear in _names_in_use even though the name hasn't
# been used. This is ok, just leave _names_in_use as-is in this case.
# TODO(skyewm): make the C API guarantee no name conflicts.
name_key = ret.name.lower()
if name_key not in self._names_in_use:
self._names_in_use[name_key] = 1
self._create_op_helper(ret, compute_device=compute_device)
return ret
def _create_op_helper(self, op, compute_device=True):
"""Common logic for creating an op in this graph."""
# Apply any additional attributes requested. Do not overwrite any existing
# attributes.
for key, value in self._attr_scope_map.items():
try:
op.get_attr(key)
except ValueError:
if callable(value):
value = value(op.node_def)
if not isinstance(value, (type(None), attr_value_pb2.AttrValue)):
raise TypeError(
"Callable for scope map key '%s' must return either None or "
"an AttrValue protocol buffer; but it returned: %s" %
(key, value))
if value:
op._set_attr(key, value) # pylint: disable=protected-access
# Apply a kernel label if one has been specified for this op type.
try:
kernel_label = self._op_to_kernel_label_map[op.type]
op._set_attr("_kernel", # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label)))
except KeyError:
pass
op._gradient_function = self._gradient_function_map.get(op.type) # pylint: disable=protected-access
# Apply the overriding op type for gradients if one has been specified for
# this op type.
try:
mapped_op_type = self._gradient_override_map[op.type]
op._set_attr("_gradient_op_type", # pylint: disable=protected-access
attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type)))
except KeyError:
pass
self._record_op_seen_by_control_dependencies(op)
if compute_device:
self._apply_device_functions(op)
# Snapshot the colocation stack metadata before we might generate error
# messages using it. Note that this snapshot depends on the actual stack
# and is independent of the op's _class attribute.
# pylint: disable=protected-access
op._colocation_code_locations = self._snapshot_colocation_stack_metadata()
# pylint: enable=protected-access
if self._colocation_stack:
all_colocation_groups = []
is_device_set = False
for colocation_op in self._colocation_stack.peek_objs():
try:
all_colocation_groups.extend(colocation_op.colocation_groups())
except AttributeError:
pass
if colocation_op.device and not is_device_set:
# pylint: disable=protected-access
op._set_device(colocation_op.device)
# pylint: enable=protected-access
is_device_set = True
all_colocation_groups = sorted(set(all_colocation_groups))
# pylint: disable=protected-access
op._set_attr(
"_class",
attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups)))
# pylint: enable=protected-access
# Sets "container" attribute if
# (1) self._container is not None
# (2) "is_stateful" is set in OpDef
# (3) "container" attribute is in OpDef
# (4) "container" attribute is None
if self._container and op._is_stateful: # pylint: disable=protected-access
try:
container_attr = op.get_attr("container")
except ValueError:
# "container" attribute is not in OpDef
pass
else:
if not container_attr:
op._set_attr("container", attr_value_pb2.AttrValue( # pylint: disable=protected-access
s=compat.as_bytes(self._container)))
def _add_new_tf_operations(self, compute_devices=True):
"""Creates `Operations` in this graph for any new TF_Operations.
This is useful for when TF_Operations are indirectly created by the C API
outside of the Operation constructor (e.g. by TF_ImportGraphDef,
TF_FinishWhile). This ensures there are corresponding Operations for all
TF_Operations in the underlying TF_Graph.
Args:
compute_devices: (Optional.) If True, device functions will be executed to
compute the device properties of each new Operation.
Returns:
A list of the new `Operation` objects.
"""
self._check_not_finalized()
# Create all Operation objects before accessing their inputs since an op may
# be created before its inputs.
new_ops = [
self._create_op_from_tf_operation(c_op, compute_device=compute_devices)
for c_op in c_api_util.new_tf_operations(self)
]
# pylint: disable=protected-access
for op in new_ops:
new_control_inputs = self._control_dependencies_for_inputs(op.inputs)
op._add_control_inputs(new_control_inputs)
op._control_flow_post_processing()
# pylint: enable=protected-access
return new_ops
def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
"""Returns the object referred to by `obj`, as an `Operation` or `Tensor`.
This function validates that `obj` represents an element of this
graph, and gives an informative error message if it is not.
This function is the canonical way to get/validate an object of
one of the allowed types from an external argument reference in the
Session API.
This method may be called concurrently from multiple threads.
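For example, a minimal sketch:
```python
g = tf.Graph()
with g.as_default():
  c = tf.constant(1.0, name="c")
assert g.as_graph_element("c") is c.op   # Operation, looked up by name.
assert g.as_graph_element("c:0") is c    # Tensor, looked up by name.
assert g.as_graph_element(c) is c        # Graph elements pass through.
```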
Args:
obj: A `Tensor`, an `Operation`, or the name of a tensor or operation. Can
also be any object with an `_as_graph_element()` method that returns a
value of one of these types. Note: `_as_graph_element` will be called
inside the graph's lock and so may not modify the graph.
allow_tensor: If true, `obj` may refer to a `Tensor`.
allow_operation: If true, `obj` may refer to an `Operation`.
Returns:
The `Tensor` or `Operation` in the Graph corresponding to `obj`.
Raises:
TypeError: If `obj` is not a type we support attempting to convert
to types.
ValueError: If `obj` is of an appropriate type but invalid. For
example, an invalid string.
KeyError: If `obj` is not an object in the graph.
"""
if self._finalized:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
with self._lock:
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
"""See `Graph.as_graph_element()` for details."""
# The vast majority of this function is figuring
# out what an API user might be doing wrong, so
# that we can give helpful error messages.
#
# Ideally, it would be nice to split it up, but we
# need context to generate nice error messages.
if allow_tensor and allow_operation:
types_str = "Tensor or Operation"
elif allow_tensor:
types_str = "Tensor"
elif allow_operation:
types_str = "Operation"
else:
raise ValueError("allow_tensor and allow_operation can't both be False.")
temp_obj = _as_graph_element(obj)
if temp_obj is not None:
obj = temp_obj
# If obj appears to be a name...
if isinstance(obj, compat.bytes_or_text_types):
name = compat.as_str(obj)
if ":" in name and allow_tensor:
# Looks like a Tensor name and can be a Tensor.
try:
op_name, out_n = name.split(":")
out_n = int(out_n)
except:
raise ValueError("The name %s looks a like a Tensor name, but is "
"not a valid one. Tensor names must be of the "
"form \"<op_name>:<output_index>\"." % repr(name))
if op_name in self._nodes_by_name:
op = self._nodes_by_name[op_name]
else:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, does not exist in the "
"graph." % (repr(name), repr(op_name)))
try:
return op.outputs[out_n]
except:
raise KeyError("The name %s refers to a Tensor which does not "
"exist. The operation, %s, exists but only has "
"%s outputs." %
(repr(name), repr(op_name), len(op.outputs)))
elif ":" in name and not allow_tensor:
# Looks like a Tensor name but can't be a Tensor.
raise ValueError("Name %s appears to refer to a Tensor, not a %s." %
(repr(name), types_str))
elif ":" not in name and allow_operation:
# Looks like an Operation name and can be an Operation.
if name not in self._nodes_by_name:
raise KeyError("The name %s refers to an Operation not in the "
"graph." % repr(name))
return self._nodes_by_name[name]
elif ":" not in name and not allow_operation:
# Looks like an Operation name but can't be an Operation.
if name in self._nodes_by_name:
# Yep, it's an Operation name
err_msg = ("The name %s refers to an Operation, not a %s." %
(repr(name), types_str))
else:
err_msg = ("The name %s looks like an (invalid) Operation name, "
"not a %s." % (repr(name), types_str))
err_msg += (" Tensor names must be of the form "
"\"<op_name>:<output_index>\".")
raise ValueError(err_msg)
elif isinstance(obj, Tensor) and allow_tensor:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Tensor %s is not an element of this graph." % obj)
return obj
elif isinstance(obj, Operation) and allow_operation:
# Actually obj is just the object it's referring to.
if obj.graph is not self:
raise ValueError("Operation %s is not an element of this graph." % obj)
return obj
else:
# We give up!
raise TypeError("Can not convert a %s into a %s." %
(type(obj).__name__, types_str))
def get_operations(self):
"""Return the list of operations in the graph.
You can modify the operations in place, but modifications
to the list such as inserts/delete have no effect on the
list of operations known to the graph.
This method may be called concurrently from multiple threads.
Returns:
A list of Operations.
"""
if self._finalized:
return list(self._nodes_by_id.values())
with self._lock:
return list(self._nodes_by_id.values())
def get_operation_by_name(self, name):
"""Returns the `Operation` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to an operation in this graph.
"""
if not isinstance(name, six.string_types):
raise TypeError("Operation names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=False, allow_operation=True)
def _get_operation_by_name_unsafe(self, name):
"""Returns the `Operation` with the given `name`.
This is an internal, unsafe version of `get_operation_by_name`. It skips many
checks and does not have user-friendly error messages, but runs considerably
faster. This method may be called concurrently from multiple threads.
Args:
name: The name of the `Operation` to return.
Returns:
The `Operation` with the given `name`.
Raises:
KeyError: If `name` does not correspond to an operation in this graph.
"""
if self._finalized:
return self._nodes_by_name[name]
with self._lock:
return self._nodes_by_name[name]
def _get_operation_by_tf_operation(self, tf_oper):
op_name = pywrap_tf_session.TF_OperationName(tf_oper)
return self._get_operation_by_name_unsafe(op_name)
def get_tensor_by_name(self, name):
"""Returns the `Tensor` with the given `name`.
This method may be called concurrently from multiple threads.
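For example, a minimal sketch (tensor names have the form
"<op_name>:<output_index>"):
```python
g = tf.Graph()
with g.as_default():
  c = tf.constant(1.0, name="c")
assert g.get_tensor_by_name("c:0") is c
```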
Args:
name: The name of the `Tensor` to return.
Returns:
The `Tensor` with the given `name`.
Raises:
TypeError: If `name` is not a string.
KeyError: If `name` does not correspond to a tensor in this graph.
"""
# Names should be strings.
if not isinstance(name, six.string_types):
raise TypeError("Tensor names are strings (or similar), not %s." %
type(name).__name__)
return self.as_graph_element(name, allow_tensor=True, allow_operation=False)
def _get_tensor_by_tf_output(self, tf_output):
"""Returns the `Tensor` representing `tf_output`.
Note that there is only one such `Tensor`, i.e. multiple calls to this
function with the same TF_Output value will always return the same `Tensor`
object.
Args:
tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`).
Returns:
The `Tensor` that represents `tf_output`.
"""
op = self._get_operation_by_tf_operation(tf_output.oper)
return op.outputs[tf_output.index]
@property
def _last_id(self):
return self._next_id_counter
def _get_op_def(self, type): # pylint: disable=redefined-builtin
"""Returns the `OpDef` proto for `type`. `type` is a string."""
# NOTE: No locking is required because the lookup and insertion operations
# on Python dictionaries are atomic.
try:
return self._op_def_cache[type]
except KeyError:
with c_api_util.tf_buffer() as buf:
# pylint: disable=protected-access
pywrap_tf_session.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type),
buf)
# pylint: enable=protected-access
data = pywrap_tf_session.TF_GetBuffer(buf)
op_def = op_def_pb2.OpDef()
op_def.ParseFromString(compat.as_bytes(data))
self._op_def_cache[type] = op_def
return op_def
def as_default(self):
"""Returns a context manager that makes this `Graph` the default graph.
This method should be used if you want to create multiple graphs
in the same process. For convenience, a global default graph is
provided, and all ops will be added to this graph if you do not
create a new graph explicitly.
Use this method with the `with` keyword to specify that ops created within
the scope of a block should be added to this graph. In this case, once
the scope of the `with` is exited, the previous default graph is set again
as default. There is a stack, so it's ok to have multiple nested levels
of `as_default` calls.
The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
The following code examples are equivalent:
```python
# 1. Using Graph.as_default():
g = tf.Graph()
with g.as_default():
c = tf.constant(5.0)
assert c.graph is g
# 2. Constructing and making default:
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
assert c.graph is g
```
If eager execution is enabled ops created under this context manager will be
added to the graph instead of executed eagerly.
Returns:
A context manager for using this graph as the default graph.
"""
return _default_graph_stack.get_controller(self)
@property
def collections(self):
"""Returns the names of the collections known to this graph."""
return list(self._collections)
def add_to_collection(self, name, value):
"""Stores `value` in the collection with the given `name`.
Note that collections are not sets, so it is possible to add a value to
a collection several times.
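For example, a minimal sketch:
```python
g = tf.Graph()
g.add_to_collection("my_collection", 1)
g.add_to_collection("my_collection", 1)  # Duplicates are allowed.
g.add_to_collection("my_collection", 2)
assert g.get_collection("my_collection") == [1, 1, 2]
```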
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
value: The value to add to the collection.
""" # pylint: disable=g-doc-exception
self._check_not_finalized()
with self._lock:
if name not in self._collections:
self._collections[name] = [value]
else:
self._collections[name].append(value)
def add_to_collections(self, names, value):
"""Stores `value` in the collections given by `names`.
Note that collections are not sets, so it is possible to add a value to
a collection several times. This function makes sure that duplicates in
`names` are ignored, but it will not check for pre-existing membership of
`value` in any of the collections in `names`.
`names` can be any iterable, but if `names` is a string, it is treated as a
single collection name.
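For example, a minimal sketch:
```python
g = tf.Graph()
g.add_to_collections(["a", "b", "a"], 42)  # Duplicate names are ignored.
assert g.get_collection("a") == [42]
assert g.get_collection("b") == [42]
g.add_to_collections("c", 7)               # A string is one collection name.
assert g.get_collection("c") == [7]
```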
Args:
names: The keys for the collections to add to. The `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collections.
"""
# Make sure names are unique, but treat strings as a single collection name
names = (names,) if isinstance(names, six.string_types) else set(names)
for name in names:
self.add_to_collection(name, value)
def get_collection_ref(self, name):
"""Returns a list of values in the collection with the given `name`.
If the collection exists, this returns the list itself, which can
be modified in place to change the collection. If the collection does
not exist, it is created as an empty list and the list is returned.
This is different from `get_collection()` which always returns a copy of
the collection list if it exists and never creates an empty collection.
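For example, a minimal sketch of the difference:
```python
g = tf.Graph()
ref = g.get_collection_ref("things")  # Creates the (empty) collection.
ref.append("a")                       # Mutates the stored list in place.
assert g.get_collection("things") == ["a"]
copy = g.get_collection("things")
copy.append("b")                      # Only modifies the returned copy.
assert g.get_collection("things") == ["a"]
```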
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection.
""" # pylint: disable=g-doc-exception
with self._lock:
coll_list = self._collections.get(name, None)
if coll_list is None:
coll_list = []
self._collections[name] = coll_list
return coll_list
def get_collection(self, name, scope=None):
"""Returns a list of values in the collection with the given `name`.
Unlike `get_collection_ref()`, which returns the actual collection list if it
exists, this method always returns a new list containing the collection's
current contents each time it is called.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
scope: (Optional.) A string. If supplied, the resulting list is filtered
to include only items whose `name` attribute matches `scope` using
`re.match`. Items without a `name` attribute are never returned if a
scope is supplied. The choice of `re.match` means that a `scope` without
special tokens filters by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
""" # pylint: disable=g-doc-exception
with self._lock:
collection = self._collections.get(name, None)
if collection is None:
return []
if scope is None:
return list(collection)
else:
c = []
regex = re.compile(scope)
for item in collection:
try:
if regex.match(item.name):
c.append(item)
except AttributeError:
# Collection items with no name are ignored.
pass
return c
def get_all_collection_keys(self):
"""Returns a list of collections used in this graph."""
with self._lock:
return [x for x in self._collections if isinstance(x, six.string_types)]
def clear_collection(self, name):
"""Clears all values in a collection.
Args:
name: The key for the collection. The `GraphKeys` class contains many
standard names for collections.
"""
self._check_not_finalized()
with self._lock:
if name in self._collections:
del self._collections[name]
@tf_contextlib.contextmanager
def _original_op(self, op):
"""Python 'with' handler to help annotate ops with their originator.
An op may have an 'original_op' property that indicates the op on which
it was based. For example a replica op is based on the op that was
replicated and a gradient op is based on the op that was differentiated.
All ops created in the scope of this 'with' handler will have
the given 'op' as their original op.
Args:
op: The Operation that all ops created in this scope will have as their
original op.
Yields:
Nothing.
"""
old_original_op = self._default_original_op
self._default_original_op = op
try:
yield
finally:
self._default_original_op = old_original_op
@property
def _name_stack(self):
# This may be called from a thread where name_stack doesn't yet exist.
if not hasattr(self._thread_local, "_name_stack"):
self._thread_local._name_stack = ""
return self._thread_local._name_stack
@_name_stack.setter
def _name_stack(self, name_stack):
self._thread_local._name_stack = name_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_contextlib.contextmanager
def name_scope(self, name):
"""Returns a context manager that creates hierarchical names for operations.
A graph maintains a stack of name scopes. A `with name_scope(...):`
statement pushes a new name onto the stack for the lifetime of the context.
The `name` argument will be interpreted as follows:
* A string (not ending with '/') will create a new name scope, in which
`name` is appended to the prefix of all operations created in the
context. If `name` has been used before, it will be made unique by
calling `self.unique_name(name)`.
* A scope previously captured from a `with g.name_scope(...) as
scope:` statement will be treated as an "absolute" name scope, which
makes it possible to re-enter existing scopes.
* A value of `None` or the empty string will reset the current name scope
to the top-level (empty) name scope.
For example:
```python
with tf.Graph().as_default() as g:
c = tf.constant(5.0, name="c")
assert c.op.name == "c"
c_1 = tf.constant(6.0, name="c")
assert c_1.op.name == "c_1"
# Creates a scope called "nested"
with g.name_scope("nested") as scope:
nested_c = tf.constant(10.0, name="c")
assert nested_c.op.name == "nested/c"
# Creates a nested scope called "inner".
with g.name_scope("inner"):
nested_inner_c = tf.constant(20.0, name="c")
assert nested_inner_c.op.name == "nested/inner/c"
# Create a nested scope called "inner_1".
with g.name_scope("inner"):
nested_inner_1_c = tf.constant(30.0, name="c")
assert nested_inner_1_c.op.name == "nested/inner_1/c"
# Treats `scope` as an absolute name scope, and
# switches to the "nested/" scope.
with g.name_scope(scope):
nested_d = tf.constant(40.0, name="d")
assert nested_d.op.name == "nested/d"
with g.name_scope(""):
e = tf.constant(50.0, name="e")
assert e.op.name == "e"
```
The name of the scope itself can be captured by `with
g.name_scope(...) as scope:`, which stores the name of the scope
in the variable `scope`. This value can be used to name an
operation that represents the overall result of executing the ops
in a scope. For example:
```python
inputs = tf.constant(...)
with g.name_scope('my_layer') as scope:
weights = tf.Variable(..., name="weights")
biases = tf.Variable(..., name="biases")
affine = tf.matmul(inputs, weights) + biases
output = tf.nn.relu(affine, name=scope)
```
NOTE: This constructor validates the given `name`. Valid scope
names match one of the following regular expressions:
[A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root)
[A-Za-z0-9_.\\-/]* (for other scopes)
Args:
name: A name for the scope.
Returns:
A context manager that installs `name` as a new name scope.
Raises:
ValueError: If `name` is not a valid scope name, according to the rules
above.
"""
if name:
if isinstance(name, compat.bytes_or_text_types):
name = compat.as_str(name)
if self._name_stack:
# Scopes created in a nested scope may have initial characters
# that are illegal as the initial character of an op name
# (viz. '-', '\', '/', and '_').
if not _VALID_SCOPE_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
else:
# Scopes created in the root must match the more restrictive
# op name regex, which constrains the initial character.
if not _VALID_OP_NAME_REGEX.match(name):
raise ValueError("'%s' is not a valid scope name" % name)
old_stack = self._name_stack
if not name: # Both for name=None and name="" we re-set to empty scope.
new_stack = None
elif name[-1] == "/":
new_stack = name_from_scope_name(name)
else:
new_stack = self.unique_name(name)
self._name_stack = new_stack
try:
yield "" if new_stack is None else new_stack + "/"
finally:
self._name_stack = old_stack
# pylint: enable=g-doc-return-or-yield,line-too-long
def unique_name(self, name, mark_as_used=True):
"""Return a unique operation name for `name`.
Note: You rarely need to call `unique_name()` directly. Most of
the time you just need to create `with g.name_scope()` blocks to
generate structured names.
`unique_name` is used to generate structured names, separated by
`"/"`, to help identify operations when debugging a graph.
Operation names are displayed in error messages reported by the
TensorFlow runtime, and in various visualization tools such as
TensorBoard.
If `mark_as_used` is set to `True`, which is the default, a new
unique name is created and marked as in use. If it's set to `False`,
the unique name is returned without actually being marked as used.
This is useful when the caller simply wants to know what the name
to be created will be.
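For example, a minimal sketch (`mark_as_used=False` only queries the name that
would be produced):
```python
g = tf.Graph()
with g.as_default():
  assert g.unique_name("add", mark_as_used=False) == "add"
  tf.constant(1.0, name="add")
  assert g.unique_name("add", mark_as_used=False) == "add_1"
```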
Args:
name: The name for an operation.
mark_as_used: Whether to mark this name as being used.
Returns:
A string to be passed to `create_op()` that will be used
to name the operation being created.
"""
if self._name_stack:
name = self._name_stack + "/" + name
# For the sake of checking for names in use, we treat names as case
# insensitive (e.g. foo = Foo).
name_key = name.lower()
i = self._names_in_use.get(name_key, 0)
# Increment the number for "name_key".
if mark_as_used:
self._names_in_use[name_key] = i + 1
if i > 0:
base_name_key = name_key
# Make sure the composed name key is not already used.
while name_key in self._names_in_use:
name_key = "%s_%d" % (base_name_key, i)
i += 1
# Mark the composed name_key as used in case someone wants
# to call unique_name("name_1").
if mark_as_used:
self._names_in_use[name_key] = 1
# Return the new name with the original capitalization of the given name.
name = "%s_%d" % (name, i - 1)
return name
def get_name_scope(self):
"""Returns the current name scope.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.compat.v1.get_default_graph().get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
return self._name_stack
@tf_contextlib.contextmanager
def _colocate_with_for_gradient(self, op, gradient_uid,
ignore_existing=False):
with self.colocate_with(op, ignore_existing):
if gradient_uid is not None:
ctx = _get_enclosing_context(self)
if ctx is not None:
ctx.EnterGradientColocation(op, gradient_uid)
try:
yield
finally:
ctx.ExitGradientColocation(op, gradient_uid)
else:
yield
else:
yield
@tf_contextlib.contextmanager
def colocate_with(self, op, ignore_existing=False):
"""Returns a context manager that specifies an op to colocate with.
Note: this function is not for public use, only for internal libraries.
For example:
```python
a = tf.Variable([1.0])
with g.colocate_with(a):
b = tf.constant(1.0)
c = tf.add(a, b)
```
`b` and `c` will always be colocated with `a`, no matter where `a`
is eventually placed.
**NOTE** Using a colocation scope resets any existing device constraints.
If `op` is `None` then `ignore_existing` must be `True` and the new
scope resets all colocation and device constraints.
Args:
op: The op to colocate all created ops with, or `None`.
ignore_existing: If true, only applies colocation of this op within the
context, rather than applying all colocation properties on the stack.
If `op` is `None`, this value must be `True`.
Raises:
ValueError: if op is None but ignore_existing is False.
Yields:
A context manager that specifies the op with which to colocate
newly created ops.
"""
if op is None and not ignore_existing:
raise ValueError("Trying to reset colocation (op is None) but "
"ignore_existing is not True")
op, device_only_candidate = _op_to_colocate_with(op, self)
# By default, colocate_with resets the device function stack,
# since colocate_with is typically used in specific internal
# library functions where colocation is intended to be "stronger"
# than device functions.
#
# In the future, a caller may specify that device_functions win
# over colocation, in which case we can add support.
device_fn_tmp = self._device_function_stack
self._device_function_stack = traceable_stack.TraceableStack()
if ignore_existing:
current_stack = self._colocation_stack
self._colocation_stack = traceable_stack.TraceableStack()
if op is not None:
# offset refers to the stack frame used for storing code location.
# We use 4, the sum of 1 to use our caller's stack frame and 3
# to jump over layers of context managers above us.
if device_only_candidate is not None:
self._colocation_stack.push_obj(device_only_candidate, offset=4)
self._colocation_stack.push_obj(op, offset=4)
elif not ignore_existing:
raise ValueError("Trying to reset colocation (op is None) but "
"ignore_existing is not True")
try:
yield
finally:
# Restore device function stack
self._device_function_stack = device_fn_tmp
if op is not None:
self._colocation_stack.pop_obj()
if device_only_candidate is not None:
self._colocation_stack.pop_obj()
# Reset the colocation stack if requested.
if ignore_existing:
self._colocation_stack = current_stack
def _add_device_to_stack(self, device_name_or_function, offset=0):
"""Add device to stack manually, separate from a context manager."""
total_offset = 1 + offset
spec = _UserDeviceSpec(device_name_or_function)
self._device_function_stack.push_obj(spec, offset=total_offset)
return spec
@tf_contextlib.contextmanager
def device(self, device_name_or_function):
# pylint: disable=line-too-long
"""Returns a context manager that specifies the default device to use.
The `device_name_or_function` argument may either be a device name
string, a device function, or None:
* If it is a device name string, all operations constructed in
this context will be assigned to the device with that name, unless
overridden by a nested `device()` context.
* If it is a function, it will be treated as a function from
Operation objects to device name strings, and invoked each time
a new Operation is created. The Operation will be assigned to
the device with the returned name.
* If it is None, all `device()` invocations from the enclosing context
will be ignored.
For information about the valid syntax of device name strings, see
the documentation in
[`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h).
For example:
```python
with g.device('/device:GPU:0'):
# All operations constructed in this context will be placed
# on GPU 0.
with g.device(None):
# All operations constructed in this context will have no
# assigned device.
# Defines a function from `Operation` to device string.
def matmul_on_gpu(n):
if n.type == "MatMul":
return "/device:GPU:0"
else:
return "/cpu:0"
with g.device(matmul_on_gpu):
# All operations of type "MatMul" constructed in this context
# will be placed on GPU 0; all other operations will be placed
# on CPU 0.
```
**N.B.** The device scope may be overridden by op wrappers or
other library code. For example, a variable assignment op
`v.assign()` must be colocated with the `tf.Variable` `v`, and
incompatible device scopes will be ignored.
Args:
device_name_or_function: The device name or function to use in the
context.
Yields:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If device scopes are not properly nested.
"""
self._add_device_to_stack(device_name_or_function, offset=2)
old_top_of_stack = self._device_function_stack.peek_top_obj()
try:
yield
finally:
new_top_of_stack = self._device_function_stack.peek_top_obj()
if old_top_of_stack is not new_top_of_stack:
raise RuntimeError("Exiting device scope without proper scope nesting.")
self._device_function_stack.pop_obj()
def _apply_device_functions(self, op):
"""Applies the current device function stack to the given operation."""
# Apply any device functions in LIFO order, so that the most recently
# pushed function has the first chance to apply a device to the op.
# We apply here because the result can depend on the Operation's
# signature, which is computed in the Operation constructor.
# pylint: disable=protected-access
prior_device_string = None
for device_spec in self._device_function_stack.peek_objs():
if device_spec.is_null_merge:
continue
if device_spec.function is None:
break
device_string = device_spec.string_merge(op)
# Take advantage of the fact that None is a singleton and Python interns
# strings, since identity checks are faster than equality checks.
if device_string is not prior_device_string:
op._set_device_from_string(device_string)
prior_device_string = device_string
op._device_code_locations = self._snapshot_device_function_stack_metadata()
# pylint: enable=protected-access
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
Stateful operations, such as variables and queues, can maintain their
states on devices so that they can be shared by multiple processes.
A resource container is a string name under which these stateful
operations are tracked. These resources can be released or cleared
with `tf.Session.reset()`.
For example:
```python
with g.container('experiment0'):
# All stateful Operations constructed in this context will be placed
# in resource container "experiment0".
v1 = tf.Variable([1.0])
v2 = tf.Variable([2.0])
with g.container("experiment1"):
# All stateful Operations constructed in this context will be
# placed in resource container "experiment1".
v3 = tf.Variable([3.0])
q1 = tf.queue.FIFOQueue(10, tf.float32)
# All stateful Operations constructed in this context will be
# be created in the "experiment0".
v4 = tf.Variable([4.0])
q1 = tf.queue.FIFOQueue(20, tf.float32)
with g.container(""):
# All stateful Operations constructed in this context will be
# placed in the default resource container.
v5 = tf.Variable([5.0])
q3 = tf.queue.FIFOQueue(30, tf.float32)
# Resets container "experiment0", after which the state of v1, v2, v4, q1
# will become undefined (such as uninitialized).
tf.Session.reset(target, ["experiment0"])
```
Args:
container_name: container name string.
Returns:
A context manager for defining resource containers for stateful ops,
yields the container name.
"""
original_container = self._container
self._container = container_name
try:
yield self._container
finally:
self._container = original_container
# pylint: enable=g-doc-return-or-yield
class _ControlDependenciesController(object):
"""Context manager for `control_dependencies()`."""
def __init__(self, graph, control_inputs):
"""Create a new `_ControlDependenciesController`.
A `_ControlDependenciesController` is the context manager for
`with tf.control_dependencies()` blocks. These normally nest,
as described in the documentation for `control_dependencies()`.
The `control_inputs` argument list control dependencies that must be
added to the current set of control dependencies. Because of
uniquification the set can be empty even if the caller passed a list of
ops. The special value `None` indicates that we want to start a new
empty set of control dependencies instead of extending the current set.
In that case we also clear the current control flow context, which is an
additional mechanism to add control dependencies.
Args:
graph: The graph that this controller is managing.
control_inputs: List of ops to use as control inputs in addition to the
current control dependencies. None to indicate that the dependencies
should be cleared.
"""
self._graph = graph
if control_inputs is None:
self._control_inputs_val = []
self._new_stack = True
else:
self._control_inputs_val = control_inputs
self._new_stack = False
self._seen_nodes = set()
self._old_stack = None
self._old_control_flow_context = None
# pylint: disable=protected-access
def __enter__(self):
if self._new_stack:
# Clear the control_dependencies graph.
self._old_stack = self._graph._control_dependencies_stack
self._graph._control_dependencies_stack = []
# Clear the control_flow_context too.
self._old_control_flow_context = self._graph._get_control_flow_context()
self._graph._set_control_flow_context(None)
self._graph._push_control_dependencies_controller(self)
def __exit__(self, unused_type, unused_value, unused_traceback):
self._graph._pop_control_dependencies_controller(self)
if self._new_stack:
self._graph._control_dependencies_stack = self._old_stack
self._graph._set_control_flow_context(self._old_control_flow_context)
# pylint: enable=protected-access
@property
def control_inputs(self):
return self._control_inputs_val
def add_op(self, op):
if isinstance(op, Tensor):
op = op.ref()
self._seen_nodes.add(op)
def op_in_group(self, op):
if isinstance(op, Tensor):
op = op.ref()
return op in self._seen_nodes
def _push_control_dependencies_controller(self, controller):
self._control_dependencies_stack.append(controller)
def _pop_control_dependencies_controller(self, controller):
assert self._control_dependencies_stack[-1] is controller
self._control_dependencies_stack.pop()
def _current_control_dependencies(self):
ret = set()
for controller in self._control_dependencies_stack:
for op in controller.control_inputs:
ret.add(op)
return ret
def _control_dependencies_for_inputs(self, input_ops):
"""For an op that takes `input_ops` as inputs, compute control inputs.
The returned control dependencies should yield an execution that
is equivalent to adding all control inputs in
self._control_dependencies_stack to a newly created op. However,
this function attempts to prune the returned control dependencies
by observing that nodes created within the same `with
control_dependencies(...):` block may have data dependencies that make
the explicit approach redundant.
Args:
input_ops: The data input ops for an op to be created.
Returns:
A list of control inputs for the op to be created.
"""
ret = []
for controller in self._control_dependencies_stack:
# If any of the input_ops already depends on the inputs from controller,
# we say that the new op is dominated (by that input), and we therefore
# do not need to add control dependencies for this controller's inputs.
dominated = False
for op in input_ops:
if controller.op_in_group(op):
dominated = True
break
if not dominated:
# Don't add a control input if we already have a data dependency on it.
# NOTE(mrry): We do not currently track transitive data dependencies,
# so we may add redundant control inputs.
ret.extend(c for c in controller.control_inputs if c not in input_ops)
return ret
def _record_op_seen_by_control_dependencies(self, op):
"""Record that the given op depends on all registered control dependencies.
Args:
op: An Operation.
"""
for controller in self._control_dependencies_stack:
controller.add_op(op)
def control_dependencies(self, control_inputs):
"""Returns a context manager that specifies control dependencies.
Use with the `with` keyword to specify that all operations constructed
within the context should have control dependencies on
`control_inputs`. For example:
```python
with g.control_dependencies([a, b, c]):
# `d` and `e` will only run after `a`, `b`, and `c` have executed.
d = ...
e = ...
```
Multiple calls to `control_dependencies()` can be nested, and in
that case a new `Operation` will have control dependencies on the union
of `control_inputs` from all active contexts.
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `a`, `b`, `c`, and `d`.
```
You can pass None to clear the control dependencies:
```python
with g.control_dependencies([a, b]):
# Ops constructed here run after `a` and `b`.
with g.control_dependencies(None):
# Ops constructed here run normally, not waiting for either `a` or `b`.
with g.control_dependencies([c, d]):
# Ops constructed here run after `c` and `d`, also not waiting
# for either `a` or `b`.
```
*N.B.* The control dependencies context applies *only* to ops that
are constructed within the context. Merely using an op or tensor
in the context does not add a control dependency. The following
example illustrates this point:
```python
# WRONG
def my_func(pred, tensor):
t = tf.matmul(tensor, tensor)
with tf.control_dependencies([pred]):
# The matmul op is created outside the context, so no control
# dependency will be added.
return t
# RIGHT
def my_func(pred, tensor):
with tf.control_dependencies([pred]):
# The matmul op is created in the context, so a control dependency
# will be added.
return tf.matmul(tensor, tensor)
```
Also note that though execution of ops created under this scope will trigger
execution of the dependencies, the ops created under this scope might still
be pruned from a normal tensorflow graph. For example, in the following
snippet of code the dependencies are never executed:
```python
loss = model.loss()
with tf.control_dependencies(dependencies):
loss = loss + tf.constant(1) # note: dependencies ignored in the
# backward pass
return tf.gradients(loss, model.variables)
```
This is because evaluating the gradient graph does not require evaluating
the constant(1) op created in the forward pass.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which must be
executed or computed before running the operations defined in the
context. Can also be `None` to clear the control dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
`Tensor` objects.
"""
if control_inputs is None:
return self._ControlDependenciesController(self, None)
# First convert the inputs to ops, and deduplicate them.
# NOTE(mrry): Other than deduplication, we do not currently track direct
# or indirect dependencies between control_inputs, which may result in
# redundant control inputs.
control_ops = []
current = self._current_control_dependencies()
for c in control_inputs:
# The hasattr(c, "_handle") check is designed to match ResourceVariables. This is so
# control dependencies on a variable or on an unread variable don't
# trigger reads.
if (isinstance(c, IndexedSlices) or
(hasattr(c, "_handle") and hasattr(c, "op"))):
c = c.op
c = self.as_graph_element(c)
if isinstance(c, Tensor):
c = c.op
elif not isinstance(c, Operation):
raise TypeError("Control input must be Operation or Tensor: %s" % c)
if c not in current:
control_ops.append(c)
current.add(c)
return self._ControlDependenciesController(self, control_ops)
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _attr_scope(self, attr_map):
"""EXPERIMENTAL: A context manager for setting attributes on operators.
This context manager can be used to add additional
attributes to operators within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # No extra attributes
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}):
f_2 = Foo() # Additional attribute _a=False
with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}):
f_3 = Foo()  # Additional attribute _a=True
with g._attr_scope({"_a": None}):
f_4 = Foo() # No additional attributes.
Args:
attr_map: A dictionary mapping attr name strings to AttrValue protocol
buffers or None.
Returns:
A context manager that adds the given attributes to one or more
ops created in that context.
Raises:
TypeError: If attr_map is not a dictionary mapping
strings to AttrValue protobufs.
"""
if not isinstance(attr_map, dict):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers")
# The saved_attrs dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_attrs = {}
# Install the given attribute
for name, attr in attr_map.items():
if not (isinstance(name, six.string_types) and
(isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or
callable(attr))):
raise TypeError("attr_map must be a dictionary mapping "
"strings to AttrValue protocol buffers or "
"callables that emit AttrValue protocol buffers")
try:
saved_attrs[name] = self._attr_scope_map[name]
except KeyError:
pass
if attr is None:
del self._attr_scope_map[name]
else:
self._attr_scope_map[name] = attr
try:
yield # The code within the context runs here.
finally:
# Remove the attributes set for this context, and restore any saved
# attributes.
for name, attr in attr_map.items():
try:
self._attr_scope_map[name] = saved_attrs[name]
except KeyError:
del self._attr_scope_map[name]
# pylint: enable=g-doc-return-or-yield
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _kernel_label_map(self, op_to_kernel_label_map):
"""EXPERIMENTAL: A context manager for setting kernel labels.
This context manager can be used to select particular
implementations of kernels within the scope of the context.
For example:
with ops.Graph().as_default() as g:
f_1 = Foo() # Uses the default registered kernel for the Foo op.
with g.kernel_label_map({"Foo": "v_2"}):
f_2 = Foo() # Uses the registered kernel with label "v_2"
# for the Foo op.
with g.kernel_label_map({"Foo": "v_3"}):
f_3 = Foo() # Uses the registered kernel with label "v_3"
# for the Foo op.
with g.kernel_label_map({"Foo": ""}):
f_4 = Foo() # Uses the default registered kernel
# for the Foo op.
Args:
op_to_kernel_label_map: A dictionary mapping op type strings to kernel
label strings.
Returns:
A context manager that sets the kernel label to be used for one or more
ops created in that context.
Raises:
TypeError: If op_to_kernel_label_map is not a dictionary mapping
strings to strings.
"""
if not isinstance(op_to_kernel_label_map, dict):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
# The saved_labels dictionary stores any currently-set labels that
# will be overridden by this context manager.
saved_labels = {}
# Install the given label
for op_type, label in op_to_kernel_label_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(label, six.string_types)):
raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
"strings to strings")
try:
saved_labels[op_type] = self._op_to_kernel_label_map[op_type]
except KeyError:
pass
self._op_to_kernel_label_map[op_type] = label
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, label in op_to_kernel_label_map.items():
try:
self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
except KeyError:
del self._op_to_kernel_label_map[op_type]
# pylint: enable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def _override_gradient_function(self, gradient_function_map):
"""Specify gradient function for the given op type."""
# This is an internal API and we don't need nested context for this.
# TODO(mdan): make it a proper context manager.
assert not self._gradient_function_map
self._gradient_function_map = gradient_function_map
try:
yield
finally:
self._gradient_function_map = {}
# pylint: disable=g-doc-return-or-yield
@tf_contextlib.contextmanager
def gradient_override_map(self, op_type_map):
"""EXPERIMENTAL: A context manager for overriding gradient functions.
This context manager can be used to override the gradient function
that will be used for ops within the scope of the context.
For example:
```python
@tf.RegisterGradient("CustomSquare")
def _custom_square_grad(op, grad):
# ...
with tf.Graph().as_default() as g:
c = tf.constant(5.0)
s_1 = tf.square(c) # Uses the default gradient for tf.square.
with g.gradient_override_map({"Square": "CustomSquare"}):
s_2 = tf.square(c) # Uses _custom_square_grad to compute the
# gradient of s_2.
```
Args:
op_type_map: A dictionary mapping op type strings to alternative op type
strings.
Returns:
A context manager that sets the alternative op type to be used for one
or more ops created in that context.
Raises:
TypeError: If `op_type_map` is not a dictionary mapping strings to
strings.
"""
if not isinstance(op_type_map, dict):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
# The saved_mappings dictionary stores any currently-set mappings that
# will be overridden by this context manager.
saved_mappings = {}
# Install the given label
for op_type, mapped_op_type in op_type_map.items():
if not (isinstance(op_type, six.string_types) and
isinstance(mapped_op_type, six.string_types)):
raise TypeError("op_type_map must be a dictionary mapping "
"strings to strings")
try:
saved_mappings[op_type] = self._gradient_override_map[op_type]
except KeyError:
pass
self._gradient_override_map[op_type] = mapped_op_type
try:
yield # The code within the context runs here.
finally:
# Remove the labels set for this context, and restore any saved labels.
for op_type, mapped_op_type in op_type_map.items():
try:
self._gradient_override_map[op_type] = saved_mappings[op_type]
except KeyError:
del self._gradient_override_map[op_type]
# pylint: enable=g-doc-return-or-yield
def prevent_feeding(self, tensor):
"""Marks the given `tensor` as unfeedable in this graph."""
self._unfeedable_tensors.add(tensor)
def is_feedable(self, tensor):
"""Returns `True` if and only if `tensor` is feedable."""
return tensor not in self._unfeedable_tensors
def prevent_fetching(self, op):
"""Marks the given `op` as unfetchable in this graph."""
self._unfetchable_ops.add(op)
def is_fetchable(self, tensor_or_op):
"""Returns `True` if and only if `tensor_or_op` is fetchable."""
if isinstance(tensor_or_op, Tensor):
return tensor_or_op.op not in self._unfetchable_ops
else:
return tensor_or_op not in self._unfetchable_ops
def switch_to_thread_local(self):
"""Make device, colocation and dependencies stacks thread-local.
Device, colocation and dependencies stacks are not thread-local by default.
If multiple threads access them, then the state is shared. This means that
one thread may affect the behavior of another thread.
After this method is called, the stacks become thread-local. If multiple
threads access them, then the state is not shared. Each thread uses its own
value; a thread doesn't affect other threads by mutating such a stack.
The initial value for every thread's stack is set to the current value
of the stack when `switch_to_thread_local()` was first called.
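For example (a minimal sketch; the worker function here is illustrative,
not part of this API):
```python
import threading
g = tf.Graph()
g.switch_to_thread_local()
def worker():
  # Device directives set in this thread stay local to this thread.
  with g.as_default(), g.device("/cpu:0"):
    tf.constant(1.0)
threading.Thread(target=worker).start()
```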
"""
if not self._stack_state_is_thread_local:
self._stack_state_is_thread_local = True
@property
def _device_function_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where device_function_stack doesn't yet
# exist.
# pylint: disable=protected-access
if not hasattr(self._thread_local, "_device_function_stack"):
stack_copy_for_this_thread = self._graph_device_function_stack.copy()
self._thread_local._device_function_stack = stack_copy_for_this_thread
return self._thread_local._device_function_stack
# pylint: enable=protected-access
else:
return self._graph_device_function_stack
@property
def _device_functions_outer_to_inner(self):
user_device_specs = self._device_function_stack.peek_objs()
device_functions = [spec.function for spec in user_device_specs]
device_functions_outer_to_inner = list(reversed(device_functions))
return device_functions_outer_to_inner
def _snapshot_device_function_stack_metadata(self):
"""Return device function stack as a list of TraceableObjects.
Returns:
[traceable_stack.TraceableObject, ...] where each TraceableObject's .obj
member is a displayable name for the user's argument to Graph.device, and
the filename and lineno members point to the code location where
Graph.device was called directly or indirectly by the user.
"""
snapshot = []
for obj in self._device_function_stack.peek_traceable_objs():
obj_copy = obj.copy_metadata()
obj_copy.obj = obj.obj.display_name
snapshot.append(obj_copy)
return snapshot
@_device_function_stack.setter
def _device_function_stack(self, device_function_stack):
if self._stack_state_is_thread_local:
# pylint: disable=protected-access
self._thread_local._device_function_stack = device_function_stack
# pylint: enable=protected-access
else:
self._graph_device_function_stack = device_function_stack
@property
def _colocation_stack(self):
"""Return thread-local copy of colocation stack."""
if self._stack_state_is_thread_local:
# This may be called from a thread where colocation_stack doesn't yet
# exist.
# pylint: disable=protected-access
if not hasattr(self._thread_local, "_colocation_stack"):
stack_copy_for_this_thread = self._graph_colocation_stack.copy()
self._thread_local._colocation_stack = stack_copy_for_this_thread
return self._thread_local._colocation_stack
# pylint: enable=protected-access
else:
return self._graph_colocation_stack
def _snapshot_colocation_stack_metadata(self):
"""Return colocation stack metadata as a dictionary."""
return {
traceable_obj.obj.name: traceable_obj.copy_metadata()
for traceable_obj in self._colocation_stack.peek_traceable_objs()
}
@_colocation_stack.setter
def _colocation_stack(self, colocation_stack):
if self._stack_state_is_thread_local:
# pylint: disable=protected-access
self._thread_local._colocation_stack = colocation_stack
# pylint: enable=protected-access
else:
self._graph_colocation_stack = colocation_stack
@property
def _control_dependencies_stack(self):
if self._stack_state_is_thread_local:
# This may be called from a thread where control_dependencies_stack
# doesn't yet exist.
if not hasattr(self._thread_local, "_control_dependencies_stack"):
self._thread_local._control_dependencies_stack = (
self._graph_control_dependencies_stack[:])
return self._thread_local._control_dependencies_stack
else:
return self._graph_control_dependencies_stack
@_control_dependencies_stack.setter
def _control_dependencies_stack(self, control_dependencies):
if self._stack_state_is_thread_local:
self._thread_local._control_dependencies_stack = control_dependencies
else:
self._graph_control_dependencies_stack = control_dependencies
@property
def _distribution_strategy_stack(self):
"""A stack to maintain distribution strategy context for each thread."""
if not hasattr(self._thread_local, "_distribution_strategy_stack"):
self._thread_local._distribution_strategy_stack = [] # pylint: disable=protected-access
return self._thread_local._distribution_strategy_stack # pylint: disable=protected-access
@_distribution_strategy_stack.setter
def _distribution_strategy_stack(self, _distribution_strategy_stack):
self._thread_local._distribution_strategy_stack = ( # pylint: disable=protected-access
_distribution_strategy_stack)
@property
def _global_distribute_strategy_scope(self):
"""For implementing `tf.distribute.set_strategy()`."""
if not hasattr(self._thread_local, "distribute_strategy_scope"):
self._thread_local.distribute_strategy_scope = None
return self._thread_local.distribute_strategy_scope
@_global_distribute_strategy_scope.setter
def _global_distribute_strategy_scope(self, distribute_strategy_scope):
self._thread_local.distribute_strategy_scope = (distribute_strategy_scope)
def _mutation_lock(self):
"""Returns a lock to guard code that creates & mutates ops.
See the comment for self._group_lock for more info.
"""
return self._group_lock.group(_MUTATION_LOCK_GROUP)
def _session_run_lock(self):
"""Returns a lock to guard code for Session.run.
See the comment for self._group_lock for more info.
"""
return self._group_lock.group(_SESSION_RUN_LOCK_GROUP)
# TODO(agarwal): currently device directives in an outer eager scope will not
# apply to inner graph mode code. Fix that.
@tf_export(v1=["device"])
def device(device_name_or_function):
"""Wrapper for `Graph.device()` using the default graph.
See `tf.Graph.device` for more details.
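For example (a minimal sketch; the device string is illustrative):
```python
with tf.compat.v1.device("/device:CPU:0"):
  # Ops created here are assigned to the CPU.
  a = tf.constant([1.0, 2.0])
```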
Args:
device_name_or_function: The device name or function to use in the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If eager execution is enabled and a function is passed in.
"""
if context.executing_eagerly():
if callable(device_name_or_function):
raise RuntimeError(
"tf.device does not support functions when eager execution "
"is enabled.")
return context.device(device_name_or_function)
elif executing_eagerly_outside_functions():
@tf_contextlib.contextmanager
def combined(device_name_or_function):
with get_default_graph().device(device_name_or_function):
if not callable(device_name_or_function):
with context.device(device_name_or_function):
yield
else:
yield
return combined(device_name_or_function)
else:
return get_default_graph().device(device_name_or_function)
@tf_export("device", v1=[])
def device_v2(device_name):
"""Specifies the device for ops created/executed in this context.
This function specifies the device to be used for ops created/executed in a
particular context. Nested contexts will inherit and also create/execute
their ops on the specified device. If a specific device is not required,
consider not using this function so that a device can be automatically
assigned. In general the use of this function is optional. `device_name` can
be fully specified, as in "/job:worker/task:1/device:cpu:0", or partially
specified, containing only a subset of the "/"-separated fields. Any fields
which are specified will override device annotations from outer scopes.
For example:
```python
with tf.device('/job:foo'):
# ops created here have devices with /job:foo
with tf.device('/job:bar/task:0/device:gpu:2'):
# ops created here have the fully specified device above
with tf.device('/device:gpu:1'):
# ops created here have the device '/job:foo/device:gpu:1'
```
Args:
device_name: The device name to use in the context.
Returns:
A context manager that specifies the default device to use for newly
created ops.
Raises:
RuntimeError: If a function is passed in.
"""
if callable(device_name):
raise RuntimeError("tf.device does not support functions.")
return device(device_name)
@tf_export(v1=["container"])
def container(container_name):
"""Wrapper for `Graph.container()` using the default graph.
Args:
container_name: The container string to use in the context.
Returns:
A context manager that specifies the default container to use for newly
created stateful ops.
"""
return get_default_graph().container(container_name)
def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False):
if context.executing_eagerly():
if op is not None:
if not hasattr(op, "device"):
op = internal_convert_to_tensor_or_indexed_slices(op)
return device(op.device)
else:
return NullContextmanager()
else:
default_graph = get_default_graph()
if isinstance(op, EagerTensor):
if default_graph.building_function:
return default_graph.device(op.device)
else:
raise ValueError("Encountered an Eager-defined Tensor during graph "
"construction, but a function was not being built.")
return default_graph._colocate_with_for_gradient(
op, gradient_uid=gradient_uid, ignore_existing=ignore_existing)
# Internal interface to colocate_with. colocate_with has been deprecated from
# public API. There are still a few internal uses of colocate_with. Add internal
# only API for those uses to avoid deprecation warning.
def colocate_with(op, ignore_existing=False):
return _colocate_with_for_gradient(op, None, ignore_existing=ignore_existing)
@deprecation.deprecated(
date=None, instructions="Colocations handled automatically by placer.")
@tf_export(v1=["colocate_with"])
def _colocate_with(op, ignore_existing=False):
return colocate_with(op, ignore_existing)
@tf_export("control_dependencies")
def control_dependencies(control_inputs):
"""Wrapper for `Graph.control_dependencies()` using the default graph.
See `tf.Graph.control_dependencies` for more details.
Note: *In TensorFlow 2 with eager and/or Autograph, you should not require
this method, as ops execute in the expected order thanks to automatic control
dependencies.* Only use `tf.control_dependencies` when working with v1
`tf.Graph` code.
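For example, in graph mode (a minimal sketch):
```python
with tf.compat.v1.Graph().as_default():
  a = tf.constant(1.0)
  b = tf.constant(2.0)
  with tf.control_dependencies([a, b]):
    # The ops created for `c` will only run after `a` and `b` have executed.
    c = tf.identity(a + b)
```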
When eager execution is enabled, any callable object in the `control_inputs`
list will be called.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which must be
executed or computed before running the operations defined in the context.
Can also be `None` to clear the control dependencies. If eager execution
is enabled, any callable object in the `control_inputs` list will be
called.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
"""
if context.executing_eagerly():
if control_inputs:
# Execute any pending callables.
for control in control_inputs:
if callable(control):
control()
return NullContextmanager()
else:
return get_default_graph().control_dependencies(control_inputs)
class _DefaultStack(threading.local):
"""A thread-local stack of objects for providing implicit defaults."""
def __init__(self):
super(_DefaultStack, self).__init__()
self._enforce_nesting = True
self.stack = []
def get_default(self):
return self.stack[-1] if self.stack else None
def reset(self):
self.stack = []
def is_cleared(self):
return not self.stack
@property
def enforce_nesting(self):
return self._enforce_nesting
@enforce_nesting.setter
def enforce_nesting(self, value):
self._enforce_nesting = value
@tf_contextlib.contextmanager
def get_controller(self, default):
"""A context manager for manipulating a default stack."""
self.stack.append(default)
try:
yield default
finally:
# stack may be empty if reset() was called
if self.stack:
if self._enforce_nesting:
if self.stack[-1] is not default:
raise AssertionError(
"Nesting violated for default stack of %s objects" %
type(default))
self.stack.pop()
else:
self.stack.remove(default)
_default_session_stack = _DefaultStack() # pylint: disable=protected-access
def default_session(session):
"""Python "with" handler for defining a default session.
This function provides a means of registering a session for handling
Tensor.eval() and Operation.run() calls. It is primarily intended for use
by session.Session, but can be used with any object that implements
the Session.run() interface.
Use with the "with" keyword to specify that Tensor.eval() and Operation.run()
invocations within the scope of a block should be executed by a particular
session.
The default session applies to the current thread only, so it is always
possible to inspect the call stack and determine the scope of a default
session. If you create a new thread, and wish to use the default session
in that thread, you must explicitly add a "with ops.default_session(sess):"
block in that thread's function.
Example:
The following code examples are equivalent:
# 1. Using the Session object directly:
sess = ...
c = tf.constant(5.0)
sess.run(c)
# 2. Using default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
result = c.eval()
# 3. Overriding default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
with ops.default_session(...):
c.eval(session=sess)
Args:
session: The session to be installed as the default session.
Returns:
A context manager for the default session.
"""
return _default_session_stack.get_controller(session)
@tf_export(v1=["get_default_session"])
def get_default_session():
"""Returns the default session for the current thread.
The returned `Session` will be the innermost session on which a
`Session` or `Session.as_default()` context has been entered.
NOTE: The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
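For example (a minimal sketch):
```python
sess = tf.compat.v1.Session()
with sess.as_default():
  assert tf.compat.v1.get_default_session() is sess
```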
Returns:
The default `Session` being used in the current thread.
"""
return _default_session_stack.get_default()
def _eval_using_default_session(tensors, feed_dict, graph, session=None):
"""Uses the default session to evaluate one or more tensors.
Args:
tensors: A single Tensor, or a list of Tensor objects.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which the tensors are defined.
session: (Optional) A different session to use to evaluate "tensors".
Returns:
Either a single numpy ndarray if "tensors" is a single tensor; or a list
of numpy ndarrays that each correspond to the respective element in
"tensors".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot evaluate tensor using `eval()`: No default "
"session is registered. Use `with "
"sess.as_default()` or pass an explicit session to "
"`eval(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph. Pass an explicit session to "
"`eval(session=sess)`.")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to evaluate tensor: "
"the tensor's graph is different from the session's "
"graph.")
return session.run(tensors, feed_dict)
def _run_using_default_session(operation, feed_dict, graph, session=None):
"""Uses the default session to run "operation".
Args:
operation: The Operation to be run.
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists,
numpy ndarrays, TensorProtos, or strings.
graph: The graph in which "operation" is defined.
session: (Optional) A different session to use to run "operation".
Raises:
ValueError: If no default session is available; the default session
does not have "graph" as its graph; or if "session" is specified,
and it does not have "graph" as its graph.
"""
if session is None:
session = get_default_session()
if session is None:
raise ValueError("Cannot execute operation using `run()`: No default "
"session is registered. Use `with "
"sess.as_default():` or pass an explicit session to "
"`run(session=sess)`")
if session.graph is not graph:
raise ValueError("Cannot use the default session to execute operation: "
"the operation's graph is different from the "
"session's graph. Pass an explicit session to "
"run(session=sess).")
else:
if session.graph is not graph:
raise ValueError("Cannot use the given session to execute operation: "
"the operation's graph is different from the session's "
"graph.")
session.run(operation, feed_dict)
class _DefaultGraphStack(_DefaultStack): # pylint: disable=protected-access
"""A thread-local stack of objects for providing an implicit default graph."""
def __init__(self):
super(_DefaultGraphStack, self).__init__()
self._global_default_graph = None
def get_default(self):
"""Override that returns a global default if the stack is empty."""
if self.stack:
return self.stack[-1]
elif self._global_default_graph:
return self._global_default_graph
else:
self._global_default_graph = Graph()
return self._global_default_graph
def _GetGlobalDefaultGraph(self):
if self._global_default_graph is None:
# TODO(mrry): Perhaps log that the default graph is being used, or
# provide some other feedback to prevent confusion when a mixture of
# the global default graph and an explicit graph are combined in the
# same process.
self._global_default_graph = Graph()
return self._global_default_graph
def reset(self):
super(_DefaultGraphStack, self).reset()
self._global_default_graph = None
@tf_contextlib.contextmanager
def get_controller(self, default):
context.context().context_switches.push(default.building_function,
default.as_default,
default._device_function_stack)
try:
with super(_DefaultGraphStack,
self).get_controller(default) as g, context.graph_mode():
yield g
finally:
# If an exception is raised here it may be hiding a related exception in
# the try-block (just above).
context.context().context_switches.pop()
_default_graph_stack = _DefaultGraphStack()
# Shared helper used in init_scope and executing_eagerly_outside_functions
# to obtain the outermost context that is not building a function, and the
# innermost non empty device stack.
def _get_outer_context_and_inner_device_stack():
"""Get the outermost context not building a function."""
default_graph = get_default_graph()
outer_context = None
innermost_nonempty_device_stack = default_graph._device_function_stack # pylint: disable=protected-access
if not _default_graph_stack.stack:
# If the default graph stack is empty, then we cannot be building a
# function. Install the global graph (which, in this case, is also the
# default graph) as the outer context.
if default_graph.building_function:
raise RuntimeError("The global graph is building a function.")
outer_context = default_graph.as_default
else:
# Find a context that is not building a function.
for stack_entry in reversed(context.context().context_switches.stack):
if not innermost_nonempty_device_stack:
innermost_nonempty_device_stack = stack_entry.device_stack
if not stack_entry.is_building_function:
outer_context = stack_entry.enter_context_fn
break
if outer_context is None:
# As a last resort, obtain the global default graph; this graph doesn't
# necessarily live on the graph stack (and hence it doesn't necessarily
# live on the context stack), but it is stored in the graph stack's
# encapsulating object.
outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default # pylint: disable=protected-access
if outer_context is None:
# Sanity check; this shouldn't be triggered.
raise RuntimeError("All graphs are building functions, and no "
"eager context was previously active.")
return outer_context, innermost_nonempty_device_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_export("init_scope")
@tf_contextlib.contextmanager
def init_scope():
"""A context manager that lifts ops out of control-flow scopes and function-building graphs.
There is often a need to lift variable initialization ops out of control-flow
scopes, function-building graphs, and gradient tapes. Entering an
`init_scope` is a mechanism for satisfying these desiderata. In particular,
entering an `init_scope` has three effects:
(1) All control dependencies are cleared the moment the scope is entered;
this is equivalent to entering the context manager returned from
`control_dependencies(None)`, which has the side-effect of exiting
control-flow scopes like `tf.cond` and `tf.while_loop`.
(2) All operations that are created while the scope is active are lifted
into the lowest context on the `context_stack` that is not building a
graph function. Here, a context is defined as either a graph or an eager
context. Every context switch, i.e., every installation of a graph as
the default graph and every switch into eager mode, is logged in a
thread-local stack called `context_switches`; the log entry for a
context switch is popped from the stack when the context is exited.
Entering an `init_scope` is equivalent to crawling up
`context_switches`, finding the first context that is not building a
graph function, and entering it. A caveat is that if graph mode is
enabled but the default graph stack is empty, then entering an
`init_scope` will simply install a fresh graph as the default one.
(3) The gradient tape is paused while the scope is active.
When eager execution is enabled, code inside an init_scope block runs with
eager execution enabled even when tracing a `tf.function`. For example:
```python
tf.compat.v1.enable_eager_execution()
@tf.function
def func():
# A function constructs TensorFlow graphs,
# it does not execute eagerly.
assert not tf.executing_eagerly()
with tf.init_scope():
# Initialization runs with eager execution enabled
assert tf.executing_eagerly()
```
Raises:
RuntimeError: if graph state is incompatible with this initialization.
"""
# pylint: enable=g-doc-return-or-yield,line-too-long
if context.executing_eagerly():
# Fastpath.
with tape.stop_recording():
yield
else:
# Retrieve the active name scope: entering an `init_scope` preserves
# the name scope of the current context.
scope = get_default_graph().get_name_scope()
if scope and scope[-1] != "/":
# Names that end with trailing slashes are treated by `name_scope` as
# absolute.
scope = scope + "/"
outer_context, innermost_nonempty_device_stack = (
_get_outer_context_and_inner_device_stack())
outer_graph = None
outer_device_stack = None
try:
with outer_context(), name_scope(
scope, skip_on_eager=False), control_dependencies(
None), tape.stop_recording():
context_manager = NullContextmanager
context_manager_input = None
if not context.executing_eagerly():
# The device stack is preserved when lifting into a graph. Eager
# execution doesn't implement device stacks and in particular it
# doesn't support device functions, so in general it's not possible
# to do the same when lifting into the eager context.
outer_graph = get_default_graph()
outer_device_stack = outer_graph._device_function_stack # pylint: disable=protected-access
outer_graph._device_function_stack = innermost_nonempty_device_stack # pylint: disable=protected-access
elif innermost_nonempty_device_stack is not None:
for device_spec in innermost_nonempty_device_stack.peek_objs():
if device_spec.function is None:
break
if device_spec.raw_string:
context_manager = context.device
context_manager_input = device_spec.raw_string
break
# It is currently not possible to have a device function in V2,
# but in V1 we are unable to apply device functions in eager mode.
# This means that we will silently skip some of the entries on the
# device stack in V1 + eager mode.
with context_manager(context_manager_input):
yield
finally:
# If an exception is raised here it may be hiding a related exception in
# the try-block (just above).
if outer_graph is not None:
outer_graph._device_function_stack = outer_device_stack # pylint: disable=protected-access
@tf_export(v1=["executing_eagerly_outside_functions"])
def executing_eagerly_outside_functions():
"""Returns True if executing eagerly, even if inside a graph function.
This function will check the outermost context for the program and see if
it is in eager mode. It is useful comparing to `tf.executing_eagerly()`,
which checks the current context and will return `False` within a
`tf.function` body. It can be used to build libraries that behave differently
in eager runtime and v1 session runtime (deprecated).
Example:
>>> tf.compat.v1.enable_eager_execution()
>>> @tf.function
... def func():
... # A function constructs TensorFlow graphs, it does not execute eagerly,
... # but the outer most context is still eager.
... assert not tf.executing_eagerly()
... return tf.compat.v1.executing_eagerly_outside_functions()
>>> func()
<tf.Tensor: shape=(), dtype=bool, numpy=True>
Returns:
boolean, whether the outermost context is in eager mode.
"""
if context.executing_eagerly():
return True
else:
outer_context, _ = _get_outer_context_and_inner_device_stack()
with outer_context():
return context.executing_eagerly()
@tf_export("inside_function", v1=[])
def inside_function():
"""Indicates whether the caller code is executing inside a `tf.function`.
Returns:
Boolean, True if the caller code is executing inside a `tf.function`
rather than eagerly.
Example:
>>> tf.inside_function()
False
>>> @tf.function
... def f():
... print(tf.inside_function())
>>> f()
True
"""
return get_default_graph().building_function
@tf_export(v1=["enable_eager_execution"])
def enable_eager_execution(config=None, device_policy=None,
execution_mode=None):
"""Enables eager execution for the lifetime of this program.
Eager execution provides an imperative interface to TensorFlow. With eager
execution enabled, TensorFlow functions execute operations immediately (as
opposed to adding to a graph to be executed later in a `tf.compat.v1.Session`)
and
return concrete values (as opposed to symbolic references to a node in a
computational graph).
For example:
```python
tf.compat.v1.enable_eager_execution()
# After eager execution is enabled, operations are executed as they are
# defined and Tensor objects hold concrete values, which can be accessed as
# numpy.ndarray`s through the numpy() method.
assert tf.multiply(6, 7).numpy() == 42
```
Eager execution cannot be enabled after TensorFlow APIs have been used to
create or execute graphs. It is typically recommended to invoke this function
at program startup and not in a library (as most libraries should be usable
both with and without eager execution).
Args:
config: (Optional.) A `tf.compat.v1.ConfigProto` to use to configure the
environment in which operations are executed. Note that
`tf.compat.v1.ConfigProto` is also used to configure graph execution (via
`tf.compat.v1.Session`) and many options within `tf.compat.v1.ConfigProto`
are not implemented (or are irrelevant) when eager execution is enabled.
device_policy: (Optional.) Policy controlling how operations requiring
inputs on a specific device (e.g., a GPU 0) handle inputs on a different
device (e.g. GPU 1 or CPU). When set to None, an appropriate value will
be picked automatically. The value picked may change between TensorFlow
releases.
Valid values:
- tf.contrib.eager.DEVICE_PLACEMENT_EXPLICIT: raises an error if the
placement is not correct.
- tf.contrib.eager.DEVICE_PLACEMENT_WARN: copies the tensors which are not
on the right device but logs a warning.
- tf.contrib.eager.DEVICE_PLACEMENT_SILENT: silently copies the tensors.
Note that this may hide performance problems as there is no notification
provided when operations are blocked on the tensor being copied between
devices.
- tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies
int32 tensors, raising errors on the other ones.
execution_mode: (Optional.) Policy controlling how operations dispatched are
actually executed. When set to None, an appropriate value will be picked
automatically. The value picked may change between TensorFlow releases.
Valid values:
- tf.contrib.eager.SYNC: executes each operation synchronously.
- tf.contrib.eager.ASYNC: executes each operation asynchronously. These
operations may return "non-ready" handles.
Raises:
ValueError: If eager execution is enabled after creating/executing a
TensorFlow graph, or if options provided conflict with a previous call
to this function.
"""
_api_usage_gauge.get_cell().set(True)
if context.default_execution_mode != context.EAGER_MODE:
return enable_eager_execution_internal(
config=config,
device_policy=device_policy,
execution_mode=execution_mode,
server_def=None)
@tf_export(v1=["disable_eager_execution"])
def disable_eager_execution():
"""Disables eager execution.
This function can only be called before any Graphs, Ops, or Tensors have been
created. It can be used at the beginning of the program for complex migration
projects from TensorFlow 1.x to 2.x.
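For example (a minimal sketch; call it before any other TensorFlow API use):
```python
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
assert not tf.executing_eagerly()
```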
"""
_api_usage_gauge.get_cell().set(False)
context.default_execution_mode = context.GRAPH_MODE
c = context.context_safe()
if c is not None:
c._thread_local_data.is_eager = False # pylint: disable=protected-access
def enable_eager_execution_internal(config=None,
device_policy=None,
execution_mode=None,
server_def=None):
"""Enables eager execution for the lifetime of this program.
Most of the doc string for enable_eager_execution is relevant here as well.
Args:
config: See enable_eager_execution doc string
device_policy: See enable_eager_execution doc string
execution_mode: See enable_eager_execution doc string
server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution on
remote devices. GrpcServers need to be started by creating an identical
server_def to this, and setting the appropriate task_indexes, so that the
servers can communicate. It will then be possible to execute operations on
remote devices.
Raises:
ValueError
"""
if config is not None and not isinstance(config, config_pb2.ConfigProto):
raise TypeError("config must be a tf.ConfigProto, but got %s" %
type(config))
if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT,
context.DEVICE_PLACEMENT_WARN,
context.DEVICE_PLACEMENT_SILENT,
context.DEVICE_PLACEMENT_SILENT_FOR_INT32):
raise ValueError(
"device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*"
)
if execution_mode not in (None, context.SYNC, context.ASYNC):
raise ValueError(
"execution_mode must be one of None, tf.contrib.eager.SYNC, "
"tf.contrib.eager.ASYNC")
if context.default_execution_mode == context.GRAPH_MODE:
graph_mode_has_been_used = (
_default_graph_stack._global_default_graph is not None) # pylint: disable=protected-access
if graph_mode_has_been_used:
raise ValueError(
"tf.enable_eager_execution must be called at program startup.")
context.default_execution_mode = context.EAGER_MODE
# pylint: disable=protected-access
with context._context_lock:
if context._context is None:
context._set_context_locked(context.Context(
config=config,
device_policy=device_policy,
execution_mode=execution_mode,
server_def=server_def))
elif ((config is not None and config is not context._context._config) or
(device_policy is not None and
device_policy is not context._context._device_policy) or
(execution_mode is not None and
execution_mode is not context._context._execution_mode)):
raise ValueError(
"Trying to change the options of an active eager"
" execution. Context config: %s, specified config:"
" %s. Context device policy: %s, specified device"
" policy: %s. Context execution mode: %s, "
" specified execution mode %s." %
(context._context._config, config, context._context._device_policy,
device_policy, context._context._execution_mode, execution_mode))
else:
# We already created everything, so update the thread local data.
context._context._thread_local_data.is_eager = True
# Monkey patch to get rid of an unnecessary conditional since the context is
# now initialized.
context.context = context.context_safe
def eager_run(main=None, argv=None):
"""Runs the program with an optional main function and argv list.
The program will run with eager execution enabled.
Example:
```python
import tensorflow as tf
# Import subject to future changes:
from tensorflow.contrib.eager.python import tfe
def main(_):
u = tf.constant(6.0)
v = tf.constant(7.0)
print(u * v)
if __name__ == "__main__":
tfe.run()
```
Args:
main: the main function to run.
argv: the arguments to pass to it.
"""
enable_eager_execution()
app.run(main, argv)
@tf_export(v1=["reset_default_graph"])
def reset_default_graph():
"""Clears the default graph stack and resets the global default graph.
NOTE: The default graph is a property of the current thread. This
function applies only to the current thread. Calling this function while
a `tf.compat.v1.Session` or `tf.compat.v1.InteractiveSession` is active will
result in undefined
behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects
after calling this function will result in undefined behavior.
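For example, in graph mode (a minimal sketch):
```python
c = tf.compat.v1.constant(1.0)
tf.compat.v1.reset_default_graph()
d = tf.compat.v1.constant(2.0)
# `c` belongs to the old, discarded default graph; `d` is in the new one.
assert c.graph is not d.graph
```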
Raises:
AssertionError: If this function is called within a nested graph.
"""
if not _default_graph_stack.is_cleared():
raise AssertionError("Do not use tf.reset_default_graph() to clear "
"nested graphs. If you need a cleared graph, "
"exit the nesting and create a new graph.")
_default_graph_stack.reset()
@tf_export(v1=["get_default_graph"])
def get_default_graph():
"""Returns the default graph for the current thread.
The returned graph will be the innermost graph on which a
`Graph.as_default()` context has been entered, or a global default
graph if none has been explicitly created.
NOTE: The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
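For example (a minimal sketch):
```python
g = tf.Graph()
with g.as_default():
  assert tf.compat.v1.get_default_graph() is g
```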
Returns:
The default `Graph` being used in the current thread.
"""
return _default_graph_stack.get_default()
def has_default_graph():
"""Returns True if there is a default graph."""
return len(_default_graph_stack.stack) >= 1
# Exported due to b/171079555
@tf_export("__internal__.get_name_scope", v1=[])
def get_name_scope():
"""Returns the current name scope in the default_graph.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
if context.executing_eagerly():
return context.context().scope_name.rstrip("/")
return get_default_graph().get_name_scope()
def _assert_same_graph(original_item, item):
"""Fail if the 2 items are from different graphs.
Args:
original_item: Original item to check against.
item: Item to check.
Raises:
ValueError: if graphs do not match.
"""
original_graph = getattr(original_item, "graph", None)
graph = getattr(item, "graph", None)
if original_graph and graph and original_graph is not graph:
raise ValueError(
"%s must be from the same graph as %s (graphs are %s and %s)." %
(item, original_item, graph, original_graph))
def _get_graph_from_inputs(op_input_list, graph=None):
"""Returns the appropriate graph to use for the given inputs.
This library method provides a consistent algorithm for choosing the graph
in which an Operation should be constructed:
1. If the default graph is being used to construct a function, we
use the default graph.
2. If the "graph" is specified explicitly, we validate that all of the inputs
in "op_input_list" are compatible with that graph.
3. Otherwise, we attempt to select a graph from the first Operation-
or Tensor-valued input in "op_input_list", and validate that all other
such inputs are in the same graph.
4. If the graph was not specified and it could not be inferred from
"op_input_list", we attempt to use the default graph.
Args:
op_input_list: A list of inputs to an operation, which may include `Tensor`,
`Operation`, and other objects that may be converted to a graph element.
graph: (Optional) The explicit graph to use.
Raises:
TypeError: If op_input_list is not a list or tuple, or if graph is not a
Graph.
ValueError: If a graph is explicitly passed and not all inputs are from it,
or if the inputs are from multiple graphs, or we could not find a graph
and there was no default graph.
Returns:
The appropriate graph to use for the given inputs.
"""
current_default_graph = get_default_graph()
if current_default_graph.building_function:
return current_default_graph
op_input_list = tuple(op_input_list) # Handle generators correctly
if graph and not isinstance(graph, Graph):
raise TypeError("Input graph needs to be a Graph: %s" % (graph,))
# 1. We validate that all of the inputs are from the same graph. This is
# either the supplied graph parameter, or the first one selected from one
# the graph-element-valued inputs. In the latter case, we hold onto
# that input in original_graph_element so we can provide a more
# informative error if a mismatch is found.
original_graph_element = None
for op_input in op_input_list:
# Determine if this is a valid graph_element.
# TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean this
# up.
graph_element = None
if (isinstance(op_input, (Operation, internal.NativeObject)) and
((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)): # pylint: disable=unidiomatic-typecheck
graph_element = op_input
else:
graph_element = _as_graph_element(op_input)
if graph_element is not None:
if not graph:
original_graph_element = graph_element
graph = getattr(graph_element, "graph", None)
elif original_graph_element is not None:
_assert_same_graph(original_graph_element, graph_element)
elif graph_element.graph is not graph:
raise ValueError("%s is not from the passed-in graph." % graph_element)
# 2. If all else fails, we use the default graph, which is always there.
return graph or current_default_graph
@tf_export(v1=["GraphKeys"])
class GraphKeys(object):
"""Standard names to use for graph collections.
The standard library uses various well-known names to collect and
retrieve values associated with a graph. For example, the
`tf.Optimizer` subclasses default to optimizing the variables
collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is
specified, but it is also possible to pass an explicit list of
variables.
The following standard keys are defined:
* `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
across distributed environment (model variables are subset of these). See
`tf.compat.v1.global_variables`
for more details.
Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
* `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each
machine. Usually used for temporary variables, like counters.
Note: use `tf.contrib.framework.local_variable` to add to this collection.
* `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the
model for inference (feed forward). Note: use
`tf.contrib.framework.model_variable` to add to this collection.
* `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
be trained by an optimizer. See
`tf.compat.v1.trainable_variables`
for more details.
* `SUMMARIES`: the summary `Tensor` objects that have been created in the
graph. See
`tf.compat.v1.summary.merge_all`
for more details.
* `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to
produce input for a computation. See
`tf.compat.v1.train.start_queue_runners`
for more details.
* `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also
keep moving averages. See
`tf.compat.v1.moving_average_variables`
for more details.
* `REGULARIZATION_LOSSES`: regularization losses collected during graph
construction.
The following standard keys are _defined_, but their collections are **not**
automatically populated as many of the others are:
* `WEIGHTS`
* `BIASES`
* `ACTIVATIONS`
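For example, in graph mode (a minimal sketch using one of the automatically
populated keys):
```python
with tf.compat.v1.Graph().as_default():
  v = tf.compat.v1.get_variable("w", shape=[1])
  trainable = tf.compat.v1.get_collection(
      tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
  assert trainable[0] is v
```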
"""
# Key to collect Variable objects that are global (shared across machines).
# Default collection for all variables, except local ones.
GLOBAL_VARIABLES = "variables"
# Key to collect local variables that are local to the machine and are not
# saved/restored.
LOCAL_VARIABLES = "local_variables"
# Key to collect local variables which are used to accumulate internal state
# to be used in tf.metrics.*.
METRIC_VARIABLES = "metric_variables"
# Key to collect model variables defined by layers.
MODEL_VARIABLES = "model_variables"
# Key to collect Variable objects that will be trained by the
# optimizers.
TRAINABLE_VARIABLES = "trainable_variables"
# Key to collect summaries.
SUMMARIES = "summaries"
# Key to collect QueueRunners.
QUEUE_RUNNERS = "queue_runners"
# Key to collect table initializers.
TABLE_INITIALIZERS = "table_initializer"
# Key to collect asset filepaths. An asset represents an external resource
# like a vocabulary file.
ASSET_FILEPATHS = "asset_filepaths"
# Key to collect Variable objects that keep moving averages.
MOVING_AVERAGE_VARIABLES = "moving_average_variables"
# Key to collect regularization losses at graph construction.
REGULARIZATION_LOSSES = "regularization_losses"
# Key to collect concatenated sharded variables.
CONCATENATED_VARIABLES = "concatenated_variables"
# Key to collect savers.
SAVERS = "savers"
# Key to collect weights
WEIGHTS = "weights"
# Key to collect biases
BIASES = "biases"
# Key to collect activations
ACTIVATIONS = "activations"
# Key to collect update_ops
UPDATE_OPS = "update_ops"
# Key to collect losses
LOSSES = "losses"
# Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing.
SAVEABLE_OBJECTS = "saveable_objects"
# Key to collect all shared resources used by the graph which need to be
# initialized once per cluster.
RESOURCES = "resources"
# Key to collect all shared resources used in this graph which need to be
# initialized once per session.
LOCAL_RESOURCES = "local_resources"
# Trainable resource-style variables.
TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables"
# Key to indicate various ops.
INIT_OP = "init_op"
LOCAL_INIT_OP = "local_init_op"
READY_OP = "ready_op"
READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op"
SUMMARY_OP = "summary_op"
GLOBAL_STEP = "global_step"
# Used to count the number of evaluations performed during a single evaluation
# run.
EVAL_STEP = "eval_step"
TRAIN_OP = "train_op"
# Key for control flow context.
COND_CONTEXT = "cond_context"
WHILE_CONTEXT = "while_context"
# Used to store v2 summary names.
_SUMMARY_COLLECTION = "_SUMMARY_V2"
# List of all collections that keep track of variables.
_VARIABLE_COLLECTIONS = [
GLOBAL_VARIABLES,
LOCAL_VARIABLES,
METRIC_VARIABLES,
MODEL_VARIABLES,
TRAINABLE_VARIABLES,
MOVING_AVERAGE_VARIABLES,
CONCATENATED_VARIABLES,
TRAINABLE_RESOURCE_VARIABLES,
]
# Key for streaming model ports.
# NOTE(yuanbyu): internal and experimental.
_STREAMING_MODEL_PORTS = "streaming_model_ports"
@decorator_utils.classproperty
@deprecation.deprecated(None, "Use `tf.GraphKeys.GLOBAL_VARIABLES` instead.")
def VARIABLES(cls): # pylint: disable=no-self-argument
return cls.GLOBAL_VARIABLES
def dismantle_graph(graph):
"""Cleans up reference cycles from a `Graph`.
Helpful for making sure the garbage collector doesn't need to run after a
temporary `Graph` is no longer needed.
Args:
graph: A `Graph` object to destroy. Neither it nor any of its ops are usable
after this function runs.
"""
memory.dismantle_ordered_dict(graph._functions) # pylint: disable=protected-access
# Now clean up Operation<->Graph reference cycles by clearing all of the
# attributes for the Graph and its ops.
graph_operations = graph.get_operations()
for op in graph_operations:
op.__dict__ = {}
graph.__dict__ = {}
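# Illustrative usage sketch (not part of the original module): after a
# temporary graph is no longer needed, breaking its reference cycles lets it
# be reclaimed without waiting for the cyclic garbage collector.
#
#   g = Graph()
#   with g.as_default():
#     ...  # build some ops
#   dismantle_graph(g)  # g and its ops are unusable afterwards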
@tf_export(v1=["add_to_collection"])
def add_to_collection(name, value):
"""Wrapper for `Graph.add_to_collection()` using the default graph.
See `tf.Graph.add_to_collection`
for more details.
Args:
name: The key for the collection. For example, the `GraphKeys` class
contains many standard names for collections.
value: The value to add to the collection.
@compatibility(eager)
Collections are only supported in eager when variables are created inside
an EagerVariableStore (e.g. as part of a layer or template).
@end_compatibility
"""
get_default_graph().add_to_collection(name, value)
@tf_export(v1=["add_to_collections"])
def add_to_collections(names, value):
"""Wrapper for `Graph.add_to_collections()` using the default graph.
See `tf.Graph.add_to_collections`
for more details.
Args:
names: The key for the collections. The `GraphKeys` class contains many
standard names for collections.
value: The value to add to the collections.
@compatibility(eager)
Collections are only supported in eager when variables are created inside
an EagerVariableStore (e.g. as part of a layer or template).
@end_compatibility
"""
get_default_graph().add_to_collections(names, value)
@tf_export(v1=["get_collection_ref"])
def get_collection_ref(key):
"""Wrapper for `Graph.get_collection_ref()` using the default graph.
See `tf.Graph.get_collection_ref`
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class contains
many standard names for collections.
Returns:
The list of values in the collection with the given `name`, or an empty
list if no value has been added to that collection. Note that this returns
the collection list itself, which can be modified in place to change the
collection.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
return get_default_graph().get_collection_ref(key)
@tf_export(v1=["get_collection"])
def get_collection(key, scope=None):
"""Wrapper for `Graph.get_collection()` using the default graph.
See `tf.Graph.get_collection`
for more details.
Args:
key: The key for the collection. For example, the `GraphKeys` class contains
many standard names for collections.
scope: (Optional.) If supplied, the resulting list is filtered to include
only items whose `name` attribute matches using `re.match`. Items without
a `name` attribute are never returned if a scope is supplied and the
      choice of `re.match` means that a `scope` without special tokens filters
by prefix.
Returns:
The list of values in the collection with the given `name`, or
an empty list if no value has been added to that collection. The
list contains the values in the order under which they were
collected.
@compatibility(eager)
Collections are not supported when eager execution is enabled.
@end_compatibility
"""
return get_default_graph().get_collection(key, scope)
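# Illustrative sketch (not part of the original module) of how the collection
# wrappers above fit together in graph mode; `some_loss` and `another_loss`
# are hypothetical graph tensors.
#
#   with Graph().as_default():
#     add_to_collection(GraphKeys.LOSSES, some_loss)
#     assert get_collection(GraphKeys.LOSSES) == [some_loss]
#     # get_collection returns a copy; get_collection_ref returns the list
#     # itself, so appending to it mutates the collection in place.
#     get_collection_ref(GraphKeys.LOSSES).append(another_loss)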
def get_all_collection_keys():
"""Returns a list of collections used in the default graph."""
return get_default_graph().get_all_collection_keys()
def name_scope(name, default_name=None, values=None, skip_on_eager=True):
"""Internal-only entry point for `name_scope*`.
Internal ops do not use the public API and instead rely on
`ops.name_scope` regardless of the execution mode. This function
dispatches to the correct `name_scope*` implementation based on
the arguments provided and the current mode. Specifically,
* if `values` contains a graph tensor `Graph.name_scope` is used;
* `name_scope_v1` is used in graph mode;
* `name_scope_v2` -- in eager mode.
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
skip_on_eager: Indicates to return NullContextmanager if executing eagerly.
By default this is True since naming tensors and operations in eager mode
      has little use and causes unnecessary performance overhead. However, it is
important to preserve variable names since they are often useful for
debugging and saved models.
Returns:
`name_scope*` context manager.
"""
if not context.executing_eagerly():
return internal_name_scope_v1(name, default_name, values)
if skip_on_eager:
return NullContextmanager()
name = default_name if name is None else name
if values:
# The presence of a graph tensor in `values` overrides the context.
# TODO(slebedev): this is Keras-specific and should be removed.
# pylint: disable=unidiomatic-typecheck
graph_value = next((value for value in values if type(value) == Tensor),
None)
# pylint: enable=unidiomatic-typecheck
if graph_value is not None:
return graph_value.graph.name_scope(name)
return name_scope_v2(name or "")
class internal_name_scope_v1(object): # pylint: disable=invalid-name
"""Graph-only version of `name_scope_v1`."""
@property
def name(self):
return self._name
def __init__(self, name, default_name=None, values=None):
"""Initialize the context manager.
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
Raises:
TypeError: if `default_name` is passed in but not a string.
"""
if not (default_name is None or isinstance(default_name, six.string_types)):
raise TypeError(
"`default_name` type (%s) is not a string type. You likely meant to "
"pass this into the `values` kwarg." % type(default_name))
self._name = default_name if name is None else name
self._default_name = default_name
self._values = values
def __enter__(self):
"""Start the scope block.
Returns:
The scope name.
Raises:
ValueError: if neither `name` nor `default_name` is provided
but `values` are.
"""
if self._name is None and self._values is not None:
# We only raise an error if values is not None (provided) because
# currently tf.name_scope(None) (values=None then) is sometimes used as
# an idiom to reset to top scope.
raise ValueError(
"At least one of name (%s) and default_name (%s) must be provided."
% (self._name, self._default_name))
g = get_default_graph()
if self._values and not g.building_function:
# Specialize based on the knowledge that `_get_graph_from_inputs()`
# ignores `inputs` when building a function.
g_from_inputs = _get_graph_from_inputs(self._values)
if g_from_inputs is not g:
g = g_from_inputs
self._g_manager = g.as_default()
self._g_manager.__enter__()
else:
self._g_manager = None
else:
self._g_manager = None
try:
self._name_scope = g.name_scope(self._name)
return self._name_scope.__enter__()
except:
if self._g_manager is not None:
self._g_manager.__exit__(*sys.exc_info())
raise
def __exit__(self, *exc_info):
self._name_scope.__exit__(*exc_info)
if self._g_manager is not None:
self._g_manager.__exit__(*exc_info)
# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to avoid
# some object creation overhead.
@tf_export(v1=["name_scope"])
class name_scope_v1(object): # pylint: disable=invalid-name
"""A context manager for use when defining a Python op.
This context manager validates that the given `values` are from the
same graph, makes that graph the default graph, and pushes a
name scope in that graph (see
`tf.Graph.name_scope`
for more details on that).
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
"""
__slots__ = ["_name", "_name_scope"]
@property
def name(self):
return self._name
def __init__(self, name, default_name=None, values=None):
"""Initialize the context manager.
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
values: The list of `Tensor` arguments that are passed to the op function.
Raises:
TypeError: if `default_name` is passed in but not a string.
"""
self._name_scope = name_scope(
name, default_name, values, skip_on_eager=False)
self._name = default_name if name is None else name
def __enter__(self):
return self._name_scope.__enter__()
def __exit__(self, *exc_info):
return self._name_scope.__exit__(*exc_info)
@tf_export("name_scope", v1=[])
class name_scope_v2(object):
"""A context manager for use when defining a Python op.
This context manager pushes a name scope, which will make the name of all
operations added within it have a prefix.
For example, to define a new Python op called `my_op`:
```python
def my_op(a, b, c, name=None):
with tf.name_scope("MyOp") as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.convert_to_tensor(c, name="c")
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
When executed, the Tensors `a`, `b`, `c`, will have names `MyOp/a`, `MyOp/b`,
and `MyOp/c`.
Inside a `tf.function`, if the scope name already exists, the name will be
made unique by appending `_n`. For example, calling `my_op` the second time
will generate `MyOp_1/a`, etc.
"""
__slots__ = ["_name", "_exit_fns"]
def __init__(self, name):
"""Initialize the context manager.
Args:
name: The prefix to use on all names created within the name scope.
Raises:
ValueError: If name is not a string.
"""
if not isinstance(name, six.string_types):
raise ValueError("name for name_scope must be a string.")
self._name = name
self._exit_fns = []
@property
def name(self):
return self._name
def __enter__(self):
"""Start the scope block.
Returns:
The scope name.
"""
ctx = context.context()
if ctx.executing_eagerly():
# Names are not auto-incremented in eager mode.
# A trailing slash breaks out of nested name scopes, indicating a
# fully specified scope name, for compatibility with Graph.name_scope.
# This also prevents auto-incrementing.
old_name = ctx.scope_name
name = self._name
if not name:
scope_name = ""
elif name[-1] == "/":
scope_name = name
elif old_name:
scope_name = old_name + name + "/"
else:
scope_name = name + "/"
ctx.scope_name = scope_name
def _restore_name_scope(*_):
ctx.scope_name = old_name
self._exit_fns.append(_restore_name_scope)
else:
scope = get_default_graph().name_scope(self._name)
scope_name = scope.__enter__()
self._exit_fns.append(scope.__exit__)
return scope_name
def __exit__(self, type_arg, value_arg, traceback_arg):
self._exit_fns.pop()(type_arg, value_arg, traceback_arg)
return False # False values do not suppress exceptions
def __getstate__(self):
return self._name, self._exit_fns
def __setstate__(self, state):
self._name = state[0]
self._exit_fns = state[1]
def strip_name_scope(name, export_scope):
"""Removes name scope from a name.
Args:
name: A `string` name.
export_scope: Optional `string`. Name scope to remove.
Returns:
Name with name scope removed, or the original name if export_scope
is None.
"""
if export_scope:
if export_scope[-1] == "/":
export_scope = export_scope[:-1]
try:
# Strips export_scope/, export_scope///,
# ^export_scope/, loc:@export_scope/.
str_to_replace = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)"
return re.sub(str_to_replace, r"\1\2", compat.as_str(name), count=1)
except TypeError as e:
# If the name is not of a type we can process, simply return it.
logging.warning(e)
return name
else:
return name
def prepend_name_scope(name, import_scope):
"""Prepends name scope to a name.
Args:
name: A `string` name.
import_scope: Optional `string`. Name scope to add.
Returns:
Name with name scope added, or the original name if import_scope
is None.
"""
if import_scope:
if import_scope[-1] == "/":
import_scope = import_scope[:-1]
try:
str_to_replace = r"([\^]|loc:@|^)(.*)"
return re.sub(str_to_replace, r"\1" + import_scope + r"/\2",
compat.as_str(name))
except TypeError as e:
# If the name is not of a type we can process, simply return it.
logging.warning(e)
return name
else:
return name
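# Illustrative sketch (not part of the original module): rough behavior of the
# two helpers above on plain string names.
#
#   strip_name_scope("export/foo/bar:0", "export")  -> "foo/bar:0"
#   strip_name_scope("^export/foo", "export/")      -> "^foo"
#   prepend_name_scope("foo/bar", "import")         -> "import/foo/bar"
#   prepend_name_scope("^foo", "import")            -> "^import/foo"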
# pylint: disable=g-doc-return-or-yield
# pylint: disable=not-context-manager
@tf_export(v1=["op_scope"])
@tf_contextlib.contextmanager
def op_scope(values, name, default_name=None):
"""DEPRECATED. Same as name_scope above, just different argument order."""
logging.warn("tf.op_scope(values, name, default_name) is deprecated,"
" use tf.name_scope(name, default_name, values)")
with name_scope(name, default_name=default_name, values=values) as scope:
yield scope
_proto_function_registry = registry.Registry("proto functions")
def register_proto_function(collection_name,
proto_type=None,
to_proto=None,
from_proto=None):
"""Registers `to_proto` and `from_proto` functions for collection_name.
`to_proto` function converts a Python object to the corresponding protocol
buffer, and returns the protocol buffer.
`from_proto` function converts protocol buffer into a Python object, and
  returns the object.
Args:
collection_name: Name of the collection.
proto_type: Protobuf type, such as `saver_pb2.SaverDef`,
      `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`.
to_proto: Function that implements Python object to protobuf conversion.
from_proto: Function that implements protobuf to Python object conversion.
"""
if to_proto and not callable(to_proto):
raise TypeError("to_proto must be callable.")
if from_proto and not callable(from_proto):
raise TypeError("from_proto must be callable.")
_proto_function_registry.register((proto_type, to_proto, from_proto),
collection_name)
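# Illustrative sketch (not part of the original module): registering
# (de)serialization hooks for a hypothetical custom collection so it can be
# round-tripped through a MetaGraphDef. `MyThingDef`, `_my_to_proto` and
# `_my_from_proto` are placeholders, not real TensorFlow symbols.
#
#   register_proto_function(
#       "my_custom_collection",
#       proto_type=MyThingDef,
#       to_proto=_my_to_proto,      # Python object -> MyThingDef
#       from_proto=_my_from_proto)  # MyThingDef -> Python object
#   assert get_to_proto_function("my_custom_collection") is _my_to_proto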
def get_collection_proto_type(collection_name):
"""Returns the proto_type for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[0]
except LookupError:
return None
def get_to_proto_function(collection_name):
"""Returns the to_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[1]
except LookupError:
return None
def get_from_proto_function(collection_name):
"""Returns the from_proto function for collection_name."""
try:
return _proto_function_registry.lookup(collection_name)[2]
except LookupError:
return None
def _op_to_colocate_with(v, graph):
"""Operation object corresponding to v to use for colocation constraints."""
if v is None:
return None, None
if isinstance(v, Operation):
return v, None
# We always want to colocate with the reference op.
# When 'v' is a ResourceVariable, the reference op is the handle creating op.
#
# What this should be is:
# if isinstance(v, ResourceVariable):
# return v.handle.op, v
# However, that would require a circular import dependency.
# As of October 2018, there were attempts underway to remove
# colocation constraints altogether. Assuming that will
# happen soon, perhaps this hack to work around the circular
# import dependency is acceptable.
if hasattr(v, "handle") and isinstance(v.handle, Tensor):
device_only_candidate = lambda: None
device_only_candidate.device = v.device
device_only_candidate.name = v.name
if graph.building_function:
return graph.capture(v.handle).op, device_only_candidate
else:
return v.handle.op, device_only_candidate
return internal_convert_to_tensor_or_indexed_slices(v, as_ref=True).op, None
def _is_keras_symbolic_tensor(x):
return hasattr(x, "graph") and getattr(x.graph, "name", None) == "keras_graph"
# These symbols were originally defined in this module; import them for
# backwards compatibility until all references have been updated to access
# them from the indexed_slices.py module.
IndexedSlices = indexed_slices.IndexedSlices
IndexedSlicesValue = indexed_slices.IndexedSlicesValue
convert_to_tensor_or_indexed_slices = \
indexed_slices.convert_to_tensor_or_indexed_slices
convert_n_to_tensor_or_indexed_slices = \
indexed_slices.convert_n_to_tensor_or_indexed_slices
internal_convert_to_tensor_or_indexed_slices = \
indexed_slices.internal_convert_to_tensor_or_indexed_slices
internal_convert_n_to_tensor_or_indexed_slices = \
indexed_slices.internal_convert_n_to_tensor_or_indexed_slices
register_tensor_conversion_function = \
tensor_conversion_registry.register_tensor_conversion_function
# Helper functions for op wrapper modules generated by `python_op_gen`.
def to_raw_op(f):
"""Make a given op wrapper function `f` raw.
Raw op wrappers can only be called with keyword arguments.
Args:
f: An op wrapper function to make raw.
Returns:
Raw `f`.
"""
# Copy `f` to get a new `__dict__`, otherwise `tf_export` will fail
# due to double-registration.
f = types.FunctionType(f.__code__, f.__globals__, f.__name__, f.__defaults__,
f.__closure__)
return kwarg_only(f)
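# Illustrative sketch (not part of the original module): a raw op wrapper only
# accepts keyword arguments. `gen_math_ops.add` is used as a representative
# generated wrapper here.
#
#   raw_add = to_raw_op(gen_math_ops.add)
#   raw_add(x=a, y=b, name="add")  # OK
#   raw_add(a, b)                  # rejected: positional arguments not allowed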
def raise_from_not_ok_status(e, name):
message = e.message + (" name: " + name if name is not None else "")
# pylint: disable=protected-access
six.raise_from(core._status_to_exception(e.code, message), None)
# pylint: enable=protected-access
def add_exit_callback_to_default_func_graph(fn):
"""Add a callback to run when the default function graph goes out of scope.
Usage:
```python
@tf.function
def fn(x, v):
expensive = expensive_object(v)
add_exit_callback_to_default_func_graph(lambda: expensive.release())
return g(x, expensive)
fn(x=tf.constant(...), v=...)
# `expensive` has been released.
```
Args:
fn: A callable that takes no arguments and whose output is ignored.
To be executed when exiting func graph scope.
Raises:
RuntimeError: If executed when the current default graph is not a FuncGraph,
or not currently executing in function creation mode (e.g., if inside
an init_scope).
"""
default_graph = get_default_graph()
if not default_graph._building_function: # pylint: disable=protected-access
raise RuntimeError(
"Cannot add scope exit callbacks when not building a function. "
"Default graph: {}".format(default_graph))
default_graph._add_scope_exit_callback(fn) # pylint: disable=protected-access
def _reconstruct_sequence_inputs(op_def, inputs, attrs):
"""Regroups a flat list of input tensors into scalar and sequence inputs.
Args:
op_def: The `op_def_pb2.OpDef` (for knowing the input types)
inputs: a list of input `Tensor`s to the op.
attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define
how long each sequence is)
Returns:
A list of `Tensor`s (corresponding to scalar inputs) and lists of
`Tensor`s (corresponding to sequence inputs).
"""
grouped_inputs = []
i = 0
for input_arg in op_def.input_arg:
if input_arg.number_attr:
input_len = attrs[input_arg.number_attr].i
is_sequence = True
elif input_arg.type_list_attr:
input_len = len(attrs[input_arg.type_list_attr].list.type)
is_sequence = True
else:
input_len = 1
is_sequence = False
if is_sequence:
grouped_inputs.append(inputs[i:i + input_len])
else:
grouped_inputs.append(inputs[i])
i += input_len
assert i == len(inputs)
return grouped_inputs
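# Illustrative walk-through (not part of the original module): for an op whose
# OpDef declares a scalar input "x" followed by a number_attr("N") sequence
# input "values", with attrs {"N": 3} and inputs [t0, t1, t2, t3], the helper
# above returns [t0, [t1, t2, t3]] -- scalar inputs stay as single tensors and
# sequence inputs are regrouped into lists.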
_numpy_style_type_promotion = False
def enable_numpy_style_type_promotion():
"""If called, follows NumPy's rules for type promotion.
Used for enabling NumPy behavior on methods for TF NumPy.
"""
global _numpy_style_type_promotion
_numpy_style_type_promotion = True
_numpy_style_slicing = False
def enable_numpy_style_slicing():
"""If called, follows NumPy's rules for slicing Tensors.
Used for enabling NumPy behavior on slicing for TF NumPy.
"""
global _numpy_style_slicing
_numpy_style_slicing = True
class _TensorIterator(object):
"""Iterates over the leading dim of a Tensor. Performs no error checks."""
__slots__ = ["_tensor", "_index", "_limit"]
def __init__(self, tensor, dim0):
self._tensor = tensor
self._index = 0
self._limit = dim0
def __iter__(self):
return self
def __next__(self):
if self._index == self._limit:
raise StopIteration
result = self._tensor[self._index]
self._index += 1
return result
next = __next__ # python2.x compatibility.
def set_int_list_attr(op, attr_name, ints):
"""TF internal method used to set a list(int) attribute in the node_def."""
ints_list = attr_value_pb2.AttrValue.ListValue(i=ints)
op._set_attr(attr_name, attr_value_pb2.AttrValue(list=ints_list)) # pylint:disable=protected-access
def _get_enclosing_context(graph):
# pylint: disable=protected-access
if graph is None:
return None
if graph._control_flow_context is not None:
return graph._control_flow_context
if graph.building_function and hasattr(graph, "outer_graph"):
return _get_enclosing_context(graph.outer_graph)
def get_resource_handle_data(graph_op):
assert type(graph_op) == Tensor # pylint: disable=unidiomatic-typecheck
handle_data = pywrap_tf_session.GetHandleShapeAndType(
graph_op.graph._c_graph, graph_op._as_tf_output()) # pylint: disable=protected-access
return cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData.FromString(
compat.as_bytes(handle_data))
def _copy_handle_data_to_arg_def(tensor, arg_def):
handle_data = get_resource_handle_data(tensor)
if handle_data.shape_and_type:
shape_and_type = handle_data.shape_and_type[0]
proto = arg_def.handle_data.add()
proto.dtype = shape_and_type.dtype
proto.shape.CopyFrom(handle_data.shape_and_type[0].shape)
| 36.483944
| 115
| 0.691861
|
794f48a34f55e1df7c3762c67f0be95dbf110835
| 1,775
|
py
|
Python
|
lambdata_johanaluna/tryme2.py
|
johanaluna/lambdata
|
342ffd027de3a7a68ce52164df568f502b65d77f
|
[
"MIT"
] | null | null | null |
lambdata_johanaluna/tryme2.py
|
johanaluna/lambdata
|
342ffd027de3a7a68ce52164df568f502b65d77f
|
[
"MIT"
] | 4
|
2020-03-24T17:49:32.000Z
|
2021-06-02T00:34:44.000Z
|
lambdata_johanaluna/tryme2.py
|
johanaluna/lambdata
|
342ffd027de3a7a68ce52164df568f502b65d77f
|
[
"MIT"
] | null | null | null |
# Import libraries
import pandas
import numpy
from sklearn.model_selection import train_test_split
class Check_Data():
def __init__(self, df, name_column_target):
self.df = df
self.name_column_target = name_column_target
    # Function to check for nulls in a data frame and report how many it found
def reportnulls(self):
"""
        Takes a data frame, checks for nulls, sums the counts,
        and organizes the results from highest to lowest.
"""
self.null_counts = self.df.isnull().sum().sort_values(ascending=False)
# return count of null values
return self.null_counts
"""
    Function to split the data into train, validation and test sets.
    Each split is 80% / 20%, so the test set corresponds to 20%
    of the complete data frame and the validation set to 20% of
    the remaining training data.
"""
def splitdata(self):
print('shape of your data frame: ', self.df.shape)
# Define X and y
self.X = self.df.drop(columns=self.name_column_target)
self.y = self.df[self.name_column_target]
# we need to do 2 splits
# 1.(Takes X and y into X_trainval, X_test, y_trainval, y_test)
self.X_trainval, self.X_test, self.y_trainval, self.y_test = train_test_split(
self.X, self.y, train_size=0.80, test_size=0.20, random_state=42)
# 2.(Takes X_trainval, y_trainval and split data
# into X_train, X_val, y_train, y_val)
self.X_train, self.X_val, self.y_train, self.y_val = train_test_split(
self.X_trainval, self.y_trainval, train_size=0.80,
test_size=0.20, random_state=42)
# Return the results of the split
return (self.X_train, self.y_train, self.X_val, self.y_val, self.X_test, self.y_test)
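# Illustrative usage sketch (not part of the original class definition);
# `df` is assumed to be any pandas DataFrame that contains a 'target' column.
#
#   checker = Check_Data(df, 'target')
#   print(checker.reportnulls())
#   X_train, y_train, X_val, y_val, X_test, y_test = checker.splitdata()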
| 34.803922
| 93
| 0.664225
|
794f4925fc1bac4b3af3f99eae619dbaddd23e54
| 2,771
|
py
|
Python
|
CIM15/IEC61970/Informative/InfERPSupport/ErpLedgerBudget.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 58
|
2015-04-22T10:41:03.000Z
|
2022-03-29T16:04:34.000Z
|
CIM15/IEC61970/Informative/InfERPSupport/ErpLedgerBudget.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 12
|
2015-08-26T03:57:23.000Z
|
2020-12-11T20:14:42.000Z
|
CIM15/IEC61970/Informative/InfERPSupport/ErpLedgerBudget.py
|
MaximeBaudette/PyCIM
|
d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14
|
[
"MIT"
] | 35
|
2015-01-10T12:21:03.000Z
|
2020-09-09T08:18:16.000Z
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61968.Common.Document import Document
class ErpLedgerBudget(Document):
"""Information for utility Ledger Budgets. They support the transfer budget amounts between all possible source applications throughout an enterprise and a general ledger or budget application.Information for utility Ledger Budgets. They support the transfer budget amounts between all possible source applications throughout an enterprise and a general ledger or budget application.
"""
def __init__(self, ErpLedBudLineItems=None, *args, **kw_args):
"""Initialises a new 'ErpLedgerBudget' instance.
@param ErpLedBudLineItems:
"""
self._ErpLedBudLineItems = []
self.ErpLedBudLineItems = [] if ErpLedBudLineItems is None else ErpLedBudLineItems
super(ErpLedgerBudget, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["ErpLedBudLineItems"]
_many_refs = ["ErpLedBudLineItems"]
def getErpLedBudLineItems(self):
return self._ErpLedBudLineItems
def setErpLedBudLineItems(self, value):
for x in self._ErpLedBudLineItems:
x.ErpLedgerBudget = None
for y in value:
y._ErpLedgerBudget = self
self._ErpLedBudLineItems = value
ErpLedBudLineItems = property(getErpLedBudLineItems, setErpLedBudLineItems)
def addErpLedBudLineItems(self, *ErpLedBudLineItems):
for obj in ErpLedBudLineItems:
obj.ErpLedgerBudget = self
def removeErpLedBudLineItems(self, *ErpLedBudLineItems):
for obj in ErpLedBudLineItems:
obj.ErpLedgerBudget = None
| 42.630769
| 387
| 0.737279
|
794f4941c5fe873992a6fadb5737d3b685608dad
| 2,607
|
py
|
Python
|
tools/installer/cefpython3.__init__.py
|
simon-graham/cefpython
|
cce4e3606a1bf1030133ebefc17bccad8099e7b5
|
[
"CNRI-Python",
"RSA-MD",
"Linux-OpenIB"
] | 1
|
2020-12-30T18:54:28.000Z
|
2020-12-30T18:54:28.000Z
|
tools/installer/cefpython3.__init__.py
|
simon-graham/cefpython
|
cce4e3606a1bf1030133ebefc17bccad8099e7b5
|
[
"CNRI-Python",
"RSA-MD",
"Linux-OpenIB"
] | null | null | null |
tools/installer/cefpython3.__init__.py
|
simon-graham/cefpython
|
cce4e3606a1bf1030133ebefc17bccad8099e7b5
|
[
"CNRI-Python",
"RSA-MD",
"Linux-OpenIB"
] | 1
|
2020-11-21T07:15:14.000Z
|
2020-11-21T07:15:14.000Z
|
# Copyright (c) 2013 CEF Python, see the Authors file.
# All rights reserved. Licensed under BSD 3-clause license.
# Project website: https://github.com/cztomczak/cefpython
# NOTE: Template variables like {{VERSION}} are replaced with actual
# values when make_installer.py tool generates this package
# installer.
import os
import sys
import ctypes
import platform
__all__ = ["cefpython"] # Disabled: "wx"
__version__ = "{{VERSION}}"
__author__ = "The CEF Python authors"
# If package was installed using PIP or setup.py then package
# dir is here:
# /usr/local/lib/python2.7/dist-packages/cefpython3/
# If this is a debian package then package_dir returns:
# /usr/lib/pymodules/python2.7/cefpython3
# The above path consists of symbolic links to the real directory:
# /usr/share/pyshared/cefpython3
package_dir = os.path.dirname(os.path.abspath(__file__))
# This loads the libcef.so library for the subprocess executable.
# On Mac it works without setting library paths.
os.environ["LD_LIBRARY_PATH"] = package_dir
# This env variable will be returned by cefpython.GetModuleDirectory().
os.environ["CEFPYTHON3_PATH"] = package_dir
# This loads the libcef library for the main python executable.
# Loading library dynamically using ctypes.CDLL is required on Linux.
# TODO: Check if on Linux libcef.so can be linked like on Mac.
# On Mac the CEF framework dependency information is added to
# the cefpython*.so module by linking to CEF framework.
# The libffmpegsumo.so library does not need to be loaded here;
# loading it here, in the browser process, may cause issues.
if platform.system() == "Linux":
libcef = os.path.join(package_dir, "libcef.so")
ctypes.CDLL(libcef, ctypes.RTLD_GLOBAL)
# Load the cefpython module for given Python version
if sys.version_info[:2] == (2, 7):
# noinspection PyUnresolvedReferences
from . import cefpython_py27 as cefpython
elif sys.version_info[:2] == (3, 4):
# noinspection PyUnresolvedReferences
from . import cefpython_py34 as cefpython
elif sys.version_info[:2] == (3, 5):
# noinspection PyUnresolvedReferences
from . import cefpython_py35 as cefpython
elif sys.version_info[:2] == (3, 6):
# noinspection PyUnresolvedReferences
from . import cefpython_py36 as cefpython
elif sys.version_info[:2] == (3, 7):
# noinspection PyUnresolvedReferences
from . import cefpython_py37 as cefpython
elif sys.version_info[:2] == (3, 8):
# noinspection PyUnresolvedReferences
from . import cefpython_py38 as cefpython
else:
raise Exception("Python version not supported: " + sys.version)
| 38.338235
| 71
| 0.749137
|
794f4b81b5d9f432459235c580eb422a006c4147
| 4,858
|
py
|
Python
|
tests/processors/tests.py
|
mgedmin/raven-python
|
6d487a8298dd5340c701b7195eb65ce4ed113f1f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/processors/tests.py
|
mgedmin/raven-python
|
6d487a8298dd5340c701b7195eb65ce4ed113f1f
|
[
"BSD-3-Clause"
] | null | null | null |
tests/processors/tests.py
|
mgedmin/raven-python
|
6d487a8298dd5340c701b7195eb65ce4ed113f1f
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from mock import Mock
from raven.utils.testutils import TestCase
from raven.processors import SanitizePasswordsProcessor, \
RemovePostDataProcessor, RemoveStackLocalsProcessor
VARS = {
'foo': 'bar',
'password': 'hello',
'the_secret': 'hello',
'a_password_here': 'hello',
'api_key': 'secret_key',
'apiKey': 'secret_key',
}
class SantizePasswordsProcessorTest(TestCase):
def _check_vars_sanitized(self, vars, proc):
"""
Helper to check that keys have been sanitized.
"""
self.assertTrue('foo' in vars)
self.assertEquals(vars['foo'], 'bar')
self.assertTrue('password' in vars)
self.assertEquals(vars['password'], proc.MASK)
self.assertTrue('the_secret' in vars)
self.assertEquals(vars['the_secret'], proc.MASK)
self.assertTrue('a_password_here' in vars)
self.assertEquals(vars['a_password_here'], proc.MASK)
self.assertTrue('api_key' in vars)
self.assertEquals(vars['api_key'], proc.MASK)
self.assertTrue('apiKey' in vars)
self.assertEquals(vars['apiKey'], proc.MASK)
def test_stacktrace(self):
data = {
'sentry.interfaces.Stacktrace': {
'frames': [{'vars': VARS}],
}
}
proc = SanitizePasswordsProcessor(Mock())
result = proc.process(data)
self.assertTrue('sentry.interfaces.Stacktrace' in result)
stack = result['sentry.interfaces.Stacktrace']
self.assertTrue('frames' in stack)
self.assertEquals(len(stack['frames']), 1)
frame = stack['frames'][0]
self.assertTrue('vars' in frame)
self._check_vars_sanitized(frame['vars'], proc)
def test_http(self):
data = {
'sentry.interfaces.Http': {
'data': VARS,
'env': VARS,
'headers': VARS,
'cookies': VARS,
}
}
proc = SanitizePasswordsProcessor(Mock())
result = proc.process(data)
self.assertTrue('sentry.interfaces.Http' in result)
http = result['sentry.interfaces.Http']
for n in ('data', 'env', 'headers', 'cookies'):
self.assertTrue(n in http)
self._check_vars_sanitized(http[n], proc)
def test_querystring_as_string(self):
data = {
'sentry.interfaces.Http': {
'query_string':
'foo=bar&password=hello&the_secret=hello'
'&a_password_here=hello&api_key=secret_key',
}
}
proc = SanitizePasswordsProcessor(Mock())
result = proc.process(data)
self.assertTrue('sentry.interfaces.Http' in result)
http = result['sentry.interfaces.Http']
self.assertEquals(
http['query_string'],
'foo=bar&password=%(m)s&the_secret=%(m)s'
'&a_password_here=%(m)s&api_key=%(m)s' % dict(m=proc.MASK))
def test_querystring_as_string_with_partials(self):
data = {
'sentry.interfaces.Http': {
'query_string': 'foo=bar&password&baz=bar',
}
}
proc = SanitizePasswordsProcessor(Mock())
result = proc.process(data)
self.assertTrue('sentry.interfaces.Http' in result)
http = result['sentry.interfaces.Http']
        self.assertEquals(http['query_string'], 'foo=bar&password&baz=bar')
def test_sanitize_credit_card(self):
proc = SanitizePasswordsProcessor(Mock())
result = proc.sanitize('foo', '4242424242424242')
self.assertEquals(result, proc.MASK)
def test_sanitize_credit_card_amex(self):
# AMEX numbers are 15 digits, not 16
proc = SanitizePasswordsProcessor(Mock())
result = proc.sanitize('foo', '424242424242424')
self.assertEquals(result, proc.MASK)
class RemovePostDataProcessorTest(TestCase):
def test_does_remove_data(self):
data = {
'sentry.interfaces.Http': {
'data': 'foo',
}
}
proc = RemovePostDataProcessor(Mock())
result = proc.process(data)
self.assertTrue('sentry.interfaces.Http' in result)
http = result['sentry.interfaces.Http']
self.assertFalse('data' in http)
class RemoveStackLocalsProcessorTest(TestCase):
def test_does_remove_data(self):
data = {
'sentry.interfaces.Stacktrace': {
'frames': [{'vars': VARS,}],
}
}
proc = RemoveStackLocalsProcessor(Mock())
result = proc.process(data)
self.assertTrue('sentry.interfaces.Stacktrace' in result)
stack = result['sentry.interfaces.Stacktrace']
for frame in stack['frames']:
self.assertFalse('vars' in frame)
| 32.386667
| 95
| 0.595101
|
794f4cbe53333ba69d06e91cf05fbac54b794393
| 533
|
py
|
Python
|
app/models.py
|
Kaundu/News-Highlight
|
37f7d81059f684c958938b00de9b2cfd40c30dd8
|
[
"Unlicense"
] | null | null | null |
app/models.py
|
Kaundu/News-Highlight
|
37f7d81059f684c958938b00de9b2cfd40c30dd8
|
[
"Unlicense"
] | 1
|
2020-06-13T08:18:27.000Z
|
2020-06-13T08:18:27.000Z
|
app/models.py
|
Tellvinch/newsalert
|
bbd2d6a27d9f5961814e52b09e3bc3543b026fac
|
[
"MIT"
] | null | null | null |
# File for models/classes
class Article:
def __init__(self, id, author, title, description, url, image, date):
self.id = id
self.author = author
self.title = title
self.description = description
self.url = url
self.image = image
self.date = date
class News_Update:
'''
    Highlight class that defines news update objects
'''
def __init__(self, id, name, category, url):
self.id = id
self.name = name
self.category = category
self.url = url
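# Illustrative usage sketch (not part of the original models); the field
# values below are made up.
#
#   article = Article(1, 'Jane Doe', 'Title', 'Short description...',
#                     'https://example.com/story', 'https://example.com/img.jpg',
#                     '2020-06-13')
#   source = News_Update('bbc-news', 'BBC News', 'general',
#                        'https://www.bbc.co.uk/news')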
| 25.380952
| 73
| 0.581614
|
794f4db925d05389f2f5e79bf1b6d0de0c9a9b49
| 4,973
|
py
|
Python
|
titanic/training/train.py
|
memasanz/02_MLOpsPython
|
23fa96a70a58ad6a25642b20486e94080a5ea580
|
[
"MIT"
] | 1
|
2022-03-28T17:31:02.000Z
|
2022-03-28T17:31:02.000Z
|
titanic/training/train.py
|
memasanz/02_MLOpsPython
|
23fa96a70a58ad6a25642b20486e94080a5ea580
|
[
"MIT"
] | null | null | null |
titanic/training/train.py
|
memasanz/02_MLOpsPython
|
23fa96a70a58ad6a25642b20486e94080a5ea580
|
[
"MIT"
] | null | null | null |
"""
Copyright (C) Microsoft Corporation. All rights reserved.
Microsoft Corporation (“Microsoft”) grants you a nonexclusive, perpetual,
royalty-free right to use, copy, and modify the software code provided by us
("Software Code"). You may not sublicense the Software Code or any use of it
(except to your affiliates and to vendors to perform work on your behalf)
through distribution, network access, service agreement, lease, rental, or
otherwise. This license does not purport to express any claim of ownership over
data you may have shared with Microsoft in the creation of the Software Code.
Unless applicable law gives you more rights, Microsoft reserves all other
rights not expressly granted herein, whether by implication, estoppel or
otherwise.
THE SOFTWARE CODE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
MICROSOFT OR ITS LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THE SOFTWARE CODE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import pandas as pd
import numpy as np
import joblib
import os
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.metrics import roc_auc_score,roc_curve
# Split the dataframe into test and train data
def split_data(df):
# X = df.drop('Y', axis=1).values
# y = df['Y'].values
# X_train, X_test, y_train, y_test = train_test_split(
# X, y, test_size=0.2, random_state=0)
# data = {"train": {"X": X_train, "y": y_train},
# "test": {"X": X_test, "y": y_test}}
# return data
LABEL = 'Survived'
y_raw = df[LABEL]
X_raw = df.drop([LABEL], axis=1)
# Train test split
X_train, X_test, y_train, y_test = train_test_split(X_raw, y_raw, test_size=0.3, random_state=0)
data = {"train": {"X": X_train, "y": y_train},
"test": {"X": X_test, "y": y_test}}
return data
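# Illustrative sketch (not part of the original script): shape of the dict
# returned by split_data, assuming `df` is the Titanic training DataFrame.
#
#   data = split_data(df)
#   data["train"]["X"], data["train"]["y"]  # 70% of the rows
#   data["test"]["X"], data["test"]["y"]    # 30% of the rows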
def buildpreprocessorpipeline(X_raw):
categorical_features = X_raw.select_dtypes(include=['object']).columns
numeric_features = X_raw.select_dtypes(include=['float','int64']).columns
categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='constant', fill_value="missing")),
('onehotencoder', OneHotEncoder(categories='auto', sparse=False, handle_unknown='ignore'))])
numeric_transformer = Pipeline(steps=[('scaler', StandardScaler())])
preprocessor = ColumnTransformer(
transformers=[
('numeric', numeric_transformer, numeric_features),
('categorical', categorical_transformer, categorical_features)
], remainder="drop")
return preprocessor
# Train the model, return the model
def train_model(data, ridge_args):
# reg_model = Ridge(**ridge_args)
# reg_model.fit(data["train"]["X"], data["train"]["y"])
# return reg_model
lg = LogisticRegression(penalty='l2', C=1.0, solver='liblinear')
preprocessor = buildpreprocessorpipeline(data["train"]["X"])
#estimator instance
clf = Pipeline(steps=[('preprocessor', preprocessor),
('regressor', lg)])
    model = clf.fit(data["train"]["X"], data["train"]["y"])
    return model
# Evaluate the metrics for the model
def get_model_metrics(model, data):
# preds = model.predict(data["test"]["X"])
# mse = mean_squared_error(preds, data["test"]["y"])
# metrics = {"mse": mse}
# return metrics
y_hat = model.predict(data["test"]["X"])
    acc = np.average(y_hat == data["test"]["y"])
    y_scores = model.predict_proba(data["test"]["X"])
    auc = roc_auc_score(data["test"]["y"], y_scores[:, 1])
metrics = {"acc": acc, "auc": auc }
return metrics
def main():
print("Running train.py")
# # Define training parameters
# ridge_args = {"alpha": 0.5}
# # Load the training data as dataframe
# data_dir = "data"
# data_file = os.path.join(data_dir, 'diabetes.csv')
# train_df = pd.read_csv(data_file)
# data = split_data(train_df)
# # Train the model
# model = train_model(data, ridge_args)
# # Log the metrics for the model
# metrics = get_model_metrics(model, data)
# for (k, v) in metrics.items():
# print(f"{k}: {v}")
if __name__ == '__main__':
main()
| 36.837037
| 138
| 0.68892
|
794f4dfe9d5e7877d5c526ec8cc8377bfc2c94f6
| 2,702
|
py
|
Python
|
builders/frontend_builder.py
|
willook/semantic-segmentation-zoo
|
7b756629ce83fab3db4d91bc2513bb555fb28bb4
|
[
"Apache-2.0"
] | 1
|
2019-09-20T14:29:16.000Z
|
2019-09-20T14:29:16.000Z
|
builders/frontend_builder.py
|
willook/semantic-segmentation-zoo
|
7b756629ce83fab3db4d91bc2513bb555fb28bb4
|
[
"Apache-2.0"
] | null | null | null |
builders/frontend_builder.py
|
willook/semantic-segmentation-zoo
|
7b756629ce83fab3db4d91bc2513bb555fb28bb4
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
from tensorflow.contrib import slim
from frontends import resnet_v2
from frontends import mobilenet_v2
from frontends import inception_v4
import os
def build_frontend(inputs, frontend, is_training=True, pretrained_dir="models"):
if frontend == 'ResNet50':
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
logits, end_points = resnet_v2.resnet_v2_50(inputs, is_training=is_training, scope='resnet_v2_50')
frontend_scope='resnet_v2_50'
init_fn = slim.assign_from_checkpoint_fn(model_path=os.path.join(pretrained_dir, 'resnet_v2_50.ckpt'), var_list=slim.get_model_variables('resnet_v2_50'), ignore_missing_vars=True)
elif frontend == 'ResNet101':
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
logits, end_points = resnet_v2.resnet_v2_101(inputs, is_training=is_training, scope='resnet_v2_101')
frontend_scope='resnet_v2_101'
init_fn = slim.assign_from_checkpoint_fn(model_path=os.path.join(pretrained_dir, 'resnet_v2_101.ckpt'), var_list=slim.get_model_variables('resnet_v2_101'), ignore_missing_vars=True)
elif frontend == 'ResNet152':
with slim.arg_scope(resnet_v2.resnet_arg_scope()):
logits, end_points = resnet_v2.resnet_v2_152(inputs, is_training=is_training, scope='resnet_v2_152')
frontend_scope='resnet_v2_152'
init_fn = slim.assign_from_checkpoint_fn(model_path=os.path.join(pretrained_dir, 'resnet_v2_152.ckpt'), var_list=slim.get_model_variables('resnet_v2_152'), ignore_missing_vars=True)
elif frontend == 'MobileNetV2':
with slim.arg_scope(mobilenet_v2.training_scope()):
logits, end_points = mobilenet_v2.mobilenet(inputs, is_training=is_training, scope='mobilenet_v2', base_only=True)
frontend_scope='mobilenet_v2'
init_fn = slim.assign_from_checkpoint_fn(model_path=os.path.join(pretrained_dir, 'mobilenet_v2.ckpt'), var_list=slim.get_model_variables('mobilenet_v2'), ignore_missing_vars=True)
elif frontend == 'InceptionV4':
with slim.arg_scope(inception_v4.inception_v4_arg_scope()):
logits, end_points = inception_v4.inception_v4(inputs, is_training=is_training, scope='inception_v4')
frontend_scope='inception_v4'
init_fn = slim.assign_from_checkpoint_fn(model_path=os.path.join(pretrained_dir, 'inception_v4.ckpt'), var_list=slim.get_model_variables('inception_v4'), ignore_missing_vars=True)
else:
raise ValueError("Unsupported fronetnd model '%s'. This function only supports ResNet50, ResNet101, ResNet152, and MobileNetV2" % (frontend))
return logits, end_points, frontend_scope, init_fn
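# Illustrative usage sketch (not part of the original module); `net_input` is a
# hypothetical [batch, height, width, 3] placeholder and "models" is assumed to
# hold the downloaded pretrained checkpoints.
#
#   logits, end_points, frontend_scope, init_fn = build_frontend(
#       net_input, 'ResNet101', is_training=True, pretrained_dir="models")
#   # Later, after variable initialization inside a tf.Session `sess`:
#   #   init_fn(sess)  # restores the pretrained frontend weights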
| 71.105263
| 193
| 0.749815
|
794f4f9e0faa7f87a8237afdb26dd2f7e4898e6c
| 3,691
|
py
|
Python
|
lambda/py/lambda_upload/ask_sdk_model/interfaces/audioplayer/clear_queue_directive.py
|
frivas/alexa-mixed-polly
|
bf0fde9005a66f3d6f0193799eacef934d166de7
|
[
"W3C"
] | null | null | null |
lambda/py/lambda_upload/ask_sdk_model/interfaces/audioplayer/clear_queue_directive.py
|
frivas/alexa-mixed-polly
|
bf0fde9005a66f3d6f0193799eacef934d166de7
|
[
"W3C"
] | null | null | null |
lambda/py/lambda_upload/ask_sdk_model/interfaces/audioplayer/clear_queue_directive.py
|
frivas/alexa-mixed-polly
|
bf0fde9005a66f3d6f0193799eacef934d166de7
|
[
"W3C"
] | null | null | null |
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.directive import Directive
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
from ask_sdk_model.interfaces.audioplayer.clear_behavior import ClearBehavior
class ClearQueueDirective(Directive):
"""
:param clear_behavior:
:type clear_behavior: (optional) ask_sdk_model.interfaces.audioplayer.clear_behavior.ClearBehavior
"""
deserialized_types = {
'object_type': 'str',
'clear_behavior': 'ask_sdk_model.interfaces.audioplayer.clear_behavior.ClearBehavior'
} # type: Dict
attribute_map = {
'object_type': 'type',
'clear_behavior': 'clearBehavior'
} # type: Dict
def __init__(self, clear_behavior=None):
# type: (Optional[ClearBehavior]) -> None
"""
:param clear_behavior:
:type clear_behavior: (optional) ask_sdk_model.interfaces.audioplayer.clear_behavior.ClearBehavior
"""
self.__discriminator_value = "AudioPlayer.ClearQueue" # type: str
self.object_type = self.__discriminator_value
super(ClearQueueDirective, self).__init__(object_type=self.__discriminator_value)
self.clear_behavior = clear_behavior
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, ClearQueueDirective):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
| 32.955357
| 106
| 0.613113
|
794f4fa0db94d108dcdcbbee05456a60c4e43f7e
| 1,522
|
py
|
Python
|
pro_near/dsl_bball.py
|
myracheng/pronear
|
a92e97cd860900f3c535a72a1b867d8f5ad096ab
|
[
"Apache-2.0"
] | null | null | null |
pro_near/dsl_bball.py
|
myracheng/pronear
|
a92e97cd860900f3c535a72a1b867d8f5ad096ab
|
[
"Apache-2.0"
] | null | null | null |
pro_near/dsl_bball.py
|
myracheng/pronear
|
a92e97cd860900f3c535a72a1b867d8f5ad096ab
|
[
"Apache-2.0"
] | null | null | null |
import dsl
DSL_DICT = {
('list', 'list') : [dsl.MapFunction, dsl.MapPrefixesFunction, dsl.SimpleITE],
('list', 'atom') : [dsl.FoldFunction, dsl.running_averages.RunningAverageLast5Function, dsl.SimpleITE,
dsl.running_averages.RunningAverageWindow13Function,
dsl.running_averages.RunningAverageWindow5Function],
('atom', 'atom') : [dsl.AddFunction, dsl.MultiplyFunction, dsl.SimpleITE,
#51
# dsl.basketball.BBallOffenseBallDistSelection,dsl.basketball.BBallOffenseBhDistSelection,
# dsl.basketball.BBallDefenseBhDistSelection,
# dsl.basketball.BBallBhOneHotSelection,
# dsl.basketball.BBallScreenBhDistSelection,
# dsl.basketball.BBallScreenPaintSelection,
# dsl.basketball.BBallBallPaintSelection
#47
dsl.basketball.BBallBallSelection,dsl.basketball.BBallOffenseSelection,
dsl.basketball.BBallDefenseSelection,
dsl.basketball.BBallOffenseBallDistSelection,dsl.basketball.BBallOffenseBhDistSelection,
dsl.basketball.BBallOffenseBasketDistSelection,dsl.basketball.BBallDefenseBhDistSelection,
dsl.basketball.BBallOffensePaintSelection
]
}
CUSTOM_EDGE_COSTS = {
('list', 'list') : {},
('list', 'atom') : {},
('atom', 'atom') : {}
}
| 47.5625
| 114
| 0.606439
|
794f4fc847867791a413224615d64df971ac4d81
| 3,643
|
py
|
Python
|
utils/test/run-until-faulted.py
|
nathawes/swift-lldb
|
3cbf7470e0f9191ec1fc1c69ce8048c1dc64ec77
|
[
"Apache-2.0"
] | 427
|
2018-05-29T14:21:02.000Z
|
2022-03-16T03:17:54.000Z
|
utils/test/run-until-faulted.py
|
DalavanCloud/lldb
|
e913eaf2468290fb94c767d474d611b41a84dd69
|
[
"Apache-2.0"
] | 25
|
2018-07-23T08:34:15.000Z
|
2021-11-05T07:13:36.000Z
|
utils/test/run-until-faulted.py
|
DalavanCloud/lldb
|
e913eaf2468290fb94c767d474d611b41a84dd69
|
[
"Apache-2.0"
] | 52
|
2018-07-19T19:57:32.000Z
|
2022-03-11T16:05:38.000Z
|
#!/usr/bin/env python
"""
Run a program via lldb until it fails.
The lldb executable is located via your PATH env variable, if not specified.
"""
import os
import sys
from optparse import OptionParser
def is_exe(fpath):
"""Check whether fpath is an executable."""
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
def which(program):
"""Find the full path to a program, or return None."""
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def do_lldb_launch_loop(lldb_command, exe, exe_options):
from cStringIO import StringIO
import pexpect
import time
prompt = "\(lldb\) "
lldb = pexpect.spawn(lldb_command)
# Turn on logging for what lldb sends back.
lldb.logfile_read = sys.stdout
lldb.expect(prompt)
# Now issue the file command.
# print "sending 'file %s' command..." % exe
lldb.sendline('file %s' % exe)
lldb.expect(prompt)
# Loop until it faults....
count = 0
# while True:
# count = count + 1
for i in range(100):
count = i
# print "sending 'process launch -- %s' command... (iteration: %d)" %
# (exe_options, count)
lldb.sendline('process launch -- %s' % exe_options)
index = lldb.expect(['Process .* exited with status',
'Process .* stopped',
pexpect.TIMEOUT])
if index == 0:
# We'll try again later.
time.sleep(3)
elif index == 1:
# Perfect, our process had stopped; break out of the loop.
break
elif index == 2:
# Something went wrong.
print "TIMEOUT occurred:", str(lldb)
# Give control of lldb shell to the user.
lldb.interact()
def main():
# This is to set up the Python path to include the pexpect-2.4 dir.
# Remember to update this when/if things change.
scriptPath = sys.path[0]
sys.path.append(
os.path.join(
scriptPath,
os.pardir,
os.pardir,
'test',
'pexpect-2.4'))
parser = OptionParser(usage="""\
%prog [options]
Run a program via lldb until it fails.
The lldb executable is located via your PATH env variable, if not specified.\
""")
parser.add_option('-l', '--lldb-command',
type='string', action='store', metavar='LLDB_COMMAND',
default='lldb', dest='lldb_command',
help='Full path to your lldb command')
parser.add_option(
'-e',
'--executable',
type='string',
action='store',
dest='exe',
help="""(Mandatory) The executable to launch via lldb.""")
parser.add_option(
'-o',
'--options',
type='string',
action='store',
default='',
dest='exe_options',
help="""The args/options passed to the launched program, if specified.""")
opts, args = parser.parse_args()
lldb_command = which(opts.lldb_command)
if not opts.exe:
parser.print_help()
sys.exit(1)
exe = opts.exe
exe_options = opts.exe_options
# We have parsed the options.
print "lldb command:", lldb_command
print "executable:", exe
print "executable options:", exe_options
do_lldb_launch_loop(lldb_command, exe, exe_options)
if __name__ == '__main__':
main()
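# Illustrative invocation (not part of the original script); paths and options
# are placeholders.
#
#   ./run-until-faulted.py -l /usr/bin/lldb -e ./a.out -o "--some-arg value"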
| 27.80916
| 82
| 0.581938
|
794f50617f0951c2a9af58bdc0c1373c3d9d953f
| 10,788
|
py
|
Python
|
keepluggable/image_actions.py
|
nandoflorestan/keepluggable
|
046ad73befcf6b08ca8f8baf090989b9dd48cedc
|
[
"MIT"
] | 1
|
2016-12-17T14:25:28.000Z
|
2016-12-17T14:25:28.000Z
|
keepluggable/image_actions.py
|
nandoflorestan/keepluggable
|
046ad73befcf6b08ca8f8baf090989b9dd48cedc
|
[
"MIT"
] | null | null | null |
keepluggable/image_actions.py
|
nandoflorestan/keepluggable
|
046ad73befcf6b08ca8f8baf090989b9dd48cedc
|
[
"MIT"
] | 2
|
2015-07-08T13:07:47.000Z
|
2016-08-16T10:32:50.000Z
|
"""An Action class that deals with images."""
from copy import copy
from io import BytesIO
from typing import Any, BinaryIO, Dict, List, Union
# import imghdr # imghdr.what(file)
from kerno.pydantic import Pydantic, ReqStr
from pydantic import PositiveInt, validator
from PIL import Image, ExifTags
from keepluggable.actions import BaseFilesAction
from keepluggable.exceptions import FileNotAllowed
class ImageVersionConfig(Pydantic):
"""A part of the configuration."""
format: ReqStr
width: PositiveInt
height: PositiveInt
name: ReqStr
@validator("format")
def validate_format(cls, value: str) -> str:
"""Convert format to lower case."""
value = value.lower()
if value not in ("png", "jpeg", "gif"):
raise ValueError(f"Unknown format: {value}")
return value
@classmethod
def from_str(cls, line: str) -> "ImageVersionConfig":
"""Instantiate from a configuration line."""
parts = line.split()
assert (
len(parts) == 4
), f'The configuration line "{line}" should have 4 parts'
return cls(
format=parts[0],
width=parts[1],
height=parts[2],
name=parts[3],
)
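# Illustrative sketch (not part of the original module): each configuration
# line has the form "<format> <max-width> <max-height> <name>".
#
#   cfg = ImageVersionConfig.from_str("jpeg 1920 1920 hd")
#   assert (cfg.format, cfg.width, cfg.height, cfg.name) == ("jpeg", 1920, 1920, "hd")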
class ImageAction(BaseFilesAction):
"""A specialized Action class that deals with images.
It converts formats, rotates and resizes images etc.
To enable this action, use this configuration::
cls_action = keepluggable.image_actions.ImageAction
It inherits from BaseFilesAction, so read its documentation too.
**Installing Pillow**
To use this action, you need to install the Pillow imaging library::
sudo apt-get install libjpeg-dev zlib1g-dev libfreetype6-dev
# Create these links. If they already exist, remove and readd them:
sudo ln -s /usr/lib/x86_64-linux-gnu/libjpeg.so /usr/lib
sudo ln -s /usr/lib/x86_64-linux-gnu/libfreetype.so /usr/lib
sudo ln -s /usr/lib/x86_64-linux-gnu/libz.so /usr/lib
pip install Pillow
Pay attention to the desired supported formats near the end of the output::
*** TKINTER support not available
--- JPEG support available
*** OPENJPEG (JPEG2000) support not available
--- ZLIB (PNG/ZIP) support available
*** LIBTIFF support not available
--- FREETYPE2 support available
*** LITTLECMS2 support not available
*** WEBP support not available
*** WEBPMUX support not available
**Configuration settings**
- ``upload_must_be_img``: a boolean; if True, uploads will only be
accepted if they are image files. The default for this setting is False.
- ``store_original``: a boolean; if False, the original upload will
not have its payload stored. The metadata is always stored in an effort
to recognize repeated uploads of the same file. The default for this
setting is True.
- ``versions``: a list of image versions in the form
"format max-width max-height name"
- ``versions_quality`` (integer): the quality parameter to be passed
to the Pillow JPEG encoder. The default is 90.
Here is an example configuration::
[keepluggable_page_images]
# (...)
store_original = False
versions =
jpeg 3840 2160 4k
jpeg 1920 1920 hd
jpeg 960 960 half
jpeg 480 480 quarter
jpeg 240 240 vignette
versions_quality = 90
"""
EXIF_TAGS = {v: k for (k, v) in ExifTags.TAGS.items()} # str to int map
EXIF_ROTATION_FIX = {1: 0, 8: 90, 3: 180, 6: 270}
class Config(BaseFilesAction.Config):
"""Validated configuration for ``ImageAction``."""
upload_must_be_img: bool = False
store_original: bool = True
versions_quality: int = 90
versions: List[ImageVersionConfig]
@validator("versions", pre=True, each_item=False)
def validate_versions(
cls,
value: Union[List[ImageVersionConfig], str],
) -> List[ImageVersionConfig]:
"""Convert the configuration string into validated objects."""
if not isinstance(value, str):
return value
# Convert str to ImageVersionConfig
versions = []
for line in value.split("\n"):
line = line.strip()
if not line: # Ignore an empty line
continue
versions.append(ImageVersionConfig.from_str(line))
# We want to process from smaller to bigger:
versions.sort(key=lambda d: d.width)
return versions
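        # Illustrative note (added; not in the original source): with the
        # docstring's example setting
        #     versions =
        #         jpeg 1920 1920 hd
        #         jpeg 480 480 quarter
        # this validator returns ImageVersionConfig objects sorted by width,
        # so the 480px "quarter" entry comes before the 1920px "hd" entry.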
def _img_from_stream(
self,
bytes_io: BinaryIO,
metadata: Dict[str, Any],
) -> Image:
try:
img = Image.open(bytes_io)
except OSError:
raise FileNotAllowed(
'Unable to store the image "{}" because '
"the server is unable to identify the image format.".format(
metadata["file_name"]
)
)
img.bytes_io = bytes_io
return img
def _rotate_exif_orientation(self, img: Image) -> Image:
"""Rotate the image according to metadata in the payload.
Some cameras do not rotate the image, they just add orientation
metadata to the file, so we rotate it here.
"""
if not hasattr(img, "_getexif"):
return img # PIL.PngImagePlugin.PngImageFile apparently lacks EXIF
tags = img._getexif()
if tags is None:
return img
orientation = tags.get(self.EXIF_TAGS["Orientation"])
if orientation is None:
return img
degrees = self.EXIF_ROTATION_FIX.get(orientation)
rotated = img.rotate(degrees, expand=True) if degrees else img
return rotated
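    # Hedged example (added; not in the original module): a JPEG whose EXIF
    # Orientation tag is 6 needs a 90° clockwise turn for display; because
    # PIL's rotate() is counter-clockwise, EXIF_ROTATION_FIX maps 6 -> 270 and
    # the method above effectively performs
    #     >>> fixed = img.rotate(270, expand=True)
    # with expand=True growing the canvas instead of cropping the corners.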
def _store_versions(
self,
bytes_io: BinaryIO,
metadata: Dict[str, Any],
repo: Any,
) -> None:
# We override this method to deal with images.
is_image = metadata["mime_type"].startswith("image")
if not is_image:
if self.config.upload_must_be_img:
raise FileNotAllowed(
'The file name "{}" lacks a supported image extension, '
"so it was not stored.".format(metadata["file_name"])
)
else:
super()._store_versions(bytes_io, metadata, repo)
return
# # If you need to load the image after verify(), must reopen it
# bytes_io.seek(0)
original = self._img_from_stream(bytes_io, metadata) # may raise
original = self._rotate_exif_orientation(original)
# Probably don't need to verify() the image since we are loading it
# original.verify() # TODO What does this raise?
self._copy_img(original, metadata) # Try to raise before storing
# No exceptions were raised, so store the original file
metadata["image_width"], metadata["image_height"] = original.size
if self.config.store_original: # Optionally store original payload
self._store_file(bytes_io, metadata, repo)
else: # Always store original metadata
self._store_metadata(bytes_io, metadata)
# There is no point in enlarging an uploaded image, but some
# configured sizes might be larger. We want to create only the
# sizes smaller than the uploaded image, plus one (the original size).
largest_version_created_so_far = 0
original_area = original.size[0] * original.size[1]
new_versions = []
for version_config in self.config.versions:
current_area = version_config.width * version_config.height
if largest_version_created_so_far <= original_area:
# Do it
new_versions.append(
self._store_img_version( # may raise
original, metadata, version_config, repo
)
)
largest_version_created_so_far = current_area
metadata["versions"] = new_versions
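    # Worked example (added for illustration; not in the original source):
    # for a 1000x800 upload (area 800,000) and the docstring's example
    # versions (240/480/960/1920/3840), the loop above creates the "vignette",
    # "quarter" and "half" versions and then stops, because after "half" the
    # tracked limit area (960 * 960 = 921,600) already exceeds the original's.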
def _store_img_version(
self,
original: Image,
original_metadata: Dict[str, Any],
version_config: ImageVersionConfig,
repo: Any,
) -> Dict[str, Any]:
metadata = copy(original_metadata)
metadata["version"] = version_config.name
metadata["original_id"] = original_metadata["id"]
del metadata["id"]
img = self._convert_img(original, metadata, version_config)
# Store the new metadata and the new payload
self._store_file(img.stream, metadata, repo)
return metadata
def _copy_img(
self,
original: Image,
metadata: Dict[str, Any],
alpha: bool = True,
) -> Image:
mode = "RGBA" if alpha else "RGB"
try:
return original.convert(mode) # Create a copy
except OSError:
raise FileNotAllowed(
'Unable to store the image "{}" because '
"the server is unable to convert it.".format(
metadata["file_name"]
)
)
def _convert_img(
self,
original: Image,
metadata: Dict[str, Any],
version_config: ImageVersionConfig,
resample=Image.LANCZOS,
) -> Image:
"""Return a new image, converted from ``original``.
Do it using ``version_config`` and setting ``metadata``.
"""
fmt = version_config.format
# Resize, keeping the aspect ratio:
img = self._copy_img(original, metadata, alpha=fmt != "jpeg")
img.thumbnail((version_config.width, version_config.height), resample)
stream = BytesIO()
img.save(
stream,
format=fmt.upper(),
quality=self.config.versions_quality,
optimize=1,
)
img.stream = stream # so we can recover it elsewhere
# Fill in the metadata
metadata["mime_type"] = "image/" + fmt
metadata["image_width"], metadata["image_height"] = img.size
self._compute_length(stream, metadata)
self._compute_md5(stream, metadata)
return img
def _complement(self, metadata: Dict[str, Any]) -> Dict[str, Any]:
"""Omit the main *href* if we are not storing original images."""
metadata = super()._complement(metadata)
        # Drop the main *href* when this is an image and originals are not stored
if metadata.get("image_width") and not self.config.store_original:
del metadata["href"]
return metadata
| 35.370492
| 79
| 0.606136
|
794f51823b9aa22d680a31448d0fb03810ce4eec
| 944
|
py
|
Python
|
test/test_region_summary.py
|
NVE/python-varsom-avalanche-client
|
c7787bf070d8ea91efd3a2a9e7782eedd4961528
|
[
"MIT"
] | null | null | null |
test/test_region_summary.py
|
NVE/python-varsom-avalanche-client
|
c7787bf070d8ea91efd3a2a9e7782eedd4961528
|
[
"MIT"
] | null | null | null |
test/test_region_summary.py
|
NVE/python-varsom-avalanche-client
|
c7787bf070d8ea91efd3a2a9e7782eedd4961528
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Snøskredvarsel API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v5.0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import varsom_avalanche_client
from varsom_avalanche_client.models.region_summary import RegionSummary  # noqa: E501
from varsom_avalanche_client.rest import ApiException
class TestRegionSummary(unittest.TestCase):
"""RegionSummary unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testRegionSummary(self):
"""Test RegionSummary"""
# FIXME: construct object with mandatory attributes with example values
# model = varsom_avalanche_client.models.region_summary.RegionSummary() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 23.6
| 119
| 0.713983
|
794f5224402772f77868c4a17498fce0885e5385
| 3,786
|
py
|
Python
|
capture/noworkflow/now/utils/cross_version.py
|
raffaelfoidl/noworkflow
|
aa4ca189df24fec6c7abd32bcca6a097b21fdf31
|
[
"MIT"
] | 108
|
2015-02-04T14:16:51.000Z
|
2022-03-06T13:52:45.000Z
|
capture/noworkflow/now/utils/cross_version.py
|
raffaelfoidl/noworkflow
|
aa4ca189df24fec6c7abd32bcca6a097b21fdf31
|
[
"MIT"
] | 92
|
2015-01-19T14:58:06.000Z
|
2021-04-19T17:28:50.000Z
|
capture/noworkflow/now/utils/cross_version.py
|
raffaelfoidl/noworkflow
|
aa4ca189df24fec6c7abd32bcca6a097b21fdf31
|
[
"MIT"
] | 31
|
2015-03-03T23:53:59.000Z
|
2021-11-11T04:23:44.000Z
|
# Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
# Do not add from __future__ imports here
"""Provide support for both Python 2 and Python 3"""
import sys
import numbers
try:
from cStringIO import StringIO # pylint: disable=unused-import
except ImportError:
from io import StringIO
PY3 = (sys.version_info >= (3, 0))
if PY3:
import builtins # pylint: disable=wrong-import-position, unused-import
import pickle # pylint: disable=wrong-import-position, unused-import
import reprlib # pylint: disable=wrong-import-position, unused-import
from itertools import zip_longest # pylint: disable=wrong-import-position, unused-import
IMMUTABLE = (None.__class__, bool, numbers.Number, str, bytes)
string = (str, bytes) # pylint: disable=invalid-name
raw_bytes = (bytes, bytearray) # pylint: disable=invalid-name
else:
import __builtin__ as builtins # pylint: disable=wrong-import-position, unused-import, import-error
try:
import cPickle as pickle # pylint: disable=wrong-import-position, unused-import
except ImportError:
import pickle # pylint: disable=wrong-import-position, unused-import, ungrouped-imports
import repr as reprlib # pylint: disable=wrong-import-position, unused-import, import-error
from itertools import izip_longest as zip_longest # pylint: disable=wrong-import-position, unused-import, ungrouped-imports
IMMUTABLE = (None.__class__, bool, numbers.Number, basestring) # pylint: disable=invalid-name, undefined-variable
string = (basestring,) # pylint: disable=invalid-name, undefined-variable
raw_bytes = (str,) # pylint: disable=invalid-name
def cross_compile(*args, **kwargs):
"""Compile the source string into a code object
__future__ imports change the behavior of default compile function
This function just provides the 'compile' free of __future__ imports
"""
return compile(*args, **kwargs)
def bytes_string(text, encode="utf-8"):
"""Return a bytes object on Python 3 and a str object on Python 2"""
if not PY3:
if isinstance(text, unicode): # pylint: disable=undefined-variable
result = text.encode(encode)
else:
result = text
else:
if isinstance(text, bytes):
result = text
else:
result = bytes(text, encode)
return result
def default_string(text, encode="utf-8"):
"""Return a unicode object on Python 3 and a bytes object on Python 2"""
if not PY3:
if isinstance(text, unicode): # pylint: disable=undefined-variable
result = text.encode(encode)
else:
result = text
else:
if isinstance(text, bytes):
result = text.decode(encode)
else:
result = text
return result
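# Hedged doctest-style sketch (added; not in the original file), assuming
# Python 3 (PY3 is True):
#     >>> bytes_string("café")
#     b'caf\xc3\xa9'
#     >>> default_string(b"caf\xc3\xa9")
#     'café'
# On Python 2 both helpers return byte strings (str) instead.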
| 47.924051
| 154
| 0.544638
|
794f5243f54f0804ec162bec691a557c23883c30
| 773
|
py
|
Python
|
shared/charge_controller_tcp_driver/exemple_driver.py
|
EDF-Lab/EDF
|
3ab2d9e1820dfb713bbd54c91ba72d7d32d998f9
|
[
"MIT"
] | 16
|
2022-02-11T14:49:04.000Z
|
2022-03-30T07:33:45.000Z
|
shared/charge_controller_tcp_driver/exemple_driver.py
|
EDF-Lab/EDF
|
3ab2d9e1820dfb713bbd54c91ba72d7d32d998f9
|
[
"MIT"
] | 1
|
2022-02-16T15:23:50.000Z
|
2022-02-21T15:30:21.000Z
|
shared/charge_controller_tcp_driver/exemple_driver.py
|
EDF-Lab/EDF
|
3ab2d9e1820dfb713bbd54c91ba72d7d32d998f9
|
[
"MIT"
] | 1
|
2022-03-24T10:52:28.000Z
|
2022-03-24T10:52:28.000Z
|
import sys
sys.path.append("..")
import time
from charge_controller_tcp_driver.charge_controller_tcp_client_helper import *
if __name__ == '__main__':
helper = ChargeControllerTCPClientHelper("169.254.43.3", 12500)
time.sleep(3)
helper.set_pwm(100)
print("PWM:", helper.get_pwm())
#time.sleep(10)
#helper.set_ev_state("A")
#print("EV State: ", helper.get_ev_state())
time.sleep(10)
helper.set_pwm(50)
time.sleep(2)
print("PWM:", helper.get_pwm())
#print("EV State: ", helper.get_ev_state())
time.sleep(1)
#helper.set_pwm(50)
#print("PWM:", helper.get_pwm())
time.sleep(10)
helper.set_pwm(30)
time.sleep(2)
print("PWM:", helper.get_pwm())
# print("EV State: ", helper.get_ev_state())
| 24.15625
| 78
| 0.648124
|
794f524b73ae6229ac647da7e98bc64ee31d4e67
| 16,617
|
py
|
Python
|
tenable/sc/audit_files.py
|
csanders-git/pyTenable
|
dea25ba02b049bfe3a8cd151c155c3ccf9b2a285
|
[
"MIT"
] | null | null | null |
tenable/sc/audit_files.py
|
csanders-git/pyTenable
|
dea25ba02b049bfe3a8cd151c155c3ccf9b2a285
|
[
"MIT"
] | null | null | null |
tenable/sc/audit_files.py
|
csanders-git/pyTenable
|
dea25ba02b049bfe3a8cd151c155c3ccf9b2a285
|
[
"MIT"
] | null | null | null |
'''
audit_files
===========
The following methods allow for interaction into the Tenable.sc
:sc-api:`Audit File <AuditFile.html>` API and the
:sc-api:`Audit File Template <AuditFile-Template.html>` API. These items are
typically seen under the **Scans: Audit Files** section of Tenable.sc.
Methods available on ``sc.audit_files``:
.. rst-class:: hide-signature
.. autoclass:: AuditFileAPI
.. automethod:: create
.. automethod:: delete
.. automethod:: details
.. automethod:: edit
.. automethod:: list
.. automethod:: template_categories
.. automethod:: template_details
.. automethod:: template_list
'''
from .base import SCEndpoint
from io import BytesIO
from os.path import basename
class AuditFileAPI(SCEndpoint):
def _constructor(self, **kw):
'''
        Handles parsing the keywords and returns an audit file definition document
'''
if 'name' in kw:
# Validate that the name parameter is a string.
self._check('name', kw['name'], str)
if 'description' in kw:
# Validate that the description parameter is a string,
self._check('description', kw['description'], str)
if 'type' in kw:
# Validate that the type is one of the 3 possible audit file types:
# "", "scapWindows", or "scapLinux".
self._check('type', kw['type'], str,
choices=['', 'scapWindows', 'scapLinux'])
if 'template' in kw:
# Convert the template parameter into the auditFileTemplate
# sub-document and verify that the input is an integer value.
kw['auditFileTemplate'] = {'id': self._check(
'template', kw['template'], int)}
del(kw['template'])
if 'vars' in kw:
            # expand the vars dict into a series of key/value documents.
kw['variables'] = [{
'name': self._check('var:name', k, str),
'value': self._check('var:value', v, str)
} for k,v in self._check('vars', kw['vars'], dict).items()]
del(kw['vars'])
if 'filename' in kw:
# Validate that the filename parameter is a string.
self._check('filename', kw['filename'], str)
if 'orig_filename' in kw:
            # validate that the original_filename parameter is of type string and
# then store it in the CamelCase equiv:
kw['originalFilename'] = self._check(
'orig_filename', kw['orig_filename'], str)
del(kw['orig_filename'])
if 'version' in kw:
# Validate that the version parameter is of type string and falls
# within the expected range of values
self._check('version', kw['version'], str,
choices=['1.0', '1.1', '1.2'])
if 'benchmark' in kw:
# Validate that the benchmark name is a string and then store it
# in the benchmarkName attribute.
kw['benchmarkName'] = self._check('benchmark', kw['benchmark'], str)
del(kw['benchmark'])
if 'profile' in kw:
# Validate that the profile name is a string and then store it in
# the profileName attribute.
kw['profileName'] = self._check('profile', kw['profile'], str)
del(kw['profile'])
if 'data_stream' in kw:
# Validate that the profile_stream attribute is a string and then
# store it in the dataStreamName attribute.
kw['dataStreamName'] = self._check(
'data_stream', kw['data_stream'], str)
del(kw['data_stream'])
if 'tailoring_filename' in kw:
kw['tailoringFilename'] = self._check(
'tailoring_filename', kw['tailoring_filename'], str)
del(kw['tailoring_filename'])
if 'tailoring_orig_filename' in kw:
kw['tailoringOriginalFilename'] = self._check(
'tailoring_orig_filename', kw['tailoring_orig_filename'], str)
del(kw['tailoring_orig_filename'])
return kw
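    # Hedged illustration (added; not in the original module): passing a
    # hypothetical variable such as
    #     vars={'BANNER_TEXT': 'prod'}
    # through _constructor() rewrites it as
    #     variables=[{'name': 'BANNER_TEXT', 'value': 'prod'}]
    # which is the key/value document shape sent to the auditFile endpoint.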
def create(self, name, audit_file=None, tailoring_file=None, **kw):
'''
        Creates an audit file.
:sc-api:`audit file: create <AuditFile.html#auditFile_POST>`
Args:
name (str):
The name of the audit file.
audit_file (FileObject, optional):
The file-like object containing the audit file if uploading a
custom audit file.
benchmark (str, optional):
When the type is set to either SCAP datatype, this specifies the
name of the benchmark.
data_stream (str, optional):
When using version 1.2 of either SCAP datatype, you must specify
the name of the data stream.
description (str, optional):
                A description for the audit file.
profile (str, optional):
When the type is set to either SCAP datatype, this specifies the
name of the profile.
tailoring_file (FileObject, optional):
When the SCAP version is set to 1.2, this tailoring file can
optionally be provided.
template (int, optional):
                The audit file template id to use. If using a template, then no
file is uploaded.
type (str, optional):
The type of audit file to upload. Generally only used when
uploading SCAP content as it will default to the Tenable-created
audit-file format. Supported SCAP values are ``scapWindows``
and ``scapLinux``.
vars (dict, optional):
If a template is specified, then this dictionary specifies the
parameters within the template to customize and what those
values should be. The values are provided within the template
definition.
version (str, optional):
When specifying a SCAP datatype, this informs Tenable.sc what
version of SCAP this audit checklist is. Supported values are
``1.0``, ``1.1``, and ``1.2``.
Returns:
:obj:`dict`:
The newly created audit file.
Examples:
>>> audit = sc.audit_files.create()
'''
kw['name'] = name
        # Upload and store the relevant information on the audit file that has
# been provided.
if audit_file:
if hasattr(audit_file, 'name'):
kw['orig_filename'] = basename(audit_file.name)
kw['filename'] = self._api.files.upload(audit_file)
        # Upload and store the relevant information on the tailoring file that
# has been provided.
if tailoring_file:
if hasattr(tailoring_file, 'name'):
kw['tailoring_orig_filename'] = basename(tailoring_file.name)
kw['tailoring_filename'] = self._api.files.upload(tailoring_file)
payload = self._constructor(**kw)
return self._api.post('auditFile', json=payload).json()['response']
def details(self, id, fields=None):
'''
Returns the details for a specific audit file.
:sc-api:`audit file: details <AuditFile.html#AuditFileRESTReference-/auditFile/{id}>`
Args:
id (int): The identifier for the audit file.
fields (list, optional): A list of attributes to return.
Returns:
:obj:`dict`:
The audit file resource record.
Examples:
>>> audit = sc.audit_files.details(1)
>>> pprint(audit)
'''
params = dict()
if fields:
params['fields'] = ','.join([self._check('field', f, str) for f in fields])
return self._api.get('auditFile/{}'.format(self._check('id', id, int)),
params=params).json()['response']
def edit(self, id, audit_file=None, tailoring_file=None, **kw):
'''
        Edits an audit file.
:sc-api:`audit file: edit <AuditFile.html#auditFile_id_PATCH>`
Args:
audit_file (FileObject, optional):
The file-like object containing the audit file if uploading a
custom audit file.
benchmark (str, optional):
When the type is set to either SCAP datatype, this specifies the
name of the benchmark.
data_stream (str, optional):
When using version 1.2 of either SCAP datatype, you must specify
the name of the data stream.
description (str, optional):
                A description for the audit file.
name (str, optional):
The name of the audit file.
profile (str, optional):
When the type is set to either SCAP datatype, this specifies the
name of the profile.
tailoring_file (FileObject, optional):
When the SCAP version is set to 1.2, this tailoring file can
optionally be provided.
template (int, optional):
                The audit file template id to use. If using a template, then no
file is uploaded.
type (str, optional):
The type of audit file to upload. Generally only used when
uploading SCAP content as it will default to the Tenable-created
audit-file format. Supported SCAP values are ``scapWindows``
and ``scapLinux``.
vars (dict, optional):
If a template is specified, then this dictionary specifies the
parameters within the template to customize and what those
values should be. The values are provided within the template
definition.
version (str, optional):
When specifying a SCAP datatype, this informs Tenable.sc what
version of SCAP this audit checklist is. Supported values are
``1.0``, ``1.1``, and ``1.2``.
Returns:
:obj:`dict`:
The newly updated audit file.
Examples:
>>> audit = sc.audit_files.edit()
'''
        # Upload and store the relevant information on the audit file that has
# been provided.
if audit_file:
if hasattr(audit_file, 'name'):
kw['orig_filename'] = basename(audit_file.name)
kw['filename'] = self._api.files.upload(audit_file)
        # Upload and store the relevant information on the tailoring file that
# has been provided.
if tailoring_file:
if hasattr(tailoring_file, 'name'):
kw['tailoring_orig_filename'] = basename(tailoring_file.name)
kw['tailoring_filename'] = self._api.files.upload(tailoring_file)
payload = self._constructor(**kw)
return self._api.patch('auditFile/{}'.format(
self._check('id', id, int)), json=payload).json()['response']
def delete(self, id):
'''
        Removes an audit file.
:sc-api:`audit file: delete <AuditFile.html#auditFile_id_DELETE>`
Args:
id (int): The numeric identifier for the audit file to remove.
Returns:
:obj:`str`:
An empty response.
Examples:
>>> sc.audit_files.delete(1)
'''
return self._api.delete('auditFile/{}'.format(
self._check('id', id, int))).json()['response']
def list(self, fields=None):
'''
Retrieves the list of audit file definitions.
:sc-api:`audit file: list <AuditFile.html#AuditFileRESTReference-/auditFile>`
Args:
fields (list, optional):
A list of attributes to return for each audit file.
Returns:
:obj:`list`:
A list of audit file resources.
Examples:
>>> for audit in sc.audit_files.list():
... pprint(audit)
'''
params = dict()
if fields:
params['fields'] = ','.join([self._check('field', f, str)
for f in fields])
return self._api.get('auditFile', params=params).json()['response']
def export_audit(self, id, fobj=None):
'''
Exports an Audit File.
:sc-api:`audit file: export <AuditFile.html#AuditFileRESTReference-/auditFile/{id}/export>`
Args:
id (int): The audit file numeric identifier.
fobj (FileObject, optional):
The file-like object to write the resulting file into. If
no file-like object is provided, a BytesIO objects with the
downloaded file will be returned. Be aware that the default
option of using a BytesIO object means that the file will be
stored in memory, and it's generally recommended to pass an
actual file-object to write to instead.
Returns:
:obj:`FileObject`:
The file-like object with the resulting zipped report.
Examples:
>>> with open('example.zip', 'wb') as fobj:
... sc.audit_files.export_audit(1, fobj)
'''
resp = self._api.get('auditFile/{}/export'.format(
self._check('id', id, int)), stream=True)
# if no file-like object was passed, then we will instantiate a BytesIO
# object to push the file into.
if not fobj:
fobj = BytesIO()
# Lets stream the file into the file-like object...
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
fobj.write(chunk)
fobj.seek(0)
resp.close()
return fobj
def template_categories(self):
'''
Returns the audit file template categories
:sc-api:`audit template: categories <AuditFile-Template.html#auditFileTemplate_categories_GET>`
Returns:
:obj:`list`:
List of audit file category listing dicts.
        Examples:
            >>> for cat in sc.audit_files.template_categories():
... pprint(cat)
'''
return self._api.get('auditFileTemplate/categories').json()['response']
def template_details(self, id, fields=None):
'''
Returns the details for the specified audit file template id.
:sc-api:`audit template: details <AuditFile-Template.html#auditFileTemplate_id_GET>`
Args:
id (int):
The numeric identifier for the audit file template.
fields (list, optional):
A list of attributes to return.
Returns:
:obj:`dict`:
The audit file template record.
        Examples:
>>> tmpl = sc.audit_files.template_details(1)
'''
params = dict()
if fields:
params['fields'] = ','.join([self._check('field', f, str)
for f in fields])
return self._api.get('auditFileTemplate/{}'.format(
self._check('id', id, int)), params=params).json()['response']
def template_list(self, category=None, search=None, fields=None):
'''
Returns the list of audit file templates.
:sc-api:`audit templates: list <AuditFile-Template.html#AuditFileTemplateRESTReference-/auditFileTemplate>`
Args:
category (int, optional):
Restrict the results to only the specified category id.
fields (list, optional):
A list of attributes to return.
search (str, optional):
Restrict the response to only audit file names that match the
search string specified.
Returns:
:obj:`list`:
List of audit file records.
        Examples:
>>> for tmpl in sc.audit_files.template_list():
... pprint(tmpl)
'''
params = dict()
if category:
params['categoryID'] = self._check('category', category, int)
if search:
params['searchString'] = self._check('search', search, str)
if fields:
params['fields'] = ','.join([self._check('field', f, str)
for f in fields])
return self._api.get('auditFileTemplate',
params=params).json()['response']
| 37.851936
| 115
| 0.56719
|
794f5349f2ed8c33b2b08d10c63098365b926d72
| 1,961
|
py
|
Python
|
config/settings/local.py
|
jvosk/repork
|
ca49095ec4b13c0b60eb909fa3349b093f8a3479
|
[
"BSD-3-Clause"
] | null | null | null |
config/settings/local.py
|
jvosk/repork
|
ca49095ec4b13c0b60eb909fa3349b093f8a3479
|
[
"BSD-3-Clause"
] | null | null | null |
config/settings/local.py
|
jvosk/repork
|
ca49095ec4b13c0b60eb909fa3349b093f8a3479
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!)+)4$xoj$8b9as#lnt_yth$kv7x@c2nty!4g%le^2lcm0)h*8(')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
| 31.126984
| 110
| 0.498725
|
794f548b0df22fff167b16cbf8237dd07f240c48
| 14,861
|
py
|
Python
|
cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_fake_objects.py
|
cloudification-io/cinder
|
23d76e01f2b4f3771b57fb287084a4884238b827
|
[
"Apache-2.0"
] | 571
|
2015-01-01T17:47:26.000Z
|
2022-03-23T07:46:36.000Z
|
cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_fake_objects.py
|
dFarui/cinder
|
b2922384054ddbd46e071fd07372a75a21d7f85d
|
[
"Apache-2.0"
] | 37
|
2015-01-22T23:27:04.000Z
|
2021-02-05T16:38:48.000Z
|
cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_fake_objects.py
|
dFarui/cinder
|
b2922384054ddbd46e071fd07372a75a21d7f85d
|
[
"Apache-2.0"
] | 841
|
2015-01-04T17:17:11.000Z
|
2022-03-31T12:06:51.000Z
|
# Copyright (c) 2020 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import requests
from cinder import exception
from cinder.tests.unit.volume.drivers.dell_emc.powermax import (
powermax_data as tpd)
class FakeLookupService(object):
def get_device_mapping_from_network(self, initiator_wwns, target_wwns):
return tpd.PowerMaxData.device_map
class FakeResponse(object):
def __init__(self, status_code, return_object):
self.status_code = status_code
self.return_object = return_object
def json(self):
if self.return_object:
return self.return_object
else:
raise ValueError
def get_status_code(self):
return self.status_code()
def raise_for_status(self):
if 200 <= self.status_code <= 204:
return False
else:
return True
class FakeRequestsSession(object):
def __init__(self, *args, **kwargs):
self.data = tpd.PowerMaxData()
def request(self, method, url, params=None, data=None):
return_object = ''
status_code = 200
if method == 'GET':
status_code, return_object = self._get_request(url, params)
elif method == 'POST' or method == 'PUT':
status_code, return_object = self._post_or_put(url, data)
elif method == 'DELETE':
status_code, return_object = self._delete(url)
elif method == 'TIMEOUT':
raise requests.Timeout
elif method == 'EXCEPTION':
raise Exception
elif method == 'CONNECTION':
raise requests.ConnectionError
elif method == 'HTTP':
raise requests.HTTPError
elif method == 'SSL':
raise requests.exceptions.SSLError
elif method == 'EXCEPTION':
raise exception.VolumeBackendAPIException
return FakeResponse(status_code, return_object)
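    # Illustrative note (added; not in the original test module): the unit
    # tests substitute this class for requests.Session, so a hypothetical
    #     >>> s = FakeRequestsSession()
    #     >>> r = s.request('GET', 'https://u4p/sloprovisioning/sym/01/srp/SRP_1')
    #     >>> r.status_code, r.json() == s.data.srp_details
    #     (200, True)
    # is answered from canned tpd.PowerMaxData instead of a live Unisphere.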
def _get_request(self, url, params):
status_code = 200
return_object = None
if self.data.failed_resource in url:
status_code = 500
return_object = self.data.job_list[2]
elif 'sloprovisioning' in url:
if 'volume' in url:
return_object = self._sloprovisioning_volume(url, params)
elif 'storagegroup' in url:
return_object = self._sloprovisioning_sg(url)
elif 'maskingview' in url:
return_object = self._sloprovisioning_mv(url)
elif 'portgroup' in url:
return_object = self._sloprovisioning_pg(url)
elif 'host' in url:
return_object = self._sloprovisioning_ig(url)
elif 'initiator' in url:
return_object = self._sloprovisioning_initiator(url)
elif 'service_level_demand_report' in url:
return_object = self.data.srp_slo_details
elif 'srp' in url:
return_object = self.data.srp_details
elif 'workloadtype' in url:
return_object = self.data.workloadtype
elif 'compressionCapable' in url:
return_object = self.data.compression_info
elif 'slo' in url:
return_object = self.data.powermax_slo_details
elif 'replication' in url:
return_object = self._replication(url)
elif 'system' in url:
if 'director' in url:
url_split = url.split('/')
if 'port' in url_split[-1]:
return_object = self._system_port_list(url)
elif url_split[-2] == 'port':
return_object = self._system_port_detail(url)
else:
return_object = self._system(url)
elif 'headroom' in url:
return_object = self.data.headroom
elif 'performance' in url:
if 'Array' in url:
if 'registrationdetails' in url:
return_object = self._performance_registration(url)
if 'keys' in url:
return_object = self.data.array_keys
return status_code, return_object
def _sloprovisioning_volume(self, url, params):
return_object = self.data.volume_list[2]
if '/private' in url:
return_object = self.data.private_vol_details
elif params:
if '1' in params.values():
return_object = self.data.volume_list[0]
elif '2' in params.values():
return_object = self.data.volume_list[1]
else:
for vol in self.data.volume_details:
if vol['volumeId'] in url:
return_object = vol
break
return return_object
def _sloprovisioning_sg(self, url):
return_object = self.data.sg_list
for sg in self.data.sg_details:
if sg['storageGroupId'] in url:
return_object = sg
break
return return_object
def _sloprovisioning_mv(self, url):
if self.data.masking_view_name_i in url:
return_object = self.data.maskingview[1]
else:
return_object = self.data.maskingview[0]
return return_object
def _sloprovisioning_pg(self, url):
return_object = None
for pg in self.data.portgroup:
if pg['portGroupId'] in url:
return_object = pg
break
return return_object
def _system_port_detail(self, url):
return_object = None
for port in self.data.port_list:
if port['symmetrixPort']['symmetrixPortKey']['directorId'] in url:
return_object = port
break
return return_object
@staticmethod
def _system_port_list(url):
url_split = url.split('/')
return {'symmetrixPortKey': [{'directorId': url_split[-2],
'portId': '1'}]}
def _sloprovisioning_ig(self, url):
return_object = None
for ig in self.data.inititiatorgroup:
if ig['hostId'] in url:
return_object = ig
break
return return_object
def _sloprovisioning_initiator(self, url):
return_object = self.data.initiator_list[2]
if self.data.wwpn1 in url:
return_object = self.data.initiator_list[0]
elif self.data.initiator in url:
return_object = self.data.initiator_list[1]
return return_object
def _replication(self, url):
return_object = None
if 'storagegroup' in url:
return_object = self._replication_sg(url)
elif 'rdf_group' in url:
if self.data.device_id in url:
return_object = self.data.rdf_group_vol_details
elif self.data.rdf_group_no_1 in url:
return_object = self.data.rdf_group_details
else:
return_object = self.data.rdf_group_list
elif 'snapshot' in url:
return_object = self.data.volume_snap_vx
elif 'capabilities' in url:
return_object = self.data.capabilities
return return_object
def _replication_sg(self, url):
return_object = None
if 'snapid' in url:
return_object = self.data.group_snap_vx
elif 'rdf_group' in url:
for sg in self.data.sg_rdf_details:
if sg['storageGroupName'] in url:
return_object = sg
break
elif 'storagegroup' in url:
return_object = self.data.sg_details_rep[0]
return return_object
def _system(self, url):
return_object = None
if 'job' in url:
for job in self.data.job_list:
if job['jobId'] in url:
return_object = job
break
elif 'info' in url:
return_object = self.data.version_details
elif 'tag' in url:
return_object = []
else:
for symm in self.data.symmetrix:
if symm['symmetrixId'] in url:
return_object = symm
break
return return_object
@staticmethod
def _performance_registration(url):
url_split = url.split('/')
array_id = url_split[-1]
return {"registrationDetailsInfo": [
{"symmetrixId": array_id, "realtime": True, "message": "Success",
"collectionintervalmins": 5, "diagnostic": True}]}
def _post_or_put(self, url, payload):
return_object = self.data.job_list[0]
status_code = 201
if 'performance' in url:
if 'PortGroup' in url:
if 'metrics' in url:
return 200, self.data.dummy_performance_data
elif 'FEPort' in url:
if 'metrics' in url:
return 200, self.data.dummy_performance_data
elif 'realtime' in url:
if 'metrics' in url:
return 200, self.data.dummy_performance_data
elif self.data.failed_resource in url:
status_code = 500
return_object = self.data.job_list[2]
elif payload:
payload = ast.literal_eval(payload)
if self.data.failed_resource in payload.values():
status_code = 500
return_object = self.data.job_list[2]
if payload.get('executionOption'):
status_code = 202
return status_code, return_object
def _delete(self, url):
if self.data.failed_resource in url:
status_code = 500
return_object = self.data.job_list[2]
else:
status_code = 204
return_object = None
return status_code, return_object
def session(self):
return FakeRequestsSession()
def close(self):
pass
class FakeConfiguration(object):
def __init__(self, emc_file=None, volume_backend_name=None,
interval=0, retries=0, replication_device=None, **kwargs):
self.cinder_dell_emc_config_file = emc_file
self.interval = interval
self.retries = retries
self.volume_backend_name = volume_backend_name
self.config_group = volume_backend_name
self.filter_function = None
self.goodness_function = None
self.san_is_local = False
if replication_device:
self.replication_device = replication_device
for key, value in kwargs.items():
if 'san_' in key:
self.set_san_config_options(key, value)
            elif 'powermax_' in key and '_name_template' in key:
self.set_host_name_template_config_options(key, value)
elif 'powermax_' in key:
self.set_powermax_config_options(key, value)
elif 'chap_' in key:
self.set_chap_config_options(key, value)
elif 'driver_ssl_cert' in key:
self.set_ssl_cert_config_options(key, value)
elif 'u4p_' in key:
self.set_u4p_failover_config_options(key, value)
elif 'load_' in key:
self.set_performance_config_options(key, value)
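    # Hedged example (added; not in the original module): keyword arguments
    # are fanned out to attributes by prefix, e.g.
    #     >>> conf = FakeConfiguration(None, 'POWERMAX_FC_1',
    #     ...                          san_ip='1.2.3.4', powermax_srp='SRP_1')
    #     >>> conf.san_ip, conf.powermax_srp, conf.safe_get('missing')
    #     ('1.2.3.4', 'SRP_1', None)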
def set_san_config_options(self, key, value):
if key == 'san_login':
self.san_login = value
elif key == 'san_password':
self.san_password = value
elif key == 'san_ip':
self.san_ip = value
elif key == 'san_api_port':
self.san_api_port = value
def set_powermax_config_options(self, key, value):
if key == 'powermax_srp':
self.powermax_srp = value
elif key == 'powermax_service_level':
self.powermax_service_level = value
elif key == 'powermax_workload':
self.powermax_workload = value
elif key == 'powermax_port_groups':
self.powermax_port_groups = value
elif key == 'powermax_array':
self.powermax_array = value
def set_chap_config_options(self, key, value):
if key == 'use_chap_auth':
self.use_chap_auth = value
elif key == 'chap_username':
self.chap_username = value
elif key == 'chap_password':
self.chap_password = value
def set_ssl_cert_config_options(self, key, value):
if key == 'driver_ssl_cert_verify':
self.driver_ssl_cert_verify = value
elif key == 'driver_ssl_cert_path':
self.driver_ssl_cert_path = value
def set_u4p_failover_config_options(self, key, value):
if key == 'u4p_failover_target':
self.u4p_failover_target = value
elif key == 'u4p_failover_backoff_factor':
self.u4p_failover_backoff_factor = value
elif key == 'u4p_failover_retries':
self.u4p_failover_retries = value
elif key == 'u4p_failover_timeout':
self.u4p_failover_timeout = value
elif key == 'u4p_primary':
self.u4p_primary = value
def set_host_name_template_config_options(self, key, value):
if key == 'powermax_short_host_name_template':
self.powermax_short_host_name_template = value
elif key == 'powermax_port_group_name_template':
self.powermax_port_group_name_template = value
def set_performance_config_options(self, key, value):
if key == 'load_balance':
self.load_balance = value
elif key == 'load_balance_real_time':
self.load_balance_real_time = value
elif key == 'load_data_format':
self.load_data_format = value
elif key == 'load_look_back':
self.load_look_back = value
elif key == 'load_look_back_real_time':
self.load_look_back_real_time = value
elif key == 'port_group_load_metric':
self.port_group_load_metric = value
elif key == 'port_load_metric':
self.port_load_metric = value
def safe_get(self, key):
try:
return getattr(self, key)
except Exception:
return None
def append_config_values(self, values):
pass
| 35.21564
| 78
| 0.594913
|
794f54c684b625e922499ef475abfc5d24d42d20
| 1,351
|
py
|
Python
|
personal_env/lib/python3.8/site-packages/pylint/message/message.py
|
jestinmwilson/personal-website
|
6e47a7f33ed3b1ca5c1d42c89c5380d22992ed74
|
[
"MIT"
] | null | null | null |
personal_env/lib/python3.8/site-packages/pylint/message/message.py
|
jestinmwilson/personal-website
|
6e47a7f33ed3b1ca5c1d42c89c5380d22992ed74
|
[
"MIT"
] | null | null | null |
personal_env/lib/python3.8/site-packages/pylint/message/message.py
|
jestinmwilson/personal-website
|
6e47a7f33ed3b1ca5c1d42c89c5380d22992ed74
|
[
"MIT"
] | null | null | null |
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
import collections
from pylint.constants import MSG_TYPES
_MsgBase = collections.namedtuple(
"_MsgBase",
[
"msg_id",
"symbol",
"msg",
"C",
"category",
"confidence",
"abspath",
"path",
"module",
"obj",
"line",
"column",
],
)
class Message(_MsgBase):
"""This class represent a message to be issued by the reporters"""
def __new__(cls, msg_id, symbol, location, msg, confidence):
return _MsgBase.__new__(
cls,
msg_id,
symbol,
msg,
msg_id[0],
MSG_TYPES[msg_id[0]],
confidence,
*location
)
def format(self, template):
"""Format the message according to the given template.
The template format is the one of the format method :
cf. https://docs.python.org/2/library/string.html#formatstrings
"""
# For some reason, _asdict on derived namedtuples does not work with
# Python 3.4. Needs some investigation.
return template.format(**dict(zip(self._fields, self)))
| 25.980769
| 81
| 0.551443
|
794f552bf82dc1da604c4109ebc4006f221c0870
| 7,116
|
py
|
Python
|
services/users/project/tests/test_users.py
|
bill-door/testdriven-app
|
26c6718388980415c8fb4110a9b143925798e135
|
[
"MIT"
] | null | null | null |
services/users/project/tests/test_users.py
|
bill-door/testdriven-app
|
26c6718388980415c8fb4110a9b143925798e135
|
[
"MIT"
] | null | null | null |
services/users/project/tests/test_users.py
|
bill-door/testdriven-app
|
26c6718388980415c8fb4110a9b143925798e135
|
[
"MIT"
] | null | null | null |
# services/users/project/tests/test_users.py
import json
import unittest
from project import db
from project.api.models import User
from project.tests.base import BaseTestCase
def add_user(username, email):
user = User(username=username, email=email)
db.session.add(user)
db.session.commit()
return user
class TestUserService(BaseTestCase):
"""Tests for the Users Service."""
def test_users(self):
"""Ensure the /ping route behaves correctly."""
response = self.client.get('/users/ping')
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 200)
self.assertIn('pong!', data['message'])
self.assertIn('success', data['status'])
def test_add_user(self):
"""Ensure a new user can be added to the database."""
with self.client:
response = self.client.post(
'/users',
data=json.dumps({
'username': 'mcgrath',
'email': 'mcg@gmail.com'
}),
content_type='application/json',
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 201)
self.assertIn('mcg@gmail.com was added!', data['message'])
self.assertIn('success', data['status'])
def test_add_user_invalid_json(self):
"""Ensure error is thrown if the JSON object is empty."""
with self.client:
response = self.client.post(
'/users',
data=json.dumps({}),
content_type='application/json',
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 400)
self.assertIn('Invalid payload.', data['message'])
self.assertIn('fail', data['status'])
def test_add_user_invalid_json_keys(self):
"""
Ensure error is thrown if the JSON object does not have a username key.
"""
with self.client:
response = self.client.post(
'/users',
data=json.dumps({'email': 'mcg@gmail.com'}),
content_type='application/json',
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 400)
self.assertIn('Invalid payload.', data['message'])
self.assertIn('fail', data['status'])
def test_add_user_duplicate_email(self):
"""Ensure error is thrown if the email already exists."""
with self.client:
self.client.post(
'/users',
data=json.dumps({
'username': 'mcgrath',
'email': 'mcg@gmail.com'
}),
content_type='application/json',
)
response = self.client.post(
'/users',
data=json.dumps({
'username': 'mcgrath',
'email': 'mcg@gmail.com'
}),
content_type='application/json',
)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 400)
self.assertIn(
'Sorry. That email already exists.', data['message'])
self.assertIn('fail', data['status'])
def test_single_user(self):
"""Ensure get single user behaves correctly."""
user = add_user('mcgrath', 'mcg@gmail.com')
with self.client:
response = self.client.get(f'/users/{user.id}')
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 200)
self.assertIn('mcgrath', data['data']['username'])
self.assertIn('mcg@gmail.com', data['data']['email'])
self.assertIn('success', data['status'])
def test_single_user_no_id(self):
"""Ensure error is thrown if an id is not provided."""
with self.client:
response = self.client.get('/users/blah')
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 404)
self.assertIn('User does not exist', data['message'])
self.assertIn('fail', data['status'])
def test_single_user_incorrect_id(self):
"""Ensure error is thrown if the id does not exist."""
with self.client:
response = self.client.get('/users/999')
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 404)
self.assertIn('User does not exist', data['message'])
self.assertIn('fail', data['status'])
def test_all_users(self):
"""Ensure get all users behaves correctly."""
add_user('mcgrath', 'mcg@gmail.com')
add_user('homer', 'homer@simpsons.com')
with self.client:
response = self.client.get('/users')
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 200)
self.assertEqual(len(data['data']['users']), 2)
self.assertIn('mcgrath', data['data']['users'][0]['username'])
self.assertIn(
'mcg@gmail.com', data['data']['users'][0]['email'])
self.assertIn('homer', data['data']['users'][1]['username'])
self.assertIn(
'homer@simpsons.com', data['data']['users'][1]['email'])
self.assertIn('success', data['status'])
def test_main_no_users(self):
"""Ensure the main route behaves correctly when no users have been
added to the database."""
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
self.assertIn(b'All Users', response.data)
self.assertIn(b'<p>No users!</p>', response.data)
def test_main_with_users(self):
"""Ensure the main route behaves correctly when users have been
added to the database."""
add_user('mcgrath', 'mcg@gmail.com')
add_user('homer', 'homer@gmail.com')
with self.client:
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
self.assertIn(b'All Users', response.data)
self.assertNotIn(b'<p>No users!</p>', response.data)
self.assertIn(b'mcgrath', response.data)
self.assertIn(b'homer', response.data)
def test_main_add_user(self):
"""
Ensure a new user can be added to the database via a POST request.
"""
with self.client:
response = self.client.post(
'/',
data={'username': 'mcgrath', 'email': 'mcg@gmail.com'},
follow_redirects=True
)
self.assertEqual(response.status_code, 200)
self.assertIn(b'All Users', response.data)
self.assertNotIn(b'<p>No users!</p>', response.data)
self.assertIn(b'mcgrath', response.data)
if __name__ == '__main__':
unittest.main()
| 38.673913
| 79
| 0.562395
|
794f5568a6c628b0f85c2b8c44b922a9578dc2c4
| 171
|
py
|
Python
|
apps/course/admin.py
|
capy-pl/nccu-grade-system
|
db7107d56e45d535eed92b47c06e0d5c06f983a2
|
[
"Apache-2.0"
] | 2
|
2019-01-14T17:20:06.000Z
|
2019-05-06T03:26:23.000Z
|
apps/course/admin.py
|
capy-pl/nccu-grade-system
|
db7107d56e45d535eed92b47c06e0d5c06f983a2
|
[
"Apache-2.0"
] | 4
|
2018-12-25T15:06:35.000Z
|
2019-01-05T08:02:36.000Z
|
apps/course/admin.py
|
capy-pl/nccu-grade-system
|
db7107d56e45d535eed92b47c06e0d5c06f983a2
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import Course, ScoringSubject
# Register your models here.
admin.site.register(Course)
admin.site.register(ScoringSubject)
| 21.375
| 42
| 0.818713
|
794f5645192b1e66b26e1161e9f6c20fca6166c7
| 1,012
|
py
|
Python
|
system/main.py
|
IsakLundstrom/D0020E_UWB_Nav_DR3
|
f5dccdbccd8a4d91b5e666c81da3d656270193f1
|
[
"MIT"
] | null | null | null |
system/main.py
|
IsakLundstrom/D0020E_UWB_Nav_DR3
|
f5dccdbccd8a4d91b5e666c81da3d656270193f1
|
[
"MIT"
] | null | null | null |
system/main.py
|
IsakLundstrom/D0020E_UWB_Nav_DR3
|
f5dccdbccd8a4d91b5e666c81da3d656270193f1
|
[
"MIT"
] | null | null | null |
import threading
from webpage.server import startServer
from fallHandler import FallHandler
from sessionHandler import SessionHandler
from globalVariables import GlobalVariables
from webpage.changeConfig import ChangeConfig
import time
if __name__ == '__main__':
globalVariables = GlobalVariables() # Here we set up objects and threads for the program
fallSystemLock = threading.Lock()
system = FallHandler(fallSystemLock, globalVariables)
session = SessionHandler(globalVariables)
serverThread = threading.Thread(target=startServer, args=(fallSystemLock, globalVariables))
systemThread = threading.Thread(target=system.startSystem)
sessionThread = threading.Thread(target=session.listen)
serverThread.start()
systemThread.start()
sessionThread.start() # all threads start
print(threading.active_count())
while(1): # every ten seconds we print how many active threads we have
time.sleep(10)
print('Nr of threads alive:', threading.active_count())
| 42.166667
| 95
| 0.772727
|
794f56ddd6b6d822b3f0d4de8aeb4a33a95af2a6
| 6,509
|
py
|
Python
|
plugins/modules/cmci_create.py
|
sophiegreen/ibm_zos_cics
|
a0d14af4f60d80a7262ae557f64e67b429115daa
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/cmci_create.py
|
sophiegreen/ibm_zos_cics
|
a0d14af4f60d80a7262ae557f64e67b429115daa
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/cmci_create.py
|
sophiegreen/ibm_zos_cics
|
a0d14af4f60d80a7262ae557f64e67b429115daa
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) IBM Corporation 2020
# Apache License, Version 2.0 (see https://opensource.org/licenses/Apache-2.0)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: cmci_create
short_description: Create CICS and CICSplex definitions in CSD and BAS repositories
description:
- The cmci_create module can be used to create definitional CICS and CICSPlex® SM resources in CICS
regions, using the CMCI API. The CMCI API is provided by CICSplex SM, or in SMSS regions. For information about
the CMCI API see
U(https://www.ibm.com/support/knowledgecenter/SSGMCP_5.6.0/reference-system-programming/cmci/clientapi_overview.html).
For information about how to compose POST requests, see
U(https://www.ibm.com/support/knowledgecenter/SSGMCP_5.6.0/reference-system-programming/cmci/clientapi_post.html).
author: "IBM"
extends_documentation_fragment:
- ibm.ibm_zos_cics.cmci.COMMON
- ibm.ibm_zos_cics.cmci.ATTRIBUTES
- ibm.ibm_zos_cics.cmci.PARAMETERS
'''
EXAMPLES = r"""
- name: define a BUNDLE in a CSD
cmci_create:
cmci_host: 'winmvs2c.hursley.ibm.com'
cmci_port: '10080'
context: 'iyk3z0r9'
resource_name: 'CICSDefinitionBundle'
attributes:
name: PONGALT
BUNDLEDIR: /u/ibmuser/bundle/pong/pongbundle_1.0.0
csdgroup: JVMGRP
parameters:
csd: null
"""
RETURN = r"""
changed:
description: True if the state was changed, otherwise False
returned: always
type: bool
failed:
  description: True if the module failed, otherwise False
returned: always
type: bool
connect_version:
description: Version of the CMCI API
returned: success
type: str
cpsm_reason:
description:
- Character value of the CPSM API reason code returned. For a list of reason values provided by each API command,
see
U(https://www.ibm.com/support/knowledgecenter/SSGMCP_5.6.0/reference-system-programming/commands-cpsm/eyup2kr.html)
returned: success
type: str
cpsm_reason_code:
description:
- Numeric value of the CPSM API reason code returned. For a list of numeric values see
U(https://www.ibm.com/support/knowledgecenter/SSGMCP_5.6.0/reference-system-programming/commands-cpsm/eyup2ks.html)
returned: success
type: int
cpsm_response:
description:
- Character value of the CPSM API response code returned. For a list of response values provided by each API
command, see
U(https://www.ibm.com/support/knowledgecenter/SSGMCP_5.6.0/reference-system-programming/commands-cpsm/eyup2kr.html)
returned: success
type: str
cpsm_response_code:
description:
- Numeric value of the CPSM API response code returned. For a list of numeric values see
U(https://www.ibm.com/support/knowledgecenter/SSGMCP_5.6.0/reference-system-programming/commands-cpsm/eyup2ks.html)
returned: success
type: str
http_status:
description:
- Message associated with HTTP status code returned by CMCI
returned: success
type: str
http_status_code:
description:
- HTTP status code returned by CMCI
returned: success
type: int
record_count:
description:
- Number of records returned
returned: success
type: int
records:
description:
- A list of the returned records
returned: success
type: list
elements: dict
sample:
- _keydata: "C1D5E2C9E3C5E2E3"
aloadtime: "00:00:00.000000"
apist: "CICSAPI"
application: ""
applmajorver: "-1"
applmicrover: "-1"
applminorver: "-1"
basdefinever: "0"
cedfstatus: "CEDF"
changeagent: "CSDAPI"
changeagrel: "0730"
changetime: "2020-12-15T02:34:31.000000+00:00"
changeusrid: "YQCHEN"
coboltype: "NOTAPPLIC"
concurrency: "QUASIRENT"
copy: "NOTREQUIRED"
currentloc: "NOCOPY"
datalocation: "ANY"
definesource: "ANSITEST"
definetime: "2020-12-15T02:34:29.000000+00:00"
dynamstatus: "NOTDYNAMIC"
entrypoint: "FF000000"
execkey: "USEREXECKEY"
executionset: "FULLAPI"
eyu_cicsname: "IYCWEMW2"
eyu_cicsrel: "E730"
eyu_reserved: "0"
fetchcnt: "0"
fetchtime: "00:00:00.000000"
holdstatus: "NOTAPPLIC"
installagent: "CSDAPI"
installtime: "2020-12-15T02:34:33.000000+00:00"
installusrid: "YQCHEN"
jvmclass: ""
jvmserver: ""
language: "NOTDEFINED"
length: "0"
library: ""
librarydsn: ""
loadpoint: "FF000000"
lpastat: "NOTAPPLIC"
newcopycnt: "0"
operation: ""
pgrjusecount: "0"
platform: ""
program: "ANSITEST"
progtype: "PROGRAM"
remotename: ""
remotesystem: ""
removecnt: "0"
rescount: "0"
residency: "NONRESIDENT"
rloading: "0.000"
rplid: "0"
rremoval: "0.000"
runtime: "UNKNOWN"
ruse: "0.000"
sharestatus: "PRIVATE"
status: "DISABLED"
transid: ""
useagelstat: "0"
usecount: "0"
usefetch: "0.000"
request:
description: Information about the request that was made to CMCI
returned: success
type: dict
contains:
body:
description: The XML body sent with the request, if any
returned: success
type: str
method:
description: The HTTP method used for the request
returned: success
type: str
url:
description: The URL used for the request
returned: success
type: str
"""
from ansible_collections.ibm.ibm_zos_cics.plugins.module_utils.cmci import (
AnsibleCMCIModule, PARAMETERS, ATTRIBUTES, append_attributes, append_parameters,
append_attributes_parameters_arguments
)
from typing import Optional, Dict
class AnsibleCMCICreateModule(AnsibleCMCIModule):
def __init__(self):
super(AnsibleCMCICreateModule, self).__init__('POST')
def init_argument_spec(self): # type: () -> Dict
argument_spec = super(AnsibleCMCICreateModule, self).init_argument_spec()
append_attributes_parameters_arguments(argument_spec)
return argument_spec
def init_body(self): # type: () -> Optional[Dict]
create = {}
append_parameters(create, self._p.get(PARAMETERS))
append_attributes(create, self._p.get(ATTRIBUTES))
return {
'request': {
'create': create
}
}
def main():
AnsibleCMCICreateModule().main()
if __name__ == '__main__':
main()
| 29.188341
| 122
| 0.681364
|
794f5799d2729df8c8746e7da3f0ba2cdcca8e4f
| 2,629
|
py
|
Python
|
output.py
|
nadhirxz/athan-parser
|
3ebb99d27e592e87e098dfb8582f328a8d0e0bc2
|
[
"MIT"
] | 1
|
2022-01-09T23:48:18.000Z
|
2022-01-09T23:48:18.000Z
|
output.py
|
nadhirxz/athan-parser
|
3ebb99d27e592e87e098dfb8582f328a8d0e0bc2
|
[
"MIT"
] | null | null | null |
output.py
|
nadhirxz/athan-parser
|
3ebb99d27e592e87e098dfb8582f328a8d0e0bc2
|
[
"MIT"
] | null | null | null |
import os
import json
import re
places = ['alger', 'adrar', 'djelfa']
p = {
'alger': ['Oran', 'Mostaganem', 'Relizane', 'Chlef', 'Ain Defla', 'Tipaza', 'Medea', 'Blida', 'Boumerdes', 'Bouira', 'Dellys', 'Tizi Ouzou', 'Msila', 'Bordj Bou Arreridj', 'Setif', 'Jijel', 'Mila', 'Constantine', 'Skikda', 'Oum El Bouaghi', 'Guelma', 'Annaba', 'Souk Ahras', 'El Taref'],
'adrar': ['Beni Ounif', 'Tindouf', 'Bechar', 'Timimoun', 'Reggane', 'Bordj Badji Mokhtar', 'Beni Abbes', 'In Salah', 'El Menia', 'Ghardaia', 'Ouargla', 'Tamanrasset', 'In Guezzam', 'Illizi', 'Djanet', 'In Amenas'],
'djelfa': ['Maghnia', 'Sebdou', 'Ain Temouchent', 'Tlemcen', 'Ben Badis', 'Naama', 'Mascara', 'Saida', 'El Bayadh', 'Tiaret', 'Tissemsilt', 'Ain Oussera', 'Laghouat', 'Hassi R\'Mel', 'Ain El Melh', 'Bou Saada', 'Biskra', 'Touggourt', 'Batna', 'El Oued', 'Khenchela', 'Bir El Ater', 'Tebessa']
}
out = {}
for dir in os.listdir('./output'):
if (re.match(r'\d{4}', dir)):
out[dir] = {}
for place in places:
timing = f'./output/{dir}/{place}/timing'
offset = f'./output/{dir}/{place}/offset'
out[dir][place] = {'timing': {}, 'places': p[place], 'offset': []}
for file_name in os.listdir(timing):
file = open(os.path.join(timing, file_name), 'r').read()
lines = [l.strip() for l in file.split('\n')]
for line in lines:
line = line.split('|')
out[dir][place]['timing'][line[-1]] = line[:-1][::-1]
for file_name in os.listdir(offset):
file = open(os.path.join(offset, file_name), 'r').read()
if (place == 'adrar'):
out[dir][place]['offset'].append({'first': [], 'second': []})
parts = file.split('\n\n')
for i, part in enumerate(parts):
a = 'first' if i % 2 == 0 else 'second'
part = part.split('\n')
for line in part:
out[dir][place]['offset'][int(file_name.split('.')[0]) - 1][a].append(line.split('|'))
else:
out[dir][place]['offset'].append([])
lines = [l.strip() for l in file.split('\n')]
for line in lines:
out[dir][place]['offset'][int(file_name.split('.')[0]) - 1].append(line.split('|'))
def stringify(out):
output = json.dumps(out, indent=4)
output = output.replace(' ', ' ')
output = re.sub(r'\[\n\s+"', '["', output)
output = re.sub(r'",\s+', '", ', output)
return re.sub(r'"\s+\]', '"]', output)
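# Hedged illustration (added; not in the original script): stringify() keeps
# short string lists on a single line, e.g.
#     >>> stringify({"a": ["x", "y"]})
#     '{\n  "a": ["x", "y"]\n}'
# instead of json.dumps' default one-item-per-line layout.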
open('./output/output.json', 'w').write(stringify(out))
for dir in os.listdir('./output'):
if (re.match(r'\d{4}', dir)):
open(f'./output/{dir}/{dir}.json', 'w').write(stringify(out[dir]))
for place in places:
open(f'./output/{dir}/{place}/{place}.json', 'w').write(stringify(out[dir][place]))
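# Illustrative note (not part of the original script): stringify() pretty-prints
# with json.dumps(indent=4) and then uses the three re.sub calls to pull quoted
# strings back onto one line, so leaf lists of strings are emitted compactly.
# Roughly (exact whitespace may differ):
#     stringify({"days": ["fajr", "dhuhr", "asr"]})
#     -> '{\n "days": ["fajr", "dhuhr", "asr"]\n}'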
| 44.559322
| 296
| 0.580068
|
794f581387e59c130c0476e0f8f180cbae813724
| 2,351
|
py
|
Python
|
patterns.py
|
IvanBrasilico/cli_talker
|
9932196e414e7875da592851a761e3f7a47550f6
|
[
"MIT"
] | null | null | null |
patterns.py
|
IvanBrasilico/cli_talker
|
9932196e414e7875da592851a761e3f7a47550f6
|
[
"MIT"
] | 1
|
2021-06-01T21:56:20.000Z
|
2021-06-01T21:56:20.000Z
|
patterns.py
|
IvanBrasilico/cli_talker
|
9932196e414e7875da592851a761e3f7a47550f6
|
[
"MIT"
] | null | null | null |
'''Configuration of the routes, or vocabulary of the bot'''
from botteryapp import ch, ih
from bottery.conf.patterns import Pattern, DefaultPattern
from bottery.views import pong
from cli_talker.views import (flask_restless_view, help_text,
say_help, tec_view)
from sql_alchemy_view.views import input_example, note_view, notebook_view
class FunctionPattern(Pattern):
    '''Allows the check to be made by a user-defined function'''
def __init__(self, pattern, view, function):
'''Pass any function that receives a string and
returns True or False'''
self.function = function
super().__init__(pattern, view)
def check(self, message):
return self.function(self.pattern, message.text)
class HangUserPattern(DefaultPattern):
def __init__(self, view):
self.hanged_users = set()
super().__init__(view)
def activate_hang(self, message):
self.hanged_users.add(message.user.id)
def deactivate_hang(self, message):
self.hanged_users.discard(message.user.id)
def check(self, message):
if message is None:
return 'Empty message'
if message.user.id in self.hanged_users:
return self.view
hang_user_pattern = HangUserPattern(flask_restless_view)
hang_user_pattern_tec = HangUserPattern(tec_view)
hang_user_pattern_notebook = HangUserPattern(notebook_view)
hang_user_pattern_note = HangUserPattern(note_view)
hang_user_pattern_input = HangUserPattern(input_example)
ch.set_hang(hang_user_pattern, 'person')
ch.set_hang(hang_user_pattern_tec, 'tec')
ch.set_hang(hang_user_pattern_notebook, 'notebook')
ch.set_hang(hang_user_pattern_note, 'note')
ih.set_hang(hang_user_pattern_input, 'project')
def first_word(pattern, text):
words = text.split(' ')
if words:
return words[0] == pattern
return False
patterns = [
hang_user_pattern,
hang_user_pattern_tec,
hang_user_pattern_notebook,
hang_user_pattern_note,
hang_user_pattern_input,
Pattern('tec', tec_view),
Pattern('person', flask_restless_view),
FunctionPattern('notebook', notebook_view, first_word),
FunctionPattern('note', note_view, first_word),
Pattern('project', input_example),
Pattern('ping', pong),
Pattern('help', help_text),
DefaultPattern(say_help)
]
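# Minimal matching sketch (illustrative only; the fake message below just mimics
# the attributes this module actually reads):
#
#   class FakeMessage:
#       def __init__(self, text):
#           self.text = text
#
#   note_pattern = FunctionPattern('note', note_view, first_word)
#   note_pattern.check(FakeMessage('note buy milk'))    # -> True (first word matches)
#   note_pattern.check(FakeMessage('take note of it'))  # -> False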
| 30.532468
| 74
| 0.72097
|
794f5836003dd96679ba8b1149171cc6beb284cd
| 4,640
|
py
|
Python
|
openGaussBase/testcase/TOOLS/INTERNAL_TOOLS/gs_ctl/Opengauss_Function_Tools_gs_ctl_Case0020.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/TOOLS/INTERNAL_TOOLS/gs_ctl/Opengauss_Function_Tools_gs_ctl_Case0020.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/TOOLS/INTERNAL_TOOLS/gs_ctl/Opengauss_Function_Tools_gs_ctl_Case0020.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 系统内部使用工具
Case Name : gs_ctl start指定-l FILENAME,当指定文件在数据库实例目录下备机
是否可以启动成功(参数后指定文件路径)
Description :
1.关闭运行正常的集群
2.使用开启数据库的用户在实例目录下创建文件
3.使用-l参数并指定文件路径启动数据库(备机执行)
4.查看gsctl_test.dat文件中是否有记录信息
5.查看集群状态,备机正常启动
Expect :
1.关闭运行正常的集群成功
2.使用开启数据库的用户在实例目录下创建文件成功
3.使用-l参数并指定该文件路径启动数据库备机成功
4.查看gsctl_test.dat文件中,文件中有记录信息
5.查看集群状态,备机正常启动
History :
"""
import unittest
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import Node
from yat.test import macro
LOG = Logger()
class LogicalReplication(unittest.TestCase):
def setUp(self):
LOG.info('----------------this is setup-----------------------')
LOG.info(
'---Opengauss_Function_Tools_gs_ctl_Case0020开始执行-----')
self.constant = Constant()
self.env_path = macro.DB_ENV_PATH
self.instance_path = macro.DB_INSTANCE_PATH
self.PrimaryNode = Node('PrimaryDbUser')
def test_system_internal_tools(self):
LOG.info(
'------------若为单机环境,后续不执行,直接通过------------')
excute_cmd = f''' source {self.env_path}
gs_om -t status --detail
'''
LOG.info(excute_cmd)
msg = self.PrimaryNode.sh(excute_cmd).result()
LOG.info(msg)
if 'Standby' not in msg:
return '单机环境,后续不执行,直接通过'
else:
self.user_node = Node('Standby1DbUser')
self.sh_standbby = CommonSH('Standby1DbUser')
LOG.info('---------关闭正在运行的主机------------')
is_stop = self.sh_standbby.stop_db_instance()
self.assertTrue(is_stop)
LOG.info('-----------在实例目录下创建文件------------------')
excute_cmd = f'''
            touch {self.instance_path}/gsctl_test.dat;
'''
LOG.info(excute_cmd)
msg = self.user_node.sh(excute_cmd).result()
LOG.info(msg)
self.assertNotIn(self.constant.SQL_WRONG_MSG[1], msg)
LOG.info('----------------启动数据库-------------------')
excute_cmd = f'''
source {self.env_path};
gs_ctl start -D {self.instance_path} -M standby \
-l {self.instance_path}/gsctl_test.dat;
'''
LOG.info(excute_cmd)
msg = self.user_node.sh(excute_cmd).result()
LOG.info(msg)
self.assertIn(self.constant.RESTART_SUCCESS_MSG, msg)
LOG.info('----------------查看gsctl_test.dat文件-------------------')
excute_cmd = f'''
du -h {self.instance_path}/gsctl_test.dat ;
'''
LOG.info(excute_cmd)
msg = self.user_node.sh(excute_cmd).result()
LOG.info(msg)
res = msg.split()[0]
self.assertTrue(float(res[:-1]) > 0)
LOG.info('----------------查看集群状态-------------------')
status = self.sh_standbby.get_db_cluster_status()
self.assertTrue(status)
def tearDown(self):
LOG.info('----------------this is tearDown-----------------------')
excute_cmd = f''' source {self.env_path}
gs_om -t status --detail
'''
LOG.info(excute_cmd)
msg = self.PrimaryNode.sh(excute_cmd).result()
LOG.info(msg)
if 'Standby' not in msg:
return '单机环境,后续不执行,直接通过'
else:
self.user_node = Node('Standby1DbUser')
self.sh_standbby = CommonSH('Standby1DbUser')
LOG.info('----------------恢复集群状态------------------')
is_start = self.sh_standbby.start_db_cluster()
LOG.info(is_start)
LOG.info('-----------删除创建文件------------------')
excute_cmd = f'''
rm -rf {self.instance_path}/gsctl_test.dat;
'''
LOG.info(excute_cmd)
msg = self.user_node.sh(excute_cmd).result()
LOG.info(msg)
LOG.info(
'-----Opengauss_Function_Tools_gs_ctl_Case0020执行完成---')
| 34.887218
| 84
| 0.552586
|
794f590b10d08e388ddb53ac66cdc81e4c1dd22b
| 15,188
|
py
|
Python
|
youtube_dl_gui/optionsmanager.py
|
Sofronio/youtube-dl-gui
|
cf1d7d682e941473028b182cea6ea8d8dc5ed779
|
[
"Unlicense"
] | null | null | null |
youtube_dl_gui/optionsmanager.py
|
Sofronio/youtube-dl-gui
|
cf1d7d682e941473028b182cea6ea8d8dc5ed779
|
[
"Unlicense"
] | null | null | null |
youtube_dl_gui/optionsmanager.py
|
Sofronio/youtube-dl-gui
|
cf1d7d682e941473028b182cea6ea8d8dc5ed779
|
[
"Unlicense"
] | 1
|
2021-09-19T22:54:37.000Z
|
2021-09-19T22:54:37.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Youtubedlg module to handle settings. """
from __future__ import unicode_literals
import json
import os.path
from .utils import (
os_path_expanduser,
os_path_exists,
encode_tuple,
decode_tuple,
check_path
)
from .formats import (
OUTPUT_FORMATS,
FORMATS
)
class OptionsManager(object):
"""Handles youtubedlg options.
This class is responsible for storing and retrieving the options.
Attributes:
SETTINGS_FILENAME (string): Filename of the settings file.
SENSITIVE_KEYS (tuple): Contains the keys that we don't want
to store on the settings file. (SECURITY ISSUES).
Args:
config_path (string): Absolute path where OptionsManager
should store the settings file.
Note:
See load_default() method for available options.
Example:
Access the options using the 'options' variable.
opt_manager = OptionsManager('.')
opt_manager.options['save_path'] = '~/Downloads'
"""
SETTINGS_FILENAME = 'settings.json'
SENSITIVE_KEYS = ('sudo_password', 'password', 'video_password')
def __init__(self, config_path):
self.config_path = config_path
self.settings_file = os.path.join(config_path, self.SETTINGS_FILENAME)
self.options = dict()
self.load_default()
self.load_from_file()
def load_default(self):
"""Load the default options.
Note:
This method is automatically called by the constructor.
Options Description:
save_path (string): Path where youtube-dl should store the
downloaded file. Default is $HOME.
video_format (string): Video format to download.
                When this option is set to '0' youtube-dl will choose
the best video format available for the given URL.
second_video_format (string): Video format to mix with the first
one (-f 18+17).
to_audio (boolean): If True youtube-dl will post process the
video file.
            keep_video (boolean): If True youtube-dl will keep the video file
after post processing it.
audio_format (string): Audio format of the post processed file.
Available values are "mp3", "wav", "aac", "m4a", "vorbis", "opus".
audio_quality (string): Audio quality of the post processed file.
Available values are "9", "5", "0". The lowest the value the
better the quality.
restrict_filenames (boolean): If True youtube-dl will restrict
the downloaded file filename to ASCII characters only.
output_format (int): This option sets the downloaded file
output template. See formats.OUTPUT_FORMATS for more info.
output_template (string): Can be any output template supported
by youtube-dl.
playlist_start (int): Playlist index to start downloading.
playlist_end (int): Playlist index to stop downloading.
max_downloads (int): Maximum number of video files to download
from the given playlist.
min_filesize (float): Minimum file size of the video file.
If the video file is smaller than the given size then
youtube-dl will abort the download process.
max_filesize (float): Maximum file size of the video file.
If the video file is larger than the given size then
youtube-dl will abort the download process.
min_filesize_unit (string): Minimum file size unit.
                Available values: '', 'k', 'm', 'g', 't', 'p', 'e', 'z', 'y'.
max_filesize_unit (string): Maximum file size unit.
See 'min_filesize_unit' option for available values.
write_subs (boolean): If True youtube-dl will try to download
the subtitles file for the given URL.
write_all_subs (boolean): If True youtube-dl will try to download
all the available subtitles files for the given URL.
write_auto_subs (boolean): If True youtube-dl will try to download
the automatic subtitles file for the given URL.
embed_subs (boolean): If True youtube-dl will merge the subtitles
file with the video. (ONLY mp4 files).
subs_lang (string): Language of the subtitles file to download.
Needs 'write_subs' option.
ignore_errors (boolean): If True youtube-dl will ignore the errors
and continue the download process.
open_dl_dir (boolean): If True youtube-dlg will open the
destination folder after download process has been completed.
write_description (boolean): If True youtube-dl will write video
description to a .description file.
write_info (boolean): If True youtube-dl will write video
metadata to a .info.json file.
write_thumbnail (boolean): If True youtube-dl will write
thumbnail image to disk.
retries (int): Number of youtube-dl retries.
user_agent (string): Specify a custom user agent for youtube-dl.
referer (string): Specify a custom referer to use if the video
access is restricted to one domain.
proxy (string): Use the specified HTTP/HTTPS proxy.
shutdown (boolean): If True youtube-dlg will turn the computer
off after the download process has been completed.
sudo_password (string): SUDO password for the shutdown process if
the user does not have elevated privileges.
username (string): Username to login with.
password (string): Password to login with.
video_password (string): Video password for the given URL.
youtubedl_path (string): Absolute path to the youtube-dl binary.
Default is the self.config_path. You can change this option
to point on /usr/local/bin etc.. if you want to use the
youtube-dl binary on your system. This is also the directory
                where youtube-dlg will automatically download youtube-dl if it
                does not exist, so make sure you have write access if you
                want to update the youtube-dl binary from within youtube-dlg.
cmd_args (string): String that contains extra youtube-dl options
                separated by spaces.
enable_log (boolean): If True youtube-dlg will enable
the LogManager. See main() function under __init__().
log_time (boolean): See logmanager.LogManager add_time attribute.
workers_number (int): Number of download workers that download manager
will spawn. Must be greater than zero.
locale_name (string): Locale name (e.g. ru_RU).
main_win_size (tuple): Main window size (width, height).
                If the window becomes too small the program will reset its size.
See _settings_are_valid method MIN_FRAME_SIZE.
opts_win_size (tuple): Options window size (width, height).
                If the window becomes too small the program will reset its size.
See _settings_are_valid method MIN_FRAME_SIZE.
save_path_dirs (list): List that contains temporary save paths.
selected_video_formats (list): List that contains the selected
video formats to display on the main window.
selected_audio_formats (list): List that contains the selected
audio formats to display on the main window.
selected_format (string): Current format selected on the main window.
youtube_dl_debug (boolean): When True will pass '-v' flag to youtube-dl.
ignore_config (boolean): When True will ignore youtube-dl config file options.
confirm_exit (boolean): When True create popup to confirm exiting youtube-dl-gui.
native_hls (boolean): When True youtube-dl will use the native HLS implementation.
show_completion_popup (boolean): When True youtube-dl-gui will create a popup
to inform the user for the download completion.
confirm_deletion (boolean): When True ask user before item removal.
nomtime (boolean): When True will not use the Last-modified header to
set the file modification time.
embed_thumbnail (boolean): When True will embed the thumbnail in
the audio file as cover art.
add_metadata (boolean): When True will write metadata to file.
"""
#REFACTOR Remove old options & check options validation
self.options = {
'save_path': os_path_expanduser('~'),
'save_path_dirs': [
os_path_expanduser('~'),
os.path.join(os_path_expanduser('~'), "Downloads"),
os.path.join(os_path_expanduser('~'), "Desktop"),
os.path.join(os_path_expanduser('~'), "Videos"),
os.path.join(os_path_expanduser('~'), "Music"),
],
'video_format': '0',
'second_video_format': '0',
'to_audio': False,
'keep_video': False,
'audio_format': '',
'audio_quality': '5',
'restrict_filenames': False,
'output_format': 1,
'output_template': os.path.join('%(uploader)s', '%(title)s.%(ext)s'),
'playlist_start': 1,
'playlist_end': 0,
'max_downloads': 0,
'min_filesize': 0,
'max_filesize': 0,
'min_filesize_unit': '',
'max_filesize_unit': '',
'write_subs': False,
'write_all_subs': False,
'write_auto_subs': False,
'embed_subs': False,
'subs_lang': 'en',
'ignore_errors': True,
'open_dl_dir': False,
'write_description': False,
'write_info': False,
'write_thumbnail': False,
'retries': 10,
'user_agent': '',
'referer': '',
'proxy': '',
'shutdown': False,
'sudo_password': '',
'username': '',
'password': '',
'video_password': '',
'youtubedl_path': self.config_path,
'cmd_args': '',
'enable_log': True,
'log_time': True,
'workers_number': 3,
'locale_name': 'en_US',
'main_win_size': (740, 490),
'opts_win_size': (640, 490),
'selected_video_formats': ['webm', 'mp4'],
'selected_audio_formats': ['mp3', 'm4a', 'vorbis'],
'selected_format': '0',
'youtube_dl_debug': False,
'ignore_config': True,
'confirm_exit': True,
'native_hls': True,
'show_completion_popup': True,
'confirm_deletion': True,
'nomtime': False,
'embed_thumbnail': False,
'add_metadata': False
}
def load_from_file(self):
"""Load options from settings file. """
if not os_path_exists(self.settings_file):
return
with open(self.settings_file, 'rb') as settings_file:
try:
options = json.load(settings_file)
if self._settings_are_valid(options):
self.options = options
except:
self.load_default()
def save_to_file(self):
"""Save options to settings file. """
check_path(self.config_path)
with open(self.settings_file, 'wb') as settings_file:
options = self._get_options()
json.dump(options,
settings_file,
indent=4,
separators=(',', ': '))
def _settings_are_valid(self, settings_dictionary):
"""Check settings.json dictionary.
Args:
settings_dictionary (dict): Options dictionary loaded
from the settings file. See load_from_file() method.
Returns:
True if settings.json dictionary is valid, else False.
"""
VALID_VIDEO_FORMAT = ('0', '17', '36', '5', '34', '35', '43', '44', '45',
'46', '18', '22', '37', '38', '160', '133', '134', '135', '136','137',
'264', '138', '242', '243', '244', '247', '248', '271', '272', '82',
'83', '84', '85', '100', '101', '102', '139', '140', '141', '171', '172')
VALID_AUDIO_FORMAT = ('mp3', 'wav', 'aac', 'm4a', 'vorbis', 'opus', '')
VALID_AUDIO_QUALITY = ('0', '5', '9')
VALID_FILESIZE_UNIT = ('', 'k', 'm', 'g', 't', 'p', 'e', 'z', 'y')
VALID_SUB_LANGUAGE = ('en', 'el', 'pt', 'fr', 'it', 'ru', 'es', 'de', 'he', 'sv', 'tr')
MIN_FRAME_SIZE = 100
# Decode string formatted tuples back to normal tuples
settings_dictionary['main_win_size'] = decode_tuple(settings_dictionary['main_win_size'])
settings_dictionary['opts_win_size'] = decode_tuple(settings_dictionary['opts_win_size'])
for key in self.options:
if key not in settings_dictionary:
return False
if type(self.options[key]) != type(settings_dictionary[key]):
return False
# Check if each key has a valid value
rules_dict = {
'video_format': FORMATS.keys(),
'second_video_format': VALID_VIDEO_FORMAT,
'audio_format': VALID_AUDIO_FORMAT,
'audio_quality': VALID_AUDIO_QUALITY,
'output_format': OUTPUT_FORMATS.keys(),
'min_filesize_unit': VALID_FILESIZE_UNIT,
'max_filesize_unit': VALID_FILESIZE_UNIT,
'subs_lang': VALID_SUB_LANGUAGE
}
for key, valid_list in rules_dict.items():
if settings_dictionary[key] not in valid_list:
return False
# Check workers number value
if settings_dictionary['workers_number'] < 1:
return False
# Check main-options frame size
for size in settings_dictionary['main_win_size']:
if size < MIN_FRAME_SIZE:
return False
for size in settings_dictionary['opts_win_size']:
if size < MIN_FRAME_SIZE:
return False
return True
def _get_options(self):
"""Return options dictionary without SENSITIVE_KEYS. """
temp_options = self.options.copy()
for key in self.SENSITIVE_KEYS:
temp_options[key] = ''
# Encode normal tuples to string formatted tuples
temp_options['main_win_size'] = encode_tuple(temp_options['main_win_size'])
temp_options['opts_win_size'] = encode_tuple(temp_options['opts_win_size'])
return temp_options
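# Usage sketch (illustrative; mirrors the example in the class docstring, the
# config directory below is hypothetical):
#
#   opt_manager = OptionsManager('/tmp/youtube-dlg')
#   opt_manager.options['save_path'] = '~/Downloads'
#   opt_manager.options['workers_number'] = 2
#   opt_manager.save_to_file()    # sensitive keys are blanked by _get_options()
#   opt_manager.load_from_file()  # falls back to defaults if the file is invalid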
| 37.316953
| 97
| 0.587569
|
794f590d5629674748248822b96e97f8ef152cf0
| 3,687
|
py
|
Python
|
services/movies_admin/movies/models.py
|
svvladimir-ru/ugc_sprint_1
|
7ae4f9094f34981057f6c80d38bd96df6c96d2db
|
[
"MIT"
] | null | null | null |
services/movies_admin/movies/models.py
|
svvladimir-ru/ugc_sprint_1
|
7ae4f9094f34981057f6c80d38bd96df6c96d2db
|
[
"MIT"
] | null | null | null |
services/movies_admin/movies/models.py
|
svvladimir-ru/ugc_sprint_1
|
7ae4f9094f34981057f6c80d38bd96df6c96d2db
|
[
"MIT"
] | null | null | null |
import uuid
from django.core.validators import MaxValueValidator as Max
from django.core.validators import MinValueValidator as Min
from django.db import models
from django.utils.translation import gettext_lazy as _
from model_utils.models import TimeStampedModel
class Person(TimeStampedModel):
id = models.UUIDField(_('id'), primary_key=True, default=uuid.uuid4, editable=False)
full_name = models.TextField(_('Full name'))
birth_date = models.DateField(_('Birthday'), null=True)
class Meta:
verbose_name = _('Person')
verbose_name_plural = _('Persons')
managed = False
db_table = f'"content"."person"'
def __str__(self):
return self.full_name
class Genre(TimeStampedModel):
id = models.UUIDField(_('id'), primary_key=True, default=uuid.uuid4, editable=False)
name = models.TextField(_('Name'))
description = models.TextField(_('Description'), blank=True)
class Meta:
verbose_name = _('Genre')
verbose_name_plural = _('Genres')
managed = False
db_table = f'"content"."genre"'
def __str__(self):
return self.name
class FilmWorkType(models.TextChoices):
MOVIE = ('film', _('Film'))
SERIES = ('series', _('Series'))
class FilmWork(TimeStampedModel):
id = models.UUIDField(_('id'), primary_key=True, default=uuid.uuid4, editable=False)
title = models.CharField(_('Name'), max_length=255)
description = models.TextField(_('Description'), blank=True)
creation_date = models.DateField(_('Creation date'), null=True, blank=True)
certificate = models.TextField(_('Certificate'), blank=True)
file_path = models.FileField(_('File'), upload_to='film_works/', null=True, blank=True)
rating = models.FloatField(_('Rating'), validators=[Min(0), Max(10)], null=True, blank=True)
type = models.TextField(_('Type'), choices=FilmWorkType.choices, blank=True)
genres = models.ManyToManyField('movies.Genre', through='movies.GenreFilmWork')
persons = models.ManyToManyField('movies.Person', through='movies.PersonFilmWork')
class Meta:
verbose_name = _('Film')
verbose_name_plural = _('Films')
managed = False
db_table = f'"content"."film_work"'
def __str__(self):
return self.title
class RoleType(models.TextChoices):
ACTOR = ('actor', _('Actor'))
WRITER = ('writer', _('Writer'))
DIRECTOR = ('director', _('Director'))
class PersonFilmWork(models.Model):
id = models.UUIDField(_('id'), primary_key=True, default=uuid.uuid4, editable=False)
film_work = models.ForeignKey('movies.FilmWork', on_delete=models.CASCADE)
person = models.ForeignKey('movies.Person', on_delete=models.CASCADE)
role = models.TextField(_('Role'), choices=RoleType.choices)
created = models.DateTimeField(_('Created'), auto_created=True, auto_now_add=True)
class Meta:
verbose_name = _('Person')
verbose_name_plural = _('Persons')
db_table = f'"content"."person_film_work"'
managed = False
unique_together = ('film_work', 'person', 'role')
class GenreFilmWork(models.Model):
id = models.UUIDField(_('id'), primary_key=True, default=uuid.uuid4, editable=False)
film_work = models.ForeignKey('movies.FilmWork', on_delete=models.CASCADE)
genre = models.ForeignKey('movies.Genre', on_delete=models.CASCADE)
created = models.DateTimeField(_('Created'), auto_created=True, auto_now_add=True)
class Meta:
verbose_name = _('Genre')
verbose_name_plural = _('Genres')
db_table = f'"content"."genre_film_work"'
managed = False
unique_together = ('film_work', 'genre')
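# Illustrative ORM usage (not part of the original module; assumes a normal
# Django setup with this "movies" app installed):
#
#   recent_films = (FilmWork.objects
#                   .filter(type=FilmWorkType.MOVIE, rating__gte=7)
#                   .prefetch_related('genres', 'persons')
#                   .order_by('-creation_date'))
#
# The through models (GenreFilmWork, PersonFilmWork) only need to be queried
# directly when the role or the creation timestamp of a relation matters.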
| 36.87
| 96
| 0.684296
|
794f595e5ccf5a7592de1fdf9e49f6356ecaa5c6
| 2,248
|
py
|
Python
|
example_nn.py
|
aaskov/nsp
|
e0926a618f0b47d7d34a45c5c2bc3a32dcfa24c1
|
[
"MIT"
] | null | null | null |
example_nn.py
|
aaskov/nsp
|
e0926a618f0b47d7d34a45c5c2bc3a32dcfa24c1
|
[
"MIT"
] | null | null | null |
example_nn.py
|
aaskov/nsp
|
e0926a618f0b47d7d34a45c5c2bc3a32dcfa24c1
|
[
"MIT"
] | 1
|
2018-07-26T06:51:33.000Z
|
2018-07-26T06:51:33.000Z
|
# -*- coding: utf-8 -*-
"""
Neural network - nsp
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from nn.main import NeuralNetwork
from data.data_sunspot import get_sunspot, data_split
#%%
if __name__ == "__main__":
print 'This file contains the Neural Network example'
# Parameters and settings
data_lag = 3
N_input_units = data_lag
N_hidden_units = 8
N_output_units = 1
net_fit_repeat = 5
net_max_iter = 10000
net_best_err = 9e9
# Load the Sunspot dataset
year, reading, lag_matrix = get_sunspot(data_lag)
train, test = data_split(lag_matrix)
# Repeat network fit to find the one with the lowest error
net_best = None
for net_fit_count in range(net_fit_repeat):
net = NeuralNetwork(structure=(N_input_units-1, N_hidden_units, N_output_units), train_input=train[:, 1:], train_target=train[:, 0], test_input=test[:, 1:], test_target=test[:, 0], max_iter=net_max_iter)
net.train()
if net.e_test[-1] < net_best_err:
net_best = net
net_best_err = net.e_test[-1]
# Train and output error
print 'Minimum test MSE:', str(min(net_best.e_test))
# Evaluate error
plt.figure('Error function')
plt.semilogy(net_best.e_train, label="Train")
plt.semilogy(net_best.e_test, label="Test")
plt.legend()
text = 'Weight range: ' + str(net_best.range) + ', Step-size: ' + str(net_best.eta)
plt.title(text)
plt.ylabel('Cost')
plt.xlabel('Iterations')
plt.grid()
# Norm of error gradient wrt weight
plt.figure('Error gradient norm')
plt.semilogy(net_best.norm_gradient)
plt.title('Weight gradient norm')
plt.ylabel('Norm gradient')
plt.xlabel('Iterations')
plt.grid()
# Prediction and test set
plt.figure('Prediction on testset')
    y = net_best.predict(net_best.test_input)
plt.plot(y, label="Prediction")
plt.plot(net_best.test_target, label="True value")
plt.legend()
plt.xlabel('Year')
plt.ylabel('Target')
plt.grid()
# Network drawer (not implemented yet)
#draw_network((Ni, Nh, No), N.Wi.T * 2, N.Wo.T * 2)
| 30.794521
| 212
| 0.639235
|
794f59e48507e6002311e54e8ae31f3ad1bf4647
| 8,655
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py
|
jiansowa/Paddle
|
488152a6d076eac91ef0921ff6e16c65777f814d
|
[
"Apache-2.0"
] | 8
|
2019-06-16T12:36:11.000Z
|
2021-03-05T05:33:21.000Z
|
python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py
|
wozna/Paddle
|
0ecf441af14d554c85f69a206e3e3a9bdd86fb13
|
[
"Apache-2.0"
] | 1
|
2020-09-10T09:05:52.000Z
|
2020-09-10T09:06:22.000Z
|
python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py
|
wozna/Paddle
|
0ecf441af14d554c85f69a206e3e3a9bdd86fb13
|
[
"Apache-2.0"
] | 25
|
2019-12-07T02:14:14.000Z
|
2021-12-30T06:16:30.000Z
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.dygraph.nn import Embedding
import paddle.fluid.framework as framework
from paddle.fluid.optimizer import SGDOptimizer
from paddle.fluid.dygraph.base import to_variable
from test_imperative_base import new_program_scope
import numpy as np
import six
class SimpleNet(fluid.Layer):
def __init__(self,
hidden_size,
vocab_size,
num_steps=20,
init_scale=0.1,
is_sparse=False,
dtype='float32'):
super(SimpleNet, self).__init__()
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.init_scale = init_scale
self.num_steps = num_steps
self.embedding = Embedding(
size=[vocab_size, hidden_size],
dtype=dtype,
is_sparse=is_sparse,
param_attr=fluid.ParamAttr(
name='embedding_para',
initializer=fluid.initializer.UniformInitializer(
low=-init_scale, high=init_scale)))
self.softmax_weight = self.create_parameter(
attr=fluid.ParamAttr(),
shape=[self.hidden_size, self.hidden_size],
dtype=dtype,
default_initializer=fluid.initializer.UniformInitializer(
low=-self.init_scale, high=self.init_scale))
self.softmax_bias = self.create_parameter(
attr=fluid.ParamAttr(),
shape=[self.hidden_size],
dtype=dtype,
default_initializer=fluid.initializer.UniformInitializer(
low=-self.init_scale, high=self.init_scale))
def forward(self, input, label):
x_emb = self.embedding(input)
fc = fluid.layers.matmul(x_emb, self.softmax_weight)
fc = fluid.layers.elementwise_add(fc, self.softmax_bias)
projection = fluid.layers.matmul(
fc, fluid.layers.transpose(
self.embedding.weight, perm=[1, 0]))
projection = fluid.layers.reshape(
projection, shape=[-1, self.vocab_size])
loss = fluid.layers.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False)
loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps])
loss = fluid.layers.reduce_mean(loss, dim=[0])
loss = fluid.layers.reduce_sum(loss)
return loss
class TestDygraphSimpleNet(unittest.TestCase):
def test_simple_net(self):
for is_sparse in [True, False]:
for dtype in ["float32", "float64"]:
self.simple_net_float(is_sparse, dtype)
def simple_net_float(self, is_sparse, dtype):
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for place in places:
seed = 90
hidden_size = 10
vocab_size = 1000
num_steps = 3
init_scale = 0.1
batch_size = 4
batch_num = 200
for is_sort_sum_gradient in [True, False]:
traced_layer = None
with fluid.dygraph.guard(place):
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
simple_net = SimpleNet(
hidden_size=hidden_size,
vocab_size=vocab_size,
num_steps=num_steps,
init_scale=init_scale,
is_sparse=is_sparse,
dtype=dtype)
sgd = SGDOptimizer(
learning_rate=1e-3,
parameter_list=simple_net.parameters())
dy_param_updated = dict()
dy_param_init = dict()
dy_loss = None
fluid.set_flags({
'FLAGS_sort_sum_gradient': is_sort_sum_gradient
})
for i in range(batch_num):
x_data = np.arange(12).reshape(4, 3).astype('int64')
y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
x_data = x_data.reshape((-1, num_steps))
y_data = y_data.reshape((-1, 1))
x = to_variable(x_data)
y = to_variable(y_data)
outs = simple_net(x, y)
dy_loss = outs
if i == 0:
for param in simple_net.parameters():
dy_param_init[param.name] = param.numpy()
dy_loss.backward()
sgd.minimize(dy_loss)
sgd.clear_gradients()
if i == batch_num - 1:
for param in simple_net.parameters():
dy_param_updated[param.name] = param.numpy()
dy_loss_value = dy_loss.numpy()
with new_program_scope():
paddle.manual_seed(seed)
paddle.framework.random._manual_program_seed(seed)
simple_net = SimpleNet(
hidden_size=hidden_size,
vocab_size=vocab_size,
num_steps=num_steps,
is_sparse=is_sparse,
dtype=dtype)
exe = fluid.Executor(place)
sgd = SGDOptimizer(learning_rate=1e-3)
x = fluid.layers.data(
name="x", shape=[-1, num_steps], dtype='int64')
y = fluid.layers.data(name="y", shape=[-1, 1], dtype=dtype)
static_loss = simple_net(x, y)
sgd.minimize(static_loss)
static_param_updated = dict()
static_param_init = dict()
static_param_name_list = list()
for param in simple_net.parameters():
static_param_name_list.append(param.name)
out = exe.run(framework.default_startup_program(),
fetch_list=static_param_name_list)
for i in range(len(static_param_name_list)):
static_param_init[static_param_name_list[i]] = out[i]
static_loss_value = None
for i in range(batch_num):
x_data = np.arange(12).reshape(4, 3).astype('int64')
y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
x_data = x_data.reshape((-1, num_steps))
y_data = y_data.reshape((-1, 1))
fetch_list = [static_loss]
fetch_list.extend(static_param_name_list)
out = exe.run(fluid.default_main_program(),
feed={"x": x_data,
"y": y_data},
fetch_list=fetch_list)
static_loss_value = out[0]
if i == batch_num - 1:
for k in range(3, len(out)):
static_param_updated[static_param_name_list[
k - 1]] = out[k]
self.assertTrue(
np.array_equal(static_loss_value, dy_loss_value))
for key, value in six.iteritems(static_param_init):
self.assertTrue(np.array_equal(value, dy_param_init[key]))
for key, value in six.iteritems(static_param_updated):
self.assertTrue(
np.array_equal(value, dy_param_updated[key]))
if __name__ == '__main__':
unittest.main()
| 41.811594
| 79
| 0.531947
|
794f5a0a760f60c895a4b9297568e03fe569dd19
| 7,258
|
py
|
Python
|
chainer/testing/unary_math_function_test.py
|
mingxiaoh/chainer-v3
|
815ff00f5eaf7944d6e8a75662ff64a2fe046a4d
|
[
"BSD-3-Clause"
] | 7
|
2017-05-08T07:02:40.000Z
|
2018-12-02T18:35:39.000Z
|
chainer/testing/unary_math_function_test.py
|
mingxiaoh/chainer-v3
|
815ff00f5eaf7944d6e8a75662ff64a2fe046a4d
|
[
"BSD-3-Clause"
] | null | null | null |
chainer/testing/unary_math_function_test.py
|
mingxiaoh/chainer-v3
|
815ff00f5eaf7944d6e8a75662ff64a2fe046a4d
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy
import unittest
import chainer
from chainer import cuda
from chainer.testing import attr
from chainer.testing import condition
def _make_data_default(shape, dtype):
x = numpy.random.uniform(-1, 1, shape).astype(dtype)
gy = numpy.random.uniform(-1, 1, shape).astype(dtype)
return x, gy
def unary_math_function_unittest(func, func_expected=None, label_expected=None,
make_data=None):
"""Decorator for testing unary mathematical Chainer functions.
This decorator makes test classes test unary mathematical Chainer
functions. Tested are forward and backward computations on CPU and GPU
across parameterized ``shape`` and ``dtype``.
Args:
func(~chainer.Function): Chainer function to be tested by the decorated
test class.
func_expected: Function used to provide expected values for
            testing forward computation. If not given, a corresponding numpy
function for ``func`` is implicitly picked up by its class name.
label_expected(string): String used to test labels of Chainer
functions. If not given, the class name of ``func`` lowered is
implicitly used.
make_data: Function to customize input and gradient data used
in the tests. It takes ``shape`` and ``dtype`` as its arguments,
            and returns a tuple of input and gradient data. By default, a uniform
            distribution over ``[-1, 1]`` is used for both.
The decorated test class tests forward and backward computations on CPU and
GPU across the following :func:`~chainer.testing.parameterize` ed
parameters:
- shape: rank of zero, and rank of more than zero
- dtype: ``numpy.float16``, ``numpy.float32`` and ``numpy.float64``
Additionally, it tests the label of the Chainer function.
Chainer functions tested by the test class decorated with the decorator
should have the following properties:
- Unary, taking one parameter and returning one value
- ``dtype`` of input and output are the same
- Elementwise operation for the supplied ndarray
.. admonition:: Example
The following code defines a test class that tests
:func:`~chainer.functions.sin` Chainer function, which takes a parameter
with ``dtype`` of float and returns a value with the same ``dtype``.
.. doctest::
>>> import unittest
>>> from chainer import testing
>>> from chainer import functions as F
>>>
>>> @testing.unary_math_function_unittest(F.Sin())
... class TestSin(unittest.TestCase):
... pass
Because the test methods are implicitly injected to ``TestSin`` class by
the decorator, it is enough to place ``pass`` in the class definition.
Now the test is run with ``nose`` module.
.. doctest::
>>> import nose
>>> nose.run(
... defaultTest=__name__, argv=['', '-a', '!gpu'], exit=False)
True
To customize test data, ``make_data`` optional parameter can be used.
The following is an example of testing ``sqrt`` Chainer function, which
is tested in positive value domain here instead of the default input.
.. doctest::
>>> import numpy
>>>
>>> def make_data(shape, dtype):
... x = numpy.random.uniform(0.1, 1, shape).astype(dtype)
... gy = numpy.random.uniform(-1, 1, shape).astype(dtype)
... return x, gy
...
>>> @testing.unary_math_function_unittest(F.Sqrt(),
... make_data=make_data)
... class TestSqrt(unittest.TestCase):
... pass
...
>>> nose.run(
... defaultTest=__name__, argv=['', '-a', '!gpu'], exit=False)
True
``make_data`` function which returns input and gradient data generated
in proper value domains with given ``shape`` and ``dtype`` parameters is
defined, then passed to the decorator's ``make_data`` parameter.
"""
# TODO(takagi) In the future, the Chainer functions that could be tested
# with the decorator would be extended as:
#
# - Multiple input parameters
# - Multiple output values
# - Other types than float: integer
# - Other operators other than analytic math: basic math
# Import here to avoid mutual import.
from chainer import gradient_check
from chainer import testing
if func_expected is None:
name = func.__class__.__name__.lower()
try:
func_expected = getattr(numpy, name)
except AttributeError:
raise ValueError("NumPy has no functions corresponding "
"to Chainer function '{}'.".format(name))
if label_expected is None:
label_expected = func.__class__.__name__.lower()
if make_data is None:
make_data = _make_data_default
def f(klass):
assert issubclass(klass, unittest.TestCase)
def setUp(self):
self.x, self.gy = make_data(self.shape, self.dtype)
if self.dtype == numpy.float16:
self.backward_options = {
'eps': 2 ** -4, 'atol': 2 ** -4, 'rtol': 2 ** -4,
'dtype': numpy.float64}
else:
self.backward_options = {'atol': 1e-4, 'rtol': 1e-4}
setattr(klass, "setUp", setUp)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = func(x)
self.assertEqual(y.data.dtype, x_data.dtype)
y_expected = func_expected(cuda.to_cpu(x_data), dtype=x_data.dtype)
testing.assert_allclose(y_expected, y.data, atol=1e-4, rtol=1e-4)
setattr(klass, "check_forward", check_forward)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x)
setattr(klass, "test_forward_cpu", test_forward_cpu)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
setattr(klass, "test_forward_gpu", test_forward_gpu)
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
func, x_data, y_grad, **self.backward_options)
setattr(klass, "check_backward", check_backward)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
setattr(klass, "test_backward_cpu", test_backward_cpu)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
setattr(klass, "test_backward_gpu", test_backward_gpu)
def test_label(self):
self.assertEqual(func.label, label_expected)
setattr(klass, "test_label", test_label)
# Return parameterized class.
return testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64]
}))(klass)
return f
| 37.412371
| 79
| 0.615734
|
794f5aa4efde48ff4a13bcdc55726a6be0c1fc61
| 330
|
py
|
Python
|
torchcv/transforms/__init__.py
|
WynMew/torchcv
|
4bcbfc9a26f7d44eaebb4375234e4f2954dde3fd
|
[
"MIT"
] | 74
|
2018-03-31T04:06:55.000Z
|
2021-09-10T08:23:07.000Z
|
torchcv/transforms/__init__.py
|
WynMew/torchcv
|
4bcbfc9a26f7d44eaebb4375234e4f2954dde3fd
|
[
"MIT"
] | 5
|
2018-04-01T06:13:17.000Z
|
2019-04-15T12:34:11.000Z
|
torchcv/transforms/__init__.py
|
WynMew/torchcv
|
4bcbfc9a26f7d44eaebb4375234e4f2954dde3fd
|
[
"MIT"
] | 15
|
2018-04-01T05:12:15.000Z
|
2022-03-07T07:59:18.000Z
|
from torchcv.transforms.resize import resize
from torchcv.transforms.random_flip import random_flip
from torchcv.transforms.random_crop import random_crop
from torchcv.transforms.random_paste import random_paste
from torchcv.transforms.scale_jitter import scale_jitter
from torchcv.transforms.random_distort import random_distort
| 47.142857
| 60
| 0.890909
|
794f5d83e2aab4be98d3145b7d9e21ab87018fe4
| 2,372
|
py
|
Python
|
ironic_python_agent/cmd/agent.py
|
steveb/ironic-python-agent
|
85b3afd863aec09940b44da9ed89d7cfcbf66e82
|
[
"Apache-2.0"
] | 1
|
2021-02-27T02:48:50.000Z
|
2021-02-27T02:48:50.000Z
|
ironic_python_agent/cmd/agent.py
|
steveb/ironic-python-agent
|
85b3afd863aec09940b44da9ed89d7cfcbf66e82
|
[
"Apache-2.0"
] | null | null | null |
ironic_python_agent/cmd/agent.py
|
steveb/ironic-python-agent
|
85b3afd863aec09940b44da9ed89d7cfcbf66e82
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from oslo_config import cfg
from oslo_log import log
from oslo_service import sslutils
from oslo_utils import strutils
from ironic_python_agent import agent
from ironic_python_agent import config
from ironic_python_agent import utils
CONF = cfg.CONF
def run():
"""Entrypoint for IronicPythonAgent."""
    # NOTE(dtantsur): this must happen very early or the files from
# /etc/ironic-python-agent.d won't be loaded
utils.copy_config_from_vmedia()
log.register_options(CONF)
CONF(args=sys.argv[1:])
# Debug option comes from oslo.log, allow overriding it via kernel cmdline
ipa_debug = config.APARAMS.get('ipa-debug')
if ipa_debug is not None:
ipa_debug = strutils.bool_from_string(ipa_debug)
CONF.set_override('debug', ipa_debug)
log.setup(CONF, 'ironic-python-agent')
# Used for TLS configuration
sslutils.register_opts(CONF)
logger = log.getLogger(__name__)
logger.debug("Configuration:")
CONF.log_opt_values(logger, log.DEBUG)
agent.IronicPythonAgent(CONF.api_url,
agent.Host(hostname=CONF.advertise_host,
port=CONF.advertise_port),
agent.Host(hostname=CONF.listen_host,
port=CONF.listen_port),
CONF.ip_lookup_attempts,
CONF.ip_lookup_sleep,
CONF.network_interface,
CONF.lookup_timeout,
CONF.lookup_interval,
CONF.standalone,
CONF.agent_token,
CONF.hardware_initialization_delay,
CONF.advertise_protocol).run()
| 37.0625
| 78
| 0.642074
|
794f600b6f765840dbb8222dd28f4d1a21d729d4
| 7,684
|
py
|
Python
|
game.py
|
lestatzzz/Gomoku-python
|
3871d38489fc297ca5d37de18d21d553b8f38897
|
[
"MIT"
] | null | null | null |
game.py
|
lestatzzz/Gomoku-python
|
3871d38489fc297ca5d37de18d21d553b8f38897
|
[
"MIT"
] | null | null | null |
game.py
|
lestatzzz/Gomoku-python
|
3871d38489fc297ca5d37de18d21d553b8f38897
|
[
"MIT"
] | null | null | null |
import os
import time
AI_USE_CPP = False
if not AI_USE_CPP:  # whether to use the C++ version of the AI script
from ai import AI1Step
else:
import example
class Gomoku:
def __init__(self):
        self.g_map = [[0 for y in range(15)] for x in range(15)]  # current board
        self.cur_step = 0  # number of moves played
        self.max_search_steps = 3  # how many rounds ahead to search at most
def move_1step(self, input_by_window=False, pos_x=None, pos_y=None):
"""
        Player places a stone.
        :param input_by_window: whether the input comes from the GUI
        :param pos_x: x coordinate when the input comes from the GUI
        :param pos_y: y coordinate when the input comes from the GUI
"""
while True:
try:
if not input_by_window:
                    pos_x = int(input('x: '))  # read the player's input
pos_y = int(input('y: '))
                if 0 <= pos_x <= 14 and 0 <= pos_y <= 14:  # check whether a stone can be placed on this cell
if self.g_map[pos_x][pos_y] == 0:
self.g_map[pos_x][pos_y] = 1
self.cur_step += 1
return
            except ValueError:  # invalid player input (e.g. the player typed 'A')
continue
def game_result(self, show=False):
"""判断游戏的结局。0为游戏进行中,1为玩家获胜,2为电脑获胜,3为平局"""
        # 1. Check for five in a row horizontally
for x in range(11):
for y in range(15):
if self.g_map[x][y] == 1 and self.g_map[x + 1][y] == 1 and self.g_map[x + 2][y] == 1 and self.g_map[x + 3][y] == 1 and self.g_map[x + 4][y] == 1:
if show:
return 1, [(x0, y) for x0 in range(x, x + 5)]
else:
return 1
if self.g_map[x][y] == 2 and self.g_map[x + 1][y] == 2 and self.g_map[x + 2][y] == 2 and self.g_map[x + 3][y] == 2 and self.g_map[x + 4][y] == 2:
if show:
return 2, [(x0, y) for x0 in range(x, x + 5)]
else:
return 2
        # 2. Check for five in a row vertically
for x in range(15):
for y in range(11):
if self.g_map[x][y] == 1 and self.g_map[x][y + 1] == 1 and self.g_map[x][y + 2] == 1 and self.g_map[x][y + 3] == 1 and self.g_map[x][y + 4] == 1:
if show:
return 1, [(x, y0) for y0 in range(y, y + 5)]
else:
return 1
if self.g_map[x][y] == 2 and self.g_map[x][y + 1] == 2 and self.g_map[x][y + 2] == 2 and self.g_map[x][y + 3] == 2 and self.g_map[x][y + 4] == 2:
if show:
return 2, [(x, y0) for y0 in range(y, y + 5)]
else:
return 2
        # 3. Check for five in a row along the top-left to bottom-right diagonal
for x in range(11):
for y in range(11):
if self.g_map[x][y] == 1 and self.g_map[x + 1][y + 1] == 1 and self.g_map[x + 2][y + 2] == 1 and self.g_map[x + 3][y + 3] == 1 and self.g_map[x + 4][y + 4] == 1:
if show:
return 1, [(x + t, y + t) for t in range(5)]
else:
return 1
if self.g_map[x][y] == 2 and self.g_map[x + 1][y + 1] == 2 and self.g_map[x + 2][y + 2] == 2 and self.g_map[x + 3][y + 3] == 2 and self.g_map[x + 4][y + 4] == 2:
if show:
return 2, [(x + t, y + t) for t in range(5)]
else:
return 2
        # 4. Check for five in a row along the top-right to bottom-left diagonal
for x in range(11):
for y in range(11):
if self.g_map[x + 4][y] == 1 and self.g_map[x + 3][y + 1] == 1 and self.g_map[x + 2][y + 2] == 1 and self.g_map[x + 1][y + 3] == 1 and self.g_map[x][y + 4] == 1:
if show:
return 1, [(x + t, y + 4 - t) for t in range(5)]
else:
return 1
if self.g_map[x + 4][y] == 2 and self.g_map[x + 3][y + 1] == 2 and self.g_map[x + 2][y + 2] == 2 and self.g_map[x + 1][y + 3] == 2 and self.g_map[x][y + 4] == 2:
if show:
return 2, [(x + t, y + 4 - t) for t in range(5)]
else:
return 2
        # 5. Check for a draw
for x in range(15):
for y in range(15):
                if self.g_map[x][y] == 0:  # empty cells remain, so it is not a draw yet
if show:
return 0, [(-1, -1)]
else:
return 0
if show:
return 3, [(-1, -1)]
else:
return 3
def ai_move_1step(self):
"""电脑落子"""
for x in range(15):
for y in range(15):
if self.g_map[x][y] == 0:
self.g_map[x][y] = 2
self.cur_step += 1
return
def ai_play_1step_by_cpp(self):
        # ai = AI1Step(self, self.cur_step, True)  # AI decides the next move
st = time.time()
mapstring = list()
for x in range(15):
mapstring.extend(self.g_map[x])
try:
node_len, ai_ope_x, ai_poe_y = example.ai_1step(self.cur_step, int(True), self.max_search_steps, mapstring)
ai_ope = [ai_ope_x, ai_poe_y]
except ValueError:
raise ValueError('AI程序计算出来的数值不正确')
ed = time.time()
print('生成了%d个节点,用时%.4f' % (node_len, ed - st))
self.g_map[ai_ope[0]][ai_ope[1]] = 2
self.cur_step += 1
def ai_play_1step_py_python(self):
        ai = AI1Step(self, self.cur_step, True)  # AI decides the next move
st = time.time()
        ai.search(0, [set(), set()], self.max_search_steps)  # look ahead up to max_search_steps rounds
ed = time.time()
print('生成了%d个节点,用时%.4f,评价用时%.4f' % (len(ai.method_tree), ed - st, ai.t))
if ai.next_node_dx_list[0] == -1:
raise ValueError('ai.next_node_dx_list[0] == -1')
ai_ope = ai.method_tree[ai.next_node_dx_list[0]].ope
if self.g_map[ai_ope[0]][ai_ope[1]] != 0:
raise ValueError('self.game_map[ai_ope[0]][ai_ope[1]] = %d' % self.g_map[ai_ope[0]][ai_ope[1]])
self.g_map[ai_ope[0]][ai_ope[1]] = 2
self.cur_step += 1
def ai_play_1step(self):
if AI_USE_CPP:
self.max_search_steps = 3
self.ai_play_1step_by_cpp()
else:
self.max_search_steps = 2
self.ai_play_1step_py_python()
def show(self, res):
"""显示游戏内容"""
for y in range(15):
for x in range(15):
if self.g_map[x][y] == 0:
print(' ', end='')
elif self.g_map[x][y] == 1:
print('〇', end='')
elif self.g_map[x][y] == 2:
print('×', end='')
if x != 14:
print('-', end='')
print('\n', end='')
for x in range(15):
print('| ', end='')
print('\n', end='')
if res == 1:
print('玩家获胜!')
elif res == 2:
print('电脑获胜!')
elif res == 3:
print('平局!')
def play(self):
while True:
            self.move_1step()  # player's move
            res = self.game_result()  # evaluate the game result
            if res != 0:  # the game has ended: show the board and leave the main loop
self.show(res)
return
            self.ai_move_1step()  # computer's move
res = self.game_result()
if res != 0:
self.show(res)
return
            self.show(0)  # game not finished yet: show the board and continue the loop
def map2string(self):
mapstring = list()
for x in range(15):
mapstring.extend(list(map(lambda x0: x0 + 48, self.g_map[x])))
return bytearray(mapstring).decode('utf8')
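# Illustrative note (not part of the original file): map2string() flattens the
# 15x15 board (outer index first) into a 225-character string of '0'/'1'/'2'
# via chr(48 + cell), so a fresh board serialises to '0' * 225:
#
#   g = Gomoku()
#   s = g.map2string()
#   len(s) == 225 and set(s) == {'0'}   # -> True
#
# Which component consumes this string is not shown in this file.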
| 38.039604
| 177
| 0.440916
|
794f62257315f49527ad6d33136d9da247ce1237
| 2,744
|
py
|
Python
|
medical_data_visualizer.py
|
jmacdonald2010/medical-data-visualizer-FFCDAP
|
d985f5f597d096e6b315d85f100d08b06ee235ff
|
[
"MIT"
] | null | null | null |
medical_data_visualizer.py
|
jmacdonald2010/medical-data-visualizer-FFCDAP
|
d985f5f597d096e6b315d85f100d08b06ee235ff
|
[
"MIT"
] | null | null | null |
medical_data_visualizer.py
|
jmacdonald2010/medical-data-visualizer-FFCDAP
|
d985f5f597d096e6b315d85f100d08b06ee235ff
|
[
"MIT"
] | null | null | null |
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
# Import data
df = pd.read_csv('medical_examination.csv')
# Add 'overweight' column
df['overweight'] = df.apply(lambda x: 1 if (x.weight / (np.square(x.height / 100))) > 25 else 0, axis=1)
# Normalize data by making 0 always good and 1 always bad. If the value of 'cholesterol' or 'gluc' is 1, make the value 0. If the value is more than 1, make the value 1.
def normalize_values(value, **kwargs):
if value <= 1:
value = 0
else:
value = 1
return value
for i in range(7, 9):
df.iloc[:, i] = df.apply(lambda x: normalize_values(x[i]), axis=1)
# Draw Categorical Plot
def draw_cat_plot():
# Create DataFrame for cat plot using `pd.melt` using just the values from 'cholesterol', 'gluc', 'smoke', 'alco', 'active', and 'overweight'.
df_cat = pd.melt(df, id_vars='cardio', value_vars=['active', 'alco', 'cholesterol', 'gluc', 'overweight', 'smoke'])
    # Group and reformat the data to split it by 'cardio'. Show the counts of each feature. You will have to rename one of the columns for the catplot to work correctly.
# df_cat = None # was able to create a graph that looks correct w/o this step
# Draw the catplot with 'sns.catplot()'
# courtesy of ArbyC and HMHarris_41414141 on freeCodeCamp forums
fig = sns.catplot( # not entirely sure if i'm supposed to assign this to the var fig
data=df_cat,
kind='count',
x='variable',
hue='value',
col='cardio'
)
fig = fig.set_ylabels('total').fig
# Do not modify the next two lines
fig.savefig('catplot.png')
return fig
# Draw Heat Map
def draw_heat_map():
# Clean the data
df_heat = df[(df['ap_lo'] <= df['ap_hi'])]
# remove height under 2.5th percentile
df_heat = df_heat[(df['height'] >= df['height'].quantile(0.025))]
# remove height over 97.5 percentile
df_heat = df_heat[(df['height'] <= df['height'].quantile(0.975))]
# remove weight under 2.5th percentile
df_heat = df_heat[(df['weight'] >= df['weight'].quantile(0.025))]
# remove weight above 97.5 percentile
df_heat = df_heat[(df['weight'] <= df['weight'].quantile(0.975))]
# Calculate the correlation matrix
corr = df_heat.corr()
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(corr, dtype=bool))
# Set up the matplotlib figure
fig, ax = plt.subplots(figsize=(11,9))
# Draw the heatmap with 'sns.heatmap()'
sns.heatmap(corr, mask=mask, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={'shrink': .5}, annot=True, fmt=".1f")
# Do not modify the next two lines
fig.savefig('heatmap.png')
return fig
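# Usage sketch (illustrative; these calls are an assumption and not part of the
# original file -- the freeCodeCamp test harness normally imports the two
# functions instead of running this module directly):
if __name__ == '__main__':
    draw_cat_plot()   # writes catplot.png next to the script
    draw_heat_map()   # writes heatmap.png next to the script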
| 33.060241
| 170
| 0.655612
|
794f625feae1cef4cb125b21e8290b35fd27d5f0
| 474
|
py
|
Python
|
LinearCongruentialGenerator.py
|
Xzya/CodeAbbeySolutions
|
0a37eb246c24c1d74a6ff6c2ccf525444c5e787a
|
[
"MIT"
] | 2
|
2021-07-25T13:41:48.000Z
|
2022-03-02T21:07:39.000Z
|
LinearCongruentialGenerator.py
|
Xzya/CodeAbbeySolutions
|
0a37eb246c24c1d74a6ff6c2ccf525444c5e787a
|
[
"MIT"
] | null | null | null |
LinearCongruentialGenerator.py
|
Xzya/CodeAbbeySolutions
|
0a37eb246c24c1d74a6ff6c2ccf525444c5e787a
|
[
"MIT"
] | 5
|
2015-10-29T16:11:43.000Z
|
2022-03-13T12:50:32.000Z
|
#input
# 13
# 1261 169 10 6 8
# 131 63712 69 22 11
# 1803 8 3338 2682 23
# 167 3 76 22 19
# 163 411240 55099 35664 13
# 901 9 5 1 22
# 89 9912 40838 35944 21
# 25 4 39548 3869 6
# 197 9620 2323 998 5
# 1079 9 247168 165345 9
# 563 34 15988 2408 23
# 67 98 3914 594 22
# 155 88032 224866 1635 17
lines = int(input())
for i in range(0, lines):
(a, c, m, x0, n) = (int(x) for x in input().split())
for j in range(0, n):
x0 = (a * x0 + c) % m
print(x0, "", end="")
| 18.96
| 54
| 0.601266
|
794f62e60a4b3a5db18eaea8c2a48c6247a9dab5
| 982
|
py
|
Python
|
var/spack/repos/builtin/packages/py-qtpy/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-10T13:47:48.000Z
|
2019-04-17T13:05:17.000Z
|
var/spack/repos/builtin/packages/py-qtpy/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 17
|
2019-03-21T15:54:00.000Z
|
2022-03-29T19:34:28.000Z
|
var/spack/repos/builtin/packages/py-qtpy/package.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2021-04-07T18:27:09.000Z
|
2022-03-31T22:52:38.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyQtpy(PythonPackage):
"""QtPy: Abtraction layer for PyQt5/PyQt4/PySide/PySide2"""
homepage = "https://github.com/spyder-ide/qtpy"
pypi = "QtPy/QtPy-1.2.1.tar.gz"
version('1.7.1', sha256='e97275750934b3a1f4d8e263f5b889ae817ed36f26867ab0ce52be731ab1ed9e')
version('1.2.1', sha256='5803ce31f50b24295e8e600b76cc91d7f2a3140a5a0d526d40226f9ec5e9097d')
apis = ['pyqt5', 'pyqt4', 'pyside2', 'pyside']
variant('api', default='pyqt5', description='Default QT API',
values=apis, multi=False)
depends_on('py-setuptools', type='build')
for api in apis:
depends_on('py-' + api, when='+' + api, type='run')
def setup_run_environment(self, env):
env.set('QT_API', self.spec.variants['api'].value)
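# Illustrative usage (not part of the original recipe; the spec below uses the
# standard Spack name=value variant syntax and is an assumption, not taken from
# this file):
#     spack install py-qtpy@1.7.1 api=pyside2
# setup_run_environment() then exports QT_API=pyside2 for anything run against
# the installed package.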
| 33.862069
| 95
| 0.696538
|
794f62ff63e6d26ed970bec168f4c34d6f8495ea
| 2,592
|
py
|
Python
|
qqbot/core/util/logging.py
|
SuperKuroko/botpy
|
8e9a69ebe4d52a9a84b25047595925525495f402
|
[
"MIT"
] | 1
|
2022-03-30T13:04:32.000Z
|
2022-03-30T13:04:32.000Z
|
qqbot/core/util/logging.py
|
SuperKuroko/botpy
|
8e9a69ebe4d52a9a84b25047595925525495f402
|
[
"MIT"
] | null | null | null |
qqbot/core/util/logging.py
|
SuperKuroko/botpy
|
8e9a69ebe4d52a9a84b25047595925525495f402
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import logging
import os
import platform
from logging import FileHandler
from logging.handlers import TimedRotatingFileHandler
LOG_COLORS_CONFIG = {
"DEBUG": "cyan",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red",
}
def _getLevel():
level = logging.INFO
level_str = os.getenv("QQBOT_LOG_LEVEL", str(logging.INFO))
try:
level = int(level_str)
if level not in (
logging.NOTSET,
logging.DEBUG,
logging.INFO,
logging.WARNING,
logging.ERROR,
logging.CRITICAL,
):
logging.error("wrong logging level %s" % level_str)
level = logging.INFO
except ValueError:
logging.error("wrong logging level %s" % level_str)
logging.info("logging level: %d" % level)
return level
def getLogger(name=None):
print_format = (
"%(asctime)s - "
"\033[1;33m%(levelname)s: %(name)s - %(filename)s - %(funcName)s(line: %(lineno)s):\033[0m%(message)s"
""
)
file_format = "%(asctime)s-%(name)s - %(filename)s - %(funcName)s - line %(lineno)s-%(levelname)s - %(message)s"
if name is None:
logger = logging.getLogger("qqbot")
else:
logger = logging.getLogger(name)
logging.basicConfig(format=print_format)
logger.setLevel(level=_getLevel())
# FileHandler
no_log = os.getenv("QQBOT_DISABLE_LOG", "0")
if no_log == "0":
formatter = logging.Formatter(file_format)
if name is None:
name = "qqbot"
log_file = os.path.join(os.getcwd(), name + ".log")
file_handler = None
if platform.system().lower() != "windows":
# do not use RotatingFileHandler under Windows
# due to multi-process issue
# file_handler = RotatingFileHandler(
# log_file,
# maxBytes=1024 * 1024,
# backupCount=5,
# )
# save last 7 days log
file_handler = TimedRotatingFileHandler(
filename=log_file,
when="D",
backupCount=7,
)
else:
file_handler = FileHandler(log_file, encoding="utf-8")
logger.debug(
"qqbot: dumping log file to {path}".format(path=os.path.realpath(log_file))
)
file_handler.setLevel(level=_getLevel())
file_handler.setFormatter(formatter)
if len(logger.handlers) == 0:
logger.addHandler(file_handler)
return logger
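# Usage sketch (illustrative, not part of the original module):
#
#   from qqbot.core.util.logging import getLogger
#
#   logger = getLogger("my_plugin")   # also writes ./my_plugin.log unless
#   logger.info("bot started")        # QQBOT_DISABLE_LOG=1 is set;
#                                     # QQBOT_LOG_LEVEL controls the level.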
| 29.454545
| 116
| 0.567515
|
794f633966582982660eae0dc6068cbd6c3e6336
| 3,840
|
py
|
Python
|
similarity/preprocessing.py
|
jtraviesor/alfred-tf-trainer
|
9747d24bef418415a31abfe0c9982d2f1d9d8298
|
[
"MIT"
] | 3
|
2017-11-18T11:41:46.000Z
|
2020-02-13T19:22:28.000Z
|
similarity/preprocessing.py
|
jtraviesor/alfred-tf-trainer
|
9747d24bef418415a31abfe0c9982d2f1d9d8298
|
[
"MIT"
] | 4
|
2017-09-01T05:28:49.000Z
|
2017-11-20T05:27:49.000Z
|
similarity/preprocessing.py
|
jtraviesor/alfred-tf-trainer
|
9747d24bef418415a31abfe0c9982d2f1d9d8298
|
[
"MIT"
] | 1
|
2018-06-08T03:55:32.000Z
|
2018-06-08T03:55:32.000Z
|
from tensorflow.python.platform import gfile
from tensorflow.contrib.learn.python.learn.preprocessing.text import CategoricalVocabulary
import re
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
TOKENIZER_RE = re.compile(r"[A-Z]{2,}(?![a-z])|[A-Z][a-z]+(?=[A-Z])|[\'\w\-]+",
re.UNICODE)
def tokenizer(iterator):
"""Tokenizer generator.
Args:
iterator: Input iterator with strings.
Yields:
    an array of tokens for each value in the input.
"""
for value in iterator:
yield TOKENIZER_RE.findall(value)
class VocabularyProcessor(object):
"""Maps documents to sequences of word ids."""
def __init__(self,
min_frequency=0,
vocabulary=None,
tokenizer_fn=None):
"""Initializes a VocabularyProcessor instance.
Args:
          min_frequency: Minimum frequency of words in the vocabulary.
          vocabulary: CategoricalVocabulary object.
          tokenizer_fn: Optional tokenizer generator; defaults to the
            regex-based tokenizer defined in this module.
Attributes:
vocabulary_: CategoricalVocabulary object.
"""
self.min_frequency = min_frequency
if vocabulary:
self.vocabulary_ = vocabulary
else:
self.vocabulary_ = CategoricalVocabulary(support_reverse=True)
if tokenizer_fn:
self._tokenizer = tokenizer_fn
else:
self._tokenizer = tokenizer
def fit(self, raw_documents, unused_y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Args:
raw_documents: An iterable which yield either str or unicode.
unused_y: to match fit format signature of estimators.
Returns:
self
"""
for tokens in self._tokenizer(raw_documents):
for token in tokens:
self.vocabulary_.add(token)
if self.min_frequency > 0:
self.vocabulary_.trim(self.min_frequency)
self.vocabulary_.freeze()
return self
def fit_transform(self, raw_documents, unused_y=None):
"""Learn the vocabulary dictionary and return indexies of words.
Args:
raw_documents: An iterable which yield either str or unicode.
unused_y: to match fit_transform signature of estimators.
Returns:
x: iterable, [n_samples, max_document_length]. Word-id matrix.
"""
self.fit(raw_documents)
return self.transform(raw_documents)
def transform(self, raw_documents):
"""Transform documents to word-id matrix.
Convert words to ids with vocabulary fitted with fit or the one
provided in the constructor.
Args:
raw_documents: An iterable which yield either str or unicode.
Yields:
x: iterable, [n_samples, max_document_length]. Word-id matrix.
"""
for tokens in self._tokenizer(raw_documents):
word_ids = np.zeros(len(tokens), np.int64)
for idx, token in enumerate(tokens):
word_ids[idx] = self.vocabulary_.get(token)
yield word_ids
def reverse(self, documents):
"""Reverses output of vocabulary mapping to words.
Args:
documents: iterable, list of class ids.
Yields:
Iterator over mapped in words documents.
"""
for item in documents:
output = []
for class_id in item:
output.append(self.vocabulary_.reverse(class_id))
yield ' '.join(output)
def save(self, filename):
"""Saves vocabulary processor into given file.
Args:
filename: Path to output file.
"""
with gfile.Open(filename, 'wb') as f:
f.write(pickle.dumps(self))
@classmethod
def restore(cls, filename):
"""Restores vocabulary processor from given file.
Args:
filename: Path to file to load from.
Returns:
VocabularyProcessor object.
"""
with gfile.Open(filename, 'rb') as f:
return pickle.loads(f.read())
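# Minimal usage sketch (illustrative only, not part of the original module):
# fit a vocabulary on a couple of documents, map them to word-id arrays and
# map the ids back to tokens.
if __name__ == "__main__":
    docs = ["the quick brown fox", "the lazy dog"]
    processor = VocabularyProcessor(min_frequency=0)
    id_arrays = list(processor.fit_transform(docs))    # one np.int64 array per document
    print(id_arrays)
    print(list(processor.reverse(id_arrays)))          # ids back to space-joined tokens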
| 31.219512
| 90
| 0.678385
|
794f63f25f3c37d975513fb60aa53d4ac1cebc54
| 3,929
|
py
|
Python
|
ask-sdk-model/ask_sdk_model/interfaces/conversations/api_request.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | null | null | null |
ask-sdk-model/ask_sdk_model/interfaces/conversations/api_request.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | null | null | null |
ask-sdk-model/ask_sdk_model/interfaces/conversations/api_request.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_sdk_model.slot_value import SlotValue
class APIRequest(object):
"""
API request object
:param name: API name
:type name: (optional) str
:param arguments: Object containing values for API arguments
:type arguments: (optional) dict(str, object)
:param slots:
:type slots: (optional) dict(str, ask_sdk_model.slot_value.SlotValue)
"""
deserialized_types = {
'name': 'str',
'arguments': 'dict(str, object)',
'slots': 'dict(str, ask_sdk_model.slot_value.SlotValue)'
} # type: Dict
attribute_map = {
'name': 'name',
'arguments': 'arguments',
'slots': 'slots'
} # type: Dict
supports_multiple_types = False
def __init__(self, name=None, arguments=None, slots=None):
# type: (Optional[str], Optional[Dict[str, object]], Optional[Dict[str, SlotValue]]) -> None
"""API request object
:param name: API name
:type name: (optional) str
:param arguments: Object containing values for API arguments
:type arguments: (optional) dict(str, object)
:param slots:
:type slots: (optional) dict(str, ask_sdk_model.slot_value.SlotValue)
"""
self.__discriminator_value = None # type: str
self.name = name
self.arguments = arguments
self.slots = slots
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, APIRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
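# Illustrative sketch (not part of the generated model): construct an
# APIRequest and serialize it with to_dict(); the API name and arguments used
# here are made up.
if __name__ == "__main__":
    example = APIRequest(
        name="WeatherApi.GetForecast",
        arguments={"cityName": "Seattle"},
        slots={})
    print(example.to_dict())   # {'name': 'WeatherApi.GetForecast', 'arguments': {...}, 'slots': {}}
    print(example == APIRequest(
        name="WeatherApi.GetForecast", arguments={"cityName": "Seattle"}, slots={}))  # True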
| 31.685484
| 100
| 0.587681
|
794f63fbafc7d945ebe5e640513dfbf5ed6b9c05
| 9,512
|
py
|
Python
|
qpth/qp.py
|
zhangdongkun98/qpth
|
255dd55596685f2eff3db584bb44c4dfdbba1f28
|
[
"Apache-2.0"
] | 553
|
2017-03-02T06:31:14.000Z
|
2022-03-10T06:58:36.000Z
|
qpth/qp.py
|
zhangdongkun98/qpth
|
255dd55596685f2eff3db584bb44c4dfdbba1f28
|
[
"Apache-2.0"
] | 43
|
2017-03-03T00:38:08.000Z
|
2022-01-30T11:44:17.000Z
|
qpth/qp.py
|
zhangdongkun98/qpth
|
255dd55596685f2eff3db584bb44c4dfdbba1f28
|
[
"Apache-2.0"
] | 87
|
2017-03-02T11:24:34.000Z
|
2022-03-11T11:28:35.000Z
|
import torch
from torch.autograd import Function
from .util import bger, expandParam, extract_nBatch
from . import solvers
from .solvers.pdipm import batch as pdipm_b
from .solvers.pdipm import spbatch as pdipm_spb
# from .solvers.pdipm import single as pdipm_s
from enum import Enum
class QPSolvers(Enum):
PDIPM_BATCHED = 1
CVXPY = 2
def QPFunction(eps=1e-12, verbose=0, notImprovedLim=3,
maxIter=20, solver=QPSolvers.PDIPM_BATCHED,
check_Q_spd=True):
class QPFunctionFn(Function):
@staticmethod
def forward(ctx, Q_, p_, G_, h_, A_, b_):
"""Solve a batch of QPs.
This function solves a batch of QPs, each optimizing over
`nz` variables and having `nineq` inequality constraints
and `neq` equality constraints.
The optimization problem for each instance in the batch
(dropping indexing from the notation) is of the form
\hat z = argmin_z 1/2 z^T Q z + p^T z
subject to Gz <= h
Az = b
where Q \in S^{nz,nz},
S^{nz,nz} is the set of all positive semi-definite matrices,
p \in R^{nz}
G \in R^{nineq,nz}
h \in R^{nineq}
A \in R^{neq,nz}
b \in R^{neq}
These parameters should all be passed to this function as
Variable- or Parameter-wrapped Tensors.
(See torch.autograd.Variable and torch.nn.parameter.Parameter)
If you want to solve a batch of QPs where `nz`, `nineq` and `neq`
are the same, but some of the contents differ across the
minibatch, you can pass in tensors in the standard way
where the first dimension indicates the batch example.
This can be done with some or all of the coefficients.
You do not need to add an extra dimension to coefficients
that will not change across all of the minibatch examples.
This function is able to infer such cases.
If you don't want to use any equality or inequality constraints,
you can set the appropriate values to:
e = Variable(torch.Tensor())
Parameters:
Q: A (nBatch, nz, nz) or (nz, nz) Tensor.
p: A (nBatch, nz) or (nz) Tensor.
G: A (nBatch, nineq, nz) or (nineq, nz) Tensor.
h: A (nBatch, nineq) or (nineq) Tensor.
A: A (nBatch, neq, nz) or (neq, nz) Tensor.
b: A (nBatch, neq) or (neq) Tensor.
Returns: \hat z: a (nBatch, nz) Tensor.
"""
nBatch = extract_nBatch(Q_, p_, G_, h_, A_, b_)
Q, _ = expandParam(Q_, nBatch, 3)
p, _ = expandParam(p_, nBatch, 2)
G, _ = expandParam(G_, nBatch, 3)
h, _ = expandParam(h_, nBatch, 2)
A, _ = expandParam(A_, nBatch, 3)
b, _ = expandParam(b_, nBatch, 2)
if check_Q_spd:
for i in range(nBatch):
e, _ = torch.eig(Q[i])
if not torch.all(e[:,0] > 0):
raise RuntimeError('Q is not SPD.')
_, nineq, nz = G.size()
neq = A.size(1) if A.nelement() > 0 else 0
assert(neq > 0 or nineq > 0)
ctx.neq, ctx.nineq, ctx.nz = neq, nineq, nz
if solver == QPSolvers.PDIPM_BATCHED:
ctx.Q_LU, ctx.S_LU, ctx.R = pdipm_b.pre_factor_kkt(Q, G, A)
zhats, ctx.nus, ctx.lams, ctx.slacks = pdipm_b.forward(
Q, p, G, h, A, b, ctx.Q_LU, ctx.S_LU, ctx.R,
eps, verbose, notImprovedLim, maxIter)
elif solver == QPSolvers.CVXPY:
vals = torch.Tensor(nBatch).type_as(Q)
zhats = torch.Tensor(nBatch, ctx.nz).type_as(Q)
lams = torch.Tensor(nBatch, ctx.nineq).type_as(Q)
nus = torch.Tensor(nBatch, ctx.neq).type_as(Q) \
if ctx.neq > 0 else torch.Tensor()
slacks = torch.Tensor(nBatch, ctx.nineq).type_as(Q)
for i in range(nBatch):
Ai, bi = (A[i], b[i]) if neq > 0 else (None, None)
vals[i], zhati, nui, lami, si = solvers.cvxpy.forward_single_np(
*[x.cpu().numpy() if x is not None else None
for x in (Q[i], p[i], G[i], h[i], Ai, bi)])
# if zhati[0] is None:
# import IPython, sys; IPython.embed(); sys.exit(-1)
zhats[i] = torch.Tensor(zhati)
lams[i] = torch.Tensor(lami)
slacks[i] = torch.Tensor(si)
if neq > 0:
nus[i] = torch.Tensor(nui)
ctx.vals = vals
ctx.lams = lams
ctx.nus = nus
ctx.slacks = slacks
else:
assert False
ctx.save_for_backward(zhats, Q_, p_, G_, h_, A_, b_)
return zhats
@staticmethod
def backward(ctx, dl_dzhat):
zhats, Q, p, G, h, A, b = ctx.saved_tensors
nBatch = extract_nBatch(Q, p, G, h, A, b)
Q, Q_e = expandParam(Q, nBatch, 3)
p, p_e = expandParam(p, nBatch, 2)
G, G_e = expandParam(G, nBatch, 3)
h, h_e = expandParam(h, nBatch, 2)
A, A_e = expandParam(A, nBatch, 3)
b, b_e = expandParam(b, nBatch, 2)
# neq, nineq, nz = ctx.neq, ctx.nineq, ctx.nz
neq, nineq = ctx.neq, ctx.nineq
if solver == QPSolvers.CVXPY:
ctx.Q_LU, ctx.S_LU, ctx.R = pdipm_b.pre_factor_kkt(Q, G, A)
# Clamp here to avoid issues coming up when the slacks are too small.
# TODO: A better fix would be to get lams and slacks from the
# solver that don't have this issue.
d = torch.clamp(ctx.lams, min=1e-8) / torch.clamp(ctx.slacks, min=1e-8)
pdipm_b.factor_kkt(ctx.S_LU, ctx.R, d)
dx, _, dlam, dnu = pdipm_b.solve_kkt(
ctx.Q_LU, d, G, A, ctx.S_LU,
dl_dzhat, torch.zeros(nBatch, nineq).type_as(G),
torch.zeros(nBatch, nineq).type_as(G),
torch.zeros(nBatch, neq).type_as(G) if neq > 0 else torch.Tensor())
dps = dx
dGs = bger(dlam, zhats) + bger(ctx.lams, dx)
if G_e:
dGs = dGs.mean(0)
dhs = -dlam
if h_e:
dhs = dhs.mean(0)
if neq > 0:
dAs = bger(dnu, zhats) + bger(ctx.nus, dx)
dbs = -dnu
if A_e:
dAs = dAs.mean(0)
if b_e:
dbs = dbs.mean(0)
else:
dAs, dbs = None, None
dQs = 0.5 * (bger(dx, zhats) + bger(zhats, dx))
if Q_e:
dQs = dQs.mean(0)
if p_e:
dps = dps.mean(0)
grads = (dQs, dps, dGs, dhs, dAs, dbs)
return grads
return QPFunctionFn.apply
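# Minimal usage sketch (illustrative only): solve a small batch of inequality-
# constrained QPs with the default batched PDIPM solver. The problem data is
# made up; Q is built as M^T M + eps*I so the SPD check above passes, and the
# empty tensors follow the docstring's advice for omitting equality constraints.
def _example_qp():
    torch.manual_seed(0)
    nBatch, nz, nineq = 4, 3, 2
    M = torch.randn(nz, nz)
    Q = M.t().mm(M) + 1e-3 * torch.eye(nz)            # shared across the batch
    p = torch.randn(nBatch, nz)
    G = torch.randn(nineq, nz)
    h = torch.ones(nineq)
    e = torch.Tensor()                                # no equality constraints
    zhats = QPFunction(verbose=0)(Q, p, G, h, e, e)   # (nBatch, nz)
    return zhats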
class SpQPFunction(Function):
def __init__(self, Qi, Qsz, Gi, Gsz, Ai, Asz,
eps=1e-12, verbose=0, notImprovedLim=3, maxIter=20):
self.Qi, self.Qsz = Qi, Qsz
self.Gi, self.Gsz = Gi, Gsz
self.Ai, self.Asz = Ai, Asz
self.eps = eps
self.verbose = verbose
self.notImprovedLim = notImprovedLim
self.maxIter = maxIter
self.nineq, self.nz = Gsz
self.neq, _ = Asz
def forward(self, Qv, p, Gv, h, Av, b):
self.nBatch = Qv.size(0)
zhats, self.nus, self.lams, self.slacks = pdipm_spb.forward(
self.Qi, Qv, self.Qsz, p, self.Gi, Gv, self.Gsz, h,
self.Ai, Av, self.Asz, b, self.eps, self.verbose,
self.notImprovedLim, self.maxIter)
self.save_for_backward(zhats, Qv, p, Gv, h, Av, b)
return zhats
def backward(self, dl_dzhat):
zhats, Qv, p, Gv, h, Av, b = self.saved_tensors
Di = type(self.Qi)([range(self.nineq), range(self.nineq)])
Dv = self.lams / self.slacks
Dsz = torch.Size([self.nineq, self.nineq])
dx, _, dlam, dnu = pdipm_spb.solve_kkt(
self.Qi, Qv, self.Qsz, Di, Dv, Dsz,
self.Gi, Gv, self.Gsz,
self.Ai, Av, self.Asz, dl_dzhat,
type(p)(self.nBatch, self.nineq).zero_(),
type(p)(self.nBatch, self.nineq).zero_(),
type(p)(self.nBatch, self.neq).zero_())
dps = dx
dGs = bger(dlam, zhats) + bger(self.lams, dx)
GM = torch.cuda.sparse.DoubleTensor(
self.Gi, Gv[0].clone().fill_(1.0), self.Gsz
).to_dense().byte().expand_as(dGs)
dGs = dGs[GM].view_as(Gv)
dhs = -dlam
dAs = bger(dnu, zhats) + bger(self.nus, dx)
AM = torch.cuda.sparse.DoubleTensor(
self.Ai, Av[0].clone().fill_(1.0), self.Asz
).to_dense().byte().expand_as(dAs)
dAs = dAs[AM].view_as(Av)
dbs = -dnu
dQs = 0.5 * (bger(dx, zhats) + bger(zhats, dx))
QM = torch.cuda.sparse.DoubleTensor(
self.Qi, Qv[0].clone().fill_(1.0), self.Qsz
).to_dense().byte().expand_as(dQs)
dQs = dQs[QM].view_as(Qv)
grads = (dQs, dps, dGs, dhs, dAs, dbs)
return grads
| 37.596838
| 84
| 0.512721
|
794f6435b4f8d18b1f13b0c83a85c68696eccaa1
| 1,009
|
py
|
Python
|
azure/mgmt/network/v2015_06_15/models/application_gateway_paged.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
azure/mgmt/network/v2015_06_15/models/application_gateway_paged.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
azure/mgmt/network/v2015_06_15/models/application_gateway_paged.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class ApplicationGatewayPaged(Paged):
"""
    A paging container for iterating over a list of :class:`ApplicationGateway <azure.mgmt.network.v2015_06_15.models.ApplicationGateway>` objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[ApplicationGateway]'}
}
def __init__(self, *args, **kwargs):
super(ApplicationGatewayPaged, self).__init__(*args, **kwargs)
| 36.035714
| 146
| 0.5778
|
794f64ec9967e891aa907fe16dcab26b13146243
| 1,479
|
py
|
Python
|
airflow/migrations/versions/b247b1e3d1ed_add_queued_by_job_id_to_ti.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 15,947
|
2019-01-05T13:51:02.000Z
|
2022-03-31T23:33:16.000Z
|
airflow/migrations/versions/b247b1e3d1ed_add_queued_by_job_id_to_ti.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 14,603
|
2019-01-05T09:43:19.000Z
|
2022-03-31T23:11:59.000Z
|
airflow/migrations/versions/b247b1e3d1ed_add_queued_by_job_id_to_ti.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 8,429
|
2019-01-05T19:45:47.000Z
|
2022-03-31T22:13:01.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add queued by Job ID to TI
Revision ID: b247b1e3d1ed
Revises: e38be357a868
Create Date: 2020-09-04 11:53:00.978882
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'b247b1e3d1ed'
down_revision = 'e38be357a868'
branch_labels = None
depends_on = None
def upgrade():
"""Apply Add queued by Job ID to TI"""
with op.batch_alter_table('task_instance') as batch_op:
batch_op.add_column(sa.Column('queued_by_job_id', sa.Integer(), nullable=True))
def downgrade():
"""Unapply Add queued by Job ID to TI"""
with op.batch_alter_table('task_instance') as batch_op:
batch_op.drop_column('queued_by_job_id')
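# Editorial note: on most backends the upgrade above issues a plain
#   ALTER TABLE task_instance ADD COLUMN queued_by_job_id INTEGER
# while the batch_alter_table() context lets the same migration run on SQLite,
# which recreates the table instead of altering it in place.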
| 31.468085
| 87
| 0.749831
|
794f6538fed2d639a29939eead7d574094d94555
| 1,483
|
py
|
Python
|
google/ads/googleads/v9/errors/types/resource_count_limit_exceeded_error.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v9/errors/types/resource_count_limit_exceeded_error.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v9/errors/types/resource_count_limit_exceeded_error.py
|
JakobSteixner/google-ads-python
|
df2b802cc7e78295a4ece21cc7ef3787cd35dab0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.errors",
marshal="google.ads.googleads.v9",
manifest={"ResourceCountLimitExceededErrorEnum",},
)
class ResourceCountLimitExceededErrorEnum(proto.Message):
r"""Container for enum describing possible resource count limit
exceeded errors.
"""
class ResourceCountLimitExceededError(proto.Enum):
r"""Enum describing possible resource count limit exceeded
errors.
"""
UNSPECIFIED = 0
UNKNOWN = 1
ACCOUNT_LIMIT = 2
CAMPAIGN_LIMIT = 3
ADGROUP_LIMIT = 4
AD_GROUP_AD_LIMIT = 5
AD_GROUP_CRITERION_LIMIT = 6
SHARED_SET_LIMIT = 7
MATCHING_FUNCTION_LIMIT = 8
RESPONSE_ROW_LIMIT_EXCEEDED = 9
RESOURCE_LIMIT = 10
__all__ = tuple(sorted(__protobuf__.manifest))
| 29.66
| 74
| 0.703304
|
794f6546113983a17df4517da4bfb5c024bbd621
| 14,050
|
py
|
Python
|
homeassistant/util/color.py
|
mdonoughe/home-assistant
|
d9805160bc787146bff0c434fdcab995716f0f8c
|
[
"Apache-2.0"
] | 2
|
2020-02-20T18:47:55.000Z
|
2021-11-09T11:33:28.000Z
|
homeassistant/util/color.py
|
mdonoughe/home-assistant
|
d9805160bc787146bff0c434fdcab995716f0f8c
|
[
"Apache-2.0"
] | 1
|
2021-02-08T20:56:06.000Z
|
2021-02-08T20:56:06.000Z
|
homeassistant/util/color.py
|
diophung/home-assistant
|
a5aa1118937702ca8bec050614ee52dc14f8466b
|
[
"Apache-2.0"
] | 1
|
2020-11-21T09:37:47.000Z
|
2020-11-21T09:37:47.000Z
|
"""Color util methods."""
import logging
import math
import colorsys
from typing import Tuple
_LOGGER = logging.getLogger(__name__)
# Official CSS3 colors from w3.org:
# https://www.w3.org/TR/2010/PR-css3-color-20101028/#html4
# names do not have spaces in them so that we can compare against
# requests more easily (by removing spaces from the requests as well).
# This lets "dark seagreen" and "dark sea green" both match the same
# color "darkseagreen".
COLORS = {
'aliceblue': (240, 248, 255),
'antiquewhite': (250, 235, 215),
'aqua': (0, 255, 255),
'aquamarine': (127, 255, 212),
'azure': (240, 255, 255),
'beige': (245, 245, 220),
'bisque': (255, 228, 196),
'black': (0, 0, 0),
'blanchedalmond': (255, 235, 205),
'blue': (0, 0, 255),
'blueviolet': (138, 43, 226),
'brown': (165, 42, 42),
'burlywood': (222, 184, 135),
'cadetblue': (95, 158, 160),
'chartreuse': (127, 255, 0),
'chocolate': (210, 105, 30),
'coral': (255, 127, 80),
'cornflowerblue': (100, 149, 237),
'cornsilk': (255, 248, 220),
'crimson': (220, 20, 60),
'cyan': (0, 255, 255),
'darkblue': (0, 0, 139),
'darkcyan': (0, 139, 139),
'darkgoldenrod': (184, 134, 11),
'darkgray': (169, 169, 169),
'darkgreen': (0, 100, 0),
'darkgrey': (169, 169, 169),
'darkkhaki': (189, 183, 107),
'darkmagenta': (139, 0, 139),
'darkolivegreen': (85, 107, 47),
'darkorange': (255, 140, 0),
'darkorchid': (153, 50, 204),
'darkred': (139, 0, 0),
'darksalmon': (233, 150, 122),
'darkseagreen': (143, 188, 143),
'darkslateblue': (72, 61, 139),
'darkslategray': (47, 79, 79),
'darkslategrey': (47, 79, 79),
'darkturquoise': (0, 206, 209),
'darkviolet': (148, 0, 211),
'deeppink': (255, 20, 147),
'deepskyblue': (0, 191, 255),
'dimgray': (105, 105, 105),
'dimgrey': (105, 105, 105),
'dodgerblue': (30, 144, 255),
'firebrick': (178, 34, 34),
'floralwhite': (255, 250, 240),
'forestgreen': (34, 139, 34),
'fuchsia': (255, 0, 255),
'gainsboro': (220, 220, 220),
'ghostwhite': (248, 248, 255),
'gold': (255, 215, 0),
'goldenrod': (218, 165, 32),
'gray': (128, 128, 128),
'green': (0, 128, 0),
'greenyellow': (173, 255, 47),
'grey': (128, 128, 128),
'honeydew': (240, 255, 240),
'hotpink': (255, 105, 180),
'indianred': (205, 92, 92),
'indigo': (75, 0, 130),
'ivory': (255, 255, 240),
'khaki': (240, 230, 140),
'lavender': (230, 230, 250),
'lavenderblush': (255, 240, 245),
'lawngreen': (124, 252, 0),
'lemonchiffon': (255, 250, 205),
'lightblue': (173, 216, 230),
'lightcoral': (240, 128, 128),
'lightcyan': (224, 255, 255),
'lightgoldenrodyellow': (250, 250, 210),
'lightgray': (211, 211, 211),
'lightgreen': (144, 238, 144),
'lightgrey': (211, 211, 211),
'lightpink': (255, 182, 193),
'lightsalmon': (255, 160, 122),
'lightseagreen': (32, 178, 170),
'lightskyblue': (135, 206, 250),
'lightslategray': (119, 136, 153),
'lightslategrey': (119, 136, 153),
'lightsteelblue': (176, 196, 222),
'lightyellow': (255, 255, 224),
'lime': (0, 255, 0),
'limegreen': (50, 205, 50),
'linen': (250, 240, 230),
'magenta': (255, 0, 255),
'maroon': (128, 0, 0),
'mediumaquamarine': (102, 205, 170),
'mediumblue': (0, 0, 205),
'mediumorchid': (186, 85, 211),
'mediumpurple': (147, 112, 219),
'mediumseagreen': (60, 179, 113),
'mediumslateblue': (123, 104, 238),
'mediumspringgreen': (0, 250, 154),
'mediumturquoise': (72, 209, 204),
    'mediumvioletred': (199, 21, 133),
'midnightblue': (25, 25, 112),
'mintcream': (245, 255, 250),
'mistyrose': (255, 228, 225),
'moccasin': (255, 228, 181),
'navajowhite': (255, 222, 173),
'navy': (0, 0, 128),
'navyblue': (0, 0, 128),
'oldlace': (253, 245, 230),
'olive': (128, 128, 0),
'olivedrab': (107, 142, 35),
'orange': (255, 165, 0),
'orangered': (255, 69, 0),
'orchid': (218, 112, 214),
'palegoldenrod': (238, 232, 170),
'palegreen': (152, 251, 152),
'paleturquoise': (175, 238, 238),
'palevioletred': (219, 112, 147),
'papayawhip': (255, 239, 213),
'peachpuff': (255, 218, 185),
'peru': (205, 133, 63),
'pink': (255, 192, 203),
'plum': (221, 160, 221),
'powderblue': (176, 224, 230),
'purple': (128, 0, 128),
'red': (255, 0, 0),
'rosybrown': (188, 143, 143),
'royalblue': (65, 105, 225),
'saddlebrown': (139, 69, 19),
'salmon': (250, 128, 114),
'sandybrown': (244, 164, 96),
'seagreen': (46, 139, 87),
'seashell': (255, 245, 238),
'sienna': (160, 82, 45),
'silver': (192, 192, 192),
'skyblue': (135, 206, 235),
'slateblue': (106, 90, 205),
'slategray': (112, 128, 144),
'slategrey': (112, 128, 144),
'snow': (255, 250, 250),
'springgreen': (0, 255, 127),
'steelblue': (70, 130, 180),
'tan': (210, 180, 140),
'teal': (0, 128, 128),
'thistle': (216, 191, 216),
'tomato': (255, 99, 71),
'turquoise': (64, 224, 208),
'violet': (238, 130, 238),
'wheat': (245, 222, 179),
'white': (255, 255, 255),
'whitesmoke': (245, 245, 245),
'yellow': (255, 255, 0),
'yellowgreen': (154, 205, 50),
}
def color_name_to_rgb(color_name):
"""Convert color name to RGB hex value."""
# COLORS map has no spaces in it, so make the color_name have no
# spaces in it as well for matching purposes
hex_value = COLORS.get(color_name.replace(' ', '').lower())
if not hex_value:
_LOGGER.error('unknown color supplied %s default to white', color_name)
hex_value = COLORS['white']
return hex_value
# Taken from:
# http://www.developers.meethue.com/documentation/color-conversions-rgb-xy
# License: Code is given as is. Use at your own risk and discretion.
# pylint: disable=invalid-name, invalid-sequence-index
def color_RGB_to_xy(iR: int, iG: int, iB: int) -> Tuple[float, float, int]:
"""Convert from RGB color to XY color."""
if iR + iG + iB == 0:
return 0.0, 0.0, 0
R = iR / 255
B = iB / 255
G = iG / 255
# Gamma correction
R = pow((R + 0.055) / (1.0 + 0.055),
2.4) if (R > 0.04045) else (R / 12.92)
G = pow((G + 0.055) / (1.0 + 0.055),
2.4) if (G > 0.04045) else (G / 12.92)
B = pow((B + 0.055) / (1.0 + 0.055),
2.4) if (B > 0.04045) else (B / 12.92)
# Wide RGB D65 conversion formula
X = R * 0.664511 + G * 0.154324 + B * 0.162028
Y = R * 0.313881 + G * 0.668433 + B * 0.047685
Z = R * 0.000088 + G * 0.072310 + B * 0.986039
# Convert XYZ to xy
x = X / (X + Y + Z)
y = Y / (X + Y + Z)
# Brightness
Y = 1 if Y > 1 else Y
brightness = round(Y * 255)
return round(x, 3), round(y, 3), brightness
# Converted to Python from Obj-C, original source from:
# http://www.developers.meethue.com/documentation/color-conversions-rgb-xy
# pylint: disable=invalid-sequence-index
def color_xy_brightness_to_RGB(vX: float, vY: float,
ibrightness: int) -> Tuple[int, int, int]:
"""Convert from XYZ to RGB."""
brightness = ibrightness / 255.
if brightness == 0:
return (0, 0, 0)
Y = brightness
if vY == 0:
vY += 0.00000000001
X = (Y / vY) * vX
Z = (Y / vY) * (1 - vX - vY)
# Convert to RGB using Wide RGB D65 conversion.
r = X * 1.656492 - Y * 0.354851 - Z * 0.255038
g = -X * 0.707196 + Y * 1.655397 + Z * 0.036152
b = X * 0.051713 - Y * 0.121364 + Z * 1.011530
# Apply reverse gamma correction.
r, g, b = map(
lambda x: (12.92 * x) if (x <= 0.0031308) else
((1.0 + 0.055) * pow(x, (1.0 / 2.4)) - 0.055),
[r, g, b]
)
# Bring all negative components to zero.
r, g, b = map(lambda x: max(0, x), [r, g, b])
# If one component is greater than 1, weight components by that value.
max_component = max(r, g, b)
if max_component > 1:
r, g, b = map(lambda x: x / max_component, [r, g, b])
ir, ig, ib = map(lambda x: int(x * 255), [r, g, b])
return (ir, ig, ib)
# pylint: disable=invalid-sequence-index
def color_hsb_to_RGB(fH: float, fS: float, fB: float) -> Tuple[int, int, int]:
"""Convert a hsb into its rgb representation."""
if fS == 0:
        fV = int(fB * 255)
        return (fV, fV, fV)
r = g = b = 0
h = fH / 60
f = h - float(math.floor(h))
p = fB * (1 - fS)
q = fB * (1 - fS * f)
t = fB * (1 - (fS * (1 - f)))
if int(h) == 0:
r = int(fB * 255)
g = int(t * 255)
b = int(p * 255)
elif int(h) == 1:
r = int(q * 255)
g = int(fB * 255)
b = int(p * 255)
elif int(h) == 2:
r = int(p * 255)
g = int(fB * 255)
b = int(t * 255)
elif int(h) == 3:
r = int(p * 255)
g = int(q * 255)
b = int(fB * 255)
elif int(h) == 4:
r = int(t * 255)
g = int(p * 255)
b = int(fB * 255)
elif int(h) == 5:
r = int(fB * 255)
g = int(p * 255)
b = int(q * 255)
return (r, g, b)
# pylint: disable=invalid-sequence-index
def color_RGB_to_hsv(iR: int, iG: int, iB: int) -> Tuple[int, int, int]:
"""Convert an rgb color to its hsv representation."""
fHSV = colorsys.rgb_to_hsv(iR/255.0, iG/255.0, iB/255.0)
return (int(fHSV[0]*65536), int(fHSV[1]*255), int(fHSV[2]*255))
# pylint: disable=invalid-sequence-index
def color_hsv_to_RGB(iH: int, iS: int, iV: int) -> Tuple[int, int, int]:
"""Convert an hsv color into its rgb representation."""
fRGB = colorsys.hsv_to_rgb(iH/65536, iS/255, iV/255)
return (int(fRGB[0]*255), int(fRGB[1]*255), int(fRGB[2]*255))
# pylint: disable=invalid-sequence-index
def color_xy_to_hs(vX: float, vY: float) -> Tuple[int, int]:
"""Convert an xy color to its hs representation."""
h, s, _ = color_RGB_to_hsv(*color_xy_brightness_to_RGB(vX, vY, 255))
return (h, s)
# pylint: disable=invalid-sequence-index
def _match_max_scale(input_colors: Tuple[int, ...],
output_colors: Tuple[int, ...]) -> Tuple[int, ...]:
"""Match the maximum value of the output to the input."""
max_in = max(input_colors)
max_out = max(output_colors)
if max_out == 0:
factor = 0.0
else:
factor = max_in / max_out
return tuple(int(round(i * factor)) for i in output_colors)
def color_rgb_to_rgbw(r, g, b):
"""Convert an rgb color to an rgbw representation."""
# Calculate the white channel as the minimum of input rgb channels.
# Subtract the white portion from the remaining rgb channels.
w = min(r, g, b)
rgbw = (r - w, g - w, b - w, w)
# Match the output maximum value to the input. This ensures the full
# channel range is used.
return _match_max_scale((r, g, b), rgbw)
def color_rgbw_to_rgb(r, g, b, w):
"""Convert an rgbw color to an rgb representation."""
# Add the white channel back into the rgb channels.
rgb = (r + w, g + w, b + w)
# Match the output maximum value to the input. This ensures the
# output doesn't overflow.
return _match_max_scale((r, g, b, w), rgb)
def color_rgb_to_hex(r, g, b):
"""Return a RGB color from a hex color string."""
return '{0:02x}{1:02x}{2:02x}'.format(r, g, b)
def rgb_hex_to_rgb_list(hex_string):
"""Return an RGB color value list from a hex color string."""
return [int(hex_string[i:i + len(hex_string) // 3], 16)
for i in range(0,
len(hex_string),
len(hex_string) // 3)]
def color_temperature_to_rgb(color_temperature_kelvin):
"""
Return an RGB color from a color temperature in Kelvin.
This is a rough approximation based on the formula provided by T. Helland
http://www.tannerhelland.com/4435/convert-temperature-rgb-algorithm-code/
"""
# range check
if color_temperature_kelvin < 1000:
color_temperature_kelvin = 1000
elif color_temperature_kelvin > 40000:
color_temperature_kelvin = 40000
tmp_internal = color_temperature_kelvin / 100.0
red = _get_red(tmp_internal)
green = _get_green(tmp_internal)
blue = _get_blue(tmp_internal)
return (red, green, blue)
def _bound(color_component: float, minimum: float=0,
maximum: float=255) -> float:
"""
Bound the given color component value between the given min and max values.
The minimum and maximum values will be included in the valid output.
i.e. Given a color_component of 0 and a minimum of 10, the returned value
will be 10.
"""
color_component_out = max(color_component, minimum)
return min(color_component_out, maximum)
def _get_red(temperature: float) -> float:
"""Get the red component of the temperature in RGB space."""
if temperature <= 66:
return 255
tmp_red = 329.698727446 * math.pow(temperature - 60, -0.1332047592)
return _bound(tmp_red)
def _get_green(temperature: float) -> float:
"""Get the green component of the given color temp in RGB space."""
if temperature <= 66:
green = 99.4708025861 * math.log(temperature) - 161.1195681661
else:
green = 288.1221695283 * math.pow(temperature - 60, -0.0755148492)
return _bound(green)
def _get_blue(temperature: float) -> float:
"""Get the blue component of the given color temperature in RGB space."""
if temperature >= 66:
return 255
if temperature <= 19:
return 0
blue = 138.5177312231 * math.log(temperature - 10) - 305.0447927307
return _bound(blue)
def color_temperature_mired_to_kelvin(mired_temperature):
"""Convert absolute mired shift to degrees kelvin."""
return math.floor(1000000 / mired_temperature)
def color_temperature_kelvin_to_mired(kelvin_temperature):
"""Convert degrees kelvin to mired shift."""
return math.floor(1000000 / kelvin_temperature)
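# Illustrative usage sketch (not part of the original module): round-trip a
# named color through the xy helpers and convert a color temperature; the
# values in the comments are approximate.
if __name__ == "__main__":
    rgb = color_name_to_rgb('dark sea green')          # (143, 188, 143)
    x, y, brightness = color_RGB_to_xy(*rgb)
    print(color_xy_brightness_to_RGB(x, y, brightness))
    print(color_temperature_to_rgb(2700))              # warm white, roughly (255, 167, 87)
    print(color_rgb_to_rgbw(255, 160, 122))            # 'lightsalmon' with its white channel extracted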
| 31.715576
| 79
| 0.575943
|
794f66f98437dbdef2a2e1b9f1ffa5042390d1c4
| 490
|
py
|
Python
|
pyro/contrib/bnn/utils.py
|
futurewarning/pyro
|
005032f10099188fea86f63b6baa46a27867983f
|
[
"Apache-2.0"
] | 4,959
|
2017-11-03T14:39:17.000Z
|
2019-02-04T16:14:30.000Z
|
pyro/contrib/bnn/utils.py
|
futurewarning/pyro
|
005032f10099188fea86f63b6baa46a27867983f
|
[
"Apache-2.0"
] | 985
|
2017-11-03T14:27:56.000Z
|
2019-02-02T18:52:54.000Z
|
pyro/contrib/bnn/utils.py
|
futurewarning/pyro
|
005032f10099188fea86f63b6baa46a27867983f
|
[
"Apache-2.0"
] | 564
|
2017-11-03T15:05:55.000Z
|
2019-01-31T14:02:29.000Z
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import torch
def xavier_uniform(D_in, D_out):
scale = math.sqrt(6.0 / float(D_in + D_out))
noise = torch.rand(D_in, D_out)
return 2.0 * scale * noise - scale
def adjoin_ones_vector(x):
return torch.cat([x, torch.ones(x.shape[:-1] + (1,)).type_as(x)], dim=-1)
def adjoin_zeros_vector(x):
return torch.cat([x, torch.zeros(x.shape[:-1] + (1,)).type_as(x)], dim=-1)
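# Illustrative sketch (not part of the original module): draw a Xavier-style
# weight matrix and append a bias column of ones to a feature batch.
if __name__ == "__main__":
    weights = xavier_uniform(D_in=4, D_out=2)            # entries in [-scale, scale)
    features = torch.randn(8, 4)
    features_with_bias = adjoin_ones_vector(features)    # shape (8, 5)
    print(weights.shape, features_with_bias.shape)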
| 23.333333
| 78
| 0.657143
|
794f677087ca003f851b8efbc538e42b502f7601
| 2,767
|
py
|
Python
|
src/enamlnative/widgets/web_view.py
|
codelv/enaml-native
|
04c3a015bcd649f374c5ecd98fcddba5e4fbdbdc
|
[
"MIT"
] | 237
|
2017-09-15T19:31:45.000Z
|
2022-03-17T04:22:20.000Z
|
src/enamlnative/widgets/web_view.py
|
codelv/enaml-native
|
04c3a015bcd649f374c5ecd98fcddba5e4fbdbdc
|
[
"MIT"
] | 74
|
2017-09-06T20:16:41.000Z
|
2022-03-05T13:34:35.000Z
|
src/enamlnative/widgets/web_view.py
|
codelv/enaml-native
|
04c3a015bcd649f374c5ecd98fcddba5e4fbdbdc
|
[
"MIT"
] | 22
|
2017-09-15T19:32:11.000Z
|
2022-03-17T18:33:39.000Z
|
"""
Copyright (c) 2017, Jairus Martin.
Distributed under the terms of the MIT License.
The full license is in the file LICENSE, distributed with this software.
Created on May 20, 2017
@author: jrm
"""
from atom.api import (
Typed, ForwardTyped, Unicode, Int, Event, Bool, observe, set_default
)
from enaml.core.declarative import d_
from .view_group import ViewGroup, ProxyViewGroup
class ProxyWebView(ProxyViewGroup):
""" The abstract definition of a proxy WebView object.
"""
#: A reference to the Label declaration.
declaration = ForwardTyped(lambda: WebView)
def set_url(self, url):
raise NotImplementedError
def set_source(self, source):
raise NotImplementedError
def do_reload(self):
raise NotImplementedError
def do_go_back(self):
raise NotImplementedError
def do_go_forward(self):
raise NotImplementedError
def do_zoom_in(self):
raise NotImplementedError
def do_zoom_out(self):
raise NotImplementedError
class WebView(ViewGroup):
""" A layout that places its children in a rectangular grid.
"""
#: Page load error occurred
error = d_(Bool(), writable=False)
#: Page error code
error_code = d_(Int(), writable=False)
#: Error message
error_message = d_(Unicode(), writable=False)
#: Enable javascript
javascript_enabled = d_(Bool(True))
#: Read only title from the loaded page
title = d_(Unicode(), writable=False)
#: Read only loading progress
progress = d_(Int(), writable=False)
#: State
loading = d_(Bool(), writable=False)
#: Loads the URL (if given)
url = d_(Unicode())
#: Loads the source (if given)
source = d_(Unicode())
#: Reloads the current URL.
reload = d_(Event())
#: Go back in history
go_back = d_(Event())
#: Go forward in history
go_forward = d_(Event())
#: Zoom in
zoom_in = d_(Event())
#: Zoom out
zoom_out = d_(Event())
    #: A reference to the ProxyWebView object.
proxy = Typed(ProxyWebView)
# -------------------------------------------------------------------------
# Observers
# -------------------------------------------------------------------------
@observe('javascript_enabled', 'url', 'reload', 'source',
'go_forward', 'go_back', 'zoom_in', 'zoom_out')
def _update_proxy(self, change):
""" An observer which sends the state change to the proxy.
"""
if change['type'] == 'event':
name = 'do_'+change['name']
if hasattr(self.proxy, name):
handler = getattr(self.proxy, name)
handler()
else:
super(WebView, self)._update_proxy(change)
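# Hypothetical usage sketch (enaml declarative syntax, not part of this
# module); the import path below is an assumption:
#
#   from enamlnative.widgets.api import WebView
#
#   WebView:
#       javascript_enabled = True
#       url = "https://www.example.com"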
| 23.853448
| 79
| 0.597037
|
794f6957d4cfbd4736cc14dc8600aa278833bbde
| 38,488
|
py
|
Python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_09_30/aio/operations/_disks_operations.py
|
dubiety/azure-sdk-for-python
|
62ffa839f5d753594cf0fe63668f454a9d87a346
|
[
"MIT"
] | 1
|
2022-02-01T18:50:12.000Z
|
2022-02-01T18:50:12.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_09_30/aio/operations/_disks_operations.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_09_30/aio/operations/_disks_operations.py
|
ellhe-blaster/azure-sdk-for-python
|
82193ba5e81cc5e5e5a5239bba58abe62e86f469
|
[
"MIT"
] | null | null | null |
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar, Union
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._disks_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_grant_access_request_initial, build_list_by_resource_group_request, build_list_request, build_revoke_access_request_initial, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DisksOperations:
"""DisksOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_09_30.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
disk_name: str,
disk: "_models.Disk",
**kwargs: Any
) -> "_models.Disk":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Disk"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-09-30") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(disk, 'Disk')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_name=disk_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Disk', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}"} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
disk_name: str,
disk: "_models.Disk",
**kwargs: Any
) -> AsyncLROPoller["_models.Disk"]:
"""Creates or updates a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:param disk: Disk object supplied in the body of the Put disk operation.
:type disk: ~azure.mgmt.compute.v2020_09_30.models.Disk
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Disk or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_09_30.models.Disk]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-09-30") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Disk"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
disk=disk,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}"} # type: ignore
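    # Illustrative call pattern (comment-only sketch, not generated code). The
    # client construction below is an assumption about the calling code and
    # must run inside an async function:
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.compute.aio import ComputeManagementClient
    #
    #   client = ComputeManagementClient(DefaultAzureCredential(), subscription_id)
    #   poller = await client.disks.begin_create_or_update(
    #       "my-resource-group", "my-disk",
    #       {"location": "eastus",
    #        "creation_data": {"create_option": "Empty"},
    #        "disk_size_gb": 64})
    #   disk = await poller.result()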
async def _update_initial(
self,
resource_group_name: str,
disk_name: str,
disk: "_models.DiskUpdate",
**kwargs: Any
) -> "_models.Disk":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Disk"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-09-30") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(disk, 'DiskUpdate')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_name=disk_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Disk', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}"} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
disk_name: str,
disk: "_models.DiskUpdate",
**kwargs: Any
) -> AsyncLROPoller["_models.Disk"]:
"""Updates (patches) a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:param disk: Disk object supplied in the body of the Patch disk operation.
:type disk: ~azure.mgmt.compute.v2020_09_30.models.DiskUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Disk or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_09_30.models.Disk]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-09-30") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Disk"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
disk=disk,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}"} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
disk_name: str,
**kwargs: Any
) -> "_models.Disk":
"""Gets information about a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Disk, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_09_30.models.Disk
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Disk"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-09-30") # type: str
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_name=disk_name,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}"} # type: ignore
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
disk_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-09-30") # type: str
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_name=disk_name,
api_version=api_version,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}"} # type: ignore
@distributed_trace_async
async def begin_delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
disk_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-09-30") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}"} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.DiskList"]:
"""Lists all the disks under a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DiskList or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_09_30.models.DiskList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-09-30") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
api_version=api_version,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DiskList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks"} # type: ignore
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.DiskList"]:
"""Lists all the disks under a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DiskList or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_09_30.models.DiskList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-09-30") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DiskList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks"} # type: ignore
async def _grant_access_initial(
self,
resource_group_name: str,
disk_name: str,
grant_access_data: "_models.GrantAccessData",
**kwargs: Any
) -> Optional["_models.AccessUri"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.AccessUri"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-09-30") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(grant_access_data, 'GrantAccessData')
request = build_grant_access_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_name=disk_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._grant_access_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AccessUri', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_grant_access_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess"} # type: ignore
@distributed_trace_async
async def begin_grant_access(
self,
resource_group_name: str,
disk_name: str,
grant_access_data: "_models.GrantAccessData",
**kwargs: Any
) -> AsyncLROPoller["_models.AccessUri"]:
"""Grants access to a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:param grant_access_data: Access data object supplied in the body of the get disk access
operation.
:type grant_access_data: ~azure.mgmt.compute.v2020_09_30.models.GrantAccessData
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AccessUri or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_09_30.models.AccessUri]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-09-30") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AccessUri"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._grant_access_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
grant_access_data=grant_access_data,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('AccessUri', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_grant_access.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess"} # type: ignore
async def _revoke_access_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
disk_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2020-09-30") # type: str
request = build_revoke_access_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_name=disk_name,
api_version=api_version,
template_url=self._revoke_access_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_revoke_access_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess"} # type: ignore
@distributed_trace_async
async def begin_revoke_access( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
disk_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Revokes access to a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2020-09-30") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._revoke_access_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_revoke_access.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess"} # type: ignore
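# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the generated
# client). It assumes a ComputeManagementClient that exposes these disk
# operations, azure-identity for credentials, and placeholder subscription,
# resource-group and disk names; none of those values come from this module.
if __name__ == "__main__":
    import asyncio

    async def _grant_and_revoke_demo():
        from azure.identity.aio import DefaultAzureCredential
        from azure.mgmt.compute.aio import ComputeManagementClient
        from azure.mgmt.compute.models import GrantAccessData

        async with DefaultAzureCredential() as credential:
            async with ComputeManagementClient(credential, "<subscription-id>") as client:
                # begin_grant_access is a long-running operation; the poller
                # resolves to an AccessUri carrying a time-limited SAS link.
                grant = GrantAccessData(access="Read", duration_in_seconds=3600)
                poller = await client.disks.begin_grant_access(
                    "<resource-group>", "<disk-name>", grant)
                access_uri = await poller.result()
                print(access_uri.access_sas)
                # begin_revoke_access resolves to None once access is removed.
                revoke_poller = await client.disks.begin_revoke_access(
                    "<resource-group>", "<disk-name>")
                await revoke_poller.result()

    asyncio.run(_grant_and_revoke_demo())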
| 45.067916
| 292
| 0.65384
|
794f6a07caed6eaa8277a08a7191e640b8efc7e8
| 11,770
|
py
|
Python
|
teamcat_service/docker_build/target/one_step_build/teamcat/model_managers/ci_model_manager.py
|
zhangyin2088/Teamcat
|
be9be8d7c1e58c8d2d22ab78d25783d9aee4de71
|
[
"Apache-2.0"
] | 6
|
2018-11-26T08:42:52.000Z
|
2020-06-01T08:33:48.000Z
|
teamcat_service/docker_build/target/one_step_build/teamcat/model_managers/ci_model_manager.py
|
zhangyin2088/Teamcat
|
be9be8d7c1e58c8d2d22ab78d25783d9aee4de71
|
[
"Apache-2.0"
] | null | null | null |
teamcat_service/docker_build/target/one_step_build/teamcat/model_managers/ci_model_manager.py
|
zhangyin2088/Teamcat
|
be9be8d7c1e58c8d2d22ab78d25783d9aee4de71
|
[
"Apache-2.0"
] | 1
|
2019-01-22T06:45:36.000Z
|
2019-01-22T06:45:36.000Z
|
# coding=utf-8
'''
Created on 2015-10-22
@author: zhangtiande
'''
from gatesidelib.common.simplelogger import SimpleLogger
from model_managers.model_manager import ModelManager
from gatesidelib.mongodb_helper import MongodbHelper
from doraemon.settings import MONGODB
class CIPluginManager(ModelManager):
'''
classdocs
'''
use_in_migrations = True
def all(self):
return super(CIPluginManager, self).get_queryset().filter(IsActive=1)
def get(self, plugin_id):
return super(CIPluginManager, self).get_queryset().get(id=plugin_id)
class CITaskManager(ModelManager):
'''
classdocs
'''
use_in_migrations = True
def all(self):
return super(CITaskManager, self).get_queryset().filter(IsActive=1)
def get(self, task_id):
result = None
try:
result = super(CITaskManager, self).get_queryset().get(id=task_id)
except Exception as ex:
SimpleLogger.exception(ex)
return result
def project_tasks(self, project_id):
result = None
try:
result = self.all().filter(Project=project_id)
except Exception as ex:
SimpleLogger.exception(ex)
return result
class CITaskFlowManager(ModelManager):
'''
classdocs
'''
use_in_migrations = True
def all(self):
return super(CITaskFlowManager, self).get_queryset().filter(IsActive=1)
def get(self, flow_id):
result = None
try:
result = super(CITaskFlowManager, self).get_queryset().get(id=flow_id)
except Exception as ex:
SimpleLogger.exception(ex)
return result
def project_taskflows(self, project_id):
result = None
try:
result = self.all().filter(Project=project_id)
except Exception as ex:
SimpleLogger.exception(ex)
return result
class CITaskFlowSectionManager(ModelManager):
'''
classdocs
'''
use_in_migrations = True
def all(self):
return super(CITaskFlowSectionManager, self).get_queryset().filter(IsActive=1)
def get(self, section_id):
result = None
try:
result = super(CITaskFlowSectionManager, self).get_queryset().get(id=section_id)
except Exception as ex:
SimpleLogger.exception(ex)
return result
def flow_sections(self, flow_id):
result = None
try:
result = self.all().filter(TaskFlow=flow_id)
except Exception as ex:
SimpleLogger.exception(ex)
return result
class CITaskFlowHistoryManager(ModelManager):
'''
classdocs
'''
use_in_migrations = True
def all(self):
return super(CITaskFlowHistoryManager, self).get_queryset().filter(IsActive=1)
def flow_history(self, flow_id):
    return self.all().filter(TaskFlow=flow_id)
def get(self, history_id):
result = None
try:
result = super(CITaskFlowHistoryManager, self).get_queryset().get(id=history_id)
except Exception as ex:
SimpleLogger.exception(ex)
return result
class CIFlowSectionHistoryManager(ModelManager):
'''
classdocs
'''
use_in_migrations = True
def all(self):
return super(CIFlowSectionHistoryManager, self).get_queryset().filter(IsActive=1)
def flow__section_history(self, flow_history_id):
    return self.all().filter(TaskFlowHistory=flow_history_id)
def get(self, history_id):
result = None
try:
result = super(CIFlowSectionHistoryManager, self).get_queryset().get(id=history_id)
except Exception as ex:
SimpleLogger.exception(ex)
return result
class CITaskHistoryManager(ModelManager):
'''
classdocs
'''
use_in_migrations = True
def all(self, is_active=1):
if is_active == 1:
return super(CITaskHistoryManager, self).get_queryset().filter(IsActive=is_active)
else:
return super(CITaskHistoryManager, self).get_queryset()
def get(self, history_id, is_active=1):
return self.all(is_active).get(id=history_id)
def get_by_tqid(self, tq_id):
result = None
try:
result = self.all().filter(TaskQueueID=tq_id)[0]
except Exception as ex:
SimpleLogger.exception(ex)
return result
def get_task_history(self, task_id, is_active=1):
result = None
try:
result = self.all(is_active).filter(CITaskID=task_id)
except Exception as ex:
SimpleLogger.exception(ex)
return result
def get_history_by_tq(self, tq_id):
result = list()
try:
result = self.all().filter(TaskQueueID=tq_id)
except Exception as ex:
SimpleLogger.exception(ex)
return result[0]
def get_history_by_sechistory(self, sec_history_id, is_active=1):
result = list()
try:
result = self.all(is_active).filter(FlowSectionHistory=sec_history_id)
except Exception as ex:
SimpleLogger.exception(ex)
return result
def get_history_by_uuid(self, uuid):
result = list()
try:
result = self.all().filter(TaskUUID=uuid)
except Exception as ex:
SimpleLogger.exception(ex)
return result[0]
class AutoTaskResultManager(ModelManager):
'''
classdocs
'''
use_in_migrations = True
def all(self):
return super(AutoTaskResultManager, self).get_queryset().filter(IsActive=1)
def get(self, result_id):
return self.all().get(id=result_id)
def get_by_historyid(self, history_id):
result = None
try:
result = self.all().filter(TaskHistoryID=history_id).filter(ParentResultID=0)[0]
except Exception as ex:
SimpleLogger.exception(ex)
return result
class AutoCaseResultManager(ModelManager):
'''
classdocs
'''
use_in_migrations = True
def all(self):
return super(AutoCaseResultManager, self).get_queryset()
def get(self, result_id):
result = None
try:
result = self.all().get(id=result_id)
except Exception as ex:
SimpleLogger.exception(ex)
return result
def get_by_resultid(self, result_id, result_type):
result = list()
try:
result = self.all().filter(TaskResultID=result_id)
if int(result_type) != 0:
result = result.filter(Result=int(result_type))
except Exception as ex:
SimpleLogger.exception(ex)
return result
class UnitTestCaseResultManager(ModelManager):
'''
classdocs
'''
use_in_migrations = True
def all(self):
return super(UnitTestCaseResultManager, self).get_queryset()
def get(self, result_id):
return self.all().get(id=result_id)
def get_by_task_result(self, result_id, result_type):
result = list()
try:
result = self.all().filter(TaskResultID=result_id)
if int(result_type) != 0:
result = result.filter(Result=int(result_type))
except Exception as ex:
SimpleLogger.exception(ex)
return result
class AutoCaseManager(ModelManager):
'''
classdocs
'''
use_in_migrations = True
def all(self):
return super(AutoCaseManager, self).get_queryset()
def get(self, case_id):
result = None
try:
result = self.all().get(id=case_id)
except Exception as ex:
SimpleLogger.exception(ex)
return result
def get_by_project(self, Project_id, case_type=None):
'''
case_type:filter case by case_type. case_type is a list.like:[1,2]
'''
result = None
try:
result = self.all().filter(ProjectID=Project_id)
if case_type:
result = result.filter(CaseType__in=case_type)
except Exception as ex:
SimpleLogger.exception(ex)
return result
class CaseTagManager(ModelManager):
'''
classdocs
'''
use_in_migrations = True
def all(self):
return super(CaseTagManager, self).get_queryset()
def get(self, tag_id):
return self.all().get(id=tag_id)
class ServiceHostManager(ModelManager):
'''
classdocs
'''
use_in_migrations = True
def all(self):
return super(ServiceHostManager, self).get_queryset().filter(IsActive=1)
def get(self, service_id):
return self.all().get(id=service_id)
def get_by_envid(self, env_id):
'''
case_type:filter case by case_type. case_type is a list.like:[1,2]
'''
result = list()
try:
result = self.all().filter(EnvID=env_id)
except Exception as ex:
SimpleLogger.exception(ex)
return result
class CICredentialsManager(ModelManager):
'''
classdocs
'''
use_in_migrations = True
def all(self):
return super(CICredentialsManager, self).get_queryset().filter(IsActive=1)
def get(self, task_id):
return super(CICredentialsManager, self).get_queryset().get(id=task_id)
def get_public_credentials(self):
return self.all().filter(Scope=1)
def get_my_credentials(self, user_id):
return self.all().filter(Creator=user_id)
class CIDeployServiceManager(ModelManager):
'''
classdocs
'''
db = MONGODB['default']['DB']
port = MONGODB['default']['PORT']
host = MONGODB['default']['HOST']
collection = "ci_deployservice"
default_db = "doraemon"
default_collection = "ci"
def all(self):
return super(CIDeployServiceManager, self).get_queryset().filter(IsActive=1)
def get(self, service_id):
result = None
try:
result = super(CIDeployServiceManager, self).get_queryset().get(id=service_id)
except Exception as ex:
SimpleLogger.exception(ex)
return result
def save_replace_config(self, value):
mongo_helper = MongodbHelper(CIDeployServiceManager.host, CIDeployServiceManager.port)
return mongo_helper.save(CIDeployServiceManager.db, CIDeployServiceManager.collection, value)
def get_replace_config(self, doc_id):
mongo_helper = MongodbHelper(CIDeployServiceManager.host, CIDeployServiceManager.port)
result = mongo_helper.get(CIDeployServiceManager.db, CIDeployServiceManager.collection, doc_id)
if result is None:
result = mongo_helper.get(CIDeployServiceManager.default_db, CIDeployServiceManager.default_collection,
doc_id)
return result
class CITaskConfigManager(object):
def __init__(self, mongo_host, mongo_port, db, collection):
self.host = mongo_host
self.port = mongo_port
self.db = db
self.collection = collection
self.default_db = "doraemon"
self.default_collection = "ci"
self.mongo_helper = MongodbHelper(self.host, self.port)
def save(self, value):
return self.mongo_helper.save(self.db, self.collection, value)
def remove(self, doc_id):
result = self.mongo_helper.remove(self.db, self.collection, doc_id)
if result is None:
result = self.mongo_helper.remove(self.default_db, self.default_collection, doc_id)
return result
def get(self, doc_id):
result = self.mongo_helper.get(self.db, self.collection, doc_id)
if result is None:
result = self.mongo_helper.get(self.default_db, self.default_collection, doc_id)
return result
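# Illustrative usage sketch (editor's addition): round-trips a task config
# document through MongoDB via CITaskConfigManager. The host, port, database
# and collection names below are assumptions for a local MongoDB instance,
# not values defined by this module.
if __name__ == "__main__":
    config_manager = CITaskConfigManager("127.0.0.1", 27017, "teamcat", "ci_task_config")
    doc_id = config_manager.save({"task": "nightly-build", "steps": ["checkout", "compile", "test"]})
    print(config_manager.get(doc_id))
    config_manager.remove(doc_id)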
| 27.564403
| 115
| 0.63322
|
794f6a608f5a188274ed5445cad3de3af4ab9d5d
| 1,081
|
py
|
Python
|
src/preprocessing/component.py
|
AmeyKamat/MEERA
|
30635b68995a9ce17024a76eb1b23cc1c300404f
|
[
"MIT"
] | 20
|
2019-01-30T17:03:50.000Z
|
2021-07-06T06:09:29.000Z
|
src/preprocessing/component.py
|
arpit006/MEERA
|
30635b68995a9ce17024a76eb1b23cc1c300404f
|
[
"MIT"
] | 22
|
2019-03-13T04:52:07.000Z
|
2020-07-17T07:25:44.000Z
|
src/preprocessing/component.py
|
arpit006/MEERA
|
30635b68995a9ce17024a76eb1b23cc1c300404f
|
[
"MIT"
] | 1
|
2020-07-18T18:52:39.000Z
|
2020-07-18T18:52:39.000Z
|
import sys
from circuits import Component, handler
from events import EntitiesPreprocessedEvent
from preprocessing.location import LocationPreprocessor
from preprocessing.date import DatePreprocessor
class PreprocessingComponent(Component):
preprocessors = {
"location": LocationPreprocessor(),
"source-location": LocationPreprocessor(),
"destination-location": LocationPreprocessor(),
"date": DatePreprocessor()
}
@handler("SkillRequestedEvent")
def preprocess(self, context):
try:
entities = context.nlp_analysis.entities
for key in entities.keys():
value = entities[key]
preprocessor = self.preprocessors.get(key)
if preprocessor is not None:
entities[key] = preprocessor.preprocess(value)
context.nlp_analysis.entities = entities
# pylint: disable=broad-except
except Exception as exception:
print(str(exception), file=sys.stderr)
self.fire(EntitiesPreprocessedEvent(context))
| 32.757576
| 66
| 0.666975
|
794f6ad69945b287dadeb965ee6ae8278a2a8feb
| 522
|
py
|
Python
|
500. Keyboard Row/main.py
|
Competitive-Programmers-Community/LeetCode
|
841fdee805b1a626e9f1cd0e12398d25054638af
|
[
"MIT"
] | 2
|
2019-10-05T09:48:20.000Z
|
2019-10-05T15:40:01.000Z
|
500. Keyboard Row/main.py
|
Competitive-Programmers-Community/LeetCode
|
841fdee805b1a626e9f1cd0e12398d25054638af
|
[
"MIT"
] | null | null | null |
500. Keyboard Row/main.py
|
Competitive-Programmers-Community/LeetCode
|
841fdee805b1a626e9f1cd0e12398d25054638af
|
[
"MIT"
] | 3
|
2020-09-27T05:48:30.000Z
|
2021-08-13T10:07:08.000Z
|
class Solution:
def findWords(self, words):
"""
:type words: List[str]
:rtype: List[str]
"""
row1="qwertyuiop"
row2="asdfghjkl"
row3="zxcvbnm"
res=[]
for word in words:
if set(word.lower()).issubset(set(row1)) or set(word.lower()).issubset(set(row2)) or set(word.lower()).issubset(set(row3)):
res.append(word)
return res
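# Quick check (editor's addition): the example from the problem statement,
# expected to print ['Alaska', 'Dad'].
if __name__ == "__main__":
    print(Solution().findWords(["Hello", "Alaska", "Dad", "Peace"]))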
| 24.857143
| 135
| 0.43295
|
794f6d5013c29aee3e8b6cdecb7a32806653e287
| 2,464
|
py
|
Python
|
pitop/robotics/pan_tilt_controller.py
|
pi-top/pi-top-Python-SDK
|
6c83cc5f612d77f86f8d391c7f2924a28f7b1232
|
[
"Apache-2.0"
] | 28
|
2020-11-24T08:02:58.000Z
|
2022-02-27T18:37:33.000Z
|
pitop/robotics/pan_tilt_controller.py
|
pi-top/pi-top-Python-SDK
|
6c83cc5f612d77f86f8d391c7f2924a28f7b1232
|
[
"Apache-2.0"
] | 263
|
2020-11-10T14:35:10.000Z
|
2022-03-31T12:35:13.000Z
|
pitop/robotics/pan_tilt_controller.py
|
pi-top/pi-top-Python-SDK
|
6c83cc5f612d77f86f8d391c7f2924a28f7b1232
|
[
"Apache-2.0"
] | 1
|
2022-01-31T22:48:35.000Z
|
2022-01-31T22:48:35.000Z
|
from pitop.core.mixins import Recreatable, Stateful
from pitop.pma import ServoMotor
from pitop.robotics.two_servo_assembly_calibrator import TwoServoAssemblyCalibrator
from .pan_tilt_object_tracker import PanTiltObjectTracker
class PanTiltController(Stateful, Recreatable):
CALIBRATION_FILE_NAME = "pan_tilt.conf"
_pan_servo = None
_tilt_servo = None
def __init__(self, servo_pan_port="S0", servo_tilt_port="S3", name="pan_tilt"):
self.name = name
self._pan_servo = ServoMotor(servo_pan_port)
self._tilt_servo = ServoMotor(servo_tilt_port)
self._object_tracker = PanTiltObjectTracker(
pan_servo=self._pan_servo, tilt_servo=self._tilt_servo
)
Stateful.__init__(self, children=["_pan_servo", "_tilt_servo"])
Recreatable.__init__(
self,
config_dict={
"servo_pan_port": servo_pan_port,
"servo_tilt_port": servo_tilt_port,
"name": name,
},
)
@property
def pan_servo(self):
return self._pan_servo
@property
def tilt_servo(self):
return self._tilt_servo
@property
def track_object(self) -> PanTiltObjectTracker:
return self._object_tracker
def calibrate(self, save=True, reset=False):
"""Calibrates the assembly to work in optimal conditions.
Based on the provided arguments, it will either load the calibration
values stored on the pi-top or run the calibration process, requesting
user input interactively.
:param bool reset:
If `true`, the existing calibration values will be reset, and the calibration process will be started.
If set to `false`, the calibration values will be retrieved from the calibration file.
:param bool save:
If `reset` is `true`, this parameter will cause the calibration values to be stored to the calibration file if set to `true`.
If `save=False`, the calibration values will only be used for the current session.
"""
calibration_object = TwoServoAssemblyCalibrator(
filename=self.CALIBRATION_FILE_NAME,
section_name="PAN_TILT",
servo_lookup_dict={
"pan_zero_point": self.pan_servo,
"tilt_zero_point": self.tilt_servo,
},
)
calibration_object.calibrate(save, reset)
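# Illustrative usage sketch (editor's addition): builds the controller with
# its default ports and runs a small pan/tilt move. It assumes a physical
# pi-top robotics kit and the ServoMotor `target_angle` property from the
# pi-top SDK; the angles chosen are arbitrary.
if __name__ == "__main__":
    pan_tilt = PanTiltController(servo_pan_port="S0", servo_tilt_port="S3")
    pan_tilt.calibrate(save=False, reset=False)
    pan_tilt.pan_servo.target_angle = 20
    pan_tilt.tilt_servo.target_angle = -10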
| 36.776119
| 137
| 0.663149
|
794f6d9ee8de41e41610b5af1f4a8a655b8bcbd1
| 105
|
py
|
Python
|
tests/settings_alphanumeric.py
|
pavanv/django-tastypie
|
b4ffc642aa56d25d3c577ccae0a03c820b71c4bc
|
[
"BSD-3-Clause"
] | 1,570
|
2015-02-03T10:19:33.000Z
|
2022-03-29T10:34:18.000Z
|
tests/settings_alphanumeric.py
|
pavanv/django-tastypie
|
b4ffc642aa56d25d3c577ccae0a03c820b71c4bc
|
[
"BSD-3-Clause"
] | 587
|
2015-02-06T13:59:23.000Z
|
2022-03-09T22:56:30.000Z
|
tests/settings_alphanumeric.py
|
pavanv/django-tastypie
|
b4ffc642aa56d25d3c577ccae0a03c820b71c4bc
|
[
"BSD-3-Clause"
] | 492
|
2015-02-07T06:18:36.000Z
|
2022-03-29T19:06:44.000Z
|
from settings import * # noqa
INSTALLED_APPS.append('alphanumeric')
ROOT_URLCONF = 'alphanumeric.urls'
| 21
| 37
| 0.780952
|
794f6de8a88fde80596f5459b402fde3c4ae2765
| 14,152
|
py
|
Python
|
scripts/nowcommons.py
|
ZabeMath/pywikibot
|
856a197c53efcb80b16475a8d203a4ecd79eee2f
|
[
"MIT"
] | 326
|
2017-11-21T07:04:19.000Z
|
2022-03-26T01:25:44.000Z
|
scripts/nowcommons.py
|
ZabeMath/pywikibot
|
856a197c53efcb80b16475a8d203a4ecd79eee2f
|
[
"MIT"
] | 17
|
2017-12-20T13:41:32.000Z
|
2022-02-16T16:42:41.000Z
|
scripts/nowcommons.py
|
ZabeMath/pywikibot
|
856a197c53efcb80b16475a8d203a4ecd79eee2f
|
[
"MIT"
] | 147
|
2017-11-22T19:13:40.000Z
|
2022-03-29T04:47:07.000Z
|
#!/usr/bin/python
r"""
Script to delete files that are also present on Wikimedia Commons.
Do not run this script on Wikimedia Commons itself. It works based on
a given array of templates defined below.
Files are downloaded and compared. If the files match, the local copy can be
deleted on the source wiki. If multiple versions of the file exist, or if the
SHA1 hashes do not match, the script will not delete the file.
Sysop rights on the local wiki are required if you want all features of
this script to work properly.
This script understands various command-line arguments:
-always run automatically, do not ask any questions. All files
that qualify for deletion are deleted. Reduced screen
output.
-replace replace links if the files are equal and the file names
differ
-replacealways replace links if the files are equal and the file names
differ without asking for confirmation
-replaceloose Do loose replacements. This will replace all occurrences
of the name of the file (and not just explicit file
syntax). This should work to catch all instances of the
file, including where it is used as a template parameter
or in galleries. However, it can also make more mistakes.
-replaceonly Use this if you do not have local sysop rights, but do
wish to replace links from the NowCommons template.
Example
-------
python pwb.py nowcommons -replaceonly -replaceloose -replacealways -replace
.. note:: This script is a
:py:obj:`ConfigParserBot <pywikibot.bot.ConfigParserBot>`. All options
can be set within a settings file which is scripts.ini by default.
"""
#
# (C) Pywikibot team, 2006-2021
#
# Distributed under the terms of the MIT license.
#
import sys
from itertools import chain
import pywikibot
from pywikibot import i18n
from pywikibot.bot import Bot, ConfigParserBot
from pywikibot import pagegenerators as pg
from pywikibot.exceptions import IsRedirectPageError, NoPageError
from pywikibot.tools import filter_unique
from pywikibot.tools.formatter import color_format
from scripts.image import ImageRobot as ImageBot
nowcommons = {
'_default': [
'NowCommons'
],
'ar': [
'الآن كومنز',
'الآن كومونز',
],
'de': [
'NowCommons',
'NC',
'Nowcommons',
'Now Commons',
'NowCommons/Mängel',
'NC/M',
],
'en': [
'NowCommons',
'Ncd',
],
'eo': [
'Nun en komunejo',
'NowCommons',
],
'fa': [
'موجود در انبار',
'NowCommons',
],
'fr': [
'Image sur Commons',
'DoublonCommons',
'Déjà sur Commons',
'Maintenant sur commons',
'Désormais sur Commons',
'NC',
'NowCommons',
'Nowcommons',
'Sharedupload',
'Sur Commons',
'Sur Commons2',
],
'he': [
'גם בוויקישיתוף'
],
'hu': [
'Azonnali-commons',
'NowCommons',
'Nowcommons',
'NC'
],
'ia': [
'OraInCommons'
],
'it': [
'NowCommons',
],
'ja': [
'NowCommons',
],
'ko': [
'공용중복',
'공용 중복',
'NowCommons',
'Now Commons',
'Nowcommons',
],
'nds-nl': [
'NoenCommons',
'NowCommons',
],
'nl': [
'NuCommons',
'Nucommons',
'NowCommons',
'Nowcommons',
'NCT',
'Nct',
],
'ro': [
'NowCommons'
],
'ru': [
'NowCommons',
'NCT',
'Nowcommons',
'Now Commons',
'Db-commons',
'Перенесено на Викисклад',
'На Викискладе',
],
'sr': [
'NowCommons',
'На Остави',
],
'zh': [
'NowCommons',
'Nowcommons',
'NCT',
],
}
namespace_in_template = [
'en',
'ia',
'it',
'ja',
'ko',
'lt',
'ro',
'zh',
]
class NowCommonsDeleteBot(Bot, ConfigParserBot):
"""Bot to delete migrated files.
.. versionchanged:: 7.0
NowCommonsDeleteBot is a ConfigParserBot
"""
update_options = {
'replace': False,
'replacealways': False,
'replaceloose': False,
'replaceonly': False,
}
def __init__(self, **kwargs):
"""Initializer."""
super().__init__(**kwargs)
self.site = pywikibot.Site()
if not self.site.has_image_repository:
sys.exit('There must be a file repository to run this script')
self.commons = self.site.image_repository()
if self.site == self.commons:
sys.exit(
'You cannot run this bot on file repository like Commons.')
self.summary = i18n.twtranslate(self.site,
'imagetransfer-nowcommons_notice')
def nc_templates_list(self):
"""Return nowcommons templates."""
if self.site.lang in nowcommons:
return nowcommons[self.site.lang]
return nowcommons['_default']
@property
def nc_templates(self):
"""A set of now commons template Page instances."""
if not hasattr(self, '_nc_templates'):
self._nc_templates = {pywikibot.Page(self.site, title, ns=10)
for title in self.nc_templates_list()}
return self._nc_templates
@property
def generator(self):
"""Generator method."""
gens = (t.getReferences(follow_redirects=True, namespaces=[6],
only_template_inclusion=True)
for t in self.nc_templates)
gen = chain(*gens)
gen = filter_unique(gen, key=lambda p: '{}:{}:{}'.format(*p._cmpkey()))
gen = pg.PreloadingGenerator(gen)
return gen
def find_file_on_commons(self, local_file_page):
"""Find filename on Commons."""
for template_name, params in local_file_page.templatesWithParams():
if template_name not in self.nc_templates:
continue
if not params:
file_on_commons = local_file_page.title(with_ns=False)
elif self.site.lang in namespace_in_template:
skip = False
file_on_commons = None
for par in params:
val = par.split('=')
if len(val) == 1 and not skip:
file_on_commons = par[par.find(':') + 1:]
break
if val[0].strip() == '1':
file_on_commons = \
val[1].strip()[val[1].strip().find(':') + 1:]
break
skip = True
if not file_on_commons:
file_on_commons = local_file_page.title(with_ns=False)
else:
val = params[0].split('=')
if len(val) == 1:
file_on_commons = params[0].strip()
else:
file_on_commons = val[1].strip()
return file_on_commons
def run(self):
"""Run the bot."""
commons = self.commons
comment = self.summary
for page in self.generator:
self.current_page = page
try:
local_file_page = pywikibot.FilePage(self.site, page.title())
if local_file_page.file_is_shared():
pywikibot.output('File is already on Commons.')
continue
sha1 = local_file_page.latest_file_info.sha1
file_on_commons = self.find_file_on_commons(local_file_page)
if not file_on_commons:
pywikibot.output('NowCommons template not found.')
continue
commons_file_page = pywikibot.FilePage(commons, 'File:'
+ file_on_commons)
if (local_file_page.title(with_ns=False)
!= commons_file_page.title(with_ns=False)):
using_pages = list(local_file_page.using_pages())
if using_pages and using_pages != [local_file_page]:
pywikibot.output(color_format(
'"{lightred}{0}{default}" '
'is still used in {1} pages.',
local_file_page.title(with_ns=False),
len(using_pages)))
if self.opt.replace:
pywikibot.output(color_format(
'Replacing "{lightred}{0}{default}" by '
'"{lightgreen}{1}{default}\".',
local_file_page.title(with_ns=False),
commons_file_page.title(with_ns=False)))
bot = ImageBot(
local_file_page.using_pages(),
local_file_page.title(with_ns=False),
commons_file_page.title(with_ns=False),
always=self.opt.replacealways,
loose=self.opt.replaceloose)
bot.run()
# If the image is used with the urlname the
# previous function won't work
is_used = bool(list(pywikibot.FilePage(
self.site,
page.title()).using_pages(total=1)))
if is_used and self.opt.replaceloose:
bot = ImageBot(
local_file_page.using_pages(),
local_file_page.title(with_ns=False,
as_url=True),
commons_file_page.title(with_ns=False),
always=self.opt.replacealways,
loose=self.opt.replaceloose)
bot.run()
# refresh because we want the updated list
using_pages = len(list(pywikibot.FilePage(
self.site, page.title()).using_pages()))
else:
pywikibot.output('Please change them manually.')
continue
pywikibot.output(color_format(
'No page is using "{lightgreen}{0}{default}" '
'anymore.',
local_file_page.title(with_ns=False)))
commons_text = commons_file_page.get()
if not self.opt.replaceonly:
if sha1 == commons_file_page.latest_file_info.sha1:
pywikibot.output(
'The file is identical to the one on Commons.')
if len(local_file_page.get_file_history()) > 1:
pywikibot.output(
'This file has a version history. Please '
'delete it manually after making sure that '
'the old versions are not worth keeping.')
continue
if self.opt.always is False:
format_str = color_format(
'\n\n>>>> Description on {lightpurple}%s'
'{default} <<<<\n')
pywikibot.output(format_str % page.title())
pywikibot.output(local_file_page.get())
pywikibot.output(format_str %
commons_file_page.title())
pywikibot.output(commons_text)
if pywikibot.input_yn(
'Does the description on Commons contain '
'all required source and license\n'
'information?',
default=False, automatic_quit=False):
local_file_page.delete(
'{} [[:commons:File:{}]]'
.format(comment, file_on_commons),
prompt=False)
else:
local_file_page.delete(
comment + ' [[:commons:File:{}]]'
.format(file_on_commons),
prompt=False)
else:
pywikibot.output('The file is not identical to '
'the one on Commons.')
except (NoPageError, IsRedirectPageError) as e:
pywikibot.output(str(e))
continue
else:
self.counter['read'] += 1
if not self.counter['read']:
pywikibot.output('No transcluded files found for {}.'
.format(self.nc_templates_list()[0]))
self.exit()
def main(*args: str) -> None:
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
:param args: command line arguments
"""
options = {}
for arg in pywikibot.handle_args(args):
if arg == '-replacealways':
options['replace'] = True
options['replacealways'] = True
elif arg.startswith('-'):
if arg[1:] in ('always', 'replace', 'replaceloose', 'replaceonly'):
options[arg[1:]] = True
bot = NowCommonsDeleteBot(**options)
bot.run()
if __name__ == '__main__':
main()
| 35.557789
| 79
| 0.491874
|
794f6e085e0d2efdeeb7e3780d90ebf40eae3007
| 287
|
py
|
Python
|
pandas/read_excel/read_excel.py
|
ybdesire/machinelearning
|
0224746332e1085336e0b02e0ca3b11d74bd9a91
|
[
"MIT"
] | 30
|
2017-02-28T13:52:58.000Z
|
2022-03-24T10:28:43.000Z
|
pandas/read_excel/read_excel.py
|
ybdesire/machinelearning
|
0224746332e1085336e0b02e0ca3b11d74bd9a91
|
[
"MIT"
] | null | null | null |
pandas/read_excel/read_excel.py
|
ybdesire/machinelearning
|
0224746332e1085336e0b02e0ca3b11d74bd9a91
|
[
"MIT"
] | 17
|
2017-03-03T12:38:04.000Z
|
2022-03-11T01:53:20.000Z
|
import pandas as pd
xl = pd.ExcelFile("test.xlsx")
print('sheet_names: {0}'.format(xl.sheet_names))
df = xl.parse("details")
for index, row in df.iterrows():
name = row['name']
age = row['age']
country = row['country']
print('{0},{1}'.format(country, name))
| 23.916667
| 49
| 0.602787
|
794f6e40c816e49068e37754a9f08e64174c7dc9
| 4,470
|
py
|
Python
|
proxypool/storages/redis.py
|
lixinjiang/ProxyPool
|
b39461f11ce0bdb81b0898fb7ce10075b4526d1f
|
[
"MIT"
] | 5
|
2020-06-18T02:17:41.000Z
|
2021-07-19T01:52:41.000Z
|
proxypool/storages/redis.py
|
lixinjiang/ProxyPool
|
b39461f11ce0bdb81b0898fb7ce10075b4526d1f
|
[
"MIT"
] | 2
|
2021-03-31T19:54:55.000Z
|
2021-12-13T20:42:21.000Z
|
proxypool/storages/redis.py
|
lixinjiang/ProxyPool
|
b39461f11ce0bdb81b0898fb7ce10075b4526d1f
|
[
"MIT"
] | 2
|
2020-10-30T03:25:23.000Z
|
2020-11-10T07:18:05.000Z
|
import redis
from proxypool.exceptions import PoolEmptyException
from proxypool.schemas.proxy import Proxy
from proxypool.setting import REDIS_HOST, REDIS_PORT, REDIS_PASSWORD, REDIS_KEY, PROXY_SCORE_MAX, PROXY_SCORE_MIN, \
PROXY_SCORE_INIT
from random import choice
from typing import List
from loguru import logger
from proxypool.utils.proxy import is_valid_proxy, convert_proxy_or_proxies
REDIS_CLIENT_VERSION = redis.__version__
IS_REDIS_VERSION_2 = REDIS_CLIENT_VERSION.startswith('2.')
class RedisClient(object):
"""
redis connection client of proxypool
"""
def __init__(self, host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD, **kwargs):
"""
init redis client
:param host: redis host
:param port: redis port
:param password: redis password
"""
self.db = redis.StrictRedis(host=host, port=port, password=password, decode_responses=True, **kwargs)
def add(self, proxy: Proxy, score=PROXY_SCORE_INIT) -> int:
"""
add proxy and set it to init score
:param proxy: proxy, ip:port, like 8.8.8.8:88
:param score: int score
:return: result
"""
if not is_valid_proxy(f'{proxy.host}:{proxy.port}'):
logger.info(f'invalid proxy {proxy}, throw it')
return
if not self.exists(proxy):
if IS_REDIS_VERSION_2:
return self.db.zadd(REDIS_KEY, score, proxy.string())
return self.db.zadd(REDIS_KEY, {proxy.string(): score})
def random(self) -> Proxy:
"""
get random proxy
firstly try to get proxy with max score
if not exists, try to get proxy by rank
if not exists, raise error
:return: proxy, like 8.8.8.8:88
"""
# try to get proxy with max score
proxies = self.db.zrangebyscore(REDIS_KEY, PROXY_SCORE_MAX, PROXY_SCORE_MAX)
if len(proxies):
return convert_proxy_or_proxies(choice(proxies))
# else get proxy by rank
proxies = self.db.zrevrange(REDIS_KEY, PROXY_SCORE_MIN, PROXY_SCORE_MAX)
if len(proxies):
return convert_proxy_or_proxies(choice(proxies))
# else raise error
raise PoolEmptyException
def decrease(self, proxy: Proxy) -> int:
"""
decrease score of proxy, if small than PROXY_SCORE_MIN, delete it
:param proxy: proxy
:return: new score
"""
score = self.db.zscore(REDIS_KEY, proxy.string())
# current score is larger than PROXY_SCORE_MIN
if score and score > PROXY_SCORE_MIN:
logger.info(f'{proxy.string()} current score {score}, decrease 1')
if IS_REDIS_VERSION_2:
return self.db.zincrby(REDIS_KEY, proxy.string(), -1)
return self.db.zincrby(REDIS_KEY, -1, proxy.string())
# otherwise delete proxy
else:
logger.info(f'{proxy.string()} current score {score}, remove')
return self.db.zrem(REDIS_KEY, proxy.string())
def exists(self, proxy: Proxy) -> bool:
"""
if proxy exists
:param proxy: proxy
:return: if exists, bool
"""
return self.db.zscore(REDIS_KEY, proxy.string()) is not None
def max(self, proxy: Proxy) -> int:
"""
set proxy to max score
:param proxy: proxy
:return: new score
"""
logger.info(f'{proxy.string()} is valid, set to {PROXY_SCORE_MAX}')
if IS_REDIS_VERSION_2:
return self.db.zadd(REDIS_KEY, PROXY_SCORE_MAX, proxy.string())
return self.db.zadd(REDIS_KEY, {proxy.string(): PROXY_SCORE_MAX})
def count(self) -> int:
"""
get count of proxies
:return: count, int
"""
return self.db.zcard(REDIS_KEY)
def all(self) -> List[Proxy]:
"""
get all proxies
:return: list of proxies
"""
return convert_proxy_or_proxies(self.db.zrangebyscore(REDIS_KEY, PROXY_SCORE_MIN, PROXY_SCORE_MAX))
def batch(self, start, end) -> List[Proxy]:
"""
get batch of proxies
:param start: start index
:param end: end index
:return: list of proxies
"""
return convert_proxy_or_proxies(self.db.zrevrange(REDIS_KEY, start, end - 1))
if __name__ == '__main__':
conn = RedisClient()
result = conn.random()
print(result)
| 34.651163
| 116
| 0.616779
|
794f6e4f45c23592c7919c7e04e2727ddd8f0ac7
| 2,021
|
py
|
Python
|
elfi/methods/model_selection.py
|
diadochos/elfi
|
f2932297d686403950f7f55a290cd25af10dbda6
|
[
"BSD-3-Clause"
] | 166
|
2017-03-05T17:10:38.000Z
|
2022-03-31T21:25:04.000Z
|
elfi/methods/model_selection.py
|
diadochos/elfi
|
f2932297d686403950f7f55a290cd25af10dbda6
|
[
"BSD-3-Clause"
] | 78
|
2017-04-05T11:46:23.000Z
|
2022-03-28T13:11:44.000Z
|
elfi/methods/model_selection.py
|
diadochos/elfi
|
f2932297d686403950f7f55a290cd25af10dbda6
|
[
"BSD-3-Clause"
] | 56
|
2017-03-19T17:51:57.000Z
|
2022-03-16T13:17:52.000Z
|
"""This module contains methods for model comparison and selection."""
import numpy as np
def compare_models(sample_objs, model_priors=None):
"""Find posterior probabilities for different models.
The algorithm requires elfi.Sample objects from prerun inference methods. For example the
output from elfi.Rejection.sample is valid. The portion of samples from each model among the top
(smallest) discrepancies is adjusted by each model's acceptance ratio and prior probability.
The discrepancies (including summary statistics) must be comparable so that it is
meaningful to sort them!
Parameters
----------
sample_objs : list of elfi.Sample
Resulting Sample objects from prerun inference models. The objects must include
a valid `discrepancies` attribute.
model_priors : array_like, optional
Prior probability of each model. Defaults to 1 / n_models.
Returns
-------
np.array
Posterior probabilities for the considered models.
"""
n_models = len(sample_objs)
n_min = min([s.n_samples for s in sample_objs])
# concatenate discrepancy vectors
try:
discrepancies = np.concatenate([s.discrepancies for s in sample_objs])
except ValueError:
raise ValueError("All Sample objects must include valid discrepancies.")
# sort and take the smallest n_min
inds = np.argsort(discrepancies)[:n_min]
# calculate the portions of accepted samples for each model in the top discrepancies
p_models = np.empty(n_models)
up_bound = 0
for i in range(n_models):
low_bound = up_bound
up_bound += sample_objs[i].n_samples
p_models[i] = np.logical_and(inds >= low_bound, inds < up_bound).sum()
# adjust by the number of simulations run
p_models[i] /= sample_objs[i].n_sim
# adjust by the prior model probability
if model_priors is not None:
p_models[i] *= model_priors[i]
p_models = p_models / p_models.sum()
return p_models
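def _compare_two_models_example(model_a, model_b, n_samples=1000):
    """Illustrative sketch (editor's addition, not part of the module API).

    Ranks two candidate models from prerun rejection samples. Both models are
    assumed to expose a discrepancy node named 'd'; adjust the node name and
    batch sizes to your own models.
    """
    import elfi
    rej_a = elfi.Rejection(model_a['d'], batch_size=n_samples)
    rej_b = elfi.Rejection(model_b['d'], batch_size=n_samples)
    samples = [rej_a.sample(n_samples), rej_b.sample(n_samples)]
    return compare_models(samples, model_priors=np.array([0.5, 0.5]))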
| 33.683333
| 96
| 0.696685
|
794f6f01f6cbd549e9fb5becaeb56b65065e8aca
| 35
|
py
|
Python
|
jss28_bayesian/__init__.py
|
JessikaSmith/jss28_bayesian
|
8574afd0c470238cb6eb37f41ecb0810fd25900d
|
[
"MIT"
] | null | null | null |
jss28_bayesian/__init__.py
|
JessikaSmith/jss28_bayesian
|
8574afd0c470238cb6eb37f41ecb0810fd25900d
|
[
"MIT"
] | null | null | null |
jss28_bayesian/__init__.py
|
JessikaSmith/jss28_bayesian
|
8574afd0c470238cb6eb37f41ecb0810fd25900d
|
[
"MIT"
] | null | null | null |
__author__ = 'Maria Khodorchenko'
| 11.666667
| 33
| 0.771429
|
794f6ff8385738738276f497206cde3ca049343f
| 2,601
|
py
|
Python
|
tests/flytekit/common/workflows/python.py
|
flytehub/flytekit
|
f8f53567594069b29fcd3f99abd1da71a5ef0e22
|
[
"Apache-2.0"
] | 1
|
2019-10-22T05:22:16.000Z
|
2019-10-22T05:22:16.000Z
|
tests/flytekit/common/workflows/python.py
|
chixcode/flytekit
|
f901aee721847c6264d44079d4fa31a75b8876e1
|
[
"Apache-2.0"
] | null | null | null |
tests/flytekit/common/workflows/python.py
|
chixcode/flytekit
|
f901aee721847c6264d44079d4fa31a75b8876e1
|
[
"Apache-2.0"
] | 1
|
2019-08-28T22:27:07.000Z
|
2019-08-28T22:27:07.000Z
|
from __future__ import absolute_import, division, print_function
from flytekit.sdk.tasks import python_task, inputs, outputs
from flytekit.sdk.types import Types
from flytekit.sdk.workflow import workflow_class, Input
@inputs(value_to_print=Types.Integer)
@outputs(out=Types.Integer)
@python_task(cache_version='1')
def add_one_and_print(workflow_parameters, value_to_print, out):
workflow_parameters.stats.incr("task_run")
added = value_to_print + 1
print("My printed value: {}".format(added))
out.set(added)
@inputs(value1_to_print=Types.Integer, value2_to_print=Types.Integer)
@outputs(out=Types.Integer)
@python_task(cache_version='1')
def sum_non_none(workflow_parameters, value1_to_print, value2_to_print, out):
workflow_parameters.stats.incr("task_run")
added = 0
for value in [value1_to_print, value2_to_print]:
print("Adding values: {}".format(value))
if value is not None:
added += value
added += 1
print("My printed value: {}".format(added))
out.set(added)
@inputs(value1_to_add=Types.Integer, value2_to_add=Types.Integer, value3_to_add=Types.Integer,
value4_to_add=Types.Integer)
@outputs(out=Types.Integer)
@python_task(cache_version='1')
def sum_and_print(workflow_parameters, value1_to_add, value2_to_add, value3_to_add, value4_to_add, out):
workflow_parameters.stats.incr("task_run")
summed = sum([value1_to_add, value2_to_add, value3_to_add, value4_to_add])
print("Summed up to: {}".format(summed))
out.set(summed)
@inputs(value_to_print=Types.Integer, date_triggered=Types.Datetime)
@python_task(cache_version='1')
def print_every_time(workflow_parameters, value_to_print, date_triggered):
workflow_parameters.stats.incr("task_run")
print("My printed value: {} @ {}".format(value_to_print, date_triggered))
@workflow_class
class PythonTasksWorkflow(object):
triggered_date = Input(Types.Datetime)
print1a = add_one_and_print(value_to_print=3)
print1b = add_one_and_print(value_to_print=101)
print2 = sum_non_none(value1_to_print=print1a.outputs.out,
value2_to_print=print1b.outputs.out)
print3 = add_one_and_print(value_to_print=print2.outputs.out)
print4 = add_one_and_print(value_to_print=print3.outputs.out)
print_sum = sum_and_print(
value1_to_add=print2.outputs.out,
value2_to_add=print3.outputs.out,
value3_to_add=print4.outputs.out,
value4_to_add=100
)
print_always = print_every_time(
value_to_print=print_sum.outputs.out,
date_triggered=triggered_date)
| 37.695652
| 104
| 0.750096
|
794f7080617ec15885663935832c5c7f8aadd875
| 2,193
|
py
|
Python
|
pre_commit_hooks/debug_statement_hook.py
|
christhekeele/pre-commit-hooks
|
2f7e22dc211ddec22670778553b91850a4ba8c1f
|
[
"MIT"
] | 3
|
2021-08-23T04:25:59.000Z
|
2022-01-24T08:37:46.000Z
|
pre_commit_hooks/debug_statement_hook.py
|
christhekeele/pre-commit-hooks
|
2f7e22dc211ddec22670778553b91850a4ba8c1f
|
[
"MIT"
] | null | null | null |
pre_commit_hooks/debug_statement_hook.py
|
christhekeele/pre-commit-hooks
|
2f7e22dc211ddec22670778553b91850a4ba8c1f
|
[
"MIT"
] | 4
|
2021-06-03T09:56:50.000Z
|
2022-03-17T09:48:29.000Z
|
import argparse
import ast
import traceback
from typing import List
from typing import NamedTuple
from typing import Optional
from typing import Sequence
DEBUG_STATEMENTS = {
'ipdb',
'pdb',
'pudb',
'pydevd_pycharm',
'q',
'rdb',
'rpdb',
'wdb',
}
class Debug(NamedTuple):
line: int
col: int
name: str
reason: str
class DebugStatementParser(ast.NodeVisitor):
def __init__(self) -> None:
self.breakpoints: List[Debug] = []
def visit_Import(self, node: ast.Import) -> None:
for name in node.names:
if name.name in DEBUG_STATEMENTS:
st = Debug(node.lineno, node.col_offset, name.name, 'imported')
self.breakpoints.append(st)
def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
if node.module in DEBUG_STATEMENTS:
st = Debug(node.lineno, node.col_offset, node.module, 'imported')
self.breakpoints.append(st)
def visit_Call(self, node: ast.Call) -> None:
"""python3.7+ breakpoint()"""
if isinstance(node.func, ast.Name) and node.func.id == 'breakpoint':
st = Debug(node.lineno, node.col_offset, node.func.id, 'called')
self.breakpoints.append(st)
self.generic_visit(node)
def check_file(filename: str) -> int:
try:
with open(filename, 'rb') as f:
ast_obj = ast.parse(f.read(), filename=filename)
except SyntaxError:
print(f'{filename} - Could not parse ast')
print()
print('\t' + traceback.format_exc().replace('\n', '\n\t'))
print()
return 1
visitor = DebugStatementParser()
visitor.visit(ast_obj)
for bp in visitor.breakpoints:
print(f'{filename}:{bp.line}:{bp.col} - {bp.name} {bp.reason}')
return int(bool(visitor.breakpoints))
def main(argv: Optional[Sequence[str]] = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='Filenames to run')
args = parser.parse_args(argv)
retv = 0
for filename in args.filenames:
retv |= check_file(filename)
return retv
if __name__ == '__main__':
exit(main())
| 25.8
| 79
| 0.619699
|
794f7086d05f39de6a372febd4011422f9203511
| 22,732
|
py
|
Python
|
examples/seismic/tti/operators.py
|
rwalkerlewis/devito
|
262364e5f2855ad01a281d517d400704b7667420
|
[
"MIT"
] | null | null | null |
examples/seismic/tti/operators.py
|
rwalkerlewis/devito
|
262364e5f2855ad01a281d517d400704b7667420
|
[
"MIT"
] | null | null | null |
examples/seismic/tti/operators.py
|
rwalkerlewis/devito
|
262364e5f2855ad01a281d517d400704b7667420
|
[
"MIT"
] | null | null | null |
from sympy import cos, sin
from devito import Eq, Operator, TimeFunction
from examples.seismic import PointSource, Receiver
from devito.finite_differences import centered, first_derivative, right, transpose
def second_order_stencil(model, u, v, H0, Hz):
"""
Creates the stencil corresponding to the second order TTI wave equation
u.dt2 = (epsilon * H0 + delta * Hz) - damp * u.dt
v.dt2 = (delta * H0 + Hz) - damp * v.dt
"""
# Stencils
m, damp, delta, epsilon = model.m, model.damp, model.delta, model.epsilon
s = model.grid.stepping_dim.spacing
stencilp = 1.0 / (2.0 * m + s * damp) * \
(4.0 * m * u + (s * damp - 2.0 * m) *
u.backward + 2.0 * s ** 2 * (epsilon * H0 + delta * Hz))
stencilr = 1.0 / (2.0 * m + s * damp) * \
(4.0 * m * v + (s * damp - 2.0 * m) *
v.backward + 2.0 * s ** 2 * (delta * H0 + Hz))
first_stencil = Eq(u.forward, stencilp)
second_stencil = Eq(v.forward, stencilr)
stencils = [first_stencil, second_stencil]
return stencils
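def _operator_from_stencils(model, u, v, H0, Hz, extra_exprs=()):
    """Illustrative sketch (editor's addition, not part of the original
    module): wraps the two update expressions produced by second_order_stencil
    into a devito Operator. ``extra_exprs`` stands in for the source/receiver
    injection expressions that the full forward operator adds elsewhere; it is
    an assumption of this sketch rather than devito API.
    """
    stencils = second_order_stencil(model, u, v, H0, Hz)
    return Operator(stencils + list(extra_exprs), subs=model.spacing_map)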
def Gxx_shifted(field, costheta, sintheta, cosphi, sinphi, space_order):
"""
3D rotated second order derivative in the direction x as an average of
two non-centered rotated second order derivatives in the direction x
:param field: symbolic data whose derivative we are computing
:param costheta: cosine of the tilt angle
:param sintheta: sine of the tilt angle
:param cosphi: cosine of the azimuth angle
:param sinphi: sine of the azimuth angle
:param space_order: discretization order
:return: rotated second order derivative wrt x
"""
x, y, z = field.space_dimensions
Gx1 = (costheta * cosphi * field.dx + costheta * sinphi * field.dyr -
sintheta * field.dzr)
Gxx1 = (first_derivative(Gx1 * costheta * cosphi,
dim=x, side=centered, fd_order=space_order,
matvec=transpose) +
first_derivative(Gx1 * costheta * sinphi,
dim=y, side=right, fd_order=space_order,
matvec=transpose) -
first_derivative(Gx1 * sintheta,
dim=z, side=right, fd_order=space_order,
matvec=transpose))
Gx2 = (costheta * cosphi * field.dxr + costheta * sinphi * field.dy -
sintheta * field.dz)
Gxx2 = (first_derivative(Gx2 * costheta * cosphi,
dim=x, side=right, fd_order=space_order,
matvec=transpose) +
first_derivative(Gx2 * costheta * sinphi,
dim=y, side=centered, fd_order=space_order,
matvec=transpose) -
first_derivative(Gx2 * sintheta,
dim=z, side=centered, fd_order=space_order,
matvec=transpose))
return -.5 * (Gxx1 + Gxx2)
def Gxx_shifted_2d(field, costheta, sintheta, space_order):
"""
2D rotated second order derivative in the direction x as an average of
two non-centered rotated second order derivatives in the direction x
:param field: symbolic data whose derivative we are computing
:param costheta: cosine of the tilt angle
:param sintheta: sine of the tilt angle
:param space_order: discretization order
:return: rotated second order derivative wrt x
"""
x, y = field.space_dimensions[:2]
Gx1 = (costheta * field.dxr - sintheta * field.dy)
Gxx1 = (first_derivative(Gx1 * costheta, dim=x,
side=right, fd_order=space_order,
matvec=transpose) -
first_derivative(Gx1 * sintheta, dim=y,
side=centered, fd_order=space_order,
matvec=transpose))
Gx2p = (costheta * field.dx - sintheta * field.dyr)
Gxx2 = (first_derivative(Gx2p * costheta, dim=x,
side=centered, fd_order=space_order,
matvec=transpose) -
first_derivative(Gx2p * sintheta, dim=y,
side=right, fd_order=space_order,
matvec=transpose))
return -.5 * (Gxx1 + Gxx2)
def Gyy_shifted(field, cosphi, sinphi, space_order):
"""
3D rotated second order derivative in the direction y as an average of
two non-centered rotated second order derivatives in the direction y
:param field: symbolic data whose derivative we are computing
:param cosphi: cosine of the azimuth angle
:param sinphi: sine of the azimuth angle
:param space_order: discretization order
:return: rotated second order derivative wrt y
"""
x, y = field.space_dimensions[:2]
Gyp = (sinphi * field.dx - cosphi * field.dyr)
Gyy = (first_derivative(Gyp * sinphi,
dim=x, side=centered, fd_order=space_order,
matvec=transpose) -
first_derivative(Gyp * cosphi,
dim=y, side=right, fd_order=space_order,
matvec=transpose))
Gyp2 = (sinphi * field.dxr - cosphi * field.dy)
Gyy2 = (first_derivative(Gyp2 * sinphi,
dim=x, side=right, fd_order=space_order,
matvec=transpose) -
first_derivative(Gyp2 * cosphi,
dim=y, side=centered, fd_order=space_order,
matvec=transpose))
return -.5 * (Gyy + Gyy2)
def Gzz_shifted(field, costheta, sintheta, cosphi, sinphi, space_order):
"""
3D rotated second order derivative in the direction z as an average of
    two non-centered rotated second order derivatives in the direction z
:param field: symbolic data whose derivative we are computing
:param costheta: cosine of the tilt angle
:param sintheta: sine of the tilt angle
    :param cosphi: cosine of the azimuth angle
    :param sinphi: sine of the azimuth angle
:param space_order: discretization order
:return: rotated second order derivative wrt z
"""
x, y, z = field.space_dimensions
Gzr = (sintheta * cosphi * field.dx + sintheta * sinphi * field.dyr +
costheta * field.dzr)
Gzz = (first_derivative(Gzr * sintheta * cosphi,
dim=x, side=centered, fd_order=space_order,
matvec=transpose) +
first_derivative(Gzr * sintheta * sinphi,
dim=y, side=right, fd_order=space_order,
matvec=transpose) +
first_derivative(Gzr * costheta,
dim=z, side=right, fd_order=space_order,
matvec=transpose))
Gzr2 = (sintheta * cosphi * field.dxr + sintheta * sinphi * field.dy +
costheta * field.dz)
Gzz2 = (first_derivative(Gzr2 * sintheta * cosphi,
dim=x, side=right, fd_order=space_order,
matvec=transpose) +
first_derivative(Gzr2 * sintheta * sinphi,
dim=y, side=centered, fd_order=space_order,
matvec=transpose) +
first_derivative(Gzr2 * costheta,
dim=z, side=centered, fd_order=space_order,
matvec=transpose))
return -.5 * (Gzz + Gzz2)
def Gzz_shifted_2d(field, costheta, sintheta, space_order):
"""
2D rotated second order derivative in the direction z as an average of
    two non-centered rotated second order derivatives in the direction z
:param field: symbolic data whose derivative we are computing
:param costheta: cosine of the tilt
:param sintheta: sine of the tilt
:param space_order: discretization order
:return: rotated second order derivative wrt z
"""
x, y = field.space_dimensions[:2]
Gz1r = (sintheta * field.dxr + costheta * field.dy)
Gzz1 = (first_derivative(Gz1r * sintheta, dim=x,
side=right, fd_order=space_order,
matvec=transpose) +
first_derivative(Gz1r * costheta, dim=y,
side=centered, fd_order=space_order,
matvec=transpose))
Gz2r = (sintheta * field.dx + costheta * field.dyr)
Gzz2 = (first_derivative(Gz2r * sintheta, dim=x,
side=centered, fd_order=space_order,
matvec=transpose) +
first_derivative(Gz2r * costheta, dim=y,
side=right, fd_order=space_order,
matvec=transpose))
return -.5 * (Gzz1 + Gzz2)
def Gzz_centered(field, costheta, sintheta, cosphi, sinphi, space_order):
"""
3D rotated second order derivative in the direction z
:param field: symbolic data whose derivative we are computing
:param costheta: cosine of the tilt angle
:param sintheta: sine of the tilt angle
    :param cosphi: cosine of the azimuth angle
    :param sinphi: sine of the azimuth angle
:param space_order: discretization order
:return: rotated second order derivative wrt z
"""
    order1 = space_order // 2
x, y, z = field.space_dimensions
Gz = -(sintheta * cosphi * first_derivative(field, dim=x,
side=centered, fd_order=order1) +
sintheta * sinphi * first_derivative(field, dim=y,
side=centered, fd_order=order1) +
costheta * first_derivative(field, dim=z,
side=centered, fd_order=order1))
Gzz = (first_derivative(Gz * sintheta * cosphi,
dim=x, side=centered, fd_order=order1,
matvec=transpose) +
first_derivative(Gz * sintheta * sinphi,
dim=y, side=centered, fd_order=order1,
matvec=transpose) +
first_derivative(Gz * costheta,
dim=z, side=centered, fd_order=order1,
matvec=transpose))
return Gzz
def Gzz_centered_2d(field, costheta, sintheta, space_order):
"""
2D rotated second order derivative in the direction z
:param field: symbolic data whose derivative we are computing
:param costheta: cosine of the tilt angle
:param sintheta: sine of the tilt angle
:param space_order: discretization order
:return: rotated second order derivative wrt z
"""
    order1 = space_order // 2
x, y = field.space_dimensions[:2]
Gz = -(sintheta * first_derivative(field, dim=x, side=centered, fd_order=order1) +
costheta * first_derivative(field, dim=y, side=centered, fd_order=order1))
Gzz = (first_derivative(Gz * sintheta, dim=x,
side=centered, fd_order=order1,
matvec=transpose) +
first_derivative(Gz * costheta, dim=y,
side=centered, fd_order=order1,
matvec=transpose))
return Gzz
# Centered case produces directly Gxx + Gyy
def Gxxyy_centered(field, costheta, sintheta, cosphi, sinphi, space_order):
"""
Sum of the 3D rotated second order derivative in the direction x and y.
As the Laplacian is rotation invariant, it is computed as the conventional
    Laplacian minus the rotated second order derivative in the direction z
Gxx + Gyy = field.laplace - Gzz
:param field: symbolic data whose derivative we are computing
:param costheta: cosine of the tilt angle
:param sintheta: sine of the tilt angle
    :param cosphi: cosine of the azimuth angle
    :param sinphi: sine of the azimuth angle
:param space_order: discretization order
:return: Sum of the 3D rotated second order derivative in the direction x and y
"""
Gzz = Gzz_centered(field, costheta, sintheta, cosphi, sinphi, space_order)
return field.laplace - Gzz
def Gxx_centered_2d(field, costheta, sintheta, space_order):
"""
2D rotated second order derivative in the direction x.
As the Laplacian is rotation invariant, it is computed as the conventional
    Laplacian minus the rotated second order derivative in the direction z
Gxx = field.laplace - Gzz
:param field: symbolic data whose derivative we are computing
:param costheta: cosine of the tilt angle
:param sintheta: sine of the tilt angle
    :param space_order: discretization order
    :return: 2D rotated second order derivative in the direction x
"""
return field.laplace - Gzz_centered_2d(field, costheta, sintheta, space_order)
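# Illustrative sketch, not part of the original module: the centered 2D path
# only forms Gzz explicitly and recovers Gxx from rotation invariance of the
# Laplacian, as the two helpers above do. The function below just makes that
# split explicit for a generic `field`.
def _example_centered_2d_split(field, costheta, sintheta, space_order):
    # Rotated second derivative along the tilted z axis
    Gzz = Gzz_centered_2d(field, costheta, sintheta, space_order)
    # Remaining rotated derivative along x, recovered as laplace - Gzz
    Gxx = field.laplace - Gzz
    return Gxx, Gzz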
def kernel_shifted_2d(model, u, v, space_order):
"""
TTI finite difference kernel. The equation we solve is:
u.dt2 = (1+2 *epsilon) (Gxx(u)) + sqrt(1+ 2*delta) Gzz(v)
v.dt2 = sqrt(1+ 2*delta) (Gxx(u)) + Gzz(v)
    where epsilon and delta are the Thomsen parameters. This function computes
    H0 = Gxx(u)
Hz = Gzz(v)
:param u: first TTI field
:param v: second TTI field
:param space_order: discretization order
:return: u and v component of the rotated Laplacian in 2D
"""
    # Tilt setup
costheta = cos(model.theta)
sintheta = sin(model.theta)
Gxx = Gxx_shifted_2d(u, costheta, sintheta, space_order)
Gzz = Gzz_shifted_2d(v, costheta, sintheta, space_order)
return second_order_stencil(model, u, v, Gxx, Gzz)
def kernel_shifted_3d(model, u, v, space_order):
"""
TTI finite difference kernel. The equation we solve is:
u.dt2 = (1+2 *epsilon) (Gxx(u)+Gyy(u)) + sqrt(1+ 2*delta) Gzz(v)
v.dt2 = sqrt(1+ 2*delta) (Gxx(u)+Gyy(u)) + Gzz(v)
    where epsilon and delta are the Thomsen parameters. This function computes
H0 = Gxx(u) + Gyy(u)
Hz = Gzz(v)
:param u: first TTI field
:param v: second TTI field
:param space_order: discretization order
:return: u and v component of the rotated Laplacian in 3D
"""
    # Tilt and azimuth setup
costheta = cos(model.theta)
sintheta = sin(model.theta)
cosphi = cos(model.phi)
sinphi = sin(model.phi)
Gxx = Gxx_shifted(u, costheta, sintheta, cosphi, sinphi, space_order)
Gyy = Gyy_shifted(u, cosphi, sinphi, space_order)
Gzz = Gzz_shifted(v, costheta, sintheta, cosphi, sinphi, space_order)
return second_order_stencil(model, u, v, Gxx + Gyy, Gzz)
def kernel_centered_2d(model, u, v, space_order):
"""
TTI finite difference kernel. The equation we solve is:
u.dt2 = (1+2 *epsilon) (Gxx(u)) + sqrt(1+ 2*delta) Gzz(v)
v.dt2 = sqrt(1+ 2*delta) (Gxx(u)) + Gzz(v)
    where epsilon and delta are the Thomsen parameters. This function computes
    H0 = Gxx(u)
Hz = Gzz(v)
:param u: first TTI field
:param v: second TTI field
:param space_order: discretization order
:return: u and v component of the rotated Laplacian in 2D
"""
    # Tilt setup
costheta = cos(model.theta)
sintheta = sin(model.theta)
Gxx = Gxx_centered_2d(u, costheta, sintheta, space_order)
Gzz = Gzz_centered_2d(v, costheta, sintheta, space_order)
return second_order_stencil(model, u, v, Gxx, Gzz)
def kernel_centered_3d(model, u, v, space_order):
"""
TTI finite difference kernel. The equation we solve is:
u.dt2 = (1+2 *epsilon) (Gxx(u)+Gyy(u)) + sqrt(1+ 2*delta) Gzz(v)
v.dt2 = sqrt(1+ 2*delta) (Gxx(u)+Gyy(u)) + Gzz(v)
    where epsilon and delta are the Thomsen parameters. This function computes
H0 = Gxx(u) + Gyy(u)
Hz = Gzz(v)
:param u: first TTI field
    :param v: second TTI field
    :param space_order: discretization order
    :return: u and v component of the rotated Laplacian in 3D
"""
    # Tilt and azimuth setup
costheta = cos(model.theta)
sintheta = sin(model.theta)
cosphi = cos(model.phi)
sinphi = sin(model.phi)
Gxx = Gxxyy_centered(u, costheta, sintheta, cosphi, sinphi, space_order)
Gzz = Gzz_centered(v, costheta, sintheta, cosphi, sinphi, space_order)
return second_order_stencil(model, u, v, Gxx, Gzz)
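# Illustrative sketch, not part of the original module: how the centered
# kernels are typically driven. TimeFunction is already used elsewhere in this
# module; the wavefield names and the default space_order are assumptions for
# illustration only.
def _example_build_centered_stencils(model, space_order=8):
    u = TimeFunction(name='u', grid=model.grid,
                     time_order=2, space_order=space_order)
    v = TimeFunction(name='v', grid=model.grid,
                     time_order=2, space_order=space_order)
    kernel = kernel_centered_3d if model.grid.dim == 3 else kernel_centered_2d
    return kernel(model, u, v, space_order)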
def particle_velocity_fields(model, space_order):
"""
    Initialize particle velocity fields for staggered TTI
"""
if model.grid.dim == 2:
x, z = model.space_dimensions
stagg_x = x
stagg_z = z
x, z = model.grid.dimensions
# Create symbols for forward wavefield, source and receivers
vx = TimeFunction(name='vx', grid=model.grid, staggered=stagg_x,
time_order=1, space_order=space_order)
vz = TimeFunction(name='vz', grid=model.grid, staggered=stagg_z,
time_order=1, space_order=space_order)
vy = None
elif model.grid.dim == 3:
x, y, z = model.space_dimensions
stagg_x = x
stagg_y = y
stagg_z = z
x, y, z = model.grid.dimensions
# Create symbols for forward wavefield, source and receivers
vx = TimeFunction(name='vx', grid=model.grid, staggered=stagg_x,
time_order=1, space_order=space_order)
vy = TimeFunction(name='vy', grid=model.grid, staggered=stagg_y,
time_order=1, space_order=space_order)
vz = TimeFunction(name='vz', grid=model.grid, staggered=stagg_z,
time_order=1, space_order=space_order)
return vx, vz, vy
def kernel_staggered_2d(model, u, v, space_order):
"""
TTI finite difference. The equation solved is:
vx.dt = - u.dx
vz.dt = - v.dx
m * v.dt = - sqrt(1 + 2 delta) vx.dx - vz.dz + Fh
m * u.dt = - (1 + 2 epsilon) vx.dx - sqrt(1 + 2 delta) vz.dz + Fv
"""
dampl = 1 - model.damp
m, epsilon, delta, theta = (model.m, model.epsilon, model.delta, model.theta)
s = model.grid.stepping_dim.spacing
x, z = model.grid.dimensions
# Staggered setup
vx, vz, _ = particle_velocity_fields(model, space_order)
# Stencils
phdx = cos(theta) * u.dx - sin(theta) * u.dy
u_vx = Eq(vx.forward, dampl * vx - dampl * s * phdx)
pvdz = sin(theta) * v.dx + cos(theta) * v.dy
u_vz = Eq(vz.forward, dampl * vz - dampl * s * pvdz)
dvx = cos(theta) * vx.forward.dx - sin(theta) * vx.forward.dy
dvz = sin(theta) * vz.forward.dx + cos(theta) * vz.forward.dy
# u and v equations
pv_eq = Eq(v.forward, dampl * (v - s / m * (delta * dvx + dvz)))
ph_eq = Eq(u.forward, dampl * (u - s / m * (epsilon * dvx + delta * dvz)))
return [u_vx, u_vz] + [pv_eq, ph_eq]
def kernel_staggered_3d(model, u, v, space_order):
"""
TTI finite difference. The equation solved is:
vx.dt = - u.dx
vy.dt = - u.dx
vz.dt = - v.dx
m * v.dt = - sqrt(1 + 2 delta) (vx.dx + vy.dy) - vz.dz + Fh
m * u.dt = - (1 + 2 epsilon) (vx.dx + vy.dy) - sqrt(1 + 2 delta) vz.dz + Fv
"""
dampl = 1 - model.damp
m, epsilon, delta, theta, phi = (model.m, model.epsilon, model.delta,
model.theta, model.phi)
s = model.grid.stepping_dim.spacing
x, y, z = model.grid.dimensions
# Staggered setup
vx, vz, vy = particle_velocity_fields(model, space_order)
# Stencils
phdx = (cos(theta) * cos(phi) * u.dx +
cos(theta) * sin(phi) * u.dyc -
sin(theta) * u.dzc)
u_vx = Eq(vx.forward, dampl * vx - dampl * s * phdx)
phdy = -sin(phi) * u.dxc + cos(phi) * u.dy
u_vy = Eq(vy.forward, dampl * vy - dampl * s * phdy)
pvdz = (sin(theta) * cos(phi) * v.dxc +
sin(theta) * sin(phi) * v.dyc +
cos(theta) * v.dz)
u_vz = Eq(vz.forward, dampl * vz - dampl * s * pvdz)
dvx = (cos(theta) * cos(phi) * vx.forward.dx +
cos(theta) * sin(phi) * vx.forward.dyc -
sin(theta) * vx.forward.dzc)
dvy = -sin(phi) * vy.forward.dxc + cos(phi) * vy.forward.dy
dvz = (sin(theta) * cos(phi) * vz.forward.dxc +
sin(theta) * sin(phi) * vz.forward.dyc +
cos(theta) * vz.forward.dz)
# u and v equations
pv_eq = Eq(v.forward, dampl * (v - s / m * (delta * (dvx + dvy) + dvz)))
ph_eq = Eq(u.forward, dampl * (u - s / m * (epsilon * (dvx + dvy) + delta * dvz)))
return [u_vx, u_vy, u_vz] + [pv_eq, ph_eq]
def ForwardOperator(model, geometry, space_order=4,
save=False, kernel='centered', **kwargs):
"""
    Constructor method for the forward modelling operator in a TTI medium
    :param model: :class:`Model` object containing the physical parameters
    :param geometry: acquisition geometry containing source and receiver positions
    :param space_order: space discretization order
    :param save: whether to save the full wavefield time history
    :param kernel: type of discretization, 'centered', 'shifted' or 'staggered'
"""
dt = model.grid.time_dim.spacing
m = model.m
time_order = 1 if kernel == 'staggered' else 2
if kernel == 'staggered':
dims = model.space_dimensions
stagg_u = (-dims[-1])
stagg_v = (-dims[0], -dims[1]) if model.grid.dim == 3 else (-dims[0])
else:
stagg_u = stagg_v = None
# Create symbols for forward wavefield, source and receivers
u = TimeFunction(name='u', grid=model.grid, staggered=stagg_u,
save=geometry.nt if save else None,
time_order=time_order, space_order=space_order)
v = TimeFunction(name='v', grid=model.grid, staggered=stagg_v,
save=geometry.nt if save else None,
time_order=time_order, space_order=space_order)
src = PointSource(name='src', grid=model.grid, time_range=geometry.time_axis,
npoint=geometry.nsrc)
rec = Receiver(name='rec', grid=model.grid, time_range=geometry.time_axis,
npoint=geometry.nrec)
# FD kernels of the PDE
FD_kernel = kernels[(kernel, len(model.shape))]
stencils = FD_kernel(model, u, v, space_order)
# Source and receivers
stencils += src.inject(field=u.forward, expr=src * dt**2 / m)
stencils += src.inject(field=v.forward, expr=src * dt**2 / m)
stencils += rec.interpolate(expr=u + v)
# Substitute spacing terms to reduce flops
return Operator(stencils, subs=model.spacing_map, name='ForwardTTI', **kwargs)
kernels = {('shifted', 3): kernel_shifted_3d, ('shifted', 2): kernel_shifted_2d,
('centered', 3): kernel_centered_3d, ('centered', 2): kernel_centered_2d,
('staggered', 3): kernel_staggered_3d, ('staggered', 2): kernel_staggered_2d}
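# Illustrative sketch, not part of the original module: ForwardOperator picks
# its finite-difference kernel from the dispatch table above, keyed by the
# kernel name and the number of spatial dimensions. A typical driver looks
# like the sketch below; `model` and `geometry` are assumed to come from the
# usual examples.seismic setup utilities.
def _example_forward_tti(model, geometry, space_order=8, kernel='centered'):
    op = ForwardOperator(model, geometry, space_order=space_order,
                         kernel=kernel, save=False)
    return op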
| 41.786765
| 88
| 0.606766
|
794f70bac321604a36c97e4b1909026ae9ff0306
| 2,966
|
py
|
Python
|
tests/TestDotRenderer.py
|
DaveTCode/tradingsim
|
4e7fe5389d9af9a0a34ca23b9e42e7e366a71966
|
[
"MIT"
] | null | null | null |
tests/TestDotRenderer.py
|
DaveTCode/tradingsim
|
4e7fe5389d9af9a0a34ca23b9e42e7e366a71966
|
[
"MIT"
] | null | null | null |
tests/TestDotRenderer.py
|
DaveTCode/tradingsim
|
4e7fe5389d9af9a0a34ca23b9e42e7e366a71966
|
[
"MIT"
] | null | null | null |
import pygame
import unittest
from tradingsim.renderers.dotrenderer import DotRenderer
class DotRendererTests(unittest.TestCase):
def setUp(self):
"""
        The renderer uses font objects, so pygame must be initialised first.
"""
pygame.init()
pygame.font.init()
def test_agent_to_window_coords_no_change(self):
renderer = DotRenderer()
self.assertEqual(renderer._agent_to_window_coords(0, 0), (0, 0))
self.assertEqual(renderer._agent_to_window_coords(10, 10), (10, 10))
self.assertEqual(renderer._agent_to_window_coords(0, 21), (0, 21))
def test_agent_to_window_coords_offset(self):
renderer = DotRenderer()
renderer.move_camera(18, 23)
self.assertEqual(renderer._agent_to_window_coords(0, 0), (-18, -23))
self.assertEqual(renderer._agent_to_window_coords(100, 100), (82, 77))
self.assertEqual(renderer._agent_to_window_coords(18, 83), (0, 60))
def test_agent_to_window_coords_scale(self):
renderer = DotRenderer()
renderer.zoom_camera(1)
self.assertEqual(renderer._agent_to_window_coords(0, 0), (0, 0))
self.assertEqual(renderer._agent_to_window_coords(10, 10), (20, 20))
self.assertEqual(renderer._agent_to_window_coords(0, 21), (0, 42))
def test_agent_to_window_coords_scale_and_move(self):
renderer = DotRenderer()
renderer.zoom_camera(1)
renderer.move_camera(19, 1)
self.assertEqual(renderer._agent_to_window_coords(0, 0), (-19, -1))
self.assertEqual(renderer._agent_to_window_coords(10, 10), (1, 19))
def test_window_to_agent_coords_no_change(self):
renderer = DotRenderer()
self.assertEqual(renderer._window_to_agent_coords(0, 0), (0, 0))
self.assertEqual(renderer._window_to_agent_coords(10, 10), (10, 10))
self.assertEqual(renderer._window_to_agent_coords(0, 21), (0, 21))
def test_window_to_agent_coords_offset(self):
renderer = DotRenderer()
renderer.move_camera(5, 7)
self.assertEqual(renderer._window_to_agent_coords(0, 0), (5, 7))
self.assertEqual(renderer._window_to_agent_coords(10, 10), (15, 17))
self.assertEqual(renderer._window_to_agent_coords(0, 21), (5, 28))
def test_window_to_agent_coords_zoom(self):
renderer = DotRenderer()
renderer.zoom_camera(1)
self.assertEqual(renderer._window_to_agent_coords(0, 0), (0, 0))
self.assertEqual(renderer._window_to_agent_coords(10, 10), (5, 5))
self.assertEqual(renderer._window_to_agent_coords(4, 22), (2, 11))
def test_window_to_agent_coords_scale_and_move(self):
renderer = DotRenderer()
renderer.zoom_camera(1)
renderer.move_camera(19, 1)
self.assertEqual(renderer._window_to_agent_coords(0, 0), (9.5, 0.5))
self.assertEqual(renderer._window_to_agent_coords(10, 10), (14.5, 5.5))
if __name__ == '__main__':
unittest.main()
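# Illustrative sketch, not part of the original tests: the assertions above are
# consistent with a camera transform of the form
#   window = agent * scale - offset,   agent = (window + offset) / scale,
# where zoom_camera(1) appears to double the scale. The helper below mirrors
# that assumption only; it is not the renderer's actual implementation.
def _assumed_agent_to_window(x, y, scale=1.0, offset=(0, 0)):
    return (x * scale - offset[0], y * scale - offset[1])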
| 41.194444
| 79
| 0.686784
|
794f714fc23d533712b4baa6dc27c03d9b9ed2d5
| 5,137
|
py
|
Python
|
env/lib/python3.5/site-packages/importlib_metadata/tests/test_api.py
|
ekwoodrich/dvr-ip-client
|
429fb07a2b06cc505fdd9350148266a6b4e23e64
|
[
"MIT"
] | 33
|
2019-08-04T01:48:11.000Z
|
2022-03-20T13:53:42.000Z
|
env/lib/python3.5/site-packages/importlib_metadata/tests/test_api.py
|
ekwoodrich/python-dvrip
|
429fb07a2b06cc505fdd9350148266a6b4e23e64
|
[
"MIT"
] | 8
|
2020-06-05T21:36:23.000Z
|
2022-02-12T12:24:00.000Z
|
flask/lib/python3.6/site-packages/importlib_metadata/tests/test_api.py
|
JOFLIX/grapevines
|
34576e01184570d79cc140b42ffb71d322132da6
|
[
"MIT",
"Unlicense"
] | 12
|
2019-08-12T07:59:38.000Z
|
2022-03-24T08:09:40.000Z
|
import re
import textwrap
import unittest
import itertools
from . import fixtures
from .. import (
Distribution, PackageNotFoundError, __version__, distribution,
entry_points, files, metadata, requires, version,
)
try:
from collections.abc import Iterator
except ImportError:
from collections import Iterator # noqa: F401
try:
from builtins import str as text
except ImportError:
from __builtin__ import unicode as text
class APITests(
fixtures.EggInfoPkg,
fixtures.DistInfoPkg,
fixtures.EggInfoFile,
unittest.TestCase):
version_pattern = r'\d+\.\d+(\.\d)?'
def test_retrieves_version_of_self(self):
pkg_version = version('egginfo-pkg')
assert isinstance(pkg_version, text)
assert re.match(self.version_pattern, pkg_version)
def test_retrieves_version_of_distinfo_pkg(self):
pkg_version = version('distinfo-pkg')
assert isinstance(pkg_version, text)
assert re.match(self.version_pattern, pkg_version)
def test_for_name_does_not_exist(self):
with self.assertRaises(PackageNotFoundError):
distribution('does-not-exist')
def test_for_top_level(self):
self.assertEqual(
distribution('egginfo-pkg').read_text('top_level.txt').strip(),
'mod')
def test_read_text(self):
top_level = [
path for path in files('egginfo-pkg')
if path.name == 'top_level.txt'
][0]
self.assertEqual(top_level.read_text(), 'mod\n')
def test_entry_points(self):
entries = dict(entry_points()['entries'])
ep = entries['main']
self.assertEqual(ep.value, 'mod:main')
self.assertEqual(ep.extras, [])
def test_metadata_for_this_package(self):
md = metadata('egginfo-pkg')
assert md['author'] == 'Steven Ma'
assert md['LICENSE'] == 'Unknown'
assert md['Name'] == 'egginfo-pkg'
classifiers = md.get_all('Classifier')
assert 'Topic :: Software Development :: Libraries' in classifiers
def test_importlib_metadata_version(self):
assert re.match(self.version_pattern, __version__)
@staticmethod
def _test_files(files_iter):
assert isinstance(files_iter, Iterator), files_iter
files = list(files_iter)
root = files[0].root
for file in files:
assert file.root == root
assert not file.hash or file.hash.value
assert not file.hash or file.hash.mode == 'sha256'
assert not file.size or file.size >= 0
assert file.locate().exists()
assert isinstance(file.read_binary(), bytes)
if file.name.endswith('.py'):
file.read_text()
def test_file_hash_repr(self):
try:
assertRegex = self.assertRegex
except AttributeError:
# Python 2
assertRegex = self.assertRegexpMatches
util = [
p for p in files('distinfo-pkg')
if p.name == 'mod.py'
][0]
assertRegex(
repr(util.hash),
'<FileHash mode: sha256 value: .*>')
def test_files_dist_info(self):
self._test_files(files('distinfo-pkg'))
def test_files_egg_info(self):
self._test_files(files('egginfo-pkg'))
def test_version_egg_info_file(self):
self.assertEqual(version('egginfo-file'), '0.1')
def test_requires_egg_info_file(self):
requirements = requires('egginfo-file')
self.assertIsNone(requirements)
def test_requires(self):
deps = requires('egginfo-pkg')
assert any(
dep == 'wheel >= 1.0; python_version >= "2.7"'
for dep in deps
)
def test_requires_dist_info(self):
deps = list(requires('distinfo-pkg'))
assert deps and all(deps)
def test_more_complex_deps_requires_text(self):
requires = textwrap.dedent("""
dep1
dep2
[:python_version < "3"]
dep3
[extra1]
dep4
[extra2:python_version < "3"]
dep5
""")
deps = sorted(Distribution._deps_from_requires_text(requires))
expected = [
'dep1',
'dep2',
'dep3; python_version < "3"',
'dep4; extra == "extra1"',
'dep5; (python_version < "3") and extra == "extra2"',
]
# It's important that the environment marker expression be
# wrapped in parentheses to avoid the following 'and' binding more
# tightly than some other part of the environment expression.
assert deps == expected
class OffSysPathTests(fixtures.DistInfoPkgOffPath, unittest.TestCase):
def test_find_distributions_specified_path(self):
dists = itertools.chain.from_iterable(
resolver(path=[str(self.site_dir)])
for resolver in Distribution._discover_resolvers()
)
assert any(
dist.metadata['Name'] == 'distinfo-pkg'
for dist in dists
)
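# Illustrative sketch, not part of the upstream test suite: the public API
# exercised above reduces to calls like these. The package name is just the
# fixture installed by this test suite.
def _example_api_usage(pkg='egginfo-pkg'):
    return {
        'version': version(pkg),
        'requires': requires(pkg),
        'files': [str(p) for p in files(pkg)],
    }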
| 30.760479
| 75
| 0.606774
|
794f7165169c3eb45394cd6ad4a486b54173f1ad
| 746
|
py
|
Python
|
facebook_client.py
|
mjalkio/parallel-python-tutorial
|
86e77cb6b1e0e21aa2b09270c249e232761f78e5
|
[
"MIT"
] | null | null | null |
facebook_client.py
|
mjalkio/parallel-python-tutorial
|
86e77cb6b1e0e21aa2b09270c249e232761f78e5
|
[
"MIT"
] | null | null | null |
facebook_client.py
|
mjalkio/parallel-python-tutorial
|
86e77cb6b1e0e21aa2b09270c249e232761f78e5
|
[
"MIT"
] | null | null | null |
"""Super thin wrapper of Facebook API."""
import facebook
class FacebookClient:
"""Simple class to get basic information on Facebook Pages."""
def __init__(self, access_token):
"""Initialize GraphAPI object."""
self.graph = facebook.GraphAPI(access_token=access_token,
version='2.7')
def get_page_fan_count(self, page_id):
"""Return number of fans for the given page."""
page = self.graph.get_object(id=page_id, fields='fan_count')
return page['fan_count']
def get_page_about(self, page_id):
"""Return some information about the given page."""
page = self.graph.get_object(id=page_id, fields='about')
return page['about']
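# Illustrative sketch, not part of the original module: a minimal caller. The
# access token is a placeholder and 'nasa' is only an assumed example page id.
def _example_page_summary(access_token, page_id='nasa'):
    client = FacebookClient(access_token)
    return {
        'fans': client.get_page_fan_count(page_id),
        'about': client.get_page_about(page_id),
    }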
| 33.909091
| 68
| 0.634048
|
794f717cf1765059b308678699d61edb7eba2692
| 2,194
|
py
|
Python
|
larousse_api/larousse.py
|
Hugo291/larousse_api
|
ceda426c31ddeda02e7b539be1c23b540d149f25
|
[
"MIT"
] | 8
|
2020-08-05T08:00:57.000Z
|
2021-11-21T20:00:05.000Z
|
larousse_api/larousse.py
|
Hugo291/larousse_api
|
ceda426c31ddeda02e7b539be1c23b540d149f25
|
[
"MIT"
] | 10
|
2021-01-27T04:51:52.000Z
|
2021-07-14T04:53:47.000Z
|
larousse_api/larousse.py
|
Hugo291/larousse_api
|
ceda426c31ddeda02e7b539be1c23b540d149f25
|
[
"MIT"
] | null | null | null |
import requests
import re
import unicodedata
from bs4 import BeautifulSoup
class Larousse:
def __init__(self, word):
self.word = word
self.soup = self.__get_content()
def get_definitions(self):
"""
        :return: A tuple (texts, li tags) containing all definitions of the word
"""
for ul in self.soup.find_all('ul'):
if ul.get('class') is not None and 'Definitions' in ul.get('class'):
return [unicodedata.normalize("NFKD", re.sub("<.*?>", "", str(li))) for li in
ul.find_all('li')], ul.find_all('li')
return None, None
def get_synonymes(self):
"""
        :return: A tuple (texts, li tags) containing all synonyms of the word
"""
for ul in self.soup.find_all('ul'):
if ul.get('class') is not None and 'Synonymes' in ul.get('class'):
return [unicodedata.normalize("NFKD", re.sub("<.*?>", "", str(li))) for li in
ul.find_all('li')], ul.find_all('li')
return None, None
def get_citations(self):
"""
        :return: A tuple (texts, li tags) containing all citations of the word
"""
for ul in self.soup.find_all('ul'):
if ul.get('class') is not None and 'ListeCitations' in ul.get('class'):
return [unicodedata.normalize("NFKD", re.sub("<.*?>", "", str(li))) for li in
ul.find_all('li')], ul.find_all('li')
return None, None
def get_locutions(self):
"""
        :return: A tuple (texts, li tags) containing all locutions of the word
"""
for ul in self.soup.find_all('ul'):
if ul.get('class') is not None and 'ListeCitations' in ul.get('class'):
return [unicodedata.normalize("NFKD", re.sub("<.*?>", "", str(li))) for li in
ul.find_all('li')], ul.find_all('li')
return None, None
def __get_content(self):
url = "https://www.larousse.fr/dictionnaires/francais/" + self.word.lower()
rq = requests.get(url=url)
if rq.status_code != 200:
raise Exception("Status code return an error")
return BeautifulSoup(rq.text, 'html.parser')
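# Illustrative sketch, not part of the original module: looking up an assumed
# example word. This performs a live request to larousse.fr.
def _example_definitions(word="maison"):
    larousse = Larousse(word)
    definitions, raw_items = larousse.get_definitions()
    return definitions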
| 35.387097
| 93
| 0.542388
|
794f71d95c3909734ba3a9489824fffb4d144bd4
| 52,715
|
py
|
Python
|
test/functional/test_framework/messages.py
|
MrCryptoBeast/WWW
|
857e860df0aa1bc7fde2ee6f5918ff32933beeb3
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/messages.py
|
MrCryptoBeast/WWW
|
857e860df0aa1bc7fde2ee6f5918ff32933beeb3
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/messages.py
|
MrCryptoBeast/WWW
|
857e860df0aa1bc7fde2ee6f5918ff32933beeb3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2020 The worldwideweb Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""worldwideweb test framework primitive and message structures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
worldwideweb/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization.
Classes use __slots__ to ensure extraneous attributes aren't accidentally added
by tests, compromising their intended effect.
"""
from base64 import b32decode, b32encode
import copy
import hashlib
from io import BytesIO
import math
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import assert_equal
MAX_LOCATOR_SZ = 101
MAX_BLOCK_WEIGHT = 4000000
MAX_BLOOM_FILTER_SIZE = 36000
MAX_BLOOM_HASH_FUNCS = 50
COIN = 100000000 # 1 btc in satoshis
MAX_MONEY = 21000000 * COIN
BIP125_SEQUENCE_NUMBER = 0xfffffffd # Sequence number that is rbf-opt-in (BIP 125) and csv-opt-out (BIP 68)
MAX_PROTOCOL_MESSAGE_LENGTH = 4000000 # Maximum length of incoming protocol messages
MAX_HEADERS_RESULTS = 2000 # Number of headers sent in one getheaders result
MAX_INV_SIZE = 50000 # Maximum number of entries in an 'inv' protocol message
NODE_NETWORK = (1 << 0)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_COMPACT_FILTERS = (1 << 6)
NODE_NETWORK_LIMITED = (1 << 10)
MSG_TX = 1
MSG_BLOCK = 2
MSG_FILTERED_BLOCK = 3
MSG_CMPCT_BLOCK = 4
MSG_WTX = 5
MSG_WITNESS_FLAG = 1 << 30
MSG_TYPE_MASK = 0xffffffff >> 2
MSG_WITNESS_TX = MSG_TX | MSG_WITNESS_FLAG
FILTER_TYPE_BASIC = 0
WITNESS_SCALE_FACTOR = 4
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
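# Illustrative sketch, not part of the original framework: round-tripping a
# CompactSize-encoded integer through the helpers above.
def _example_compact_size_roundtrip(value=515):
    encoded = ser_compact_size(value)            # 515 -> b'\xfd\x03\x02'
    decoded = deser_compact_size(BytesIO(encoded))
    return decoded == value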
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for _ in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
# deser_function_name: Allow for an alternate deserialization function on the
# entries in the vector.
def deser_vector(f, c, deser_function_name=None):
nit = deser_compact_size(f)
r = []
for _ in range(nit):
t = c()
if deser_function_name:
getattr(t, deser_function_name)(f)
else:
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for _ in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for _ in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
def from_hex(obj, hex_string):
"""Deserialize from a hex string representation (e.g. from RPC)
Note that there is no complementary helper like e.g. `to_hex` for the
inverse operation. To serialize a message object to a hex string, simply
use obj.serialize().hex()"""
obj.deserialize(BytesIO(bytes.fromhex(hex_string)))
return obj
def tx_from_hex(hex_string):
"""Deserialize from hex string to a transaction object"""
return from_hex(CTransaction(), hex_string)
# Objects that map to worldwidewebd objects, which can be serialized/deserialized
class CAddress:
__slots__ = ("net", "ip", "nServices", "port", "time")
# see https://github.com/worldwideweb/bips/blob/master/bip-0155.mediawiki
NET_IPV4 = 1
NET_I2P = 5
ADDRV2_NET_NAME = {
NET_IPV4: "IPv4",
NET_I2P: "I2P"
}
ADDRV2_ADDRESS_LENGTH = {
NET_IPV4: 4,
NET_I2P: 32
}
I2P_PAD = "===="
def __init__(self):
self.time = 0
self.nServices = 1
self.net = self.NET_IPV4
self.ip = "0.0.0.0"
self.port = 0
def __eq__(self, other):
return self.net == other.net and self.ip == other.ip and self.nServices == other.nServices and self.port == other.port and self.time == other.time
def deserialize(self, f, *, with_time=True):
"""Deserialize from addrv1 format (pre-BIP155)"""
if with_time:
# VERSION messages serialize CAddress objects without time
self.time = struct.unpack("<I", f.read(4))[0]
self.nServices = struct.unpack("<Q", f.read(8))[0]
# We only support IPv4 which means skip 12 bytes and read the next 4 as IPv4 address.
f.read(12)
self.net = self.NET_IPV4
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self, *, with_time=True):
"""Serialize in addrv1 format (pre-BIP155)"""
assert self.net == self.NET_IPV4
r = b""
if with_time:
# VERSION messages serialize CAddress objects without time
r += struct.pack("<I", self.time)
r += struct.pack("<Q", self.nServices)
r += b"\x00" * 10 + b"\xff" * 2
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def deserialize_v2(self, f):
"""Deserialize from addrv2 format (BIP155)"""
self.time = struct.unpack("<I", f.read(4))[0]
self.nServices = deser_compact_size(f)
self.net = struct.unpack("B", f.read(1))[0]
assert self.net in (self.NET_IPV4, self.NET_I2P)
address_length = deser_compact_size(f)
assert address_length == self.ADDRV2_ADDRESS_LENGTH[self.net]
addr_bytes = f.read(address_length)
if self.net == self.NET_IPV4:
self.ip = socket.inet_ntoa(addr_bytes)
else:
self.ip = b32encode(addr_bytes)[0:-len(self.I2P_PAD)].decode("ascii").lower() + ".b32.i2p"
self.port = struct.unpack(">H", f.read(2))[0]
def serialize_v2(self):
"""Serialize in addrv2 format (BIP155)"""
assert self.net in (self.NET_IPV4, self.NET_I2P)
r = b""
r += struct.pack("<I", self.time)
r += ser_compact_size(self.nServices)
r += struct.pack("B", self.net)
r += ser_compact_size(self.ADDRV2_ADDRESS_LENGTH[self.net])
if self.net == self.NET_IPV4:
r += socket.inet_aton(self.ip)
else:
sfx = ".b32.i2p"
assert self.ip.endswith(sfx)
r += b32decode(self.ip[0:-len(sfx)] + self.I2P_PAD, True)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return ("CAddress(nServices=%i net=%s addr=%s port=%i)"
% (self.nServices, self.ADDRV2_NET_NAME[self.net], self.ip, self.port))
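# Illustrative sketch, not part of the original framework: round-tripping an
# IPv4 CAddress through the BIP155 (addrv2) helpers. The timestamp, services
# and endpoint values are placeholders.
def _example_addrv2_roundtrip():
    addr = CAddress()
    addr.time = 1700000000
    addr.nServices = NODE_NETWORK | NODE_WITNESS
    addr.ip = "10.0.0.1"
    addr.port = 8333
    restored = CAddress()
    restored.deserialize_v2(BytesIO(addr.serialize_v2()))
    return restored == addr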
class CInv:
__slots__ = ("hash", "type")
typemap = {
0: "Error",
MSG_TX: "TX",
MSG_BLOCK: "Block",
MSG_TX | MSG_WITNESS_FLAG: "WitnessTx",
MSG_BLOCK | MSG_WITNESS_FLAG: "WitnessBlock",
MSG_FILTERED_BLOCK: "filtered Block",
MSG_CMPCT_BLOCK: "CompactBlock",
MSG_WTX: "WTX",
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<I", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<I", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
def __eq__(self, other):
return isinstance(other, CInv) and self.hash == other.hash and self.type == other.type
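# Illustrative sketch, not part of the original framework: a witness-tx
# inventory entry survives a serialize/deserialize round trip. The hash is a
# placeholder value.
def _example_inv_roundtrip():
    inv = CInv(t=MSG_TX | MSG_WITNESS_FLAG, h=0xdeadbeef)
    restored = CInv()
    restored.deserialize(BytesIO(inv.serialize()))
    return restored == inv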
class CBlockLocator:
__slots__ = ("nVersion", "vHave")
def __init__(self):
self.vHave = []
def deserialize(self, f):
struct.unpack("<i", f.read(4))[0] # Ignore version field.
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", 0) # worldwideweb Core ignores version field. Set it to 0.
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(vHave=%s)" % (repr(self.vHave))
class COutPoint:
__slots__ = ("hash", "n")
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn:
__slots__ = ("nSequence", "prevout", "scriptSig")
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), self.scriptSig.hex(),
self.nSequence)
class CTxOut:
__slots__ = ("nValue", "scriptPubKey")
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
self.scriptPubKey.hex())
class CScriptWitness:
__slots__ = ("stack",)
def __init__(self):
# stack is a vector of strings
self.stack = []
def __repr__(self):
return "CScriptWitness(%s)" % \
(",".join([x.hex() for x in self.stack]))
def is_null(self):
if self.stack:
return False
return True
class CTxInWitness:
__slots__ = ("scriptWitness",)
def __init__(self):
self.scriptWitness = CScriptWitness()
def deserialize(self, f):
self.scriptWitness.stack = deser_string_vector(f)
def serialize(self):
return ser_string_vector(self.scriptWitness.stack)
def __repr__(self):
return repr(self.scriptWitness)
def is_null(self):
return self.scriptWitness.is_null()
class CTxWitness:
__slots__ = ("vtxinwit",)
def __init__(self):
self.vtxinwit = []
def deserialize(self, f):
for i in range(len(self.vtxinwit)):
self.vtxinwit[i].deserialize(f)
def serialize(self):
r = b""
# This is different than the usual vector serialization --
# we omit the length of the vector, which is required to be
# the same length as the transaction's vin vector.
for x in self.vtxinwit:
r += x.serialize()
return r
def __repr__(self):
return "CTxWitness(%s)" % \
(';'.join([repr(x) for x in self.vtxinwit]))
def is_null(self):
for x in self.vtxinwit:
if not x.is_null():
return False
return True
class CTransaction:
__slots__ = ("hash", "nLockTime", "nVersion", "sha256", "vin", "vout",
"wit")
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.wit = CTxWitness()
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
self.wit = copy.deepcopy(tx.wit)
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in worldwidewebd
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness() for _ in range(len(self.vin))]
self.wit.deserialize(f)
else:
self.wit = CTxWitness()
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Only serialize with witness when explicitly called for
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
for _ in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is with witness -- must explicitly
# call serialize_without_witness to exclude witness data.
def serialize(self):
return self.serialize_with_witness()
def getwtxid(self):
return hash256(self.serialize())[::-1].hex()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.hash
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if with_witness:
# Don't cache the result, just return it
return uint256_from_str(hash256(self.serialize_with_witness()))
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = hash256(self.serialize_without_witness())[::-1].hex()
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
# Calculate the transaction weight using witness and non-witness
# serialization size (does NOT use sigops).
def get_weight(self):
with_witness_size = len(self.serialize_with_witness())
without_witness_size = len(self.serialize_without_witness())
return (WITNESS_SCALE_FACTOR - 1) * without_witness_size + with_witness_size
def get_vsize(self):
return math.ceil(self.get_weight() / WITNESS_SCALE_FACTOR)
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
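# Illustrative sketch, not part of the original framework: assembling a minimal
# one-input, one-output transaction and computing its txid. The prevout hash
# and scriptPubKey bytes are placeholders.
def _example_build_tx():
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(hash=1, n=0), scriptSig=b"",
                        nSequence=0xffffffff))
    tx.vout.append(CTxOut(nValue=1 * COIN, scriptPubKey=b"\x51"))
    return tx.rehash()  # txid as a hex string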
class CBlockHeader:
__slots__ = ("hash", "hashMerkleRoot", "hashPrevBlock", "nBits", "nNonce",
"nTime", "nVersion", "sha256")
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 4
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
self.hash = hash256(r)[::-1].hex()
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
BLOCK_HEADER_SIZE = len(CBlockHeader().serialize())
assert_equal(BLOCK_HEADER_SIZE, 80)
class CBlock(CBlockHeader):
__slots__ = ("vtx",)
def __init__(self, header=None):
super().__init__(header)
self.vtx = []
def deserialize(self, f):
super().deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=True):
r = b""
r += super().serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx, "serialize_without_witness")
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
# Calculate the block weight using witness and non-witness
# serialization size (does NOT use sigops).
def get_weight(self):
with_witness_size = len(self.serialize(with_witness=True))
without_witness_size = len(self.serialize(with_witness=False))
return (WITNESS_SCALE_FACTOR - 1) * without_witness_size + with_witness_size
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
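# Illustrative sketch, not part of the original framework: grinding a header
# nonce with solve(). The nBits value is an assumed very easy (regtest-style)
# target so the loop terminates after a handful of iterations.
def _example_solve_header():
    block = CBlock()
    block.nTime = int(time.time())
    block.nBits = 0x207fffff
    block.solve()
    return block.hash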
class PrefilledTransaction:
__slots__ = ("index", "tx")
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=True):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_without_witness(self):
return self.serialize(with_witness=False)
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs:
__slots__ = ("header", "nonce", "prefilled_txn", "prefilled_txn_length",
"shortids", "shortids_length")
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for _ in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn, "serialize_without_witness")
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
__slots__ = ()
def serialize(self):
return super().serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs:
__slots__ = ("header", "nonce", "prefilled_txn", "shortids", "use_witness")
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids is not None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list=None, use_witness=False):
if prefill_list is None:
prefill_list = [0]
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest:
__slots__ = ("blockhash", "indexes")
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for _ in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions:
__slots__ = ("blockhash", "transactions")
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
self.transactions = transactions if transactions is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=True):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions, "serialize_without_witness")
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree:
__slots__ = ("nTransactions", "vBits", "vHash")
def __init__(self):
self.nTransactions = 0
self.vHash = []
self.vBits = []
def deserialize(self, f):
self.nTransactions = struct.unpack("<i", f.read(4))[0]
self.vHash = deser_uint256_vector(f)
vBytes = deser_string(f)
self.vBits = []
for i in range(len(vBytes) * 8):
self.vBits.append(vBytes[i//8] & (1 << (i % 8)) != 0)
def serialize(self):
r = b""
r += struct.pack("<i", self.nTransactions)
r += ser_uint256_vector(self.vHash)
vBytesArray = bytearray([0x00] * ((len(self.vBits) + 7)//8))
for i in range(len(self.vBits)):
vBytesArray[i // 8] |= self.vBits[i] << (i % 8)
r += ser_string(bytes(vBytesArray))
return r
def __repr__(self):
return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))
class CMerkleBlock:
__slots__ = ("header", "txn")
def __init__(self):
self.header = CBlockHeader()
self.txn = CPartialMerkleTree()
def deserialize(self, f):
self.header.deserialize(f)
self.txn.deserialize(f)
def serialize(self):
r = b""
r += self.header.serialize()
r += self.txn.serialize()
return r
def __repr__(self):
return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version:
__slots__ = ("addrFrom", "addrTo", "nNonce", "relay", "nServices",
"nStartingHeight", "nTime", "nVersion", "strSubVer")
msgtype = b"version"
def __init__(self):
self.nVersion = 0
self.nServices = 0
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = ''
self.nStartingHeight = -1
self.relay = 0
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f, with_time=False)
self.addrFrom = CAddress()
self.addrFrom.deserialize(f, with_time=False)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f).decode('utf-8')
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
# Relay field is optional for version 70001 onwards
# But, unconditionally check it to match behaviour in worldwidewebd
try:
self.relay = struct.unpack("<b", f.read(1))[0]
except struct.error:
self.relay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize(with_time=False)
r += self.addrFrom.serialize(with_time=False)
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer.encode('utf-8'))
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.relay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i relay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.relay)
class msg_verack:
__slots__ = ()
msgtype = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr:
__slots__ = ("addrs",)
msgtype = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_addrv2:
__slots__ = ("addrs",)
msgtype = b"addrv2"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress, "deserialize_v2")
def serialize(self):
return ser_vector(self.addrs, "serialize_v2")
def __repr__(self):
return "msg_addrv2(addrs=%s)" % (repr(self.addrs))
class msg_sendaddrv2:
__slots__ = ()
msgtype = b"sendaddrv2"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendaddrv2()"
class msg_inv:
__slots__ = ("inv",)
msgtype = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata:
__slots__ = ("inv",)
msgtype = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv is not None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks:
__slots__ = ("locator", "hashstop")
msgtype = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx:
__slots__ = ("tx",)
msgtype = b"tx"
    def __init__(self, tx=None):
        self.tx = tx if tx is not None else CTransaction()
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_with_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_wtxidrelay:
__slots__ = ()
msgtype = b"wtxidrelay"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_wtxidrelay()"
class msg_no_witness_tx(msg_tx):
__slots__ = ()
def serialize(self):
return self.tx.serialize_without_witness()
class msg_block:
__slots__ = ("block",)
msgtype = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the msgtype, and the data
class msg_generic:
__slots__ = ("data")
def __init__(self, msgtype, data=None):
self.msgtype = msgtype
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_no_witness_block(msg_block):
__slots__ = ()
def serialize(self):
return self.block.serialize(with_witness=False)
class msg_getaddr:
__slots__ = ()
msgtype = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping:
__slots__ = ("nonce",)
msgtype = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong:
__slots__ = ("nonce",)
msgtype = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool:
__slots__ = ()
msgtype = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_notfound:
__slots__ = ("vec", )
msgtype = b"notfound"
def __init__(self, vec=None):
self.vec = vec or []
def deserialize(self, f):
self.vec = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.vec)
def __repr__(self):
return "msg_notfound(vec=%s)" % (repr(self.vec))
class msg_sendheaders:
__slots__ = ()
msgtype = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders:
__slots__ = ("hashstop", "locator",)
msgtype = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
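# Illustrative sketch (not part of the original file): building a getheaders
# request. Leaving hashstop at 0 asks the peer for as many headers as it is
# willing to send after the locator.
def _example_getheaders_request():
    msg = msg_getheaders()
    msg.hashstop = 0  # 0 means "no upper bound"
    return msg.serialize()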
# headers message has
# <count> <vector of block headers>
class msg_headers:
__slots__ = ("headers",)
msgtype = b"headers"
def __init__(self, headers=None):
self.headers = headers if headers is not None else []
def deserialize(self, f):
# comment in worldwidewebd indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_merkleblock:
__slots__ = ("merkleblock",)
msgtype = b"merkleblock"
def __init__(self, merkleblock=None):
if merkleblock is None:
self.merkleblock = CMerkleBlock()
else:
self.merkleblock = merkleblock
def deserialize(self, f):
self.merkleblock.deserialize(f)
def serialize(self):
return self.merkleblock.serialize()
def __repr__(self):
return "msg_merkleblock(merkleblock=%s)" % (repr(self.merkleblock))
class msg_filterload:
__slots__ = ("data", "nHashFuncs", "nTweak", "nFlags")
msgtype = b"filterload"
def __init__(self, data=b'00', nHashFuncs=0, nTweak=0, nFlags=0):
self.data = data
self.nHashFuncs = nHashFuncs
self.nTweak = nTweak
self.nFlags = nFlags
def deserialize(self, f):
self.data = deser_string(f)
self.nHashFuncs = struct.unpack("<I", f.read(4))[0]
self.nTweak = struct.unpack("<I", f.read(4))[0]
self.nFlags = struct.unpack("<B", f.read(1))[0]
def serialize(self):
r = b""
r += ser_string(self.data)
r += struct.pack("<I", self.nHashFuncs)
r += struct.pack("<I", self.nTweak)
r += struct.pack("<B", self.nFlags)
return r
def __repr__(self):
return "msg_filterload(data={}, nHashFuncs={}, nTweak={}, nFlags={})".format(
self.data, self.nHashFuncs, self.nTweak, self.nFlags)
class msg_filteradd:
__slots__ = ("data")
msgtype = b"filteradd"
def __init__(self, data):
self.data = data
def deserialize(self, f):
self.data = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.data)
return r
def __repr__(self):
return "msg_filteradd(data={})".format(self.data)
class msg_filterclear:
__slots__ = ()
msgtype = b"filterclear"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_filterclear()"
class msg_feefilter:
__slots__ = ("feerate",)
msgtype = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct:
__slots__ = ("announce", "version")
msgtype = b"sendcmpct"
def __init__(self, announce=False, version=1):
self.announce = announce
self.version = version
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock:
__slots__ = ("header_and_shortids",)
msgtype = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn:
__slots__ = ("block_txn_request",)
msgtype = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn:
__slots__ = ("block_transactions",)
msgtype = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize()
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_no_witness_blocktxn(msg_blocktxn):
__slots__ = ()
def serialize(self):
return self.block_transactions.serialize(with_witness=False)
class msg_getcfilters:
__slots__ = ("filter_type", "start_height", "stop_hash")
msgtype = b"getcfilters"
def __init__(self, filter_type, start_height, stop_hash):
self.filter_type = filter_type
self.start_height = start_height
self.stop_hash = stop_hash
def deserialize(self, f):
self.filter_type = struct.unpack("<B", f.read(1))[0]
self.start_height = struct.unpack("<I", f.read(4))[0]
self.stop_hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<B", self.filter_type)
r += struct.pack("<I", self.start_height)
r += ser_uint256(self.stop_hash)
return r
def __repr__(self):
return "msg_getcfilters(filter_type={:#x}, start_height={}, stop_hash={:x})".format(
self.filter_type, self.start_height, self.stop_hash)
class msg_cfilter:
__slots__ = ("filter_type", "block_hash", "filter_data")
msgtype = b"cfilter"
def __init__(self, filter_type=None, block_hash=None, filter_data=None):
self.filter_type = filter_type
self.block_hash = block_hash
self.filter_data = filter_data
def deserialize(self, f):
self.filter_type = struct.unpack("<B", f.read(1))[0]
self.block_hash = deser_uint256(f)
self.filter_data = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<B", self.filter_type)
r += ser_uint256(self.block_hash)
r += ser_string(self.filter_data)
return r
def __repr__(self):
return "msg_cfilter(filter_type={:#x}, block_hash={:x})".format(
self.filter_type, self.block_hash)
class msg_getcfheaders:
__slots__ = ("filter_type", "start_height", "stop_hash")
msgtype = b"getcfheaders"
def __init__(self, filter_type, start_height, stop_hash):
self.filter_type = filter_type
self.start_height = start_height
self.stop_hash = stop_hash
def deserialize(self, f):
self.filter_type = struct.unpack("<B", f.read(1))[0]
self.start_height = struct.unpack("<I", f.read(4))[0]
self.stop_hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<B", self.filter_type)
r += struct.pack("<I", self.start_height)
r += ser_uint256(self.stop_hash)
return r
def __repr__(self):
return "msg_getcfheaders(filter_type={:#x}, start_height={}, stop_hash={:x})".format(
self.filter_type, self.start_height, self.stop_hash)
class msg_cfheaders:
__slots__ = ("filter_type", "stop_hash", "prev_header", "hashes")
msgtype = b"cfheaders"
def __init__(self, filter_type=None, stop_hash=None, prev_header=None, hashes=None):
self.filter_type = filter_type
self.stop_hash = stop_hash
self.prev_header = prev_header
self.hashes = hashes
def deserialize(self, f):
self.filter_type = struct.unpack("<B", f.read(1))[0]
self.stop_hash = deser_uint256(f)
self.prev_header = deser_uint256(f)
self.hashes = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<B", self.filter_type)
r += ser_uint256(self.stop_hash)
r += ser_uint256(self.prev_header)
r += ser_uint256_vector(self.hashes)
return r
def __repr__(self):
return "msg_cfheaders(filter_type={:#x}, stop_hash={:x})".format(
self.filter_type, self.stop_hash)
class msg_getcfcheckpt:
__slots__ = ("filter_type", "stop_hash")
msgtype = b"getcfcheckpt"
def __init__(self, filter_type, stop_hash):
self.filter_type = filter_type
self.stop_hash = stop_hash
def deserialize(self, f):
self.filter_type = struct.unpack("<B", f.read(1))[0]
self.stop_hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<B", self.filter_type)
r += ser_uint256(self.stop_hash)
return r
def __repr__(self):
return "msg_getcfcheckpt(filter_type={:#x}, stop_hash={:x})".format(
self.filter_type, self.stop_hash)
class msg_cfcheckpt:
__slots__ = ("filter_type", "stop_hash", "headers")
msgtype = b"cfcheckpt"
def __init__(self, filter_type=None, stop_hash=None, headers=None):
self.filter_type = filter_type
self.stop_hash = stop_hash
self.headers = headers
def deserialize(self, f):
self.filter_type = struct.unpack("<B", f.read(1))[0]
self.stop_hash = deser_uint256(f)
self.headers = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<B", self.filter_type)
r += ser_uint256(self.stop_hash)
r += ser_uint256_vector(self.headers)
return r
def __repr__(self):
return "msg_cfcheckpt(filter_type={:#x}, stop_hash={:x})".format(
self.filter_type, self.stop_hash)
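# Illustrative sketch (not part of the original file): every msg_* class above
# follows the same contract -- a bytes msgtype plus serialize() -> bytes and
# deserialize(f) reading from a file-like object -- so a generic round-trip
# helper can be written once. It assumes the class has a no-argument constructor.
def _example_message_roundtrip(msg_class):
    import io
    sent = msg_class()
    received = msg_class()
    received.deserialize(io.BytesIO(sent.serialize()))
    return received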
| 29.012108
| 262
| 0.606393
|
794f72033515bda9a872b501a2d26543281fba5e
| 20,968
|
py
|
Python
|
app/grandchallenge/components/backends/amazon_ecs.py
|
kaczmarj/grand-challenge.org
|
8dc8a2170e51072354f7e94f2a22578805a67b94
|
[
"Apache-2.0"
] | null | null | null |
app/grandchallenge/components/backends/amazon_ecs.py
|
kaczmarj/grand-challenge.org
|
8dc8a2170e51072354f7e94f2a22578805a67b94
|
[
"Apache-2.0"
] | null | null | null |
app/grandchallenge/components/backends/amazon_ecs.py
|
kaczmarj/grand-challenge.org
|
8dc8a2170e51072354f7e94f2a22578805a67b94
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
import shutil
from datetime import datetime, timezone
from enum import Enum
from json import JSONDecodeError
from pathlib import Path
from time import sleep
import boto3
from dateutil.parser import isoparse
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.files import File
from django.db import transaction
from django.utils._os import safe_join
from panimg.image_builders import image_builder_mhd, image_builder_tiff
from grandchallenge.cases.tasks import import_images
from grandchallenge.components.backends.exceptions import (
ComponentException,
EventError,
RetryStep,
TaskCancelled,
TaskStillExecuting,
)
from grandchallenge.components.backends.utils import (
LOGLINES,
safe_extract,
user_error,
)
logger = logging.getLogger(__name__)
class TaskStatus(Enum):
RUNNING = "RUNNING"
STOPPED = "STOPPED"
class AmazonECSExecutor:
IS_EVENT_DRIVEN = True
def __init__(
self,
*,
job_id: str,
exec_image_sha256: str,
exec_image_repo_tag: str,
exec_image_file: File,
memory_limit: int,
time_limit: int,
requires_gpu: bool,
):
self._job_id = job_id
self._exec_image_sha256 = exec_image_sha256
self._exec_image_repo_tag = exec_image_repo_tag
self._exec_image_file = exec_image_file
self._memory_limit = memory_limit
self._time_limit = time_limit
self._requires_gpu = requires_gpu
if not self._requires_gpu and self._memory_limit > 6:
# Currently non-GPU jobs can only get 6GB of memory
            # due to the instance types in the CPU pools
logger.warning("Non-GPU job memory restricted")
self._memory_limit = 6
if self._memory_limit < 4 or self._memory_limit > 30:
raise RuntimeError("AWS only supports 4g to 30g of memory")
self.__duration = None
self.__ecs_client = None
self.__logs_client = None
@staticmethod
def get_job_params(*, event):
try:
task_definition_arn = event["taskDefinitionArn"]
group = event["group"]
except KeyError as e:
raise EventError("Malformed event") from e
if group.startswith("service:"):
raise EventError("Service events not handled")
job_id = task_definition_arn.split("/")[-1].split(":")[0]
job_app_label, job_model_name, job_pk = job_id.split("-", 2)
return job_app_label, job_model_name, job_pk
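    # Illustrative sketch (not part of the original class): how get_job_params
    # splits a task definition ARN into (app_label, model_name, pk). The ARN
    # and names below are made-up placeholders.
    @staticmethod
    def _example_get_job_params_usage():
        event = {
            "taskDefinitionArn": (
                "arn:aws:ecs:eu-west-1:123456789012:task-definition/"
                "algorithms-job-1234abcd:7"
            ),
            "group": "family:algorithms-job-1234abcd",
        }
        # Expected result: ("algorithms", "job", "1234abcd")
        return AmazonECSExecutor.get_job_params(event=event)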
def provision(self, *, input_civs, input_prefixes):
self._create_io_volumes()
self._copy_input_files(
input_civs=input_civs, input_prefixes=input_prefixes
)
def execute(self):
task_definition_arn = self._register_task_definition()
self._run_task(task_definition_arn=task_definition_arn)
def handle_event(self, *, event):
logger.info(f"Handling {event=}")
container_exit_codes = self._get_container_exit_codes(event=event)
self._set_duration(event=event)
self._wait_for_log_delivery()
self._handle_container_exit(container_exit_codes=container_exit_codes)
def get_outputs(self, *, output_interfaces):
outputs = []
with transaction.atomic():
# Atomic block required as create_instance needs to
# create interfaces in order to store the files
for interface in output_interfaces:
if interface.is_image_kind:
res = self._create_images_result(interface=interface)
elif interface.is_json_kind:
res = self._create_json_result(interface=interface)
else:
res = self._create_file_result(interface=interface)
outputs.append(res)
return outputs
def deprovision(self):
try:
shutil.rmtree(self._job_directory)
except FileNotFoundError:
logger.warning(
f"Directory not found when trying to remove it: {self._job_directory}"
)
self._stop_running_tasks()
self._deregister_task_definitions()
@property
def stdout(self):
try:
return "\n".join(self._get_task_logs(source="stdout"))
except Exception as e:
logger.warning(f"Could not fetch stdout: {e}")
return ""
@property
def stderr(self):
try:
return "\n".join(self._get_task_logs(source="stderr"))
except Exception as e:
logger.warning(f"Could not fetch stderr: {e}")
return ""
@property
def duration(self):
return self.__duration
@property
def _ecs_client(self):
if self.__ecs_client is None:
self.__ecs_client = boto3.client(
"ecs", region_name=settings.COMPONENTS_AMAZON_ECS_REGION
)
return self.__ecs_client
@property
def _logs_client(self):
if self.__logs_client is None:
self.__logs_client = boto3.client(
"logs", region_name=settings.COMPONENTS_AMAZON_ECS_REGION
)
return self.__logs_client
@property
def _cluster_arn(self):
if self._requires_gpu:
return settings.COMPONENTS_AMAZON_ECS_GPU_CLUSTER_ARN
else:
return settings.COMPONENTS_AMAZON_ECS_CPU_CLUSTER_ARN
@property
def _log_stream_prefix(self):
return "ecs"
@property
def _main_container_name(self):
return self._job_id
@property
def _timeout_container_name(self):
return f"{self._main_container_name}-timeout"
def _wait_for_log_delivery(self):
# It takes some time for all of the logs to finish delivery to
# CloudWatch. Add a wait period here to allow for this.
# Maybe we should do this in a better way, but the rest of the
# system assumes that all the logs are available.
sleep(10)
def _get_task_logs(self, *, source):
response = self._logs_client.get_log_events(
logGroupName=settings.COMPONENTS_AMAZON_ECS_LOG_GROUP_NAME,
logStreamName=f"{self._log_stream_prefix}/{self._main_container_name}",
limit=LOGLINES,
startFromHead=False,
)
events = response["events"]
loglines = []
for event in events:
message = json.loads(event["message"])
if message["source"] == source:
timestamp = self._timestamp_to_datetime(event["timestamp"])
log = message["log"].replace("\x00", "")
loglines.append(f"{timestamp.isoformat()} {log}")
return loglines
@staticmethod
def _timestamp_to_datetime(timestamp):
"""Convert AWS timestamps (ms from epoch) to datetime"""
return datetime.fromtimestamp(timestamp * 0.001, tz=timezone.utc)
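    # Worked example (illustrative, not in the original file): CloudWatch event
    # timestamps are milliseconds since the epoch, so 1_600_000_000_000 maps to
    # datetime(2020, 9, 13, 12, 26, 40, tzinfo=timezone.utc).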
def _set_duration(self, *, event):
try:
started = (
event["startedAt"]
if "startedAt" in event
else event["createdAt"]
)
stopped = event["stoppedAt"]
self.__duration = isoparse(stopped) - isoparse(started)
except Exception as e:
logger.warning(f"Could not determine duration: {e}")
self.__duration = None
@property
def _job_directory(self):
dir_parts = self._job_id.split("-", 2)
if len(dir_parts) != 3:
raise ValueError(f"Invalid job id {self._job_id}")
return (
Path(settings.COMPONENTS_AMAZON_ECS_NFS_MOUNT_POINT)
/ dir_parts[0]
/ dir_parts[1]
/ dir_parts[2]
).resolve()
@property
def _input_directory(self):
return self._job_directory / "input"
@property
def _output_directory(self):
return self._job_directory / "output"
def _create_io_volumes(self):
self._job_directory.parent.parent.mkdir(exist_ok=True, parents=False)
self._job_directory.parent.mkdir(exist_ok=True, parents=False)
self._job_directory.mkdir(exist_ok=False, parents=False)
self._input_directory.mkdir(exist_ok=False, parents=False)
self._output_directory.mkdir(exist_ok=False, parents=False)
def _copy_input_files(self, *, input_civs, input_prefixes):
for civ in input_civs:
prefix = self._input_directory
if str(civ.pk) in input_prefixes:
prefix = safe_join(prefix, input_prefixes[str(civ.pk)])
dest = Path(safe_join(prefix, civ.relative_path))
# We know that the dest is within the prefix as
# safe_join is used, so ok to create the parents here
dest.parent.mkdir(exist_ok=True, parents=True)
if civ.decompress:
try:
safe_extract(src=civ.input_file, dest=dest.parent)
except Exception as e:
raise ComponentException(
"Could not extract input zip file"
) from e
else:
with civ.input_file.open("rb") as fs, open(dest, "wb") as fd:
for chunk in fs.chunks():
fd.write(chunk)
@property
def _resource_requirements(self):
if self._requires_gpu:
return [{"type": "GPU", "value": "1"}]
else:
return []
@property
def _required_memory_units(self):
return 1024 * self._memory_limit
@property
def _required_cpu_units(self):
return 4096 if self._memory_limit > 16 else 2048
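    # Worked example (illustrative, not in the original file): ECS measures
    # memory in MiB and CPU in 1/1024ths of a vCPU, so a 16g job maps to
    # 16 * 1024 = 16384 memory units with 2048 CPU units (2 vCPUs), while a
    # 30g job maps to 30720 memory units with 4096 CPU units (4 vCPUs).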
@property
def _container_definitions(self):
container_definitions = [
{
# Add a second essential container that kills the task
# once the time limit is reached.
# See https://github.com/aws/containers-roadmap/issues/572
"command": ["sleep", str(self._time_limit)],
"image": "public.ecr.aws/amazonlinux/amazonlinux:2",
"name": self._timeout_container_name,
"dependsOn": [
{
"containerName": self._main_container_name,
"condition": "START",
}
],
},
{
"cpu": self._required_cpu_units,
"image": self._exec_image_repo_tag,
"memory": self._required_memory_units,
"mountPoints": [
{
"containerPath": "/input",
"sourceVolume": f"{self._job_id}-input",
"readOnly": True,
},
{
"containerPath": "/output",
"sourceVolume": f"{self._job_id}-output",
"readOnly": False,
},
],
"name": self._main_container_name,
"resourceRequirements": self._resource_requirements,
},
]
for c in container_definitions:
c.update(
{
"disableNetworking": True,
"dockerSecurityOptions": ["no-new-privileges"],
"essential": True, # all essential for timeout to work
"linuxParameters": {
"capabilities": {"drop": ["ALL"]},
"initProcessEnabled": True,
"maxSwap": 0,
"swappiness": 0,
"sharedMemorySize": settings.COMPONENTS_SHARED_MEMORY_SIZE,
},
"logConfiguration": {
"logDriver": "fluentd",
"options": {
"fluentd-address": "unix:///tmp/fluent-bit/sock",
"tag": f"/{c['name']}",
},
},
"privileged": False,
"ulimits": [
{
"name": "nproc",
"hardLimit": settings.COMPONENTS_PIDS_LIMIT,
"softLimit": settings.COMPONENTS_PIDS_LIMIT,
}
],
}
)
return container_definitions
def _register_task_definition(self):
response = self._ecs_client.register_task_definition(
containerDefinitions=self._container_definitions,
cpu=str(self._required_cpu_units),
family=self._job_id,
memory=str(self._required_memory_units),
networkMode="none",
requiresCompatibilities=["EC2"],
taskRoleArn=settings.COMPONENTS_AMAZON_ECS_TASK_ROLE_ARN,
# TODO set tags
volumes=[
{
"name": f"{self._job_id}-input",
"host": {"sourcePath": str(self._input_directory)},
},
{
"name": f"{self._job_id}-output",
"host": {"sourcePath": str(self._output_directory)},
},
],
)
return response["taskDefinition"]["taskDefinitionArn"]
def _run_task(self, *, task_definition_arn):
if not self._list_task_arns(desired_status=TaskStatus.RUNNING):
try:
response = self._ecs_client.run_task(
cluster=self._cluster_arn,
count=1,
enableExecuteCommand=False,
enableECSManagedTags=True,
group=settings.COMPONENTS_AMAZON_ECS_LOG_GROUP_NAME,
placementConstraints=[{"type": "distinctInstance"}],
propagateTags="TASK_DEFINITION",
referenceId=self._job_id,
taskDefinition=task_definition_arn,
)
except self._ecs_client.exceptions.ClientException as e:
if (
e.response["Error"]["Message"]
== "Tasks provisioning capacity limit exceeded."
):
raise RetryStep("Capacity Limit Exceeded") from e
else:
raise
task_arns = [t["taskArn"] for t in response["tasks"]]
if len(task_arns) == 0:
logger.info(f"ECS run_task {response=}")
raise RetryStep("No tasks started by ECS")
else:
logger.info(f"Scheduled {task_arns=}")
else:
logger.warning("A task is already running for this job")
def _get_container_exit_codes(self, *, event):
stop_code = event["stopCode"]
container_exit_codes = {
c["name"]: int(c["exitCode"])
for c in event.get("containers", {})
if "exitCode" in c
}
if stop_code == "TaskFailedToStart" and container_exit_codes.get(
self._main_container_name
) in {0, 1}:
# Sometimes the entire task fails to start, but the main
# container ran before the sidecar(s) could start
pass
elif stop_code in ["TaskFailedToStart", "TerminationNotice"]:
# Requeue the task in the event of resources not available
# or termination
self._run_task(task_definition_arn=event["taskDefinitionArn"])
raise TaskStillExecuting
elif stop_code == "UserInitiated":
raise TaskCancelled
return container_exit_codes
def _handle_container_exit(self, *, container_exit_codes):
if container_exit_codes.get(self._main_container_name) == 0:
# Job's a good un
return
elif container_exit_codes.get(self._main_container_name) == 137:
raise ComponentException(
"The container was killed as it exceeded the memory limit "
f"of {self._memory_limit}g."
)
elif container_exit_codes.get(self._timeout_container_name) == 0:
raise ComponentException("Time limit exceeded")
else:
raise ComponentException(user_error(self.stderr))
def _list_task_arns(
self, *, desired_status, next_token="", task_arns=None
):
if task_arns is None:
task_arns = []
response = self._ecs_client.list_tasks(
cluster=self._cluster_arn,
family=self._job_id,
desiredStatus=desired_status.value,
nextToken=next_token,
)
task_arns += response["taskArns"]
if "nextToken" in response:
return self._list_task_arns(
desired_status=desired_status,
next_token=response["nextToken"],
task_arns=task_arns,
)
return task_arns
def _stop_running_tasks(self):
"""Stop all the running tasks for this job"""
task_arns = self._list_task_arns(desired_status=TaskStatus.RUNNING)
for task_arn in task_arns:
self._ecs_client.stop_task(
cluster=self._cluster_arn, task=task_arn
)
def _deregister_task_definitions(self):
response = self._ecs_client.list_task_definitions(
familyPrefix=self._job_id, status="ACTIVE"
)
next_token = response.get("nextToken")
for task_definition_arn in response["taskDefinitionArns"]:
self._ecs_client.deregister_task_definition(
taskDefinition=task_definition_arn
)
if next_token:
self._deregister_task_definitions()
def _create_images_result(self, *, interface):
base_dir = Path(
safe_join(self._output_directory, interface.relative_path)
)
output_files = [f for f in base_dir.glob("*") if f.is_file()]
if not output_files:
raise ComponentException(f"{interface.relative_path} is empty")
importer_result = import_images(
input_directory=base_dir,
builders=[image_builder_mhd, image_builder_tiff],
recurse_subdirectories=False,
)
if len(importer_result.new_images) == 0:
raise ComponentException(
f"No images imported from {interface.relative_path}"
)
elif len(importer_result.new_images) > 1:
raise ComponentException(
f"Only 1 image should be produced in {interface.relative_path}, "
f"we found {len(importer_result.new_images)}"
)
try:
civ = interface.create_instance(
image=next(iter(importer_result.new_images))
)
except ValidationError:
raise ComponentException(
f"The image produced in {interface.relative_path} is not valid"
)
return civ
def _create_file_result(self, *, interface):
output_file = Path(
safe_join(self._output_directory, interface.relative_path)
)
if (
output_file.is_symlink()
or not output_file.is_file()
or not output_file.exists()
):
raise ComponentException(
f"File {interface.relative_path} was not produced"
)
try:
with open(output_file, "rb") as f:
civ = interface.create_instance(fileobj=f)
except ValidationError:
raise ComponentException(
f"The file produced at {interface.relative_path} is not valid"
)
return civ
def _create_json_result(self, *, interface):
output_file = Path(
safe_join(self._output_directory, interface.relative_path)
)
if (
output_file.is_symlink()
or not output_file.is_file()
or not output_file.exists()
):
raise ComponentException(
f"File {interface.relative_path} was not produced"
)
try:
with open(output_file, "rb") as f:
result = json.loads(
f.read().decode("utf-8"),
parse_constant=lambda x: None, # Removes -inf, inf and NaN
)
except JSONDecodeError:
raise ComponentException(
f"The file produced at {interface.relative_path} is not valid json"
)
try:
civ = interface.create_instance(value=result)
except ValidationError:
raise ComponentException(
f"The file produced at {interface.relative_path} is not valid"
)
return civ
| 34.038961
| 86
| 0.571442
|
794f72365df299a5f1f73181a71ac52f05e426f2
| 39,685
|
py
|
Python
|
tensorflow/python/data/experimental/kernel_tests/parse_example_dataset_test.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 56
|
2018-06-21T13:47:23.000Z
|
2020-05-13T09:31:47.000Z
|
tensorflow/python/data/experimental/kernel_tests/parse_example_dataset_test.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 6
|
2022-01-15T07:17:47.000Z
|
2022-02-14T15:28:22.000Z
|
tensorflow/python/data/experimental/kernel_tests/parse_example_dataset_test.py
|
abhaikollara/tensorflow
|
4f96df3659696990cb34d0ad07dc67843c4225a9
|
[
"Apache-2.0"
] | 15
|
2018-09-06T14:18:32.000Z
|
2020-05-14T06:35:30.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.parse_example_dataset()."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.experimental.ops import parsing_ops as contrib_parsing_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
# Helpers for creating Example objects
example = example_pb2.Example
feature = feature_pb2.Feature
features = lambda d: feature_pb2.Features(feature=d)
bytes_feature = lambda v: feature(bytes_list=feature_pb2.BytesList(value=v))
int64_feature = lambda v: feature(int64_list=feature_pb2.Int64List(value=v))
float_feature = lambda v: feature(float_list=feature_pb2.FloatList(value=v))
# Helpers for creating SequenceExample objects
feature_list = lambda l: feature_pb2.FeatureList(feature=l)
feature_lists = lambda d: feature_pb2.FeatureLists(feature_list=d)
sequence_example = example_pb2.SequenceExample
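# Illustrative sketch (not part of the original test file): how the helper
# lambdas above compose into a serialized tf.Example proto, which is the input
# format every test below feeds to parse_example_dataset. The feature names and
# values are placeholders.
def _example_build_serialized_example():
  ex = example(features=features({
      "age": int64_feature([42]),
      "name": bytes_feature([b"alice"]),
      "scores": float_feature([0.5, 1.5]),
  }))
  return ex.SerializeToString()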
@test_util.run_all_in_graph_and_eager_modes
class ParseExampleDatasetTest(test_base.DatasetTestBase):
def _compare_output_to_expected(self, dict_tensors, expected_tensors):
self.assertEqual(set(dict_tensors.keys()), set(expected_tensors.keys()))
for k, v in sorted(dict_tensors.items()):
expected_v = expected_tensors[k]
self.assertValuesEqual(expected_v, v)
def _test(self,
input_tensor,
feature_val,
expected_values=None,
expected_err=None,
create_iterator_twice=False):
if expected_err:
with self.assertRaisesWithPredicateMatch(expected_err[0],
expected_err[1]):
dataset = dataset_ops.Dataset.from_tensors(input_tensor).apply(
contrib_parsing_ops.parse_example_dataset(feature_val))
get_next = self.getNext(dataset)
self.evaluate(get_next())
return
else:
# Returns dict w/ Tensors and SparseTensors.
# Check values.
dataset = dataset_ops.Dataset.from_tensors(input_tensor).apply(
contrib_parsing_ops.parse_example_dataset(feature_val))
get_next = self.getNext(dataset)
result = self.evaluate(get_next())
self._compare_output_to_expected(result, expected_values)
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(get_next())
if create_iterator_twice:
get_next = self.getNext(dataset)
result = self.evaluate(get_next())
self._compare_output_to_expected(result, expected_values)
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(get_next())
# Check shapes; if serialized is a Tensor we need its size to
# properly check.
batch_size = (
self.evaluate(input_tensor).size if isinstance(input_tensor, ops.Tensor)
else np.asarray(input_tensor).size)
for k, f in feature_val.items():
if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
self.assertEqual(
dataset_ops.get_legacy_output_shapes(dataset)[k].as_list()[0],
batch_size)
elif isinstance(f, parsing_ops.VarLenFeature):
self.assertEqual(
dataset_ops.get_legacy_output_shapes(dataset)[k].as_list()[1], None)
def testEmptySerializedWithAllDefaults(self):
sparse_name = "st_a"
a_name = "a"
b_name = "b"
c_name = "c:has_a_tricky_name"
a_default = [0, 42, 0]
b_default = np.random.rand(3, 3).astype(bytes)
c_default = np.random.rand(2).astype(np.float32)
expected_st_a = sparse_tensor.SparseTensorValue( # indices, values, shape
np.empty((0, 2), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # sp_a is DT_INT64
np.array([2, 0], dtype=np.int64)) # batch == 2, max_elems = 0
expected_output = {
sparse_name: expected_st_a,
a_name: np.array(2 * [[a_default]]),
b_name: np.array(2 * [b_default]),
c_name: np.array(2 * [c_default]),
}
self._test(
ops.convert_to_tensor(["", ""]), {
sparse_name:
parsing_ops.VarLenFeature(dtypes.int64),
a_name:
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
b_name:
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
c_name:
parsing_ops.FixedLenFeature(
(2,), dtypes.float32, default_value=c_default),
},
expected_values=expected_output,
create_iterator_twice=True)
@test_util.run_deprecated_v1
def testEmptySerializedWithoutDefaultsShouldFail(self):
input_features = {
"st_a":
parsing_ops.VarLenFeature(dtypes.int64),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=[0, 42, 0]),
"b":
parsing_ops.FixedLenFeature(
(3, 3),
dtypes.string,
default_value=np.random.rand(3, 3).astype(bytes)),
# Feature "c" is missing a default, this gap will cause failure.
"c":
parsing_ops.FixedLenFeature(
(2,), dtype=dtypes.float32),
}
# Edge case where the key is there but the feature value is empty
original = example(features=features({"c": feature()}))
self._test(
[original.SerializeToString()],
input_features,
expected_err=(errors_impl.InvalidArgumentError,
"Feature: c \\(data type: float\\) is required"))
# Standard case of missing key and value.
self._test(
["", ""],
input_features,
expected_err=(errors_impl.InvalidArgumentError,
"Feature: c \\(data type: float\\) is required"))
@test_util.run_deprecated_v1
def testDenseNotMatchingShapeShouldFail(self):
original = [
example(features=features({
"a": float_feature([1, 1, 3]),
})), example(features=features({
"a": float_feature([-1, -1]),
}))
]
serialized = [m.SerializeToString() for m in original]
self._test(
ops.convert_to_tensor(serialized),
{"a": parsing_ops.FixedLenFeature((1, 3), dtypes.float32)},
expected_err=(errors_impl.InvalidArgumentError,
"Key: a, Index: 1. Number of float values"))
def testDenseDefaultNoShapeShouldFail(self):
original = [example(features=features({"a": float_feature([1, 1, 3]),})),]
serialized = [m.SerializeToString() for m in original]
self._test(
ops.convert_to_tensor(serialized),
{"a": parsing_ops.FixedLenFeature(None, dtypes.float32)},
expected_err=(ValueError, "Missing shape for feature a"))
def testSerializedContainingSparse(self):
original = [
example(features=features({
"st_c": float_feature([3, 4])
})),
example(features=features({
"st_c": float_feature([]), # empty float list
})),
example(features=features({
"st_d": feature(), # feature with nothing in it
})),
example(features=features({
"st_c": float_feature([1, 2, -1]),
"st_d": bytes_feature([b"hi"])
}))
]
serialized = [m.SerializeToString() for m in original]
expected_st_c = sparse_tensor.SparseTensorValue( # indices, values, shape
np.array([[0, 0], [0, 1], [3, 0], [3, 1], [3, 2]], dtype=np.int64),
np.array([3.0, 4.0, 1.0, 2.0, -1.0], dtype=np.float32),
        np.array([4, 3], dtype=np.int64)) # batch == 4, max_elems = 3
expected_st_d = sparse_tensor.SparseTensorValue( # indices, values, shape
np.array([[3, 0]], dtype=np.int64), np.array(["hi"], dtype=bytes),
        np.array([4, 1], dtype=np.int64)) # batch == 4, max_elems = 1
expected_output = {
"st_c": expected_st_c,
"st_d": expected_st_d,
}
self._test(
ops.convert_to_tensor(serialized), {
"st_c": parsing_ops.VarLenFeature(dtypes.float32),
"st_d": parsing_ops.VarLenFeature(dtypes.string)
},
expected_values=expected_output,
create_iterator_twice=True)
def testSerializedContainingSparseFeature(self):
original = [
example(features=features({
"val": float_feature([3, 4]),
"idx": int64_feature([5, 10])
})),
example(features=features({
"val": float_feature([]), # empty float list
"idx": int64_feature([])
})),
example(features=features({
"val": feature(), # feature with nothing in it
# missing idx feature
})),
example(features=features({
"val": float_feature([1, 2, -1]),
"idx":
int64_feature([0, 9, 3]) # unsorted
}))
]
serialized = [m.SerializeToString() for m in original]
expected_sp = sparse_tensor.SparseTensorValue( # indices, values, shape
np.array([[0, 5], [0, 10], [3, 0], [3, 3], [3, 9]], dtype=np.int64),
np.array([3.0, 4.0, 1.0, -1.0, 2.0], dtype=np.float32),
np.array([4, 13], dtype=np.int64)) # batch == 4, max_elems = 13
expected_output = {"sp": expected_sp,}
self._test(
ops.convert_to_tensor(serialized),
{"sp": parsing_ops.SparseFeature(["idx"], "val", dtypes.float32, [13])},
expected_values=expected_output,
create_iterator_twice=True)
def testSerializedContainingSparseFeatureReuse(self):
original = [
example(features=features({
"val1": float_feature([3, 4]),
"val2": float_feature([5, 6]),
"idx": int64_feature([5, 10])
})),
example(features=features({
"val1": float_feature([]), # empty float list
"idx": int64_feature([])
})),
]
serialized = [m.SerializeToString() for m in original]
expected_sp1 = sparse_tensor.SparseTensorValue( # indices, values, shape
np.array([[0, 5], [0, 10]], dtype=np.int64),
np.array([3.0, 4.0], dtype=np.float32),
np.array([2, 13], dtype=np.int64)) # batch == 2, max_elems = 13
expected_sp2 = sparse_tensor.SparseTensorValue( # indices, values, shape
np.array([[0, 5], [0, 10]], dtype=np.int64),
np.array([5.0, 6.0], dtype=np.float32),
        np.array([2, 7], dtype=np.int64)) # batch == 2, max_elems = 7
expected_output = {
"sp1": expected_sp1,
"sp2": expected_sp2,
}
self._test(
ops.convert_to_tensor(serialized), {
"sp1":
parsing_ops.SparseFeature("idx", "val1", dtypes.float32, 13),
"sp2":
parsing_ops.SparseFeature(
"idx", "val2", dtypes.float32, size=7, already_sorted=True)
},
expected_values=expected_output,
create_iterator_twice=True)
def testSerializedContaining3DSparseFeature(self):
original = [
example(features=features({
"val": float_feature([3, 4]),
"idx0": int64_feature([5, 10]),
"idx1": int64_feature([0, 2]),
})),
example(features=features({
"val": float_feature([]), # empty float list
"idx0": int64_feature([]),
"idx1": int64_feature([]),
})),
example(features=features({
"val": feature(), # feature with nothing in it
# missing idx feature
})),
example(features=features({
"val": float_feature([1, 2, -1]),
"idx0": int64_feature([0, 9, 3]), # unsorted
"idx1": int64_feature([1, 0, 2]),
}))
]
serialized = [m.SerializeToString() for m in original]
expected_sp = sparse_tensor.SparseTensorValue(
# indices
np.array([[0, 5, 0], [0, 10, 2], [3, 0, 1], [3, 3, 2], [3, 9, 0]],
dtype=np.int64),
# values
np.array([3.0, 4.0, 1.0, -1.0, 2.0], dtype=np.float32),
# shape batch == 4, max_elems = 13
np.array([4, 13, 3], dtype=np.int64))
expected_output = {"sp": expected_sp,}
self._test(
ops.convert_to_tensor(serialized), {
"sp":
parsing_ops.SparseFeature(["idx0", "idx1"], "val",
dtypes.float32, [13, 3])
},
expected_values=expected_output,
create_iterator_twice=True)
def testSerializedContainingDense(self):
aname = "a"
bname = "b*has+a:tricky_name"
original = [
example(features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"]),
})), example(features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b""]),
}))
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
aname:
np.array(
[[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1),
bname:
np.array(
["b0_str", ""], dtype=bytes).reshape(2, 1, 1, 1, 1),
}
# No defaults, values required
self._test(
ops.convert_to_tensor(serialized), {
aname:
parsing_ops.FixedLenFeature((1, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenFeature((1, 1, 1, 1), dtype=dtypes.string),
},
expected_values=expected_output,
create_iterator_twice=True)
  # This test is identical to the previous one except
# for the creation of 'serialized'.
def testSerializedContainingDenseWithConcat(self):
aname = "a"
bname = "b*has+a:tricky_name"
# TODO(lew): Feature appearing twice should be an error in future.
original = [
(example(features=features({
aname: float_feature([10, 10]),
})), example(features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str"]),
}))),
(
example(features=features({
bname: bytes_feature([b"b100"]),
})),
example(features=features({
aname: float_feature([-1, -1]),
bname: bytes_feature([b"b1"]),
})),),
]
serialized = [
m.SerializeToString() + n.SerializeToString() for (m, n) in original
]
expected_output = {
aname:
np.array(
[[1, 1], [-1, -1]], dtype=np.float32).reshape(2, 1, 2, 1),
bname:
np.array(
["b0_str", "b1"], dtype=bytes).reshape(2, 1, 1, 1, 1),
}
# No defaults, values required
self._test(
ops.convert_to_tensor(serialized), {
aname:
parsing_ops.FixedLenFeature((1, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenFeature((1, 1, 1, 1), dtype=dtypes.string),
},
expected_values=expected_output,
create_iterator_twice=True)
def testSerializedContainingDenseScalar(self):
original = [
example(features=features({
"a": float_feature([1]),
})), example(features=features({}))
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
"a":
np.array(
[[1], [-1]], dtype=np.float32) # 2x1 (column vector)
}
self._test(
ops.convert_to_tensor(serialized), {
"a":
parsing_ops.FixedLenFeature(
(1,), dtype=dtypes.float32, default_value=-1),
},
expected_values=expected_output,
create_iterator_twice=True)
def testSerializedContainingDenseWithDefaults(self):
original = [
example(features=features({
"a": float_feature([1, 1]),
})),
example(features=features({
"b": bytes_feature([b"b1"]),
})),
example(features=features({
"b": feature()
})),
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
"a":
np.array(
[[1, 1], [3, -3], [3, -3]], dtype=np.float32).reshape(3, 1, 2,
1),
"b":
np.array(
["tmp_str", "b1", "tmp_str"], dtype=bytes).reshape(3, 1, 1, 1,
1),
}
self._test(
ops.convert_to_tensor(serialized), {
"a":
parsing_ops.FixedLenFeature(
(1, 2, 1), dtype=dtypes.float32, default_value=[3.0, -3.0]),
"b":
parsing_ops.FixedLenFeature(
(1, 1, 1, 1), dtype=dtypes.string, default_value="tmp_str"),
},
expected_values=expected_output,
create_iterator_twice=True)
def testSerializedSparseAndSparseFeatureAndDenseWithNoDefault(self):
expected_st_a = sparse_tensor.SparseTensorValue( # indices, values, shape
np.empty((0, 2), dtype=np.int64), # indices
np.empty((0,), dtype=np.int64), # sp_a is DT_INT64
np.array([2, 0], dtype=np.int64)) # batch == 2, max_elems = 0
expected_sp = sparse_tensor.SparseTensorValue( # indices, values, shape
np.array([[0, 0], [0, 3], [1, 7]], dtype=np.int64),
np.array(["a", "b", "c"], dtype="|S"),
        np.array([2, 13], dtype=np.int64)) # batch == 2, max_elems = 13
original = [
example(features=features({
"c": float_feature([3, 4]),
"val": bytes_feature([b"a", b"b"]),
"idx": int64_feature([0, 3])
})), example(features=features({
"c": float_feature([1, 2]),
"val": bytes_feature([b"c"]),
"idx": int64_feature([7])
}))
]
serialized = [m.SerializeToString() for m in original]
a_default = [1, 2, 3]
b_default = np.random.rand(3, 3).astype(bytes)
expected_output = {
"st_a": expected_st_a,
"sp": expected_sp,
"a": np.array(2 * [[a_default]]),
"b": np.array(2 * [b_default]),
"c": np.array(
[[3, 4], [1, 2]], dtype=np.float32),
}
self._test(
ops.convert_to_tensor(serialized),
{
"st_a":
parsing_ops.VarLenFeature(dtypes.int64),
"sp":
parsing_ops.SparseFeature("idx", "val", dtypes.string, 13),
"a":
parsing_ops.FixedLenFeature(
(1, 3), dtypes.int64, default_value=a_default),
"b":
parsing_ops.FixedLenFeature(
(3, 3), dtypes.string, default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
"c":
parsing_ops.FixedLenFeature((2,), dtypes.float32),
},
expected_values=expected_output,
create_iterator_twice=True)
  def testSerializedContainingSparseAndSparseFeatureWithReuse(self):
expected_idx = sparse_tensor.SparseTensorValue( # indices, values, shape
np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.int64),
np.array([0, 3, 7, 1]),
        np.array([2, 2], dtype=np.int64)) # batch == 2, max_elems = 2
expected_sp = sparse_tensor.SparseTensorValue( # indices, values, shape
np.array([[0, 0], [0, 3], [1, 1], [1, 7]], dtype=np.int64),
np.array(["a", "b", "d", "c"], dtype="|S"),
        np.array([2, 13], dtype=np.int64)) # batch == 2, max_elems = 13
original = [
example(features=features({
"val": bytes_feature([b"a", b"b"]),
"idx": int64_feature([0, 3])
})), example(features=features({
"val": bytes_feature([b"c", b"d"]),
"idx": int64_feature([7, 1])
}))
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
"idx": expected_idx,
"sp": expected_sp,
}
self._test(
ops.convert_to_tensor(serialized), {
"idx":
parsing_ops.VarLenFeature(dtypes.int64),
"sp":
parsing_ops.SparseFeature(["idx"], "val", dtypes.string, [13]),
},
expected_values=expected_output,
create_iterator_twice=True)
def _testSerializedContainingVarLenDenseLargerBatch(self, batch_size):
# During parsing, data read from the serialized proto is stored in buffers.
# For small batch sizes, a buffer will contain one minibatch entry.
# For larger batch sizes, a buffer may contain several minibatch
# entries. This test identified a bug where the code that copied
# data out of the buffers and into the output tensors assumed each
# buffer only contained one minibatch entry. The bug has since been fixed.
truth_int = [i for i in range(batch_size)]
truth_str = [[("foo%d" % i).encode(), ("bar%d" % i).encode()]
for i in range(batch_size)]
expected_str = copy.deepcopy(truth_str)
# Delete some intermediate entries
for i in range(batch_size):
col = 1
if np.random.rand() < 0.25:
# w.p. 25%, drop out the second entry
expected_str[i][col] = b"default"
col -= 1
truth_str[i].pop()
if np.random.rand() < 0.25:
# w.p. 25%, drop out the second entry (possibly again)
expected_str[i][col] = b"default"
truth_str[i].pop()
expected_output = {
# Batch size batch_size, 1 time step.
"a": np.array(truth_int, dtype=np.int64).reshape(batch_size, 1),
# Batch size batch_size, 2 time steps.
"b": np.array(expected_str, dtype="|S").reshape(batch_size, 2),
}
original = [
example(features=features(
{"a": int64_feature([truth_int[i]]),
"b": bytes_feature(truth_str[i])}))
for i in range(batch_size)
]
serialized = [m.SerializeToString() for m in original]
self._test(
ops.convert_to_tensor(serialized, dtype=dtypes.string), {
"a":
parsing_ops.FixedLenSequenceFeature(
shape=(),
dtype=dtypes.int64,
allow_missing=True,
default_value=-1),
"b":
parsing_ops.FixedLenSequenceFeature(
shape=[],
dtype=dtypes.string,
allow_missing=True,
default_value="default"),
},
expected_values=expected_output,
create_iterator_twice=True)
def testSerializedContainingVarLenDenseLargerBatch(self):
np.random.seed(3456)
for batch_size in (1, 10, 20, 100, 256):
self._testSerializedContainingVarLenDenseLargerBatch(batch_size)
def testSerializedShapeMismatch(self):
aname = "a"
bname = "b"
cname = "c"
original = [
example(features=features({
cname: int64_feature([2]),
})),
example(features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str", b"b1_str"]),
})),
example(features=features({
aname: float_feature([-1, -1, 2, 2]),
bname: bytes_feature([b"b1"]),
})),
example(features=features({
aname: float_feature([]),
cname: int64_feature([3]),
})),
]
serialized = [m.SerializeToString() for m in original]
if context.executing_eagerly():
self._test(
ops.convert_to_tensor(serialized), {
aname:
parsing_ops.FixedLenSequenceFeature((2, 1),
dtype=dtypes.float32,
allow_missing=True,
default_value=[]),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
},
expected_err=(errors_impl.InvalidArgumentError,
"Input to reshape is a tensor with 0 values"))
else:
self._test(
ops.convert_to_tensor(serialized), {
aname:
parsing_ops.FixedLenSequenceFeature((2, 1),
dtype=dtypes.float32,
allow_missing=True,
default_value=[]),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
},
expected_err=(ValueError,
"Cannot reshape a tensor with 0 elements to shape"))
@test_util.run_deprecated_v1
def testSerializedContainingVarLenDense(self):
aname = "a"
bname = "b"
cname = "c"
dname = "d"
original = [
example(features=features({
cname: int64_feature([2]),
})),
example(
features=features({
aname: float_feature([1, 1]),
bname: bytes_feature([b"b0_str", b"b1_str"]),
})),
example(
features=features({
aname: float_feature([-1, -1, 2, 2]),
bname: bytes_feature([b"b1"]),
})),
example(
features=features({
aname: float_feature([]),
cname: int64_feature([3]),
})),
]
serialized = [m.SerializeToString() for m in original]
expected_output = {
aname:
np.array(
[
[0, 0, 0, 0],
[1, 1, 0, 0],
[-1, -1, 2, 2],
[0, 0, 0, 0],
],
dtype=np.float32).reshape(4, 2, 2, 1),
bname:
np.array(
[["", ""], ["b0_str", "b1_str"], ["b1", ""], ["", ""]],
dtype=bytes).reshape(4, 2, 1, 1, 1),
cname:
np.array([2, 0, 0, 3], dtype=np.int64).reshape(4, 1),
dname:
np.empty(shape=(4, 0), dtype=bytes),
}
self._test(
ops.convert_to_tensor(serialized), {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=True),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
},
expected_values=expected_output,
create_iterator_twice=True)
# Test with padding values.
expected_output_custom_padding = dict(expected_output)
expected_output_custom_padding[aname] = np.array(
[
[-2, -2, -2, -2],
[1, 1, -2, -2],
[-1, -1, 2, 2],
[-2, -2, -2, -2],
],
dtype=np.float32).reshape(4, 2, 2, 1)
self._test(
ops.convert_to_tensor(serialized), {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1),
dtype=dtypes.float32,
allow_missing=True,
default_value=-2.0),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=True),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
}, expected_output_custom_padding)
# Change number of required values so the inputs are not a
# multiple of this size.
self._test(
ops.convert_to_tensor(serialized), {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
},
expected_err=(
errors_impl.OpError, "Key: b, Index: 2. "
"Number of bytes values is not a multiple of stride length."))
self._test(
ops.convert_to_tensor(serialized), {
aname:
parsing_ops.FixedLenFeature((None, 2, 1), dtype=dtypes.float32),
bname:
parsing_ops.FixedLenSequenceFeature(
(2, 1, 1), dtype=dtypes.string, allow_missing=True),
},
expected_err=(ValueError,
"First dimension of shape for feature a unknown. "
"Consider using FixedLenSequenceFeature."))
self._test(
ops.convert_to_tensor(serialized), {
cname:
parsing_ops.FixedLenFeature(
(1, None), dtype=dtypes.int64, default_value=[[1]]),
},
expected_err=(ValueError,
"All dimensions of shape for feature c need to be known "
r"but received \(1, None\)."))
self._test(
ops.convert_to_tensor(serialized), {
aname:
parsing_ops.FixedLenSequenceFeature(
(2, 1), dtype=dtypes.float32, allow_missing=True),
bname:
parsing_ops.FixedLenSequenceFeature(
(1, 1, 1), dtype=dtypes.string, allow_missing=True),
cname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.int64, allow_missing=False),
dname:
parsing_ops.FixedLenSequenceFeature(
shape=[], dtype=dtypes.string, allow_missing=True),
},
expected_err=(ValueError,
"Unsupported: FixedLenSequenceFeature requires "
"allow_missing to be True."))
def testSerializedContainingRaggedFeatureWithNoPartitions(self):
original = [
example(
features=features({
"rt_c": float_feature([3, 4, 5, 6, 7, 8]),
})),
example(
features=features({
"rt_c": float_feature([]), # empty float list
})),
example(
features=features({
"rt_d": feature(), # feature with nothing in it
})),
example(
features=features({
"rt_c": float_feature([1, 2, -1]),
"rt_d": bytes_feature([b"hi"]),
}))
]
serialized = [m.SerializeToString() for m in original]
expected_rt_c = ragged_factory_ops.constant_value(
[[3.0, 4.0, 5.0, 6.0, 7.0, 8.0], [], [], [1.0, 2.0, -1.0]],
row_splits_dtype=dtypes.int32)
expected_rt_d = ragged_factory_ops.constant_value(
[[], [], [], [b"hi"]], row_splits_dtype=dtypes.int64)
expected_output = {
"rt_c": expected_rt_c,
"rt_d": expected_rt_d,
}
self._test(
ops.convert_to_tensor(serialized), {
"rt_c":
parsing_ops.RaggedFeature(dtypes.float32),
"rt_d":
parsing_ops.RaggedFeature(
dtypes.string, row_splits_dtype=dtypes.int64),
},
expected_values=expected_output,
create_iterator_twice=True)
def testSerializedContainingRaggedFeatureWithOnePartition(self):
original = [
example(
features=features({
# rt = [[3], [4, 5, 6]]
"rt_values": float_feature([3, 4, 5, 6]),
"rt_splits": int64_feature([0, 1, 4]),
"rt_lengths": int64_feature([1, 3]),
"rt_starts": int64_feature([0, 1]),
"rt_limits": int64_feature([1, 4]),
"rt_rowids": int64_feature([0, 1, 1, 1]),
})),
example(
features=features({
# rt = []
"rt_values": float_feature([]),
"rt_splits": int64_feature([0]),
"rt_lengths": int64_feature([]),
"rt_starts": int64_feature([]),
"rt_limits": int64_feature([]),
"rt_rowids": int64_feature([]),
})),
example(
features=features({
# rt = []
"rt_values": feature(), # feature with nothing in it
"rt_splits": int64_feature([0]),
"rt_lengths": feature(),
"rt_starts": feature(),
"rt_limits": feature(),
"rt_rowids": feature(),
})),
example(
features=features({
# rt = [[1.0, 2.0, -1.0], [], [8.0, 9.0], [5.0]]
"rt_values": float_feature([1, 2, -1, 8, 9, 5]),
"rt_splits": int64_feature([0, 3, 3, 5, 6]),
"rt_lengths": int64_feature([3, 0, 2, 1]),
"rt_starts": int64_feature([0, 3, 3, 5]),
"rt_limits": int64_feature([3, 3, 5, 6]),
"rt_rowids": int64_feature([0, 0, 0, 2, 2, 3]),
}))
]
serialized = [m.SerializeToString() for m in original]
test_features = {
"rt1":
parsing_ops.RaggedFeature(
value_key="rt_values",
partitions=[parsing_ops.RaggedFeature.RowSplits("rt_splits")],
dtype=dtypes.float32),
"rt2":
parsing_ops.RaggedFeature(
value_key="rt_values",
partitions=[parsing_ops.RaggedFeature.RowLengths("rt_lengths")],
dtype=dtypes.float32),
"rt3":
parsing_ops.RaggedFeature(
value_key="rt_values",
partitions=[parsing_ops.RaggedFeature.RowStarts("rt_starts")],
dtype=dtypes.float32),
"rt4":
parsing_ops.RaggedFeature(
value_key="rt_values",
partitions=[parsing_ops.RaggedFeature.RowLimits("rt_limits")],
dtype=dtypes.float32),
"rt5":
parsing_ops.RaggedFeature(
value_key="rt_values",
partitions=[parsing_ops.RaggedFeature.ValueRowIds("rt_rowids")],
dtype=dtypes.float32),
"uniform1":
parsing_ops.RaggedFeature(
value_key="rt_values",
partitions=[parsing_ops.RaggedFeature.UniformRowLength(2)],
dtype=dtypes.float32),
"uniform2":
parsing_ops.RaggedFeature(
value_key="rt_values",
partitions=[
parsing_ops.RaggedFeature.UniformRowLength(2),
parsing_ops.RaggedFeature.RowSplits("rt_splits")
],
dtype=dtypes.float32),
}
expected_rt = ragged_factory_ops.constant(
[[[3], [4, 5, 6]], [], [], [[1, 2, -1], [], [8, 9], [5]]],
dtype=dtypes.float32,
row_splits_dtype=dtypes.int32)
expected_uniform1 = ragged_factory_ops.constant(
[[[3, 4], [5, 6]], [], [], [[1, 2], [-1, 8], [9, 5]]],
ragged_rank=1,
dtype=dtypes.float32,
row_splits_dtype=dtypes.int32)
expected_uniform2 = ragged_factory_ops.constant(
[[[[3], [4, 5, 6]]], [], [], [[[1, 2, -1], []], [[8, 9], [5]]]],
dtype=dtypes.float32,
row_splits_dtype=dtypes.int32)
expected_output = {
"rt1": expected_rt,
"rt2": expected_rt,
"rt3": expected_rt,
"rt4": expected_rt,
"rt5": expected_rt,
"uniform1": expected_uniform1,
"uniform2": expected_uniform2,
}
self._test(
ops.convert_to_tensor(serialized),
test_features,
expected_values=expected_output,
create_iterator_twice=True)
def testSerializedContainingRaggedFeatureWithMultiplePartitions(self):
original = [
# rt shape: [(batch), 2, None, None]
example(
features=features({
# rt = [[[[1]], [[2, 3], [4]]], [[], [[5, 6, 7]]]]
"rt_values": float_feature([1, 2, 3, 4, 5, 6, 7]),
"lengths_axis2": int64_feature([1, 2, 0, 1]),
"lengths_axis3": int64_feature([1, 2, 1, 3]),
"splits_axis3": int64_feature([0, 1, 3, 4, 7]),
})),
example(
features=features({
# rt = [[[[1, 2, 3], [4]], [[5], [6], [7, 8]]]]
"rt_values": float_feature([1, 2, 3, 4, 5, 6, 7, 8]),
"lengths_axis2": int64_feature([2, 3]),
"lengths_axis3": int64_feature([3, 1, 1, 1, 2]),
"splits_axis3": int64_feature([0, 3, 4, 5, 6, 8]),
}))
]
serialized = [m.SerializeToString() for m in original]
test_features = {
"rt1":
parsing_ops.RaggedFeature(
value_key="rt_values",
partitions=[
parsing_ops.RaggedFeature.UniformRowLength(2),
parsing_ops.RaggedFeature.RowLengths("lengths_axis2"),
parsing_ops.RaggedFeature.RowSplits("splits_axis3"),
],
dtype=dtypes.float32,
row_splits_dtype=dtypes.int64,
),
}
expected_rt = ragged_factory_ops.constant(
[[[[[1]], [[2, 3], [4]]], [[], [[5, 6, 7]]]],
[[[[1, 2, 3], [4]], [[5], [6], [7, 8]]]]],
dtype=dtypes.float32,
row_splits_dtype=dtypes.int64)
expected_output = {
"rt1": expected_rt,
}
self._test(
ops.convert_to_tensor(serialized),
test_features,
expected_values=expected_output,
create_iterator_twice=True)
if __name__ == "__main__":
test.main()
| 36.142987
| 86
| 0.54789
|
794f72ae7f02726ce125a5446a10239ebef117ff
| 1,351
|
py
|
Python
|
polyglot/__main__.py
|
AustEcon/polyglot
|
e3511db0c1bda8fcfce91b466fe55bdb8e3aebc1
|
[
"MIT"
] | 21
|
2019-05-04T14:06:26.000Z
|
2022-01-17T15:46:22.000Z
|
polyglot/__main__.py
|
AustEcon/polyglot
|
e3511db0c1bda8fcfce91b466fe55bdb8e3aebc1
|
[
"MIT"
] | 16
|
2019-07-11T08:57:20.000Z
|
2021-04-15T08:07:30.000Z
|
polyglot/__main__.py
|
AustEcon/polyglot
|
e3511db0c1bda8fcfce91b466fe55bdb8e3aebc1
|
[
"MIT"
] | 4
|
2019-06-16T14:27:30.000Z
|
2020-03-02T02:40:59.000Z
|
#!/usr/bin/env python3
import argparse
import getpass
import sys
import bitsv
import polyglot
def set_network(args):
if args.testnet:
network = 'test'
elif args.scalingtestnet:
network = 'stn'
else:
network = 'main'
return network
def get_wif_securely():
wif = ""
while wif == "":
wif = getpass.getpass("Enter private key in wif format:")
if not wif:
print("Was expecting a wif format private key but got an empty string. Try again.")
return wif
def main():
parser = argparse.ArgumentParser(description='Upload a file to Bitcoin SV.')
parser.add_argument('file', help='filename')
parser.add_argument("--testnet", action="store_true", dest="testnet", default=False,
help="Use Testnet")
parser.add_argument("--scaling-testnet", action="store_true", dest="scalingtestnet",
default=False, help="Use Scaling Testnet")
args = parser.parse_args()
wif = get_wif_securely()
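    # sanity-check the WIF key (checksum + decoding) before building the uploader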
try:
bitsv.format.wif_checksum_check(wif)
bitsv.format.wif_to_bytes(wif)
    except ValueError:
print(f"'{wif}' is not a valid WIF format private key")
sys.exit(1)
uploader = polyglot.Upload(wif, network=set_network(args))
txid = uploader.upload_easy(args.file)
print(txid)
| 27.571429
| 95
| 0.640266
|
794f730e9e21e68e8b372be61db423b436c97e6c
| 1,666
|
py
|
Python
|
quartz/cli.py
|
vladcalin/eventer
|
f0b62a969714ea09c50191f770265a4a7d1a7bec
|
[
"MIT"
] | 3
|
2017-05-29T18:55:35.000Z
|
2019-08-31T04:45:36.000Z
|
quartz/cli.py
|
vladcalin/eventer
|
f0b62a969714ea09c50191f770265a4a7d1a7bec
|
[
"MIT"
] | 1
|
2017-05-29T18:59:47.000Z
|
2017-05-30T05:59:35.000Z
|
quartz/cli.py
|
vladcalin/eventer
|
f0b62a969714ea09c50191f770265a4a7d1a7bec
|
[
"MIT"
] | 4
|
2017-03-19T13:49:46.000Z
|
2020-07-14T08:58:20.000Z
|
import json
import sys
import os.path
import click
from quartz.service import QuartzService
from quartz.models import set_db_parameters
from quartz import __version__
BANNER = """
_
__ _ _ _ __ _ _ __| |_ ____
/ _` | | | |/ _` | '__| __|_ /
| (_| | |_| | (_| | | | |_ / /
\__, |\__,_|\__,_|_| \__/___|
|_|
Current version: {version}
""".format(version=__version__)
DEFAULTS = {
"database_url": "mongo://localhost:27017/quartz",
"host": "0.0.0.0",
"port": 8000,
"registry": [],
"accessible_at": ["127.0.0.1", 8000]
}
def get_config_value(config, key):
return config.get(key, DEFAULTS[key])
def print_banner(config_file):
print(BANNER)
print("")
print("Using the configuration file '{0}'".format(os.path.abspath(config_file)))
print("")
@click.group()
def cli():
pass
@cli.command("start", help="The JSON configuration file with the parameters")
@click.argument("config")
def start(config):
print_banner(config)
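    # load the JSON config, point the models at the configured database, then start the service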
with open(config) as f:
cfg = json.load(f)
set_db_parameters(get_config_value(cfg, "database_url"))
service = QuartzService(get_config_value(cfg, "host"), get_config_value(cfg, "port"),
get_config_value(cfg, "registry"), get_config_value(cfg, "accessible_at"))
service.start()
SAMPLE_CONFIG = json.dumps(DEFAULTS, indent=4, sort_keys=True)
@cli.command("init", help="Writes a sample configuration file to STDOUT. Can be used as template"
" for writing the actual configuration file")
def init():
print(SAMPLE_CONFIG)
| 25.242424
| 102
| 0.615846
|
794f75a843b84d9747bc675f38c5af061962112f
| 1,245
|
py
|
Python
|
src/model/logreg.py
|
nutcrtnk/PGRA
|
4991f591ff7593a9149cc10a484682a834230979
|
[
"MIT"
] | 3
|
2021-11-30T08:16:03.000Z
|
2022-03-06T07:30:58.000Z
|
src/model/logreg.py
|
nutcrtnk/PGRA
|
4991f591ff7593a9149cc10a484682a834230979
|
[
"MIT"
] | null | null | null |
src/model/logreg.py
|
nutcrtnk/PGRA
|
4991f591ff7593a9149cc10a484682a834230979
|
[
"MIT"
] | null | null | null |
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
if not os.environ.get("RAND", False):
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
else:
print('random seed')
class LogReg(nn.Module):
def __init__(self, ft_in, nb_classes, rela=1, drop=0.):
super(LogReg, self).__init__()
self.fc = nn.Linear(ft_in // rela, nb_classes)
self.rela = rela
if rela > 1:
self.att = nn.Parameter(torch.zeros([rela, 1]))
torch.nn.init.xavier_uniform_(self.att)
else:
self.att = None
for m in self.modules():
self.weights_init(m)
self.dropout = nn.Dropout(drop)
def weights_init(self, m):
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight.data)
if m.bias is not None:
m.bias.data.fill_(0.0)
def forward(self, seq):
seq = self.dropout(seq)
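        # with multiple relations, combine the per-relation slices via a learned softmax attention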
if self.att is not None:
att = F.softmax(self.att, dim=0)
seq = (seq.view(seq.shape[0], self.rela, -1) * att).sum(1)
ret = self.fc(seq)
return ret
| 30.365854
| 70
| 0.585542
|
794f76467ffbfb486500d47e5fd77706baa04454
| 4,364
|
py
|
Python
|
src/Python/Unittests/test_trimesh_circulator_halfedge_loop.py
|
rzoller/OpenMesh
|
f84bca0b26c61eab5f9335b2191962ca8545c5f6
|
[
"BSD-3-Clause"
] | 19
|
2020-08-13T05:15:09.000Z
|
2022-03-31T14:51:29.000Z
|
src/Python/Unittests/test_trimesh_circulator_halfedge_loop.py
|
ccopsey/OpenMesh
|
93e6e626c3f282bf4275521c33cd8da1ca559c7d
|
[
"BSD-3-Clause"
] | 2
|
2020-09-08T07:03:04.000Z
|
2021-08-04T05:43:27.000Z
|
src/Python/Unittests/test_trimesh_circulator_halfedge_loop.py
|
ccopsey/OpenMesh
|
93e6e626c3f282bf4275521c33cd8da1ca559c7d
|
[
"BSD-3-Clause"
] | 10
|
2020-08-06T02:37:46.000Z
|
2021-07-01T09:12:06.000Z
|
import unittest
import openmesh
class TrimeshCirculatorHalfedgeLoop(unittest.TestCase):
def setUp(self):
self.mesh = openmesh.TriMesh()
self.vhandle = []
def test_halfedge_loop_with_face(self):
# Add some vertices
self.vhandle.append(self.mesh.add_vertex(openmesh.Vec3d(0, 1, 0)))
self.vhandle.append(self.mesh.add_vertex(openmesh.Vec3d(1, 0, 0)))
self.vhandle.append(self.mesh.add_vertex(openmesh.Vec3d(2, 1, 0)))
self.vhandle.append(self.mesh.add_vertex(openmesh.Vec3d(3, 0, 0)))
self.vhandle.append(self.mesh.add_vertex(openmesh.Vec3d(4, 1, 0)))
self.vhandle.append(self.mesh.add_vertex(openmesh.Vec3d(2, -1, 0)))
# Add four faces
face_vhandles = []
face_vhandles.append(self.vhandle[0])
face_vhandles.append(self.vhandle[1])
face_vhandles.append(self.vhandle[2])
self.mesh.add_face(face_vhandles)
face_vhandles = []
face_vhandles.append(self.vhandle[2])
face_vhandles.append(self.vhandle[1])
face_vhandles.append(self.vhandle[3])
self.mesh.add_face(face_vhandles)
face_vhandles = []
face_vhandles.append(self.vhandle[2])
face_vhandles.append(self.vhandle[3])
face_vhandles.append(self.vhandle[4])
self.mesh.add_face(face_vhandles)
face_vhandles = []
face_vhandles.append(self.vhandle[1])
face_vhandles.append(self.vhandle[5])
face_vhandles.append(self.vhandle[3])
self.mesh.add_face(face_vhandles)
# Test setup:
#
# edge x => halfedge x/x+1
# i.e. edge 0 => halfedge 0/1
#
# 0 --4--- 2 ------ 4
# \ / \ /
# 0 0 2 6 2 /
# \ / 1 \ /
# 1 ---8--- 3
# \ /
# \ 3 /
# \ /
# \ /
# 5
# Circle around face 1
hl_it = self.mesh.hl(self.mesh.halfedge_handle(3))
self.assertEqual(hl_it.__next__().idx(), 3)
self.assertEqual(hl_it.__next__().idx(), 6)
self.assertEqual(hl_it.__next__().idx(), 8)
self.assertRaises(StopIteration, hl_it.__next__)
def test_halfedge_loop_without_face(self):
# Add some vertices
self.vhandle.append(self.mesh.add_vertex(openmesh.Vec3d(0, 1, 0)))
self.vhandle.append(self.mesh.add_vertex(openmesh.Vec3d(1, 0, 0)))
self.vhandle.append(self.mesh.add_vertex(openmesh.Vec3d(2, 1, 0)))
self.vhandle.append(self.mesh.add_vertex(openmesh.Vec3d(3, 0, 0)))
self.vhandle.append(self.mesh.add_vertex(openmesh.Vec3d(4, 1, 0)))
self.vhandle.append(self.mesh.add_vertex(openmesh.Vec3d(2, -1, 0)))
# Add three faces
face_vhandles = []
face_vhandles.append(self.vhandle[0])
face_vhandles.append(self.vhandle[1])
face_vhandles.append(self.vhandle[2])
self.mesh.add_face(face_vhandles)
face_vhandles = []
face_vhandles.append(self.vhandle[2])
face_vhandles.append(self.vhandle[3])
face_vhandles.append(self.vhandle[4])
self.mesh.add_face(face_vhandles)
face_vhandles = []
face_vhandles.append(self.vhandle[1])
face_vhandles.append(self.vhandle[5])
face_vhandles.append(self.vhandle[3])
self.mesh.add_face(face_vhandles)
# Test setup:
#
# H => hole (no face)
# fx => face #x
# edge 0 => halfedge 0/1
#
# 0 --4--- 2 -10--- 4
# \ / \ /
# 0 f0 2 6 f2 8
# \ / H \ /
# 1 ---16---3
# \ /
# 12 f3 14
# \ /
# \ /
# 5
# Circle around the hole
hl_it = self.mesh.hl(self.mesh.halfedge_handle(3))
self.assertEqual(hl_it.__next__().idx(), 3)
self.assertEqual(hl_it.__next__().idx(), 17)
self.assertEqual(hl_it.__next__().idx(), 7)
self.assertRaises(StopIteration, hl_it.__next__)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TrimeshCirculatorHalfedgeLoop)
unittest.TextTestRunner(verbosity=2).run(suite)
| 33.060606
| 86
| 0.562786
|
794f7723978fed23c3a7212f122001b560aa8ae2
| 9,247
|
py
|
Python
|
docs/conf.py
|
nestauk/gtr
|
5a7fe88c8429fa78199fb2da42123a7079a5f8ab
|
[
"Apache-2.0"
] | 6
|
2016-06-08T11:41:45.000Z
|
2018-09-12T09:54:08.000Z
|
docs/conf.py
|
nestauk/gtr
|
5a7fe88c8429fa78199fb2da42123a7079a5f8ab
|
[
"Apache-2.0"
] | 2
|
2018-02-14T19:34:57.000Z
|
2018-02-14T19:46:02.000Z
|
docs/conf.py
|
nestauk/gtr
|
5a7fe88c8429fa78199fb2da42123a7079a5f8ab
|
[
"Apache-2.0"
] | 2
|
2017-11-07T15:38:39.000Z
|
2018-02-14T19:10:36.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# gtr documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 5 22:19:07 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'gtr'
copyright = '2016, Nesta'
author = 'James Gardiner'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'gtrdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'gtr.tex', 'gtr Documentation',
'James Gardiner', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'gtr', 'gtr Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'gtr', 'gtr Documentation',
author, 'gtr', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 31.886207
| 79
| 0.714069
|
794f77725dc21644ec0f4a67ea4967fba9d6bac5
| 8,445
|
py
|
Python
|
src/dials/algorithms/scaling/scaling_options.py
|
PrinceWalnut/dials
|
13bbd3be1e3869a58b4eb2aa2717a4fcf33278f2
|
[
"BSD-3-Clause"
] | null | null | null |
src/dials/algorithms/scaling/scaling_options.py
|
PrinceWalnut/dials
|
13bbd3be1e3869a58b4eb2aa2717a4fcf33278f2
|
[
"BSD-3-Clause"
] | null | null | null |
src/dials/algorithms/scaling/scaling_options.py
|
PrinceWalnut/dials
|
13bbd3be1e3869a58b4eb2aa2717a4fcf33278f2
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Phil scope of options for scaling.
"""
from __future__ import annotations
import iotbx.phil
phil_scope = iotbx.phil.parse(
"""
anomalous = False
.type = bool
.help = "Separate anomalous pairs in scaling and error model optimisation."
.expert_level=0
overwrite_existing_models = False
.type = bool
.help = "If True, create new scaling models for all datasets"
.expert_level = 0
reflection_selection {
method = *quasi_random intensity_ranges use_all random
.type = choice
.help = "Method to use when choosing a reflection subset for scaling model"
"minimisation."
"The quasi_random option randomly selects reflections groups"
"within a dataset, and also selects groups which have good"
"connectedness across datasets for multi-dataset cases. The random"
"option selects reflection groups randomly for both single"
"and multi dataset scaling, so for a single dataset"
"quasi_random == random."
"The intensity_ranges option uses the E2_range, Isigma_range and"
"d_range options to the subset of reflections"
"The use_all option uses all suitable reflections, which may be"
"slow for large datasets."
random {
multi_dataset {
Isigma_cutoff = 1.0
.type = float
.help = "Minimum average I/sigma of reflection groups to use when"
"selecting random reflections for minimisation."
}
min_groups = 2000
.type = int
.help = "The minimum number of symmetry groups to use during"
"minimisation."
.expert_level=1
min_reflections = 50000
.type = int
.help = "The minimum number of reflections to use during minimisation."
.expert_level=1
}
best_unit_cell = None
.type = unit_cell
.help = "Best unit cell value, to use when performing resolution cutting"
"and merging statistics. If None, the median cell will be used."
E2_range = 0.8, 5.0
.type = floats(size=2)
.help = "Minimum and maximum normalised E^2 value to used to select a"
"subset of reflections for minimisation."
.expert_level = 1
Isigma_range = -5.0, 0.0
.type = floats(size=2)
.help = "Minimum and maximum I/sigma values used to select a subset of"
"reflections for minimisation. A value of 0.0 for the maximum"
"indicates that no upper limit should be applied."
.expert_level = 1
d_range = None
.type = floats(size=2)
.help = "Minimum and maximum d-values used to select a subset of"
"reflections for minimisation."
.expert_level = 1
min_partiality = 0.95
.type = float
.help = "Minimum partiality to use when selecting reflections to use"
"to determine the scaling model and error model."
.expert_level = 2
intensity_choice = profile sum *combine
.alias = intensity
.type = choice
.help = "Option to choose from profile fitted or summation intensities, or
an optimised combination of profile/sum."
.expert_level = 1
combine.Imid = None
.type = floats
.help = "A list of values to try for the midpoint, for profile/sum combination
calculation: the value with the lowest Rmeas will be chosen.
0 and 1 are special values that can be supplied to include profile
and sum respectively in the comparison."
.expert_level = 2
combine.joint_analysis = True
.type = bool
.help = "Option of whether to do intensity combination optimisation
            separately (i.e. different Imid per dataset) or jointly for
multiple datasets"
.expert_level = 2
}
weighting {
weighting_scheme = *invvar
.type = choice
.help = "Weighting scheme used during Ih calculation. Weighting schemes
other than invvar and unity may trigger iterative reweighting
during minimisation, which may be unstable for certain minimisation
engines (LBFGS)."
.expert_level = 2
error_model {
include scope dials.algorithms.scaling.error_model.error_model.phil_scope
}
}
cut_data {
d_min = None
.type = float
.help = "Option to apply a high resolution cutoff for the dataset (i.e.
the chosen reflections have d > d_min)."
.expert_level = 1
d_max = None
.type = float
.help = "Option to apply a low resolution cutoff for the dataset (i.e.
the chosen reflections have d < d_max)."
.expert_level = 1
partiality_cutoff = 0.4
.type = float
.help = "Value below which reflections are removed from the dataset due
to low partiality."
.expert_level = 1
min_isigi = -5
.type = float
.help = "Value below which reflections are removed from the dataset due"
"to low I/sigI in either profile or summation intensity estimates"
.expert_level = 1
small_scale_cutoff = 1e-9
.type = float
.help = "After scaling, remove reflections with scale factors below this"
"value, in order to avoid reflections with negative scale factors."
.expert_level = 3
}
scaling_options {
check_consistent_indexing = False
.type = bool
.help = "If True, run dials.cosym on all data in the data preparation"
"step, to ensure consistent indexing."
target_cycle = True
.type = bool
.help = "Option to turn of initial round of targeted scaling
if some datasets are already scaled."
.expert_level = 2
only_target = False
.type = bool
.help = "Option to only do targeted scaling if some datasets
are already scaled."
.expert_level = 2
only_save_targeted = True
.type = bool
.help = "If only_target is true, this option to change whether the dataset
that is being scaled will be saved on its own, or combined with the
already scaled dataset."
.expert_level = 2
target_model = None
.type = path
.help = "Path to cif or pdb file to use to calculate target intensities for
scaling."
.expert_level = 2
target_mtz = None
.type = path
.help = "Path to merged mtz file to use as a target for scaling."
.expert_level = 2
nproc = 1
.type = int(value_min=1)
.help = "Number of blocks to divide the data into for minimisation.
This also sets the number of processes to use if the option is
available."
.expert_level = 2
use_free_set = False
.type = bool
.help = "Option to use a free set during scaling to check for overbiasing.
            This free set is used to calculate an RMSD, which is shown alongside
the 'working' RMSD during refinement, but is not currently used
to terminate refinement or make any choices on the model."
.expert_level = 2
free_set_percentage = 10.0
.type = float
.help = "Percentage of symmetry equivalent groups to use for the free set,
if use_free_set is True."
.expert_level = 2
free_set_offset = 0
.type = int
.help = "Offset for choosing unique groups for the free set from the whole
set of unique groups."
.expert_level = 2
full_matrix = True
.type = bool
.help = "Option to turn off GN/LM refinement round used to determine
error estimates on scale factors."
.expert_level = 2
outlier_rejection = *standard simple
.type = choice
.help = "Choice of outlier rejection routine. Standard may take a
significant amount of time to run for large datasets or high
multiplicities, whereas simple should be quick for these datasets."
.expert_level = 1
outlier_zmax = 6.0
.type = float(value_min=3.0)
.help = "Cutoff z-score value for identifying outliers based on their
normalised deviation within the group of equivalent reflections"
.expert_level = 1
emax = 10
.type = float(value_min = 0)
.help = "Reject reflections with normalised intensities E^2 > emax^2"
.expert_level = 2
}
"""
)
| 40.023697
| 84
| 0.62984
|
794f77bb8a46a3cd97ed1b4fa50c2ae5248034c0
| 968
|
py
|
Python
|
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/scripts/status_params.py
|
zyclove/ambari
|
1032f0f54cb7b312b9a3b37570cd840f4e1e89d4
|
[
"Apache-2.0"
] | null | null | null |
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/scripts/status_params.py
|
zyclove/ambari
|
1032f0f54cb7b312b9a3b37570cd840f4e1e89d4
|
[
"Apache-2.0"
] | null | null | null |
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/OOZIE/package/scripts/status_params.py
|
zyclove/ambari
|
1032f0f54cb7b312b9a3b37570cd840f4e1e89d4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python2
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
config = Script.get_config()
oozie_pid_dir = config['configurations']['oozie-env']['oozie_pid_dir']
pid_file = format("{oozie_pid_dir}/oozie.pid")
| 35.851852
| 72
| 0.785124
|
794f77e64b56eba98fbbde667443476db5ec975b
| 14,843
|
py
|
Python
|
SLING/src/relation_linking_core/metadata_generator/amr_graph_to_triples.py
|
nandana/kbqa-relation-linking
|
6d541341519eea56efe0ec87b18f0ba93604f778
|
[
"Apache-2.0"
] | null | null | null |
SLING/src/relation_linking_core/metadata_generator/amr_graph_to_triples.py
|
nandana/kbqa-relation-linking
|
6d541341519eea56efe0ec87b18f0ba93604f778
|
[
"Apache-2.0"
] | null | null | null |
SLING/src/relation_linking_core/metadata_generator/amr_graph_to_triples.py
|
nandana/kbqa-relation-linking
|
6d541341519eea56efe0ec87b18f0ba93604f778
|
[
"Apache-2.0"
] | null | null | null |
import calendar
import re
from itertools import combinations
from nltk.stem import WordNetLemmatizer
class AMR2Triples:
lemmatizer = WordNetLemmatizer()
op_pattern = re.compile("op([0-9]+)")
ARG_REGEX = re.compile("ARG([0-9]+)")
propbank_pattern = re.compile("([a-z0-9]+_)*(([a-z]+)-)+(\d\d)")
non_core_roles = ['accompanier', 'age', 'beneficiary', 'concession', 'condition', 'consist-of', 'destination',
'direction', 'domain', 'duration', 'example', 'extent', 'frequency', 'instrument', 'location',
'manner', 'medium', 'mod', 'ord', 'part', 'path', 'prep-with', 'purpose', 'quant', 'source',
'subevent', 'time', 'topic', 'value']
conjunctions = ['or', 'and']
ignored_roles = ['name', 'instance', 'entities', 'entity', 'surface_form', 'type', 'uri']
@classmethod
def print_triples(cls, triples, title):
print('\n{}:\n'.format(title))
for source, source_id, relation, target, target_id in triples:
print('{}\t{}\t{}\t{}\t{}'.format(source, source_id, relation, target, target_id))
print('\n')
@classmethod
def concat_name(cls, names_ops):
name = ''
for i in range(1, 15):
if i in names_ops:
name += ' ' + names_ops[i]
return name
@classmethod
def get_variable_text(cls, var_id, names, dates):
surface_text = ''
if var_id in names:
surface_text = names[var_id]
elif var_id in dates:
surface_text = dates[var_id]
return surface_text.strip()
@classmethod
def get_triples(cls, sentence_text, graph, debug=False):
sentence_text = sentence_text.replace('.', '').replace('?', '')
token_list = [AMR2Triples.lemmatizer.lemmatize(token) for token in sentence_text.split()]
triples = graph.triples()
if debug:
print("Raw triples:")
for trip in triples:
print("{}\t{}\t{}".format(trip[0], trip[1], trip[2]))
processed_triples = []
# mappings between variables, their types, and names
name_var_list, var_to_type, var_to_name, name_to_var = list(), dict(), dict(), dict()
# amr-unknown variable
amr_unknown_var = None
# info for concatenating named entity names.
grouped_names_ops, names = dict(), dict()
# map for resolving multi-words written with mod relations
mod_maps, mod_resolved = dict(), set()
# handling date time instances
date_var_list, grouped_var_to_date, dates = list(), dict(), dict()
# handling ordinals
ordinal_var_list, ordinal_value = list(), dict()
# temporal quantity
temporal_var_list, temporal_value = list(), dict()
# handle and triples
and_var_list, and_values = list(), dict()
# resolve the grouped_names_ops first
for source, relation, target in triples:
# instance - type relations
if relation == 'instance':
var_to_type[source] = target
if target == 'amr-unknown':
amr_unknown_var = source
elif target == 'name':
name_var_list.append(source)
elif target == 'date-entity':
date_var_list.append(source)
elif target == 'ordinal-entity':
ordinal_var_list.append(source)
elif target == 'temporal-quantity':
temporal_var_list.append(source)
elif target == 'and':
and_var_list.append(source)
# var - name relations
elif relation == 'name':
var_to_name[source] = target
# collecting all mod relation values
elif relation == 'mod' and target != 'amr-unknown':
# we ignore all expressive nodes
if target == 'expressive':
continue
mod_values = mod_maps.get(source, list())
mod_values.append(target)
mod_maps[source] = mod_values
elif relation in ['year', 'month', 'day', 'weekday'] and source in date_var_list:
var_to_date_group = grouped_var_to_date.get(source, dict())
var_to_date_group[relation] = target
grouped_var_to_date[source] = var_to_date_group
elif relation == 'value' and source in ordinal_var_list:
ordinal_value[source] = target
elif relation == 'quant' and source in temporal_var_list:
temporal_value[source] = target
# collecting all op* relations
elif re.match(AMR2Triples.op_pattern, relation):
if source in name_var_list:
op_pos = int(AMR2Triples.op_pattern.match(relation).group(1))
name_ops = grouped_names_ops.get(source, dict())
name_ops[op_pos] = str(target).replace("\"", "")
grouped_names_ops[source] = name_ops
elif source in and_var_list:
and_ops = and_values.get(source, set())
and_ops.add(target)
and_values[source] = and_ops
for var in var_to_name:
name_to_var[var_to_name[var]] = var
for var in mod_maps:
head_word = var_to_type[var]
if head_word in token_list:
head_pos = token_list.index(head_word)
mod_type_var = dict()
mod_list = list()
for mod_var in mod_maps[var]:
if mod_var in var_to_type:
mod_type = var_to_type[mod_var]
else:
mod_type = mod_var
mod_list.append(mod_type)
mod_type_var[mod_type] = mod_var
init_pos = head_pos - (len(mod_list) + 1)
init_pos = init_pos if init_pos >= 0 else 0
filtered_tokens = token_list[init_pos:head_pos]
new_type_tokens = list()
for token in filtered_tokens:
if token in mod_list:
mod_resolved.add(mod_type_var[token])
new_type_tokens.append(token)
new_type_tokens.append(head_word)
new_type = ' '.join(new_type_tokens)
var_to_type[var] = new_type
for name_id in grouped_names_ops:
if name_id in name_to_var:
names[name_to_var[name_id]] = AMR2Triples.concat_name(grouped_names_ops[name_id])
for date_id in grouped_var_to_date:
date_map = grouped_var_to_date[date_id]
date_list = list()
if 'day' in date_map:
date_list.append(str(date_map['day']))
if 'month' in date_map and date_map['month'] <= 12 and date_map['month'] > 0:
date_list.append(calendar.month_name[date_map['month']])
if 'year' in date_map:
date_list.append(str(date_map['year']))
dates[date_id] = '/'.join(date_list)
# process and values
# TODO this does not fix the issue, this only takes one of the values. This should be fixed in a higher level.
new_triples = list()
for source, relation, target in triples:
if target in and_values:
all_values = and_values[target]
for value in all_values:
new_triples.append([source, relation, value])
else:
new_triples.append([source, relation, target])
triples = new_triples
for source, relation, target in triples:
source_id = source
target_id = target
            # TODO handle 'interrogative' trees
if target == 'interrogative':
continue
if relation in ['instance', 'name', 'entities', 'entity', 'id', 'type', 'surface_form', 'uri'] or re.match(AMR2Triples.op_pattern, relation) \
or source in date_var_list or source in ordinal_var_list or source in temporal_var_list:
# we have already processed these triples and collected the necessary information
continue
if relation == 'mod':
if target == amr_unknown_var:
# sometimes amr-unknown is indirectly attached with a mod relation to a variable
amr_unknown_var = source
continue
if target in mod_resolved:
continue
if relation == 'domain':
if target == amr_unknown_var:
                    # sometimes amr-unknown is indirectly attached with a domain relation to a variable
amr_unknown_var = source
continue
if source in var_to_type:
source = str(var_to_type[source])
if target in dates:
target = dates[target]
if target in var_to_type:
target = str(var_to_type[target])
if target in ordinal_value:
target = str(ordinal_value[target])
if target in temporal_value:
target = str(temporal_value[target])
processed_triples.append([source, source_id, relation, target, target_id])
if debug:
AMR2Triples.print_triples(processed_triples, 'Processed triples')
return processed_triples, var_to_name, var_to_type, names, dates, ordinal_value, temporal_value, amr_unknown_var, graph.top
@classmethod
def get_flat_triples(cls, sentence_text, penman_tree):
triple_info = list()
frame_args = dict()
id_to_type = dict()
reified_to_rel = dict()
processed_triples, var_to_name, var_to_type, names, dates, ordinal_value, temporal_value, amr_unknown_var, top_node = \
AMR2Triples.get_triples(sentence_text, penman_tree)
for subject, source_id, relation, target, target_id in processed_triples:
id_to_type[source_id] = subject
id_to_type[target_id] = target
subject_text, object_text = '', ''
if source_id in names:
subject_text = names[source_id]
elif source_id in dates:
subject_text = dates[source_id]
id_to_type[source_id] = 'date-entity'
if target_id in names:
object_text = names[target_id]
elif target_id in dates:
object_text = dates[target_id]
id_to_type[target_id] = 'date-entity'
subject_text = subject_text.strip()
object_text = object_text.strip()
# select subjects that are frames
if re.match(AMR2Triples.propbank_pattern, str(subject)):
# TODO what should we do when the object is a frame
if re.match(AMR2Triples.propbank_pattern, str(target)):
target = re.match(AMR2Triples.propbank_pattern, str(target)).group(2)
# we have handled these before (and & or)
if subject in AMR2Triples.conjunctions or target in AMR2Triples.conjunctions:
continue
args = frame_args.get(source_id, dict())
if re.match(AMR2Triples.ARG_REGEX, relation) or relation in AMR2Triples.non_core_roles:
args[relation] = target_id
frame_args[source_id] = args
elif relation not in AMR2Triples.ignored_roles and not re.match(AMR2Triples.ARG_REGEX, relation):
subject_type = str(var_to_type[source_id]).split()[-1]
triple = dict()
triple['subj_text'] = subject_text
triple['subj_type'] = str(subject).strip()
triple['subj_id'] = source_id
triple['predicate'] = "{}.{}".format(str(subject_type).strip(), str(relation).strip())
triple['predicate_id'] = source_id
triple['obj_text'] = object_text
triple['obj_type'] = str(target).strip()
triple['obj_id'] = target_id
triple['amr_unknown_var'] = amr_unknown_var
triple_info.append(triple)
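        # expand each collected PropBank frame into pairwise triples over its argument roles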
for frame_id in frame_args:
frame_roles = frame_args[frame_id]
if id_to_type[frame_id] == 'have-rel-role-91':
if 'ARG2' in frame_roles:
reified_to_rel[frame_id] = id_to_type[frame_roles['ARG2']]
elif 'ARG3' in frame_roles:
reified_to_rel[frame_id] = id_to_type[frame_roles['ARG3']]
else:
reified_to_rel[frame_id] = 'relation'
if 'ARG0' in frame_roles and 'ARG1' in frame_roles:
for role in ['ARG2', 'ARG3']:
if role in frame_roles:
del frame_roles[role]
if id_to_type[frame_id] == 'have-org-role-91':
if 'ARG2' in frame_roles:
reified_to_rel[frame_id] = id_to_type[frame_roles['ARG2']]
elif 'ARG3' in frame_roles:
reified_to_rel[frame_id] = id_to_type[frame_roles['ARG3']]
else:
reified_to_rel[frame_id] = 'position'
if 'ARG0' in frame_roles and 'ARG1' in frame_roles:
for role in ['ARG2', 'ARG3']:
if role in frame_roles:
del frame_roles[role]
# logic to handle the special case of frames with a single argument
if len(frame_roles) == 1:
frame_roles['unknown'] = "unknown"
id_to_type['unknown'] = 'unknown'
rel_keys = sorted(list(frame_roles.keys()))
for role1, role2 in combinations(rel_keys, 2):
triple = dict()
triple['subj_text'] = AMR2Triples.get_variable_text(frame_roles[role1], names, dates)
triple['subj_type'] = str(id_to_type[frame_roles[role1]]).strip()
triple['subj_id'] = frame_roles[role1]
triple['predicate'] = '{}.{}.{}'.format(id_to_type[frame_id], role1.lower(), role2.lower())
triple['predicate_id'] = frame_id
triple['obj_text'] = AMR2Triples.get_variable_text(frame_roles[role2], names, dates)
triple['obj_type'] = str(id_to_type[frame_roles[role2]]).strip()
triple['obj_id'] = frame_roles[role2]
triple['amr_unknown_var'] = amr_unknown_var
triple_info.append(triple)
return triple_info, names, reified_to_rel, top_node
| 45.530675
| 154
| 0.562151
|
794f77ef84ce2603d19d801a3d8d1496a21e1bb4
| 1,220
|
py
|
Python
|
grr/server/grr_response_server/gui/selenium_tests/api_docs_test.py
|
4ndygu/grr
|
cfc725b5ee3a2626ac4cdae7fb14471612da4522
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/gui/selenium_tests/api_docs_test.py
|
4ndygu/grr
|
cfc725b5ee3a2626ac4cdae7fb14471612da4522
|
[
"Apache-2.0"
] | null | null | null |
grr/server/grr_response_server/gui/selenium_tests/api_docs_test.py
|
4ndygu/grr
|
cfc725b5ee3a2626ac4cdae7fb14471612da4522
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Tests for API docs view."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_core.lib import flags
from grr_response_server.gui import gui_test_lib
from grr.test_lib import test_lib
class TestAPIDocs(gui_test_lib.GRRSeleniumTest):
"""Tests the API docs UI."""
def testStatsMetricRouteIsShown(self):
self.Open("/#main=ApiDocumentation")
self.WaitUntil(
self.AllTextsPresent,
[
# Check that header is shown.
"GET /api/stats/store/<component>/metrics/<metric_name>",
# Check that parameters are shown along with possible Enum values.
"Parameters",
"distribution_handling_mode",
"DH_COUNT",
"DH_SUM",
"aggregation_mode",
"AGG_SUM",
"AGG_MEAN",
"AGG_NONE",
# Check that examples are shown.
"Examples",
"/api/stats/store/worker/metrics/sample_counter?"
"end=3600000000&start=42000000",
'"metric_name": "sample_counter"'
])
if __name__ == "__main__":
flags.StartMain(test_lib.main)
| 27.111111
| 78
| 0.631967
|
794f7880452e2667f2df298ce1264571ae40b630
| 3,666
|
py
|
Python
|
pyxiv-dl/__main__.py
|
reymarkus/pyxiv-dl-reborn
|
d89ce952a2f85fcb298f535e5391b5d504465cd1
|
[
"MIT"
] | null | null | null |
pyxiv-dl/__main__.py
|
reymarkus/pyxiv-dl-reborn
|
d89ce952a2f85fcb298f535e5391b5d504465cd1
|
[
"MIT"
] | null | null | null |
pyxiv-dl/__main__.py
|
reymarkus/pyxiv-dl-reborn
|
d89ce952a2f85fcb298f535e5391b5d504465cd1
|
[
"MIT"
] | null | null | null |
"""pyxiv-dl main script
This is the main script that executes the main pyxiv-dl argument parser.
"""
import argparse, sys, textwrap
from webcrawler import PixivWebCrawler
from pyxivhelpers import *
# constants
"""Script version"""
PYXIVDL_VERSION = "0.5.2"
"""Main function for accepting download args"""
def main():
# load argparse here
argParser = argparse.ArgumentParser(
description="pyxiv-dl: Downloads full-sized arts from Pixiv",
usage="pyxiv-dl.py [options] <id>",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent("""
ADDITIONAL NOTES
        The -r/--range option lets you download images in a multi-image post over a
specified range. This is silently ignored for single and ugoira posts. For the
-r/--range option, the format it accepts is the following:
\tx,y
        where 0 < x < y. x denotes the start of the image index to download and y denotes the
end of the image index. If y exceeds the total number of posts, it will be silently
ignored and will download up to the last image index.
These are the valid formats accepted by the -r/--range option:
\t1,4\tDownloads images from index 1 to 4
\t4,6\tDownloads images from index 4 to 6
\t4,\tDownloads images from index 4 up to the last
\t,5\tDownloads images from the start up to index 5
Anything not in the valid formats are considered invalid.
""")
)
argParser.add_argument(
"-i",
"--index",
help="Download a specific image on a multi image post based on its index. Cannot be combined with -r/--range",
action="store",
type=int
)
argParser.add_argument(
"-r",
"--range",
help="Download images from a specified range using a from,to format. Cannot be combined with -i/--index. "
"See help for more info",
action="store"
)
# add NSFW confirmation bypass
argParser.add_argument(
"-n",
"--nsfw",
help="Always allow NSFW image download. If not set, you are asked to confirm the download first",
action="store_true"
)
# add verbose argument
argParser.add_argument(
"-v",
"--verbose",
help="Show verbose output",
action="store_true"
)
# show script version
argParser.add_argument(
"-V",
"--version",
help="Show the application's version and exit",
action="version",
version="%(prog)s v{}".format(PYXIVDL_VERSION)
)
# main argument: pixiv art IDs
argParser.add_argument(
"id",
help="your Pixiv medium ID to get original-sized images or ugoira from",
action="store"
)
# set parsed args variable
parsedArgs = argParser.parse_args()
# validate inputs first
# check first for valid pixiv IDs
if not validatePostIdRegex(parsedArgs.id):
print("One or more inputs is not a valid Pixiv post ID. Aborting.")
sys.exit(1)
if parsedArgs.range is not None and not validateRange(parsedArgs.range):
print("Range parameter is incorrect. See help for more info.")
sys.exit(1)
# run scraper
pxCrawl = PixivWebCrawler(
parsedArgs.id,
parsedArgs.verbose,
parsedArgs.nsfw,
parsedArgs.range,
parsedArgs.index
)
PixivWebCrawler.downloadImages(pxCrawl)
# main call
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print("\nKeyboard interrupt detected. Aborting.")
| 30.04918
| 118
| 0.627932
|
794f78d35e6334cc9a4abcbc981e57122bfb22ef
| 1,552
|
py
|
Python
|
model-optimizer/extensions/front/tf/matmul_ext.py
|
tdp2110/dldt
|
87f321c5365ed813e849ea0ed987354ef2c39743
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/extensions/front/tf/matmul_ext.py
|
tdp2110/dldt
|
87f321c5365ed813e849ea0ed987354ef2c39743
|
[
"Apache-2.0"
] | null | null | null |
model-optimizer/extensions/front/tf/matmul_ext.py
|
tdp2110/dldt
|
87f321c5365ed813e849ea0ed987354ef2c39743
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from extensions.ops.MatMul import MatMul
from mo.front.extractor import FrontExtractorOp
from mo.graph.graph import Node
from mo.utils.error import Error
class MatMulExtractor(FrontExtractorOp):
op = 'MatMul'
enabled = True
@classmethod
def extract(cls, node: Node):
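        # fail fast on TF MatMul attributes that this extractor does not support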
unsupported_attrs = []
for attr_name in ['adjoint_a', 'adjoint_b', 'a_is_sparse', 'b_is_sparse']:
if attr_name in node.pb.attr and node.pb.attr[attr_name].b:
unsupported_attrs.append(attr_name)
if len(unsupported_attrs) != 0:
raise Error('MatMul operation {} use unsupported attrs: {}'.format(node.id, unsupported_attrs))
MatMul.update_node_stat(node,
{
'transpose_a': node.pb.attr['transpose_a'].b,
'transpose_b': node.pb.attr['transpose_b'].b,
})
return cls.enabled
| 37.853659
| 107
| 0.654639
|