from rest_framework import viewsets, mixins
from rest_framework.settings import api_settings

from disease_classification.serializers import DiseaseClassificationSerializer
from core.models import DiseaseClassification


class DiseaseClassificationViewSet(viewsets.GenericViewSet,
                                   mixins.ListModelMixin,
                                   mixins.CreateModelMixin,
                                   mixins.RetrieveModelMixin,
                                   mixins.UpdateModelMixin,
                                   mixins.DestroyModelMixin):
    """Manage disease classifications in the database."""

    serializer_class = DiseaseClassificationSerializer
    # Explicitly fall back to the project-wide default paginator from settings.
    pagination_class = api_settings.DEFAULT_PAGINATION_CLASS
    queryset = DiseaseClassification.objects.all().order_by('id')
    lookup_field = 'id'
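# Routing sketch for the viewset above (assumed file: urls.py; the import path
# disease_classification.views is an assumption). A DefaultRouter exposes the
# list/create/retrieve/update/destroy actions at standard REST routes.
from django.urls import include, path
from rest_framework.routers import DefaultRouter

from disease_classification.views import DiseaseClassificationViewSet

router = DefaultRouter()
router.register('disease-classifications', DiseaseClassificationViewSet)

urlpatterns = [
    path('', include(router.urls)),
]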
#
# tokyoS
# www.fabiocrameri.ch/colourmaps
from matplotlib.colors import LinearSegmentedColormap
cm_data = [[0.10387, 0.056805, 0.20243],
[0.99708, 0.99733, 0.84887],
[0.56301, 0.52969, 0.52511],
[0.46223, 0.27624, 0.42457],
[0.63013, 0.77187, 0.5993],
[0.53661, 0.41269, 0.48849],
[0.80822, 0.93317, 0.71825],
[0.29442, 0.13697, 0.31333],
[0.5835, 0.64523, 0.55687],
[0.19833, 0.090586, 0.2555],
[0.57276, 0.58696, 0.54104],
[0.50996, 0.34812, 0.46237],
[0.55207, 0.4722, 0.50818],
[0.38753, 0.2015, 0.37279],
[0.69973, 0.85431, 0.64681],
[0.91316, 0.97898, 0.78927],
[0.59826, 0.70264, 0.57342],
[0.86348, 0.96072, 0.75524],
[0.65894, 0.81164, 0.61951],
[0.52526, 0.38125, 0.47656],
[0.34223, 0.16717, 0.34337],
[0.54522, 0.44286, 0.49885],
[0.55783, 0.50106, 0.51684],
[0.61097, 0.73576, 0.5846],
[0.48934, 0.31305, 0.44524],
[0.95708, 0.99022, 0.82012],
[0.75136, 0.89651, 0.68075],
[0.57782, 0.61589, 0.54891],
[0.2461, 0.11159, 0.28383],
[0.56789, 0.55827, 0.53315],
[0.15119, 0.072938, 0.22845],
[0.42811, 0.2385, 0.40025],
[0.59039, 0.67531, 0.56526],
[0.40854, 0.21982, 0.38685],
[0.22212, 0.10056, 0.26951],
[0.93575, 0.9853, 0.80504],
[0.57524, 0.60139, 0.54497],
[0.54118, 0.42791, 0.49383],
[0.604, 0.7189, 0.57868],
[0.6778, 0.83276, 0.63223],
[0.51819, 0.36491, 0.4698],
[0.58674, 0.66015, 0.56098],
[0.36533, 0.18389, 0.35824],
[0.97743, 0.99412, 0.83468],
[0.61955, 0.75338, 0.59139],
[0.3185, 0.15147, 0.32834],
[0.54882, 0.45761, 0.50361],
[0.57031, 0.57261, 0.53711],
[0.8364, 0.94818, 0.73701],
[0.55505, 0.48667, 0.51257],
[0.56546, 0.54398, 0.52916],
[0.58056, 0.6305, 0.55286],
[0.8891, 0.9709, 0.77268],
[0.17475, 0.081435, 0.24182],
[0.5004, 0.33082, 0.45422],
[0.44603, 0.25739, 0.41286],
[0.64313, 0.79129, 0.60859],
[0.77959, 0.91583, 0.69932],
[0.47666, 0.29481, 0.43537],
[0.72445, 0.87578, 0.66309],
[0.27026, 0.12365, 0.29845],
[0.53134, 0.39714, 0.48276],
[0.56047, 0.51538, 0.52102],
[0.12757, 0.064935, 0.21531],
[0.59349, 0.68688, 0.56863],
[0.65067, 0.80136, 0.61382],
[0.41851, 0.22913, 0.39364],
[0.16298, 0.077038, 0.23511],
[0.55358, 0.47945, 0.51039],
[0.92463, 0.98234, 0.79725],
[0.48321, 0.30398, 0.44041],
[0.57399, 0.59417, 0.54301],
[0.53406, 0.40495, 0.48567],
[0.13939, 0.068939, 0.22184],
[0.56669, 0.55112, 0.53116],
[0.73768, 0.88628, 0.67178],
[0.46968, 0.28555, 0.43009],
[0.21021, 0.095479, 0.26246],
[0.49507, 0.322, 0.44984],
[0.60099, 0.7107, 0.57599],
[0.52841, 0.38925, 0.47973],
[0.59578, 0.6947, 0.57097],
[0.11566, 0.061046, 0.20884],
[0.6363, 0.78146, 0.60376],
[0.55048, 0.46492, 0.50591],
[0.2341, 0.10596, 0.27664],
[0.55917, 0.50823, 0.51895],
[0.66798, 0.82213, 0.62564],
[0.90133, 0.97518, 0.78108],
[0.98733, 0.99579, 0.84181],
[0.96736, 0.99228, 0.82746],
[0.82241, 0.94098, 0.72768],
[0.76537, 0.90639, 0.68996],
[0.62456, 0.76251, 0.59518],
[0.56425, 0.53684, 0.52714],
[0.53897, 0.42033, 0.49121],
[0.33042, 0.15918, 0.33586],
[0.582, 0.63785, 0.55485],
[0.71178, 0.86509, 0.65474],
[0.43729, 0.24796, 0.40666]]
tokyoS_map = LinearSegmentedColormap.from_list('tokyoS', cm_data)
# For use of "viscm view"
test_cm = tokyoS_map
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    import numpy as np

    try:
        from viscm import viscm
        viscm(tokyoS_map)
    except ImportError:
        print("viscm not found, falling back on simple display")
        plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
                   cmap=tokyoS_map)
    plt.show()
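# Usage sketch (illustrative; reuses tokyoS_map defined above). tokyoS is the
# categorical ("S") variant of the colour map, so a typical use is colouring a
# small number of discrete classes.
def _demo_tokyoS_scatter():
    import matplotlib.pyplot as plt
    import numpy as np

    rng = np.random.default_rng(0)
    x, y = rng.random(200), rng.random(200)
    labels = rng.integers(0, 10, size=200)   # discrete class indices
    plt.scatter(x, y, c=labels, cmap=tokyoS_map)
    plt.colorbar(label='class index')
    plt.show()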
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api import httpbody_pb2 # type: ignore
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.devtools.cloudbuild_v1.services.cloud_build import CloudBuildAsyncClient
from google.devtools.cloudbuild_v1.services.cloud_build import CloudBuildClient
from google.devtools.cloudbuild_v1.services.cloud_build import pagers
from google.devtools.cloudbuild_v1.services.cloud_build import transports
from google.devtools.cloudbuild_v1.services.cloud_build.transports.base import _GOOGLE_AUTH_VERSION
from google.devtools.cloudbuild_v1.types import cloudbuild
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import any_pb2 # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
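# Illustrative example (not part of the generated suite) of how the markers
# above are applied: as a decorator, so a test only runs under the matching
# google-auth version range.
@requires_google_auth_gte_1_25_0
def test_google_auth_version_marker_example():
    # Only executes when google-auth >= 1.25.0 is installed, so this holds.
    assert packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse(
        "1.25.0"
    )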
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert CloudBuildClient._get_default_mtls_endpoint(None) is None
assert CloudBuildClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert CloudBuildClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert CloudBuildClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert CloudBuildClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert CloudBuildClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [
CloudBuildClient,
CloudBuildAsyncClient,
])
def test_cloud_build_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'cloudbuild.googleapis.com:443'
@pytest.mark.parametrize("transport_class,transport_name", [
(transports.CloudBuildGrpcTransport, "grpc"),
(transports.CloudBuildGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_cloud_build_client_service_account_always_use_jwt(transport_class, transport_name):
with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [
CloudBuildClient,
CloudBuildAsyncClient,
])
def test_cloud_build_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'cloudbuild.googleapis.com:443'
def test_cloud_build_client_get_transport_class():
transport = CloudBuildClient.get_transport_class()
available_transports = [
transports.CloudBuildGrpcTransport,
]
assert transport in available_transports
transport = CloudBuildClient.get_transport_class("grpc")
assert transport == transports.CloudBuildGrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(CloudBuildClient, transports.CloudBuildGrpcTransport, "grpc"),
(CloudBuildAsyncClient, transports.CloudBuildGrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(CloudBuildClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CloudBuildClient))
@mock.patch.object(CloudBuildAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CloudBuildAsyncClient))
def test_cloud_build_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(CloudBuildClient, 'get_transport_class') as gtc:
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials()
)
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(CloudBuildClient, 'get_transport_class') as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
(CloudBuildClient, transports.CloudBuildGrpcTransport, "grpc", "true"),
(CloudBuildAsyncClient, transports.CloudBuildGrpcAsyncIOTransport, "grpc_asyncio", "true"),
(CloudBuildClient, transports.CloudBuildGrpcTransport, "grpc", "false"),
(CloudBuildAsyncClient, transports.CloudBuildGrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(CloudBuildClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CloudBuildClient))
@mock.patch.object(CloudBuildAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CloudBuildAsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_cloud_build_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
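# Application-side sketch (illustrative, not a test) of the behaviour exercised
# above: with GOOGLE_API_USE_CLIENT_CERTIFICATE=true in the environment and a
# client_cert_source callback supplied via ClientOptions, the client is pointed
# at the mTLS endpoint. `load_client_cert` is a placeholder callback returning
# (cert_bytes, key_bytes).
def _example_mtls_client(load_client_cert):
    options = client_options.ClientOptions(client_cert_source=load_client_cert)
    return CloudBuildClient(client_options=options)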
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(CloudBuildClient, transports.CloudBuildGrpcTransport, "grpc"),
(CloudBuildAsyncClient, transports.CloudBuildGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_cloud_build_client_client_options_scopes(client_class, transport_class, transport_name):
# Check the case scopes are provided.
options = client_options.ClientOptions(
scopes=["1", "2"],
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(CloudBuildClient, transports.CloudBuildGrpcTransport, "grpc"),
(CloudBuildAsyncClient, transports.CloudBuildGrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_cloud_build_client_client_options_credentials_file(client_class, transport_class, transport_name):
# Check the case credentials file is provided.
options = client_options.ClientOptions(
credentials_file="credentials.json"
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_cloud_build_client_client_options_from_dict():
with mock.patch('google.devtools.cloudbuild_v1.services.cloud_build.transports.CloudBuildGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CloudBuildClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
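# Usage sketch (illustrative, not a test): as the test above shows,
# client_options may be given as a plain dict instead of a ClientOptions
# instance; the endpoint is the placeholder value used throughout these tests.
def _example_client_with_dict_options():
    return CloudBuildClient(client_options={'api_endpoint': 'squid.clam.whelk'})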
def test_create_build(transport: str = 'grpc', request_type=cloudbuild.CreateBuildRequest):
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_build),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.create_build(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.CreateBuildRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_build_from_dict():
test_create_build(request_type=dict)
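# End-to-end sketch (illustrative, not a test) of what the mocked flow above
# corresponds to: create_build returns a long-running operation whose result()
# blocks until the build finishes. Project ID and build config are placeholders.
def _example_create_build_and_wait():
    client = CloudBuildClient()
    build = cloudbuild.Build(
        steps=[cloudbuild.BuildStep(name='gcr.io/cloud-builders/gcloud',
                                    args=['--version'])],
    )
    operation = client.create_build(project_id='my-project', build=build)
    return operation.result()  # the completed cloudbuild.Build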
def test_create_build_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_build),
'__call__') as call:
client.create_build()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.CreateBuildRequest()
@pytest.mark.asyncio
async def test_create_build_async(transport: str = 'grpc_asyncio', request_type=cloudbuild.CreateBuildRequest):
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_build),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.create_build(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.CreateBuildRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_build_async_from_dict():
await test_create_build_async(request_type=dict)
def test_create_build_flattened():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_build),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_build(
project_id='project_id_value',
build=cloudbuild.Build(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
assert args[0].build == cloudbuild.Build(name='name_value')
def test_create_build_flattened_error():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_build(
cloudbuild.CreateBuildRequest(),
project_id='project_id_value',
build=cloudbuild.Build(name='name_value'),
)
@pytest.mark.asyncio
async def test_create_build_flattened_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_build),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_build(
project_id='project_id_value',
build=cloudbuild.Build(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
assert args[0].build == cloudbuild.Build(name='name_value')
@pytest.mark.asyncio
async def test_create_build_flattened_error_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_build(
cloudbuild.CreateBuildRequest(),
project_id='project_id_value',
build=cloudbuild.Build(name='name_value'),
)
def test_get_build(transport: str = 'grpc', request_type=cloudbuild.GetBuildRequest):
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_build),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = cloudbuild.Build(
name='name_value',
id='id_value',
project_id='project_id_value',
status=cloudbuild.Build.Status.PENDING,
status_detail='status_detail_value',
images=['images_value'],
logs_bucket='logs_bucket_value',
build_trigger_id='build_trigger_id_value',
log_url='log_url_value',
tags=['tags_value'],
service_account='service_account_value',
)
response = client.get_build(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.GetBuildRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloudbuild.Build)
assert response.name == 'name_value'
assert response.id == 'id_value'
assert response.project_id == 'project_id_value'
assert response.status == cloudbuild.Build.Status.PENDING
assert response.status_detail == 'status_detail_value'
assert response.images == ['images_value']
assert response.logs_bucket == 'logs_bucket_value'
assert response.build_trigger_id == 'build_trigger_id_value'
assert response.log_url == 'log_url_value'
assert response.tags == ['tags_value']
assert response.service_account == 'service_account_value'
def test_get_build_from_dict():
test_get_build(request_type=dict)
def test_get_build_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_build),
'__call__') as call:
client.get_build()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.GetBuildRequest()
@pytest.mark.asyncio
async def test_get_build_async(transport: str = 'grpc_asyncio', request_type=cloudbuild.GetBuildRequest):
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_build),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.Build(
name='name_value',
id='id_value',
project_id='project_id_value',
status=cloudbuild.Build.Status.PENDING,
status_detail='status_detail_value',
images=['images_value'],
logs_bucket='logs_bucket_value',
build_trigger_id='build_trigger_id_value',
log_url='log_url_value',
tags=['tags_value'],
service_account='service_account_value',
))
response = await client.get_build(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.GetBuildRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloudbuild.Build)
assert response.name == 'name_value'
assert response.id == 'id_value'
assert response.project_id == 'project_id_value'
assert response.status == cloudbuild.Build.Status.PENDING
assert response.status_detail == 'status_detail_value'
assert response.images == ['images_value']
assert response.logs_bucket == 'logs_bucket_value'
assert response.build_trigger_id == 'build_trigger_id_value'
assert response.log_url == 'log_url_value'
assert response.tags == ['tags_value']
assert response.service_account == 'service_account_value'
@pytest.mark.asyncio
async def test_get_build_async_from_dict():
await test_get_build_async(request_type=dict)
def test_get_build_flattened():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_build),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = cloudbuild.Build()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_build(
project_id='project_id_value',
id='id_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
assert args[0].id == 'id_value'
def test_get_build_flattened_error():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_build(
cloudbuild.GetBuildRequest(),
project_id='project_id_value',
id='id_value',
)
@pytest.mark.asyncio
async def test_get_build_flattened_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_build),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.Build())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_build(
project_id='project_id_value',
id='id_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
assert args[0].id == 'id_value'
@pytest.mark.asyncio
async def test_get_build_flattened_error_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_build(
cloudbuild.GetBuildRequest(),
project_id='project_id_value',
id='id_value',
)
def test_list_builds(transport: str = 'grpc', request_type=cloudbuild.ListBuildsRequest):
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_builds),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = cloudbuild.ListBuildsResponse(
next_page_token='next_page_token_value',
)
response = client.list_builds(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.ListBuildsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListBuildsPager)
assert response.next_page_token == 'next_page_token_value'
def test_list_builds_from_dict():
test_list_builds(request_type=dict)
def test_list_builds_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_builds),
'__call__') as call:
client.list_builds()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.ListBuildsRequest()
@pytest.mark.asyncio
async def test_list_builds_async(transport: str = 'grpc_asyncio', request_type=cloudbuild.ListBuildsRequest):
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_builds),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.ListBuildsResponse(
next_page_token='next_page_token_value',
))
response = await client.list_builds(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.ListBuildsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListBuildsAsyncPager)
assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_builds_async_from_dict():
await test_list_builds_async(request_type=dict)
def test_list_builds_flattened():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_builds),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = cloudbuild.ListBuildsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_builds(
project_id='project_id_value',
filter='filter_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
assert args[0].filter == 'filter_value'
def test_list_builds_flattened_error():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_builds(
cloudbuild.ListBuildsRequest(),
project_id='project_id_value',
filter='filter_value',
)
@pytest.mark.asyncio
async def test_list_builds_flattened_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_builds),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.ListBuildsResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_builds(
project_id='project_id_value',
filter='filter_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
assert args[0].filter == 'filter_value'
@pytest.mark.asyncio
async def test_list_builds_flattened_error_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_builds(
cloudbuild.ListBuildsRequest(),
project_id='project_id_value',
filter='filter_value',
)
def test_list_builds_pager():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_builds),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
cloudbuild.ListBuildsResponse(
builds=[
cloudbuild.Build(),
cloudbuild.Build(),
cloudbuild.Build(),
],
next_page_token='abc',
),
cloudbuild.ListBuildsResponse(
builds=[],
next_page_token='def',
),
cloudbuild.ListBuildsResponse(
builds=[
cloudbuild.Build(),
],
next_page_token='ghi',
),
cloudbuild.ListBuildsResponse(
builds=[
cloudbuild.Build(),
cloudbuild.Build(),
],
),
RuntimeError,
)
metadata = ()
pager = client.list_builds(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, cloudbuild.Build)
for i in results)
def test_list_builds_pages():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_builds),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
cloudbuild.ListBuildsResponse(
builds=[
cloudbuild.Build(),
cloudbuild.Build(),
cloudbuild.Build(),
],
next_page_token='abc',
),
cloudbuild.ListBuildsResponse(
builds=[],
next_page_token='def',
),
cloudbuild.ListBuildsResponse(
builds=[
cloudbuild.Build(),
],
next_page_token='ghi',
),
cloudbuild.ListBuildsResponse(
builds=[
cloudbuild.Build(),
cloudbuild.Build(),
],
),
RuntimeError,
)
pages = list(client.list_builds(request={}).pages)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_builds_async_pager():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_builds),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
cloudbuild.ListBuildsResponse(
builds=[
cloudbuild.Build(),
cloudbuild.Build(),
cloudbuild.Build(),
],
next_page_token='abc',
),
cloudbuild.ListBuildsResponse(
builds=[],
next_page_token='def',
),
cloudbuild.ListBuildsResponse(
builds=[
cloudbuild.Build(),
],
next_page_token='ghi',
),
cloudbuild.ListBuildsResponse(
builds=[
cloudbuild.Build(),
cloudbuild.Build(),
],
),
RuntimeError,
)
async_pager = await client.list_builds(request={},)
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, cloudbuild.Build)
for i in responses)
@pytest.mark.asyncio
async def test_list_builds_async_pages():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_builds),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
cloudbuild.ListBuildsResponse(
builds=[
cloudbuild.Build(),
cloudbuild.Build(),
cloudbuild.Build(),
],
next_page_token='abc',
),
cloudbuild.ListBuildsResponse(
builds=[],
next_page_token='def',
),
cloudbuild.ListBuildsResponse(
builds=[
cloudbuild.Build(),
],
next_page_token='ghi',
),
cloudbuild.ListBuildsResponse(
builds=[
cloudbuild.Build(),
cloudbuild.Build(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_builds(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
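# Usage sketch (illustrative, not a test): the pager returned by list_builds
# fetches further pages transparently while iterating, which is the behaviour
# the pager/pages tests above simulate. The project ID is a placeholder.
def _example_iterate_builds():
    client = CloudBuildClient()
    return [build for build in client.list_builds(project_id='my-project')]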
def test_cancel_build(transport: str = 'grpc', request_type=cloudbuild.CancelBuildRequest):
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_build),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = cloudbuild.Build(
name='name_value',
id='id_value',
project_id='project_id_value',
status=cloudbuild.Build.Status.PENDING,
status_detail='status_detail_value',
images=['images_value'],
logs_bucket='logs_bucket_value',
build_trigger_id='build_trigger_id_value',
log_url='log_url_value',
tags=['tags_value'],
service_account='service_account_value',
)
response = client.cancel_build(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.CancelBuildRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloudbuild.Build)
assert response.name == 'name_value'
assert response.id == 'id_value'
assert response.project_id == 'project_id_value'
assert response.status == cloudbuild.Build.Status.PENDING
assert response.status_detail == 'status_detail_value'
assert response.images == ['images_value']
assert response.logs_bucket == 'logs_bucket_value'
assert response.build_trigger_id == 'build_trigger_id_value'
assert response.log_url == 'log_url_value'
assert response.tags == ['tags_value']
assert response.service_account == 'service_account_value'
def test_cancel_build_from_dict():
test_cancel_build(request_type=dict)
def test_cancel_build_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_build),
'__call__') as call:
client.cancel_build()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.CancelBuildRequest()
@pytest.mark.asyncio
async def test_cancel_build_async(transport: str = 'grpc_asyncio', request_type=cloudbuild.CancelBuildRequest):
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_build),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.Build(
name='name_value',
id='id_value',
project_id='project_id_value',
status=cloudbuild.Build.Status.PENDING,
status_detail='status_detail_value',
images=['images_value'],
logs_bucket='logs_bucket_value',
build_trigger_id='build_trigger_id_value',
log_url='log_url_value',
tags=['tags_value'],
service_account='service_account_value',
))
response = await client.cancel_build(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.CancelBuildRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloudbuild.Build)
assert response.name == 'name_value'
assert response.id == 'id_value'
assert response.project_id == 'project_id_value'
assert response.status == cloudbuild.Build.Status.PENDING
assert response.status_detail == 'status_detail_value'
assert response.images == ['images_value']
assert response.logs_bucket == 'logs_bucket_value'
assert response.build_trigger_id == 'build_trigger_id_value'
assert response.log_url == 'log_url_value'
assert response.tags == ['tags_value']
assert response.service_account == 'service_account_value'
@pytest.mark.asyncio
async def test_cancel_build_async_from_dict():
await test_cancel_build_async(request_type=dict)
def test_cancel_build_flattened():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_build),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = cloudbuild.Build()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.cancel_build(
project_id='project_id_value',
id='id_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
assert args[0].id == 'id_value'
def test_cancel_build_flattened_error():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.cancel_build(
cloudbuild.CancelBuildRequest(),
project_id='project_id_value',
id='id_value',
)
@pytest.mark.asyncio
async def test_cancel_build_flattened_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.cancel_build),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.Build())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.cancel_build(
project_id='project_id_value',
id='id_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
assert args[0].id == 'id_value'
@pytest.mark.asyncio
async def test_cancel_build_flattened_error_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.cancel_build(
cloudbuild.CancelBuildRequest(),
project_id='project_id_value',
id='id_value',
)
def test_retry_build(transport: str = 'grpc', request_type=cloudbuild.RetryBuildRequest):
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.retry_build),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.retry_build(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.RetryBuildRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_retry_build_from_dict():
test_retry_build(request_type=dict)
def test_retry_build_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.retry_build),
'__call__') as call:
client.retry_build()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.RetryBuildRequest()
@pytest.mark.asyncio
async def test_retry_build_async(transport: str = 'grpc_asyncio', request_type=cloudbuild.RetryBuildRequest):
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.retry_build),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.retry_build(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.RetryBuildRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_retry_build_async_from_dict():
await test_retry_build_async(request_type=dict)
def test_retry_build_flattened():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.retry_build),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.retry_build(
project_id='project_id_value',
id='id_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
assert args[0].id == 'id_value'
def test_retry_build_flattened_error():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.retry_build(
cloudbuild.RetryBuildRequest(),
project_id='project_id_value',
id='id_value',
)
@pytest.mark.asyncio
async def test_retry_build_flattened_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.retry_build),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.retry_build(
project_id='project_id_value',
id='id_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
assert args[0].id == 'id_value'
@pytest.mark.asyncio
async def test_retry_build_flattened_error_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.retry_build(
cloudbuild.RetryBuildRequest(),
project_id='project_id_value',
id='id_value',
)
def test_approve_build(transport: str = 'grpc', request_type=cloudbuild.ApproveBuildRequest):
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.approve_build),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.approve_build(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.ApproveBuildRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_approve_build_from_dict():
test_approve_build(request_type=dict)
def test_approve_build_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.approve_build),
'__call__') as call:
client.approve_build()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.ApproveBuildRequest()
@pytest.mark.asyncio
async def test_approve_build_async(transport: str = 'grpc_asyncio', request_type=cloudbuild.ApproveBuildRequest):
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.approve_build),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.approve_build(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.ApproveBuildRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_approve_build_async_from_dict():
await test_approve_build_async(request_type=dict)
def test_approve_build_field_headers():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudbuild.ApproveBuildRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.approve_build),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.approve_build(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_approve_build_field_headers_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudbuild.ApproveBuildRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.approve_build),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.approve_build(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_approve_build_flattened():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.approve_build),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.approve_build(
name='name_value',
approval_result=cloudbuild.ApprovalResult(approver_account='approver_account_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
assert args[0].approval_result == cloudbuild.ApprovalResult(approver_account='approver_account_value')
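# Note: flattened keyword arguments are marshalled by the client into an
# ApproveBuildRequest, so the assertions above inspect the fields of the
# positional request (args[0]) rather than the keyword arguments themselves.
# Supplying both a request object and flattened fields is rejected with a
# ValueError, which the *_flattened_error tests below verify.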
def test_approve_build_flattened_error():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.approve_build(
cloudbuild.ApproveBuildRequest(),
name='name_value',
approval_result=cloudbuild.ApprovalResult(approver_account='approver_account_value'),
)
@pytest.mark.asyncio
async def test_approve_build_flattened_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.approve_build),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.approve_build(
name='name_value',
approval_result=cloudbuild.ApprovalResult(approver_account='approver_account_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
assert args[0].approval_result == cloudbuild.ApprovalResult(approver_account='approver_account_value')
@pytest.mark.asyncio
async def test_approve_build_flattened_error_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.approve_build(
cloudbuild.ApproveBuildRequest(),
name='name_value',
approval_result=cloudbuild.ApprovalResult(approver_account='approver_account_value'),
)
def test_create_build_trigger(transport: str = 'grpc', request_type=cloudbuild.CreateBuildTriggerRequest):
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_build_trigger),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = cloudbuild.BuildTrigger(
resource_name='resource_name_value',
id='id_value',
description='description_value',
name='name_value',
tags=['tags_value'],
disabled=True,
ignored_files=['ignored_files_value'],
included_files=['included_files_value'],
filter='filter_value',
service_account='service_account_value',
autodetect=True,
)
response = client.create_build_trigger(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.CreateBuildTriggerRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloudbuild.BuildTrigger)
assert response.resource_name == 'resource_name_value'
assert response.id == 'id_value'
assert response.description == 'description_value'
assert response.name == 'name_value'
assert response.tags == ['tags_value']
assert response.disabled is True
assert response.ignored_files == ['ignored_files_value']
assert response.included_files == ['included_files_value']
assert response.filter == 'filter_value'
assert response.service_account == 'service_account_value'
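# Note: the mocked BuildTrigger above also sets autodetect=True, which is not
# asserted afterwards; this echo-style check only compares the scalar fields of
# the response, which appears to be intentional in these generated tests.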
def test_create_build_trigger_from_dict():
test_create_build_trigger(request_type=dict)
def test_create_build_trigger_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_build_trigger),
'__call__') as call:
client.create_build_trigger()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.CreateBuildTriggerRequest()
@pytest.mark.asyncio
async def test_create_build_trigger_async(transport: str = 'grpc_asyncio', request_type=cloudbuild.CreateBuildTriggerRequest):
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_build_trigger),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.BuildTrigger(
resource_name='resource_name_value',
id='id_value',
description='description_value',
name='name_value',
tags=['tags_value'],
disabled=True,
ignored_files=['ignored_files_value'],
included_files=['included_files_value'],
filter='filter_value',
service_account='service_account_value',
))
response = await client.create_build_trigger(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.CreateBuildTriggerRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloudbuild.BuildTrigger)
assert response.resource_name == 'resource_name_value'
assert response.id == 'id_value'
assert response.description == 'description_value'
assert response.name == 'name_value'
assert response.tags == ['tags_value']
assert response.disabled is True
assert response.ignored_files == ['ignored_files_value']
assert response.included_files == ['included_files_value']
assert response.filter == 'filter_value'
assert response.service_account == 'service_account_value'
@pytest.mark.asyncio
async def test_create_build_trigger_async_from_dict():
await test_create_build_trigger_async(request_type=dict)
def test_create_build_trigger_flattened():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_build_trigger),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = cloudbuild.BuildTrigger()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_build_trigger(
project_id='project_id_value',
trigger=cloudbuild.BuildTrigger(resource_name='resource_name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
assert args[0].trigger == cloudbuild.BuildTrigger(resource_name='resource_name_value')
def test_create_build_trigger_flattened_error():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_build_trigger(
cloudbuild.CreateBuildTriggerRequest(),
project_id='project_id_value',
trigger=cloudbuild.BuildTrigger(resource_name='resource_name_value'),
)
@pytest.mark.asyncio
async def test_create_build_trigger_flattened_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_build_trigger),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.BuildTrigger())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_build_trigger(
project_id='project_id_value',
trigger=cloudbuild.BuildTrigger(resource_name='resource_name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
assert args[0].trigger == cloudbuild.BuildTrigger(resource_name='resource_name_value')
@pytest.mark.asyncio
async def test_create_build_trigger_flattened_error_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_build_trigger(
cloudbuild.CreateBuildTriggerRequest(),
project_id='project_id_value',
trigger=cloudbuild.BuildTrigger(resource_name='resource_name_value'),
)
def test_get_build_trigger(transport: str = 'grpc', request_type=cloudbuild.GetBuildTriggerRequest):
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_build_trigger),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = cloudbuild.BuildTrigger(
resource_name='resource_name_value',
id='id_value',
description='description_value',
name='name_value',
tags=['tags_value'],
disabled=True,
ignored_files=['ignored_files_value'],
included_files=['included_files_value'],
filter='filter_value',
service_account='service_account_value',
autodetect=True,
)
response = client.get_build_trigger(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.GetBuildTriggerRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloudbuild.BuildTrigger)
assert response.resource_name == 'resource_name_value'
assert response.id == 'id_value'
assert response.description == 'description_value'
assert response.name == 'name_value'
assert response.tags == ['tags_value']
assert response.disabled is True
assert response.ignored_files == ['ignored_files_value']
assert response.included_files == ['included_files_value']
assert response.filter == 'filter_value'
assert response.service_account == 'service_account_value'
def test_get_build_trigger_from_dict():
test_get_build_trigger(request_type=dict)
def test_get_build_trigger_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_build_trigger),
'__call__') as call:
client.get_build_trigger()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.GetBuildTriggerRequest()
@pytest.mark.asyncio
async def test_get_build_trigger_async(transport: str = 'grpc_asyncio', request_type=cloudbuild.GetBuildTriggerRequest):
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_build_trigger),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.BuildTrigger(
resource_name='resource_name_value',
id='id_value',
description='description_value',
name='name_value',
tags=['tags_value'],
disabled=True,
ignored_files=['ignored_files_value'],
included_files=['included_files_value'],
filter='filter_value',
service_account='service_account_value',
))
response = await client.get_build_trigger(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.GetBuildTriggerRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloudbuild.BuildTrigger)
assert response.resource_name == 'resource_name_value'
assert response.id == 'id_value'
assert response.description == 'description_value'
assert response.name == 'name_value'
assert response.tags == ['tags_value']
assert response.disabled is True
assert response.ignored_files == ['ignored_files_value']
assert response.included_files == ['included_files_value']
assert response.filter == 'filter_value'
assert response.service_account == 'service_account_value'
@pytest.mark.asyncio
async def test_get_build_trigger_async_from_dict():
await test_get_build_trigger_async(request_type=dict)
def test_get_build_trigger_flattened():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_build_trigger),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = cloudbuild.BuildTrigger()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_build_trigger(
project_id='project_id_value',
trigger_id='trigger_id_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
assert args[0].trigger_id == 'trigger_id_value'
def test_get_build_trigger_flattened_error():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_build_trigger(
cloudbuild.GetBuildTriggerRequest(),
project_id='project_id_value',
trigger_id='trigger_id_value',
)
@pytest.mark.asyncio
async def test_get_build_trigger_flattened_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_build_trigger),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.BuildTrigger())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_build_trigger(
project_id='project_id_value',
trigger_id='trigger_id_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
assert args[0].trigger_id == 'trigger_id_value'
@pytest.mark.asyncio
async def test_get_build_trigger_flattened_error_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_build_trigger(
cloudbuild.GetBuildTriggerRequest(),
project_id='project_id_value',
trigger_id='trigger_id_value',
)
def test_list_build_triggers(transport: str = 'grpc', request_type=cloudbuild.ListBuildTriggersRequest):
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_build_triggers),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = cloudbuild.ListBuildTriggersResponse(
next_page_token='next_page_token_value',
)
response = client.list_build_triggers(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.ListBuildTriggersRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListBuildTriggersPager)
assert response.next_page_token == 'next_page_token_value'
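# Note: list_build_triggers wraps the raw ListBuildTriggersResponse in a
# ListBuildTriggersPager; next_page_token is surfaced from the first response,
# and subsequent pages are only fetched lazily as the pager is iterated.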
def test_list_build_triggers_from_dict():
test_list_build_triggers(request_type=dict)
def test_list_build_triggers_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_build_triggers),
'__call__') as call:
client.list_build_triggers()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.ListBuildTriggersRequest()
@pytest.mark.asyncio
async def test_list_build_triggers_async(transport: str = 'grpc_asyncio', request_type=cloudbuild.ListBuildTriggersRequest):
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_build_triggers),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.ListBuildTriggersResponse(
next_page_token='next_page_token_value',
))
response = await client.list_build_triggers(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.ListBuildTriggersRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListBuildTriggersAsyncPager)
assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_build_triggers_async_from_dict():
await test_list_build_triggers_async(request_type=dict)
def test_list_build_triggers_flattened():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_build_triggers),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = cloudbuild.ListBuildTriggersResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_build_triggers(
project_id='project_id_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
def test_list_build_triggers_flattened_error():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_build_triggers(
cloudbuild.ListBuildTriggersRequest(),
project_id='project_id_value',
)
@pytest.mark.asyncio
async def test_list_build_triggers_flattened_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_build_triggers),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.ListBuildTriggersResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_build_triggers(
project_id='project_id_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
@pytest.mark.asyncio
async def test_list_build_triggers_flattened_error_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_build_triggers(
cloudbuild.ListBuildTriggersRequest(),
project_id='project_id_value',
)
def test_list_build_triggers_pager():
client = CloudBuildClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_build_triggers),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
cloudbuild.ListBuildTriggersResponse(
triggers=[
cloudbuild.BuildTrigger(),
cloudbuild.BuildTrigger(),
cloudbuild.BuildTrigger(),
],
next_page_token='abc',
),
cloudbuild.ListBuildTriggersResponse(
triggers=[],
next_page_token='def',
),
cloudbuild.ListBuildTriggersResponse(
triggers=[
cloudbuild.BuildTrigger(),
],
next_page_token='ghi',
),
cloudbuild.ListBuildTriggersResponse(
triggers=[
cloudbuild.BuildTrigger(),
cloudbuild.BuildTrigger(),
],
),
RuntimeError,
)
metadata = ()
pager = client.list_build_triggers(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, cloudbuild.BuildTrigger)
for i in results)
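# The side_effect sequence above simulates three non-empty pages plus one empty
# intermediate page; the trailing RuntimeError is a guard that would surface if
# the pager tried to fetch more pages than the test provides. Iterating the
# pager therefore yields all 6 BuildTrigger items across the pages.
#
# A minimal illustrative helper (not part of the generated surface; the name is
# hypothetical and pytest ignores it because it does not start with 'test_')
# showing how such a pager is typically consumed page by page:
def _collect_trigger_pages(pager):
    """Return the triggers of each page as a list of lists, for inspection."""
    return [list(page.triggers) for page in pager.pages]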
def test_list_build_triggers_pages():
client = CloudBuildClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_build_triggers),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
cloudbuild.ListBuildTriggersResponse(
triggers=[
cloudbuild.BuildTrigger(),
cloudbuild.BuildTrigger(),
cloudbuild.BuildTrigger(),
],
next_page_token='abc',
),
cloudbuild.ListBuildTriggersResponse(
triggers=[],
next_page_token='def',
),
cloudbuild.ListBuildTriggersResponse(
triggers=[
cloudbuild.BuildTrigger(),
],
next_page_token='ghi',
),
cloudbuild.ListBuildTriggersResponse(
triggers=[
cloudbuild.BuildTrigger(),
cloudbuild.BuildTrigger(),
],
),
RuntimeError,
)
pages = list(client.list_build_triggers(request={}).pages)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
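# Note: the .pages iterator yields the raw ListBuildTriggersResponse pages;
# raw_page.next_page_token lets the test verify the page boundaries, with the
# final page expected to carry an empty token.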
@pytest.mark.asyncio
async def test_list_build_triggers_async_pager():
client = CloudBuildAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_build_triggers),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
cloudbuild.ListBuildTriggersResponse(
triggers=[
cloudbuild.BuildTrigger(),
cloudbuild.BuildTrigger(),
cloudbuild.BuildTrigger(),
],
next_page_token='abc',
),
cloudbuild.ListBuildTriggersResponse(
triggers=[],
next_page_token='def',
),
cloudbuild.ListBuildTriggersResponse(
triggers=[
cloudbuild.BuildTrigger(),
],
next_page_token='ghi',
),
cloudbuild.ListBuildTriggersResponse(
triggers=[
cloudbuild.BuildTrigger(),
cloudbuild.BuildTrigger(),
],
),
RuntimeError,
)
async_pager = await client.list_build_triggers(request={},)
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, cloudbuild.BuildTrigger)
for i in responses)
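# Note: new_callable=mock.AsyncMock makes the patched stub __call__ awaitable,
# matching the grpc_asyncio transport; awaiting client.list_build_triggers then
# returns a ListBuildTriggersAsyncPager whose items and pages are consumed with
# 'async for', as in the tests above and below.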
@pytest.mark.asyncio
async def test_list_build_triggers_async_pages():
client = CloudBuildAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_build_triggers),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
cloudbuild.ListBuildTriggersResponse(
triggers=[
cloudbuild.BuildTrigger(),
cloudbuild.BuildTrigger(),
cloudbuild.BuildTrigger(),
],
next_page_token='abc',
),
cloudbuild.ListBuildTriggersResponse(
triggers=[],
next_page_token='def',
),
cloudbuild.ListBuildTriggersResponse(
triggers=[
cloudbuild.BuildTrigger(),
],
next_page_token='ghi',
),
cloudbuild.ListBuildTriggersResponse(
triggers=[
cloudbuild.BuildTrigger(),
cloudbuild.BuildTrigger(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_build_triggers(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
def test_delete_build_trigger(transport: str = 'grpc', request_type=cloudbuild.DeleteBuildTriggerRequest):
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_build_trigger),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_build_trigger(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.DeleteBuildTriggerRequest()
# Establish that the response is the type that we expect.
assert response is None
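# Note: DeleteBuildTrigger returns no payload (protobuf Empty), which the
# generated client surfaces as None; the mock therefore returns None and the
# test asserts the same.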
def test_delete_build_trigger_from_dict():
test_delete_build_trigger(request_type=dict)
def test_delete_build_trigger_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_build_trigger),
'__call__') as call:
client.delete_build_trigger()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.DeleteBuildTriggerRequest()
@pytest.mark.asyncio
async def test_delete_build_trigger_async(transport: str = 'grpc_asyncio', request_type=cloudbuild.DeleteBuildTriggerRequest):
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_build_trigger),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_build_trigger(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.DeleteBuildTriggerRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_build_trigger_async_from_dict():
await test_delete_build_trigger_async(request_type=dict)
def test_delete_build_trigger_flattened():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_build_trigger),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_build_trigger(
project_id='project_id_value',
trigger_id='trigger_id_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
assert args[0].trigger_id == 'trigger_id_value'
def test_delete_build_trigger_flattened_error():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_build_trigger(
cloudbuild.DeleteBuildTriggerRequest(),
project_id='project_id_value',
trigger_id='trigger_id_value',
)
@pytest.mark.asyncio
async def test_delete_build_trigger_flattened_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_build_trigger),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_build_trigger(
project_id='project_id_value',
trigger_id='trigger_id_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
assert args[0].trigger_id == 'trigger_id_value'
@pytest.mark.asyncio
async def test_delete_build_trigger_flattened_error_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_build_trigger(
cloudbuild.DeleteBuildTriggerRequest(),
project_id='project_id_value',
trigger_id='trigger_id_value',
)
def test_update_build_trigger(transport: str = 'grpc', request_type=cloudbuild.UpdateBuildTriggerRequest):
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_build_trigger),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = cloudbuild.BuildTrigger(
resource_name='resource_name_value',
id='id_value',
description='description_value',
name='name_value',
tags=['tags_value'],
disabled=True,
ignored_files=['ignored_files_value'],
included_files=['included_files_value'],
filter='filter_value',
service_account='service_account_value',
autodetect=True,
)
response = client.update_build_trigger(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.UpdateBuildTriggerRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloudbuild.BuildTrigger)
assert response.resource_name == 'resource_name_value'
assert response.id == 'id_value'
assert response.description == 'description_value'
assert response.name == 'name_value'
assert response.tags == ['tags_value']
assert response.disabled is True
assert response.ignored_files == ['ignored_files_value']
assert response.included_files == ['included_files_value']
assert response.filter == 'filter_value'
assert response.service_account == 'service_account_value'
def test_update_build_trigger_from_dict():
test_update_build_trigger(request_type=dict)
def test_update_build_trigger_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_build_trigger),
'__call__') as call:
client.update_build_trigger()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.UpdateBuildTriggerRequest()
@pytest.mark.asyncio
async def test_update_build_trigger_async(transport: str = 'grpc_asyncio', request_type=cloudbuild.UpdateBuildTriggerRequest):
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_build_trigger),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.BuildTrigger(
resource_name='resource_name_value',
id='id_value',
description='description_value',
name='name_value',
tags=['tags_value'],
disabled=True,
ignored_files=['ignored_files_value'],
included_files=['included_files_value'],
filter='filter_value',
service_account='service_account_value',
))
response = await client.update_build_trigger(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.UpdateBuildTriggerRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloudbuild.BuildTrigger)
assert response.resource_name == 'resource_name_value'
assert response.id == 'id_value'
assert response.description == 'description_value'
assert response.name == 'name_value'
assert response.tags == ['tags_value']
assert response.disabled is True
assert response.ignored_files == ['ignored_files_value']
assert response.included_files == ['included_files_value']
assert response.filter == 'filter_value'
assert response.service_account == 'service_account_value'
@pytest.mark.asyncio
async def test_update_build_trigger_async_from_dict():
await test_update_build_trigger_async(request_type=dict)
def test_update_build_trigger_flattened():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_build_trigger),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = cloudbuild.BuildTrigger()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_build_trigger(
project_id='project_id_value',
trigger_id='trigger_id_value',
trigger=cloudbuild.BuildTrigger(resource_name='resource_name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
assert args[0].trigger_id == 'trigger_id_value'
assert args[0].trigger == cloudbuild.BuildTrigger(resource_name='resource_name_value')
def test_update_build_trigger_flattened_error():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_build_trigger(
cloudbuild.UpdateBuildTriggerRequest(),
project_id='project_id_value',
trigger_id='trigger_id_value',
trigger=cloudbuild.BuildTrigger(resource_name='resource_name_value'),
)
@pytest.mark.asyncio
async def test_update_build_trigger_flattened_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_build_trigger),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.BuildTrigger())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_build_trigger(
project_id='project_id_value',
trigger_id='trigger_id_value',
trigger=cloudbuild.BuildTrigger(resource_name='resource_name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
assert args[0].trigger_id == 'trigger_id_value'
assert args[0].trigger == cloudbuild.BuildTrigger(resource_name='resource_name_value')
@pytest.mark.asyncio
async def test_update_build_trigger_flattened_error_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_build_trigger(
cloudbuild.UpdateBuildTriggerRequest(),
project_id='project_id_value',
trigger_id='trigger_id_value',
trigger=cloudbuild.BuildTrigger(resource_name='resource_name_value'),
)
def test_run_build_trigger(transport: str = 'grpc', request_type=cloudbuild.RunBuildTriggerRequest):
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.run_build_trigger),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.run_build_trigger(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.RunBuildTriggerRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_run_build_trigger_from_dict():
test_run_build_trigger(request_type=dict)
def test_run_build_trigger_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.run_build_trigger),
'__call__') as call:
client.run_build_trigger()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.RunBuildTriggerRequest()
@pytest.mark.asyncio
async def test_run_build_trigger_async(transport: str = 'grpc_asyncio', request_type=cloudbuild.RunBuildTriggerRequest):
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.run_build_trigger),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.run_build_trigger(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.RunBuildTriggerRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_run_build_trigger_async_from_dict():
await test_run_build_trigger_async(request_type=dict)
def test_run_build_trigger_flattened():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.run_build_trigger),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.run_build_trigger(
project_id='project_id_value',
trigger_id='trigger_id_value',
source=cloudbuild.RepoSource(project_id='project_id_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
assert args[0].trigger_id == 'trigger_id_value'
assert args[0].source == cloudbuild.RepoSource(project_id='project_id_value')
def test_run_build_trigger_flattened_error():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.run_build_trigger(
cloudbuild.RunBuildTriggerRequest(),
project_id='project_id_value',
trigger_id='trigger_id_value',
source=cloudbuild.RepoSource(project_id='project_id_value'),
)
@pytest.mark.asyncio
async def test_run_build_trigger_flattened_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.run_build_trigger),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.run_build_trigger(
project_id='project_id_value',
trigger_id='trigger_id_value',
source=cloudbuild.RepoSource(project_id='project_id_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].project_id == 'project_id_value'
assert args[0].trigger_id == 'trigger_id_value'
assert args[0].source == cloudbuild.RepoSource(project_id='project_id_value')
@pytest.mark.asyncio
async def test_run_build_trigger_flattened_error_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.run_build_trigger(
cloudbuild.RunBuildTriggerRequest(),
project_id='project_id_value',
trigger_id='trigger_id_value',
source=cloudbuild.RepoSource(project_id='project_id_value'),
)
def test_receive_trigger_webhook(transport: str = 'grpc', request_type=cloudbuild.ReceiveTriggerWebhookRequest):
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.receive_trigger_webhook),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = cloudbuild.ReceiveTriggerWebhookResponse()
response = client.receive_trigger_webhook(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.ReceiveTriggerWebhookRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloudbuild.ReceiveTriggerWebhookResponse)
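# Note: the mocked ReceiveTriggerWebhookResponse is constructed without any
# fields, so these tests only verify the response type and that the stub was
# invoked with the expected request type; there are no field-by-field checks.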
def test_receive_trigger_webhook_from_dict():
test_receive_trigger_webhook(request_type=dict)
def test_receive_trigger_webhook_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.receive_trigger_webhook),
'__call__') as call:
client.receive_trigger_webhook()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.ReceiveTriggerWebhookRequest()
@pytest.mark.asyncio
async def test_receive_trigger_webhook_async(transport: str = 'grpc_asyncio', request_type=cloudbuild.ReceiveTriggerWebhookRequest):
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.receive_trigger_webhook),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.ReceiveTriggerWebhookResponse())
response = await client.receive_trigger_webhook(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.ReceiveTriggerWebhookRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloudbuild.ReceiveTriggerWebhookResponse)
@pytest.mark.asyncio
async def test_receive_trigger_webhook_async_from_dict():
await test_receive_trigger_webhook_async(request_type=dict)
def test_create_worker_pool(transport: str = 'grpc', request_type=cloudbuild.CreateWorkerPoolRequest):
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_worker_pool),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.create_worker_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.CreateWorkerPoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_worker_pool_from_dict():
test_create_worker_pool(request_type=dict)
def test_create_worker_pool_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_worker_pool),
'__call__') as call:
client.create_worker_pool()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.CreateWorkerPoolRequest()
@pytest.mark.asyncio
async def test_create_worker_pool_async(transport: str = 'grpc_asyncio', request_type=cloudbuild.CreateWorkerPoolRequest):
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_worker_pool),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.create_worker_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.CreateWorkerPoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_worker_pool_async_from_dict():
await test_create_worker_pool_async(request_type=dict)
def test_create_worker_pool_field_headers():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudbuild.CreateWorkerPoolRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_worker_pool),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.create_worker_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_create_worker_pool_field_headers_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudbuild.CreateWorkerPoolRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_worker_pool),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.create_worker_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_create_worker_pool_flattened():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_worker_pool),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_worker_pool(
parent='parent_value',
worker_pool=cloudbuild.WorkerPool(name='name_value'),
worker_pool_id='worker_pool_id_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].worker_pool == cloudbuild.WorkerPool(name='name_value')
assert args[0].worker_pool_id == 'worker_pool_id_value'
def test_create_worker_pool_flattened_error():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_worker_pool(
cloudbuild.CreateWorkerPoolRequest(),
parent='parent_value',
worker_pool=cloudbuild.WorkerPool(name='name_value'),
worker_pool_id='worker_pool_id_value',
)
@pytest.mark.asyncio
async def test_create_worker_pool_flattened_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_worker_pool),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name='operations/spam')
        )
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_worker_pool(
parent='parent_value',
worker_pool=cloudbuild.WorkerPool(name='name_value'),
worker_pool_id='worker_pool_id_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].worker_pool == cloudbuild.WorkerPool(name='name_value')
assert args[0].worker_pool_id == 'worker_pool_id_value'
@pytest.mark.asyncio
async def test_create_worker_pool_flattened_error_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_worker_pool(
cloudbuild.CreateWorkerPoolRequest(),
parent='parent_value',
worker_pool=cloudbuild.WorkerPool(name='name_value'),
worker_pool_id='worker_pool_id_value',
)
def test_get_worker_pool(transport: str = 'grpc', request_type=cloudbuild.GetWorkerPoolRequest):
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_worker_pool),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = cloudbuild.WorkerPool(
name='name_value',
display_name='display_name_value',
uid='uid_value',
state=cloudbuild.WorkerPool.State.CREATING,
etag='etag_value',
private_pool_v1_config=cloudbuild.PrivatePoolV1Config(worker_config=cloudbuild.PrivatePoolV1Config.WorkerConfig(machine_type='machine_type_value')),
)
response = client.get_worker_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.GetWorkerPoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloudbuild.WorkerPool)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.uid == 'uid_value'
assert response.state == cloudbuild.WorkerPool.State.CREATING
assert response.etag == 'etag_value'
def test_get_worker_pool_from_dict():
test_get_worker_pool(request_type=dict)
def test_get_worker_pool_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_worker_pool),
'__call__') as call:
client.get_worker_pool()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.GetWorkerPoolRequest()
@pytest.mark.asyncio
async def test_get_worker_pool_async(transport: str = 'grpc_asyncio', request_type=cloudbuild.GetWorkerPoolRequest):
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_worker_pool),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.WorkerPool(
name='name_value',
display_name='display_name_value',
uid='uid_value',
state=cloudbuild.WorkerPool.State.CREATING,
etag='etag_value',
))
response = await client.get_worker_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.GetWorkerPoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cloudbuild.WorkerPool)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
assert response.uid == 'uid_value'
assert response.state == cloudbuild.WorkerPool.State.CREATING
assert response.etag == 'etag_value'
@pytest.mark.asyncio
async def test_get_worker_pool_async_from_dict():
await test_get_worker_pool_async(request_type=dict)
def test_get_worker_pool_field_headers():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudbuild.GetWorkerPoolRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_worker_pool),
'__call__') as call:
call.return_value = cloudbuild.WorkerPool()
client.get_worker_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
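# Sketch of how the ('x-goog-request-params', 'name=name/value') pair asserted
# above is typically produced. gapic_v1.routing_header is the real
# google.api_core helper (the pager tests further down use it too); the
# function below is illustrative only and is not called by any test.
def _sketch_routing_metadata(request):
    # Returns a single metadata tuple; judging by the assertion above, the
    # helper leaves '/' unencoded in the header value.
    return gapic_v1.routing_header.to_grpc_metadata((('name', request.name),))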
@pytest.mark.asyncio
async def test_get_worker_pool_field_headers_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudbuild.GetWorkerPoolRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_worker_pool),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.WorkerPool())
await client.get_worker_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_get_worker_pool_flattened():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_worker_pool),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = cloudbuild.WorkerPool()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_worker_pool(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_get_worker_pool_flattened_error():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_worker_pool(
cloudbuild.GetWorkerPoolRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_get_worker_pool_flattened_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_worker_pool),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.WorkerPool())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_worker_pool(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_get_worker_pool_flattened_error_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_worker_pool(
cloudbuild.GetWorkerPoolRequest(),
name='name_value',
)
def test_delete_worker_pool(transport: str = 'grpc', request_type=cloudbuild.DeleteWorkerPoolRequest):
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_worker_pool),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.delete_worker_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.DeleteWorkerPoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
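# Note on the isinstance(response, future.Future) assertion above: worker pool
# create/update/delete are long-running operations, so the raw
# operations_pb2.Operation returned by the stub is wrapped by the client in a
# future-like google.api_core operation object. A typical caller (illustrative
# only) would block on completion with:
#
#     operation = client.delete_worker_pool(request)
#     operation.result(timeout=300)  # polls until the server marks it done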
def test_delete_worker_pool_from_dict():
test_delete_worker_pool(request_type=dict)
def test_delete_worker_pool_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_worker_pool),
'__call__') as call:
client.delete_worker_pool()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.DeleteWorkerPoolRequest()
@pytest.mark.asyncio
async def test_delete_worker_pool_async(transport: str = 'grpc_asyncio', request_type=cloudbuild.DeleteWorkerPoolRequest):
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_worker_pool),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.delete_worker_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.DeleteWorkerPoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_worker_pool_async_from_dict():
await test_delete_worker_pool_async(request_type=dict)
def test_delete_worker_pool_field_headers():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudbuild.DeleteWorkerPoolRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_worker_pool),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.delete_worker_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_worker_pool_field_headers_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudbuild.DeleteWorkerPoolRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_worker_pool),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.delete_worker_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_delete_worker_pool_flattened():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_worker_pool),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_worker_pool(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
def test_delete_worker_pool_flattened_error():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_worker_pool(
cloudbuild.DeleteWorkerPoolRequest(),
name='name_value',
)
@pytest.mark.asyncio
async def test_delete_worker_pool_flattened_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_worker_pool),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_worker_pool(
name='name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
@pytest.mark.asyncio
async def test_delete_worker_pool_flattened_error_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_worker_pool(
cloudbuild.DeleteWorkerPoolRequest(),
name='name_value',
)
def test_update_worker_pool(transport: str = 'grpc', request_type=cloudbuild.UpdateWorkerPoolRequest):
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_worker_pool),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/spam')
response = client.update_worker_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.UpdateWorkerPoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_update_worker_pool_from_dict():
test_update_worker_pool(request_type=dict)
def test_update_worker_pool_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_worker_pool),
'__call__') as call:
client.update_worker_pool()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.UpdateWorkerPoolRequest()
@pytest.mark.asyncio
async def test_update_worker_pool_async(transport: str = 'grpc_asyncio', request_type=cloudbuild.UpdateWorkerPoolRequest):
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_worker_pool),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
response = await client.update_worker_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.UpdateWorkerPoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_update_worker_pool_async_from_dict():
await test_update_worker_pool_async(request_type=dict)
def test_update_worker_pool_field_headers():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudbuild.UpdateWorkerPoolRequest()
request.worker_pool.name = 'worker_pool.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_worker_pool),
'__call__') as call:
call.return_value = operations_pb2.Operation(name='operations/op')
client.update_worker_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'worker_pool.name=worker_pool.name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_update_worker_pool_field_headers_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudbuild.UpdateWorkerPoolRequest()
request.worker_pool.name = 'worker_pool.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_worker_pool),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
await client.update_worker_pool(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'worker_pool.name=worker_pool.name/value',
) in kw['metadata']
def test_update_worker_pool_flattened():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_worker_pool),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name='operations/op')
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_worker_pool(
worker_pool=cloudbuild.WorkerPool(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].worker_pool == cloudbuild.WorkerPool(name='name_value')
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
def test_update_worker_pool_flattened_error():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_worker_pool(
cloudbuild.UpdateWorkerPoolRequest(),
worker_pool=cloudbuild.WorkerPool(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
@pytest.mark.asyncio
async def test_update_worker_pool_flattened_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_worker_pool),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name='operations/spam')
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_worker_pool(
worker_pool=cloudbuild.WorkerPool(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].worker_pool == cloudbuild.WorkerPool(name='name_value')
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=['paths_value'])
@pytest.mark.asyncio
async def test_update_worker_pool_flattened_error_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_worker_pool(
cloudbuild.UpdateWorkerPoolRequest(),
worker_pool=cloudbuild.WorkerPool(name='name_value'),
update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
)
def test_list_worker_pools(transport: str = 'grpc', request_type=cloudbuild.ListWorkerPoolsRequest):
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_worker_pools),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = cloudbuild.ListWorkerPoolsResponse(
next_page_token='next_page_token_value',
)
response = client.list_worker_pools(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.ListWorkerPoolsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListWorkerPoolsPager)
assert response.next_page_token == 'next_page_token_value'
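# The response checked above is not the raw ListWorkerPoolsResponse: the sync
# client wraps it in a ListWorkerPoolsPager (the async client in an AsyncPager)
# that forwards attribute access to the underlying response, which is why
# next_page_token is still readable on the pager.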
def test_list_worker_pools_from_dict():
test_list_worker_pools(request_type=dict)
def test_list_worker_pools_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_worker_pools),
'__call__') as call:
client.list_worker_pools()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.ListWorkerPoolsRequest()
@pytest.mark.asyncio
async def test_list_worker_pools_async(transport: str = 'grpc_asyncio', request_type=cloudbuild.ListWorkerPoolsRequest):
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_worker_pools),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.ListWorkerPoolsResponse(
next_page_token='next_page_token_value',
))
response = await client.list_worker_pools(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == cloudbuild.ListWorkerPoolsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListWorkerPoolsAsyncPager)
assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_worker_pools_async_from_dict():
await test_list_worker_pools_async(request_type=dict)
def test_list_worker_pools_field_headers():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudbuild.ListWorkerPoolsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_worker_pools),
'__call__') as call:
call.return_value = cloudbuild.ListWorkerPoolsResponse()
client.list_worker_pools(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_list_worker_pools_field_headers_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = cloudbuild.ListWorkerPoolsRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_worker_pools),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.ListWorkerPoolsResponse())
await client.list_worker_pools(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_list_worker_pools_flattened():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_worker_pools),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = cloudbuild.ListWorkerPoolsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_worker_pools(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
def test_list_worker_pools_flattened_error():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_worker_pools(
cloudbuild.ListWorkerPoolsRequest(),
parent='parent_value',
)
@pytest.mark.asyncio
async def test_list_worker_pools_flattened_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_worker_pools),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cloudbuild.ListWorkerPoolsResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_worker_pools(
parent='parent_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
@pytest.mark.asyncio
async def test_list_worker_pools_flattened_error_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_worker_pools(
cloudbuild.ListWorkerPoolsRequest(),
parent='parent_value',
)
def test_list_worker_pools_pager():
client = CloudBuildClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_worker_pools),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
cloudbuild.ListWorkerPoolsResponse(
worker_pools=[
cloudbuild.WorkerPool(),
cloudbuild.WorkerPool(),
cloudbuild.WorkerPool(),
],
next_page_token='abc',
),
cloudbuild.ListWorkerPoolsResponse(
worker_pools=[],
next_page_token='def',
),
cloudbuild.ListWorkerPoolsResponse(
worker_pools=[
cloudbuild.WorkerPool(),
],
next_page_token='ghi',
),
cloudbuild.ListWorkerPoolsResponse(
worker_pools=[
cloudbuild.WorkerPool(),
cloudbuild.WorkerPool(),
],
),
RuntimeError,
)
        metadata = (
            gapic_v1.routing_header.to_grpc_metadata((
                ('parent', ''),
            )),
        )
pager = client.list_worker_pools(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, cloudbuild.WorkerPool)
for i in results)
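# Sketch of the paging loop that ListWorkerPoolsPager drives to produce the six
# results above (illustrative; 'rpc' stands in for the wrapped transport call):
#
#     def iterate_worker_pools(rpc, request):
#         while True:
#             response = rpc(request)
#             yield from response.worker_pools
#             if not response.next_page_token:
#                 break
#             request.page_token = response.next_page_token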
def test_list_worker_pools_pages():
client = CloudBuildClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_worker_pools),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
cloudbuild.ListWorkerPoolsResponse(
worker_pools=[
cloudbuild.WorkerPool(),
cloudbuild.WorkerPool(),
cloudbuild.WorkerPool(),
],
next_page_token='abc',
),
cloudbuild.ListWorkerPoolsResponse(
worker_pools=[],
next_page_token='def',
),
cloudbuild.ListWorkerPoolsResponse(
worker_pools=[
cloudbuild.WorkerPool(),
],
next_page_token='ghi',
),
cloudbuild.ListWorkerPoolsResponse(
worker_pools=[
cloudbuild.WorkerPool(),
cloudbuild.WorkerPool(),
],
),
RuntimeError,
)
pages = list(client.list_worker_pools(request={}).pages)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_worker_pools_async_pager():
client = CloudBuildAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_worker_pools),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
cloudbuild.ListWorkerPoolsResponse(
worker_pools=[
cloudbuild.WorkerPool(),
cloudbuild.WorkerPool(),
cloudbuild.WorkerPool(),
],
next_page_token='abc',
),
cloudbuild.ListWorkerPoolsResponse(
worker_pools=[],
next_page_token='def',
),
cloudbuild.ListWorkerPoolsResponse(
worker_pools=[
cloudbuild.WorkerPool(),
],
next_page_token='ghi',
),
cloudbuild.ListWorkerPoolsResponse(
worker_pools=[
cloudbuild.WorkerPool(),
cloudbuild.WorkerPool(),
],
),
RuntimeError,
)
async_pager = await client.list_worker_pools(request={},)
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, cloudbuild.WorkerPool)
for i in responses)
@pytest.mark.asyncio
async def test_list_worker_pools_async_pages():
client = CloudBuildAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_worker_pools),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
cloudbuild.ListWorkerPoolsResponse(
worker_pools=[
cloudbuild.WorkerPool(),
cloudbuild.WorkerPool(),
cloudbuild.WorkerPool(),
],
next_page_token='abc',
),
cloudbuild.ListWorkerPoolsResponse(
worker_pools=[],
next_page_token='def',
),
cloudbuild.ListWorkerPoolsResponse(
worker_pools=[
cloudbuild.WorkerPool(),
],
next_page_token='ghi',
),
cloudbuild.ListWorkerPoolsResponse(
worker_pools=[
cloudbuild.WorkerPool(),
cloudbuild.WorkerPool(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_worker_pools(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.CloudBuildGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.CloudBuildGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = CloudBuildClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.CloudBuildGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = CloudBuildClient(
client_options={"scopes": ["1", "2"]},
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.CloudBuildGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = CloudBuildClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.CloudBuildGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.CloudBuildGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize("transport_class", [
transports.CloudBuildGrpcTransport,
transports.CloudBuildGrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.CloudBuildGrpcTransport,
)
def test_cloud_build_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.CloudBuildTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json"
)
def test_cloud_build_base_transport():
# Instantiate the base transport.
with mock.patch('google.devtools.cloudbuild_v1.services.cloud_build.transports.CloudBuildTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.CloudBuildTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'create_build',
'get_build',
'list_builds',
'cancel_build',
'retry_build',
'approve_build',
'create_build_trigger',
'get_build_trigger',
'list_build_triggers',
'delete_build_trigger',
'update_build_trigger',
'run_build_trigger',
'receive_trigger_webhook',
'create_worker_pool',
'get_worker_pool',
'delete_worker_pool',
'update_worker_pool',
'list_worker_pools',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
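# The base transport exercised above is an abstract interface: every RPC
# wrapper and the operations_client property raise NotImplementedError until a
# concrete transport (gRPC or gRPC asyncio) supplies a real implementation,
# shaped roughly like:
#
#     class CloudBuildTransport:
#         def create_worker_pool(self, request):
#             raise NotImplementedError()
#
# (Shape only; the real base class also wraps each method with default retry
# and timeout settings in _prep_wrapped_messages, which later tests mock out.)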
@requires_google_auth_gte_1_25_0
def test_cloud_build_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.devtools.cloudbuild_v1.services.cloud_build.transports.CloudBuildTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.CloudBuildTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json",
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_cloud_build_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.devtools.cloudbuild_v1.services.cloud_build.transports.CloudBuildTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.CloudBuildTransport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json", scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
def test_cloud_build_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.devtools.cloudbuild_v1.services.cloud_build.transports.CloudBuildTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.CloudBuildTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_cloud_build_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
CloudBuildClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_cloud_build_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
CloudBuildClient()
adc.assert_called_once_with(
            scopes=('https://www.googleapis.com/auth/cloud-platform',),
quota_project_id=None,
)
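# Context for the gte/lt 1.25.0 split above and below: google-auth 1.25.0 added
# the default_scopes argument to google.auth.default() and
# load_credentials_from_file(), so newer stacks pass the cloud-platform scope
# as default_scopes while older ones must fold it directly into scopes.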
@pytest.mark.parametrize(
"transport_class",
[
transports.CloudBuildGrpcTransport,
transports.CloudBuildGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_cloud_build_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
            default_scopes=('https://www.googleapis.com/auth/cloud-platform',),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.CloudBuildGrpcTransport,
transports.CloudBuildGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_cloud_build_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.CloudBuildGrpcTransport, grpc_helpers),
(transports.CloudBuildGrpcAsyncIOTransport, grpc_helpers_async)
],
)
def test_cloud_build_transport_create_channel(transport_class, grpc_helpers):
    # The transport should fall back to ADC credentials and create its channel
    # with the expected endpoint, scopes, and gRPC options.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(
quota_project_id="octopus",
scopes=["1", "2"]
)
create_channel.assert_called_with(
"cloudbuild.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
scopes=["1", "2"],
default_host="cloudbuild.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("transport_class", [transports.CloudBuildGrpcTransport, transports.CloudBuildGrpcAsyncIOTransport])
def test_cloud_build_grpc_transport_client_cert_source_for_mtls(
transport_class
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
    # Check that when ssl_channel_credentials is not provided,
    # client_cert_source_for_mtls is used instead.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert,
private_key=expected_key
)
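# Summary of the mTLS fallback exercised above: when ssl_channel_credentials is
# not supplied but client_cert_source_for_mtls is, the transport invokes the
# callback to obtain (cert_bytes, key_bytes) and builds channel credentials via
# grpc.ssl_channel_credentials(certificate_chain=cert_bytes,
# private_key=key_bytes), which is exactly what mock_ssl_cred records above.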
def test_cloud_build_host_no_port():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='cloudbuild.googleapis.com'),
)
assert client.transport._host == 'cloudbuild.googleapis.com:443'
def test_cloud_build_host_with_port():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='cloudbuild.googleapis.com:8000'),
)
assert client.transport._host == 'cloudbuild.googleapis.com:8000'
def test_cloud_build_grpc_transport_channel():
channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.CloudBuildGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_cloud_build_grpc_asyncio_transport_channel():
channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.CloudBuildGrpcAsyncIOTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.CloudBuildGrpcTransport, transports.CloudBuildGrpcAsyncIOTransport])
def test_cloud_build_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.CloudBuildGrpcTransport, transports.CloudBuildGrpcAsyncIOTransport])
def test_cloud_build_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_cloud_build_grpc_lro_client():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(
transport.operations_client,
operations_v1.OperationsClient,
)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_cloud_build_grpc_lro_async_client():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc_asyncio',
)
transport = client.transport
    # Ensure that we have an api-core operations client.
assert isinstance(
transport.operations_client,
operations_v1.OperationsAsyncClient,
)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_build_path():
project = "squid"
build = "clam"
expected = "projects/{project}/builds/{build}".format(project=project, build=build, )
actual = CloudBuildClient.build_path(project, build)
assert expected == actual
def test_parse_build_path():
expected = {
"project": "whelk",
"build": "octopus",
}
path = CloudBuildClient.build_path(**expected)
# Check that the path construction is reversible.
actual = CloudBuildClient.parse_build_path(path)
assert expected == actual
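# Sketch of the path helper pair tested above (assumed shape only): the
# generated client builds resource names with str.format and parses them back
# with a compiled regular expression, roughly:
#
#     _BUILD_PATH_RE = re.compile(
#         r"^projects/(?P<project>.+?)/builds/(?P<build>.+?)$")
#
#     def parse_build_path(path):
#         m = _BUILD_PATH_RE.match(path)
#         return m.groupdict() if m else {}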
def test_build_trigger_path():
project = "oyster"
trigger = "nudibranch"
expected = "projects/{project}/triggers/{trigger}".format(project=project, trigger=trigger, )
actual = CloudBuildClient.build_trigger_path(project, trigger)
assert expected == actual
def test_parse_build_trigger_path():
expected = {
"project": "cuttlefish",
"trigger": "mussel",
}
path = CloudBuildClient.build_trigger_path(**expected)
# Check that the path construction is reversible.
actual = CloudBuildClient.parse_build_trigger_path(path)
assert expected == actual
def test_crypto_key_path():
project = "winkle"
location = "nautilus"
keyring = "scallop"
key = "abalone"
expected = "projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key}".format(project=project, location=location, keyring=keyring, key=key, )
actual = CloudBuildClient.crypto_key_path(project, location, keyring, key)
assert expected == actual
def test_parse_crypto_key_path():
expected = {
"project": "squid",
"location": "clam",
"keyring": "whelk",
"key": "octopus",
}
path = CloudBuildClient.crypto_key_path(**expected)
# Check that the path construction is reversible.
actual = CloudBuildClient.parse_crypto_key_path(path)
assert expected == actual
def test_network_path():
project = "oyster"
network = "nudibranch"
expected = "projects/{project}/global/networks/{network}".format(project=project, network=network, )
actual = CloudBuildClient.network_path(project, network)
assert expected == actual
def test_parse_network_path():
expected = {
"project": "cuttlefish",
"network": "mussel",
}
path = CloudBuildClient.network_path(**expected)
# Check that the path construction is reversible.
actual = CloudBuildClient.parse_network_path(path)
assert expected == actual
def test_secret_version_path():
project = "winkle"
secret = "nautilus"
version = "scallop"
expected = "projects/{project}/secrets/{secret}/versions/{version}".format(project=project, secret=secret, version=version, )
actual = CloudBuildClient.secret_version_path(project, secret, version)
assert expected == actual
def test_parse_secret_version_path():
expected = {
"project": "abalone",
"secret": "squid",
"version": "clam",
}
path = CloudBuildClient.secret_version_path(**expected)
# Check that the path construction is reversible.
actual = CloudBuildClient.parse_secret_version_path(path)
assert expected == actual
def test_service_account_path():
project = "whelk"
service_account = "octopus"
expected = "projects/{project}/serviceAccounts/{service_account}".format(project=project, service_account=service_account, )
actual = CloudBuildClient.service_account_path(project, service_account)
assert expected == actual
def test_parse_service_account_path():
expected = {
"project": "oyster",
"service_account": "nudibranch",
}
path = CloudBuildClient.service_account_path(**expected)
# Check that the path construction is reversible.
actual = CloudBuildClient.parse_service_account_path(path)
assert expected == actual
def test_subscription_path():
project = "cuttlefish"
subscription = "mussel"
expected = "projects/{project}/subscriptions/{subscription}".format(project=project, subscription=subscription, )
actual = CloudBuildClient.subscription_path(project, subscription)
assert expected == actual
def test_parse_subscription_path():
expected = {
"project": "winkle",
"subscription": "nautilus",
}
path = CloudBuildClient.subscription_path(**expected)
# Check that the path construction is reversible.
actual = CloudBuildClient.parse_subscription_path(path)
assert expected == actual
def test_topic_path():
project = "scallop"
topic = "abalone"
expected = "projects/{project}/topics/{topic}".format(project=project, topic=topic, )
actual = CloudBuildClient.topic_path(project, topic)
assert expected == actual
def test_parse_topic_path():
expected = {
"project": "squid",
"topic": "clam",
}
path = CloudBuildClient.topic_path(**expected)
# Check that the path construction is reversible.
actual = CloudBuildClient.parse_topic_path(path)
assert expected == actual
def test_worker_pool_path():
project = "whelk"
location = "octopus"
worker_pool = "oyster"
expected = "projects/{project}/locations/{location}/workerPools/{worker_pool}".format(project=project, location=location, worker_pool=worker_pool, )
actual = CloudBuildClient.worker_pool_path(project, location, worker_pool)
assert expected == actual
def test_parse_worker_pool_path():
expected = {
"project": "nudibranch",
"location": "cuttlefish",
"worker_pool": "mussel",
}
path = CloudBuildClient.worker_pool_path(**expected)
# Check that the path construction is reversible.
actual = CloudBuildClient.parse_worker_pool_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "winkle"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = CloudBuildClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nautilus",
}
path = CloudBuildClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = CloudBuildClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "scallop"
expected = "folders/{folder}".format(folder=folder, )
actual = CloudBuildClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "abalone",
}
path = CloudBuildClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = CloudBuildClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "squid"
expected = "organizations/{organization}".format(organization=organization, )
actual = CloudBuildClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "clam",
}
path = CloudBuildClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = CloudBuildClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "whelk"
expected = "projects/{project}".format(project=project, )
actual = CloudBuildClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "octopus",
}
path = CloudBuildClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = CloudBuildClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "oyster"
location = "nudibranch"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = CloudBuildClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "cuttlefish",
"location": "mussel",
}
path = CloudBuildClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = CloudBuildClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.CloudBuildTransport, '_prep_wrapped_messages') as prep:
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.CloudBuildTransport, '_prep_wrapped_messages') as prep:
transport_class = CloudBuildClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = CloudBuildAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc_asyncio",
)
with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport
)
with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
'grpc',
]
for transport in transports:
client = CloudBuildClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
|
import pytest
from pymeos import GeomPoint
from pymeos.temporal import (TemporalDuration, TBoolInst, TGeomPointInst,
TIntInst, TBoolInstSet, TGeomPointInstSet,
TIntInstSet)
from ..utils import unix_dt
def get_sample_tinstant_set():
tb1 = TBoolInst(True, unix_dt(2011, 1, 1))
tb2 = TBoolInst(True, unix_dt(2011, 1, 2))
return TBoolInstSet({tb1, tb2})
@pytest.mark.parametrize("actual", [
TIntInstSet({TIntInst(10, unix_dt(2020, 9, 10)), TIntInst(20, unix_dt(2019, 9, 10))}),
TIntInstSet({"10@2020-09-10 01:00:00+01", "20@2019-09-10 01:00:00+01"}),
TIntInstSet("{10@2020-09-10 01:00:00+01, 20@2019-09-10 01:00:00+01}"),
])
def test_different_constructors(actual):
assert actual.duration == TemporalDuration.InstantSet
assert actual.duration.name == 'InstantSet'
assert len(actual.instants) == 2
assert actual.startInstant == TIntInst(20, unix_dt(2019, 9, 10))
assert actual.endInstant == TIntInst(10, unix_dt(2020, 9, 10))
@pytest.mark.parametrize("expected_srid, actual", [
(0, TGeomPointInstSet({TGeomPointInst(GeomPoint(20, 30), unix_dt(2020, 9, 10)), TGeomPointInst(GeomPoint(24, 32), unix_dt(2019, 9, 10))})),
(4326, TGeomPointInstSet({TGeomPointInst(GeomPoint(20, 30), unix_dt(2020, 9, 10)), TGeomPointInst(GeomPoint(24, 32), unix_dt(2019, 9, 10))}, 4326)),
(4326, TGeomPointInstSet({TGeomPointInst(GeomPoint(20, 30, 4326), unix_dt(2020, 9, 10)), TGeomPointInst(GeomPoint(24, 32, 4326), unix_dt(2019, 9, 10))})),
(4326, TGeomPointInstSet({TGeomPointInst(GeomPoint(20, 30, 4326), unix_dt(2020, 9, 10)), TGeomPointInst(GeomPoint(24, 32, 4326), unix_dt(2019, 9, 10))}, 0)),
(4326, TGeomPointInstSet({TGeomPointInst(GeomPoint(20, 30, 4326), unix_dt(2020, 9, 10)), TGeomPointInst(GeomPoint(24, 32, 4326), unix_dt(2019, 9, 10))}, 4326)),
(0, TGeomPointInstSet({"POINT (20 30)@2020-09-10 01:00:00+01", "POINT (24 32)@2019-09-10 01:00:00+01"})),
(4326, TGeomPointInstSet({"POINT (20 30)@2020-09-10 01:00:00+01", "POINT (24 32)@2019-09-10 01:00:00+01"}, 4326)),
(4326, TGeomPointInstSet({"SRID=4326;POINT (20 30)@2020-09-10 01:00:00+01", "SRID=4326;POINT (24 32)@2019-09-10 01:00:00+01"})),
(4326, TGeomPointInstSet({"SRID=4326;POINT (20 30)@2020-09-10 01:00:00+01", "SRID=4326;POINT (24 32)@2019-09-10 01:00:00+01"}, 0)),
(4326, TGeomPointInstSet({"SRID=4326;POINT (20 30)@2020-09-10 01:00:00+01", "SRID=4326;POINT (24 32)@2019-09-10 01:00:00+01"}, 4326)),
(0, TGeomPointInstSet("{POINT (20 30)@2020-09-10 01:00:00+01, POINT (24 32)@2019-09-10 01:00:00+01}")),
(4326, TGeomPointInstSet("{POINT (20 30)@2020-09-10 01:00:00+01, POINT (24 32)@2019-09-10 01:00:00+01}", 4326)),
(4326, TGeomPointInstSet("{SRID=4326;POINT (20 30)@2020-09-10 01:00:00+01, SRID=4326;POINT (24 32)@2019-09-10 01:00:00+01}")),
(4326, TGeomPointInstSet("{SRID=4326;POINT (20 30)@2020-09-10 01:00:00+01, SRID=4326;POINT (24 32)@2019-09-10 01:00:00+01}", 0)),
(4326, TGeomPointInstSet("{SRID=4326;POINT (20 30)@2020-09-10 01:00:00+01, SRID=4326;POINT (24 32)@2019-09-10 01:00:00+01}", 4326)),
(4326, TGeomPointInstSet("SRID=4326;{POINT (20 30)@2020-09-10 01:00:00+01, POINT (24 32)@2019-09-10 01:00:00+01}")),
(4326, TGeomPointInstSet("SRID=4326;{POINT (20 30)@2020-09-10 01:00:00+01, POINT (24 32)@2019-09-10 01:00:00+01}", 0)),
(4326, TGeomPointInstSet("SRID=4326;{POINT (20 30)@2020-09-10 01:00:00+01, POINT (24 32)@2019-09-10 01:00:00+01}", 4326)),
])
def test_different_geom_constructors(expected_srid, actual):
assert actual.duration == TemporalDuration.InstantSet
assert actual.duration.name == 'InstantSet'
assert len(actual.instants) == 2
assert actual.startInstant == TGeomPointInst(GeomPoint(24, 32, expected_srid), unix_dt(2019, 9, 10))
assert actual.endInstant == TGeomPointInst(GeomPoint(20, 30, expected_srid), unix_dt(2020, 9, 10))
assert actual.srid == expected_srid
assert actual.startValue.srid == expected_srid
@pytest.mark.parametrize("args", [
({TGeomPointInst(GeomPoint(20, 30, 5676), unix_dt(2020, 9, 10)), TGeomPointInst(GeomPoint(24, 32, 5676), unix_dt(2019, 9, 10))}, 4326),
({"SRID=5676;POINT (20 30)@2020-09-10 01:00:00+01", "SRID=5676;POINT (24 32)@2019-09-10 01:00:00+01"}, 4326),
("{SRID=5676;POINT (20 30)@2020-09-10 01:00:00+01, SRID=5676;POINT (24 32)@2019-09-10 01:00:00+01}", 4326),
("SRID=5676;{POINT (20 30)@2020-09-10 01:00:00+01, POINT (24 32)@2019-09-10 01:00:00+01}", 4326),
])
def test_constructors_with_conflicting_srids(args):
with pytest.raises(ValueError, match="Conflicting SRIDs provided. Given: 4326, while Geometry contains: 5676"):
TGeomPointInstSet(*args)
def test_constructor():
tb1 = TBoolInst(True, unix_dt(2011, 1, 1))
tb2 = TBoolInst(True, unix_dt(2011, 1, 2))
tb3 = TBoolInst(True, unix_dt(2011, 1, 1)) # Repeating
tsetb = TBoolInstSet({tb1, tb2, tb3})
instants = tsetb.instants
assert len(instants) == 2
assert instants == {tb1, tb2}
def test_str():
tsetb = get_sample_tinstant_set()
assert str(tsetb) == '{t@2011-01-01T00:00:00+0000, t@2011-01-02T00:00:00+0000}'
assert repr(tsetb) == '{t@2011-01-01T00:00:00+0000, t@2011-01-02T00:00:00+0000}'
|
from __future__ import with_statement
from functools import wraps
from StringIO import StringIO # No need for cStringIO at this time
import sys
from nose.tools import raises
from fabric.operations import require
#
# Setup/teardown helpers and decorators
#
def mock_streams(*which):
"""
    Replaces the requested stream(s) with a ``StringIO`` during the test, then
    restores them afterwards.
    Must specify which stream(s) via string args, e.g.::
        @mock_streams('stdout')
        def func():
            pass
        @mock_streams('stderr')
        def func():
            pass
        @mock_streams('stdout', 'stderr')
        def func():
            pass
"""
def mocked_streams_decorator(func):
@wraps(func)
def inner_wrapper(*args, **kwargs):
if 'stdout' in which:
my_stdout, sys.stdout = sys.stdout, StringIO()
if 'stderr' in which:
my_stderr, sys.stderr = sys.stderr, StringIO()
result = func(*args, **kwargs)
if 'stderr' in which:
sys.stderr = my_stderr
if 'stdout' in which:
sys.stdout = my_stdout
return result
return inner_wrapper
return mocked_streams_decorator
#
# require()
#
def test_require_single_existing_key():
"""
When given a single existing key, require() throws no exceptions
"""
# 'version' is one of the default values, so we know it'll be there
require('version')
def test_require_multiple_existing_keys():
"""
When given multiple existing keys, require() throws no exceptions
"""
require('version', 'settings_file')
@mock_streams('stderr')
@raises(SystemExit)
def test_require_single_missing_key():
"""
When given a single non-existent key, require() raises SystemExit
"""
require('blah')
@mock_streams('stderr')
@raises(SystemExit)
def test_require_multiple_missing_keys():
"""
When given multiple non-existent keys, require() raises SystemExit
"""
require('foo', 'bar')
@mock_streams('stderr')
@raises(SystemExit)
def test_require_mixed_state_keys():
"""
When given mixed-state keys, require() raises SystemExit
"""
require('foo', 'version')
@mock_streams('stderr')
def test_require_mixed_state_keys_prints_missing_only():
"""
When given mixed-state keys, require() prints missing keys only
"""
try:
require('foo', 'version')
except SystemExit:
err = sys.stderr.getvalue()
assert 'version' not in err
assert 'foo' in err
|
import requests
import sys
import json
from flask import render_template, flash, redirect, request, session
from flask import Flask
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired
from python_terraform import *
app = Flask(__name__)
app.config['SECRET_KEY'] = 'cisco'
def terraformAction(tenantName, epgCount, vmmDomain):
tf = Terraform(working_dir='./', variables={
'tenantName': tenantName, 'epgCount': epgCount, 'vmmDomain': vmmDomain})
plan = tf.plan(no_color=IsFlagged, refresh=False,
capture_output=True, out="plan.out")
approve = {"auto-approve": True}
output = tf.apply(skip_plan=True, **approve)
return output
class LoginForm(FlaskForm):
fabric = StringField('FabricIP', validators=[DataRequired()])
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
rememberMe = BooleanField('Remember Me')
submit = SubmitField('Sign in')
def getAPICCookie():
url = 'https://'+session['fabric']+'/api/aaaLogin.xml'
xml_string = "<aaaUser name='%s' pwd='%s'/>" % (
session['username'], session['password'])
req = requests.post(url, data=xml_string, verify=False)
session['cookie'] = req.cookies['APIC-cookie']
def sendAPICRequest(apicurl):
url = 'https://'+session['fabric']+apicurl
cookies = {}
cookies['APIC-cookie'] = session['cookie']
req = requests.get(url, cookies=cookies, verify=False)
return json.loads(req.text)
@app.route('/terraform', methods=['GET', 'POST'])
def terraform():
if request.method == "POST":
req = request.form
planOutput = terraformAction(
req.get("tenantName"), req.get("epgCount"), req.get("vmmDomain"))
return render_template('terraform.html', plan=planOutput)
return render_template('terraform.html')
@app.route('/menu')
def menu():
return render_template('menu.html')
@app.route('/epgs')
def epgs():
epgList = sendAPICRequest(
"/api/node/class/fvAEPg.json?&order-by=fvAEPg.name")
return render_template('epg.tpl', title='List of all EPGs', epgs=epgList['imdata'])
@app.route('/tenants')
def tenants():
tenantList = sendAPICRequest(
"/api/node/class/fvTenant.json?&order-by=fvTenant.name")
return render_template('tenants.tpl', title='List of all tenants', tenants=tenantList['imdata'])
@app.route('/endpoints')
def endpoints():
endpointList = sendAPICRequest(
"/api/node/class/fvCEp.json")
return render_template('endpoints.tpl', title='List of all endpoints', endpoints=endpointList['imdata'])
@app.route('/')
@app.route('/index.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == "POST":
session['fabric'] = request.form['fabric']
session['username'] = request.form['username']
session['password'] = request.form['password']
try:
getAPICCookie()
if 'cookie' in session:
return redirect('/menu')
except KeyError:
flash('Invalid credentials')
return redirect('login')
return render_template('login.tpl', title='Sign In')
if __name__ == '__main__':
app.run()
|
import fastapi
from fastapi_chameleon import template
from starlette import status
from starlette.requests import Request
from services import episode_service
from services import transcripts_service
from services import shownotes_service
from viewmodels.shared.viewmodel import ViewModelBase
from viewmodels.admin.admin_viewmodel import AdminViewModel
from viewmodels.admin.add_episode import EpisodeAddViewModel
from viewmodels.admin.add_show_notes_viewmodel import ShowNotesAddViewModel
from viewmodels.admin.add_transcripts_viewmodel import TranscriptAddViewModel
from viewmodels.admin.edit_episode_viewmodel import EditEpisodeViewModel
from viewmodels.admin.edit_show_notes_viewmodel import EditShowNotesViewModel
router = fastapi.APIRouter()
# ########## ADMIN HOMEPAGE ##############
# ### GET EPISODE LIST TO DISPLAY FOR EDIT ####
@router.get("/admin/index")
@template(template_file="admin/index.pt")
async def index(request: Request):
vm = AdminViewModel(request)
await vm.load()
if vm.login_status is False:
response = fastapi.responses.RedirectResponse(
url="/", status_code=status.HTTP_302_FOUND
)
return response
else:
return vm.to_dict()
@router.post("/admin/index", include_in_schema=False)
@template()
async def edit_post(request: Request):
vm = AdminViewModel(request)
await vm.load()
episode_number = vm.episode_number
# Redirect to Admin homepage on post
response = fastapi.responses.RedirectResponse(
url=f"/admin/edit-episode/{episode_number}", status_code=status.HTTP_302_FOUND
)
return response
# ########## ADD EPISODE ##############
@router.get("/admin/add-episode", include_in_schema=False)
@template(template_file="admin/add-episode.pt")
def add_episode(request: Request):
vm = EpisodeAddViewModel(request)
if vm.login_status is False:
response = fastapi.responses.RedirectResponse(
url="/", status_code=status.HTTP_302_FOUND
)
return response
else:
return vm.to_dict()
@router.post("/admin/add-episode", include_in_schema=False)
@template()
async def register(request: Request):
vm = EpisodeAddViewModel(request)
await vm.load()
if vm.error:
return vm.to_dict()
# Add the episode
episode = await episode_service.create_episode(
vm.season,
vm.episode_number,
vm.episode_title,
vm.youtube_url,
vm.guest_firstname,
vm.guest_lastname,
vm.topic,
vm.record_date,
vm.record_date_converted,
vm.publish_date,
vm.publish_date_converted,
vm.guest_image,
vm.guest_bio_1,
vm.guest_bio_2,
vm.sponsor_1,
vm.sponsor_2,
vm.published,
vm.episode_length,
vm.episode_length_string,
vm.captivate_url,
)
# Redirect to the episode page
response = fastapi.responses.RedirectResponse(
url="/admin/add-show-notes", status_code=status.HTTP_302_FOUND
)
return response
# ### EDIT EPISODE DETAIL TEMPLATE ####
@router.get("/admin/edit-episode/{episode_number}")
@template(template_file="admin/edit-episode.pt")
async def edit_details(episode_number, request: Request):
vm = EditEpisodeViewModel(episode_number, request)
await vm.load()
if vm.login_status is False:
response = fastapi.responses.RedirectResponse(
url="/", status_code=status.HTTP_302_FOUND
)
return response
else:
episode_number = vm.episode_number
return vm.to_dict()
@router.post("/admin/edit-episode/{episode_number}", include_in_schema=False)
@template()
async def edit_episode_post(episode_number, request: Request):
vm = EditEpisodeViewModel(episode_number, request)
await vm.load()
if vm.error:
return vm.to_dict()
# Edit the episode
episode = await episode_service.edit_episode(
vm.season,
vm.episode_number,
vm.episode_title,
vm.youtube_url,
vm.guest_firstname,
vm.guest_lastname,
vm.topic,
vm.record_date,
vm.record_date_converted,
vm.publish_date,
vm.publish_date_converted,
vm.guest_image,
vm.guest_bio_1,
vm.guest_bio_2,
vm.sponsor_1,
vm.sponsor_2,
vm.published,
vm.episode_length,
vm.episode_length_string,
vm.captivate_url,
)
# Redirect to the admin page
response = fastapi.responses.RedirectResponse(
url="/admin/index", status_code=status.HTTP_302_FOUND
)
return response
# ########## ADD SHOWNOTES ##############
@router.get("/admin/add-show-notes", include_in_schema=False)
@template("admin/add-show-notes.pt")
def add_show_notes(request: Request):
vm = ShowNotesAddViewModel(request)
if vm.login_status is False:
response = fastapi.responses.RedirectResponse(
url="/", status_code=status.HTTP_302_FOUND
)
return response
else:
return vm.to_dict()
@router.post("/admin/add-show-notes", include_in_schema=False)
@template("admin/add-show-notes.pt")
async def add_show_notes(request: Request):
vm = ShowNotesAddViewModel(request)
await vm.load()
if vm.error:
return vm.to_dict()
# Add the show notes
show_notes = await shownotes_service.create_show_notes(
vm.season,
vm.episode,
vm.published,
vm.timestamp_1,
vm.notes_1,
vm.link_1,
vm.link_text_1,
vm.timestamp_2,
vm.notes_2,
vm.link_2,
vm.link_text_2,
vm.timestamp_3,
vm.notes_3,
vm.link_3,
vm.link_text_3,
vm.timestamp_4,
vm.notes_4,
vm.link_4,
vm.link_text_4,
vm.timestamp_5,
vm.notes_5,
vm.link_5,
vm.link_text_5,
vm.timestamp_6,
vm.notes_6,
vm.link_6,
vm.link_text_6,
vm.timestamp_7,
vm.notes_7,
vm.link_7,
vm.link_text_7,
vm.timestamp_8,
vm.notes_8,
vm.link_8,
vm.link_text_8,
vm.timestamp_9,
vm.notes_9,
vm.link_9,
vm.link_text_9,
vm.timestamp_10,
vm.notes_10,
vm.link_10,
vm.link_text_10,
)
# Redirect to the admin page
response = fastapi.responses.RedirectResponse(
url="/admin/index", status_code=status.HTTP_302_FOUND
)
return response
# ### EDIT SHOW NOTES ####
@router.get("/admin/edit-shownotes/{episode_number}")
@template(template_file="admin/edit-shownotes.pt")
async def edit_show_notes_get(episode_number, request: Request):
vm = EditShowNotesViewModel(episode_number, request)
await vm.load()
if vm.login_status is False:
response = fastapi.responses.RedirectResponse(
url="/", status_code=status.HTTP_302_FOUND
)
return response
else:
episode_number = vm.episode_number
return vm.to_dict()
@router.post("/admin/edit-shownotes/{episode_number}", include_in_schema=False)
@template("admin/edit-shownotes.pt{episode_number}")
async def edit_show_notes_post(episode_number, request: Request):
vm = EditShowNotesViewModel(episode_number, request)
await vm.load()
if vm.error:
return vm.to_dict()
# Edit the show notes
show_notes = await shownotes_service.edit_show_notes(
vm.season,
vm.episode_number,
vm.timestamp_1,
vm.notes_1,
vm.link_1,
vm.link_text_1,
vm.timestamp_2,
vm.notes_2,
vm.link_2,
vm.link_text_2,
vm.timestamp_3,
vm.notes_3,
vm.link_3,
vm.link_text_3,
vm.timestamp_4,
vm.notes_4,
vm.link_4,
vm.link_text_4,
vm.timestamp_5,
vm.notes_5,
vm.link_5,
vm.link_text_5,
vm.timestamp_6,
vm.notes_6,
vm.link_6,
vm.link_text_6,
vm.timestamp_7,
vm.notes_7,
vm.link_7,
vm.link_text_7,
vm.timestamp_8,
vm.notes_8,
vm.link_8,
vm.link_text_8,
vm.timestamp_9,
vm.notes_9,
vm.link_9,
vm.link_text_9,
vm.timestamp_10,
vm.notes_10,
vm.link_10,
vm.link_text_10,
)
# Redirect to the admin page
response = fastapi.responses.RedirectResponse(
url="/admin/index", status_code=status.HTTP_302_FOUND
)
return response
# ########## ADD Transcripts ##############
@router.get("/admin/add-transcripts", include_in_schema=False)
@template(template_file="admin/add-transcripts.pt")
def add_transcripts_get(request: Request):
vm = TranscriptAddViewModel(request)
if vm.login_status is False:
response = fastapi.responses.RedirectResponse(
url="/", status_code=status.HTTP_302_FOUND
)
return response
else:
return vm.to_dict()
@router.post("/admin/add-transcripts", include_in_schema=False)
@template()
async def add_transcripts(request: Request):
vm = TranscriptAddViewModel(request)
await vm.load()
if vm.error:
return vm.to_dict()
# Add the transcript
transcript = await transcripts_service.create_transcript(
vm.season,
vm.episode_number,
vm.transcript_1,
vm.transcript_2,
)
# Redirect to the episode page
response = fastapi.responses.RedirectResponse(
url="/episodes/all", status_code=status.HTTP_302_FOUND
)
return response
|
import os
from time import time
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.python.client import timeline
from graph_supervised_wgan_crn_enc_rand_stack_gru import build_multi_tower_graph, build_single_graph_stage_1, build_single_graph_stage_2
from input_pipeline_rand_mix_stack import build_input_queue_paired_sketchy, build_input_queue_paired_sketchy_test, build_input_queue_paired_flickr, build_input_queue_paired_mixed
import inception_score
tf.logging.set_verbosity(tf.logging.INFO)
inception_v4_ckpt_path = './inception_v4_model/inception_v4.ckpt'
vgg_16_ckpt_path = './vgg_16_model/vgg_16.ckpt'
def one_hot_to_dense(labels):
# Assume on value is 1
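    # e.g. a one-hot batch [[0, 1, 0], [1, 0, 0]] maps to dense labels [1, 0]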
batch_size = int(labels.get_shape()[0])
return tf.reshape(tf.where(tf.equal(labels, 1))[:, 1], (batch_size,))
def print_parameter_count(verbose=False):
    # Print the trainable parameter count for each variable scope.
    # Discriminator scopes are reported under 'critic' labels, as before.
    scopes_and_labels = [
        ('generator_s1', 'generator_s1'),
        ('generator_s2', 'generator_s2'),
        ('generator', 'generator'),
        ('discriminator_s1', 'critic_s1'),
        ('discriminator_s2', 'critic_s2'),
        ('discriminator', 'critic'),
    ]
    for scope, label in scopes_and_labels:
        total_parameters = 0
        for variable in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope):
            # shape is an array of tf.Dimension
            shape = variable.get_shape()
            variable_parameters = 1
            for dim in shape:
                variable_parameters *= dim.value
            if verbose and len(shape) > 1:
                print(shape)
                print(variable_parameters)
            total_parameters += variable_parameters
        print(label)
        print(total_parameters)
def train(**kwargs):
def get_inception_score_origin(generator_out, data_format, session, n):
all_samples = []
img_dim = 64
for i in range(n // 100):
all_samples.append(session.run(generator_out[0]))
all_samples = np.concatenate(all_samples, axis=0)
all_samples = ((all_samples + 1.) * (255. / 2)).astype('int32')
all_samples = all_samples.reshape((-1, 3, img_dim, img_dim))
if data_format == 'NCHW':
all_samples = all_samples.transpose(0, 2, 3, 1)
return inception_score.get_inception_score(list(all_samples), session)
def get_inception_score(generator_out, batch_size, img_dim, channel, data_format, sess):
all_samples = []
for i in range(int(1000/batch_size)):
all_samples.append(sess.run(generator_out))
all_samples = np.concatenate(all_samples, axis=0)
all_samples = ((all_samples + 1.) * (255. / 2)).astype('int32')
if data_format == 'NCHW':
all_samples = all_samples.reshape((-1, channel, img_dim, img_dim)).transpose(0, 2, 3, 1)
else:
all_samples = all_samples.reshape((-1, img_dim, img_dim, channel))
return inception_score.get_inception_score(list(all_samples), sess)
resume = False
iter_from = 0
status = 0
# Roll out the parameters
appendix = kwargs["resume_from"]
# dset1 = kwargs["dset1"]
# dset2 = kwargs["dset2"]
batch_size = kwargs["batch_size"]
img_dim = kwargs["img_dim"]
num_classes = kwargs["num_classes"]
noise_dim = kwargs["noise_dim"]
max_iter_step = kwargs["max_iter_step"]
weight_decay_rate = kwargs["weight_decay_rate"]
deconv_weight_decay_rate = kwargs["deconv_weight_decay_rate"]
Diters = kwargs["disc_iterations"]
ld = kwargs["lambda"]
optimizer = kwargs["optimizer"]
lr_G = kwargs["lr_G"]
lr_D = kwargs["lr_D"]
# device = kwargs["device"]
num_gpu = kwargs["num_gpu"]
log_dir = kwargs["log_dir"]
ckpt_dir = kwargs["ckpt_dir"]
data_format = kwargs["data_format"]
distance_map = kwargs["distance_map"]
small_img = kwargs["small_img"]
if not (appendix is None or appendix == ''):
resume = True
iter_from = kwargs["iter_from"]
# Temp - test auto method
num_classes = None
channel1 = 3
channel2 = 3
distance_map = distance_map != 0
small = small_img != 0
if small:
img_dim = 64
else:
img_dim = 256
batch_portion = np.array([1, 1, 1, 1], dtype=np.int32)
# Time counter
prev_time = float("-inf")
curr_time = float("-inf")
# Stage division
mid_point = int(max_iter_step / 2)
max_iter_step_s1 = mid_point
max_iter_step_s2 = max_iter_step - mid_point
stage_1_log_dir = os.path.join(log_dir, "stage1")
if not os.path.exists(stage_1_log_dir):
os.mkdir(stage_1_log_dir)
stage_1_ckpt_dir = os.path.join(ckpt_dir, "stage1")
if not os.path.exists(stage_1_ckpt_dir):
os.mkdir(stage_1_ckpt_dir)
stage_2_log_dir = os.path.join(log_dir, "stage2")
if not os.path.exists(stage_2_log_dir):
os.mkdir(stage_2_log_dir)
stage_2_ckpt_dir = os.path.join(ckpt_dir, "stage2")
if not os.path.exists(stage_2_ckpt_dir):
os.mkdir(stage_2_ckpt_dir)
#################################### Stage 1 ##################################
if iter_from < mid_point:
tf.reset_default_graph()
print("Stage 1")
print(iter_from)
print(max_iter_step_s1)
assert inception_score.softmax.graph != tf.get_default_graph()
inception_score._init_inception()
counter = tf.Variable(initial_value=iter_from, dtype=tf.int32, trainable=False)
counter_addition_op = tf.assign_add(counter, 1, use_locking=True)
portion = 0.1 + tf.minimum(0.8, (tf.cast(counter, tf.float32) / max_iter_step_s1 / 0.95) ** 1.0)
# Construct data queue
with tf.device('/cpu:0'):
images_small, sketches_small, images_large, sketches_large, image_paired_class_ids = build_input_queue_paired_mixed(
batch_size=batch_size * num_gpu,
img_dim=img_dim,
test_mode=False,
# portion=tf.minimum(0.9, tf.cast(counter, tf.float32) / (0.9 * max_iter_step)),
portion=portion,
data_format=data_format,
distance_map=distance_map,
small=small, capacity=2 ** 12)
image_paired_class_ids = one_hot_to_dense(image_paired_class_ids)
with tf.device('/cpu:0'):
images_small_d, _, _, _, _ = build_input_queue_paired_mixed(
batch_size=batch_size * num_gpu,
img_dim=img_dim,
test_mode=False,
# portion=tf.minimum(0.9, tf.cast(counter, tf.float32) / (0.9 * max_iter_step)),
portion=tf.constant(0.3, dtype=tf.float32),
data_format=data_format,
distance_map=distance_map,
small=small, capacity=2 ** 12)
# image_paired_class_ids = one_hot_to_dense(image_paired_class_ids)
with tf.device('/cpu:0'):
_, sketches_small_100, _, sketches_large_100, image_paired_class_ids_100 = build_input_queue_paired_sketchy(
batch_size=100,
img_dim=img_dim,
test_mode=False,
data_format=data_format,
distance_map=distance_map,
small=small, capacity=1024)
image_paired_class_ids_100 = one_hot_to_dense(image_paired_class_ids_100)
opt_g, opt_d, loss_g, loss_d, merged_all, gen_out = build_multi_tower_graph(
images_small, sketches_small, images_large, sketches_large,
images_small_d,
sketches_small_100, sketches_large_100,
image_paired_class_ids, image_paired_class_ids_100,
batch_size=batch_size, num_gpu=num_gpu, batch_portion=batch_portion, training=True,
in_channel1=channel1, in_channel2=channel2, out_channel=channel1,
img_dim=img_dim, num_classes=num_classes,
learning_rates={
"generator": lr_G,
"discriminator": lr_D,
},
counter=counter, portion=portion, max_iter_step=max_iter_step_s1, stage=1,
ld=ld, data_format=data_format,
distance_map=distance_map,
optimizer=optimizer)
inception_score_mean = tf.placeholder(dtype=tf.float32, shape=())
inception_score_std = tf.placeholder(dtype=tf.float32, shape=())
inception_score_mean_summary = tf.summary.scalar("inception_score/mean", inception_score_mean)
inception_score_std_summary = tf.summary.scalar("inception_score/std", inception_score_std)
inception_score_summary = tf.summary.merge((inception_score_mean_summary, inception_score_std_summary))
saver_s1 = tf.train.Saver()
try:
saver2 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='InceptionV4'))
perceptual_model_path = inception_v4_ckpt_path
except:
try:
saver2 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='vgg_16'))
perceptual_model_path = vgg_16_ckpt_path
except:
saver2 = None
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False,
intra_op_parallelism_threads=10)
# config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1 # JIT XLA
# config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = 0.9
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
if saver2 is not None:
saver2.restore(sess, perceptual_model_path)
# saver.restore(sess, tf.train.latest_checkpoint(ckpt_dir))
summary_writer = tf.summary.FileWriter(stage_1_log_dir, sess.graph)
if resume:
saver_s1.restore(sess, tf.train.latest_checkpoint(stage_1_ckpt_dir))
summary_writer.reopen()
run_options = tf.RunOptions(trace_level=tf.RunOptions.NO_TRACE)
run_metadata = tf.RunMetadata()
print_parameter_count(verbose=False)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
sess.run([counter.assign(iter_from)])
for i in range(iter_from, max_iter_step_s1):
if status == -1:
break
if i % 100 == 0:
curr_time = time()
elapsed = curr_time - prev_time
print(
"Now at iteration %d. Elapsed time: %.5fs. Average time: %.5fs/iter" % (i, elapsed, elapsed / 100.))
prev_time = curr_time
diters = Diters
# Train Discriminator
for j in range(diters):
# print(j)
if i % 100 == 0 and j == 0:
_, merged, loss_d_out = sess.run([opt_d, merged_all, loss_d],
options=run_options,
run_metadata=run_metadata)
# trace = timeline.Timeline(step_stats=run_metadata.step_stats)
summary_writer.add_summary(merged, i)
summary_writer.add_run_metadata(
run_metadata, 'discriminator_metadata {}'.format(i), i)
else:
_, loss_d_out = sess.run([opt_d, loss_d])
if np.isnan(np.sum(loss_d_out)):
status = -1
print("NaN occurred during training D")
return status
# Train Generator
if i % 100 == 0:
_, merged, loss_g_out, counter_out, _ = sess.run(
[opt_g, merged_all, loss_g, counter, counter_addition_op],
options=run_options,
run_metadata=run_metadata)
# trace = timeline.Timeline(step_stats=run_metadata.step_stats)
summary_writer.add_summary(merged, i)
summary_writer.add_run_metadata(
run_metadata, 'generator_metadata {}'.format(i), i)
else:
_, loss_g_out, counter_out, _ = sess.run([opt_g, loss_g, counter, counter_addition_op])
if np.isnan(np.sum(loss_g_out)):
status = -1
print("NaN occurred during training G")
return status
if i % 5000 == 4999:
saver_s1.save(sess, os.path.join(
stage_1_ckpt_dir, "model.ckpt"), global_step=i)
if i % 1000 == 999:
# this_score = get_inception_score(gen_out[1], batch_size=batch_size, img_dim=img_dim, channel=3,
# data_format=data_format, sess=sess)
this_score = get_inception_score_origin(gen_out, data_format=data_format,
session=sess, n=10000)
merged_sum = sess.run(inception_score_summary, feed_dict={
inception_score_mean: this_score[0],
inception_score_std: this_score[1],
})
summary_writer.add_summary(merged_sum, i)
coord.request_stop()
coord.join(threads)
################################### Stage 2 ######################################
if iter_from < mid_point:
iter_from = 0
else:
iter_from = max(0, iter_from - mid_point)
tf.reset_default_graph()
batch_size /= 2
batch_size = int(batch_size)
assert batch_size % 2 == 0
print("Stage 2")
print(iter_from)
print(max_iter_step_s2)
assert inception_score.softmax.graph != tf.get_default_graph()
inception_score._init_inception()
counter = tf.Variable(initial_value=iter_from, dtype=tf.int32, trainable=False)
counter_addition_op = tf.assign_add(counter, 1, use_locking=True)
portion = 0.1 + tf.minimum(0.75, (tf.cast(counter, tf.float32) / (0.9 * max_iter_step_s2)) ** 1.0)
# Construct data queue
with tf.device('/cpu:0'):
images_small, sketches_small, images_large, sketches_large, image_paired_class_ids = build_input_queue_paired_mixed(
batch_size=batch_size * num_gpu,
img_dim=img_dim,
test_mode=False,
portion=portion,
data_format=data_format,
distance_map=distance_map,
small=small, capacity=2 ** 12)
image_paired_class_ids = one_hot_to_dense(image_paired_class_ids)
with tf.device('/cpu:0'):
_, sketches_small_100, _, sketches_large_100, image_paired_class_ids_100 = build_input_queue_paired_sketchy(
batch_size=100,
img_dim=img_dim,
test_mode=False,
data_format=data_format,
distance_map=distance_map,
small=small, capacity=1024)
image_paired_class_ids_100 = one_hot_to_dense(image_paired_class_ids_100)
opt_g, opt_d, loss_g, loss_d, merged_all, gen_out = build_multi_tower_graph(
images_small, sketches_small, images_large, sketches_large,
sketches_small_100, sketches_large_100,
image_paired_class_ids, image_paired_class_ids_100,
batch_size=batch_size, num_gpu=num_gpu, batch_portion=batch_portion, training=True,
in_channel1=channel1, in_channel2=channel2, out_channel=channel1,
img_dim=img_dim, num_classes=num_classes,
learning_rates={
"generator": lr_G,
"discriminator": lr_D,
},
counter=counter, portion=portion, max_iter_step=max_iter_step_s2, stage=2,
ld=ld, data_format=data_format,
distance_map=distance_map,
optimizer=optimizer)
inception_score_mean = tf.placeholder(dtype=tf.float32, shape=())
inception_score_std = tf.placeholder(dtype=tf.float32, shape=())
inception_score_mean_summary = tf.summary.scalar("inception_score/mean", inception_score_mean)
inception_score_std_summary = tf.summary.scalar("inception_score/std", inception_score_std)
inception_score_summary = tf.summary.merge((inception_score_mean_summary, inception_score_std_summary))
# Add stage 1 parameters
var_collections = {
'generator_s1': tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator_s1'),
'discriminator_s1': tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator_s1'),
'generator_s2': tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator_s2'),
'discriminator_s2': tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator_s2'),
}
saver2 = None
saver_s1 = tf.train.Saver(var_collections['generator_s1'])
saver_s2 = tf.train.Saver()
# try:
# saver2 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='InceptionV4'))
# perceptual_model_path = inception_v4_ckpt_path
# except:
# try:
# saver2 = tf.train.Saver(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='vgg_16'))
# perceptual_model_path = vgg_16_ckpt_path
# except:
# saver2 = None
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False,
intra_op_parallelism_threads=10)
# config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1 # JIT XLA
# config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = 0.9
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
# if saver2 is not None:
# saver2.restore(sess, perceptual_model_path)
# saver.restore(sess, tf.train.latest_checkpoint(ckpt_dir))
summary_writer = tf.summary.FileWriter(stage_2_log_dir, sess.graph)
if resume:
saver_s2.restore(sess, tf.train.latest_checkpoint(stage_2_ckpt_dir))
summary_writer.reopen()
else:
saver_s1.restore(sess, tf.train.latest_checkpoint(stage_1_ckpt_dir))
run_options = tf.RunOptions(trace_level=tf.RunOptions.NO_TRACE)
run_metadata = tf.RunMetadata()
print_parameter_count(verbose=False)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(iter_from, max_iter_step_s2):
if status == -1:
break
if i % 100 == 0:
curr_time = time()
elapsed = curr_time - prev_time
print(
"Now at iteration %d. Elapsed time: %.5fs. Average time: %.5fs/iter" % (
i, elapsed, elapsed / 100.))
prev_time = curr_time
diters = Diters
# Train Discriminator
for j in range(diters):
# print(j)
if i % 100 == 0 and j == 0:
_, merged, loss_d_out = sess.run([opt_d, merged_all, loss_d],
options=run_options,
run_metadata=run_metadata)
# trace = timeline.Timeline(step_stats=run_metadata.step_stats)
summary_writer.add_summary(merged, i)
summary_writer.add_run_metadata(
run_metadata, 'discriminator_metadata {}'.format(i), i)
else:
_, loss_d_out = sess.run([opt_d, loss_d])
if np.isnan(np.sum(loss_d_out)):
status = -1
print("NaN occurred during training D")
return status
# Train Generator
if i % 100 == 0:
_, merged, loss_g_out, counter_out, _ = sess.run(
[opt_g, merged_all, loss_g, counter, counter_addition_op],
options=run_options,
run_metadata=run_metadata)
# trace = timeline.Timeline(step_stats=run_metadata.step_stats)
summary_writer.add_summary(merged, i)
summary_writer.add_run_metadata(
run_metadata, 'generator_metadata {}'.format(i), i)
else:
_, loss_g_out, counter_out, _ = sess.run([opt_g, loss_g, counter, counter_addition_op])
# print(counter_out)
if np.isnan(np.sum(loss_g_out)):
status = -1
print("NaN occurred during training G")
return status
if i % 5000 == 4999:
saver_s2.save(sess, os.path.join(
stage_2_ckpt_dir, "model.ckpt"), global_step=i)
if i % 1000 == 999:
# this_score = get_inception_score(gen_out[1], batch_size=batch_size, img_dim=img_dim, channel=3,
# data_format=data_format, sess=sess)
this_score = get_inception_score_origin(gen_out, data_format=data_format,
session=sess, n=10000)
merged_sum = sess.run(inception_score_summary, feed_dict={
inception_score_mean: this_score[0],
inception_score_std: this_score[1],
})
summary_writer.add_summary(merged_sum, i)
coord.request_stop()
coord.join(threads)
return status
def test(**kwargs):
def binarize(sketch, threshold=245):
sketch[sketch < threshold] = 0
sketch[sketch >= threshold] = 255
return sketch
from scipy import ndimage
# Roll out the parameters
appendix = kwargs["resume_from"]
batch_size = kwargs["batch_size"]
# img_dim = kwargs["img_dim"]
num_classes = kwargs["num_classes"]
# noise_dim = kwargs["noise_dim"]
# max_iter_step = kwargs["max_iter_step"]
# weight_decay_rate = kwargs["weight_decay_rate"]
# deconv_weight_decay_rate = kwargs["deconv_weight_decay_rate"]
# Diters = kwargs["disc_iterations"]
# ld = kwargs["lambda"]
# optimizer = kwargs["optimizer"]
# lr_G = kwargs["lr_G"]
# lr_D = kwargs["lr_D"]
# num_gpu = kwargs["num_gpu"]
log_dir = kwargs["log_dir"]
ckpt_dir = kwargs["ckpt_dir"]
data_format = kwargs["data_format"]
distance_map = kwargs["distance_map"]
small_img = kwargs["small_img"]
# test_folder = kwargs["test_image_folder"]
stage = kwargs["stage"]
if stage == 1:
build_func = build_single_graph_stage_1
elif stage == 2:
build_func = build_single_graph_stage_2
else:
raise ValueError
channel = 3
distance_map = distance_map != 0
small = small_img != 0
if small or stage == 1:
img_dim = 64
else:
img_dim = 256
# batch_size = 20
# output_img = np.zeros((img_dim * 2, img_dim * batch_size, channel))
output_folder = os.path.join(log_dir, 'out')
print(output_folder)
if not os.path.exists(output_folder):
os.mkdir(output_folder)
# Time counter
prev_time = float("-inf")
curr_time = float("-inf")
# Construct data queue
with tf.device('/cpu:0'):
images_small, sketches_small, images_large, sketches_large, image_paired_class_ids, \
categories, imagenet_ids, sketch_ids = build_input_queue_paired_sketchy_test(
batch_size=batch_size,
img_dim=img_dim,
test_mode=True,
data_format=data_format,
distance_map=distance_map,
small=small, capacity=512)
image_paired_class_ids = one_hot_to_dense(image_paired_class_ids)
with tf.device('/gpu:0'):
ret_list = build_func(images_small, sketches_small, None, None, None, None,
image_paired_class_ids, None,
batch_size=batch_size, training=False,
in_channel1=channel, in_channel2=channel,
out_channel=channel,
img_dim=img_dim, num_classes=num_classes,
data_format=data_format,
distance_map=distance_map)
if stage == 1:
print("Stage 1")
stage_1_log_dir = os.path.join(log_dir, "stage1")
if not os.path.exists(stage_1_log_dir):
raise RuntimeError
stage_1_ckpt_dir = os.path.join(ckpt_dir, "stage1")
if not os.path.exists(stage_1_ckpt_dir):
raise RuntimeError
saver = tf.train.Saver()
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
saver.restore(sess, tf.train.latest_checkpoint(stage_1_ckpt_dir))
counter = 0
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
while True:
try:
generated_img, gt_image, input_sketch, category, imagenet_id, sketch_id = sess.run(
[ret_list[0], ret_list[1], ret_list[2], categories, imagenet_ids, sketch_ids])
except Exception as e:
print(e.args)
break
if counter % 100 == 0:
curr_time = time()
elapsed = curr_time - prev_time
print(
"Now at iteration %d. Elapsed time: %.5fs." % (counter, elapsed))
prev_time = curr_time
if data_format == 'NCHW':
generated_img = np.transpose(generated_img, (0, 2, 3, 1))
gt_image = np.transpose(gt_image, (0, 2, 3, 1))
input_sketch = np.transpose(input_sketch, (0, 2, 3, 1))
generated_img = ((generated_img + 1) / 2.) * 255
gt_image = ((gt_image + 1) / 2.) * 255
input_sketch = ((input_sketch + 1) / 2.) * 255
generated_img = generated_img[:, :, :, ::-1].astype(np.uint8)
gt_image = gt_image[:, :, :, ::-1].astype(np.uint8)
input_sketch = input_sketch.astype(np.uint8)
# input_sketch = 1 - (input_sketch < 0.025)
# for i in range(int(batch_size / 2)):
# output_img[:img_dim, i * img_dim:(i + 1) * img_dim, :] = input_sketch[i]
# output_img[img_dim:, i * img_dim:(i + 1) * img_dim, :] = generated_img[i]
# output_img = output_img[:, :int(batch_size / 2 + 1) * img_dim, :]
for i in range(batch_size):
this_prefix = '%s_%d_%d' % (category[i].decode('ascii'),
int(imagenet_id[i].decode('ascii').split('_')[1]),
sketch_id[i])
img_out_filename = this_prefix + '_fake_B.png'
img_gt_filename = this_prefix + '_real_B.png'
sketch_in_filename = this_prefix + '_real_A.png'
# Save file
# file_path = os.path.join(output_folder, 'output_%d.jpg' % int(counter / batch_size))
cv2.imwrite(os.path.join(output_folder, img_out_filename), generated_img[i])
cv2.imwrite(os.path.join(output_folder, img_gt_filename), gt_image[i])
cv2.imwrite(os.path.join(output_folder, sketch_in_filename), input_sketch[i])
# output_img = np.zeros((img_dim * 2, img_dim * batch_size, channel))
print('Saved file %s' % this_prefix)
counter += 1
coord.request_stop()
coord.join(threads)
else:
raise NotImplementedError
|
Production = True
from bin import app
run = app.app.server
PORT = 8050
ADDRESS = '0.0.0.0'
if __name__ == '__main__':
    if Production:
        app.app.run_server(port=PORT, host=ADDRESS, debug=False, threaded=True)
    else:
        app.app.run_server(port=PORT, host=ADDRESS, debug=True)
|
import math
import torch
import torch.nn.functional as F
from torch.distributions import Transform, constraints
class CouplingTransform(Transform):
r"""
    Affine coupling layer (in the style of NICE / RealNVP).
    The input of size ``dim = d_residue + d_transform`` is split into a transform
    block (the first ``d_transform`` features) and a residue block (the last
    ``d_residue`` features); the residue conditions an elementwise scale and shift
    applied to the transform block.
    f : batch x d_residue -> batch x (2*d_transform), producing (pre_scale, bias)
"""
domain = constraints.real
codomain = constraints.real
bijective = True
event_dim = 1
def __init__(self, d_residue, f, cache_size=1):
super().__init__(cache_size=cache_size)
self.d_residue = d_residue
self._f = f
self._cached_s = None
def _call(self, x):
x_transform, x_residue = self.partition(x)
pre_s, bias = self.f_split(x_residue)
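        # softplus(pre_s + log(e - 1)) equals 1 when pre_s == 0, so the scale is
        # always positive and the coupling starts close to the identity map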
s = F.softplus(pre_s + math.log(math.e - 1))
self._cached_s = s
y_transform = s * x_transform + bias
y = torch.cat([y_transform, x_residue], -1)
return y
def _inverse(self, y):
y_transform, y_residue = self.partition(y)
pre_s, bias = self.f_split(y_residue)
s = F.softplus(pre_s + math.log(math.e - 1))
x_transform = (y_transform - bias) / s
x = torch.cat([x_transform, y_residue], -1)
return x
def log_abs_det_jacobian(self, x, y):
s = self._get_cached_s(x)
if s is None:
x_residue = self.partition(x)[1]
pre_s, _ = self.f_split(x_residue)
s = F.softplus(pre_s + math.log(math.e - 1))
return s.abs().log().sum(-1)
def _get_cached_s(self, x):
x_old, _ = self._cached_x_y
if self._cached_s is not None and x is x_old:
return self._cached_s
return None
def partition(self, x):
d = x.shape[-1]
return x[..., : d - self.d_residue], x[..., d - self.d_residue :]
def f_split(self, x):
out = self._f(x)
d = out.shape[-1] // 2
return out[..., :d], out[..., d:]
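if __name__ == "__main__":
    # Minimal usage sketch (the conditioner network and the sizes below are
    # illustrative assumptions, not part of the module above).
    import torch.nn as nn
    d_residue, d_transform = 2, 3
    conditioner = nn.Sequential(
        nn.Linear(d_residue, 16), nn.ReLU(), nn.Linear(16, 2 * d_transform))
    coupling = CouplingTransform(d_residue, conditioner)
    x = torch.randn(8, d_residue + d_transform)
    y = coupling(x)                                # forward pass
    log_det = coupling.log_abs_det_jacobian(x, y)  # per-sample log|det J|
    x_rec = coupling.inv(y)                        # inverse reconstructs x
    assert torch.allclose(x, x_rec, atol=1e-4)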
|
# -*- coding: utf-8 -*-
from typing import Optional
from pip_services3_expressions.io.IScanner import IScanner
from pip_services3_expressions.tokenizers.IQuoteState import IQuoteState
from pip_services3_expressions.tokenizers.ITokenizer import ITokenizer
from pip_services3_expressions.tokenizers.Token import Token
from pip_services3_expressions.tokenizers.TokenType import TokenType
from pip_services3_expressions.tokenizers.utilities.CharValidator import CharValidator
class GenericQuoteState(IQuoteState):
"""
A quoteState returns a quoted string token from a scanner. This state will collect characters
until it sees a match to the character that the tokenizer used to switch to this state.
For example, if a tokenizer uses a double-quote character to enter this state,
then :func:`next_token <GenericQuoteState.next_token>` will search for another double-quote until it finds one
or finds the end of the scanner.
"""
def next_token(self, scanner: IScanner, tokenizer: ITokenizer) -> Token:
"""
Return a quoted string token from a scanner. This method will collect
characters until it sees a match to the character that the tokenizer used
to switch to this state.
:param scanner: A textual string to be tokenized.
:param tokenizer: A tokenizer class that controls the process.
:return: The next token from the top of the stream.
"""
first_symbol = scanner.read()
token_value = chr(first_symbol)
line = scanner.peek_line()
column = scanner.peek_column()
next_symbol = scanner.read()
while not CharValidator.is_eof(next_symbol):
token_value = token_value + chr(next_symbol)
if next_symbol == first_symbol:
break
next_symbol = scanner.read()
return Token(TokenType.Quoted, token_value, line, column)
def encode_string(self, value: str, quote_symbol: int) -> Optional[str]:
"""
Encodes a string value.
:param value: A string value to be encoded.
:param quote_symbol: A string quote character.
:return: An encoded string.
"""
if value is None:
return None
result = chr(quote_symbol) + value + chr(quote_symbol)
return result
def decode_string(self, value: str, quote_symbol: int) -> Optional[str]:
"""
Decodes a string value.
:param value: A string value to be decoded.
:param quote_symbol: A string quote character.
        :return: A decoded string.
"""
if value is None:
return None
if len(value) >= 2 and ord(value[0]) == quote_symbol and ord(value[len(value) - 1]) == quote_symbol:
return value[1:len(value) - 1]
return value
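if __name__ == '__main__':
    # Minimal sketch of the encode/decode helpers above (34 == ord('"')).
    state = GenericQuoteState()
    assert state.encode_string('abc', 34) == '"abc"'
    assert state.decode_string('"abc"', 34) == 'abc'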
|
#!/usr/bin/env python
# coding: utf-8
# # 2020 Cyberinfrastructure for Intelligent Water Supply (CIWS) Data Visualization Challenge
# ### Datasets
# * ClassifiedEvents
# * RawData
# #### Import Libraries
# In[1]:
# These libraries are already installed on the Hydroshare Server
import pandas as pd # Data manipulation and analysis library
import matplotlib.pyplot as plt # Comprehensive library for creating static, animated, and interactive visualizations
import datetime # Library that supplies classes for manipulating dates and times
import seaborn as sns # Data visualization library based on matplotlib
# Seaborn configuration: Plot size, grid type, and lines width
sns.set()
sns.set(rc={'figure.figsize':(16,12)})
sns.set_style(style="whitegrid")
sns.set_context("notebook", font_scale=2, rc={"lines.linewidth": 2})
# #### Import Data
# In[2]:
# Read csv data file as a dataframe using pandas
dateparse = lambda x: datetime.datetime.strptime(x, '%Y-%m-%d %H:%M:%S')  # use the datetime module imported above instead of the deprecated pd.datetime alias
Events = pd.read_csv('Classified_Events.csv')
RawData = pd.read_csv('Raw_Data.csv', index_col ='Time',parse_dates = ['Time'], date_parser=dateparse, skiprows = 3)
RawData = RawData.drop(columns=['Record'])
# the output of this step are tables of raw data and classified events
# In[3]:
# Inspect the first 5 rows of the classified events table
Events.head()
# In[4]:
# Inspect the first 5 rows of raw data table
RawData.head()
# #### Water use variation for each end use type
# + Using the boxplot function from Seaborn and Matplotlib
# + Create horizontal subplots, one for each water use feature (Volume, Duration, and Flowrate)
# + Plot on each subplot axis
# In[5]:
fig, axes = plt.subplots(3, 1, sharex=False, figsize=(20,12))
sns.boxplot(ax=axes[0], x="Label", y="Duration(min)", data=Events, palette="Set2")
sns.boxplot(ax=axes[1], x="Label", y="Volume(gal)", data=Events, palette="Set2")
sns.boxplot(ax=axes[2], x="Label", y="Flowrate(gpm)", data=Events, palette="Set2")
axes[2].set(xlabel='Event type')
axes[1].set(xlabel='')
axes[0].set(xlabel='')
# The output of this step is a box and whisker plot that shows the variation in each water end use type compared to
# other water end use types.
# In[6]:
# Same plots above considering indoor water use only
# Exclude outdoor water use events from the table
IndoorEvents = Events[Events.Label != "irrigation"]
IndoorEvents = IndoorEvents[IndoorEvents.Label != "hose"]
# In[7]:
fig, axes = plt.subplots(3, 1, sharex=True, figsize=(20,12))
sns.boxplot(ax=axes[0], x="Label", y="Duration(min)", data=IndoorEvents, palette="Set2")
sns.boxplot(ax=axes[1], x="Label", y="Volume(gal)", data=IndoorEvents, palette="Set2")
sns.boxplot(ax=axes[2], x="Label", y="Flowrate(gpm)", data=IndoorEvents, palette="Set2")
sns.despine(right=False)
axes[2].set(xlabel='Event type')
axes[1].set(xlabel='')
axes[0].set(xlabel='')
axes[0].set_title('Indoor Water Use')
# #### Shower events duration compared to RWEUS2016 Study
# + Residential Water End Use Study (RWEUS2016) URL: https://www.circleofblue.org/wp-content/uploads/2016/04/WRF_REU2016.pdf
# + Using the violinplot function from Seaborn and Matplotlib
# + A violin plot is a combination of a box plot and a kernel density plot
# + The width of the violin represents the probability density, so skinnier sections represent less likely values
# + Add a horizontal line that represents the average shower duration from the RWEUS2016 study
# In[8]:
ShowerEvents = Events[Events.Label == "shower"]
ax = sns.violinplot(x="Label", y="Duration(min)", data=ShowerEvents, palette="colorblind")
sns.despine(right=True)
ax.set(xlabel='', ylabel='Duration(min)')
ax.axhline(y = 8, c = 'red')
# #### Daily and hourly water use
# In[9]:
# Aggregate pulses by hour and calculate the average number of pulses per each hour
Use_Hour = RawData.groupby(RawData.index.hour).mean()
Use_Hour.Pulses = Use_Hour.Pulses * 0.041619 * 15 * 60 # where 0.041619 is the meter resolution, 15 is the number of 4-second intervals in one minute (60/4)
# and 60 is the number of minutes in an hour
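# A small helper makes the unit conversion above explicit (illustrative only;
# the constants mirror the comment above and the helper is not used below)
GALLONS_PER_PULSE = 0.041619   # meter resolution
SAMPLES_PER_MINUTE = 60 // 4   # one pulse count every 4 seconds
def pulses_to_gallons_per_hour(mean_pulses_per_sample):
    # mean pulses per 4-second sample -> gallons per hour
    return mean_pulses_per_sample * GALLONS_PER_PULSE * SAMPLES_PER_MINUTE * 60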
# In[10]:
ax = sns.barplot(x= Use_Hour.index, y="Pulses", data=Use_Hour, palette="muted")
sns.despine(right=True)
ax.set(xlabel='Hour of the day', ylabel='Water use (gal)')
# The output of this step is the average water use per hour
# ##### Excluding irrigation hours (from 3 to 7 AM)
# In[11]:
Use_Hour.drop([3,4,5,6,7], inplace = True)
# In[12]:
ax = sns.barplot(x= Use_Hour.index, y="Pulses", data=Use_Hour, palette="muted")
sns.despine(right=True)
ax.set(xlabel='Hour of the day', ylabel='Water use (gal)')
# The output of this step is the average water use per hour for indoor water use
# In[13]:
# Aggregate pulses by day and calculate the average number of pulses in each day
Use_Day = RawData.groupby(RawData.index.day_name()).mean()
Use_Day.Pulses = Use_Day.Pulses * 0.041619 * 15 * 60 * 24 # 0.041619 is the meter resolution, 15 is the number of 4-second intervals in one minute (60/4)
# 60 is the number of minutes in an hour, and 24 is the number of hours in a day
# In[14]:
# Create an array with days of the week names and map it to the daily water use table (We did that so we can sort the days from Monday to Sunday)
weekdays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
mapping = {day: i for i, day in enumerate(weekdays)}
key = Use_Day.index.map(mapping)
Use_Day = Use_Day.iloc[key.argsort()]
# In[15]:
ax = sns.barplot(x= Use_Day.index, y="Pulses", data=Use_Day, palette="colorblind")
sns.despine(right=True)
ax.set(xlabel='Day of the week', ylabel='Water use (gal)')
# The output of this step is the average water use per day
# #### By Nour Atallah
|
import progressbar as pb
import torch
import time
from sys_monitor import memory_usage_in_megabytes
from ppo_agent import PPOAgent
from model import Actor, Critic
import numpy as np
from collections import deque
def train(env, agent, episodes, max_t, print_every, logger, checkpoints_dir):
widget = ['training loop: ', pb.Percentage(), ' ', pb.Bar(), ' ', pb.ETA()]
timer = pb.ProgressBar(widgets=widget, maxval=episodes).start()
scores_deque = deque(maxlen=100)
for i_episode in range(1, episodes+1):
states = env.reset()
agent.reset()
score = np.zeros(env.num_agents)
start = time.time()
for t in range(max_t):
actions, action_probs = agent.act(states)
next_states, rewards, dones, info = env.step(actions)
agent.step(states, actions, action_probs, rewards, next_states, dones)
states = next_states
score += rewards
logger.add_histogram("states", states[0], i_episode)
logger.add_histogram("rewards", rewards[0], i_episode)
logger.add_histogram("actions", actions[0], i_episode)
if np.any(dones):
break
# time.sleep(0.5)
agent.episode_done(i_episode)
logger.add_histogram("scores", score, i_episode)
scores_deque.append(np.mean(score))
time_spent = time.time() - start
print(f"Episode {i_episode}/{episodes}\t",
f"Average Score: {np.mean(score):.2f}\t",
f"Last 100 Average score: {np.mean(scores_deque):.2f}\t",
f"Memory usage: {memory_usage_in_megabytes():.2f}MB\t",
f"Time spent: {time_spent} seconds")
if i_episode % print_every == 0:
print()
timer.update(i_episode)
if np.mean(scores_deque) > 30:
print(f"\nEnvironment solved in {i_episode} episodes!\t Average Score: {np.mean(scores_deque):.2f}")
torch.save(agent.actor_local.state_dict(), f"{checkpoints_dir}/checkpoint_actor.pth")
torch.save(agent.critic_local.state_dict(),f"{checkpoints_dir}/checkpoint_critic.pth")
logger.add_scalar(f"data/score", np.mean(score), i_episode)
timer.finish()
def create_agent(env, learning_rate, batch_size, discount,
clip_ratio, optimization_epochs, value_loss_weight,
entropy_weight, entropy_reduction_rate, max_grad_norm, device,
seed, logger):
def create_actor():
return Actor(
state_size=env.state_size,
action_size=env.action_size,
seed=seed).to(device)
def create_critic():
return Critic(
state_size=env.state_size,
seed=seed).to(device)
agent = PPOAgent(
create_actor=create_actor,
create_critic=create_critic,
state_size=env.state_size,
num_agents=env.num_agents,
optimization_epochs=optimization_epochs,
batch_size=batch_size,
discount=discount,
clip_ratio=clip_ratio,
value_loss_weight=value_loss_weight,
entropy_weight=entropy_weight,
entropy_reduction_rate=entropy_reduction_rate,
lr=learning_rate,
max_grad_norm=max_grad_norm,
device=device,
seed=seed,
logger=logger)
return agent
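# Illustrative wiring of the two functions above; the environment wrapper,
# logger, and hyperparameter values are assumptions, not part of this module:
#   agent = create_agent(env, learning_rate=3e-4, batch_size=256, discount=0.99,
#                        clip_ratio=0.2, optimization_epochs=4, value_loss_weight=0.5,
#                        entropy_weight=0.01, entropy_reduction_rate=0.999,
#                        max_grad_norm=0.5, device="cpu", seed=0, logger=logger)
#   train(env, agent, episodes=300, max_t=1000, print_every=10,
#         logger=logger, checkpoints_dir="./checkpoints")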
|
import re
class Generator:
def __init__(self, link, start, end):
self.link = link
self.start = start
self.end = end
self.validate()
def loop(self):
#Iterates over numbers from start to end
for episode in range(self.start, self.end+1):
#Print out links for all episodes
self.generate(episode)
def generate(self, number):
#Get the size that we have to generate
size = len(re.search("-0{0,2}1-1080p", self.link).group()) - 7
#Pad the required number of zeros
number = str(number).zfill(size)
#Do the necessary replacements
print(re.sub("-0{0,2}1-1080p", "-%s-1080p" % number, self.link))
def validate(self):
#Checks if there is a query string, dumps it and changes hostname
if(self.link.find('?') + 1):
self.link = self.link[:self.link.index('?')].replace('://', '://storage.googleapis.com/linear-theater-254209.appspot.com/')
#Checks if the link is supported
if(re.search("-0{0,2}1-1080p", self.link)):
self.loop()
#This format isn't supported, inform the user of the same
else:
print("We don't support this format, please check documentation")
|
from flask import json, Flask, render_template, request, Response
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__) # we create an instance of the Flask class with our module name
app.secret_key = "secret key" # for encrypting sessions; don't worry about this for now
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:@localhost/cookie_tracker' # configure the database connection
db = SQLAlchemy(app)
class Cookie(db.Model): # model the cookie table we have in our database
id = db.Column(db.Integer, primary_key=True, autoincrement=True, nullable=False)
name = db.Column(db.String(255), nullable=False)
rating = db.Column(db.Float, nullable=False)
def __init__(self, name, rating): # constructor function
self.name = name
self.rating = rating
@app.route('/cookies') # route decorator tells Flask what URL should trigger this function
def list_cookies(): # we name the function list_cookies
cookies = Cookie.query.all()
return render_template('main.html', cookies=cookies)# we render main.html and pass data over under the param "cookies"
@app.route('/cookie', methods=['POST']) # we specify that this route is only accessible as a POST to /cookie
def add_cookie():
data = request.form # get the request body
if data["name"] == "":
        js = json.dumps({"message": "failed!"})
return Response(js, status=400, mimetype='application/json')
else:
cookie = Cookie(name=data["name"], rating=data["rating"])
db.session.add(cookie)
db.session.commit()
        js = json.dumps({"message": "success!"})
return Response(js, status=200, mimetype='application/json')
if __name__ == '__main__': # check if we're running the "main" function
app.run(debug=True) # run on debug mode (this allows for hot reload)
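# Hedged usage sketch (host and port are Flask's defaults and may differ locally):
#   curl -X POST -d "name=Chocolate Chip" -d "rating=4.5" http://127.0.0.1:5000/cookie
#   curl http://127.0.0.1:5000/cookies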
|
"""
.. module:: view_augmented_images
:synopsis: Example nuts-ml pipeline for viewing augmented image data
"""
from nutsflow import Take, Consume
from nutsml import ViewImageAnnotation, AugmentImage
if __name__ == "__main__":
from cnn_train import load_samples
train_samples, _ = load_samples()
p = 0.5
augment = (AugmentImage(0)
.by('identical', 1.0)
.by('elastic', p, [5, 5], [50, 50], [0, 100])
.by('brightness', p, [0.7, 1.3])
.by('rotate', p, [-10, 10])
.by('fliplr', p)
)
show_image = ViewImageAnnotation(0, 1, pause=1, figsize=(2, 2),
interpolation='spline36')
(train_samples >> Take(10) >> augment >> show_image >> Consume())
|
# -*- coding: utf-8 -*-
# Copyright 2016 Mirantis, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import mock
import mockfs
import os
import pytest
import sys
import jsonschema
from jimmy import cli
from mock import call
from click.testing import CliRunner
from jimmy.lib.common import yaml_reader
from jimmy.tests import base
modules_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
jimmy_dir = os.path.dirname(modules_dir)
artifactory_schema_path = os.path.join(modules_dir, 'artifactory', 'resources', 'schema.yaml')
jenkins_yaml_path = os.path.join(jimmy_dir, 'sample', 'input', 'jenkins.yaml')
class TestArtifactoryModule(base.TestCase):
def setup_method(self, method):
self.runner = CliRunner()
def teardown_method(self, method):
mockfs.restore_builtins()
@mock.patch('jimmy.lib.core.load_py_modules')
@mock.patch('subprocess.call')
def test_cli_call(self, mock_subp, mock_modules):
with open(artifactory_schema_path, 'r') as f:
mock_artifactory_schema = f.read()
self.mfs = mockfs.replace_builtins()
self.mfs.add_entries({os.path.join(jimmy_dir, 'lib', 'schema.yaml'): self.jimmy_schema,
os.path.join(jimmy_dir, 'jimmy.yaml'): self.mock_jimmy_yaml,
artifactory_schema_path: mock_artifactory_schema,
jenkins_yaml_path: '\n'.join(
[
'jenkins:',
' artifactory:',
' build_info_proxy:',
' port: 9876',
' servers:',
' - id: artifactory-server',
' url: artifactory.example.com',
' deployer_credentials_id: artifactory-credentials',
' resolver_credentials_id: resolver-credentials',
' timeout: 600',
' bypass_jenkins_proxy: false',
' - id: artifactory-server-dev',
' url: artifactory-dev.example.com',
' deployer_credentials_id: artifactory-dev-credentials',
' resolver_credentials_id: resolver-dev-credentials',
' timeout: 600',
' bypass_jenkins_proxy: false'
])
})
sys.path.insert(0, modules_dir)
import artifactory
import read_source
sys.path.pop(0)
mock_modules.return_value = [artifactory, read_source]
os.chdir(jimmy_dir)
self.runner.invoke(cli)
calls = [call(['java',
'-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'artifactory/resources/jenkins.groovy',
'setGlobalConfig',
'9876'
], shell=False),
call(['java',
'-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'artifactory/resources/jenkins.groovy',
'setServerConfig',
'artifactory-server',
'artifactory.example.com',
'artifactory-credentials',
'resolver-credentials',
'600',
'False'
], shell=False),
call(['java',
'-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'artifactory/resources/jenkins.groovy',
'setServerConfig',
'artifactory-server-dev',
'artifactory-dev.example.com',
'artifactory-dev-credentials',
'resolver-dev-credentials',
'600',
'False'
], shell=False)]
mock_subp.assert_has_calls(calls, any_order=True)
assert 3 == mock_subp.call_count, "subprocess call should be equal to 3"
class TestArtifactorySchema(object):
def setup_method(self, method):
with open(artifactory_schema_path, 'r') as f:
mock_artifactory_schema = f.read()
self.mfs = mockfs.replace_builtins()
self.mfs.add_entries({artifactory_schema_path: mock_artifactory_schema})
self.schema = yaml_reader.read(artifactory_schema_path)
def teardown_method(self, method):
mockfs.restore_builtins()
def test_valid_repo_data(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'build_info_proxy:',
' port: 9876',
'servers:',
'- id: artifactory-server',
' url: artifactory.example.com',
' deployer_credentials_id: artifactory-credentials',
' resolver_credentials_id: resolver-credentials',
' timeout: 600',
' bypass_jenkins_proxy: False'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
jsonschema.validate(repo_data, self.schema)
def test_validation_fail_if_port_is_not_integer(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'build_info_proxy:',
' port: test',
'servers:',
' - id: artifactory-server',
' url: artifactory.example.com',
' deployer_credentials_id: artifactory-credentials'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'test' is not of type 'integer'"
def test_validation_fail_for_servers_not_array(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'servers:',
' url: artifactory.example.com',
' deployer_credentials_id: artifactory-credentials'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
        # the dictionary may be rendered in a different order,
        # so only assert on the suffix of the error message
assert excinfo.value.message.endswith(" is not of type 'array'")
def test_validation_fail_for_id_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'servers:',
'- url: artifactory.example.com',
' deployer_credentials_id: artifactory-credentials',
' resolver_credentials_id: resolver-credentials',
' timeout: 600',
' bypass_jenkins_proxy: False'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'id' is a required property"
def test_validation_fail_for_url_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'servers:',
'- id: artifactory-server',
' deployer_credentials_id: artifactory-credentials',
' resolver_credentials_id: resolver-credentials',
' timeout: 600',
' bypass_jenkins_proxy: False'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'url' is a required property"
def test_validation_fail_for_deployer_credentials_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'servers:',
'- id: artifactory-server',
' url: artifactory.example.com',
' resolver_credentials_id: resolver-credentials',
' timeout: 600',
' bypass_jenkins_proxy: False'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'deployer_credentials_id' is a required property"
def test_validation_fail_if_id_is_not_string(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'servers:',
'- id: 123',
' url: artifactory.example.com',
' deployer_credentials_id: artifactory-credentials',
' resolver_credentials_id: resolver-credentials',
' timeout: 600',
' bypass_jenkins_proxy: False'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'string'"
def test_validation_fail_if_url_is_not_string(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'servers:',
'- id: artifactory-server',
' url: 123',
' deployer_credentials_id: artifactory-credentials',
' resolver_credentials_id: resolver-credentials',
' timeout: 600',
' bypass_jenkins_proxy: False'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'string'"
def test_validation_fail_if_deployer_credentials_is_not_string(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'servers:',
'- id: artifactory-server',
' url: artifactory.example.com',
' deployer_credentials_id: 123',
' resolver_credentials_id: resolver-credentials',
' timeout: 600',
' bypass_jenkins_proxy: False'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'string'"
def test_validation_fail_if_resolver_credentials_is_not_string(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'servers:',
'- id: artifactory-server',
' url: artifactory.example.com',
' deployer_credentials_id: artifactory-credentials',
' resolver_credentials_id: 123',
' timeout: 600',
' bypass_jenkins_proxy: False'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'string'"
def test_validation_fail_if_timeout_is_not_integer(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'servers:',
'- id: artifactory-server',
' url: artifactory.example.com',
' deployer_credentials_id: artifactory-credentials',
' resolver_credentials_id: resolver-credentials',
' timeout: test',
' bypass_jenkins_proxy: False'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'test' is not of type 'integer'"
def test_validation_fail_if_bypass_proxy_is_not_boolean(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'servers:',
'- id: artifactory-server',
' url: artifactory.example.com',
' deployer_credentials_id: artifactory-credentials',
' resolver_credentials_id: resolver-credentials',
' timeout: 600',
' bypass_jenkins_proxy: test'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'test' is not of type 'boolean'"
def test_validation_fail_for_additional_properties(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'servers:',
'- id: artifactory-server',
' url: artifactory.example.com',
' deployer_credentials_id: artifactory-credentials',
' resolver_credentials_id: resolver-credentials',
' timeout: 600',
' bypass_jenkins_proxy: False',
' test: test'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "Additional properties are not allowed ('test' was unexpected)"
|
#!/usr/bin/env python
from distutils.core import setup
classifiers = [
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: GNU General Public License (GPL)'
]
setup(name="Py Neuroshare",
version="0.4.2",
description="Python port of the Neuroshare API",
author="Ripple LLC",
author_email="support@rppl.com",
packages=["pyns"],
classifiers=classifiers
)
|
import numpy as np
from ..constants import res_path as res
from ..constants import tol_path as tol
def discretize_bezier(points, count=None, scale=1.0):
'''
Arguments
----------
points: (o,d) list of points of the bezier. The first and last
points should be the start and end of the curve.
For a 2D cubic bezier, order o=3, dimension d=2
Returns
----------
discrete: (n,d) list of points, a polyline representation
of the bezier curve which respects constants.RES_LENGTH
'''
def compute(t):
# compute discrete points given a sampling t
t_d = 1.0 - t
n = len(points) - 1
        # binomial coefficients, i, and each point
iterable = zip(binomial(n), np.arange(n+1), points)
stacked = [((t**i)*(t_d**(n-i))).reshape((-1,1))*p*c for c,i,p in iterable]
discrete = np.sum(stacked, axis=0)
return discrete
# make sure we have a numpy array
points = np.array(points)
if count is None:
# how much distance does a small percentage of the curve take
# this is so we can figure out how finely we have to sample t
norm = np.linalg.norm(np.diff(points, axis=0), axis=1).sum()
count = np.ceil(norm / (res.seg_frac * scale))
count = int(np.clip(count,
res.min_sections*len(points),
res.max_sections*len(points)))
result = compute(np.linspace(0.0, 1.0, count))
test = np.sum((result[[0,-1]] - points[[0,-1]])**2, axis=1)
assert (test < tol.merge).all()
assert len(result) >= 2
return result
def discretize_bspline(control, knots, count=None, scale=1.0):
'''
Given a B-Splines control points and knot vector, return
a sampled version of the curve.
Arguments
----------
    control: (o,d) list of control points of the b-spline.
knots: (j) list of knots
count: int, number of sections to discretize the spline in to.
If not specified, RES_LENGTH will be used to inform this.
Returns
----------
discrete: (count,d) list of points, a polyline of the B-spline.
'''
# evaluate the b-spline using scipy/fitpack
from scipy.interpolate import splev
# (n, d) control points where d is the dimension of vertices
control = np.array(control)
degree = len(knots) - len(control) - 1
if count is None:
norm = np.linalg.norm(np.diff(control, axis=0), axis=1).sum()
count = int(np.clip(norm / (res.seg_frac*scale),
res.min_sections*len(control),
res.max_sections*len(control)))
ipl = np.linspace(knots[0], knots[-1], count)
discrete = splev(ipl, [knots, control.T, degree])
discrete = np.column_stack(discrete)
return discrete
def binomial(n):
'''
    Return all binomial coefficients for a given order.
    For n > 5, scipy.special.binom is used; below we hardcode
    to avoid the scipy.special dependency.
'''
if n == 1: return [1,1]
elif n == 2: return [1,2,1]
elif n == 3: return [1,3,3,1]
elif n == 4: return [1,4,6,4,1]
elif n == 5: return [1,5,10,10,5,1]
else:
from scipy.special import binom
return binom(n,np.arange(n+1))
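# Hedged usage sketch (the control points below describe an arbitrary 2D cubic
# bezier; count and scale are optional, as documented above):
# discrete = discretize_bezier([[0, 0], [1, 2], [3, 2], [4, 0]], count=50)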
|
"""tests for vak.config.predict module"""
from configparser import ConfigParser
from pathlib import Path
import unittest
import vak.config.predict
import vak.split
from vak.core.learncurve import LEARN_CURVE_DIR_STEM
HERE = Path(__file__).parent
TEST_DATA_DIR = HERE.joinpath('..', '..', 'test_data')
TEST_CONFIGS_PATH = TEST_DATA_DIR.joinpath('configs')
class TestParsePredictConfig(unittest.TestCase):
def setUp(self):
a_results_dir = list(
TEST_DATA_DIR.joinpath('results').glob(
f'{LEARN_CURVE_DIR_STEM}*'))[0]
a_training_records_dir = list(
Path(a_results_dir).joinpath(
'train').glob('records_for_training_set*'))[0]
checkpoint_path = str(Path(a_training_records_dir).joinpath(
'TweetyNet', 'checkpoints'))
spect_scaler = list(
Path(a_training_records_dir).glob('spect_scaler_*'))[0]
spect_scaler = str(spect_scaler)
# rewrite config so it points to data for testing + temporary output dirs
a_config = str(TEST_CONFIGS_PATH.joinpath('test_predict_config.ini'))
config = ConfigParser()
config.read(a_config)
config['PREDICT']['checkpoint_path'] = checkpoint_path
config['PREDICT']['spect_scaler_path'] = spect_scaler
test_data_vds_path = list(TEST_DATA_DIR.glob('vds'))[0]
test_data_vds_path = Path(test_data_vds_path)
for stem in ['train', 'test']:
vds_path = list(test_data_vds_path.glob(f'*.{stem}.vds.json'))
self.assertTrue(len(vds_path) == 1)
vds_path = vds_path[0]
if stem == 'train':
config['PREDICT']['train_vds_path'] = str(vds_path)
elif stem == 'test':
# pretend test data is data we want to predict
config['PREDICT']['predict_vds_path'] = str(vds_path)
self.config_obj = config
def test_parse_predict_config_returns_PredictConfig_instance(self):
predict_config_obj = vak.config.predict.parse_predict_config(self.config_obj)
self.assertTrue(type(predict_config_obj) == vak.config.predict.PredictConfig)
def test_no_networks_raises(self):
self.config_obj.remove_option('PREDICT', 'networks')
with self.assertRaises(KeyError):
vak.config.predict.parse_predict_config(self.config_obj)
def test_network_not_installed_raises(self):
self.config_obj['PREDICT']['networks'] = 'NotInstalledNet, OtherNotInstalledNet'
with self.assertRaises(TypeError):
vak.config.predict.parse_predict_config(self.config_obj)
def test_missing_checkpoint_path_raises(self):
self.config_obj.remove_option('PREDICT', 'checkpoint_path')
with self.assertRaises(KeyError):
vak.config.predict.parse_predict_config(self.config_obj)
def test_missing_predict_vds_path_raises(self):
self.config_obj.remove_option('PREDICT', 'predict_vds_path')
with self.assertRaises(KeyError):
vak.config.predict.parse_predict_config(self.config_obj)
def test_missing_train_vds_path_raises(self):
self.config_obj.remove_option('PREDICT', 'train_vds_path')
with self.assertRaises(KeyError):
vak.config.predict.parse_predict_config(self.config_obj)
def test_nonexistent_checkpoint_dir_raises(self):
self.config_obj['PREDICT']['checkpoint_path'] = 'obviously/non/existent/dir'
with self.assertRaises(NotADirectoryError):
vak.config.predict.parse_predict_config(self.config_obj)
def test_nonexistent_predict_vds_path_raises(self):
self.config_obj['PREDICT']['predict_vds_path'] = 'obviously/non/existent/dir'
with self.assertRaises(NotADirectoryError):
vak.config.predict.parse_predict_config(self.config_obj)
def test_nonexistent_train_vds_path_raises(self):
self.config_obj['PREDICT']['train_vds_path'] = 'obviously/non/existent/dir'
with self.assertRaises(NotADirectoryError):
vak.config.predict.parse_predict_config(self.config_obj)
if __name__ == '__main__':
unittest.main()
|
import heapq
import math
def factorization(n):
arr = []
temp = n
for i in range(2, int(-(-n**0.5//1))+1):
if temp % i == 0:
while temp % i == 0:
arr.append(i)
temp //= i
if temp != 1:
arr.append(temp)
if arr == []:
arr.append(n)
arr.sort()
return arr
N, P = map(int, input().split())
# Turn the list of prime factors into a priority queue (min-heap)
factors = factorization(P)
heapq.heapify(factors)
# If there are fewer prime factors than N, the answer is 1
if len(factors) < N:
print(1)
exit(0)
# If the number of prime factors equals N, the smallest factor is the answer
if len(factors) == N:
print(min(factors))
exit(0)
# When there are more than N prime factors, keep multiplying the smallest
# bucket by the next remaining factor until all factors are used, so that
# exactly N values remain; the GCD of those values is then printed.
ans = []
while True:
if len(ans) < N:
ans.append(heapq.heappop(factors))
else:
if len(factors) == 0:
break
idx = ans.index(min(ans))
ans[idx] = ans[idx]*heapq.heappop(factors)
if N == 1:
print(ans[0])
else:
    gcd = math.gcd(ans[0], ans[1])
if N == 2:
print(gcd)
else:
for i in range(2, N):
            gcd = math.gcd(gcd, ans[i])
print(gcd)
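# Worked example (illustrative): N=3, P=24 gives prime factors [2, 2, 2, 3];
# the three buckets become [6, 2, 2], so their GCD, 2, is printed.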
|
"""
This module defines the class that handles the data loading.
"""
import os

import pandas as pd
# Imports for testing
from KGTorrent import config
class DataLoader:
"""
This class stores the MetaKaggle version tables and the foreign key constraints table.
"""
def __init__(self, constraints_file_path, meta_kaggle_path):
"""
The constructor of this class loads Meta Kaggle and constraints ``.csv`` files from the given paths.
Args:
constraints_file_path: the path to the ``.csv`` file containing information on the foreign key constraints to be set. By default, it is located at ``/data/fk_constraints_data.csv``.
            meta_kaggle_path: The path to the folder containing the 29 ``.csv`` files of the MetaKaggle tables.
"""
# Dataframe containing constraints info:
# (Referencing Table, Foreign Key, Referenced Table, Referenced Column, IsSolved)
print('## Loading MetaKaggle constraints data...')
self._constraints_df = pd.read_csv(constraints_file_path)
# Array of table file names
table = self._constraints_df['Table']
referenced_table = self._constraints_df['Referenced Table']
union = table.append(referenced_table, ignore_index=True)
table_file_names = union.unique()
# Dictionary of tables
self._tables_dict = {}
# Reading tables
        print('## Loading MetaKaggle csv tables from provided path...')
for file_name in table_file_names:
            self._tables_dict[file_name] = pd.read_csv(os.path.join(meta_kaggle_path, file_name))
print(f'- {file_name} loaded.')
def get_constraints_df(self):
"""
This method returns the foreign key constraints ``pandas.DataFrame`` which contains constraints information:
Referencing Table, Foreign Key, Referenced Table, Referenced Column.
Returns:
constraints_df: The ``pandas.DataFrame`` containing the foreign key constraints information.
"""
return self._constraints_df
def get_tables_dict(self):
"""
This method returns the dictionary of all 29 MetaKaggle ``pandas.DataFrame`` tables.
Returns:
tables_dict: The dictionary whose keys are the table names and whose values are the ``pandas.DataFrame`` tables.
"""
return self._tables_dict
if __name__ == '__main__':
print("********************")
print("*** LOADING DATA ***")
print("********************")
dataloader = DataLoader(config.constraints_file_path, config.meta_kaggle_path)
print('CONSTRAINT DF\n', dataloader.get_constraints_df())
print('TABLES\n', dataloader.get_tables_dict().keys())
print(dataloader.get_tables_dict())
|
from .inspector import TrioInspector
|
import click
from .iroha_helpers import *
#ASSET COMMANDS#
def new_asset():
asset = click.prompt("New Asset Name")
domain = click.prompt("Domain")
precision = click.prompt("Precision",type=int)
create_new_asset(asset,domain,precision)
def new_asset_transfer(account_id):
src_account_id = click.prompt("Source Account",default=account_id)
recipient = click.prompt("Recipient")
asset_id = click.prompt("AssetID : asset#domain")
qty = click.prompt("Total Amount to Send")
description = click.prompt("Enter Transaction Details")
transfer_asset(src_account_id,recipient,asset_id,description,qty)
def increase_asset_qty():
asset_id = click.prompt("AssetID : asset#domain")
qty = click.prompt("Qty To Add")
add_asset_qty(asset_id,qty)
def decrease_asset_qty():
asset_id = click.prompt("AssetID : asset#domain")
qty = click.prompt("Qty To Subtract")
subtract_asset_qty(asset_id,qty)
#ASSET QUERIES
def view_account_asset_balance(account_id):
account_id = click.prompt("Account To Use : Username@domain",default=account_id)
get_account_assets(account_id)
def grant_asset_read_permission(account_id):
account_id = click.prompt("Account To Use : Username@domain",default=account_id)
contact = click.prompt("Username@domain Your Write Acc Granting Permission")
grant_account_read_permission(creator_account=account_id,contact=contact)
def query_asset_tx_history(account_id):
account_id = click.prompt("Account To Use : Username@domain",default=account_id)
total = click.prompt("Total Txs to return",default=50)
get_acc_tx_history(creator_account=account_id,total=total)
#def query_domain_assets():
# click.echo("Checking For Pending Transactions That Require Signatures")
# get_domain_assets()
|
from app import app
def test_app_returns_200():
# Arrange
client = app.test_client()
# client.testing = True
# Act
result = client.get("/")
# Assert
assert result.status_code == 200
def test_app_page_has_expected_text():
# Arrange
client = app.test_client()
# Act
result = client.get("/")
# Assert
assert "Before you commit, every commit!" in str(result.data)
|
# -*- coding: utf-8 -*-
import numpy as np
from collections import deque
from sortedcontainers import SortedDict
class Buffer(object):
# __slots__ = ('types', 'size', 'batches')
def __init__(self, size):
self.size = size
self.clean()
def clean(self):
self.types = {}
self.batches = []
def get_batches(self, type_id=None):
if type_id is None:
result = []
for value in self.types.values():
result += self.batches[value]
return result
return self.batches[self.get_type(type_id)]
def has_atleast(self, frames, type=None):
return self.count(type) >= frames
def has(self, frames, type=None):
return self.count(type) == frames
def count(self, type=None):
if type is None:
if len(self.batches) == 0:
return 0
return sum(len(batch) for batch in self.batches)
return len(self.batches[type])
def id_is_full(self, type_id):
return self.has(self.size, self.get_type(type_id))
def is_full(self, type=None):
if type is None:
return self.has(self.size*len(self.types))
return self.has(self.size, type)
def is_empty(self, type=None):
return not self.has_atleast(1, type)
def get_type(self, type_id):
self.add_type(type_id)
return self.types[type_id]
def add_type(self, type_id):
if type_id in self.types:
return
self.types[type_id] = len(self.types)
self.batches.append(deque())
def put(self, batch, type_id=0): # put batch into buffer
type = self.get_type(type_id)
if self.is_full(type):
self.batches[type].popleft()
self.batches[type].append(batch)
def sample(self):
# assert self.has_atleast(frames=1)
type = np.random.choice( [value for value in self.types.values() if not self.is_empty(value)] )
id = np.random.randint(0, len(self.batches[type]))
return self.batches[type][id]
class PrioritizedBuffer(Buffer):
def clean(self):
super().clean()
self.prefixsum = []
self.priorities = []
def get_batches(self, type_id=None):
if type_id is None:
result = []
for type in self.types.values():
result += self.batches[type].values()
return result
return self.batches[self.get_type(type_id)].values()
def add_type(self, type_id):
if type_id in self.types:
return
self.types[type_id] = len(self.types)
self.batches.append(SortedDict())
self.prefixsum.append([])
self.priorities.append({})
def get_priority_from_unique(self, unique):
return float(unique.split('#', 1)[0])
def build_unique(self, priority, count):
return '{:.5f}#{}'.format(priority,count) # new batch has higher unique priority than old ones with same shared priority
def put(self, batch, priority, type_id=0): # O(log)
type = self.get_type(type_id)
if self.is_full(type):
old_unique_batch_priority, _ = self.batches[type].popitem(index=0) # argument with lowest priority is always 0 because buffer is sorted by priority
old_priority = self.get_priority_from_unique(old_unique_batch_priority)
if old_priority in self.priorities[type] and self.priorities[type][old_priority] == 1: # remove from priority dictionary in order to prevent buffer overflow
del self.priorities[type][old_priority]
priority_count = self.priorities[type][priority] if priority in self.priorities[type] else 0
priority_count = (priority_count % self.size) + 1 # modular counter to avoid overflow
self.priorities[type][priority] = priority_count
unique_batch_priority = self.build_unique(priority,priority_count)
self.batches[type].update({unique_batch_priority: batch}) # O(log)
self.prefixsum[type] = None # compute prefixsum only if needed, when sampling
def keyed_sample(self): # O(n) after a new put, O(log) otherwise
type_id = np.random.choice( [key for key,value in self.types.items() if not self.is_empty(value)] )
type = self.get_type(type_id)
if self.prefixsum[type] is None: # compute prefixsum
self.prefixsum[type] = np.cumsum([self.get_priority_from_unique(k) for k in self.batches[type].keys()]) # O(n)
mass = np.random.random() * self.prefixsum[type][-1]
idx = np.searchsorted(self.prefixsum[type], mass) # O(log) # Find arg of leftmost item greater than or equal to x
keys = self.batches[type].keys()
if idx == len(keys): # this may happen when self.prefixsum[type] is negative
idx = -1
return self.batches[type][keys[idx]], idx, type_id
def sample(self): # O(n) after a new put, O(log) otherwise
return self.keyed_sample()[0]
def update_priority(self, idx, priority, type_id=0): # O(log)
type = self.get_type(type_id)
_, batch = self.batches[type].popitem(index=idx) # argument with lowest priority is always 0 because buffer is sorted by priority
self.put(batch, priority, type_id)
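if __name__ == "__main__":
    # Hedged usage sketch: the batch payloads and priorities are arbitrary values
    # chosen only to exercise the put/sample/update cycle defined above.
    buf = PrioritizedBuffer(size=4)
    for i in range(6):
        buf.put(batch={"step": i}, priority=float(i + 1), type_id=0)
    batch, idx, type_id = buf.keyed_sample()
    buf.update_priority(idx, priority=9.0, type_id=type_id)
    print(batch, idx, type_id)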
|
#!/usr/bin/python3
"""
---SUPERMARINE---
* Version 0.3b *
-----------------
Create a mixer geometry blockMesh dictionary for use in
The OpenFOAM toolkits.
Date: 07-2017
Author: Gabriel Lemieux (gabriel.lemieux@usherbrooke.ca)
TARGET : MIXER
-- LOG YOUR TWEAKS HERE ------------------------------
Key Description
------------------------------------------------------
MOD1 Vertex adjustment was added
------------------------------------------------------
--> Search the key to find the modification
------------------------------------------------------
*Quick Introduction*
DIVI : Angular, Radial and Vertical divisions per block
RQUAD : Radius of each QUADrant
This parameter must be a list of at least two elements,
The first being the center hole/square section.
NSEC : The Number of SECtors to create
Must be a multiple of 4 (12,24 and 36 are useful multiple of 4!)
HLAY : Height of each LAYers
This parameter must be a list of at least one element
SHAFT : Section on which the SHAFT exists
        This parameter must be a list which has the same number of elements
of HLAY
IMPELLERCUT : Where to CUT for the IMPELLER
        This parameter must be an NSEC x len(RQUAD) x len(HLAY) 3D matrix.
A 1 in a region means to cut that region for the impeller.
SQRRATIO : The RATIO for the distance between the center SQuaRe region and the
Outer cylinder.
Must be larger than 0 and smaller than 1.
____ __ __ _
/ ___| _ _ _ __ ___ _ __ | \/ | __ _ _ __(_)_ __ ___
\___ \| | | | '_ \ / _ \ '__|____| |\/| |/ _` | '__| | '_ \ / _ \
___) | |_| | |_) | __/ | |_____| | | | (_| | | | | | | | __/
|____/ \__,_| .__/ \___|_| |_| |_|\__,_|_| |_|_| |_|\___|
|_|
"""
import numpy as np
import json as js
def toRad(deg):
return deg * 2 * np.pi / 360
def rotz(vec, rad):
m = np.dot(vec, np.array([
[np.cos(rad), -np.sin(rad), 0],
[np.sin(rad), np.cos(rad), 0],
[0, 0, 1]
]))
return m
def superMarine(conf):
DIVI = conf["DIVI"]
RQUAD = conf["RQUAD"]
NSEC = conf["NSEC"]
HLAY = conf["HLAY"]
SHAFT = conf["SHAFT"]
LVLROT = conf["LVLROT"]
IMPELLERCUT = np.zeros((NSEC, len(RQUAD), len(HLAY)))
SQRRATIO = conf["SQRRATIO"]
# ----- READ THE DOCUMENTATIONS BEFORE CHANGING THE CODE -------- #
nCad = len(RQUAD)
# -- CYLINDER VERTEX -- #
# Create rotation unit vectors
cUnits = np.array([[1, 0, 0]])
for sector in range(0, NSEC - 1):
cUnits = np.append(cUnits, [rotz(cUnits[sector], toRad(360 / NSEC))], axis=0)
    # Multiply each unit vector by each radius;
    # this creates the vertex rings
vertex = np.empty([1, 3]) # Create a dummy at 0
for radius in RQUAD:
vertex = np.append(vertex, radius * cUnits, axis=0)
# Remove the dummy
vertex = np.delete(vertex, 0, axis=0)
    # Add the HLAY heights to each base ring;
    # this creates the ring layers
temp = vertex
for H in HLAY:
vertex = np.append(vertex, temp + [0, 0, H], axis=0)
# -- CYLINDER HEX -- #
    # This is the scaffold used to link
    # the right points together
hexSet = np.array([
0, 1, 1 + NSEC, NSEC, 0 + NSEC * nCad,
1 + NSEC * nCad, 1 + NSEC + NSEC * nCad, NSEC + NSEC * nCad
])
# The last one is a bit different
hexSetLast = np.array([
0, 1 - NSEC, 1, NSEC,
0 + NSEC * nCad, 1 + NSEC * (nCad - 1), 1 + NSEC * nCad, NSEC * (nCad + 1)
])
hexa = np.array([hexSet])
lastPos = 0
# Apply the scaffold
for H in range(0, len(HLAY)):
for quadrant in range(0, nCad - 1):
for sector in range(0, NSEC):
if not (IMPELLERCUT[sector, quadrant, H] == 1):
hexa = np.append(hexa, [
(hexSet if sector < NSEC - 1 else hexSetLast) + sector + quadrant * NSEC + H * NSEC * nCad], axis=0)
hexa = np.delete(hexa, 0, axis=0)
# -- CENTER VERTEX -- #
offset = len(vertex) # Offset of the cylinder and the center
div = int(NSEC / 4) # Number of inner division required
# Generate an inner grid using four equidistant points around
# the cylinder
for H in range(0, len(HLAY) + 1):
index = H * NSEC * nCad + np.arange(0, NSEC, NSEC / 4, dtype=int)
cvertex = vertex[index]
r = [SQRRATIO, SQRRATIO, 0]
        # Grid creation with rough interpolation
xs = np.array([r * cvertex[0] + t * (r * cvertex[1] - r * cvertex[0]) / div for t in np.arange(0, div + 1, 1)])
ys = np.array([r * cvertex[0] + t * (r * cvertex[3] - r * cvertex[0]) / div for t in np.arange(0, div + 1, 1)])
grid = np.concatenate([xs + y + (0 if H == 0 else [0, 0, HLAY[H - 1]]) for y in ys - ys[0]])
vertex = np.append(vertex, grid, axis=0)
    # Will be useful to link the center vertices with the cylinder;
    # it returns the vertex number for a given x,y,z coordinate
cGridId = lambda x, y, z: x + y * (div + 1) + z * (div + 1) ** 2 + offset
# -- CENTER HEX -- #
# Create the hex using the x,y,z coordinate
for H in range(0, len(HLAY)):
if SHAFT[H] == 0:
for ii in range(0, div):
for jj in range(0, div):
bottom = np.array(
[cGridId(*p) for p in [[ii, jj, H], [ii, jj + 1, H], [ii + 1, jj + 1, H], [ii + 1, jj, H]]])
top = np.array([cGridId(*p) for p in
[[ii, jj, H + 1], [ii, jj + 1, H + 1], [ii + 1, jj + 1, H + 1], [ii + 1, jj, H + 1]]])
points = np.append(bottom, top)
hexa = np.append(hexa, [points], axis=0)
# -- LINK HEX -- #
# Create an ordered list of corresponding points to link
# with the outer cylinder
reverse = lambda x: np.dot(x, [[0, 1, 0], [1, 0, 0], [0, 0, 1]])
ordered = [[p, 0, 0] for p in range(0, div + 1)]
ordered = np.append(ordered, reverse(ordered[1:]) + [div, 0, 0], axis=0)
ordered = np.append(ordered, np.flip(reverse(ordered[:-1]), axis=0), axis=0)
# Link the center region to the cylinder
for H in range(0, len(HLAY)):
if SHAFT[H] == 0:
for sec in range(0, NSEC):
inOne = cGridId(*(ordered[sec] + [0, 0, H]))
inTwo = cGridId(*(ordered[sec + (1 if sec < NSEC - 1 else -sec)] + [0, 0, H]))
inThree = cGridId(*(ordered[sec] + [0, 0, H + 1]))
inFour = cGridId(*(ordered[sec + (1 if sec < NSEC - 1 else -sec)] + [0, 0, H + 1]))
outOne = sec + H * NSEC * nCad
outTwo = outOne + (1 if sec < NSEC - 1 else -sec)
outThree = sec + (H + 1) * NSEC * nCad
outFour = outThree + (1 if sec < NSEC - 1 else -sec)
hexa = np.append(hexa, [[inOne, inTwo, outTwo, outOne, inThree, inFour, outFour, outThree]], axis=0)
# -- ADJUSTMENTS (ROTATION AND COMPANIES) -- #
# -- LEVEL ROTATION -- #
oCylId = lambda omega, rp, z: omega + rp * NSEC + z * NSEC * nCad
for kk in range(0, len(HLAY)):
for jj in range(0, nCad):
for ii in range(0, NSEC):
ident = oCylId(ii, jj, kk + 1)
vertex[ident] = rotz(vertex[ident], toRad(LVLROT[kk]))
# -- CYLINDER ARCS -- #
# Create a list of arc for the cylinder vertex
nEdges = NSEC * nCad * (len(HLAY) + 1)
edges = []
for edge in range(0, nEdges):
# The last one
if np.mod(edge, NSEC) == NSEC - 1:
second = edge
first = edge - NSEC + 1
vect = rotz(vertex[first], toRad(-360 / (2 * NSEC)))
edges.append([first, second, vect[0], vect[1], vect[2]])
else:
first = edge
second = edge + 1
vect = rotz(vertex[first], toRad(360 / (2 * NSEC)))
edges.append([first, second, vect[0], vect[1], vect[2]])
# -- VERTICAL SPLINE -- #
# This force the vertical lines to pass
# on the outer cylinder instead of a straight line
spedge = []
for H in range(0, len(HLAY)):
for quadrant in range(0, nCad):
for sector in range(0, NSEC):
first = sector + quadrant * NSEC + H * nCad * NSEC
second = sector + quadrant * NSEC + (H + 1) * nCad * NSEC
unitH = np.array([0, 0, 1]) * (vertex[second] - vertex[first]) / DIVI[2]
fv = vertex[first] * [1, 1, 0]
sv = vertex[second] * [1, 1, 0]
if fv[0] != sv[0] or fv[1] != sv[1]:
unitR = (np.arccos(np.dot(sv, fv) / (np.linalg.norm(sv) * np.linalg.norm(fv)))) / DIVI[2]
iterp = [rotz(vertex[first], p * unitR) + p * unitH for p in range(1, DIVI[2])]
                    # Skip edges that are not part of any hex
inHex = False
for h in hexa:
if first in h and second in h:
inHex = True
if inHex:
spedge.append([first, second, np.array(iterp)])
# -- WALLS PATCH -- #
# Sides
wallsPatch = []
scaffold = np.array([
NSEC * (nCad - 1),
NSEC * (nCad - 1) + 1,
NSEC * nCad + NSEC * (nCad - 1) + 1,
NSEC * nCad + NSEC * (nCad - 1)
])
scaffoldLast = np.array([
NSEC * (nCad - 1),
1 + NSEC * (nCad - 1) - NSEC,
1 + NSEC * nCad + NSEC * (nCad - 1) - NSEC,
NSEC * nCad + NSEC * (nCad - 1)
])
for H in range(0, len(HLAY)):
for sector in range(0, NSEC):
wallsPatch.append((scaffold if sector < NSEC - 1 else scaffoldLast) + sector + (H * NSEC * nCad))
# -- SHAFT AND IMPELLER PATCH -- #
shaftPatch = []
impelPatch = []
scaffold = np.array([0, 1, NSEC * nCad + 1, NSEC * nCad])
scaffoldLast = np.array([0, 1 - NSEC, NSEC * nCad + 1 - NSEC, NSEC * nCad])
impScaffolds = [
np.array([0, NSEC, NSEC + NSEC * nCad, NSEC * nCad]),
np.array([1, 1 + NSEC, 1 + NSEC + NSEC * nCad, 1 + NSEC * nCad]),
np.array([0, NSEC, 1 + NSEC, 1]),
np.array([NSEC + NSEC * nCad, NSEC * nCad, 1 + NSEC * nCad, 1 + NSEC + NSEC * nCad]),
np.array([NSEC, 1 + NSEC, 1 + NSEC + NSEC * nCad, NSEC + NSEC * nCad])
]
for H in range(0, len(HLAY)):
for quadrant in range(0, nCad):
for sector in range(0, NSEC):
if SHAFT[H] == 1 and quadrant == 0 and IMPELLERCUT[sector, quadrant, H] == 0:
shaftPatch.append((scaffold if sector < NSEC - 1 else scaffoldLast) + sector + (H * NSEC * nCad))
CutV = IMPELLERCUT[sector, quadrant, H]
if H < len(HLAY) - 1:
if CutV != IMPELLERCUT[sector, quadrant, H + 1]:
if sector < NSEC - 1:
impelPatch.append(impScaffolds[3] + sector + quadrant * NSEC + (H * NSEC * nCad))
else:
adjust = [0, 0, NSEC, NSEC]
impelPatch.append(impScaffolds[3] + sector + quadrant * NSEC + (H * NSEC * nCad) - adjust)
if quadrant < nCad - 1:
if CutV != IMPELLERCUT[sector, quadrant + 1, H]:
if sector < NSEC - 1:
impelPatch.append(impScaffolds[4] + sector + quadrant * NSEC + (H * NSEC * nCad))
else:
adjust = [0, NSEC, NSEC, 0]
impelPatch.append(impScaffolds[4] + sector + quadrant * NSEC + (H * NSEC * nCad) - adjust)
if sector < NSEC - 1:
if CutV != IMPELLERCUT[sector + 1, quadrant, H]:
impelPatch.append(impScaffolds[1] + sector + quadrant * NSEC + (H * NSEC * nCad))
elif sector == NSEC - 1:
if int(CutV) != int(IMPELLERCUT[0, quadrant, H]):
impelPatch.append(impScaffolds[0] + 0 + quadrant * NSEC + (H * NSEC * nCad))
# -- TOP AND BOTTOM PATCH -- #
topPatch = []
bottomPatch = []
scaffold = np.array([0, NSEC, NSEC + 1, 1])
scaffoldLast = np.array([0, NSEC, 1, 1 - NSEC])
for H in [0, len(HLAY)]:
for quadrant in range(0, nCad - 1):
for sector in range(0, NSEC):
if H == 0:
if IMPELLERCUT[sector,quadrant,H]!=1:
bottomPatch.append((scaffold if sector < NSEC - 1 else scaffoldLast) + sector + NSEC * quadrant + (H * NSEC * nCad))
else:
if IMPELLERCUT[sector,quadrant,H-1]!=1:
topPatch.append((scaffold if sector < NSEC - 1 else scaffoldLast) + sector + NSEC * quadrant + (H * NSEC * nCad))
# -- CENTER TOP BOTTOM and IMPELLER/SHAFT -- #
scaffold = np.array([[0, 0, 0],
[0, 1, 0],
[1, 1, 0],
[1, 0, 0]])
for H in range(0, len(HLAY) + 1):
for ii in range(0, div):
for jj in range(0, div):
toAdd = [cGridId(*p) for p in (scaffold + [ii, jj, H])]
if H == 0 and SHAFT[0] == 0:
bottomPatch.append(toAdd)
elif H == len(HLAY) and SHAFT[len(HLAY) - 1] == 0:
topPatch.append(toAdd)
elif (H != 0) and (H != len(HLAY)):
if SHAFT[H - 1] != SHAFT[H]:
shaftPatch.append(toAdd)
for H in range(0, len(HLAY) + 1):
for sec in range(0, NSEC):
inOne = cGridId(*(ordered[sec] + [0, 0, H]))
inTwo = cGridId(*(ordered[sec + (1 if sec < NSEC - 1 else -sec)] + [0, 0, H]))
outOne = sec + H * NSEC * nCad
outTwo = outOne + (1 if sec < NSEC - 1 else -sec)
if H == 0 and SHAFT[0] == 0:
bottomPatch.append([inOne, outOne, outTwo, inTwo])
elif H == len(HLAY) and SHAFT[len(HLAY) - 1] == 0:
topPatch.append([inOne, outOne, outTwo, inTwo])
elif (H != 0) and (H != len(HLAY)):
if SHAFT[H - 1] != SHAFT[H]:
shaftPatch.append([inOne, outOne, outTwo, inTwo])
# PRINT BM
# Header
blockMesh = [
"/*--------------------------------*- C++ -*----------------------------------*\\",
"| ========= | |",
"| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |",
"| \\ / O peration | Version: 2.3.0 |",
"| \\ / A nd | Web: www.OpenFOAM.org |",
"| \\/ M anipulation | |",
"\*---------------------------------------------------------------------------*/",
"FoamFile",
"{",
" version 2.0;",
" format ascii;",
" class dictionary;",
" object blockMeshDict;",
"}",
"// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //",
"",
"",
"convertToMeter = 1;"
]
# Vertex
vtemp = "({:20.10f} {:20.10f} {:20.10f})"
blockMesh += ["", "vertices", "("]
for v in vertex:
blockMesh.append(vtemp.format(*v))
blockMesh += [");"]
# Hex
btemp = " hex ( {} {} {} {} {} {} {} {} )"
divtem = " ( {} {} {} ) ".format(*DIVI)
gradtem = " simpleGrading ( 1 1 1 )"
blockMesh += [" ", "blocks", "("]
for h in hexa:
out = btemp.format(*h) + divtem + gradtem
blockMesh.append(out)
blockMesh += [");"]
# Edge
arctem = " arc {} {} ( {} {} {} )"
blockMesh += ["", "edges", "("]
# Arc
for b in edges:
blockMesh.append(arctem.format(*b))
# Spline
splinetem = " spline {} {} ("
for b in spedge:
blockMesh.append(splinetem.format(b[0], b[1]))
for v in b[2]:
blockMesh.append(" " + vtemp.format(*v))
blockMesh += [" )"]
blockMesh += [");"]
# Patch
ftem = " ({} {} {} {})"
blockMesh += ["", "boundary", "("]
blockMesh += ["walls", "{", "type wall;", "faces", "("]
for w in wallsPatch:
blockMesh.append(ftem.format(*w))
blockMesh += [");", "}"]
blockMesh += ["top", "{", "type wall;", "faces", "("]
for w in topPatch:
blockMesh.append(ftem.format(*w))
blockMesh += [");", "}"]
blockMesh += ["bottom", "{", "type wall;", "faces", "("]
for w in bottomPatch:
blockMesh.append(ftem.format(*w))
blockMesh += [");", "}"]
blockMesh += ["shaft", "{", "type wall;", "faces", "("]
for w in shaftPatch:
blockMesh.append(ftem.format(*w))
blockMesh += [");", "}"]
blockMesh += ["impeller", "{", "type wall;", "faces", "("]
for w in impelPatch:
blockMesh.append(ftem.format(*w))
blockMesh += [");", "}"]
blockMesh += [");"]
return "\n".join(blockMesh)
if __name__ == "__main__":
jsfile = open("./cone15deg.json","r")
conf = js.load(jsfile)
print("-- Super Marine Test--")
print(superMarine(conf))
|
"""
Space : O(n)
Time : O(n)
"""
from typing import List


class Solution:
def validMountainArray(self, A: List[int]) -> bool:
temp = []
n = len(A)
for i in range(n):
if i == 0:
temp.append('-')
continue
if A[i] - A[i-1] > 0:
temp.append('U')
elif A[i] - A[i-1] == 0:
temp.append('N')
else:
temp.append('D')
up, down = False, False
for j in range(n):
if temp[j] == 'U' and not down:
up = True
elif temp[j] == 'D' and not up:
return False
elif temp[j] == 'U' and down:
return False
elif temp[j] == 'D' and up:
down = True
elif temp[j] == 'N':
return False
if not down:
return False
return True
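# Hedged usage sketch:
# Solution().validMountainArray([0, 3, 2, 1])  # -> True
# Solution().validMountainArray([3, 5, 5])     # -> False (plateau)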
|
import json
class Coupon(object):
"""Loose representation of a coupon - no logic.
This is a coupon - you can add it to an Order (order.add_item) and,
if it fits, get some money off your purchase. I think.
    This is another thing that's worth exploring - there are some sweet
    deals that would be awful without the coupon.
"""
def __init__(self, code, quantity=1):
self.code = code
self.quantity = quantity
self.id = 1
self.is_new = True
def save(self, filename="data/coupons/coupon1.json"):
"""
saves the current coupon to a .json file for loading later
"""
if not filename.startswith("data/coupons"):
filename = "data/coupons/" + filename
json_dict = {"code": self.code,
"quantity": self.quantity}
with open(filename, "w") as f:
json.dump(json_dict, f)
@staticmethod
def load(filename):
"""
load and return a new coupon object from a json file
"""
with open(filename, "r") as f:
data = json.load(f)
coupon = Coupon(data["code"],
data["quantity"])
return coupon
def __repr__(self):
return "Code: {}\nQuantity: {}".format(
self.code,
self.quantity,
)
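# Hedged usage sketch (the coupon code and file names below are made up):
# coupon = Coupon("9193", quantity=1)
# coupon.save("example_coupon.json")  # written under data/coupons/
# same = Coupon.load("data/coupons/example_coupon.json")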
|
#
# Collective Knowledge (QR code)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: cTuning foundation
#
cfg={} # Will be updated by CK (meta description of this module)
work={} # Will be updated by CK (temporal data)
ck=None # Will be updated by CK (initialized CK kernel)
# Local settings
import os
##############################################################################
# Initialize module
def init(i):
"""
Input: {}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
return {'return':0}
##############################################################################
# generate QR code
def generate(i):
"""
Input: {
string - string to convert to qr-code
(qr_level) - qr_level (default=3)
(image_size) - picture size (default=512)
(image_type) - picture type (default=PNG)
(web) - if 'yes', return as web output
(filename) - file to write (if not web) (default - qr-code.png)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
full_filename - file with image
}
"""
o=i.get('con','')
s=i.get('string','')
if s=='': return {'return':1, 'error':'string is not defined'}
qrl=i.get('qr_level','3') # default 3
ims=i.get('image_size','512')
imt=i.get('image_type','PNG')
web=i.get('web','')
fn=i.get('filename','qr-code.png')
# Import PyQRNative module
r=ck.load_module_from_path({'path':work['path'],
'module_code_name':'PyQRNative',
'cfg':None,
'skip_init':'yes'})
if r['return']>0: return r
qrm=r['code']
# Prepare QR code
qr = qrm.QRCode(int(qrl), qrm.QRErrorCorrectLevel.L)
qr.addData(s)
qr.make()
im = qr.makeImage()
im1=im.resize((int(ims), int(ims)))
# Check how to output
rr={'return':0}
if web=='yes' or o=='json' or o=='json_out':
# Generate tmp file
import tempfile
fd, fn=tempfile.mkstemp(suffix='.tmp', prefix='ck-')
os.close(fd)
os.remove(fn)
if os.path.isfile(fn):
return {'return':1, 'error': 'file '+fn+' already exists'}
# Save image
try:
im1.save(fn, imt)
except Exception as e:
return {'return':1, 'error':'problem writing image ('+format(e)+')'}
# Finish web
if web=='yes' or o=='json' or o=='json_out':
r=ck.convert_file_to_upload_string({'filename':fn})
if r['return']>0: return r
rr['file_content_base64']=r['file_content_base64']
rr['filename']='qr-code.'+imt.lower()
os.remove(fn)
return rr
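# Hedged usage sketch via the CK kernel. ck.access is the standard CK entry
# point; the 'qr-code' module_uoa below is an assumption about how this module
# is registered, and the string/image_size values are arbitrary:
# r = ck.access({'action': 'generate', 'module_uoa': 'qr-code',
#                'string': 'https://cknowledge.org', 'image_size': '256'})
# if r['return'] > 0:
#     print(r['error'])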
|
from ..step import Step
from ..resource import Resource
from ..transform import transform_resource
from ..exception import FrictionlessException
from .. import errors
# NOTE:
# Some of the following steps use **options - we need to review/fix it
# The step updating resource might benefit from having schema_patch argument
class resource_add(Step):
code = "resource-add"
def __init__(self, descriptor=None, *, name=None, **options):
self.setinitial("name", name)
self.setinitial("options", options)
super().__init__(descriptor)
self.__options = options
# Transform
def transform_package(self, package):
name = self.get("name")
options = self.get("options")
resource = Resource(name=name, basepath=package.basepath, **options)
resource.infer()
package.add_resource(resource)
# Metadata
metadata_profile = { # type: ignore
"type": "object",
"required": ["name"],
"properties": {
"name": {"type": "string"},
},
}
class resource_remove(Step):
code = "resource-remove"
def __init__(self, descriptor=None, *, name=None):
self.setinitial("name", name)
super().__init__(descriptor)
# Transform
def transform_package(self, package):
name = self.get("name")
resource = package.get_resource(name)
if not resource:
error = errors.ResourceError(note=f'No resource "{name}"')
raise FrictionlessException(error=error)
package.remove_resource(name)
# Metadata
metadata_profile = { # type: ignore
"type": "object",
"required": ["name"],
"properties": {
"name": {"type": "string"},
},
}
class resource_transform(Step):
code = "resource-transform"
def __init__(self, descriptor=None, *, name=None, steps=None):
self.setinitial("name", name)
self.setinitial("steps", steps)
super().__init__(descriptor)
# Transform
def transform_package(self, package):
name = self.get("name")
steps = self.get("steps")
        resource = package.get_resource(name)
        if not resource:
            error = errors.ResourceError(note=f'No resource "{name}"')
            raise FrictionlessException(error=error)
        index = package.resources.index(resource)
        package.resources[index] = transform_resource(resource, steps=steps)
# Metadata
metadata_profile = { # type: ignore
"type": "object",
"required": ["name", "steps"],
"properties": {
"name": {"type": "string"},
"steps": {"type": "array"},
},
}
class resource_update(Step):
code = "resource-update"
def __init__(self, descriptor=None, *, name=None, **options):
self.setinitial("name", name)
self.setinitial("options", options)
super().__init__(descriptor)
# Transform
def transform_package(self, package):
name = self.get("name")
options = self.get("options")
resource = package.get_resource(name)
if not resource:
error = errors.ResourceError(note=f'No resource "{name}"')
raise FrictionlessException(error=error)
        for key, value in options.items():
            setattr(resource, key, value)
# Metadata
metadata_profile = { # type: ignore
"type": "object",
"required": ["name"],
"properties": {
"name": {"type": "string"},
},
}
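# Hedged usage sketch, mirroring how the steps above are applied; the package
# path, resource name and title are illustrative values, and Package comes from
# the top-level frictionless API:
# package = Package("data/datapackage.json")
# step = resource_update(name="table", title="Renamed table")
# step.transform_package(package)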
|
import os, json
import itertools
from tqdm import tqdm
with open('/home/share/liuyibing/vqa/vqa-cp1.0/qa_path/vqacp_v1_train_questions.json', 'r') as fd:
v1_questions_json = json.load(fd)
with open('/home/share/liuyibing/vqa/vqa-cp1.0/qa_path/vqacp_v1_test_questions.json', 'r') as fd:
v1_questions_json2 = json.load(fd)
v1_questions_json = itertools.chain(v1_questions_json, v1_questions_json2)
with open('/home/share/liuyibing/vqa/vqa-cp2.0/qa_path/vqacp_v2_train_questions.json', 'r') as fd:
v2_questions_json = json.load(fd)
with open('/home/share/liuyibing/vqa/vqa-cp2.0/qa_path/vqacp_v2_test_questions.json', 'r') as fd:
v2_questions_json2 = json.load(fd)
v2_questions_json = itertools.chain(v2_questions_json, v2_questions_json2)
v1_question_ids = [q['question_id'] for q in v1_questions_json]
v2_question_ids = {q['question_id'] for q in v2_questions_json}  # set for O(1) membership tests
intersection_num = 0
for qid in tqdm(v1_question_ids):
if qid in v2_question_ids:
intersection_num += 1
print("intersection_num/v1_num: {:d}/{:d}, intersection_num/v2_num: {:d}/{:d}"
.format(intersection_num, len(v1_question_ids), intersection_num, len(v2_question_ids)) )
|
"""Task Definions for the task of ArmPointNav"""
from typing import Dict, Tuple, List, Any, Optional
import gym
import numpy as np
from allenact.base_abstractions.misc import RLStepResult
from allenact.base_abstractions.sensor import Sensor
from allenact.base_abstractions.task import Task
from ithor_arm.ithor_arm_constants import (
MOVE_ARM_CONSTANT,
MOVE_ARM_HEIGHT_P,
MOVE_ARM_HEIGHT_M,
MOVE_ARM_X_P,
MOVE_ARM_X_M,
MOVE_ARM_Y_P,
MOVE_ARM_Y_M,
MOVE_ARM_Z_P,
MOVE_ARM_Z_M,
MOVE_AHEAD,
ROTATE_RIGHT,
ROTATE_LEFT,
PICKUP,
DONE,
)
from ithor_arm.ithor_arm_environment import ManipulaTHOREnvironment
from ithor_arm.ithor_arm_viz import LoggerVisualizer
def position_distance(s1, s2):
position1 = s1["position"]
position2 = s2["position"]
return (
(position1["x"] - position2["x"]) ** 2
+ (position1["y"] - position2["y"]) ** 2
+ (position1["z"] - position2["z"]) ** 2
) ** 0.5
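# e.g. position_distance({"position": {"x": 0, "y": 0, "z": 0}},
#                        {"position": {"x": 3, "y": 4, "z": 0}}) returns 5.0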
class AbstractPickUpDropOffTask(Task[ManipulaTHOREnvironment]):
_actions = (
MOVE_ARM_HEIGHT_P,
MOVE_ARM_HEIGHT_M,
MOVE_ARM_X_P,
MOVE_ARM_X_M,
MOVE_ARM_Y_P,
MOVE_ARM_Y_M,
MOVE_ARM_Z_P,
MOVE_ARM_Z_M,
MOVE_AHEAD,
ROTATE_RIGHT,
ROTATE_LEFT,
)
def __init__(
self,
env: ManipulaTHOREnvironment,
sensors: List[Sensor],
task_info: Dict[str, Any],
max_steps: int,
visualizers: List[LoggerVisualizer] = [],
**kwargs
) -> None:
"""Initializer.
See class documentation for parameter definitions.
"""
super().__init__(
env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs
)
self._took_end_action: bool = False
self._success: Optional[bool] = False
self._subsampled_locations_from_which_obj_visible: Optional[
List[Tuple[float, float, int, int]]
] = None
self.visualizers = visualizers
self.start_visualize()
self.action_sequence_and_success = []
# in allenact initialization is with 0.2
self.last_obj_to_goal_distance = None
self.last_arm_to_obj_distance = None
self.object_picked_up = False
self.got_reward_for_pickup = False
self.reward_configs = kwargs["reward_configs"]
self.initial_object_metadata = self.env.get_current_object_locations()
@property
def action_space(self):
return gym.spaces.Discrete(len(self._actions))
def reached_terminal_state(self) -> bool:
return self._took_end_action
@classmethod
def class_action_names(cls, **kwargs) -> Tuple[str, ...]:
return cls._actions
def close(self) -> None:
self.env.stop()
def obj_state_aproximity(self, s1, s2):
# KIANA ignore rotation for now
position1 = s1["position"]
position2 = s2["position"]
eps = MOVE_ARM_CONSTANT * 2
return (
abs(position1["x"] - position2["x"]) < eps
and abs(position1["y"] - position2["y"]) < eps
and abs(position1["z"] - position2["z"]) < eps
)
def start_visualize(self):
for visualizer in self.visualizers:
if not visualizer.is_empty():
print("OH NO VISUALIZER WAS NOT EMPTY")
visualizer.finish_episode(self.env, self, self.task_info)
visualizer.finish_episode_metrics(self, self.task_info, None)
visualizer.log(self.env, "")
def visualize(self, action_str):
for vizualizer in self.visualizers:
vizualizer.log(self.env, action_str)
def finish_visualizer(self, episode_success):
for visualizer in self.visualizers:
visualizer.finish_episode(self.env, self, self.task_info)
def finish_visualizer_metrics(self, metric_results):
for visualizer in self.visualizers:
visualizer.finish_episode_metrics(self, self.task_info, metric_results)
def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray:
assert mode == "rgb", "only rgb rendering is implemented"
return self.env.current_frame
def calc_action_stat_metrics(self) -> Dict[str, Any]:
action_stat = {
"metric/action_stat/" + action_str: 0.0 for action_str in self._actions
}
action_success_stat = {
"metric/action_success/" + action_str: 0.0 for action_str in self._actions
}
action_success_stat["metric/action_success/total"] = 0.0
seq_len = len(self.action_sequence_and_success)
for (action_name, action_success) in self.action_sequence_and_success:
action_stat["metric/action_stat/" + action_name] += 1.0
action_success_stat[
"metric/action_success/{}".format(action_name)
] += action_success
action_success_stat["metric/action_success/total"] += action_success
action_success_stat["metric/action_success/total"] /= seq_len
for action_name in self._actions:
action_success_stat[
"metric/" + "action_success/{}".format(action_name)
] /= (action_stat["metric/action_stat/" + action_name] + 0.000001)
action_stat["metric/action_stat/" + action_name] /= seq_len
succ = [v for v in action_success_stat.values()]
sum(succ) / len(succ)
result = {**action_stat, **action_success_stat}
return result
def metrics(self) -> Dict[str, Any]:
result = super(AbstractPickUpDropOffTask, self).metrics()
if self.is_done():
result = {**result, **self.calc_action_stat_metrics()}
final_obj_distance_from_goal = self.obj_distance_from_goal()
result[
"metric/average/final_obj_distance_from_goal"
] = final_obj_distance_from_goal
final_arm_distance_from_obj = self.arm_distance_from_obj()
result[
"metric/average/final_arm_distance_from_obj"
] = final_arm_distance_from_obj
final_obj_pickup = 1 if self.object_picked_up else 0
result["metric/average/final_obj_pickup"] = final_obj_pickup
original_distance = self.get_original_object_distance()
result["metric/average/original_distance"] = original_distance
# this ratio can be more than 1?
if self.object_picked_up:
ratio_distance_left = final_obj_distance_from_goal / original_distance
result["metric/average/ratio_distance_left"] = ratio_distance_left
result["metric/average/eplen_pickup"] = self.eplen_pickup
if self._success:
result["metric/average/eplen_success"] = result["ep_length"]
# put back this is not the reason for being slow
objects_moved = self.env.get_objects_moved(self.initial_object_metadata)
# Unnecessary, this is definitely happening objects_moved.remove(self.task_info['object_id'])
result["metric/average/number_of_unwanted_moved_objects"] = (
len(objects_moved) - 1
)
result["metric/average/success_wo_disturb"] = (
len(objects_moved) == 1
) # multiply this by the successrate
result["success"] = self._success
self.finish_visualizer_metrics(result)
self.finish_visualizer(self._success)
self.action_sequence_and_success = []
return result
def _step(self, action: int) -> RLStepResult:
        raise NotImplementedError()
def arm_distance_from_obj(self):
goal_obj_id = self.task_info["objectId"]
object_info = self.env.get_object_by_id(goal_obj_id)
hand_state = self.env.get_absolute_hand_state()
return position_distance(object_info, hand_state)
def obj_distance_from_goal(self):
goal_obj_id = self.task_info["objectId"]
object_info = self.env.get_object_by_id(goal_obj_id)
goal_state = self.task_info["target_location"]
return position_distance(object_info, goal_state)
def get_original_object_distance(self):
goal_obj_id = self.task_info["objectId"]
s_init = dict(position=self.task_info["source_location"]["object_location"])
current_location = self.env.get_object_by_id(goal_obj_id)
original_object_distance = position_distance(s_init, current_location)
return original_object_distance
def judge(self) -> float:
"""Compute the reward after having taken a step."""
        raise NotImplementedError()
class ArmPointNavTask(AbstractPickUpDropOffTask):
_actions = (
MOVE_ARM_HEIGHT_P,
MOVE_ARM_HEIGHT_M,
MOVE_ARM_X_P,
MOVE_ARM_X_M,
MOVE_ARM_Y_P,
MOVE_ARM_Y_M,
MOVE_ARM_Z_P,
MOVE_ARM_Z_M,
MOVE_AHEAD,
ROTATE_RIGHT,
ROTATE_LEFT,
PICKUP,
DONE,
)
def _step(self, action: int) -> RLStepResult:
action_str = self.class_action_names()[action]
self._last_action_str = action_str
action_dict = {"action": action_str}
object_id = self.task_info["objectId"]
if action_str == PICKUP:
action_dict = {**action_dict, "object_id": object_id}
self.env.step(action_dict)
self.last_action_success = self.env.last_action_success
last_action_name = self._last_action_str
last_action_success = float(self.last_action_success)
self.action_sequence_and_success.append((last_action_name, last_action_success))
self.visualize(last_action_name)
        # If the object has just been picked up for the first time, update the bookkeeping used for the pickup reward and the pickup episode length
if not self.object_picked_up:
if self.env.is_object_at_low_level_hand(object_id):
self.object_picked_up = True
self.eplen_pickup = (
self._num_steps_taken + 1
) # plus one because this step has not been counted yet
if action_str == DONE:
self._took_end_action = True
object_state = self.env.get_object_by_id(object_id)
goal_state = self.task_info["target_location"]
goal_achieved = self.object_picked_up and self.obj_state_aproximity(
object_state, goal_state
)
self.last_action_success = goal_achieved
self._success = goal_achieved
step_result = RLStepResult(
observation=self.get_observations(),
reward=self.judge(),
done=self.is_done(),
info={"last_action_success": self.last_action_success},
)
return step_result
def judge(self) -> float:
"""Compute the reward after having taken a step."""
reward = self.reward_configs["step_penalty"]
if not self.last_action_success or (
self._last_action_str == PICKUP and not self.object_picked_up
):
reward += self.reward_configs["failed_action_penalty"]
if self._took_end_action:
reward += (
self.reward_configs["goal_success_reward"]
if self._success
else self.reward_configs["failed_stop_reward"]
)
# increase reward if object pickup and only do it once
if not self.got_reward_for_pickup and self.object_picked_up:
reward += self.reward_configs["pickup_success_reward"]
self.got_reward_for_pickup = True
current_obj_to_arm_distance = self.arm_distance_from_obj()
if self.last_arm_to_obj_distance is None:
delta_arm_to_obj_distance_reward = 0
else:
delta_arm_to_obj_distance_reward = (
self.last_arm_to_obj_distance - current_obj_to_arm_distance
)
self.last_arm_to_obj_distance = current_obj_to_arm_distance
reward += delta_arm_to_obj_distance_reward
current_obj_to_goal_distance = self.obj_distance_from_goal()
if self.last_obj_to_goal_distance is None:
delta_obj_to_goal_distance_reward = 0
else:
delta_obj_to_goal_distance_reward = (
self.last_obj_to_goal_distance - current_obj_to_goal_distance
)
self.last_obj_to_goal_distance = current_obj_to_goal_distance
reward += delta_obj_to_goal_distance_reward
# add collision cost, maybe distance to goal objective,...
return float(reward)
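# Worked example of the shaping above (illustrative numbers, not the configs used in the source):
# with step_penalty=-0.01 and failed_action_penalty=-0.03, a failed, non-terminal step with no pickup
# that brings the arm 0.02 m closer to the object while the object-to-goal distance is unchanged
# yields -0.01 - 0.03 + 0.02 + 0.0 = -0.02.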
|
#Assignment No. 1...............................................
altitude = int(input("Enter the altitude of the plane ="))
if altitude <= 1000:
    print("Safe for landing")
elif altitude <= 5000:
    print("Bring down to 1000")
else:
    print("Turn around")
#Assignment No. 2.......................................
for i in range(1,200):
for j in range(2,i):
if(i % j==0):
break
else:
print(i)
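# Note: the for/else prints i only when the inner loop finishes without hitting `break`,
# i.e. when no divisor in 2..i-1 was found. Because the outer range starts at 1, the value 1
# is also printed even though it is not prime.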
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class KikChannelProperties(Model):
"""The parameters to provide for the Kik channel.
:param user_name: The Kik user name
:type user_name: str
:param api_key: Kik API key
:type api_key: str
:param is_validated: Whether this channel is validated for the bot
:type is_validated: bool
:param is_enabled: Whether this channel is enabled for the bot
:type is_enabled: bool
"""
_validation = {
'user_name': {'required': True},
'api_key': {'required': True},
'is_enabled': {'required': True},
}
_attribute_map = {
'user_name': {'key': 'userName', 'type': 'str'},
'api_key': {'key': 'apiKey', 'type': 'str'},
'is_validated': {'key': 'isValidated', 'type': 'bool'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
}
def __init__(self, user_name, api_key, is_enabled, is_validated=None):
self.user_name = user_name
self.api_key = api_key
self.is_validated = is_validated
self.is_enabled = is_enabled
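# Example construction (illustrative values only, not part of the generated SDK):
#     props = KikChannelProperties(user_name="mybot", api_key="<api-key>", is_enabled=True)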
|
"""
Plot bulk fluxes as a time series.
"""
import os, sys
sys.path.append(os.path.abspath('../alpha'))
import Lfun
import zfun
Ldir = Lfun.Lstart()
sys.path.append(os.path.abspath(Ldir['LO'] + 'plotting'))
import pfun
import numpy as np
import matplotlib.pyplot as plt
import pickle
from datetime import datetime, timedelta
import tef_fun
# get the DataFrame of all sections
sect_df = tef_fun.get_sect_df()
from warnings import filterwarnings
filterwarnings('ignore') # skip some warning messages
#year = 2017
# choose input and organize output
Ldir = Lfun.Lstart()
indir0 = Ldir['LOo'] + 'tef/'
# choose the tef extraction to process
item = Lfun.choose_item(indir0)
# hacky way of getting the year, assumes "item" is of the form:
# 'cas6_v3_lo8b_2017.01.01_2017.12.31'
year_str = item.split('_')[-1].split('.')[0]
year = int(year_str)
indir0 = indir0 + item + '/'
indir = indir0 + 'bulk/'
sect_list_raw = os.listdir(indir)
sect_list_raw.sort()
sect_list = [item for item in sect_list_raw if ('.p' in item)]
print(20*'=' + ' Processed Sections ' + 20*'=')
print(*sect_list, sep=", ")
print(61*'=')
# select which sections to process
my_choice = input('-- Input section to plot (e.g. sog5, or Return to plot all): ')
if len(my_choice)==0:
# full list
save_fig = True
else: # single item
if (my_choice + '.p') in sect_list:
sect_list = [my_choice + '.p']
save_fig = False
else:
print('That section is not available')
sys.exit()
outdir = indir0 + 'bulk_plots/'
Lfun.make_dir(outdir)
#plt.close('all')
sect_list = [item for item in sect_list if item.replace('.p','') in sect_df.index]
for snp in sect_list:
sn = snp.replace('.p','')
bulk = pickle.load(open(indir + snp, 'rb'))
QQ = bulk['QQ']
SS = bulk['SS']
ot = bulk['ot']
qnet = bulk['qnet_lp'] # net volume transport
fnet = bulk['fnet_lp'] # net tidal energy flux
ssh = bulk['ssh_lp'] # average SSH across the section, low-passed
NT, NS = SS.shape
# make vector and array times in days from start of the year
dt = []
for tt in ot:
dt.append(Lfun.modtime_to_datetime(tt))
td = []
for tt in dt:
#ttt = tt- datetime(dt[0].year,1,1)
ttt = tt - datetime(year,1,1) # hardwire for 2016.12.15 start
td.append(ttt.days + ttt.seconds/86400)
td = np.array(td) # time in days from start of the year
Time = td.reshape((NT,1)) * np.ones((1,NS)) # matrix version
# some information about direction
x0, x1, y0, y1, landward = sect_df.loc[sn,:]
if (x0==x1) and (y0!=y1):
sdir = 'NS'
if landward == 1:
dir_str = 'Eastward'
elif landward == -1:
dir_str = 'Westward'
        y0, y1 = sorted([y0, y1])
elif (x0!=x1) and (y0==y1):
sdir = 'EW'
if landward == 1:
dir_str = 'Northward'
elif landward == -1:
dir_str = 'Southward'
        x0, x1 = sorted([x0, x1])
# separate out positive and negative transports
QQp = QQ.copy()
QQp[QQ<=0] = np.nan
QQm = QQ.copy()
QQm[QQ>=0] = np.nan
# form two-layer versions of Q and S
Qin = np.nansum(QQp, axis=1)
QSin = np.nansum(QQp*SS, axis=1)
Sin = QSin/Qin
Qout = np.nansum(QQm, axis=1)
QSout = np.nansum(QQm*SS, axis=1)
Sout = QSout/Qout
# and find net transport to compare with qnet (should be identical)
Qnet = np.nansum(QQ, axis=1)
# RESULT: it is identical
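    # Quick numeric illustration of the two-layer reduction above (made-up values): if one time
    # step has QQ = [+3, -1, +2] (m3/s) with SS = [30, 28, 32], then Qin = 5, Qout = -1,
    # Sin = (3*30 + 2*32)/5 = 30.8 and Sout = 28.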
# 2019.11.14 make monthly averages
import pandas as pd
td_list = []
for t in td:
td_list.append(datetime(year,1,1,0,0,0) + timedelta(days=t))
tef_df = pd.DataFrame(index=td_list, columns=['Qin','Qout','Sin','Sout'])
tef_df.loc[:,'Qin']=Qin
tef_df.loc[:,'Qout']=Qout
tef_df.loc[:,'Sin']=Sin
tef_df.loc[:,'Sout']=Sout
tef_mean_df = tef_df.resample('1M').mean()
# the above puts timestamps at the end of the month
    # so here we set it to the middle of each month because it is more
    # consistent with the averaging
tef_mean_df.index -= timedelta(days=15)
tef_mean_df.loc[:,'yd'] = tef_mean_df.index.dayofyear
# PLOTTING
fig = plt.figure(figsize=(21,9))
alpha = .5
# Salinity vs. Time (size and color by Transport)
ax = fig.add_subplot(2,3,1)
Qscale = np.nanmean(np.abs(QQ))
qf = 25
ax.scatter(Time, SS, s=qf*np.abs(QQp/Qscale), c='r', alpha=alpha)
ax.scatter(Time, SS, s=qf*np.abs(QQm/Qscale), c='b', alpha=alpha)
# add two-layer versions
if False:
ax.plot(td, Sin, '-k', td, Sout, '--k')
else:
tef_mean_df.plot(x='yd', y = 'Sin', style='-ok', ax=ax, legend=False)
tef_mean_df.plot(x='yd', y = 'Sout', style='--ok', ax=ax, legend=False)
ax.text(0.05, 0.1, 'Positive is ' + dir_str, transform=ax.transAxes, fontweight='bold')
ax.set_xlim(0,366)
ax.grid(True)
ax.set_xticklabels([])
ax.set_ylabel('Salinity')
# legend
ax.scatter(.95, .2, s=qf, c='r', transform=ax.transAxes, alpha=alpha)
ax.scatter(.95, .1, s=qf, c='b', transform=ax.transAxes, alpha=alpha)
ax.text(.94, .2, 'Positive Q %d (m3/s)' % int(Qscale), color='r', fontweight='bold',
horizontalalignment='right', verticalalignment='center', transform=ax.transAxes)
ax.text(.94, .1, 'Negative Q %d (m3/s)' % int(Qscale), color='b', fontweight='bold',
horizontalalignment='right', verticalalignment='center', transform=ax.transAxes)
ax.set_title(indir0.split('/')[-2])
# # Tidal energy flux vs. Time as second y-axis
# ax = ax.twinx()
# ax.plot(td, fnet/1e9, '-g', linewidth=2)
# ax.set_ylabel('Energy Flux (GW)', color='g', alpha=alpha)
# ax.set_ylim(bottom=0)
# ax.set_xlim(0,366)
    # Transport vs. Time
ax = fig.add_subplot(2,3,4)
ax.scatter(Time, QQp/1e3, s=qf*np.abs(QQp/Qscale), c='r', alpha=alpha)
ax.scatter(Time, -QQm/1e3, s=qf*np.abs(QQm/Qscale), c='b', alpha=alpha)
# add two-layer versions
if False:
ax.plot(td, Qin/1e3, '-k', td, -Qout/1e3, '--k')
else:
this_yd = tef_mean_df.loc[:,'yd'].values
this_qin = tef_mean_df.loc[:,'Qin'].values/1e3
this_qout = -tef_mean_df.loc[:,'Qout'].values/1e3
# tef_mean_df.plot(x='yd', y = 'Qin', style='-ok', ax=ax, legend=False)
# tef_mean_df.plot(x='yd', y = 'Qout', style='--ok', ax=ax, legend=False)
ax.plot(this_yd, this_qin, '-ok')
ax.plot(this_yd, this_qout, '--ok')
ax.set_xlim(0,366)
ax.set_ylim(bottom=0)
ax.grid(True)
ax.set_xlabel('Days from 1/1/' + str(year))
ax.set_ylabel('|Q| 1000 m3/s')
# Tidal energy flux vs. Time as second y-axis
ax = fig.add_subplot(3,3,2)
ax.plot(td, fnet/1e9, '-g', linewidth=2)
ax.set_ylabel('Energy Flux (GW)')
#ax.set_ylim(bottom=0)
ax.set_xlim(0,366)
# Surface height
ax = fig.add_subplot(3,3,5)
ax.plot(td, ssh, '-b', linewidth=2)
ax.set_xlim(0,366)
ax.grid(True)
ax.set_ylabel('SSH (m)')
# Volume flux
ax = fig.add_subplot(3,3,8)
ax.plot(td, qnet/1e3, '-c', linewidth=2)
ax.plot(td, Qnet/1e3, '--r', linewidth=2)
ax.set_xlim(0,366)
ax.grid(True)
ax.set_xlabel('Days from 1/1/' + str(year))
ax.set_ylabel('Qnet 1000 m3/s')
# Section location map
ax = fig.add_subplot(1,3,3)
ax.plot([x0, x1], [y0, y1], '-m', linewidth=3)
ax.set_title(sn)
pfun.add_coast(ax)
pfun.dar(ax)
aa = [x0-.7, x1+.7, y0-.5, y1+.5]
ax.axis(aa)
if save_fig:
plt.savefig(outdir + sn + '.png')
plt.close()
else:
plt.show()
|
import utils
import threading
import requests
class PageDownloader:
def __init__(self, request, event_id):
"""
:param requests request:
:param int event_id:
"""
self.request = request
self.event_id = event_id
def download(self, page):
response = self.request.post("http://www.allcpp.cn/allcpp/event/getalldoujinshiforcircle.do", json=
{"event": self.event_id, "param": "", "type": 3, "list": "",
"num": page, "size": 30, "sectionid": ""}, headers={
"errorWrap": "json",
"Origin": "http://www.allcpp.cn",
"Referer": "http://www.allcpp.cn/allcpp/event/eventorg.do?event=541",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
"Cookie": "JALKSJFJASKDFJKALSJDFLJSF=163174109124d099959a029e4a8eb3ecb0375c88e145106.83.117.214_20023679253; randomCode=cppb87dc95a932b4580b609fc4a6b31b7d0106.83.117.214_217552492183E44; token=736468D40FEDFD2D906E7871DAFE29655BC84E08F22FC4286716F3C333A3A7E652E4A916CD795DC0425EE14713F8B49CB2BB37802B200418BA5CD3BB6BA4BEB0; Hm_lvt_75e110b2a3c6890a57de45bd2882ec7c=1543823502,1543824278,1544323732,1544599641; JSESSIONID=382E62579BAA7A8B18AFF91B0ECB0C68; Hm_lpvt_75e110b2a3c6890a57de45bd2882ec7c=1544617040"})
        try:
            data = response.json()
            if data['isSuccess']:
                print('{}: OK'.format(page))
                utils.save_json('cpp/{}/{}.json'.format(self.event_id, page), data['result']['list'])
            else:
                print('{}: {}'.format(self.event_id, data['message']))
        except (KeyError, ValueError, TypeError, AttributeError):
            # Fall back to the raw HTTP body so the error is visible even when the JSON shape is unexpected.
            print('{}: {}'.format(self.event_id, response.content))
class CollectThread(threading.Thread):
def __init__(self, srv, show_id):
"""
:param PageDownloader srv:
:param show_id:
"""
threading.Thread.__init__(self)
self.service = srv
self.id = show_id
def run(self):
self.service.download(self.id)
if __name__ == '__main__':
service = PageDownloader(utils.requests_retry_session(), int(input('Event ID:')))
start = int(input('Start ID:'))
end = int(input('End ID:'))
if start > end or start <= 0:
print('Start - End is invalid')
exit(1)
chunks = utils.make_chunks(range(start, end + 1), 10)
for chunk in chunks:
pool = []
for show_id in chunk:
thread = CollectThread(service, show_id)
thread.start()
pool.append(thread)
for thread in pool:
thread.join()
|
"""Page models."""
from flask_babel import lazy_gettext as _
from app.database import DBItem, db
from app.utils.enums import StringEnum
class PageType(StringEnum):
"""Type of the saved location."""
ABOUT = _("About")
RULES = _("Rules")
SUPPORT = _("Support us")
class Page(DBItem):
"""Pages content"""
text = db.Column(db.Text(), nullable=False)
@classmethod
def get(cls, page_type: PageType):
"""Gets the page."""
return cls.get_by_id(page_type.value)
@classmethod
# pylint: disable=arguments-differ
# type:ignore
def create(cls, page_type: PageType, *args, **kwargs):
"""Creates the page."""
return super().create(id=page_type.value, *args, **kwargs)
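# Illustrative usage (assumes the usual Flask-SQLAlchemy app/session setup, which is not shown here):
#     Page.create(PageType.ABOUT, text="<p>About us</p>")
#     about = Page.get(PageType.ABOUT)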
|
# -*- coding: utf-8 -*-
import shutil
import os
import re
import requests
import urllib2
from pprint import pprint
import bs4
from bs4 import BeautifulSoup
import html2text
import time
import argparse
import datetime
from sys import argv
import time
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'
def get_request(url, headers={}):
response = requests.get(url,
headers=headers)
return response
def post_request(url, data={}, headers={}, cookies={}):
response = requests.post(url,
data=data,
headers=headers,
cookies=cookies)
return response
def get_submissions():
lordoftherings_value = '13c082ac336859d586aa5364c086d26f:44751f02ffbb8d82fb3deddca4da60de'
cookies = dict()
cookies["lordoftherings"] = lordoftherings_value
browser=webdriver.Chrome()
cookie = {'name': "lordoftherings", 'value' : 'a5dr3g48ag2dg8s2b8r57gkil6ioip74:7c34ac7cc9b2c971eafaba58840e0717', 'path' : '/'}
url_home_page = 'https://www.hackerearth.com/challenges/'
url2 = 'https://www.wikipedia.org/'
browser.get(url_home_page) # This opens a firefox console
browser.implicitly_wait(1)
login_but=browser.find_element_by_xpath("//li[contains(@class,'nav-bar-menu login-menu-btn')]")
webdriver.ActionChains(browser).click(login_but).perform()
username = browser.find_element_by_id("id_login")
password = browser.find_element_by_id("id_password")
username.send_keys("gesturefm@gmail.com")
password.send_keys("Deadmau5")
browser.find_element_by_name("submit").click()
urls = ['hermione-vs-draco']
time.sleep(5)
for url in urls:
url_front = 'https://www.hackerearth.com/problem/algorithm/' + url + '/activity/'
browser.get(url_front) # This opens a firefox console
browser.implicitly_wait(.1)
time.sleep(.5)
link = None
not_avail = None
n=0
while not link:
try:
link = browser.find_element_by_link_text('View')
url_link = link.get_attribute("href")
except NoSuchElementException:
browser.get(url_front) # This opens a firefox console
browser.implicitly_wait(.1)
print "except"
n+=1
if n > 20:
break
time.sleep(1)
browser.get(url_link)
browser.implicitly_wait(.1)
time.sleep(.1)
try:
unlock_but=browser.find_element_by_xpath("//a[contains(@class,'button btn-blue ajax-get') and .//text()='Unlock it']")
webdriver.ActionChains(browser).click(unlock_but).perform()
except:
print url + " already pressed"
browser.implicitly_wait(.1)
time.sleep(.1)
unlock_but=browser.find_element_by_xpath("//a[contains(@class,'button btn-blue ajax-get') and .//text()='Unlock it']")
webdriver.ActionChains(browser).click(unlock_but).perform()
handle = 'prashantpandeyfun10'
name = 'algorithm/karan-and-even-numbers-1'
url = "https://www.hackerearth.com/submission/4440655/"
url = "https://www.hackerearth.com/problem/" + name + "/activity/"
t = get_request(url)
if t == -1 or t == {}:
return t
tmp_string = t.headers["set-cookie"]
csrf_token = re.findall(r"csrftoken=\w*", tmp_string)[0][10:]
response = {}
response["host"] = "www.hackerearth.com"
response["user-agent"] = user_agent
response["accept"] = "application/json, text/javascript, */*; q=0.01"
response["accept-language"] = "en-US,en;q=0.5"
response["accept-encoding"] = "gzip, deflate"
response["content-type"] = "application/x-www-form-urlencoded"
response["X-CSRFToken"] = csrf_token
response["X-Requested-With"] = "XMLHttpRequest"
#response["Referer"] = "https://www.hackerearth.com/submissions/" + handle + "/"
response["Referer"] = url
response["Connection"] = "keep-alive"
response["Pragma"] = "no-cache"
response["Cache-Control"] = "no-cache"
response["Cookie"] = tmp_string
it = 1
submissions = {handle: {}}
for index_number in xrange(1, 5):
print(index_number)
submissions[handle][index_number] = {}
url_post = "https://www.hackerearth.com/AJAX/algorithm/42373/unlock-problem-submission/"
url_auth = 'https://www.hackerearth.com/realtime/pusher/auth/'
data = {'csrf_token':csrf_token, 'action':'setupSubmissionFilter', 'frameProblemIndex':'A', 'verdictName':'OK'}
url_auth = 'https://www.hackerearth.com/realtime/pusher/auth/'
idk = post_request(url_post, headers=response)
url = "https://www.hackerearth.com/submission/4440655/"
page = get_request(url, headers=response)
html_content = page.text
soup = BeautifulSoup(html_content, "html.parser")
body = re.search('/submission/key/(.*)/', html_content)
w = body.group(1)
get_submissions()
|
import copy
import doctest
import gc
import os
import sys
import unittest
from tempfile import mkstemp
from pympler import refbrowser
from pympler.util.compat import StringIO
class TreeTest(unittest.TestCase):
# sample tree used in output tests
sample_tree = None
TREE_DEP_1 = """
root-+-branch1
+-branch2
+-branch3
""".strip()
TREE_DEP_2 = """
root-+-branch1-+-a
| +-b
| +-c
| +-d
| +-e
|
+-branch2-+-branch3
| +-a
| +-b
| +-c
| +-d
| +-e
|
+-branch3-+-branch1
+-a
+-b
+-c
+-d
+-e
""".strip()
TREE_DEP_4 = """
root-+-branch1-+-a
| +-b
| +-c
| +-d
| +-e
|
+-branch2-+-branch3-+-branch1-+-a
| | | +-b
| | | +-c
| | | +-d
| | | +-e
| | |
| | +-a
| | +-b
| | +-c
| | +-d
| | +-e
| |
| +-a
| +-b
| +-c
| +-d
| +-e
|
+-branch3-+-branch1-+-a
| +-b
| +-c
| +-d
| +-e
|
+-a
+-b
+-c
+-d
+-e
""".strip()
def setUp(self):
# set up a sample tree with three children, each having
# five string leaves and some have references to other children
TreeTest.sample_tree = refbrowser._Node('root')
branch1 = refbrowser._Node('branch1')
TreeTest.sample_tree.children.append(branch1)
branch2 = refbrowser._Node('branch2')
TreeTest.sample_tree.children.append(branch2)
branch3 = refbrowser._Node('branch3')
TreeTest.sample_tree.children.append(branch3)
branch2.children.append(branch3)
branch3.children.append(branch1)
for i in ['a','b','c','d','e']:
branch1.children.append(i)
branch2.children.append(i)
branch3.children.append(i)
def tearDown(self):
"""Need to delete reference cycles, otherwise test data will affect
garbage graph tests."""
gc.collect()
def test_node(self):
"""Test node functionality.
_Nodes can be created, linked to each other, and the output function
should return the expected result.
"""
# default representation
n = refbrowser._Node(1)
expected = str(1)
self.assert_(str(n) == expected)
# custom representation
expected = 'the quick brown fox'
def foo(o): return expected
n = refbrowser._Node(1, foo)
self.assert_(str(n) == expected)
# attach child
n.children.append(2)
def test_get_tree(self):
"""Test reference browser tree representation."""
#root <- ref1 <- ref11
# <- ref11 (already included)
# <- ref2 <- ref22
root = 'root id'
# the key-value pair is required since Python 2.7/3.1
# see http://bugs.python.org/issue4688 for details
ref1 = [root, []]
ref11 = [ref1, root]
ref2 = {1: root, 2:[]}
ref22 = {1: ref2}
res = refbrowser.RefBrowser(root, repeat=False).get_tree()
# note that ref11 should not be included due to the repeat argument
refs = [ref1, ref2]
children = [c.o for c in res.children if isinstance(c, refbrowser._Node)]
for r in refs:
self.assert_(r in children, "%s not in children" % r)
self.assert_(ref11 not in children)
# now we test the repeat argument
res = refbrowser.RefBrowser(root, repeat=True).get_tree()
refs = [ref1, ref11, ref2]
children = [c.o for c in res.children if isinstance(c, refbrowser._Node)]
for r in refs:
self.assert_(r in children)
# test if maxdepth is working
res = refbrowser.RefBrowser(root, maxdepth=0).get_tree()
self.assertEqual(len(res.children), 0)
res = refbrowser.RefBrowser(root, maxdepth=1).get_tree()
for c in res.children:
if c == ref1:
self.assertEqual(len(c.children), 0)
# test if the str_func is applied correctly
expected = 'the quick brown fox'
def foo(o): return expected
res = refbrowser.RefBrowser(root, str_func=foo, maxdepth=2).get_tree()
self.assertEqual(str(res), expected)
res = refbrowser.RefBrowser(root, str_func=foo, repeat=True,\
maxdepth=2).get_tree()
self.assertEqual(str(res), expected)
def test_console_browser(self):
"""Test ConsoleBrowser uses stdout by default."""
crb = refbrowser.ConsoleBrowser(None, maxdepth=2)
self.assertEqual(crb.stream, sys.stdout)
def test_file_browser(self):
crb = refbrowser.FileBrowser(None, maxdepth=1)
fhandle, fname = mkstemp(prefix='test_file_browser', text=True)
os.close(fhandle)
try:
crb.print_tree(fname, tree=self.sample_tree)
output = open(fname).read()
self.assertEqual(output.strip(), self.TREE_DEP_1)
finally:
os.unlink(fname)
def test_print_tree(self):
"""Test reference browser prints root object by default."""
out1 = StringIO()
crb = refbrowser.StreamBrowser(copy.copy(self.sample_tree), maxdepth=1, stream=out1)
crb.print_tree(crb.get_tree())
out2 = StringIO()
crb = refbrowser.StreamBrowser(copy.copy(self.sample_tree), maxdepth=1, stream=out2)
crb.print_tree()
self.assertEqual(out1.getvalue(), out2.getvalue())
def test_reference_browser_max_depth(self):
"""Test different reference tree depth settings."""
stream = StringIO()
crb = refbrowser.StreamBrowser(None, maxdepth=1, stream=stream)
crb.print_tree(self.sample_tree)
self.assertEqual(stream.getvalue().strip(), self.TREE_DEP_1)
stream = StringIO()
crb = refbrowser.StreamBrowser(None, maxdepth=2, stream=stream)
crb.print_tree(self.sample_tree)
self.assertEqual(stream.getvalue().strip(), self.TREE_DEP_2)
stream = StringIO()
crb = refbrowser.StreamBrowser(None, maxdepth=4, stream=stream)
crb.print_tree(self.sample_tree)
self.assertEqual(stream.getvalue().strip(), self.TREE_DEP_4)
test_print_tree = """
let's start with a small tree first
>>> crb = refbrowser.ConsoleBrowser(None, maxdepth=1)
>>> crb.print_tree(TreeTest.sample_tree)
root-+-branch1
+-branch2
+-branch3
okay, next level
>>> crb = refbrowser.ConsoleBrowser(None, maxdepth=2)
>>> crb.print_tree(TreeTest.sample_tree)
root-+-branch1-+-a
| +-b
| +-c
| +-d
| +-e
|
+-branch2-+-branch3
| +-a
| +-b
| +-c
| +-d
| +-e
|
+-branch3-+-branch1
+-a
+-b
+-c
+-d
+-e
and now full size
>>> crb = refbrowser.ConsoleBrowser(None, maxdepth=4)
>>> crb.print_tree(TreeTest.sample_tree)
root-+-branch1-+-a
| +-b
| +-c
| +-d
| +-e
|
+-branch2-+-branch3-+-branch1-+-a
| | | +-b
| | | +-c
| | | +-d
| | | +-e
| | |
| | +-a
| | +-b
| | +-c
| | +-d
| | +-e
| |
| +-a
| +-b
| +-c
| +-d
| +-e
|
+-branch3-+-branch1-+-a
| +-b
| +-c
| +-d
| +-e
|
+-a
+-b
+-c
+-d
+-e
"""
__test__ = {"test_print_tree": test_print_tree}
def suite():
suite = unittest.makeSuite(TreeTest,'test')
suite.addTest(doctest.DocTestSuite())
return suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
|
# -*- coding: utf-8 -*-
from django_input_collection.views import CollectorView
from . import collection
class PollView(CollectorView):
template_name = "poll.html"
collector_class = collection.PollTemplateViewCollector
|
from typing import *
from ArithmeticExpression import Operations
class ExpressionEvalulator:
"""
A class to safely evaluate `ArithmeticExpression`s, without any kind of security concern,
like which would be the case using eval(), exec() or similar
"""
@staticmethod
def evaluate(expression: 'ArithmeticExpression') -> int:
raise NotImplementedError()
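# A minimal sketch of what evaluate() could look like, assuming ArithmeticExpression exposes
# `left`, `right` and `operation` attributes and that Operations has an ADD member (these are
# assumptions for illustration, not taken from the source):
#     def evaluate(expression):
#         if isinstance(expression, int):
#             return expression
#         left = evaluate(expression.left)
#         right = evaluate(expression.right)
#         if expression.operation == Operations.ADD:
#             return left + right
#         ...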
|
#!/usr/bin/python
from os import listdir
from os.path import isfile, join
def CamelCase(str):
return str.replace('_', ' ').title().replace(' ', '')
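# e.g. CamelCase('ARRAY_BUFFER') -> 'ArrayBuffer' (illustrative).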
def WriteEnum(file_name, file_base_name, out, is_binding):
out.write('/*\n* Generated by python\n* Any changes to this file will be overwritten by the next python run\n*/\n\n')
out.write('#pragma once\n')
out.write('\n#include "../../config.h"\n')
if is_binding:
out.write('#include "' + file_name.replace('binding.txt', 'type.h') + '"\n')
out.write('#if 0')
lines = open('../src/' + file_name).readlines()
for line in lines:
enum = line[:-1]
out.write('\\\n|| (defined(' + enum + '))')
out.write("""
namespace Ubpa::gl {
\tenum class """)
out.write(CamelCase(file_base_name) + ' : GLenum {\n')
for line in lines:
enum = line[:-1]
out.write('#ifdef '+enum + '\n')
out.write('\t\t' + CamelCase(enum[3:]) + ' = ' + enum + ',\n')
out.write('#endif\n')
out.write("""\t};
}
#endif
""")
def HandleNormalEnumFile(file_name, file_base_name, out):
WriteEnum(file_name, file_base_name, out, False)
def HandleBindingEnumFile(file_name, file_base_name, out):
WriteEnum(file_name, file_base_name, out, True)
binding_lines = open('../src/' + file_name).readlines()
target_lines = open('../src/' + file_name.replace('binding', 'type')).readlines()
out.write('#if 0')
for line_num in range(0, len(binding_lines)):
binding_enum = binding_lines[line_num][:-1]
target_enum = target_lines[line_num][:-1]
out.write('\\\n|| (defined(' + binding_enum + ') && defined(' + target_enum + '))')
out.write("""
namespace Ubpa::gl {
""")
binding = CamelCase(file_base_name)
target = binding.replace('Binding', 'Type')
out.write('\tinline ' + binding + ' BindOf(' +
target + ' type) {\n')
out.write('\t\tswitch (type) {\n')
if len(binding_lines) != len(target_lines):
raise Exception(file_name + ' mismatches ' +
file_name.replace('binding', 'type') + ' in element size.')
for line_num in range(0, len(binding_lines)):
binding_enum = binding_lines[line_num][:-1]
target_enum = target_lines[line_num][:-1]
out.write('#if defined(' + binding_enum + ') && defined(' + target_enum + ')\n')
out.write('\t\tcase ' + target + '::' + CamelCase(target_enum[3:]) + ':\n')
out.write('\t\t\treturn ' + binding + '::' + CamelCase(binding_enum[3:]) + ';\n')
out.write('#endif\n')
out.write('\t\t}\n\t}\n}\n')
out.write('#endif\n')
file_list = [ f for f in listdir('../src') if isfile(join('../src', f)) ]
enum_file = open('../../../include/UGL/enums.h', 'w')
enum_file.write('#pragma once\n\n#include "detail/enum_boolean_op.h"\n\n')
for file_name in file_list:
file_base_name = file_name[:-4]
enum_file.write('#include "detail/enums/' + file_base_name + '.h"\n')
out = open('../../../include/UGL/detail/enums/' + file_base_name + '.h', 'w')
if file_name.endswith('_binding.txt'):
HandleBindingEnumFile(file_name, file_base_name, out)
elif file_name.endswith('.txt'):
HandleNormalEnumFile(file_name, file_base_name, out)
out.close()
|
from demography.models import CensusLabel
from rest_framework import serializers
class CensusLabelSerializer(serializers.ModelSerializer):
class Meta:
model = CensusLabel
fields = '__all__'
|
from hks_pylib.logger.standard import StdUsers
from hks_pylib.cryptography.ciphers.hkscipher import HKSCipher
from hks_pylib.cryptography.ciphers.symmetrics import NoCipher
from hks_pylib.logger import LoggerGenerator, InvisibleLoggerGenerator, Display
from csbuilder.client import ClientResponser
from _simulator.protocol.check.client import CheckClientScheme
from _simulator.protocol.match.client import MatchClientScheme
from _simulator.protocol.search.client import SearchClientScheme
from _simulator.protocol.authentication.client import AuthenticationClientScheme
class ThesisClient(ClientResponser):
def __init__(self,
address: tuple,
cipher: HKSCipher = NoCipher(),
logger_generator: LoggerGenerator = InvisibleLoggerGenerator(),
display: dict = {StdUsers.DEV: Display.ALL}
) -> None:
super().__init__(
address,
cipher=cipher,
name="Client",
logger_generator=logger_generator,
display=display
)
self.session_manager().create_session(CheckClientScheme(self._forwarder.name))
self.session_manager().create_session(MatchClientScheme(self._forwarder.name))
self.session_manager().create_session(SearchClientScheme(self._forwarder.name))
self.session_manager().create_session(AuthenticationClientScheme(self._forwarder.name))
|
from django.shortcuts import render
from .models import *
from .serializers import *
import json
from rest_framework import generics
from rest_framework.decorators import api_view
from rest_framework.reverse import reverse
from rest_framework.response import Response
from rest_framework.pagination import PageNumberPagination
from rest_framework.views import APIView
from django.http import JsonResponse
from rest_framework import status
from django.db.models import Q
from rest_framework import filters
# Create your views here.
@api_view(['GET'])
def api_root(request,format=None):
return Response({
'products': reverse('api:products',request=request,format=format),
'create product': reverse('api:create_product',request=request,format=format),
'product type': reverse('api:product_types',request=request,format=format),
'create size': reverse('api:size-list-create',request=request,format=format),
'product category': reverse('api:product_categories',request=request,format=format),
'create category': reverse('api:create-category',request=request,format=format),
})
class ProductTypeView(generics.ListCreateAPIView):
queryset = ProductType.objects.all()
serializer_class = ProductTypeSerializer
class ProductCategoryView(generics.ListAPIView):
queryset = ProductCategory.objects.all()
serializer_class = ProductCategoryListSerializer
filter_backends = [filters.SearchFilter]
search_fields = ['name_product_category']
class ProductCategoryCreateView(generics.CreateAPIView):
queryset = ProductCategory.objects.all()
serializer_class = ProductCategoryCreateSerializer
def get(self,request):
return Response(status=status.HTTP_200_OK)
class SizeListCreateView(generics.ListCreateAPIView):
queryset = Size.objects.all()
serializer_class = SizeSerializer
class ProductView(generics.ListAPIView):
lookup_field = "id"
queryset = Product.objects.all()
serializer_class = ProductListSerializer
pagination_class = PageNumberPagination
filter_backends = [filters.SearchFilter]
search_fields = ['name_product','price']
class ProductCreateView(generics.CreateAPIView):
queryset = Product.objects.all()
serializer_class = ProductSerializer
class ProductDetailView(generics.RetrieveAPIView):
lookup_field = "id"
queryset = Product.objects.all()
serializer_class = ProductSerializer
class ProductUpdateView(generics.RetrieveUpdateAPIView):
lookup_field = "id"
queryset = Product.objects.all()
serializer_class = ProductUpdateSerializer
class SearchProductView(APIView):
queryset = Product.objects.all()
serializer_class = SearchProductNameSerializer
def get(self,request):
return Response(status=status.HTTP_200_OK)
def post(self,request):
        qs = self.queryset.filter(name_product__contains=request.data['name_search_product']).values(
            'name_product', 'color', 'price', 'product_size__size',
            'product_category__name_product_category', 'product_type__name_product_type')
        return JsonResponse(list(qs), safe=False)
class SearchPriceProductView(APIView):
queryset = Product.objects.all()
def get(self,request,*args,**kwargs):
        if float(self.kwargs["price_one"]) <= 0:
            raise serializers.ValidationError({'error':'Price one is empty or less than zero'})
        elif float(self.kwargs["price_two"]) <= 0:
            raise serializers.ValidationError({'error':'Price two is empty or less than zero'})
        elif float(self.kwargs["price_one"]) > float(self.kwargs["price_two"]):
            raise serializers.ValidationError({'error':'Price one is greater than price two'})
price_one = float(self.kwargs["price_one"])
price_two = float(self.kwargs["price_two"])
query = self.queryset.filter(Q(price__gte=price_one) & Q(price__lte=price_two)).values(
'name_product','color','price',
'product_size__size','product_category__name_product_category',
'product_type__name_product_type'
)
return Response(query)
|
import torch
import torch.nn.functional as F
def _weighted_cross_entropy_loss(preds, edges):
""" Calculate sum of weighted cross entropy loss. """
# Reference:
# hed/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp
# https://github.com/s9xie/hed/issues/7
mask = (edges > 0.5).float()
b, c, h, w = mask.shape
num_pos = torch.sum(mask, dim=[1, 2, 3]).float() # Shape: [b,].
num_neg = c * h * w - num_pos # Shape: [b,].
weight = torch.zeros_like(mask)
weight[edges > 0.5] = num_neg / (num_pos + num_neg)
weight[edges <= 0.5] = num_pos / (num_pos + num_neg)
# Calculate loss.
losses = F.binary_cross_entropy_with_logits(
preds.float(), edges.float(), weight=weight, reduction='none')
loss = torch.sum(losses) / b
return loss
def weighted_cross_entropy_loss(preds, edges):
""" Calculate sum of weighted cross entropy loss. """
# Reference:
# hed/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp
# https://github.com/s9xie/hed/issues/7
mask = (edges > 0.5).float()
b, c, h, w = mask.shape
    num_pos = torch.sum(mask, dim=[1, 2, 3], keepdim=True).float()  # Shape: [b, 1, 1, 1].
    num_neg = c * h * w - num_pos  # Shape: [b, 1, 1, 1].
weight = torch.zeros_like(mask)
#weight[edges > 0.5] = num_neg / (num_pos + num_neg)
#weight[edges <= 0.5] = num_pos / (num_pos + num_neg)
weight.masked_scatter_(edges > 0.5,
torch.ones_like(edges) * num_neg / (num_pos + num_neg))
weight.masked_scatter_(edges <= 0.5,
torch.ones_like(edges) * num_pos / (num_pos + num_neg))
# Calculate loss.
# preds=torch.sigmoid(preds)
losses = F.binary_cross_entropy_with_logits(
preds.float(), edges.float(), weight=weight, reduction='none')
loss = torch.sum(losses) / b
return loss
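# Minimal smoke test (illustrative only, not part of the original training code): checks that the
# batched loss runs on random logits and a sparse random edge map.
if __name__ == "__main__":
    preds = torch.randn(2, 1, 8, 8)
    edges = (torch.rand(2, 1, 8, 8) > 0.9).float()
    print(weighted_cross_entropy_loss(preds, edges).item())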
|
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import numpy
import unittest
from pyscf import gto
from pyscf import scf
from pyscf import ao2mo
from pyscf.scf import _vhf
mol = gto.Mole()
mol.build(
verbose = 5,
output = '/dev/null',
atom = '''
O 0 0 0
H 0 -0.757 0.587
H 0 0.757 0.587''',
basis = 'cc-pvdz',
)
mf = scf.RHF(mol)
mf.scf()
nao, nmo = mf.mo_coeff.shape
class KnowValues(unittest.TestCase):
def test_incore_s4(self):
eri4 = ao2mo.restore(4, mf._eri, nmo)
dm = mf.make_rdm1()
vj0, vk0 = _vhf.incore(eri4, dm, hermi=1)
vj1, vk1 = scf.hf.get_jk(mol, dm, hermi=1)
self.assertTrue(numpy.allclose(vj0,vj1))
self.assertTrue(numpy.allclose(vk0,vk1))
def test_direct_mapdm(self):
numpy.random.seed(1)
dm = numpy.random.random((nao,nao))
eri0 = numpy.zeros((3,nmo,nmo,nmo,nmo))
c_atm = numpy.array(mol._atm, dtype=numpy.int32)
c_bas = numpy.array(mol._bas, dtype=numpy.int32)
c_env = numpy.array(mol._env)
i0 = 0
for i in range(mol.nbas):
j0 = 0
for j in range(mol.nbas):
k0 = 0
for k in range(mol.nbas):
l0 = 0
for l in range(mol.nbas):
buf = gto.getints_by_shell('int2e_ip1_sph', (i,j,k,l),
c_atm, c_bas, c_env, 3)
di,dj,dk,dl = buf.shape[1:]
eri0[:,i0:i0+di,j0:j0+dj,k0:k0+dk,l0:l0+dl] = buf
l0 += dl
k0 += dk
j0 += dj
i0 += di
vj0 = numpy.einsum('nijkl,lk->nij', eri0, dm)
vk0 = numpy.einsum('nijkl,jk->nil', eri0, dm)
vj1, vk1 = _vhf.direct_mapdm('int2e_ip1_sph', 's2kl',
('lk->s1ij', 'jk->s1il'),
dm, 3, mol._atm, mol._bas, mol._env)
self.assertTrue(numpy.allclose(vj0,vj1))
self.assertTrue(numpy.allclose(vk0,vk1))
def test_direct_bindm(self):
numpy.random.seed(1)
dm = numpy.random.random((nao,nao))
vj0, vk0 = _vhf.direct_mapdm('int2e_ip1_sph', 's2kl',
('lk->s1ij', 'jk->s1il'),
dm, 3, mol._atm, mol._bas, mol._env)
dms = (dm,dm)
vj1, vk1 = _vhf.direct_bindm('int2e_ip1_sph', 's2kl',
('lk->s1ij', 'jk->s1il'),
dms, 3, mol._atm, mol._bas, mol._env)
self.assertTrue(numpy.allclose(vj0,vj1))
self.assertTrue(numpy.allclose(vk0,vk1))
def test_rdirect_mapdm(self):
numpy.random.seed(1)
n2c = nao*2
dm = numpy.random.random((n2c,n2c)) + \
numpy.random.random((n2c,n2c)) * 1j
        eri0 = numpy.zeros((3,n2c,n2c,n2c,n2c),dtype=numpy.complex128)
c_atm = numpy.array(mol._atm, dtype=numpy.int32)
c_bas = numpy.array(mol._bas, dtype=numpy.int32)
c_env = numpy.array(mol._env)
i0 = 0
for i in range(mol.nbas):
j0 = 0
for j in range(mol.nbas):
k0 = 0
for k in range(mol.nbas):
l0 = 0
for l in range(mol.nbas):
buf = gto.getints_by_shell('int2e_g1_spinor', (i,j,k,l),
c_atm, c_bas, c_env, 3)
di,dj,dk,dl = buf.shape[1:]
eri0[:,i0:i0+di,j0:j0+dj,k0:k0+dk,l0:l0+dl] = buf
l0 += dl
k0 += dk
j0 += dj
i0 += di
vk0 = numpy.einsum('nijkl,jk->nil', eri0, dm)
vj1, vk1 = _vhf.rdirect_mapdm('int2e_g1_spinor', 'a4ij',
('lk->s2ij', 'jk->s1il'),
dm, 3, mol._atm, mol._bas, mol._env)
self.assertTrue(numpy.allclose(vk0,vk1))
def test_rdirect_bindm(self):
n2c = nao*2
        eri0 = numpy.zeros((n2c,n2c,n2c,n2c),dtype=numpy.complex128)
mfr = scf.DHF(mol)
mfr.scf()
dm = mfr.make_rdm1()[:n2c,:n2c].copy()
c_atm = numpy.array(mol._atm, dtype=numpy.int32)
c_bas = numpy.array(mol._bas, dtype=numpy.int32)
c_env = numpy.array(mol._env)
i0 = 0
for i in range(mol.nbas):
j0 = 0
for j in range(mol.nbas):
k0 = 0
for k in range(mol.nbas):
l0 = 0
for l in range(mol.nbas):
buf = gto.getints_by_shell('int2e_spsp1_spinor', (i,j,k,l),
c_atm, c_bas, c_env, 1)
di,dj,dk,dl = buf.shape
eri0[i0:i0+di,j0:j0+dj,k0:k0+dk,l0:l0+dl] = buf
l0 += dl
k0 += dk
j0 += dj
i0 += di
vk0 = numpy.einsum('ijkl,jk->il', eri0, dm)
vk1 = _vhf.rdirect_bindm('int2e_spsp1_spinor', 's4', ('jk->s1il',),
(dm,), 1, mol._atm, mol._bas, mol._env)
self.assertTrue(numpy.allclose(vk0,vk1))
if __name__ == "__main__":
print("Full Tests for _vhf")
unittest.main()
|
#!/usr/bin/env python3
import argparse
import re
from os.path import basename
def parse():
parser = argparse.ArgumentParser(
description="The normalizer for maxwell protocol in rust."
)
parser.add_argument("--proto_file", required=True,
type=argparse.FileType("r"))
args = parser.parse_args()
return args.proto_file
def capitalize(name):
return "".join(map(lambda s: s.capitalize(), name.lower().split("_")))
def normalize(content, output_file_name):
    # Strip the "_t" suffix explicitly; str.rstrip("_t") would also strip legitimate
    # trailing "t"/"_" characters (e.g. "result_t" would become "Resul").
    output = re.sub(
        r"(enum\s+)([a-zA-Z0-9_]+_t)(\s+\{)",
        lambda match: match.group(1) + capitalize(match.group(2)[:-2]) + match.group(3),
        content
    )
    output = re.sub(
        r"(message\s+)([a-zA-Z0-9_]+_t)(\s+\{)",
        lambda match: match.group(1) + capitalize(match.group(2)[:-2]) + match.group(3),
        output
    )
    output = re.sub(
        r"([a-zA-Z0-9_]+_t)(\s+[a-zA-Z0-9_]+\s+=\s+[0-9]+\s*;)",
        lambda match: capitalize(match.group(1)[:-2]) + match.group(2),
        output
    )
with open(output_file_name, "w") as output_file:
output_file.write(output)
if __name__ == "__main__":
proto_file = parse()
content = proto_file.read()
    output_file_name = re.sub(r"\.proto$", "", proto_file.name) + ".normalized.proto"
normalize(content, output_file_name)
|
APPS = [
{
'name': 'ontherecord',
'num_accounts': 3,
'num_assets': 0,
'ledger': 0,
'payload_func': (
lambda x: {
'app': 'ontherecord',
'content': x
}
)
},
{
'name': 'sharetrader',
'num_accounts': 5,
'num_assets': 64,
'ledger': 0,
'payload_func': (
lambda i: {
'app': 'sharetrader',
'content': {
'x': int(i / 8),
'y': int(i % 8)
}
}
)
},
{
'name': 'interledger',
'accounts': [
{
'name': 'alice',
'ledgers': [
{
'id': 0,
'num_assets': 3
}
]
},
{
'name': 'bob',
'ledgers': [
{
'id': 1,
'num_assets': 3
}
]
},
{
'name': 'chloe',
'ledgers': [
{
'id': 0,
'num_assets': 3
},
{
'id': 1,
'num_assets': 3
}
]
}
],
'payload_func': (
lambda x: {
'app': 'interledger',
'content': x
}
)
}
]
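# Illustrative payload (not part of the config itself): APPS[1]['payload_func'](10)
# returns {'app': 'sharetrader', 'content': {'x': 1, 'y': 2}}.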
|
import json
import boto3
import os
import logging
from botocore.exceptions import ClientError
sqs = boto3.resource("sqs")
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def create_response_headers():
"""
Create response headers
"""
headers = {
"Access-Control-Allow-Headers": "Content-Type,Authorization",
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Methods": "POST",
"Access-Control-Allow-Credentials": True,
}
return headers
def create_return_status(status_code, body):
"""
Create return status
Parameters
----------
status_code: int
Status code for response
body: str
response body
"""
return {
"headers": create_response_headers(),
"statusCode": status_code,
"body": body,
}
def handler(event, context):
"""
Receive comma separated mobile numbers and push them into a queue.
Format is "+91XXXXXXXXXX,+91XXXXXXXXXX"
Parameters
----------
event: dict
event parameters passed to function
context: dict
context parameters passed to function
"""
queue = sqs.Queue(os.environ["QUEUE_URL"])
numbers = json.loads(event["body"])["numbers"]
failed = []
# upload numbers to queue one at a time
for number in numbers.split(","):
try:
queue.send_message(MessageBody=number)
except ClientError as e:
logger.error(f"Failed to add {number} to queue.\n{e}")
failed.append(number)
else:
logger.info(f"Added to queue {number}")
if failed:
failed_numbers = ",".join(failed)
body = json.dumps(f"Failed to add upload numbers: {failed_numbers}")
else:
body = json.dumps("Succesfully uploaded all numbers")
return_status = create_return_status(200, body)
logger.info(return_status)
return return_status
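# Illustrative invocation payload (values are placeholders):
#     handler({"body": json.dumps({"numbers": "+91XXXXXXXXXX,+91XXXXXXXXXX"})}, None)
# pushes each number onto the queue named by the QUEUE_URL environment variable.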
|
from ..rfc6749.errors import OAuth2Error
# https://tools.ietf.org/html/rfc8628#section-3.5
class AuthorizationPendingError(OAuth2Error):
"""The authorization request is still pending as the end user hasn't
yet completed the user-interaction steps (Section 3.3).
"""
error = 'authorization_pending'
class SlowDownError(OAuth2Error):
"""A variant of "authorization_pending", the authorization request is
still pending and polling should continue, but the interval MUST
be increased by 5 seconds for this and all subsequent requests.
"""
error = 'slow_down'
class ExpiredTokenError(OAuth2Error):
"""The "device_code" has expired, and the device authorization
session has concluded. The client MAY commence a new device
authorization request but SHOULD wait for user interaction before
restarting to avoid unnecessary polling.
"""
error = 'expired_token'
|
from collections import deque
from typing import List
import os
import numpy as np
import torch
from unityagents import UnityEnvironment
from dqn_agent import Agent
from utils import Experience, device
from model import DQN
class DQNTrainer:
def __init__(self,
env_filename: str,
n_episodes: int = 2000,
max_t: int = 1000,
eps_start: int = 1.0,
eps_end: int = 0.01,
eps_decay: int = 0.995,
save_every: int = 100,
target_score: float = 13.0,
):
"""Deep Q-Learning.
Params
======
env_filename: path to the unity env file.
n_episodes (int): maximum number of training episodes
max_t (int): maximum number of time-steps per episode
eps_start (float): starting value of epsilon, for epsilon-greedy action selection
eps_end (float): minimum value of epsilon
eps_decay (float): multiplicative factor (per episode) for decreasing epsilon
save_every: save the model every {}
target_score: score that needs to be reached to consider the problem as solved.
"""
self.env_filename = env_filename
self.n_episodes = n_episodes
self.max_t = max_t
self.eps_start = eps_start
self.eps_end = eps_end
self.eps_decay = eps_decay
self.save_every = save_every
self.target_score = target_score
self.agent = None
self.scores = None
self.scores_window = None
self.eps = None
self.env = None
self.brain_name = None
self.action_size = None
self.state_size = None
self.solved_weights = None
def init_env(self):
self.env = UnityEnvironment(file_name=self.env_filename)
self.brain_name = self.env.brain_names[0]
env_info = self.env.reset(train_mode=True)[self.brain_name]
brain = self.env.brains[self.brain_name]
self.action_size = brain.vector_action_space_size
self.state_size = len(env_info.vector_observations[0])
def instantiate_agent(self, hidden_layers: List[int], **kwargs):
self.agent = Agent(state_size=self.state_size, action_size=self.action_size, hidden_layers=hidden_layers, **kwargs)
self.agent.init_dqn_networks()
def train_dqn(self):
self.init_training()
for i_episode in range(1, self.n_episodes + 1):
self.play_episode()
self.update_eps()
self.log(i_episode)
if i_episode % self.save_every == 0:
self.save_model('checkpoint_{}.pth'.format(i_episode))
if np.mean(self.scores_window) >= self.target_score:
avg_score = np.mean(self.scores_window)
print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode - 100, avg_score))
self.solved_weights = 'solved_{}_score_{:.2f}.pth'.format(self.agent.model_id, avg_score)
self.save_model(self.solved_weights)
break
def init_training(self):
self.scores = [] # list containing scores from each episode
self.scores_window = deque(maxlen=100) # last 100 scores
self.update_eps()
def play_episode(self):
state = self.env.reset(train_mode=True)[self.brain_name].vector_observations[0]
score = 0
for _ in range(self.max_t):
experience = self.play_step(state)
self.agent.step(experience)
state = experience.next_state
score += experience.reward
if experience.done:
break
self.save_score(score)
def play_step(self, state, inference: bool = False) -> Experience:
if inference:
action = self.act(state)
else:
action = self.agent.act(state, self.eps)
brain_info = self.env.step(action)[self.brain_name]
next_state = brain_info.vector_observations[0]
reward = brain_info.rewards[0]
done = brain_info.local_done[0]
return Experience(state, action, reward, next_state, done)
def save_score(self, score):
self.scores_window.append(score)
self.scores.append(score)
def update_eps(self):
if self.eps is None:
self.eps = self.eps_start # init epsilon
else:
self.eps = max(self.eps_end, self.eps_decay * self.eps) # decrease epsilon
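        # e.g. with the defaults eps_start=1.0 and eps_decay=0.995, eps ~= 0.995**100 ~= 0.61
        # after 100 episodes (illustrative arithmetic).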
def log(self, episodes: int):
print('\rEpisode {}\tAverage Score: {:.2f}'.format(episodes, np.mean(self.scores_window)), end="")
if episodes % 100 == 0:
print('\rEpisode {}\tAverage Score: {:.2f}'.format(episodes, np.mean(self.scores_window)))
def save_model(self, filename: str, directory: str = 'saved_weights'):
torch.save(self.agent.dqn_local.state_dict(), os.path.join(directory, 'local_{}'.format(filename)))
torch.save(self.agent.dqn_target.state_dict(), os.path.join(directory, 'target_{}'.format(filename)))
def set_trained_dqn(self, weights_path: str = None):
weights_path = self.solved_weights if weights_path is None else weights_path
if weights_path is None:
raise ValueError('please provide the path to the trained model weights.')
if self.agent.dqn_local is None:
self.agent.dqn_local = DQN(self.state_size, self.action_size, self.agent.hidden_layers)
self.agent.init_dqn_networks(inference=True)
self.agent.dqn_local.load_state_dict(torch.load(weights_path))
def play(self):
self.eps = 0.
env_info = self.env.reset(train_mode=True)[self.brain_name] # reset the environment
state = env_info.vector_observations[0] # get the current state
score = 0 # initialize the score
timestep = 0
while True:
timestep += 1
experience = self.play_step(state, inference=True)
            state = experience.next_state  # advance to the new state; experience.state would replay the same observation
score += experience.reward
print('\rTime step {}\tScore: {:.2f}'.format(timestep, score), end="")
if experience.done:
break
print("\rFinal Score: {:.2f}".format(score))
def act(self, state):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
"""
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
self.agent.dqn_local.eval()
with torch.no_grad():
action_values = self.agent.dqn_local(state)
return np.argmax(action_values.cpu().data.numpy())
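# Illustrative driver (the env path and layer sizes below are placeholders, not values from the source):
#     trainer = DQNTrainer("path/to/Banana_env", n_episodes=500)
#     trainer.init_env()
#     trainer.instantiate_agent(hidden_layers=[64, 64])
#     trainer.train_dqn()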
|
import sys
from pyspark import cloudpickle
from pyspark.sql.types import BooleanType
import pandas as pd
import numpy as np
from great_expectations.dataset import PandasDataset
def create_suite():
df = pd.DataFrame()
df['num'] = np.random.randint(0, 10, 100)
df['num2'] = np.random.randint(0, 20, 100)
ds = PandasDataset.from_dataset(df)
ds.expect_column_values_to_be_between('num', 0, 10)
ds.expect_column_values_to_be_between('num2', 0, 20)
return ds.get_expectation_suite()
def create_validator(suite):
def validate(df) -> pd.DataFrame:
ds = PandasDataset.from_dataset(df)
# print(ds, ds.shape)
result = ds.validate(suite, result_format='COMPLETE')
valid_rows = pd.Series([True] * ds.shape[0])
# print(result)
for check in result.results:
if check.success:
continue
valid_rows.iloc[check.result['unexpected_index_list']] = False
return valid_rows
return validate
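# Illustrative local check of the validator (assumption: this is separate from how the pickled
# command is consumed downstream):
#     validate = create_validator(create_suite())
#     validate(pd.DataFrame({'num': [1, 50], 'num2': [5, 5]}))  # -> [True, False]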
def main(dest_path):
with open(dest_path, 'wb') as f:
fun = create_validator(create_suite())
command = (fun, BooleanType())
cloudpickle.dump(command, f)
if __name__ == '__main__':
main(sys.argv[1])
|
import numpy as np
from skimage import transform, io, img_as_float, exposure
"""
Data was preprocessed in the following ways:
- resize to im_shape;
- equalize histogram (skimage.exposure.equalize_hist);
- normalize by data set mean and std.
Resulting shape should be (n_samples, img_width, img_height, 1).
It may be more convenient to store preprocessed data for faster loading.
Dataframe should contain paths to images and masks as two columns (relative to `path`).
"""
def loadDataJSRT(df, path, im_shape):
"""This function loads data preprocessed with `preprocess_JSRT.py`"""
X, y = [], []
for i, item in df.iterrows():
img = io.imread(path + item[0])
img = transform.resize(img, im_shape)
img = np.expand_dims(img, -1)
mask = io.imread(path + item[1])
mask = transform.resize(mask, im_shape)
mask = np.expand_dims(mask, -1)
X.append(img)
y.append(mask)
X = np.array(X)
y = np.array(y)
X -= X.mean()
X /= X.std()
    print('### Data loaded')
    print('\t{}'.format(path))
    print('\t{}\t{}'.format(X.shape, y.shape))
    print('\tX:{:.1f}-{:.1f}\ty:{:.1f}-{:.1f}\n'.format(X.min(), X.max(), y.min(), y.max()))
    print('\tX.mean = {}, X.std = {}'.format(X.mean(), X.std()))
return X, y
def loadDataMontgomery(df, path, im_shape):
"""Function for loading Montgomery dataset"""
X, y = [], []
for i, item in df.iterrows():
img = img_as_float(io.imread(path + item[0]))
gt = io.imread(path + item[1])
l, r = np.where(img.sum(0) > 1)[0][[0, -1]]
t, b = np.where(img.sum(1) > 1)[0][[0, -1]]
img = img[t:b, l:r]
mask = gt[t:b, l:r]
img = transform.resize(img, im_shape)
img = exposure.equalize_hist(img)
img = np.expand_dims(img, -1)
mask = transform.resize(mask, im_shape)
mask = np.expand_dims(mask, -1)
X.append(img)
y.append(mask)
X = np.array(X)
y = np.array(y)
X -= X.mean()
X /= X.std()
    print('### Data loaded')
    print('\t{}'.format(path))
    print('\t{}\t{}'.format(X.shape, y.shape))
    print('\tX:{:.1f}-{:.1f}\ty:{:.1f}-{:.1f}\n'.format(X.min(), X.max(), y.min(), y.max()))
    print('\tX.mean = {}, X.std = {}'.format(X.mean(), X.std()))
return X, y
def loadDataGeneral(df, path, im_shape):
"""Function for loading arbitrary data in standard formats"""
X, y = [], []
for i, item in df.iterrows():
img = img_as_float(io.imread(path + item[0]))
mask = io.imread(path + item[1])
img = transform.resize(img, im_shape)
img = exposure.equalize_hist(img)
img = np.expand_dims(img, -1)
mask = transform.resize(mask, im_shape)
mask = np.expand_dims(mask, -1)
X.append(img)
y.append(mask)
X = np.array(X)
y = np.array(y)
X -= X.mean()
X /= X.std()
    print('### Dataset loaded')
    print('\t{}'.format(path))
    print('\t{}\t{}'.format(X.shape, y.shape))
    print('\tX:{:.1f}-{:.1f}\ty:{:.1f}-{:.1f}\n'.format(X.min(), X.max(), y.min(), y.max()))
    print('\tX.mean = {}, X.std = {}'.format(X.mean(), X.std()))
return X, y
|
import numpy as np
import pandas as pd
import seaborn as sns
import pickle
import matplotlib.pyplot as plt
from sklearn import metrics as m
from utils import write_to_file, unpipe
def get_metrics(true_labels, predicted_labels):
acc = np.round(m.accuracy_score(true_labels, predicted_labels),4)
prec = np.round(m.precision_score(true_labels,predicted_labels,average='weighted'),4)
rec = np.round(m.recall_score(true_labels, predicted_labels, average='weighted'), 4)
fsc = np.round(m.f1_score(true_labels, predicted_labels, average='weighted', zero_division=0), 4)
print('Accuracy:', acc)
print('Precision:', prec)
print('Recall:', rec)
print('F1 Score:', fsc)
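# e.g. get_metrics([0, 1, 1], [0, 1, 0]) reports Accuracy: 0.6667 (2 of 3 correct), alongside
# the weighted precision/recall/F1 values -- illustrative only.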
def get_classification_report(true_labels, predicted_labels,
classes, target_names=None, digits=3):
"""
    Returns a classification report with recall, precision and F1 score.
"""
report = m.classification_report(
y_true=true_labels,
y_pred=predicted_labels,
labels=classes,
target_names=target_names,
digits=digits,
zero_division=0
)
return report
def display_model_performance_metrics(true_labels, predicted_labels, classes):
print('Model Performance metrics:')
print('-'*30)
get_metrics(
true_labels=true_labels,
predicted_labels=predicted_labels)
print('\nModel Classification report:')
print('-'*30)
    report = get_classification_report(
        true_labels=true_labels,
        predicted_labels=predicted_labels,
        classes=classes)
    print(report)
print('\nPrediction Confusion Matrix:')
print('-'*30)
display_confusion_matrix(
true_labels=true_labels,
predicted_labels=predicted_labels,
classes=classes)
def get_confusion_matrix(true_labels, predicted_labels):
return m.confusion_matrix(true_labels, predicted_labels)
def plot_confusion_matrix(cm, log_dir, names=None, cmap='Blues', figsize=(15,13), show=True):
"""
"""
# Font sizes
axis_font = 10 # font size of x,y labels
cell_font = 7 # font size of sns heatmap
plt.rc('xtick', labelsize=axis_font)
plt.rc('ytick', labelsize=axis_font)
plt.rc('axes', titlesize=16) # font size of title
plt.rc('axes', labelsize=12) # size of 'predicted','true label'
plt.figure(figsize=figsize)
ax = plt.subplot()
# Show percentages inside cells and hide empty cells
cm_sum = np.sum(cm, axis=1, keepdims=True)
cm_perc = cm / cm_sum.astype(float) * 100
annot = np.empty_like(cm).astype(str)
nrows, ncols = cm.shape
for i in range(nrows):
for j in range(ncols):
c = cm[i, j]
p = cm_perc[i, j]
if i == j:
s = cm_sum[i]
annot[i, j] = '%.1f%%\n%d/%d' % (p, c, s)
elif c == 0:
annot[i, j] = ''
else:
annot[i, j] = '%.1f%%\n%d' % (p, c)
#sns.set(font_scale=0.7) # for label size
hm = sns.heatmap(cm_perc, annot=annot, fmt='', cmap=cmap,
linewidths=0.4, linecolor="white", annot_kws={"size": cell_font});
cbar = hm.collections[0].colorbar
cbar.set_ticks([0, 25, 50, 75, 100])
    cbar.set_ticklabels(['0%', '25%', '50%', '75%', '100%'])
#labels, title and ticks
ax.set_xlabel('Predicted labels');
ax.set_ylabel('True labels');
ax.set_title("Confusion matrix");
if names is None:
ax.xaxis.set_ticklabels(range(nrows), rotation=40, ha="right");
ax.yaxis.set_ticklabels(range(nrows), rotation=40, ha="right");
else:
ax.xaxis.set_ticklabels(names, rotation=40, ha="right");
ax.yaxis.set_ticklabels(names, rotation=40, ha="right");
plt.tight_layout()
plt.savefig(log_dir+"/confusion_matrix.pdf", format="pdf")
if show:
plt.show()
else:
plt.close()
def display_confusion_matrix(true_labels, predicted_labels, classes):
total_classes = len(classes)
level_labels = [total_classes*[0], list(range(total_classes))]
cm = m.confusion_matrix(y_true=true_labels, y_pred=predicted_labels, labels=classes)
cm_frame = pd.DataFrame(
data=cm,
index=pd.MultiIndex(levels=[['Actual:'], classes], codes=level_labels),
columns=pd.MultiIndex(levels=[['Predicted:'], classes], codes=level_labels),
)
print(cm_frame)
def plot_lr_and_accuracy(history, conf):
"""
"""
import seaborn as sns
sns.set()
SMALL_SIZE = 12
MEDIUM_SIZE = 14
BIGGER_SIZE = 16
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=BIGGER_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
acc = history.history['sparse_categorical_accuracy']
val_acc = history.history['val_sparse_categorical_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(history.epoch[-1]+1)
if conf["decay_rate"] > 0:
lr = history.history['lr']
# Plot the learning rate
plt.figure(figsize=(8, 6))
plt.plot(epochs_range, lr, label='Learning Rate')
plt.xlabel('Epoch')
        plt.ylabel('Learning rate')
plt.title('Learning Rate development during training');
plt.tight_layout()
plt.savefig(conf["log_dir"]+'/learning_rate.pdf', format='pdf')
# Plot train-val accuracy and loss
plt.figure(figsize=(14, 6))
# Subplot 1
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylim([0, 1])
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.title('Training and Validation Accuracy')
# Subplot 2
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylim([0.0, 3])
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Training and Validation Loss')
plt.tight_layout()
plt.savefig(conf["log_dir"]+'/accuracy_and_loss.pdf', format='pdf')
plt.show()
def show_dataset_predictions(true_labels, pred_labels, pred_confidence, images, conf):
grid_width = 5
grid_height = 5
f, ax = plt.subplots(grid_width, grid_height)
f.set_size_inches(22, 22)
img_idx = 0
for i in range(0, grid_width):
for j in range(0, grid_height):
actual = conf["class_names"][true_labels[img_idx]]
lab = conf["class_names"][pred_labels[img_idx]]
pred = np.round(pred_confidence[img_idx], 2)
title = 'Actual: '+actual+'\nPred: '+lab+ '\nConf: '+str(pred)
ax[i][j].axis('off')
ax[i][j].set_title(title)
ax[i][j].imshow(images[img_idx])
img_idx += 1
plt.subplots_adjust(left=0, bottom=0, right=2, top=2, wspace=0.5, hspace=0.5)
plt.tight_layout()
plt.savefig("{}/checkout-eval_ds-pred.pdf".format(conf["log_dir"]), format="pdf")
def evaluate_model(model, history, ds, conf):
# Save the metrics and model from training
write_to_file(history.history, conf, "history")
write_to_file(conf, conf, "conf")
with open(conf["log_dir"]+"/history.pkl", 'wb') as f:
pickle.dump(history.history, f)
if conf["num_epochs"] > 9:
model.save(conf["log_dir"]+'/model')
# Plot learning rate and loss
plot_lr_and_accuracy(history, conf)
# Create true_labels and pred_labels for later evaluations
eval_ds = unpipe(ds["test"], conf["ds_sizes"]["test"]).as_numpy_iterator()
eval_ds = np.array(list(eval_ds))
true_labels = list(eval_ds[:,1])
eval_images = np.stack(eval_ds[:,0], axis=0)
# Evaluate model on test dataset
model_evaluation = model.evaluate(ds["test"], verbose=0, steps=conf["steps"]["test"])
write_to_file(model_evaluation, conf, "evaluate_val")
# Create predictions and pred_labels
predictions = model.predict(eval_images, verbose=1)
pred_confidence = [np.max(pred) for pred in predictions]
pred_labels = [np.argmax(pred) for pred in predictions]
# Classification report
report = get_classification_report(
true_labels,
pred_labels,
range(conf["num_classes"]),
target_names=conf["class_names"]
)
    print(report)
write_to_file(report, conf, "classification_report")
# Confusion matrix
cm = get_confusion_matrix(true_labels, pred_labels)
plot_confusion_matrix(cm, conf["log_dir"], conf["class_names"], figsize=(12,10), show=False)
|
from string import ascii_letters, digits
from random import shuffle
def random_monoalpha_cipher(pool=None):
"""Generate a Monoalphabetic Cipher"""
if pool is None:
        pool = ascii_letters + digits
original_pool = list(pool)
shuffled_pool = list(pool)
shuffle(shuffled_pool)
return dict(zip(original_pool, shuffled_pool))
def inverse_monoalpha_cipher(monoalpha_cipher):
"""Given a Monoalphabetic Cipher (dictionary) return the inverse."""
inverse_monoalpha = {}
    for key, value in monoalpha_cipher.items():
inverse_monoalpha[value] = key
return inverse_monoalpha
def encrypt_with_monoalpha(message, monoalpha_cipher):
encrypted_message = []
for letter in message:
encrypted_message.append(monoalpha_cipher.get(letter, letter))
return ''.join(encrypted_message)
def decrypt_with_monoalpha(encrypted_message, monoalpha_cipher):
return encrypt_with_monoalpha(
encrypted_message,
inverse_monoalpha_cipher(monoalpha_cipher)
)
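# Minimal usage sketch (not part of the original module): round-trips a short
# message through a freshly generated cipher to show how the helpers compose.
if __name__ == '__main__':
    cipher = random_monoalpha_cipher()
    secret = encrypt_with_monoalpha('Hello 42', cipher)
    assert decrypt_with_monoalpha(secret, cipher) == 'Hello 42'
    print(secret)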
|
import pytest
from osdu_commons.clients.rest_client import HttpServerException, HttpNotFoundException
@pytest.mark.usefixtures('smds_data')
def test_list_workflows(workflow_client):
workflows = workflow_client.list_workflows()
assert workflows.batch
@pytest.mark.usefixtures('smds_data')
def test_list_workflows_with_filter(workflow_client):
workflows = workflow_client.list_workflows({"some_filter": 123})
assert workflows.batch
def test_describe_workflow_smds_data(workflow_client, smds_workflow_job_id, smds_srn):
workflow_job_description = workflow_client.describe_workflow(smds_workflow_job_id)
assert workflow_job_description.state.has_succeeded()
assert workflow_job_description.workflow_job_id == smds_workflow_job_id
assert str(workflow_job_description.work_product_id) == smds_srn
def test_describe_workflow_swps_data(workflow_client, swps_workflow):
workflow_job_description = workflow_client.describe_workflow(swps_workflow)
assert workflow_job_description.state.has_succeeded()
assert workflow_job_description.workflow_job_id == swps_workflow
def test_describe_workflow_that_does_not_exist(workflow_client):
with pytest.raises(HttpNotFoundException, match='Workflow not found for id test_workflow_id_1234'):
workflow_client.describe_workflow('test_workflow_id_1234')
def test_describe_workflow_no_workflow_id(workflow_client):
with pytest.raises(HttpServerException):
workflow_client.describe_workflow('')
|
# coding=utf-8
from __future__ import absolute_import
from .._compat import to_native
HTTP_SEE_OTHER = 303
def get_full_path(request):
"""Return the current relative path including the query string.
Eg: “/foo/bar/?page=1”
"""
path = request.path
if request.query_string:
path += '?' + to_native(request.query_string)
return path
def make_full_url(request, url):
"""Get a relative URL and returns the absolute version.
Eg: “/foo/bar?q=is-open” ==> “http://example.com/foo/bar?q=is-open”
"""
return request.url_root + url.lstrip('/')
def is_post(request):
"""Return ``True`` if the method of the request is ``POST``.
"""
return request.method.upper() == 'POST'
def is_idempotent(request):
"""Return ``True`` if the method of the request is ``GET`` or ``HEAD``.
"""
return request.method.upper() in ('GET', 'HEAD')
def redirect(url):
"""Return an HTTP 303 See Other response for this url, in the
idiom of the framework.
"""
from werkzeug.utils import redirect
return redirect(url, code=HTTP_SEE_OTHER)
def raise_forbidden(msg='You are not allowed to access this.'):
"""Return an HTTP 403 Forbidden response (with the passed message), in the
idiom of the framework.
"""
from werkzeug.exceptions import Forbidden
raise Forbidden(msg)
def get_from_params(request, key):
"""Try to read a value named ``key`` from the GET parameters.
"""
data = getattr(request, 'json', None) or request.values
value = data.get(key)
return to_native(value)
def get_from_headers(request, key):
"""Try to read a value named ``key`` from the headers.
"""
value = request.headers.get(key)
return to_native(value)
def get_post_data(request):
"""Return all the POST data from the request.
"""
return getattr(request, 'json', None) or request.form or {}
def make_response(body, mimetype='text/html'):
"""Build a framework specific HTPP response, containing ``body`` and
marked as the type ``mimetype``.
"""
from werkzeug.wrappers import Response
if isinstance(body, Response):
body.mimetype = mimetype
return body
return Response(body, mimetype=mimetype)
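# Illustrative sketch (not part of the original module; assumes Werkzeug is
# installed): shows how make_response treats plain bodies versus Response objects.
def _demo_make_response():
    from werkzeug.wrappers import Response
    # A plain string body is wrapped in a new Response with the given mimetype.
    resp = make_response('{"ok": true}', mimetype='application/json')
    assert isinstance(resp, Response)
    assert resp.mimetype == 'application/json'
    # An existing Response is reused; only its mimetype is overridden.
    assert make_response(resp, mimetype='text/plain').mimetype == 'text/plain'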
|
import pytest
from widgetastic.widget import View
from widgetastic_patternfly4.ouia import Navigation
TESTING_PAGE_URL = "https://patternfly-docs-ouia.netlify.app/documentation/react/components/nav"
@pytest.fixture
def view(browser):
class TestView(View):
ROOT = ".//div[@id='ws-react-c-nav-ouia']"
nav = Navigation("Nav Default")
return TestView(browser)
def test_navigation(browser, view):
assert view.nav.currently_selected == ["Link 1"]
assert view.nav.nav_item_tree() == ["Link 1", "Link 2", "Link 3", "Link 4"]
|
from .hash import Hash
from .heap import Heap
from .ordered_archive import Ordered
from .variable_heap import VLHeap
__all__ = ["Hash", "Heap", "Ordered", "VLHeap"]
|
# coding: utf-8
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
S3 Key Encrypt on Bucket Changes
"""
import boto3
import json
from c7n.resources.s3 import EncryptExtantKeys
s3 = config = None
def init():
global s3, config
if s3 is not None:
return
s3 = boto3.client('s3')
with open('config.json') as fh:
config = json.load(fh)
    # a multipart copy on a multi-GB file can take a long time
config['large'] = False
def process_key_event(event, context):
init()
processor = EncryptExtantKeys(config)
for record in event.get('Records', []):
bucket = record['s3']['bucket']['name']
key = {'Key': record['s3']['object']['key']}
version = record['s3']['object'].get('versionId')
if version is not None:
result = processor.process_version(s3, key, bucket)
else:
result = processor.process_key(s3, key, bucket)
if not result:
return
print("remediated %s:%s" % (bucket, key['Key']))
def get_function(session_factory, role, buckets=None):
from c7n.mu import (
LambdaFunction, custodian_archive, BucketNotification)
config = dict(
name='c7n-s3-encrypt',
handler='s3crypt.process_key_event',
memory_size=256,
timeout=15,
role=role,
runtime="python2.7",
description='Custodian S3 Key Encrypt')
if buckets:
config['events'] = [
BucketNotification({}, session_factory, b)
for b in buckets]
archive = custodian_archive()
archive.create()
src = __file__
if src.endswith('.pyc'):
src = src[:-1]
archive.add_file(src, 's3crypt.py')
archive.add_contents('config.json', json.dumps({}))
archive.close()
return LambdaFunction(config, archive)
|
from __future__ import print_function
from crhelper import CfnResource
from pyqldb.driver.pooled_qldb_driver import PooledQldbDriver
from lib.constants import Constants
from lib.sample_data import convert_object_to_ion, SampleData, get_document_ids_from_dml_results
from lib.connect_to_ledger import create_qldb_session
import logging
import time
logger = logging.getLogger(__name__)
helper = CfnResource(json_logging=False, log_level='DEBUG', boto_level='CRITICAL')
try:
pass
except Exception as e:
helper.init_failure(e)
@helper.create
def create(event, context):
qldb_ledger = event['ResourceProperties']['QldbLedger']
logger.info('CREATE: %s', qldb_ledger)
try:
with create_qldb_session(ledger_name=qldb_ledger) as session:
session.execute_lambda(lambda x: create_table(x, Constants.DRIVERS_LICENSE_TABLE_NAME) and
create_table(x, Constants.PERSON_TABLE_NAME) and
create_table(x, Constants.VEHICLE_TABLE_NAME) and
create_table(x, Constants.VEHICLE_REGISTRATION_TABLE_NAME),
lambda retry_attempt: logger.info('Retrying due to OCC conflict...'))
logger.info('Tables created successfully.')
time.sleep(10)
session.execute_lambda(lambda x: create_index(x, Constants.PERSON_TABLE_NAME, Constants.GOV_ID_INDEX_NAME)
and create_index(x, Constants.VEHICLE_TABLE_NAME, Constants.VEHICLE_VIN_INDEX_NAME)
and create_index(x, Constants.VEHICLE_REGISTRATION_TABLE_NAME, Constants.LICENSE_PLATE_NUMBER_INDEX_NAME)
and create_index(x, Constants.VEHICLE_REGISTRATION_TABLE_NAME, Constants.VEHICLE_VIN_INDEX_NAME)
and create_index(x, Constants.DRIVERS_LICENSE_TABLE_NAME, Constants.PERSON_ID_INDEX_NAME)
and create_index(x, Constants.DRIVERS_LICENSE_TABLE_NAME, Constants.LICENSE_NUMBER_INDEX_NAME),
lambda retry_attempt: logger.info('Retrying due to OCC conflict...'))
logger.info('Indexes created successfully.')
time.sleep(10)
session.execute_lambda(lambda executor: update_and_insert_documents(executor),
lambda retry_attempt: logger.info('Retrying due to OCC conflict...'))
logger.info('Documents inserted successfully!')
except Exception as err:
logger.exception('Errors creating resources.')
logger.exception(err)
return None
@helper.update
def update(event, context):
logger.info('UPDATE')
return True
@helper.delete
def delete(event, context):
logger.info('DELETE')
return True
### main handler
def handler(event, context):
helper(event, context)
def create_table(transaction_executor, table_name):
logger.info("Creating the '{}' table...".format(table_name))
statement = 'CREATE TABLE {}'.format(table_name)
cursor = transaction_executor.execute_statement(statement)
logger.info('{} table created successfully.'.format(table_name))
return len(list(cursor))
def create_index(transaction_executor, table_name, index_attribute):
logger.info("Creating index on '{}'...".format(index_attribute))
statement = 'CREATE INDEX on {} ({})'.format(table_name, index_attribute)
cursor = transaction_executor.execute_statement(statement)
return len(list(cursor))
def update_person_id(document_ids):
new_drivers_licenses = SampleData.DRIVERS_LICENSE.copy()
new_vehicle_registrations = SampleData.VEHICLE_REGISTRATION.copy()
for i in range(len(SampleData.PERSON)):
drivers_license = new_drivers_licenses[i]
registration = new_vehicle_registrations[i]
drivers_license.update({'PersonId': str(document_ids[i])})
registration['Owners']['PrimaryOwner'].update({'PersonId': str(document_ids[i])})
return new_drivers_licenses, new_vehicle_registrations
def insert_documents(transaction_executor, table_name, documents):
logger.info('Inserting some documents in the {} table...'.format(table_name))
statement = 'INSERT INTO {} ?'.format(table_name)
cursor = transaction_executor.execute_statement(statement, convert_object_to_ion(documents))
list_of_document_ids = get_document_ids_from_dml_results(cursor)
return list_of_document_ids
def update_and_insert_documents(transaction_executor):
list_ids = insert_documents(transaction_executor, Constants.PERSON_TABLE_NAME, SampleData.PERSON)
logger.info("Updating PersonIds for 'DriversLicense' and PrimaryOwner for 'VehicleRegistration'...")
new_licenses, new_registrations = update_person_id(list_ids)
insert_documents(transaction_executor, Constants.VEHICLE_TABLE_NAME, SampleData.VEHICLE)
insert_documents(transaction_executor, Constants.VEHICLE_REGISTRATION_TABLE_NAME, new_registrations)
insert_documents(transaction_executor, Constants.DRIVERS_LICENSE_TABLE_NAME, new_licenses)
|
# Souffle - A Datalog Compiler
# Copyright (c) 2022 The Souffle Developers. All rights reserved
# Licensed under the Universal Permissive License v 1.0 as shown at:
# - https://opensource.org/licenses/UPL
# - <souffle root>/licenses/SOUFFLE-UPL.txt
import glob
import os
import shutil
from common import *
args = os.sys.argv
args.pop(0)
if len(args) == 0:
raise RuntimeError("Missing INPUT_DIR")
input_dir = args.pop(0)
if len(args) == 0:
raise RuntimeError("Missing OUTPUT_DIR")
output_dir = args.pop(0)
if len(args) == 0:
raise RuntimeError("Missing TEST_NAME")
test_name = args.pop(0)
if len(args) > 0:
extra_data = args.pop(0)
else:
extra_data = None
if len(args) != 0:
raise RuntimeError("Unexpected argument")
if os.path.exists(output_dir) and (not os.path.isdir(output_dir)):
raise RuntimeError("Output path exists but is not a directory")
# clean output directory
if os.path.isdir(output_dir):
for file in os.listdir(output_dir):
path = os.path.join(output_dir, file)
if os.path.isdir(path):
shutil.rmtree(path) # only remove directories
else:
os.remove(path) # only remove files
else:
os.makedirs(output_dir, exist_ok=True)
if extra_data == "json":
for file in [os.path.basename(p) for p in glob.glob(os.path.join(input_dir,"*.json"))]:
shutil.copyfile(
os.path.join(input_dir, file),
os.path.join(output_dir, "{}.expected".format(file)))
elif extra_data == "python" or extra_data == "java":
shutil.copyfile(
os.path.join(input_dir, "{}-{}.out".format(test_name, extra_data)),
os.path.join(output_dir, "{}-{}.out.expected".format(test_name, extra_data)))
elif extra_data == "provenance":
shutil.copyfile(
os.path.join(input_dir, "{}.in".format(test_name)),
os.path.join(output_dir, "{}.in".format(test_name)))
shutil.copyfile(
os.path.join(input_dir, "{}.out".format(test_name)),
os.path.join(output_dir, "{}.out.expected".format(test_name)))
shutil.copyfile(
os.path.join(input_dir, "{}.err".format(test_name)),
os.path.join(output_dir, "{}.err.expected".format(test_name)))
csvs = [os.path.basename(p) for p in glob.glob(os.path.join(input_dir,"*.csv"))]
with open(os.path.join(output_dir, "num.expected"), "w") as num_file:
num_file.write("{}\n".format(len(csvs)))
for file in csvs:
shutil.copyfile(
os.path.join(input_dir, file),
os.path.join(output_dir, "{}.expected".format(file)))
owd = os.getcwd()
try:
os.chdir(output_dir)
for file in glob.glob("*.expected"):
sort_file(file)
finally:
os.chdir(owd)
|
#! /usr/bin/env python3
# TODO: Standard configuration directory
# TODO: xdg
# TODO: Registrar
# TODO: SSH-like features: known_hosts, host configs
'''
A simple ffmpeg-based SRTP stream setup tool.
Doesn't have any ambitions of dealing with more complex matters such as
conferencing, NAT traversal, or codec negotiation; only sane defaults and sane
environments are supported.
'''
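# Example invocations implied by the argument parser below (the script name and
# file paths are illustrative, not part of the original):
#   ./simplevoip.py --cert me.pem --certs peers.pem init --dh-params dh.pem
#   ./simplevoip.py --cert me.pem --certs peers.pem server --dh-params dh.pem 20000
#   ./simplevoip.py --cert me.pem --certs peers.pem client alice example.org 20000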
# TODO: https://github.com/webrtc/samples
# https://webrtchacks.com/chrome-extension/
from functools import partial
from argparse import ArgumentParser
from base64 import b64encode
from socket import (socket, AF_INET6, SOCK_STREAM, IPPROTO_TCP,
getaddrinfo, AI_PASSIVE,
SOL_SOCKET, SO_REUSEADDR)
from pprint import pformat
from sys import platform, exit
import subprocess
import logging
import json
import ssl
_DEFAULT_DEVICE = {'linux': 'alsa',
'darwin': 'avfoundation',
'win32': 'dshow',
'cygwin': 'dshow'}[platform]
_DEFAULT_CODEC, *_DEFAULT_CODEC_PARAMS = ['opus',
'-application', 'voip',
'-cutoff', '8000',
'-b:a', '32k']
_DEFAULT_PORT = 20000
_DEFAULT_TLS_CIPHERS = '!eNULL:!aNULL:kDHE+aRSA+HIGH'
_DEFAULT_SRTP_CIPHER = 'AES_CM_128_HMAC_SHA1_80'
def ssl_context_for(purpose, ca_certs, own_cert, dh_params=None):
ssl_context = ssl.create_default_context(purpose, cafile=ca_certs)
ssl_context.load_cert_chain(own_cert)
if ca_certs is None:
ssl_context.load_default_certs(purpose)
else:
ssl_context.load_verify_locations(cafile=ca_certs)
# Force client cert requirement too
ssl_context.verify_mode = ssl.CERT_REQUIRED
ssl_context.verify_flags |= ssl.VERIFY_X509_STRICT
# Since we use only DH KEX later, we have to provide DH params. They aren't
# automatically generated. There are no compiled in ones. If you don't do
# this, you get weird "No shared cipher" errors at the client hello.
if purpose == ssl.Purpose.CLIENT_AUTH:
ssl_context.load_dh_params(dh_params)
# Enforce encryption and authentication.
# Enforce perfect forward secrecy—only provided by Diffie-Hellman ephemeral
# so far.
# Enforce RSA-based authentication because of better failure modes.
# Enforce 'high'—higher security suites.
# See http://security.stackexchange.com/questions/5096/rsa-vs-dsa-for-ssh-authentication-keys/46781#46781.
# TODO: Figure out how to enforce *generically*, better hash suites, and
# not have outdated, slow, and known weaker ciphers like 3DES.
ssl_context.set_ciphers(_DEFAULT_TLS_CIPHERS)
ssl_context.set_alpn_protocols(['simplevoip/0'])
return ssl_context
class NullFramedJSONSocket:
'"socket"'
def __init__(self, socket):
self.socket = socket
# Not the most efficient, but who cares here? It's a bloody control
        # channel, with small payloads.
self.buffer = bytearray()
def load(self):
while self.buffer.rfind(b'\0') == -1:
chunk = self.socket.recv(128)
self.buffer += chunk
# TODO: What does this really mean?
if len(chunk) == 0:
break
body, _, self.buffer = self.buffer.partition(b'\0')
return json.loads(body.decode())
def dump(self, payload):
self.socket.sendall(self._frame_json(payload))
@staticmethod
def _frame_json(payload):
return json.dumps(payload).encode() + b'\0'
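# Illustrative sketch (not part of the original tool, never called by it):
# exercises the NUL-framed JSON control protocol over a local socket pair.
def _demo_null_framed_roundtrip():
    import socket as _socket
    left, right = _socket.socketpair()
    sender, receiver = NullFramedJSONSocket(left), NullFramedJSONSocket(right)
    sender.dump({'accept': True})
    assert receiver.load() == {'accept': True}
    left.close()
    right.close()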
# Honestly, I'm only doing classes rather than functions like I used to because
# I need an excuse to use PascalCase, to make variable naming easier.
class FFmpeg(subprocess.Popen):
def __init__(self, *args, **kwargs):
super().__init__(['ffmpeg', '-loglevel', 'error',
'-nostdin'] +
list(args),
**kwargs)
class FFmpegSink(FFmpeg):
def __init__(self, device, speaker, sdp):
super().__init__('-f', 'sdp', '-i', 'pipe:',
'-f', device, speaker,
stdin=subprocess.PIPE,
universal_newlines=True)
# Not .communicate(), which tries to read stdout, and does a wait().
with self.stdin:
self.stdin.write(sdp)
class FFmpegSource(FFmpeg):
def __init__(self, device, microphone, address, srtp_params):
super().__init__('-f', device,
'-i', microphone,
'-f', 'rtp',
'-c:a', _DEFAULT_CODEC,
*_DEFAULT_CODEC_PARAMS,
*srtp_params,
'srtp://[{}]:{}'.format(*address))
class VoIPContext:
@classmethod
def from_namespace(cls, namespace):
new = cls()
new.listen = namespace.listen
if new.listen:
new.port = namespace.port
dh_params = namespace.dh_params
else:
dh_params = None
new.public_address = namespace.public_address
new.device = namespace.device
new.microphone = namespace.microphone
new.speaker = namespace.speaker
new.ssl_context_for = partial(ssl_context_for,
ca_certs=namespace.certs,
own_cert=namespace.cert,
dh_params=dh_params)
return new
def serve(self):
# TODO: getaddrinfo prefer dual stack if available, for
# local bind address
family, type_, proto, _, address = \
getaddrinfo(None, self.port,
family=AF_INET6, proto=IPPROTO_TCP,
flags=AI_PASSIVE)[0]
# Unlike SIP, or other running over a connectionless protocol, we don't
# have the luxury of reusing the same port, so the calling and the
# contact address aren't the same. Oh well. So people's phonebooks
# shouldn't rely on peername()s collected from incoming calls.
listen_socket = socket(family, type_, proto)
listen_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
listen_socket.bind(address)
listen_socket.listen(1)
return VoIPServer(listen_socket,
voip_context=self)
def call(self, common_name, address):
return VoIPClient(common_name, address,
voip_context=self)
class VoIPServer:
def __init__(self, listen_socket, voip_context):
self._listen_socket = listen_socket
self._ssl_context = voip_context.ssl_context_for(ssl.Purpose.CLIENT_AUTH)
self._voip_context = voip_context
def accept(self):
connection, address = self._listen_socket.accept()
logging.debug('TCP connection from %s.', address)
ssl_socket = self._ssl_context.wrap_socket(connection, server_side=True)
logging.debug('TLS handshake')
return VoIPCall(ssl_socket, self._voip_context)
def run(self):
while True:
voip_call = self.accept()
voip_call.accept()
voip_call.wait()
class VoIPClient:
def __init__(self, common_name, address, voip_context):
self._common_name = common_name
self._address = address
self._voip_context = voip_context
def connect(self):
family, type_, proto, _, address = \
getaddrinfo(*self._address,
family=AF_INET6, proto=IPPROTO_TCP)[0]
connect_socket = socket(family, type_, proto)
ssl_context = self._voip_context.ssl_context_for(ssl.Purpose.SERVER_AUTH)
ssl_context.check_hostname = True
ssl_socket = ssl_context.wrap_socket(connect_socket,
server_hostname=self._common_name)
ssl_socket.connect(address)
logging.info('Calling %s@%s:%s.', ssl_socket.server_hostname, *address[:2])
return VoIPCall(ssl_socket, self._voip_context)
class CallFailedError(RuntimeError):
pass
class VoIPCall:
def __init__(self, ssl_socket, voip_context):
self._voip_context = voip_context
self._ssl_socket = ssl_socket
self._json_socket = NullFramedJSONSocket(ssl_socket)
def accept(self):
logging.info('Call from:\n%s',
pformat(self._ssl_socket.getpeercert()['subject']))
self._json_socket.dump({'accept': True})
self._run()
def connect(self):
if not self._json_socket.load()['accept'] is True:
raise CallFailedError('rejected')
self._run()
def _gen_srtp_params(self):
srtp_key = ssl.RAND_bytes(30)
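        # 30 random bytes here correspond to the 16-byte master key plus the
        # 14-byte master salt expected by the AES_CM_128_HMAC_SHA1_80 suite below.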
# Doesn't seem like ffmpeg supports RFC 5764 (DTLS-SRTP), despite
        # supporting some of the ciphers, so we do the key negotiation
        # ourselves by exchanging the master key and master salt over the
        # TCP/TLS control channel.
return ['-srtp_out_suite', _DEFAULT_SRTP_CIPHER,
                '-srtp_out_params', b64encode(srtp_key).decode()]
def _audio_sdp(self, host, port, srtp_params):
# FIXME: Why does it say c=… 127.0.0.1? We're not originating from
# localhost!
ffmpeg = FFmpeg('-f', self._voip_context.device,
'-i', 'null',
'-f', 'rtp',
'-t', '0',
'-c:a', _DEFAULT_CODEC,
*srtp_params,
'srtp://[{}]:{}'.format(host, port),
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
ffmpeg_stdout, ffmpeg_stderr = ffmpeg.communicate()
ffmpeg.wait()
if ffmpeg.returncode != 0:
raise subprocess.SubprocessError(ffmpeg_stderr)
# skip 'SDP:\n'
return ffmpeg_stdout[5:]
# FIXME: So bloody ugly. Why must I support this!?
def _send_public_address(self):
payload = {'public_address': self._voip_context.public_address}
self._json_socket.dump(payload)
logging.debug('Sent %s.', payload)
def _recv_public_address(self):
response = self._json_socket.load()
logging.debug('Got %s.', response)
return response['public_address']
def _send_sdp(self, srtp_params, public_address=None):
payload = {}
# Don't care about IPv6 flow info nor scope id
address = public_address or self._ssl_socket.getpeername()[:2]
payload['audio_sdp'] = self._audio_sdp(*address,
srtp_params=srtp_params)
self._json_socket.dump(payload)
logging.debug('Sent %s.', payload)
def _recv_sdp(self):
response = self._json_socket.load()
logging.debug('Got %s.', response)
return response['audio_sdp']
def _setup_inbound_media(self, audio_sdp):
inbound_media = FFmpegSink(self._voip_context.device,
self._voip_context.speaker,
audio_sdp)
logging.debug('ffmpeg listening.')
self._json_socket.dump({'clear_to_send': True})
logging.debug('Sent CTS.')
assert self._json_socket.load()['clear_to_send'] is True
logging.debug('Got CTS.')
return inbound_media
def _setup_outbound_media(self, address, srtp_params):
outbound_media = FFmpegSource(self._voip_context.device,
self._voip_context.microphone,
address,
srtp_params)
logging.debug('ffmpeg sending.')
return outbound_media
def _run(self):
self._send_public_address()
public_address = self._recv_public_address()
srtp_params = self._gen_srtp_params()
self._send_sdp(srtp_params, public_address=public_address)
audio_sdp = self._recv_sdp()
address = public_address or self._ssl_socket.getpeername()[:2]
self._inbound_media = self._setup_inbound_media(audio_sdp)
self._outbound_media = self._setup_outbound_media(address, srtp_params)
def wait(self):
with self._inbound_media, self._outbound_media:
try:
assert self._json_socket.load()['hangup'] is True
except KeyboardInterrupt:
pass
self._inbound_media.terminate()
self._outbound_media.terminate()
logging.debug('Call shutdown.')
self._json_socket.dump({'hangup': True})
def argument_parser():
ap = ArgumentParser()
ap.add_argument('-q', action='store_const',
const=logging.WARNING, dest='log_level',
help='quiet')
ap.add_argument('-D', action='store_const',
const=logging.DEBUG, dest='log_level',
help='debug')
ap.add_argument('--cert', required=True, help='client/server cert')
ap.add_argument('--certs', help='CA certs')
ap.add_argument('--device', default=_DEFAULT_DEVICE, help='ffmpeg -devices')
ap.add_argument('--microphone', default='default', help='ffmpeg -sources <device>')
ap.add_argument('--speaker', default='default', help='ffmpeg -sinks <device>')
#ap.add_argument('--webcam', default='/dev/video0', help='ffmpeg -sources')
ap.add_argument('-p', '--public-address', nargs=2,
metavar=('HOST', 'PORT'))
sub_aps = ap.add_subparsers()
client_ap = sub_aps.add_parser('client', aliases=['c'])
# The extent to which we help you work around NAT
client_ap.add_argument('name')
client_ap.add_argument('host')
client_ap.add_argument('port', type=int)
client_ap.set_defaults(listen=False, continuation=client_main)
server_ap = sub_aps.add_parser('server', aliases=['s'])
server_ap.add_argument('--dh-params', required=True)
server_ap.add_argument('port', type=int, default=_DEFAULT_PORT, nargs='?')
server_ap.set_defaults(listen=True, continuation=server_main)
init_ap = sub_aps.add_parser('init')
init_ap.add_argument('--dh-params', required=True)
init_ap.set_defaults(continuation=init_main)
return ap
def init_main(args):
subprocess.check_call(['openssl', 'req', '-new',
'-x509',
'-days', '365',
'-nodes',
'-out', args.cert,
'-keyout', args.cert])
subprocess.check_call(['openssl', 'dhparam', '-out', args.dh_params,
'2048'])
# Empty cert file.
with open(args.certs, 'w'):
pass
exit()
def server_main(args):
voip_context = VoIPContext.from_namespace(args)
voip_server = voip_context.serve()
voip_server.run()
def client_main(args):
voip_context = VoIPContext.from_namespace(args)
voip_client = voip_context.call(args.name, (args.host, args.port))
voip_call = voip_client.connect()
voip_call.connect()
voip_call.wait()
if __name__ == '__main__':
ap = argument_parser()
args = ap.parse_args()
if args.log_level is not None:
logging.basicConfig(level=args.log_level)
args.continuation(args)
|
"""create products table
Revision ID: 79f05cc45ebe
Revises: 3d4d62b34ef9
Create Date: 2021-06-28 13:13:13.406920
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '79f05cc45ebe'
down_revision = '3d4d62b34ef9'
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
"analytics",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("times_requested", sa.String(), nullable=True),
sa.Column("product_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(["product_id"], ["products.id"],),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_analytic_id"), "analytics", ["id"], unique=True)
def downgrade():
    op.drop_index(op.f("ix_analytic_id"), table_name="analytics")
    op.drop_table("analytics")
|
# Copyright 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.image.v2 import resource_types_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestResouceTypesClient(base.BaseServiceTest):
FAKE_LIST_RESOURCETYPES = {
"resource_types": [
{
"created_at": "2014-08-28T18:13:04Z",
"name": "OS::Glance::Image",
"updated_at": "2014-08-28T18:13:04Z"
},
{
"created_at": "2014-08-28T18:13:04Z",
"name": "OS::Cinder::Volume",
"updated_at": "2014-08-28T18:13:04Z"
},
{
"created_at": "2014-08-28T18:13:04Z",
"name": "OS::Nova::Flavor",
"updated_at": "2014-08-28T18:13:04Z"
},
{
"created_at": "2014-08-28T18:13:04Z",
"name": "OS::Nova::Aggregate",
"updated_at": "2014-08-28T18:13:04Z"
},
{
"created_at": "2014-08-28T18:13:04Z",
"name": u"\u2740(*\xb4\u25e1`*)\u2740",
"updated_at": "2014-08-28T18:13:04Z"
}
]
}
def setUp(self):
super(TestResouceTypesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = resource_types_client.ResourceTypesClient(fake_auth,
'image',
'regionOne')
def _test_list_resouce_types(self, bytes_body=False):
self.check_service_client_function(
self.client.list_resource_types,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_LIST_RESOURCETYPES,
bytes_body)
def test_list_resouce_types_with_str_body(self):
self._test_list_resouce_types()
def test_list_resouce_types_with_bytes_body(self):
self._test_list_resouce_types(bytes_body=True)
|
import unittest
from aceql.login_url_decoder import LoginUrlDecoder
class MyUrlTest(unittest.TestCase):
@staticmethod
def test_something():
url = "http://localhost:9090/aceql?username=user1&password=password1&database=sampledb"
login_url_decoder: LoginUrlDecoder = LoginUrlDecoder(url)
print("login_url_decoder.server_url: " + login_url_decoder.server_url + ":")
print("login_url_decoder.username : " + login_url_decoder.username + ":")
print("login_url_decoder.password : " + login_url_decoder.password + ":")
print("login_url_decoder.database : " + login_url_decoder.database + ":")
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python3
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import json
import orjson
import rapidjson
import simplejson
import ujson
LIBRARIES = ("orjson", "ujson", "rapidjson", "simplejson", "json")
DUMP_TO = ("bytes", "string")
def get_version(library: str) -> str:
if library == "json":
return json.__version__
elif library == "orjson":
return orjson.__version__
elif library == "rapidjson":
return rapidjson.__version__
elif library == "ujson":
return ujson.__version__
elif library == "simplejson":
return simplejson.__version__
else:
return "UNKNWON"
|
'''OpenGL extension EXT.shared_texture_palette
This module customises the behaviour of the
OpenGL.raw.GL.EXT.shared_texture_palette to provide a more
Python-friendly API
Overview (from the spec)
EXT_shared_texture_palette defines a shared texture palette which may be
used in place of the texture object palettes provided by
EXT_paletted_texture. This is useful for rapidly changing a palette
common to many textures, rather than having to reload the new palette
for each texture. The extension acts as a switch, causing all lookups
that would normally be done on the texture's palette to instead use the
shared palette.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/shared_texture_palette.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.EXT.shared_texture_palette import *
### END AUTOGENERATED SECTION
|
# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Introduction to the CoNLL-2003 Shared Task: Language-Independent Named Entity Recognition"""
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{tjong-kim-sang-de-meulder-2003-introduction,
title = "Introduction to the {C}o{NLL}-2003 Shared Task: Language-Independent Named Entity Recognition",
author = "Tjong Kim Sang, Erik F. and
De Meulder, Fien",
booktitle = "Proceedings of the Seventh Conference on Natural Language Learning at {HLT}-{NAACL} 2003",
year = "2003",
url = "https://www.aclweb.org/anthology/W03-0419",
pages = "142--147",
}
"""
_DESCRIPTION = """\
The shared task of CoNLL-2003 concerns language-independent named entity recognition. We will concentrate on
four types of named entities: persons, locations, organizations and names of miscellaneous entities that do
not belong to the previous three groups.
The CoNLL-2003 shared task data files contain four columns separated by a single space. Each word has been put on
a separate line and there is an empty line after each sentence. The first item on each line is a word, the second
a part-of-speech (POS) tag, the third a syntactic chunk tag and the fourth the named entity tag. The chunk tags
and the named entity tags have the format I-TYPE which means that the word is inside a phrase of type TYPE. Only
if two phrases of the same type immediately follow each other, the first word of the second phrase will have tag
B-TYPE to show that it starts a new phrase. A word with tag O is not part of a phrase. Note the dataset uses IOB2
tagging scheme, whereas the original dataset uses IOB1.
For more details see https://www.clips.uantwerpen.be/conll2003/ner/ and https://www.aclweb.org/anthology/W03-0419
"""
_URL = "../../../data/CoNLL05/"
_TRAINING_FILE = "conll05.train.txt"
_DEV_FILE = "conll05.devel.txt"
_TEST_WSJ_FILE = "conll05.test.wsj.txt"
_TEST_BROWN_FILE = "conll05.test.brown.txt"
class Conll2005Config(datasets.BuilderConfig):
"""BuilderConfig for Conll2003"""
def __init__(self, **kwargs):
"""BuilderConfig forConll2005.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(Conll2005Config, self).__init__(**kwargs)
class Conll2005(datasets.GeneratorBasedBuilder):
"""Conll2003 dataset."""
BUILDER_CONFIGS = [
Conll2005Config(name="conll2005", version=datasets.Version("1.0.0"), description="Conll2005 dataset"),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"index": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"tags": datasets.Sequence(
datasets.features.ClassLabel(
names=['B-C-AM-TMP', 'B-C-AM-DIR', 'B-C-A2', 'B-R-AM-EXT', 'B-C-A0', 'I-AM-NEG', 'I-AM-ADV', 'B-C-V', 'B-C-AM-MNR', 'B-R-A3', 'I-AM-TM', 'B-V', 'B-R-A4', 'B-A5', 'I-A4', 'I-R-AM-LOC', 'I-C-A1', 'B-R-AA', 'I-C-A0', 'B-C-AM-EXT', 'I-C-AM-DIS', 'I-C-A5', 'B-A0', 'B-C-A4', 'B-C-AM-CAU', 'B-C-AM-NEG', 'B-AM-NEG', 'I-AM-MNR', 'I-R-A2', 'I-R-AM-TMP', 'B-AM', 'I-R-AM-PNC', 'B-AM-LOC', 'B-AM-REC', 'B-A2', 'I-AM-EXT', 'I-V', 'B-A3', 'B-A4', 'B-R-A0', 'I-AM-MOD', 'I-C-AM-CAU', 'B-R-AM-CAU', 'B-A1', 'B-R-AM-TMP', 'I-R-AM-EXT', 'B-C-AM-ADV', 'B-AM-ADV', 'B-R-A2', 'B-AM-CAU', 'B-R-AM-DIR', 'I-A5', 'B-C-AM-DIS', 'I-C-AM-MNR', 'B-AM-PNC', 'I-C-AM-LOC', 'I-R-A3', 'I-R-AM-ADV', 'I-A0', 'B-AM-EXT', 'B-R-AM-PNC', 'I-AM-DIS', 'I-AM-REC', 'B-C-AM-LOC', 'B-R-AM-ADV', 'I-AM', 'I-AM-CAU', 'I-AM-TMP', 'I-A1', 'I-C-A4', 'B-R-AM-LOC', 'I-C-A2', 'B-C-A5', 'O', 'B-R-AM-MNR', 'I-C-A3', 'I-R-AM-DIR', 'I-AM-PRD', 'B-AM-TM', 'I-A2', 'I-AA', 'I-AM-LOC', 'I-AM-PNC', 'B-AM-MOD', 'B-AM-DIR', 'B-R-A1', 'B-AM-TMP', 'B-AM-MNR', 'I-R-A0', 'B-AM-PRD', 'I-AM-DIR', 'B-AM-DIS', 'I-C-AM-ADV', 'I-R-A1', 'B-C-A3', 'I-R-AM-MNR', 'I-R-A4', 'I-C-AM-PNC', 'I-C-AM-TMP', 'I-C-V', 'I-A3', 'I-C-AM-EXT', 'B-C-A1', 'B-AA', 'I-C-AM-DIR', 'B-C-AM-PNC']
)
),
}
),
supervised_keys=None,
homepage="https://www.aclweb.org/anthology/W03-0419/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
urls_to_download = {
"train": f"{_URL}{_TRAINING_FILE}",
"dev": f"{_URL}{_DEV_FILE}",
"test_wsj": f"{_URL}{_TEST_WSJ_FILE}",
"test_brown": f"{_URL}{_TEST_BROWN_FILE}"
}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
datasets.SplitGenerator(name="train", gen_kwargs={"filepath": downloaded_files["train"]}),
datasets.SplitGenerator(name="validation", gen_kwargs={"filepath": downloaded_files["dev"]}),
datasets.SplitGenerator(name="test_wsj", gen_kwargs={"filepath": downloaded_files["test_wsj"]}),
datasets.SplitGenerator(name="test_brown", gen_kwargs={"filepath": downloaded_files["test_brown"]}),
]
def _generate_examples(self, filepath):
logger.info("⏳ Generating examples from = %s", filepath)
with open(filepath, encoding="utf-8") as f:
guid = 0
for line in f:
                if line.strip():
index = line.split()[0]
text = ' '.join(line.split()[1:]).strip()
tokens = text.split("|||")[0].split()
labels = text.split("|||")[1].split()
yield guid, {
"id": str(guid),
"index": index,
"tokens": tokens,
"tags": labels
}
guid += 1
|
# coding:utf8
from flask import Blueprint
home = Blueprint("home", __name__)
import backend.app.home.views # import views.py files
|
v3 = 333
def Sum(value1, value2=200, value3=v3):
return value1 + value2 + value3
v3 = 999
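# Default argument values are evaluated once, at definition time, so value3 is
# still bound to 333 here; the call below therefore prints 543 (10 + 200 + 333).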
print(Sum(10))
|
from __future__ import absolute_import
import numpy as np
from .Node import Op
from .._base import DNNL_LIB
from ..gpu_links import matrix_multiply
from ..cpu_links import matrix_multiply as cpu_matrix_multiply
class MatMulOp(Op):
def __init__(self, node_A, node_B, trans_A=False, trans_B=False, ctx=None):
super().__init__(MatMulOp, [node_A, node_B], ctx)
self.matmul_attr_trans_A = trans_A
self.matmul_attr_trans_B = trans_B
def compute(self, input_vals, output_val, stream_handle=None):
if self.on_cpu:
if DNNL_LIB['DnnlMatrixMultiply']:
cpu_matrix_multiply(
input_vals[0], self.matmul_attr_trans_A,
input_vals[1], self.matmul_attr_trans_B,
output_val)
else:
input_vals = [n.asnumpy() for n in input_vals]
if ((self.matmul_attr_trans_A is False) and
(self.matmul_attr_trans_B is False)):
output_val[:] = np.matmul(input_vals[0], input_vals[1])
elif ((self.matmul_attr_trans_A is True) and
(self.matmul_attr_trans_B is False)):
output_val[:] = np.matmul(
np.transpose(input_vals[0]), input_vals[1])
elif ((self.matmul_attr_trans_A is False) and
(self.matmul_attr_trans_B is True)):
output_val[:] = np.matmul(
input_vals[0], np.transpose(input_vals[1]))
elif ((self.matmul_attr_trans_A is True) and
(self.matmul_attr_trans_B is True)):
output_val[:] = np.matmul(
np.transpose(input_vals[0]), np.transpose(input_vals[1]))
else:
matrix_multiply(
input_vals[0], self.matmul_attr_trans_A,
input_vals[1], self.matmul_attr_trans_B,
output_val, stream_handle)
def gradient(self, output_grad):
if ((self.matmul_attr_trans_A is False) and
(self.matmul_attr_trans_B is False)):
# if Y=AB, then dA=dY B^T, dB=A^T dY
lhs_grad = matmul_op(
output_grad, self.inputs[1], trans_A=False, trans_B=True, ctx=self.raw_ctx)
rhs_grad = matmul_op(
self.inputs[0], output_grad, trans_A=True, trans_B=False, ctx=self.raw_ctx)
elif ((self.matmul_attr_trans_A is True) and
(self.matmul_attr_trans_B is False)):
# if Y=A^T B, then dA=(dY B^T)^T=B dY^T, dB=A dY
lhs_grad = matmul_op(
self.inputs[1], output_grad, trans_A=False, trans_B=True, ctx=self.raw_ctx)
rhs_grad = matmul_op(
self.inputs[0], output_grad, trans_A=False, trans_B=False, ctx=self.raw_ctx)
elif ((self.matmul_attr_trans_A is False) and
(self.matmul_attr_trans_B is True)):
# if Y=A B^T, then dA=dY B, dB=(A^T dY)^T=dY^T A
lhs_grad = matmul_op(
output_grad, self.inputs[1], trans_A=False, trans_B=False, ctx=self.raw_ctx)
rhs_grad = matmul_op(
output_grad, self.inputs[0], trans_A=True, trans_B=False, ctx=self.raw_ctx)
elif ((self.matmul_attr_trans_A is True) and
(self.matmul_attr_trans_B is True)):
# if Y=A^T B^T, then dA=(dY B)^T=B^T dY^T, dB=(A dY)^T=dY^T A^T
lhs_grad = matmul_op(
self.inputs[1], output_grad, trans_A=True, trans_B=True, ctx=self.raw_ctx)
rhs_grad = matmul_op(
output_grad, self.inputs[0], trans_A=True, trans_B=True, ctx=self.raw_ctx)
return [lhs_grad, rhs_grad]
def infer_shape(self, input_shapes):
assert len(input_shapes) == 2
A = input_shapes[0]
B = input_shapes[1]
shape_A = A[0]
shape_B = B[1]
if self.matmul_attr_trans_A == True:
shape_A = A[1]
if self.matmul_attr_trans_B == True:
shape_B = B[0]
return (shape_A, shape_B)
def deduce_states(self, states, duplicates, orders):
def revert(x):
return (x[1], x[0])
assert len(states) == 2 and len(duplicates) == 2 and len(orders) == 2
if states[0] is None and states[1] is None:
return None, 1, None
if states[0] is None:
states[0] = (1, 1)
if states[1] is None:
states[1] = (1, 1)
assert len(states[0]) == 2 and len(states[1]) == 2
if self.matmul_attr_trans_A:
states[0] = revert(states[0])
if self.matmul_attr_trans_B:
states[1] = revert(states[1])
assert states[0][1] == states[1][0], \
            'Partition number of the left matrix columns should match that of the right matrix rows.'
if duplicates[0] is None:
duplicates[0] = states[1][1]
        assert duplicates[0] == states[1][1], 'The duplicate number does not conform to the states.'
if duplicates[1] is None:
duplicates[1] = states[0][0]
        assert duplicates[1] == states[0][0], 'The duplicate number does not conform to the states.'
map_index = self.matmul_attr_trans_B * 2 + self.matmul_attr_trans_A
l2r_map = [
{0: -1, -1: 1, 1: 0}, # no trans
{1: -1, -1: 1, 0: 0}, # trans A
{0: -1, -1: 0, 1: 1}, # trans B
{1: -1, -1: 0, 0: 1}, # trans both
][map_index]
r2l_map = [
{-1: 0, 1: -1, 0: 1}, # no trans
{-1: 1, 1: -1, 0: 0}, # trans A
{-1: 0, 0: -1, 1: 1}, # trans B
{-1: 1, 0: -1, 1: 0}, # trans both
][map_index]
l2res_map = [
{-1: 1, 0: 0, 1: -1}, # no trans
{-1: 1, 1: 0, 0: -1}, # trans A
][self.matmul_attr_trans_A]
if orders[0] is None and orders[1] is None:
# for left matrix, the order of dimensions are (row, duplicate, column)
orders[0] = (0, 1, -1) if self.matmul_attr_trans_A else (1, 0, -1)
# for right matrix, the order of dimensions are (duplicate, column, row)
orders[1] = (1, -1, 0) if self.matmul_attr_trans_B else (0, -1, 1)
elif orders[0] is None and orders[1] is not None:
orders[0] = tuple(r2l_map[x] for x in orders[1])
elif orders[0] is not None and orders[1] is None:
orders[1] = tuple(l2r_map[x] for x in orders[0])
assert orders[0] == tuple(r2l_map[x] for x in orders[1])
assert orders[1] == tuple(l2r_map[x] for x in orders[0])
return (states[0][0], states[1][1]), states[0][1], tuple(l2res_map[x] for x in orders[0])
def matmul_op(node_A, node_B, trans_A=False, trans_B=False, ctx=None):
"""Make a new instance of Matrix Multiplication and call the instance.
Parameters:
----
node_A : Node
The left operand of the matrix multiplication.
node_B : Node
The right operand of the matrix multiplication.
trans_A : Boolean
Whether node_A to be transposed
trans_B : Boolean
Whether node_B to be transposed
Returns:
----
A new Node instance created by Op.
"""
return MatMulOp(node_A, node_B, trans_A, trans_B, ctx=ctx)
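# Quick numerical sketch (assumes only NumPy, independent of the Op machinery)
# of the gradient identity used in the no-transpose branch of MatMulOp.gradient:
# for Y = A B and loss = sum(Y), dA = dY @ B.T and dB = A.T @ dY.
def _check_matmul_grad_identity():
    rng = np.random.RandomState(0)
    A, B = rng.randn(3, 4), rng.randn(4, 5)
    dY = np.ones((3, 5))          # d(sum(Y)) / dY
    dA = dY @ B.T                 # closed-form gradient w.r.t. A
    eps = 1e-6
    A_pert = A.copy()
    A_pert[0, 0] += eps           # perturb a single entry of A
    numeric = ((A_pert @ B).sum() - (A @ B).sum()) / eps
    assert abs(numeric - dA[0, 0]) < 1e-4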
|
# -*- coding: utf-8 -*-
#
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
import os
import websocket as ws
from websocket._abnf import *
import sys
sys.path[0:0] = [""]
if sys.version_info[0] == 2 and sys.version_info[1] < 7:
import unittest2 as unittest
else:
import unittest
class ABNFTest(unittest.TestCase):
def testInit(self):
a = ABNF(0,0,0,0, opcode=ABNF.OPCODE_PING)
self.assertEqual(a.fin, 0)
self.assertEqual(a.rsv1, 0)
self.assertEqual(a.rsv2, 0)
self.assertEqual(a.rsv3, 0)
self.assertEqual(a.opcode, 9)
self.assertEqual(a.data, '')
a_bad = ABNF(0,1,0,0, opcode=77)
self.assertEqual(a_bad.rsv1, 1)
self.assertEqual(a_bad.opcode, 77)
def testValidate(self):
a = ABNF(0,0,0,0, opcode=ABNF.OPCODE_PING)
self.assertRaises(ws.WebSocketProtocolException, a.validate)
a_bad = ABNF(0,1,0,0, opcode=77)
self.assertRaises(ws.WebSocketProtocolException, a_bad.validate)
a_close = ABNF(0,1,0,0, opcode=ABNF.OPCODE_CLOSE, data="abcdefgh1234567890abcdefgh1234567890abcdefgh1234567890abcdefgh1234567890")
self.assertRaises(ws.WebSocketProtocolException, a_close.validate)
# This caused an error in the Python 2.7 Github Actions build
# Uncomment test case when Python 2 support no longer wanted
# def testMask(self):
# ab = ABNF(0,0,0,0, opcode=ABNF.OPCODE_PING)
# bytes_val = bytes("aaaa", 'utf-8')
# self.assertEqual(ab._get_masked(bytes_val), bytes_val)
def testFrameBuffer(self):
fb = frame_buffer(0, True)
self.assertEqual(fb.recv, 0)
self.assertEqual(fb.skip_utf8_validation, True)
        fb.clear()
self.assertEqual(fb.header, None)
self.assertEqual(fb.length, None)
self.assertEqual(fb.mask, None)
self.assertEqual(fb.has_mask(), False)
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
import time
import clime
import base64
import requests
import traceback
from pprintpp import pprint as pp
_TEST_URL = 'http://localhost'
#_TEST_URL = 'http://crawl.crs.dianhua.cn'
_TEST_PORT = 8080
# GET
_FLOW_TYPE_URL = '{}:{}/crawl/calls/flow_type?tel={}&uid={}'
_VERIFY_SMS_URL = '{}:{}/crawl/calls/verify/sms?sid={}'
_VERIFY_CAPTCHA_URL = '{}:{}/crawl/calls/verify/captcha?sid={}'
_VERIFY_SMS_CAPTCHA_URL = '{}:{}/crawl/calls/verify/sms_captcha?sid={}'
_ABORT_URL = '{}:{}/crawl/calls/abort?sid={}'
# POST
_LOGIN_URL = '{}:{}/crawl/calls/login'
_VERIFY_URL = '{}:{}/crawl/calls/verify'
def flow_type(tel, uid):
print '\nACTION : GET flow_type'
url = _FLOW_TYPE_URL.format(_TEST_URL, _TEST_PORT, tel, uid)
r = requests.get(url)
ret = r.json()
return ret
def get_sms(sid):
start = time.time()
print '\nACTION : GetSMS'
url = _VERIFY_SMS_URL.format(_TEST_URL, _TEST_PORT, sid)
r = requests.get(url)
ret = r.json()
print 'cost {} secs'.format(time.time() - start)
return ret
def get_captcha(sid):
start = time.time()
    print '\nACTION : GetCaptcha'
url = _VERIFY_CAPTCHA_URL.format(_TEST_URL, _TEST_PORT, sid)
r = requests.get(url)
ret = r.json()
pp(ret)
captcha = base64.b64decode(ret['content'][len('data:image/png;base64,'):])
with open('./image.png', 'w') as fp:
fp.write(captcha)
print 'cost {} secs'.format(time.time() - start)
return ret
def get_smscaptcha(sid):
start = time.time()
    print '\nACTION : GetSMSCaptcha'
url = _VERIFY_SMS_CAPTCHA_URL.format(_TEST_URL, _TEST_PORT, sid)
r = requests.get(url)
print r.text
ret = r.json()
pp(ret)
captcha = base64.b64decode(ret['content'][len('data:image/png;base64,'):])
with open('./image.png', 'w') as fp:
fp.write(captcha)
print 'cost {} secs'.format(time.time() - start)
return ret
# def update_smscaptcha(sid,verify_type):
# start = time.time()
# print '\nACTION : GetSMSCaptch'
# url = _VERIFY_SMS_CAPTCHA_URL.format(_TEST_URL, _TEST_PORT, sid)
# data = {
# 'verify_type': verify_type
# }
# r = requests.get(url, params=data)
# print r.text
# ret = r.json()
# pp(ret)
# captcha = base64.b64decode(ret['content'][len('data:image/png;base64,'):])
# with open('./image.png', 'w') as fp:
# fp.write(captcha)
# print 'cost {} secs'.format(time.time() - start)
# return ret
def login(sid, tel, pin_pwd, sms_code, captcha_code):
start = time.time()
print '\nACTION: POST login'
data = {
'sid': sid,
'full_name' : '毛羽建',
'id_card' : '330225198112260052',
'pin_pwd': pin_pwd,
'sms_code': sms_code,
'captcha_code': captcha_code
}
url = '{}:{}/crawl/calls/login'.format(_TEST_URL, _TEST_PORT)
r = requests.post(url, data=data)
ret = r.json()
print 'cost {} secs'.format(time.time() - start)
return ret
def verify(sid, sms_code, captcha_code):
start = time.time()
print '\nACTION: POST verify'
data = {
'sid': sid,
'sms_code': sms_code,
'captcha_code': captcha_code
}
url = _VERIFY_URL.format(_TEST_URL, _TEST_PORT, sid, sms_code, captcha_code)
r = requests.post(url, data=data)
ret = r.json()
print 'cost {} secs'.format(time.time() - start)
return ret
def abort(sid):
start = time.time()
print '\nACTION: GET abort'
url = _ABORT_URL.format(_TEST_URL, _TEST_PORT, sid)
r = requests.get(url)
ret = r.json()
print 'cost {} secs'.format(time.time() - start)
return ret
def crawl(tel, pin_pwd):
next_action = ''
try:
sid = ''
next_action = ''
need_sms_verify = ''
need_captcha_verify = ''
sms_code = ''
captcha_code = ''
ret = {}
while(next_action != 'Finish'):
if next_action == '':
ret = flow_type(tel, 'UIDxxxxxxxx')
pp(ret)
if ret['status']:
print 'Error'
return ret
sid = ret['result']['sid']
need_sms_verify = ret['result']['need_sms_verify']
need_captcha_verify = ret['result']['need_captcha_verify']
elif next_action == 'Login':
if need_sms_verify == 1 or need_captcha_verify == 1:
sms_code = raw_input('Input verify code (sms): ')
captcha_code = raw_input('Input verify code (captcha): ')
else:
sms_code = ''
captcha_code = ''
ret = login(sid, tel, pin_pwd, sms_code, captcha_code)
if ret['status']:
print u'ERROR : {}'.format(ret['status'])
elif next_action == 'GetSMS':
ret = get_sms(sid)
if ret['status']:
print u'ERROR : {}'.format(ret['status'])
elif next_action == 'GetCaptcha':
                ret = get_captcha(sid)
if ret['status']:
print u'ERROR : {}'.format(ret['status'])
elif next_action == 'GetSMSCaptcha':
ret = get_smscaptcha(sid)
if ret['status']:
print u'ERROR : {}'.format(ret['status'])
elif next_action == 'Verify':
# update_if = raw_input('if update verify_code(''/"sms"/"captcha")?: ')
# if update_if == "sms":
# ret = update_smscaptcha(sid,update_if)
# if ret['status']:
# print u'ERROR : {}'.format(ret['status'])
# elif update_if == "captcha":
# ret = update_smscaptcha(sid,update_if)
# if ret['status']:
# print u'ERROR : {}'.format(ret['status'])
sms_code = raw_input('Input verify code (sms): ')
captcha_code = raw_input('Input verify code (captcha): ')
ret = verify(sid, sms_code, captcha_code)
if ret['status']:
print u'ERROR : {}'.format(ret['status'])
elif next_action == 'Reset':
print u'Crawler status = {}, msg = {}'.format(
ret['status'],
ret['message'])
break
elif next_action == 'Unsupported':
print 'No crawler supported!!'
break
elif next_action == 'NoCrawlFinish':
                print 'No crawl needed. Results already exist.'
break
else:
print next_action
assert False, 'Abnormal case !!!'
pp(ret)
next_action = ret['next_action']
#print next_action
raw_input('next_action = {}'.format(next_action))
except KeyboardInterrupt:
ret = abort(sid)
if ret['status']:
print 'ERROR : {}'.format(ret['status'])
next_action = ret['next_action']
except:
pp(ret)
print traceback.format_exc()
return 'Fatal Error.......................................'
return next_action
if __name__ == '__main__':
import clime.now
|
from functools import wraps
from unittest import TestCase
import asyncio
import concurrent
from unsync import unsync
from unsync.unsync import Unfuture
class DecoratorTests(TestCase):
def test_exception(self):
class TestException(Exception):
pass
@unsync
async def error():
await asyncio.sleep(0.1)
raise TestException
with self.assertRaises(TestException):
error().result()
def test_parallelism(self):
calls = []
@unsync
async def sleep():
calls.append('a')
await asyncio.sleep(0.1)
calls.append('b')
results = []
for _ in range(100):
results.append(sleep())
for result in results:
result.result()
self.assertEqual(list(sorted(calls)), calls)
def test_future_integration(self):
asyncio_future = asyncio.Future(loop=unsync.loop)
@unsync
async def wrapper(_future):
return await _future
result = wrapper(asyncio_future)
with self.assertRaises(concurrent.futures.TimeoutError):
result.result(timeout=0.1)
self.assertFalse(result.done())
unsync.loop.call_soon_threadsafe(lambda: asyncio_future.set_result('faff'))
self.assertEqual('faff', result.result(timeout=0.1))
def test_unfuture_integration(self):
unfuture = Unfuture()
@unsync
async def wrapper(_future):
result = await _future
return result
result = wrapper(unfuture)
with self.assertRaises(concurrent.futures.TimeoutError):
result.result(timeout=0.1)
self.assertFalse(result.done())
unfuture.set_result('faff')
self.assertEqual('faff', result.result(timeout=0.1))
def test_instance_methods(self):
class Class:
@unsync
async def wait(self):
await asyncio.sleep(0.1)
return 'faff'
self.assertEqual('faff', Class().wait().result())
def test_passing_arguments(self):
@unsync(faff='faff')
def cpu_bound():
return 'faff'
self.assertEqual('faff', cpu_bound().result())
def test_implementation_without_decorator(self):
"""
This implementation is useful to preserve type hints without an ignore statement.
"""
def function_name(x: str) -> Unfuture[str]:
async_method = unsync(__function_name_synced)
return async_method(x)
def __function_name_synced(x: str) -> str:
return x + 'a'
future_result = function_name('b')
self.assertEqual('ba', future_result.result())
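        # The helper above wraps a plainly-typed synchronous function with unsync() at
        # call time instead of decorating it, so function_name keeps an accurate
        # Unfuture[str] return annotation without needing a "type: ignore" comment.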
def set_attr(attr_value):
"""
Sample decorator for testing nested unsync decorators.
"""
@wraps(attr_value)
def wrapper(f):
f.attr = attr_value
return f
return wrapper
class NestedDecoratorTests(TestCase):
def test_nested_decorator_retains_wrapped_function_attributes(self):
@unsync
@set_attr("faff")
async def wrapped_func(): pass
assert wrapped_func.__name__ == "wrapped_func"
assert wrapped_func.attr == "faff"
def test_nested_decorator_retains_wrapped_class_method_attributes(self):
class Class:
@unsync
@set_attr("faff")
async def wrapped_func(self): pass
instance = Class()
assert instance.wrapped_func.__name__ == "wrapped_func"
assert instance.wrapped_func.attr == "faff"
|
import discord
from discord.ext import commands
import sys
import mysql.connector
sys.path.insert(1, '../')
from config import *
sys.path.insert(1, '../constants')
from colors import *
from constants import *
#MySQL stuff
mydb = mysql.connector.connect(
host= sql.host,
user= sql.user,
password= sql.password,
database= sql.database,
port= sql.port
)
c = mydb.cursor()
#Loading Cog
class permissions(commands.Cog):
def __init__(self, bot):
self.bot = bot
def setup(bot):
bot.add_cog(permissions(bot))
bot.add_command(checkperms)
bot.add_command(setperms)
bot.add_command(creategroup)
@commands.command()
async def checkperms(ctx, user : discord.Member=None):
    #If user not specified, set it to the command author
if user == None:
user = ctx.author
other = False
else:
other = True
#Sql
c.execute(f"SELECT id, group_id FROM users WHERE user_id={user.id} AND server_id={ctx.guild.id}")
res = c.fetchone()
#Check if in database
if not res:
weight = 0
group_id = 0
group_name = "User"
db_id = "Not in DB"
else:
c.execute(f"SELECT * FROM permission_table WHERE group_id={res[1]}")
grp = c.fetchone()
group_id = grp[0]
group_name = grp[1]
weight = grp[2]
db_id = res[0]
#Embed Strings#
db_group_id = f"**Group ID:** {group_id}\n"
db_group_name = f"**Group Name:** {group_name}\n"
db_permission_level = f"**Permission level:** {weight}\n"
#Check if user was specified or not again
if other == True:
pron = "This user is also"
else:
pron = "You're also"
#Check if command author is bot owner (additional info)
if str(ctx.author.id) in config.bot_owners:
display_id = f"**Entry ID:** {db_id}\n"
else:
display_id = ""
if str(user.id) in config.bot_owners:
q_bot_owner = f"\n{pron} Bot Owner (**Permission level:** {config.bot_owners_weight})"
else:
q_bot_owner = ""
#Embed Embed
    embed=discord.Embed(title=f"{user.name}#{user.discriminator}'s Permissions", description=f"{db_group_name}{db_group_id}{db_permission_level}{display_id}{q_bot_owner}", color=user.colour)
embed.set_thumbnail(url=f"{user.avatar_url}")
embed.set_footer(text=f"Bot Version: {const.version}")
await ctx.send(embed=embed)
@commands.command()
async def setperms(ctx, user : discord.Member=None, level=0):
if str(ctx.author.id) not in config.bot_owners:
        return await ctx.send("You must be a bot owner to do that")
#Help
if user == None:
return await ctx.send("Help wip")
#Syntax checks
try:
level = int(level)
except:
return await ctx.send("Permission level argument must be a digit")
#Check if level is 3 (Bot owner which is hard coded)
if level == 3:
        return await ctx.send(f"This group id is reserved and cannot be set; if you want someone added here, ask def750")
#Check if group exist
c.execute(f"SELECT group_id FROM permission_table WHERE group_id={level}")
res1 = c.fetchone()
if not res1:
return await ctx.send(f"Group with id {level} does not exist!")
#Check if user tries to change his own perms
if str(user.id) == str(ctx.author.id):
return await ctx.send("You can't change your own perms")
#Assign group name
c.execute(f"SELECT group_name FROM permission_table WHERE group_id={level}")
res3 = c.fetchone()
if not res3:
n_group = "User"
else:
n_group = res3[0]
y = colors.yellow
e = colors.end
#Fetch from database
c.execute(f"SELECT * FROM users WHERE user_id={user.id} and server_id={ctx.guild.id}")
res = c.fetchone()
#Check if in database
if not res:
prev_level = 0
p_weight = 0
user_tag = f"{ctx.author.name}#{ctx.author.discriminator}"
###LOG MESSAGE###
log_msg = f"Changed permission level of <{user.name}#{user.discriminator} ({user.id})> from {prev_level} to {level}"
###LOG MESSAGE###
#SQL
sql = "INSERT INTO users (user_id, server_id, group_id) VALUES (%s, %s, %s)"
val = (user.id, ctx.guild.id, level)
c.execute(sql, val)
sql = "INSERT INTO logs (user_id, user_tag, server_id, log) VALUES (%s, %s, %s, %s)"
val = (ctx.author.id, user_tag, ctx.guild.id, log_msg)
c.execute(sql, val)
mydb.commit()
c.execute(f"SELECT weight FROM permission_table WHERE group_id={level}")
w = c.fetchone()
weight = w[0]
#embed
embed=discord.Embed(title=f"Changed {user.name}#{user.discriminator}'s permissions", description=f"**Previous level:** {prev_level} ({p_weight})\n**New Level:** {level} ({weight})\n**Group Name:** {n_group}", color=user.colour)
embed.set_thumbnail(url=f"{user.avatar_url}")
embed.set_footer(text=f"Bot Version: {const.version}")
###CONSOLE LOG NOTICE
print(f"\n{colors.red}-----[ IMPORTANT NOTICE ]-----")
print(f"Permission change")
print(f"{y}Executed By: {e}{ctx.author.name}#{ctx.author.discriminator} ({ctx.author.id})")
print(f"{y}User affected: {e}{user.name}#{user.discriminator} ({user.id})")
print(f"{y}Previous Group: {e}{prev_level} ({p_weight})")
print(f"{y}New Group: {e}{level} ({weight})")
print(f"{y}Server: {e}{ctx.guild.name} ({ctx.guild.id})")
        print(f"{colors.red}This event was also logged to the database!{e}\n")
return await ctx.send(embed=embed)
else:
#Previous level
prev_level = res[3]
c.execute(f"SELECT weight FROM permission_table WHERE group_id={res[3]}")
w = c.fetchone()
p_weight = w[0]
#User Tag
user_tag = f"{ctx.author.name}#{ctx.author.discriminator}"
c.execute(f"SELECT weight FROM permission_table WHERE group_id={level}")
w = c.fetchone()
weight = w[0]
###LOG MESSAGE###
log_msg = f"Changed permission level of <{user.name}#{user.discriminator} ({user.id})> from {prev_level} ({p_weight}) to {level} ({weight})"
###LOG MESSAGE###
#SQL
c.execute("UPDATE users SET group_id=%s WHERE user_id=%s AND server_id=%s", (level, user.id, ctx.guild.id))
sql = "INSERT INTO logs (user_id, user_tag, server_id, log) VALUES (%s, %s, %s, %s)"
val = (ctx.author.id, user_tag, ctx.guild.id, log_msg)
c.execute(sql, val)
mydb.commit()
#Embed
embed=discord.Embed(title=f"Changed {user.name}#{user.discriminator}'s permissions", description=f"**Previous level:** {prev_level} ({p_weight})\n**New Level:** {level} ({weight})\n**Group Name:** {n_group}", color=user.colour)
embed.set_thumbnail(url=f"{user.avatar_url}")
embed.set_footer(text=f"Bot Version: {const.version}")
###CONSOLE LOG NOTICE
print(f"\n{colors.red}-----[ IMPORTANT NOTICE ]-----")
print(f"Permission change")
print(f"{y}Executed By: {e}{ctx.author.name}#{ctx.author.discriminator} ({ctx.author.id})")
print(f"{y}User affected: {e}{user.name}#{user.discriminator} ({user.id})")
print(f"{y}Previous Group: {e}{prev_level} ({p_weight})")
print(f"{y}New Group: {e}{level} ({weight})")
print(f"{y}Server: {e}{ctx.guild.name} ({ctx.guild.id})")
print(f"{colors.red}This event was also logged to the database!{e}\n")
return await ctx.send(embed=embed)
@commands.command()
async def creategroup(ctx, name=None, weight=0):
    #Assign terminal colors
y = colors.yellow
e = colors.end
    #Check if user is bot owner
if str(ctx.author.id) not in config.bot_owners:
        return await ctx.send("You must be a bot owner to do that")
#Check if name is not none
if name == None:
        return await ctx.send("Invalid syntax, help WIP")
unallowed_names = ["bot_owner", "bot"]
#convert name to lowercase
name = name.lower()
#Syntax checks
try:
weight = int(weight)
except:
return await ctx.send("Weight must be a number")
if weight > 1000 or weight < 0:
        return await ctx.send("Weight must be between 0 and 1000")
#Check if group with that name already exists
c.execute(f"SELECT `group_name` FROM `permission_table` WHERE `group_name` = '{name}'")
res2 = c.fetchone()
if res2:
return await ctx.send("Group with that name already exists")
#Check if name is unallowed
if name in unallowed_names:
        return await ctx.send("This name is not allowed, choose another one")
####### Command code #######
#Insert group to the database
sql = "INSERT INTO permission_table (group_name, weight) VALUES (%s, %s)"
val = (name, weight)
c.execute(sql, val)
mydb.commit()
#Get group id
c.execute("SELECT group_id FROM permission_table WHERE group_name=%s", (name,))
res1 = c.fetchone()
grp_id = res1[0]
###CONSOLE LOG NOTICE
user_tag = f"{ctx.author.name}#{ctx.author.discriminator}"
print(f"\n{colors.red}-----[ IMPORTANT NOTICE ]-----")
print(f"Created Group")
print(f"{y}Executed By: {e}{user_tag} ({ctx.author.id})")
print(f"{y}New Group: {e}{name} ({weight})")
print(f"{y}Group ID: {e}{grp_id}")
print(f"{y}Server of Execution: {e}{ctx.guild.name} ({ctx.guild.id})")
print(f"{colors.red}This event was also logged to the database!{e}\n")
#Insert log into database
log_msg = f"<{user_tag} ({ctx.author.id}> Created new group {name} (ID: {grp_id}) with weight {weight}"
sql = "INSERT INTO logs (user_id, user_tag, server_id, log) VALUES (%s, %s, %s, %s)"
val = (ctx.author.id, user_tag, ctx.guild.id, log_msg)
c.execute(sql, val)
mydb.commit()
#Send embed
embed=discord.Embed(title=f"Group created", description=f"**Group name:** {name}\n**Group Weight:** {weight}\n**Group ID:** {grp_id}\n\n__**This event was logged to the database!**__", color=colors.embeds.green)
embed.set_footer(text=f"Bot Version: {const.version}")
return await ctx.send(embed=embed)
@commands.command()
async def checkgroup(ctx, group):
return
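# Inferred database layout (reconstructed from the queries in this cog, not authoritative):
#   users(id, user_id, server_id, group_id)         - one row per member per guild
#   permission_table(group_id, group_name, weight)  - permission groups and their weights
#   logs(user_id, user_tag, server_id, log)         - audit trail of permission changes
# Exact column types and constraints live in the database schema, not in this file.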
|
import logging
from unittest import TestCase
from parameterized import parameterized, param
from hvac import exceptions
from hvac.tests import utils
class TestApprole(utils.HvacIntegrationTestCase, TestCase):
TEST_MOUNT_POINT = 'approle'
def setUp(self):
super(TestApprole, self).setUp()
self.client.enable_auth_backend(
backend_type='approle',
mount_point=self.TEST_MOUNT_POINT,
)
def tearDown(self):
self.client.token = self.manager.root_token
self.client.disable_auth_backend(mount_point=self.TEST_MOUNT_POINT)
super(TestApprole, self).tearDown()
@parameterized.expand([
param(
'no secret ids',
num_secrets_to_create=0,
raises=exceptions.InvalidPath,
),
param(
'one secret id',
num_secrets_to_create=1,
),
param(
'two secret ids',
num_secrets_to_create=2,
),
])
def test_list_role_secrets(self, label, num_secrets_to_create=0, raises=None):
test_role_name = 'testrole'
self.client.create_role(
role_name=test_role_name,
mount_point=self.TEST_MOUNT_POINT,
)
for _ in range(0, num_secrets_to_create):
self.client.create_role_secret_id(
role_name=test_role_name,
mount_point=self.TEST_MOUNT_POINT,
)
if raises:
with self.assertRaises(raises):
self.client.list_role_secrets(
role_name=test_role_name,
mount_point=self.TEST_MOUNT_POINT,
)
else:
list_role_secrets_response = self.client.list_role_secrets(
role_name=test_role_name,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug('list_role_secrets_response: %s' % list_role_secrets_response)
self.assertEqual(
first=num_secrets_to_create,
second=len(list_role_secrets_response['data']['keys'])
)
def test_create_role(self):
self.client.create_role('testrole')
result = self.client.read('auth/approle/role/testrole')
lib_result = self.client.get_role('testrole')
del result['request_id']
del lib_result['request_id']
self.assertEqual(result, lib_result)
def test_delete_role(self):
test_role_name = 'test-role'
self.client.create_role(test_role_name)
# We add a second dummy test role so we can still hit the /role?list=true route after deleting the first role
self.client.create_role('test-role-2')
# Ensure our created role shows up when calling list_roles as expected
result = self.client.list_roles()
actual_list_role_keys = result['data']['keys']
self.assertIn(
member=test_role_name,
container=actual_list_role_keys,
)
# Now delete the role and verify its absence when calling list_roles
self.client.delete_role(test_role_name)
result = self.client.list_roles()
actual_list_role_keys = result['data']['keys']
self.assertNotIn(
member=test_role_name,
container=actual_list_role_keys,
)
def test_create_delete_role_secret_id(self):
self.client.create_role('testrole')
create_result = self.client.create_role_secret_id('testrole', {'foo': 'bar'})
secret_id = create_result['data']['secret_id']
result = self.client.get_role_secret_id('testrole', secret_id)
self.assertEqual(result['data']['metadata']['foo'], 'bar')
self.client.delete_role_secret_id('testrole', secret_id)
with self.assertRaises(ValueError):
self.client.get_role_secret_id('testrole', secret_id)
def test_auth_approle(self):
self.client.create_role('testrole')
create_result = self.client.create_role_secret_id('testrole', {'foo': 'bar'})
secret_id = create_result['data']['secret_id']
role_id = self.client.get_role_id('testrole')
result = self.client.auth_approle(role_id, secret_id)
self.assertEqual(result['auth']['metadata']['foo'], 'bar')
self.assertEqual(self.client.token, result['auth']['client_token'])
self.assertTrue(self.client.is_authenticated())
def test_auth_approle_dont_use_token(self):
self.client.create_role('testrole')
create_result = self.client.create_role_secret_id('testrole', {'foo': 'bar'})
secret_id = create_result['data']['secret_id']
role_id = self.client.get_role_id('testrole')
result = self.client.auth_approle(role_id, secret_id, use_token=False)
self.assertEqual(result['auth']['metadata']['foo'], 'bar')
self.assertNotEqual(self.client.token, result['auth']['client_token'])
|
# Copyright (c) 2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
import numpy as np
import toast.timing as timing
class GlitchFlagger():
def __init__(self, fwhm=5, threshold=4.0, fsample=180.3737, twice=False,
wkernel=3):
"""
        Instantiate a glitch flagging object. Parameters:
            fwhm (float): Beam width [arc minutes]
            threshold (float): glitch detection limit in units of the
                filtered signal RMS
            fsample (float): sampling frequency [Hz]
            twice (bool): Run the glitch detection on regular and convolved TOD
            wkernel (int): width of the convolution kernel used to extend flagged samples
"""
self.fwhm = fwhm
wbin = self.fwhm
self.order = 6 # order of signal model across 3 bins
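        # Choose the number of phase bins as the smallest power of two giving a bin
        # width of at most ~fwhm arcminutes of spin phase; wbin is then recomputed
        # as the actual bin width in radians.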
        nbin_min = int(2 * np.pi / np.radians(wbin / 60))
nbin = 2
while nbin < nbin_min:
nbin *= 2
wbin = 2 * np.pi / nbin
self.nbin = nbin
self.wbin = wbin
self.threshold = threshold
self.fsample = fsample
self.twice = twice
self.wkernel = wkernel
def flag_glitches(self, signal_in, flag_in, phase=None, dark=False,
pntflag=None):
"""
Find and flag glitches.
"""
if not dark:
if phase is None:
raise RuntimeError('Optical detectors must provide phase')
if pntflag is None:
raise RuntimeError(
'Optical detectors must provide pointing flags')
signal_in = signal_in.copy()
self.subtract_trend(signal_in)
flag = flag_in.copy()
if dark:
self.flag_outliers(signal_in, flag)
flag_intense = np.zeros_like(flag)
else:
# POD = phase-ordered data
ind = np.argsort(phase)
reverse_ind = np.argsort(ind)
POD_signal_in = signal_in[ind]
POD_signal = POD_signal_in.copy()
POD_flag = flag[ind]
POD_pntflag = pntflag[ind]
POD_phase = phase[ind]
bin_lim = np.arange(self.nbin) * self.wbin
bin_ind = np.searchsorted(POD_phase, bin_lim)
bin_ranges = [
(bin_ind[i], bin_ind[i + 1]) for i in range(self.nbin - 1)]
bin_ranges.append((bin_ind[-1], POD_phase.size))
# Identify outliers in each phase bin
POD_signal, POD_flag, bin_rms = self.flag_outliers_by_phase(
POD_signal, POD_phase, POD_flag, POD_pntflag, bin_ranges)
POD_signal_estimate = POD_signal_in - POD_signal
POD_flag_intense = self.get_intense(
bin_ranges, bin_rms, POD_signal, POD_signal_estimate, POD_flag)
if self.twice:
POD_signal2 = np.convolve(signal_in, [.25, .5, .25],
mode='same')[ind]
# POD_signal2_in = POD_signal2.copy()
flag2_in = POD_flag[reverse_ind]
flag2_in = np.convolve(flag2_in, np.ones(3), mode='same') != 0
POD_flag2_in = flag2_in[ind]
POD_flag2 = POD_flag2_in.copy()
POD_signal2, POD_flag2, bin_rms = self.flag_outliers_by_phase(
POD_signal2, POD_phase, POD_flag2, POD_pntflag, bin_ranges)
"""
# DEBUG begin
import matplotlib.pyplot as plt
import pdb
plt.figure()
good = flag_in[ind] + POD_pntflag == 0
plt.plot(POD_phase[good] / self.wbin, POD_signal_in[good], '.',
label='input')
good = POD_flag + POD_pntflag == 0
plt.plot(POD_phase[good] / self.wbin, POD_signal_in[good],
label='unflagged')
plt.plot(POD_phase[good] / self.wbin, POD_signal_estimate[good],
label='model')
good[POD_flag_intense == 0] = False
plt.plot(POD_phase[good] / self.wbin, POD_signal_in[good], '.',
label='unflagged intense')
# plt.plot(POD_phase[good] / self.wbin, POD_signal[good], '.',
# label='unflagged - model')
if self.twice:
plt.legend(loc='best')
plt.figure()
POD_signal_estimate2 = POD_signal2_in - POD_signal2
good = POD_flag2_in + POD_pntflag == 0
plt.plot(POD_phase[good] / self.wbin, POD_signal2_in[good], '.',
label='input')
good = POD_flag2 + POD_pntflag == 0
plt.plot(POD_phase[good] / self.wbin, POD_signal2_in[good],
label='unflagged')
plt.plot(POD_phase[good] / self.wbin,
POD_signal_estimate2[good], label='model')
good[POD_flag_intense == 0] = False
plt.plot(POD_phase[good] / self.wbin, POD_signal2_in[good], '.',
label='unflagged intense')
# plt.plot(POD_phase[good] / self.wbin, POD_signal2[good], '.',
# label='unflagged - model')
plt.legend(loc='best')
plt.show()
pdb.set_trace()
# DEBUG end
"""
if self.twice:
POD_flag2[POD_flag2_in] = False
POD_flag[POD_flag2] = True
flag = POD_flag[reverse_ind]
# flag = POD_flag[reverse_ind]
flag_intense = POD_flag_intense[reverse_ind]
signal_estimate = POD_signal_estimate[reverse_ind]
if self.wkernel:
# Extend the flagging
flag[flag_in] = False
flag = np.convolve(flag, np.ones(self.wkernel), mode='same') != 0
flag = np.roll(flag, self.wkernel // 2 - 1)
flag[flag_in] = True
return flag, flag_intense, signal_estimate
def subtract_trend(self, signal):
"""
subtract a simple trend
"""
istart = 0
        step = int(60 * self.fsample)
while istart < signal.size:
istop = istart + step
if istop + step > signal.size:
istop += step
ind = slice(istart, istop)
offset = np.median(signal[ind])
signal[ind] -= offset
istart = istop
return
def flag_outliers(self, signal, flag):
"""
Find outliers in offset-removed signal
"""
for _ in range(10):
offset = np.median(signal[flag == 0])
signal -= offset
rms = np.mean(signal[flag == 0] ** 2) ** .5
bad = np.abs(signal) > self.threshold * rms
bad[flag != 0] = False
nbad = np.sum(bad)
if nbad == 0:
break
flag[bad] = True
return
def _get_bin(self, ibin, signal, phase, flag, pntflag, bin_ranges):
"""
Return signal in the current bin with margins
"""
nbin = len(bin_ranges)
signals = []
phases = []
flags = []
pntflags = []
# previous bin
if ibin == 0:
bin_start, bin_stop = bin_ranges[-1]
else:
bin_start, bin_stop = bin_ranges[ibin - 1]
ind = slice(bin_start, bin_stop)
signals.append(signal[ind])
if ibin == 0:
phases.append(phase[ind] - 2 * np.pi)
else:
phases.append(phase[ind])
flags.append(flag[ind])
pntflags.append(pntflag[ind])
# current bin
bin_start, bin_stop = bin_ranges[ibin]
ind = slice(bin_start, bin_stop)
signals.append(signal[ind])
phases.append(phase[ind])
flags.append(flag[ind])
pntflags.append(pntflag[ind])
# next bin
if ibin < nbin - 1:
bin_start, bin_stop = bin_ranges[ibin + 1]
else:
bin_start, bin_stop = bin_ranges[0]
ind = slice(bin_start, bin_stop)
signals.append(signal[ind])
if ibin < nbin - 1:
phases.append(phase[ind])
else:
phases.append(phase[ind] + 2 * np.pi)
flags.append(flag[ind])
pntflags.append(pntflag[ind])
center = slice(signals[0].size, signals[0].size + signals[1].size)
# concatenate
signals = np.hstack(signals)
phases = np.hstack(phases)
flags = np.hstack(flags)
pntflags = np.hstack(pntflags)
return signals, phases, flags, pntflags, center
def robust_rms(self, x):
"""
        Estimate the sample RMS robustly using the interquartile range (IQR)
"""
if len(x) < 4:
return np.std(x)
xsorted = np.sort(x)
nx = x.size
        i1 = int(0.25 * nx)
        i2 = int(0.75 * nx)
iqr = xsorted[i2] - xsorted[i1]
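        # For Gaussian data the interquartile range is about 1.349 sigma, so scaling
        # the IQR by ~0.741 gives an outlier-resistant estimate of the standard deviation.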
rms = iqr * 0.7412
return rms
def flag_outliers_by_phase(self, signal, phase, flag, pntflag, bin_ranges):
"""
Find outliers in the de-trended signal and derive a signal estimate.
"""
bin_rms = []
nbin = len(bin_ranges)
signal_out = np.zeros_like(signal)
flag_out = np.zeros_like(flag)
for ibin in range(nbin):
bin_start, bin_stop = bin_ranges[ibin]
ind = slice(bin_start, bin_stop)
sig, phse, flg, pntflg, center = self._get_bin(
ibin, signal, phase, flag, pntflag, bin_ranges)
rms = 0
for iiter in range(10):
good_ind = flg + pntflg == 0
ngood = np.sum(good_ind)
if ngood < 10:
# This bin is beyond hope
flg[:] = True
break
if iiter < 2:
# Signal model is an offset
offset = np.median(sig[good_ind])
else:
# Signal model is a polynomial
offset = self.fit_poly(phse, sig, good_ind)
sig -= offset
rms = self.robust_rms(sig[good_ind])
bad = np.abs(sig) > self.threshold * rms
bad[flg != 0] = False
nbad = np.sum(bad)
if nbad == 0 and iiter > 2:
break
flg[bad] = True
signal_out[ind] = sig[center]
flag_out[ind] = flg[center]
bin_rms.append(rms)
return signal_out, flag_out, np.array(bin_rms)
def get_intense(self, bin_ranges, bin_rms, noise, estimate, flag):
"""
Flag all samples falling into bins with extreme RMS as intense
"""
snr = []
for ibin, ((bin_start, bin_stop), rms) in enumerate(zip(bin_ranges,
bin_rms)):
ind = slice(bin_start, bin_stop)
good = flag[ind] == 0
rms_signal = np.std(estimate[ind][good])
rms_noise = np.std(noise[ind][good])
snr.append(rms_signal / rms_noise)
flag_intense = np.zeros_like(flag)
good = bin_rms != 0
for _ in range(10):
ngood = np.sum(good)
good_rms = bin_rms[good]
rms_median = np.median(good_rms)
rms_rms = (np.sum((good_rms - rms_median) ** 2) / (ngood - 1)) ** .5
for ibin, ((bin_start, bin_stop), rms) in enumerate(zip(bin_ranges,
bin_rms)):
if rms < max(2 * rms_median,
rms_median + 5 * rms_rms) and snr[ibin] < 1:
continue
good[ibin] = False
ind = slice(bin_start, bin_stop)
flag_intense[ind] = True
return flag_intense
def fit_poly(self, x, y, ind_fit):
templates = []
xx = (x - np.mean(x)) / np.ptp(x)
for iorder in range(self.order + 1):
templates.append(xx[ind_fit] ** iorder)
templates = np.vstack(templates)
invcov = np.dot(templates, templates.T)
cov = np.linalg.inv(invcov)
proj = np.dot(templates, y[ind_fit])
coeff = np.dot(cov, proj)
poly = np.zeros_like(y)
for iorder, cc in enumerate(coeff):
poly += cc * xx ** iorder
return poly
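# A minimal sanity-check sketch (not part of the production pipeline): fit_poly solves
# the least-squares normal equations for a degree-`order` polynomial in the rescaled
# coordinate xx = (x - mean(x)) / ptp(x), so on noiseless polynomial input it should
# reproduce the data to numerical precision.
if __name__ == "__main__":
    x = np.linspace(0.0, 1.0, 200)
    y = 3.0 + 2.0 * x - 1.5 * x ** 2
    good = np.ones(x.size, dtype=bool)
    flagger = GlitchFlagger()
    model = flagger.fit_poly(x, y, good)
    print("max |fit_poly - data| =", np.max(np.abs(model - y)))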
|
from .base import Model, OrderedCollection
from .user import Member, User
from .room import Room
from ..client.constants import Endpoints
from .. import utils
__all__ = ["Message", "MessageCollection", "Conversation", "ConversationCollection"]
def everything_except(iterable, exception):
return filter(lambda o: o != exception, iterable)
class Message(Model):
"""
Represents a sent message on QueUp.
Attributes
----------
id: str
Chat ID of this message.
content: str
Text of the message.
deleted: bool
True if the message has been deleted, False otherwise.
created_at: :class:`datetime.datetime`
Datetime representing the time this message was sent.
"""
def __init__(self, client, data: dict):
super().__init__(client)
self.id = data.get("chatid")
self.content = data.get("message")
self.deleted = False
self.created_at = utils.dt(data.get("time"))
self._userid = data.get("user", {}).get("_id")
self._roomid = data.get("queue_object", {}).get("roomid")
# self.member = Member(self, data.get("queue_object")) # today on naming things with dubtrack
@classmethod
def from_conversation_message(cls, client, data: dict):
new_data = dict(chatid=data.get("_id"),
message=data.get("message"),
time=data.get("created"),
user=data.get("_user"),
queue_object={"roomid": None})
return cls(client, new_data)
@property
def author(self) -> User:
"""
Get the author of this message.
Returns
-------
:class:`User`
Author of this message
"""
return self.client.users.get(self._userid)
@property
def room(self) -> Room:
"""
Get the room this message was sent in.
If this message was sent in a private conversation, this will be None.
Returns
-------
:class:`Room`
Room this message was sent to.
"""
return self.client.rooms.get(self._roomid)
@property
def member(self) -> Member:
"""
Get the author of this message as a member of the room.
Returns
-------
:class:`Member`
Member who sent this message.
"""
return self.room.members.from_user_id(self._userid)
# TODO: Conversations can actually have multiple recipients.
class Conversation(Model):
"""
Represents a private conversation between two users.
Attributes
----------
id: str
ID of this conversation.
created_at: :class:`datetime.datetime`
Time this conversation was started.
latest_message_str: str
Text of the last message to be sent in this conversation. May not be up-to-date unless :meth:`fetch` is called.
"""
def __init__(self, client, data: dict):
super().__init__(client)
self.id = data.get("_id")
self.created_at = utils.dt(data.get("created"))
self.latest_message_str = data.get("latest_message_str")
self._latest_message_dt = data.get("latest_message")
self._read = data.get("users_read")
self._users = []
for user in data.get("usersid"):
u = User(self.client, user)
self._users.append(u.id)
self.client.users.add(u)
@property
def _recipients(self):
others = everything_except(self._users, self.client.user.id)
return list(others)
@property
def recipients(self) -> list:
"""
Get the recipients of this conversation.
Returns
-------
list[:class:`User`]
Recipients of this conversation.
"""
return list(map(self.client.users.get, self._recipients))
async def fetch(self) -> list:
"""
Fetch all messages for this conversation.
Returns
-------
list[str]
List of message IDs in this conversation.
"""
_, messages = await self.client.http.get(Endpoints.conversation(cid=self.id))
self.latest_message_str = messages[0]['message']
for msg_data in messages:
message = Message.from_conversation_message(self.client, msg_data)
self.client.messages.add(message)
return list(map(lambda m: m['_id'], messages))
async def send_message(self, text: str):
"""
Send a message to this conversation.
Parameters
----------
text: str
Text to send in the message.
"""
data = {
"message": text,
"userid": self.client.user.id
}
_, msg_data = await self.client.http.post(Endpoints.conversation(cid=self.id), json=data)
# TODO: fuck me the message data is in yet *another* format
# we'll deal with this later
async def mark_as_read(self):
"""
Marks this conversation as read by the current user.
"""
_, resp = await self.client.http.post(Endpoints.conversation_read(cid=self.id))
self._read = resp.get("users_read")
def has_read(self, user: User) -> bool:
"""
Checks if the passed :class:`User` has read this conversation.
Parameters
----------
user: :class:`User`
User to check
Returns
-------
bool:
True if the user has read this conversation, False otherwise.
"""
return user.id in self._read
class MessageCollection(OrderedCollection):
pass
class ConversationCollection(OrderedCollection):
def get_by_recipients(self, *args):
def _checker(conv: Conversation):
for uid in args:
if uid not in conv._recipients:
return False
return True
convs = filter(_checker, self.values())
return next(convs, None)
|
import pysam
import itertools
# longer genes have more reads aligning to them
# so instead of counting reads, estimate a number of copies of each marker
# this should be proportional to species abundance and agree between markers
# (and also proportional to sequencing depth - we correct for that later when calculating CPM output)
def compute_contribution_to_marker_coverage(alignment_file, sam_record):
marker_length = alignment_file.get_reference_length(sam_record.reference_name)
return round(1.0 * sam_record.infer_query_length() / marker_length, 6)
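# Worked example of the idea above (illustrative numbers, not real data): a 150 bp read
# aligned to a 1,500 bp marker contributes 150 / 1500 = 0.1 marker copies, so forty such
# reads sum to ~4 estimated copies. Dividing by marker length is what makes the copy
# estimates comparable across markers of different lengths.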
# adapted from https://github.com/mortazavilab/TALON/blob/master/src/talon/transcript_utils.py
def compute_alignment_identity(sam_record):
""" This function computes what fraction of the read matches the reference
genome."""
read_ID = sam_record.query_name
try:
MD_tag = sam_record.get_tag('MD')
except KeyError:
raise ValueError("SAM transcript %s lacks an MD tag" % read_ID)
total_bases = sam_record.infer_query_length()
matches = 0.0
ops, counts = splitMD(MD_tag)
for op,ct in zip(ops, counts):
if op == "M":
matches += ct
if op == "D":
total_bases += ct
return round(1.0 * matches/total_bases, 6)
def splitMD(MD):
""" Takes MD tag and splits into two lists:
one with capital letters (match operators), and one with
the number of bases that each operation applies to. """
operations = []
# Split MD string where type changes.
# Digits are separated from base changes.
# Deletions (with ^) are captured together.
counts = ["".join(x) for _, x in itertools.groupby(MD, key=str.isdigit)]
# Get operations
for i in range(0,len(counts)):
curr = counts[i]
try:
counts[i] = int(curr)
operations.append("M")
except ValueError:
# Handle deletion
if curr.startswith("^"):
operations.append("D")
counts[i] = len(counts[i]) - 1
else:
operations.append("X")
counts[i] = len(counts[i])
return operations, counts
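# Small illustrative check of splitMD (hypothetical MD tag, only run when this module is
# executed directly): "10A5^AC6" encodes 10 matches, a mismatch at a reference A, 5 more
# matches, a 2-base deletion (AC), then 6 matches.
if __name__ == "__main__":
    ops, counts = splitMD("10A5^AC6")
    assert ops == ["M", "X", "M", "D", "M"]
    assert counts == [10, 1, 5, 2, 6]
    print(list(zip(ops, counts)))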
|
#!/usr/bin/env python3
"""
Testing simple arithmetic FHE operations using the Python Frontend.
"""
from pyabc import *
import logging
def test_simple_arithmetic():
p = ABCProgram(logging.DEBUG)
with ABCContext(p, logging.DEBUG):
def main(a : NonSecretInt, x = 3):
b = (2 * 6) / x
y = a + b
return y
r = p.execute(1, x=2)
assert r == 7
r = p.execute(2)
assert r == 6
def test_simple_for_loop():
p = ABCProgram(logging.DEBUG)
with ABCContext(p, logging.DEBUG):
def main(start : NonSecretInt, end : NonSecretInt, step : NonSecretInt):
sum = 0
for i in range(start, end, step):
sum += i
return sum
s = p.execute(1, 3, 1)
assert s == 3
s = p.execute(5, 0, -1)
assert s == 15
|
class TimeError(Exception):
""" Raised when creating a TimePeriod that doesn't work"""
|
# Copyright 2016-2020, Pulumi Corporation. All rights reserved.
import pulumi
import pulumi_azure_native.authorization as authorization
import pulumi_azure_native.storage as storage
import pulumi_azure_native.synapse as synapse
import pulumi_azure_native.resources as resources
import pulumi_random as random
config = pulumi.Config()
resource_group = resources.ResourceGroup("synapse-rg")
storage_account = storage.StorageAccount(
"synapsesa",
resource_group_name=resource_group.name,
access_tier=storage.AccessTier.HOT,
enable_https_traffic_only=True,
is_hns_enabled=True,
kind=storage.Kind.STORAGE_V2,
sku=storage.SkuArgs(
name=storage.SkuName.STANDARD_RAGRS,
))
data_lake_storage_account_url = storage_account.name.apply(lambda name: f"https://{name}.dfs.core.windows.net")
users = storage.BlobContainer(
"users",
resource_group_name=resource_group.name,
account_name=storage_account.name,
public_access=storage.PublicAccess.NONE)
workspace = synapse.Workspace(
"workspace",
resource_group_name=resource_group.name,
default_data_lake_storage=synapse.DataLakeStorageAccountDetailsArgs(
account_url=data_lake_storage_account_url,
filesystem="users",
),
identity=synapse.ManagedIdentityArgs(
type=synapse.ResourceIdentityType.SYSTEM_ASSIGNED,
),
sql_administrator_login="sqladminuser",
sql_administrator_login_password=random.RandomPassword("workspacePwd", length=12).result)
allow_all = synapse.IpFirewallRule(
"allowAll",
resource_group_name=resource_group.name,
workspace_name=workspace.name,
end_ip_address="255.255.255.255",
start_ip_address="0.0.0.0")
subscription_id = resource_group.id.apply(lambda id: id.split('/')[2])
role_definition_id = subscription_id.apply(
lambda
id: f"/subscriptions/{id}/providers/Microsoft.Authorization/roleDefinitions/ba92f5b4-2d11-453d-a403-e96b0029c9fe")
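# ba92f5b4-2d11-453d-a403-e96b0029c9fe is the built-in "Storage Blob Data Contributor"
# role; assigning it below gives the workspace's managed identity (and the configured
# user) read/write access to the data lake filesystem.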
storage_access = authorization.RoleAssignment(
"storageAccess",
role_assignment_name=random.RandomUuid("roleName").result,
scope=storage_account.id,
principal_id=workspace.identity.principal_id.apply(lambda v: v or "<preview>"),
principal_type="ServicePrincipal",
role_definition_id=role_definition_id)
user_access = authorization.RoleAssignment(
"userAccess",
role_assignment_name=random.RandomUuid("userRoleName").result,
scope=storage_account.id,
principal_id=config.require("userObjectId"),
principal_type="User",
role_definition_id=role_definition_id)
sql_pool = synapse.SqlPool(
"SQLPOOL1",
resource_group_name=resource_group.name,
workspace_name=workspace.name,
collation="SQL_Latin1_General_CP1_CI_AS",
create_mode="Default",
sku=synapse.SkuArgs(
name="DW100c",
))
spark_pool = synapse.BigDataPool(
"Spark1",
resource_group_name=resource_group.name,
workspace_name=workspace.name,
auto_pause=synapse.AutoPausePropertiesArgs(
delay_in_minutes=15,
enabled=True,
),
auto_scale=synapse.AutoScalePropertiesArgs(
enabled=True,
max_node_count=3,
min_node_count=3,
),
node_count=3,
node_size="Small",
node_size_family="MemoryOptimized",
spark_version="2.4")
|
import os
from .base_logger import BaseLogger
class ModelLogger(BaseLogger):
def __init__(self, tb_logger, log_path, cfg):
super().__init__(tb_logger, log_path, cfg)
self.NAME = 'model'
os.makedirs(self.log_path, exist_ok=True)
self.phase = 'train'
self.current_epoch = -1
self.save_interval = cfg.MODEL_SAVE_PER_N_EPOCH
self.save_method = None
def log_batch(self, batch):
self.phase = batch['phase']
self.current_epoch = batch['epoch-id']
self.save_method = batch['save-method']
def log_phase(self):
if self.phase == 'train' and (self.current_epoch % self.save_interval == 0):
self.save_method(os.path.join(self.log_path, "epoch_%d.model" % self.current_epoch))
if self.phase == 'train':
            if any(fn.endswith('latest.model') for fn in os.listdir(self.log_path)):
os.system('rm ' + os.path.join(self.log_path, "epoch_*_latest.model"))
self.save_method(os.path.join(self.log_path, "epoch_%d_latest.model" % self.current_epoch))
|
from django.conf.urls import url
from django.core.exceptions import FieldError
from tastypie.http import HttpBadRequest
from tastypie.resources import ModelResource, ALL_WITH_RELATIONS
def generate_filtering(mdl):
"""Utility function to add all model fields to filtering whitelist.
See: http://django-tastypie.readthedocs.org/en/latest/resources.html#basic-filtering
"""
filtering = {}
for field in mdl._meta.fields:
filtering.update({field.name: ALL_WITH_RELATIONS})
return filtering
def generate_meta(klass, overrides={}):
metaitems = {
'queryset': klass.objects.all(),
'resource_name': klass._meta.model_name,
'filtering': generate_filtering(klass)
}
metaitems.update(overrides)
return type('Meta', (object,), metaitems)
class APIResource(ModelResource):
class Meta:
pass
def prepend_urls(self):
return [
url(
r"^(?P<resource_name>{})/fields/(?P<field_name>[\w\d_.-]+)/$".format(self._meta.resource_name),
self.wrap_view('field_values'), name="api_field_values"),
]
def field_values(self, request, **kwargs):
# Get a list of unique values for the field passed in kwargs.
try:
qs = self._meta.queryset.values_list(kwargs['field_name'], flat=True).distinct()
except FieldError as e:
return self.create_response(request, data={'error': str(e)}, response_class=HttpBadRequest)
# Prepare return the HttpResponse.
return self.create_response(request, data=list(qs))
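# Minimal usage sketch (Book and myapp are hypothetical; a configured Django project is
# assumed). generate_meta() builds a Meta class whose filtering whitelist covers every
# model field, so clients can filter on any field or relation out of the box, e.g.
# /api/v1/book/?author__name=Smith, while /api/v1/book/fields/title/ returns the distinct
# values of the title field via field_values() above.
#
#     from myapp.models import Book
#
#     class BookResource(APIResource):
#         Meta = generate_meta(Book)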
|
# Generated by Django 2.1.5 on 2019-01-13 05:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cards', '0005_auto_20190110_1422'),
]
operations = [
migrations.AddField(
model_name='card',
name='img_cover',
field=models.ImageField(blank=True, upload_to='card', verbose_name="Card's attached image"),
),
]
|
# -*- coding: utf-8 -*-
"""
gethbvpars:
    gets the HBV catchment parameters from an HBV model. It assumes the default
parameters are stored in the root (the basin) and that tuned parameters
are stored in each catchment.
syntax:
gethbvpars -p pathtobasin -o outputfilename
"""
import os.path
import getopt
import sys
sep = ","
csvfile = "test.csv"
def usage(*args):
sys.stdout = sys.stderr
for msg in args:
print(msg)
print(__doc__)
sys.exit(0)
def readpar(fname, skip):
    a = {}
    f = open(fname, "r")
    if skip:
        x = f.readline()
    x = f.readlines()
    f.close()
    for l in x:
        # strip quote characters before splitting into key/value tokens
        ll = l.replace("'", "").split()
        if len(ll) > 0:
            a[ll[0]] = ll[1]
    return a
def readbas(fname):
    a = []
    f = open(fname, "r")
    x = f.readline()
    x = f.readlines()
    f.close()
    for l in x:
        # strip quote and backslash characters before splitting
        ll = l.replace("'", "").replace("\\", "").split()
        if len(ll) > 0:
            if ll[0] == "basindir":
                a.append(ll[1])
    return a
basin = ""
catch = {}
try:
opts, args = getopt.getopt(sys.argv[1:], "o:p:h")
except getopt.error as msg:
usage(msg)
for o, a in opts:
if o == "-p":
basin = a
if o == "-o":
csvfile = a
if o == "-h":
usage()
# read basin structure and order
basstruc = readbas(basin + "/basin.par")
# read default parameters
baspar = readpar(basin + "/rmod.par", 0)
for ddri in basstruc:
pfile = basin + "/" + ddri + "/bmod.par"
if os.path.exists(pfile):
xx = readpar(pfile, 1)
catch[os.path.basename(ddri)] = xx
f = open(csvfile, "w")
i = 0
print("Id,Name", end=" ", file=f)
for ppar in baspar:
print(sep + ppar, end=" ", file=f)
print("", file=f)
# for c in catch:
for ii in range(0, len(basstruc) - 1):
i = i + 1
c = basstruc[ii]
print(str(i) + sep + c, end=" ", file=f)
for ppar in baspar:
if ppar in catch[c]:
print(sep + catch[c][ppar], end=" ", file=f)
else:
print(sep + baspar[ppar], end=" ", file=f)
print("", file=f)
f.close()
|
# from __future__ import print_function # for print() in Python 2
from io import open
import sys
import re
import logging
from pysqlformatter.src.formatter import Formatter
from sparksqlformatter import Style as SparksqlStyle
from sparksqlformatter import api as sparksqlAPI
logger = logging.getLogger(__name__)
log_formatter = '[%(asctime)s] %(levelname)s [%(filePath)s:%(lineno)s:%(funcName)s] %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_formatter)
def format_file(filePath, pythonStyle='pep8', sparksqlStyle=SparksqlStyle(), queryNames=['query'], inPlace=False):
'''
Format file with given settings for python style and sparksql configurations.
Parameters
filePath: string
Path to the file to format.
pythonStyle: string
A style name or path to a style config file; interface to https://github.com/google/yapf.
sparksqlStyle: string, dict, or sparksqlformatter.src.style.Style() object
Configurations for the query language; interface to https://github.com/largecats/sparksql-formatter.
    queryNames: list
        Names of the variables whose string values are treated as queries and formatted.
inPlace: bool
If True, will format the file in place.
Else, will write the formatted file to stdout.
Return: None
'''
    if isinstance(sparksqlStyle, SparksqlStyle):
        formatter = Formatter(pythonStyle=pythonStyle, sparksqlStyle=sparksqlStyle, queryNames=queryNames)
else:
if type(sparksqlStyle) == str:
if sparksqlStyle.startswith('{'):
sparksqlStyle = eval(sparksqlStyle)
formatter = Formatter(pythonStyle=pythonStyle,
sparksqlStyle=sparksqlAPI._create_style_from_dict(sparksqlStyle),
queryNames=queryNames)
else:
formatter = Formatter(pythonStyle=pythonStyle,
sparksqlStyle=sparksqlAPI._create_style_from_file(sparksqlStyle))
elif type(sparksqlStyle) == dict:
formatter = Formatter(pythonStyle=pythonStyle,
sparksqlStyle=sparksqlAPI._create_style_from_dict(sparksqlStyle),
queryNames=queryNames)
else:
raise Exception('Unsupported config type')
_format_file(filePath, formatter, inPlace)
def format_script(script, pythonStyle='pep8', sparksqlStyle=SparksqlStyle(), queryNames=['query']):
'''
Format script using given settings for python style and sparksql configurations.
Parameters
script: string
The script to be formatted.
pythonStyle: string
A style name or path to a style config file; interface to https://github.com/google/yapf.
sparksqlStyle: string, dict, or sparksqlformatter.src.style.Style() object
        Configurations for the query language; interface to https://github.com/largecats/sparksql-formatter.
    queryNames: list
        Names of the variables whose string values are treated as queries and formatted.
Return: string
The formatted script.
'''
    if isinstance(sparksqlStyle, SparksqlStyle):
formatter = Formatter(pythonStyle=pythonStyle, sparksqlStyle=sparksqlStyle, queryNames=queryNames)
else:
if type(sparksqlStyle) == str:
if sparksqlStyle.startswith('{'):
sparksqlStyle = eval(sparksqlStyle)
formatter = Formatter(pythonStyle=pythonStyle,
sparksqlStyle=sparksqlAPI._create_style_from_dict(sparksqlStyle),
queryNames=queryNames)
else:
formatter = Formatter(pythonStyle=pythonStyle,
sparksqlStyle=sparksqlAPI._create_style_from_file(sparksqlStyle),
queryNames=queryNames)
elif type(sparksqlStyle) == dict:
formatter = Formatter(pythonStyle=pythonStyle,
sparksqlStyle=sparksqlAPI._create_style_from_dict(sparksqlStyle),
queryNames=queryNames)
else:
raise Exception('Unsupported config type')
return _format_script(script, formatter)
def _format_file(filePath, formatter, inPlace=False):
'''
The I/O helper function for format_file(). Read from given file, format it, and write to specified output.
Parameters
filePath: string
Path to the file to format.
formatter: pysqlformatter.src.formatter.Formatter() object
Formatter.
inPlace: bool
If True, will format the file in place.
Else, will write the formatted file to stdout.
Return: None
'''
script = _read_from_file(filePath)
formattedScript = _format_script(script, formatter)
if inPlace: # overwrite file
logger.info('Writing to ' + filePath + '...')
_write_to_file(formattedScript, filePath)
else: # write to stdout
sys.stdout.write(formattedScript)
def _read_from_file(filePath):
'''
The input helper function for _format_file(). Read from given file and return its content.
Parameters
filePath: string
Path to the file to format.
Return: string
The file content.
'''
# see https://docs.python.org/3.5/library/functions.html#open
with open(file=filePath, mode='r', newline=None, encoding='utf-8') as f:
text = f.read()
return text
def _write_to_file(formattedQuery, filePath):
'''
The output helper function for _format_file(). Write formatted query to given file.
Parameters
formattedQuery: string
The formatted query.
filePath: string
Path to the file to write to.
'''
# see https://docs.python.org/3.5/library/functions.html#open
with open(file=filePath, mode='w', newline='\n', encoding='utf-8') as f:
f.write(formattedQuery)
def _format_script(script, formatter):
'''
The wrapper function for format_script(). Format a given script using given formatter.
Parameters
string: string
The script to format.
formatter: sparksqlformatter.src.formatter.Formatter() object
Formatter.
Return: string
The formatted script.
'''
return formatter.format(script)
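if __name__ == '__main__':
    # Minimal usage sketch (assumes yapf and sparksqlformatter are installed): string
    # literals assigned to a variable named in queryNames (default: 'query') are formatted
    # as SparkSQL, while the surrounding Python is formatted with the chosen yapf style.
    demo = "query = 'select a, b from some_table where a = 1'\n"
    print(format_script(demo))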
|
from Martel import Dispatch
from Bio import Search, StdHandler
class BuildSearch(Dispatch.Dispatcher):
def __init__(self):
Dispatch.Dispatcher.__init__(self,
prefix = "bioformat:",
remap = {
"bioformat:hit": "bioformat:hit_description_block",
}
)
self.acquire(StdHandler.Handle_hsp(self.add_hsp))
self.acquire(StdHandler.Handle_search_header(self.add_header))
self.acquire(StdHandler.Handle_search_table(self.add_table))
self.acquire(StdHandler.Handle_search_info(self.add_stats))
def start_(self, name, attrs):
self.algorithm = None
self.query = None
self.database = None
self.table = []
self.hits = []
self.parameters = {}
self.statistics = {}
def end_(self, name):
self.document = None
self.document = Search.Search(
self.algorithm,
self.query,
self.database,
self.table,
self.hits,
self.parameters,
self.statistics)
def start_hit(self, name, attrs):
self.hit_description = None
self.hit_length = None
self.hsps = []
def end_hit(self, name):
self.hits.append(Search.Hit("XXX SPAM", self.hit_description,
"XXX EGGS", self.hit_length,
self.algorithm, self.hsps))
def add_hsp(self, hsp_values, hsp_handler, strands, frames):
self.hsps.append(Search.HSP(hsp_handler.query_seq,
hsp_handler.homology_seq,
hsp_handler.subject_seq,
# XXX strand and frame!
(hsp_handler.query_start_loc,
hsp_handler.query_end_loc),
(hsp_handler.subject_start_loc,
hsp_handler.subject_end_loc),
hsp_handler.query_name,
hsp_handler.subject_name,
self.algorithm,
hsp_values))
def add_table(self, table):
self.table = [Search.TableInfo(*x) for x in table]
def add_header(self, info):
self.algorithm = Search.Algorithm(info["appname"],
info["appversion"])
self.database = Search.Database(info["dbname"],
info["db_num_letters"],
info["db_num_sequences"])
self.query = Search.Query("XXX spam", "XXX eggs",
info["query_description"],
info["query_size"])
def add_stats(self, parameters, statistics):
self.parameters = parameters
self.statistics = statistics
StdHandler.add_text_block_handler(BuildSearch, "hit_description",
"join-description", "join|fixspaces",
"hit_description")
StdHandler.add_int_handler(BuildSearch, "hit_length", "hit_length")
make_builder = BuildSearch
|
# Joint_lap Object (inherits from the Joint class)
# Joint_lap objects are used to model variable-angle lap joints.
import math
from ast import literal_eval
import compas
from compas.datastructures import Mesh
from compas.geometry import Box, Frame, Point, Line, Transformation, Vector
from compas.geometry import Projection, Translation, transformations
from compas.geometry import distance_point_point, intersection_segment_segment, dot_vectors, transform_points, angle_vectors, centroid_points
from integral_timber_joints.geometry.beam import Beam
from integral_timber_joints.geometry.joint import Joint
from integral_timber_joints.geometry.screw import Screw_SL
from integral_timber_joints.geometry.utils import *
try:
from typing import Dict, List, Optional, Tuple, cast, Any
from integral_timber_joints.process import RobotClampAssemblyProcess
from integral_timber_joints.assembly.beam_assembly_method import BeamAssemblyMethod
except:
pass
class JointPolylineLap(Joint):
"""
    Joint class modelling a lap joint whose cut profile is defined by four polylines.
"""
def __init__(
self,
face_id=1, # type: int
center_distance=100, # type: float
top_side_thickness=50, # type: float
corner_pts=[], # type: list[Point]
polylines=None, # type: list[list[Tuple[float, float]]]
is_joint_on_beam_move=False, # type: bool
is_joint_on_top=False, # type: bool
name=None # type: str
):
"""
        :param face_id: int, id of the beam face to be cut
        :param center_distance: float, distance from the beam start to the joint center
        :param top_side_thickness: float, thickness of the material left on the top side
"""
if polylines is None:
polylines = [[[0, 0], [1, 0]]] * 4
self.face_id = face_id # The face id of the face to be cut
self.center_distance = center_distance # Distance measured from the start of the beam to the first corner (0 or 1) of the joint
        self.top_side_thickness = top_side_thickness # Thickness of the material on the top (reference-face) side of the joint
        self.corner_pts = corner_pts # Eight corner points - refer to drawing
        self.polylines = polylines # Four polylines of (u, v) pairs defining the cut profile on the four sides
self.is_joint_on_beam_move = is_joint_on_beam_move # If true, the line 01 23 45 67 are cutting along the Y axis of the ref face.
self.is_joint_on_top = is_joint_on_top # If true, the joint opening is on the reference face / point 4567.
self.name = name
@property
def data(self):
data = {
'face_id': self.face_id,
'center_distance': self.center_distance,
'top_side_thickness': self.top_side_thickness,
'corner_pts': self.corner_pts,
'polylines': self.polylines,
'is_joint_on_beam_move': self.is_joint_on_beam_move,
'is_joint_on_top': self.is_joint_on_top,
'name': self.name,
}
return data
@classmethod
def from_data(cls, data):
"""Construct a Joint object from structured data.
This class method must be overridden by an inherited class.
"""
joint = cls()
joint.face_id = data.get('face_id', 1)
joint.center_distance = data.get('center_distance', 100)
joint.top_side_thickness = data.get('top_side_thickness', 90)
joint.corner_pts = data.get('corner_pts', [])
joint.polylines = data.get('polylines', [])
joint.is_joint_on_beam_move = data.get('is_joint_on_beam_move', False)
joint.is_joint_on_top = data.get('is_joint_on_top', False)
joint.name = data.get('name', None)
return joint
@property
def height(self):
if self.is_joint_on_top:
return self.top_side_thickness
else:
return self.bottom_side_thickness
@property
def angle(self):
v1 = Vector.from_start_end(self.corner_pts[0], self.corner_pts[3])
v2 = Vector.from_start_end(self.corner_pts[2], self.corner_pts[3])
if self.is_joint_on_top:
if self.is_joint_on_beam_move:
return math.degrees(v1.angle(v2))
else:
return math.degrees(v1.angle(v2.scaled(-1)))
else:
if self.is_joint_on_beam_move:
return math.degrees(v1.angle(v2.scaled(-1)))
else:
return math.degrees(v1.angle(v2))
@property
def _total_thickness(self):
return max([distance_point_point(self.corner_pts[i], self.corner_pts[i+4]) for i in range(4)])
@property
def distance_at_center(self):
return self.center_distance
@property
def centroid(self):
return centroid_points(self.corner_pts)
# ###########################
# Transformation of Extrinsic
# ###########################
def transform(self, transformation):
# type: (Transformation) -> None
"""Transforming the joint object in WCF.
Typically called by assembly.transform when initiated by user."""
self.center_frame.transform(transformation)
self.corner_pts = transform_points(self.corner_pts, transformation)
# #####################
    # Modifiable Parameters
# #####################
@property
def thickness(self):
# type: () -> float
if self.is_joint_on_top:
return self._total_thickness - self.top_side_thickness
else:
return self.top_side_thickness
@thickness.setter
def thickness(self, value):
# type: (float) -> None
if self.is_joint_on_top:
self.top_side_thickness = self._total_thickness - value
else:
self.top_side_thickness = value
@property
def param_string(self):
# type: () -> str
return str([self.top_side_thickness, self.polylines]).replace(" ", "")
@param_string.setter
def param_string(self, value):
# type: (str) -> None
values = literal_eval(value)
assert len(values) == 2
self.top_side_thickness = float(values[0])
assert len(values[1]) == 4
self.polylines = values[1]
@property
def parameter_keys(self):
# type: () -> list[str]
return ['param_string']
def get_parameter(self, key):
# type: (str) -> Any
if key == 'param_string':
return self.param_string
if key == 'thickness':
return self.thickness
raise KeyError("%s is invalid for JointPolylineLap" % key)
def set_parameter(self, key, value):
# type: (str, Any) -> None
if key == "param_string":
self.param_string = value
return
if key == "thickness":
self.thickness = value
return
raise KeyError("%s is invalid for JointPolylineLap" % key)
# #####################
# Joint Shape
# #####################
    def _quad_at_height(self, height_fraction):
        # type: (float) -> list[Point]
        """Return the four points of the quad at the specified height.
        - height_fraction 0.0 returns points 0-3.
        - height_fraction 1.0 returns points 4-7.
        - heights in between are interpolated linearly.
        """
        return [Line(self.corner_pts[i], self.corner_pts[i+4]).point(height_fraction) for i in range(4)]
def _polyline_at_height(self, line_index, height_fraction):
# type: (int, float) -> list[Point]
"""Returns a list of points (polyline-ish) as evaluated
from the uvw coordinate of the box formed by the corner points."""
line_index = line_index % 4
quad = self._quad_at_height(height_fraction)
# rotate the quad based on index
for _ in range(line_index):
quad.append(quad.pop(0))
# evaluate point on the quad
return [eval_quad(quad, u, v)for u, v in self.polylines[line_index]]
def _polyline_at_top(self, line_index, oversize=True):
# type: (int, bool) -> list[Point]
if oversize:
return self._polyline_at_height(line_index, 1.2)
else:
return self._polyline_at_height(line_index, 1.0)
def _polyline_at_btm(self, line_index, oversize=True):
# type: (int, bool) -> list[Point]
if oversize:
return self._polyline_at_height(line_index, -0.2)
else:
return self._polyline_at_height(line_index, 0.0)
def _polyline_at_mid(self, line_index, oversize=0):
# type: (int, float) -> list[Point]
height_fraction = 1.0 - (self.top_side_thickness / self._total_thickness) + oversize
return self._polyline_at_height(line_index, height_fraction)
def _extline_at_height(self, line_index, height_fraction):
# type: (int, float) -> list[Point]
"""Returns two points that can be added to evaluated polylines
for creating a complete cycle with oversize.
"""
line_index = line_index % 4
quad = self._quad_at_height(height_fraction)
# rotate the quad based on index
for _ in range(line_index):
quad.append(quad.pop(0))
return [eval_quad(quad, 0, -0.2), eval_quad(quad, 1, -0.2)]
def _extline_at_top(self, line_index, oversize=True):
# type: (int, bool) -> list[Point]
if oversize:
return self._extline_at_height(line_index, 1.2)
else:
return self._extline_at_height(line_index, 1.0)
def _extline_at_btm(self, line_index, oversize=True):
# type: (int, bool) -> list[Point]
if oversize:
return self._extline_at_height(line_index, -0.2)
else:
return self._extline_at_height(line_index, 0.0)
def _extline_at_mid(self, line_index, oversize=0):
# type: (int, float) -> list[Point]
height_fraction = 1.0 - (self.top_side_thickness / self._total_thickness) + oversize
return self._extline_at_height(line_index, height_fraction)
@property
def bottom_side_thickness(self):
return self._total_thickness - self.top_side_thickness
@property
def height_fraction_at_mid(self):
return self.bottom_side_thickness / self._total_thickness
def get_feature_shapes(self, BeamRef):
# type: (Beam) -> list[Mesh]
"""Compute the negative shape of the joint.
There are three feature shapes.
- First is the bowtie shape on the open side (refside where the joint is cut across)
- Second and third are two side cut from on the solid side.
The side cuts could be absent if there are no side cuts.
Parameters
----------
BeamRef -> integral_timber_joint.geometry.Beam
The Beam object this joint is attached to
Returns
-------
object
A compas.Mesh
"""
shapes = []
# vector_to_top =
vector_to_top = Vector.from_start_end(self.corner_pts[0], self.corner_pts[4]).unitized().scaled(self.top_side_thickness).scaled(1.1)
vector_to_bottom = Vector.from_start_end(self.corner_pts[4], self.corner_pts[0]).unitized().scaled(self._total_thickness - self.top_side_thickness).scaled(1.1)
# i is an index that help rotate the quad index by 1
i = 0 if self.is_joint_on_beam_move else 1
if self.is_joint_on_top:
hf = self.height_fraction_at_mid
poly_line_mid = self._polyline_at_height(i, hf) + self._extline_at_height(i+1, hf) + self._polyline_at_height(i+2, hf) + self._extline_at_height(i+3, hf)
shapes.append(polyhedron_extrude_from_concave_vertices(poly_line_mid, vector_to_top))
else:
hf = self.height_fraction_at_mid
poly_line_mid = self._polyline_at_height(i, hf) + self._extline_at_height(i+1, hf) + self._polyline_at_height(i+2, hf) + self._extline_at_height(i+3, hf)
shapes.append(polyhedron_extrude_from_concave_vertices(poly_line_mid, vector_to_bottom))
# Adding the two side cuts (if they have > 2 points)
if self.is_joint_on_top:
tol = 1e-3
sidecut_extrusion_vector = vector_to_bottom # = Vector.from_start_end(self.corner_pts[4], self.corner_pts[0]).unitized().scaled(self._total_thickness - self.top_side_thickness).scaled(1.1)
else:
tol = -1e-3
sidecut_extrusion_vector = vector_to_top # = Vector.from_start_end(self.corner_pts[0], self.corner_pts[4]).unitized().scaled(self.top_side_thickness).scaled(1.1)
if len(self.polylines[i+1]) > 2:
poly_line_mid = self._polyline_at_mid(i+1, tol)[::-1] + self._extline_at_mid(i+1, tol)
shapes.append(polyhedron_extrude_from_concave_vertices(poly_line_mid, sidecut_extrusion_vector))
if len(self.polylines[(i+3) % 4]) > 2:
poly_line_mid = self._polyline_at_mid(i+3, tol)[::-1] + self._extline_at_mid(i+3, tol)
shapes.append(polyhedron_extrude_from_concave_vertices(poly_line_mid, sidecut_extrusion_vector))
return shapes
def get_clamp_frames(self, beam):
# type: (Beam) -> list[Frame]
"""Compute the possible frames where the clamp can be attached.
The clamp frame is located at the opposite side of the face_id.
        Origin of the frame is located on the surface of that face, at the center point of the joint.
X Axis is along the length of the beam.
Z axis is pointing into the beam.
Parameters
----------
beam : Beam
Beam Object
Returns
-------
list(Frame)
Frames in WCF
"""
# The clamp frame locate at the opposite
origin = self.get_joint_center_at_solid_side(beam)
reference_side_wcf = beam.reference_side_wcf(self.face_id)
forward_clamp = Frame(origin, reference_side_wcf.xaxis, reference_side_wcf.yaxis)
backward_clamp = Frame(origin, reference_side_wcf.xaxis.scaled(-1), reference_side_wcf.yaxis.scaled(-1))
return [forward_clamp, backward_clamp]
def get_joint_center_at_open_side(self, beam):
# type: (Beam) -> Point
open_side_face_id = self.face_id
return beam.get_face_center_line(open_side_face_id).point_from_start(self.center_distance)
def get_joint_center_at_solid_side(self, beam):
# type: (Beam) -> Point
solid_side_face_id = (self.face_id + 1) % 4 + 1
return beam.get_face_center_line(solid_side_face_id).point_from_start(self.center_distance)
def get_assembly_direction(self, beam):
'''
Returns the only possible assembly direction.
'''
# print "Dist%s" % self.distance
face_frame = beam.get_face_plane(self.face_id)
return face_frame.normal.scaled(-1 * beam.get_face_height(self.face_id))
def swap_faceid_to_opposite_face(self):
# Face id flip
old_face_id = self.face_id
new_id = (old_face_id + 1) % 4 + 1
self.face_id = new_id
# Distance point change by angled_lead distance
self.is_joint_on_top = not self.is_joint_on_top
def assembly_tool_types(self, beam_assembly_method):
# type: (BeamAssemblyMethod) -> list[str]
        # Returns a list of clamp types that can assemble this joint
from integral_timber_joints.assembly.beam_assembly_method import BeamAssemblyMethod
clamps = []
if beam_assembly_method == BeamAssemblyMethod.SCREWED_WITH_GRIPPER:
return ['SL1', 'SL1_G200']
elif beam_assembly_method == BeamAssemblyMethod.SCREWED_WITHOUT_GRIPPER:
return ['SL1_G200', 'SL1'] # Preferentially requesting SL1_G200 (this is likely to be assigned to the gripping joint)
else:
if self.angle > 24.9 and self.angle < 90.1:
clamps.append('CL3')
if self.angle > 89.9 and self.angle < 155.1:
clamps.append('CL3M')
return clamps
# * Polyline functions
def polyline_rotate_cw(self):
"""Rotate polyline clockwise.
e.g. Polyline 1 will now become Polyline 2
        Return True if the new polyline values are different from before.
        """
        current_polylines = self.polylines
        new_polylines = current_polylines[-1:] + current_polylines[:-1]
        self.polylines = new_polylines
        return new_polylines != current_polylines
def polyline_rotate_ccw(self):
"""Rotate polyline counter clockwise.
e.g. Polyline 2 will now become Polyline 1
        Return True if the new polyline values are different from before.
        """
        current_polylines = self.polylines
        new_polylines = current_polylines[1:] + current_polylines[:1]
        self.polylines = new_polylines
        return new_polylines != current_polylines
def polyline_flip_1_3(self):
"""Flip polyline 1 and 3.
Similar to a mirror operation.
        Return True if the new polyline values are different from before.
        """
        current_polylines = self.polylines
        new_polylines = [current_polylines[i] for i in [2, 1, 0, 3]]
        # Mirror: reverse the point order of each line and flip the u coordinate
        for i in range(4):
            new_polylines[i] = [[1 - u, v] for u, v in new_polylines[i]][::-1]
        self.polylines = new_polylines
        return new_polylines != current_polylines
def polyline_flip_2_4(self):
"""Flip polyline 2 and 4.
Similar to a mirror operation.
        Return True if the new polyline values are different from before.
        """
        current_polylines = self.polylines
        new_polylines = [current_polylines[i] for i in [0, 3, 2, 1]]
        # Mirror: reverse the point order of each line and flip the u coordinate
        for i in range(4):
            new_polylines[i] = [[1 - u, v] for u, v in new_polylines[i]][::-1]
        self.polylines = new_polylines
        return new_polylines != current_polylines
def get_polyline_interior_angles(self):
# type: () -> list[list[float]]
"""Get a 4 lists of interior corner angles for the four polylines.
A Polyline with 3 points will have one angle returned.
A polyline with only 2 points, will have no angles returned.
Angles in Degrees
It is only necessary to check one of the two joint pairs because they have the same polylines.
"""
results = []
for line_index in range(4):
polyline = self._polyline_at_mid(line_index)
if len(polyline) <= 2:
results.append([])
continue
angles = []
for i in range(len(polyline) - 2):
u = Vector.from_start_end(polyline[i+1], polyline[i])
v = Vector.from_start_end(polyline[i+1], polyline[i+2])
angle = math.degrees(angle_vectors(u, v))
angles.append(angle)
results.append(angles)
return results
    def check_polyline_interior_angle(self, angle_threshold=89.999999):
        # type: (float) -> bool
        """Check that all interior angles of the polylines are at least `angle_threshold` degrees (~90 by default).
        Return True if all angles pass.
        It is only necessary to check one of the two joint pairs because they have the same polylines.
        """
all_angles = self.get_polyline_interior_angles()
for angles in all_angles:
for angle in angles:
if angle < angle_threshold:
return False
return True
@classmethod
def from_beam_beam_intersection(cls, beam_stay, beam_move, dist_tol=1e-5, coplanar_tol=5e-3, joint_face_id_move=None):
        # type: (Beam, Beam, float, float, int) -> Tuple[JointPolylineLap, JointPolylineLap, Line]
        ''' Compute the intersection between two beams.
        `beam_stay` must be the earlier beam in the assembly sequence.
        `beam_move` must be the later beam in the assembly sequence.
        Returns a tuple of (joint_s, joint_m, screw_line) when a valid joint pair can be found.
        The function checks for beam center-line intersections.
        If no intersection can be found, or the two beams are not coplanar,
        returns a tuple of (None, None, None).
        '''
# Compute intersection distance, Return None if they don't intersect
def llx_distance(line1, line2):
dist_tol = line1.length * 1e-5
intersection_result = intersection_segment_segment(line1, line2, dist_tol)
if intersection_result[0] is None:
# print("Joint Intersection result is none")
return None
if distance_point_point(intersection_result[0], intersection_result[1]) > dist_tol:
# print("Joint Intersection result %s > tol: %s" % (distance_point_point(intersection_result[0], intersection_result[1]), dist_tol))
return None
return distance_point_point(intersection_result[0], line1.start)
# * Find joint distance on center line
beam_move_center_line = beam_move.get_center_line()
beam_stay_center_line = beam_stay.get_center_line()
if beam_move_center_line is None or beam_stay_center_line is None:
return (None, None, None)
beam_m_center_distance = llx_distance(beam_move_center_line, beam_stay_center_line)
beam_s_center_distance = llx_distance(beam_stay_center_line, beam_move_center_line)
# Find coplanar faces
face_pairs = beam_move.get_beam_beam_coplanar_face_ids(beam_stay, coplanar_tol)
if len(face_pairs) == 0:
return (None, None, None)
        # * Choosing which beam face to put the joint on, taking an optional guide parameter
beam_m_face_id, beam_s_face_id = face_pairs[0] # Default
for id_m, id_s in face_pairs:
if id_m == joint_face_id_move:
beam_m_face_id, beam_s_face_id = id_m, id_s
beam_s_face_id = (beam_s_face_id + 1) % 4 + 1 # Flip joint face on staying beam to opposite side
# * Find edges on beam_move
m_top_frnt = beam_move.reference_edge_wcf(beam_m_face_id)
m_btm_frnt = beam_move.reference_edge_wcf(beam_m_face_id + 1)
m_btm_back = beam_move.reference_edge_wcf(beam_m_face_id + 2)
m_top_back = beam_move.reference_edge_wcf(beam_m_face_id + 3)
# * Find edges on beam_stay
# Compute which side the beam_stay comes from
ref_side_m = beam_move.reference_side_wcf(beam_m_face_id)
ref_side_s = beam_stay.reference_side_wcf(beam_s_face_id)
if dot_vectors(ref_side_s.yaxis, ref_side_m.xaxis) > 0.0:
s_btm_near = beam_stay.reference_edge_wcf(beam_s_face_id)
s_top_near = beam_stay.reference_edge_wcf(beam_s_face_id + 1)
s_top_farr = beam_stay.reference_edge_wcf(beam_s_face_id + 2)
s_btm_farr = beam_stay.reference_edge_wcf(beam_s_face_id + 3)
else:
s_btm_near = beam_stay.reference_edge_wcf(beam_s_face_id + 3)
s_top_near = beam_stay.reference_edge_wcf(beam_s_face_id + 2)
s_top_farr = beam_stay.reference_edge_wcf(beam_s_face_id + 1)
s_btm_farr = beam_stay.reference_edge_wcf(beam_s_face_id)
# Compute corner points in WCF
corner_pts = []
corner_pts.append(intersection_segment_segment(m_btm_frnt, s_btm_near)[0])
corner_pts.append(intersection_segment_segment(m_btm_back, s_btm_near)[0])
corner_pts.append(intersection_segment_segment(m_btm_back, s_btm_farr)[0])
corner_pts.append(intersection_segment_segment(m_btm_frnt, s_btm_farr)[0])
corner_pts.append(intersection_segment_segment(m_top_frnt, s_top_near)[0])
corner_pts.append(intersection_segment_segment(m_top_back, s_top_near)[0])
corner_pts.append(intersection_segment_segment(m_top_back, s_top_farr)[0])
corner_pts.append(intersection_segment_segment(m_top_frnt, s_top_farr)[0])
        # Construct the Joint objects and flip one of them
joint_m = JointPolylineLap(
face_id=beam_m_face_id,
center_distance=beam_m_center_distance,
top_side_thickness=beam_stay.get_face_height(beam_s_face_id)/2,
corner_pts=corner_pts,
is_joint_on_beam_move=True,
is_joint_on_top=True,
name='%s-%s' % (beam_move.name, beam_stay.name))
joint_s = JointPolylineLap(
face_id=beam_s_face_id,
center_distance=beam_s_center_distance,
top_side_thickness=beam_move.get_face_height(beam_m_face_id)/2,
corner_pts=corner_pts,
is_joint_on_beam_move=False,
is_joint_on_top=False,
name='%s-%s' % (beam_stay.name, beam_move.name))
        # Compute screw center line
beam_move_center = joint_m.get_joint_center_at_solid_side(beam_move)
beam_stay_center = joint_s.get_joint_center_at_solid_side(beam_stay)
screw_line = Line(beam_move_center, beam_stay_center)
return (joint_s, joint_m, screw_line)
if __name__ == "__main__":
import os
import tempfile
import compas
|
from json import loads
def saeidi_parser(path):
    # Parse a Saeidi-style sentiment JSON file into three parallel lists:
    # one sentence, target entity and binary label per opinion whose aspect
    # is "general".
    with open(path, "r") as handle:
        data = loads(handle.read())
    sentences = []
    targets = []
    labels = []
    for sample in data:
        for opinion in sample["opinions"]:
            if opinion["aspect"] != "general":
                continue
            sentences.append(sample["text"])
            targets.append(opinion["target_entity"])
            # "Positive" -> 1, "Negative" -> 0, anything else -> None
            labels.append({"Positive": 1, "Negative": 0}.get(opinion["sentiment"]))
    return sentences, targets, labels
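# A small, self-contained usage sketch for saeidi_parser. The record below is a
# hypothetical example shaped after the field names the parser expects ("text",
# "opinions", "aspect", "target_entity", "sentiment"); it is not taken from the
# original dataset.
if __name__ == "__main__":
    import json
    import tempfile

    sample = [{
        "text": "LOCATION1 is great for families.",
        "opinions": [
            {"aspect": "general", "target_entity": "LOCATION1", "sentiment": "Positive"},
            {"aspect": "price", "target_entity": "LOCATION1", "sentiment": "Negative"},
        ],
    }]
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fh:
        json.dump(sample, fh)
        tmp_path = fh.name
    sentences, targets, labels = saeidi_parser(tmp_path)
    # Only the "general" opinion is kept, so each list has a single entry.
    print(sentences, targets, labels)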
|
# The MIT License (MIT)
#
# Copyright (c) 2018 stanwood GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import random
import string
from google.appengine.ext import ndb
from slack_utils.errors import SlackTokenError
class SlackToken(ndb.Model):
created = ndb.DateTimeProperty(auto_now_add=True)
updated = ndb.DateTimeProperty(auto_now=True)
access_token = ndb.StringProperty()
scope = ndb.StringProperty()
team_name = ndb.StringProperty()
team_id = ndb.StringProperty()
user_id = ndb.StringProperty()
bot_user_id = ndb.StringProperty()
bot_access_token = ndb.StringProperty()
complete_response = ndb.JsonProperty()
@classmethod
def save_token(cls, response):
cls(
id=response['user_id'],
user_id=response['user_id'],
access_token=response['access_token'],
scope=response['scope'],
team_name=response['team_name'],
team_id=response['team_id'],
bot_user_id=response['bot']['bot_user_id'],
bot_access_token=response['bot']['bot_access_token'],
complete_response=response
).put()
@classmethod
def get_bot_token(cls, team_id):
try:
return cls.query(cls.team_id == team_id).get().bot_access_token
except AttributeError:
raise SlackTokenError("Bot token not found for team {}".format(team_id))
@classmethod
def get_app_token(cls, team_id):
try:
return cls.query(cls.team_id == team_id).get().access_token
except AttributeError:
raise SlackTokenError("App token not found for team {}".format(team_id))
@classmethod
def get_team_tokens(cls, team_id):
"""
:return: bot_token, app_token
"""
try:
stored_tokens = cls.query(cls.team_id == team_id).get()
return stored_tokens.bot_access_token, stored_tokens.access_token
except AttributeError:
raise SlackTokenError("Tokens not found for team {}".format(team_id))
@classmethod
def get_user_tokens(cls, user_id):
"""
:return: bot_token, app_token
"""
try:
stored_tokens = ndb.Key(cls, user_id).get()
return stored_tokens.bot_access_token, stored_tokens.access_token
except AttributeError:
raise SlackTokenError("Tokens not found for user {}".format(user_id))
class SlackAuthRequestError(Exception):
pass
class SlackAuthRequest(ndb.Model):
created = ndb.DateTimeProperty(auto_now_add=True)
updated = ndb.DateTimeProperty(auto_now=True)
state = ndb.StringProperty()
@classmethod
def save_state(cls):
state = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(64))
cls(state=state).put()
return state
@classmethod
def validate_state(cls, state):
stored_state = cls.query(cls.state == state).get(keys_only=True)
if stored_state:
stored_state.delete()
else:
raise SlackAuthRequestError("Invalid state {}".format(state))
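# Typical flow (sketch only; the handler names are hypothetical and not part of
# this module):
#   1. Before redirecting a user to Slack's OAuth page, call
#      SlackAuthRequest.save_state() and pass the returned value as the `state`
#      query parameter of the authorize URL.
#   2. In the OAuth callback, call SlackAuthRequest.validate_state(state) with
#      the `state` Slack sends back, exchange the temporary code for an access
#      response, and persist it with SlackToken.save_token(response). The
#      response is expected to contain the keys read in save_token() above
#      (user_id, access_token, scope, team_name, team_id and a nested `bot`
#      dict).
#   3. Later lookups use SlackToken.get_bot_token / get_app_token /
#      get_team_tokens / get_user_tokens, which raise SlackTokenError when no
#      stored token matches.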
|
import numpy as np
import tensorflow as tf
from gym.spaces.discrete import Discrete
from tensorflow_probability.python.distributions import (
Categorical, MultivariateNormalDiag)
from xagents.base import OnPolicy
class A2C(OnPolicy):
"""
Asynchronous Methods for Deep Reinforcement Learning
https://arxiv.org/abs/1602.01783
"""
def __init__(
self,
envs,
model,
entropy_coef=0.01,
value_loss_coef=0.5,
grad_norm=0.5,
**kwargs,
):
"""
Initialize A2C agent.
Args:
envs: A list of gym environments.
model: tf.keras.models.Model that is expected to be compiled
with an optimizer before training starts.
entropy_coef: Entropy coefficient used for entropy loss calculation.
value_loss_coef: Value coefficient used for value loss calculation.
grad_norm: Gradient clipping value passed to tf.clip_by_global_norm()
            **kwargs: Keyword arguments passed to super classes.
"""
super(A2C, self).__init__(envs, model, **kwargs)
self.entropy_coef = entropy_coef
self.value_loss_coef = value_loss_coef
self.grad_norm = grad_norm
assert (
len(model.layers) > 2
), f'Expected a model that has at least 3 layers, got {len(model.layers)}'
activations = [layer.activation for layer in model.layers[-2:]]
self.output_is_softmax = tf.keras.activations.softmax in activations
self.distribution_type = (
Categorical
if isinstance(self.envs[0].action_space, Discrete)
else MultivariateNormalDiag
)
def get_distribution(self, actor_output):
"""
Get a probability distribution from probabilities or logits.
Args:
actor_output: Output by the actor model (probabilities/logits).
Returns:
            tfp.python.distributions.Categorical or MultivariateNormalDiag,
            depending on the environment's action space.
"""
if self.distribution_type == MultivariateNormalDiag:
return MultivariateNormalDiag(actor_output)
if self.output_is_softmax:
return Categorical(probs=actor_output)
return Categorical(logits=actor_output)
def get_model_outputs(self, inputs, models, training=True, actions=None):
"""
Get actor and critic outputs, and determine actions sampled from
respective probability distribution.
Args:
inputs: Inputs as tensors / numpy arrays that are expected
by the given model(s).
models: A tf.keras.Model or a list of tf.keras.Model(s)
training: `training` parameter passed to model call.
actions: If not specified, a sample is drawn from corresponding
distribution and used for calculation of log probs.
Returns:
[actions, log probs, critic output, entropy, actor output]
"""
actor_output, critic_output = super(A2C, self).get_model_outputs(
inputs, models, training
)
distribution = self.get_distribution(actor_output)
critic_output = tf.squeeze(critic_output)
if actions is None:
actions = distribution.sample(seed=self.seed)
action_log_probs = distribution.log_prob(actions)
return (
actions,
action_log_probs,
critic_output,
distribution.entropy(),
actor_output,
)
def get_batch(self):
"""
Get n-step batch which is the result of running self.envs step() for
self.n_steps times.
Returns:
A list of numpy arrays which contains
[states, rewards, actions, critic_output, dones, log probs, entropies, actor_output]
"""
batch = (
states,
rewards,
actions,
critic_output,
dones,
log_probs,
entropies,
actor_output,
) = [[] for _ in range(8)]
step_states = tf.numpy_function(self.get_states, [], tf.float32)
step_dones = tf.numpy_function(self.get_dones, [], tf.float32)
for _ in range(self.n_steps):
(
step_actions,
step_log_probs,
step_values,
step_entropies,
step_actor_logits,
) = self.get_model_outputs(step_states, self.output_models)
states.append(step_states)
actions.append(step_actions)
critic_output.append(step_values)
log_probs.append(step_log_probs)
dones.append(step_dones)
entropies.append(step_entropies)
actor_output.append(step_actor_logits)
*_, step_rewards, step_dones, step_states = tf.numpy_function(
self.step_envs,
[step_actions, True, False],
5 * [tf.float32],
)
rewards.append(step_rewards)
dones.append(step_dones)
return batch
def calculate_returns(
self,
rewards,
dones,
values=None,
selected_critic_logits=None,
selected_importance=None,
):
"""
Get a batch of returns.
Args:
rewards: Rewards tensor of shape (self.n_steps, self.n_envs)
dones: Dones tensor of shape (self.n_steps, self.n_envs)
values: Values tensor of shape (self.n_steps + 1, self.n_envs).
required for PPO, TRPO and ACER
selected_critic_logits: Critic output respective to selected actions
of shape (self.n_steps, self.n_envs).
Required for ACER.
selected_importance: Importance weights respective to selected
actions of shape (self.n_steps, self.n_envs).
Required for ACER
Returns:
Tensor of n-step returns.
"""
next_values = self.get_model_outputs(self.get_states(), self.output_models)[2]
returns = [next_values]
for step in reversed(range(self.n_steps)):
returns.append(
rewards[step] + self.gamma * returns[-1] * (1.0 - dones[step + 1])
)
return np.asarray(returns[::-1], np.float32)[:-1]
def np_train_step(self):
"""
Perform the batching and return calculation in numpy.
"""
(
states,
rewards,
actions,
critic_output,
dones,
log_probs,
entropies,
actor_output,
) = [np.asarray(item, np.float32) for item in self.get_batch()]
returns = self.calculate_returns(rewards, dones)
return self.concat_step_batches(states, returns, actions, critic_output)
@tf.function
def train_step(self):
"""
Perform 1 step which controls action_selection, interaction with environments
in self.envs, batching and gradient updates.
Returns:
None
"""
states, returns, actions, old_values = tf.numpy_function(
self.np_train_step, [], 4 * [tf.float32]
)
advantages = returns - old_values
with tf.GradientTape() as tape:
_, log_probs, critic_output, entropy, actor_output = self.get_model_outputs(
states, self.output_models, actions=actions
)
entropy = tf.reduce_mean(entropy)
pg_loss = -tf.reduce_mean(advantages * log_probs)
value_loss = tf.reduce_mean(tf.square(critic_output - returns))
loss = (
pg_loss
- entropy * self.entropy_coef
+ value_loss * self.value_loss_coef
)
grads = tape.gradient(loss, self.model.trainable_variables)
if self.grad_norm is not None:
grads, _ = tf.clip_by_global_norm(grads, self.grad_norm)
self.model.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
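# Illustrative-only sketch of the loss combination used in train_step above,
# evaluated on small dummy tensors. It is not part of the xagents API; the
# numbers are made up purely to show the arithmetic.
if __name__ == '__main__':
    dummy_advantages = tf.constant([1.0, -0.5])
    dummy_log_probs = tf.constant([-0.7, -1.2])
    dummy_returns = tf.constant([2.0, 0.5])
    dummy_values = tf.constant([1.5, 0.8])
    dummy_entropy = tf.constant([0.6, 0.4])
    pg_loss = -tf.reduce_mean(dummy_advantages * dummy_log_probs)
    value_loss = tf.reduce_mean(tf.square(dummy_values - dummy_returns))
    entropy = tf.reduce_mean(dummy_entropy)
    # Same weighting as in A2C.train_step, with the default coefficients
    # entropy_coef=0.01 and value_loss_coef=0.5
    loss = pg_loss - entropy * 0.01 + value_loss * 0.5
    print(float(loss))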
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: diplomacy_tensorflow/contrib/mpi_collectives/mpi_message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from diplomacy_tensorflow.core.framework import tensor_shape_pb2 as diplomacy__tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2
from diplomacy_tensorflow.core.framework import types_pb2 as diplomacy__tensorflow_dot_core_dot_framework_dot_types__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='diplomacy_tensorflow/contrib/mpi_collectives/mpi_message.proto',
package='diplomacy.tensorflow.contrib.mpi_collectives',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n>diplomacy_tensorflow/contrib/mpi_collectives/mpi_message.proto\x12,diplomacy.tensorflow.contrib.mpi_collectives\x1a\x36\x64iplomacy_tensorflow/core/framework/tensor_shape.proto\x1a/diplomacy_tensorflow/core/framework/types.proto\"\xb3\x02\n\nMPIRequest\x12\x14\n\x0crequest_rank\x18\x01 \x01(\x05\x12Z\n\x0crequest_type\x18\x02 \x01(\x0e\x32\x44.diplomacy.tensorflow.contrib.mpi_collectives.MPIRequest.RequestType\x12\x33\n\x0btensor_type\x18\x03 \x01(\x0e\x32\x1e.diplomacy.tensorflow.DataType\x12\x13\n\x0btensor_name\x18\x04 \x01(\t\x12<\n\x0ctensor_shape\x18\x05 \x01(\x0b\x32&.diplomacy.tensorflow.TensorShapeProto\"+\n\x0bRequestType\x12\r\n\tALLREDUCE\x10\x00\x12\r\n\tALLGATHER\x10\x01\"\xe9\x01\n\x0bMPIResponse\x12]\n\rresponse_type\x18\x01 \x01(\x0e\x32\x46.diplomacy.tensorflow.contrib.mpi_collectives.MPIResponse.ResponseType\x12\x13\n\x0btensor_name\x18\x02 \x01(\t\x12\x15\n\rerror_message\x18\x03 \x01(\t\"O\n\x0cResponseType\x12\r\n\tALLREDUCE\x10\x00\x12\r\n\tALLGATHER\x10\x01\x12\t\n\x05\x45RROR\x10\x02\x12\x08\n\x04\x44ONE\x10\x03\x12\x0c\n\x08SHUTDOWN\x10\x04\x62\x06proto3')
,
dependencies=[diplomacy__tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2.DESCRIPTOR,diplomacy__tensorflow_dot_core_dot_framework_dot_types__pb2.DESCRIPTOR,])
_MPIREQUEST_REQUESTTYPE = _descriptor.EnumDescriptor(
name='RequestType',
full_name='diplomacy.tensorflow.contrib.mpi_collectives.MPIRequest.RequestType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ALLREDUCE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ALLGATHER', index=1, number=1,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=482,
serialized_end=525,
)
_sym_db.RegisterEnumDescriptor(_MPIREQUEST_REQUESTTYPE)
_MPIRESPONSE_RESPONSETYPE = _descriptor.EnumDescriptor(
name='ResponseType',
full_name='diplomacy.tensorflow.contrib.mpi_collectives.MPIResponse.ResponseType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ALLREDUCE', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ALLGATHER', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DONE', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SHUTDOWN', index=4, number=4,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=682,
serialized_end=761,
)
_sym_db.RegisterEnumDescriptor(_MPIRESPONSE_RESPONSETYPE)
_MPIREQUEST = _descriptor.Descriptor(
name='MPIRequest',
full_name='diplomacy.tensorflow.contrib.mpi_collectives.MPIRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_rank', full_name='diplomacy.tensorflow.contrib.mpi_collectives.MPIRequest.request_rank', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='request_type', full_name='diplomacy.tensorflow.contrib.mpi_collectives.MPIRequest.request_type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tensor_type', full_name='diplomacy.tensorflow.contrib.mpi_collectives.MPIRequest.tensor_type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tensor_name', full_name='diplomacy.tensorflow.contrib.mpi_collectives.MPIRequest.tensor_name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tensor_shape', full_name='diplomacy.tensorflow.contrib.mpi_collectives.MPIRequest.tensor_shape', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_MPIREQUEST_REQUESTTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=218,
serialized_end=525,
)
_MPIRESPONSE = _descriptor.Descriptor(
name='MPIResponse',
full_name='diplomacy.tensorflow.contrib.mpi_collectives.MPIResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='response_type', full_name='diplomacy.tensorflow.contrib.mpi_collectives.MPIResponse.response_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tensor_name', full_name='diplomacy.tensorflow.contrib.mpi_collectives.MPIResponse.tensor_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error_message', full_name='diplomacy.tensorflow.contrib.mpi_collectives.MPIResponse.error_message', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_MPIRESPONSE_RESPONSETYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=528,
serialized_end=761,
)
_MPIREQUEST.fields_by_name['request_type'].enum_type = _MPIREQUEST_REQUESTTYPE
_MPIREQUEST.fields_by_name['tensor_type'].enum_type = diplomacy__tensorflow_dot_core_dot_framework_dot_types__pb2._DATATYPE
_MPIREQUEST.fields_by_name['tensor_shape'].message_type = diplomacy__tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2._TENSORSHAPEPROTO
_MPIREQUEST_REQUESTTYPE.containing_type = _MPIREQUEST
_MPIRESPONSE.fields_by_name['response_type'].enum_type = _MPIRESPONSE_RESPONSETYPE
_MPIRESPONSE_RESPONSETYPE.containing_type = _MPIRESPONSE
DESCRIPTOR.message_types_by_name['MPIRequest'] = _MPIREQUEST
DESCRIPTOR.message_types_by_name['MPIResponse'] = _MPIRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MPIRequest = _reflection.GeneratedProtocolMessageType('MPIRequest', (_message.Message,), dict(
DESCRIPTOR = _MPIREQUEST,
__module__ = 'diplomacy_tensorflow.contrib.mpi_collectives.mpi_message_pb2'
# @@protoc_insertion_point(class_scope:diplomacy.tensorflow.contrib.mpi_collectives.MPIRequest)
))
_sym_db.RegisterMessage(MPIRequest)
MPIResponse = _reflection.GeneratedProtocolMessageType('MPIResponse', (_message.Message,), dict(
DESCRIPTOR = _MPIRESPONSE,
__module__ = 'diplomacy_tensorflow.contrib.mpi_collectives.mpi_message_pb2'
# @@protoc_insertion_point(class_scope:diplomacy.tensorflow.contrib.mpi_collectives.MPIResponse)
))
_sym_db.RegisterMessage(MPIResponse)
# @@protoc_insertion_point(module_scope)
|
# Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.
# SHT30
# This code is designed to work with the SHT30_I2CS I2C Mini Module available from ControlEverything.com.
# https://www.controleverything.com/content/Humidity?sku=SHT30_I2CS#tabs-0-product_tabset-2
import smbus
import time
# Get I2C bus
bus = smbus.SMBus(1)
# SHT30 address, 0x44(68)
# Send measurement command, 0x2C(44)
# 0x06(06) High repeatability measurement
bus.write_i2c_block_data(0x44, 0x2C, [0x06])
time.sleep(0.5)
# SHT30 address, 0x44(68)
# Read data back from 0x00(00), 6 bytes
# cTemp MSB, cTemp LSB, cTemp CRC, Humidity MSB, Humidity LSB, Humidity CRC
data = bus.read_i2c_block_data(0x44, 0x00, 6)
# Convert the data
cTemp = ((((data[0] * 256.0) + data[1]) * 175) / 65535.0) - 45
fTemp = cTemp * 1.8 + 32
humidity = 100 * (data[3] * 256 + data[4]) / 65535.0
# Output data to screen
print("Relative Humidity : %.2f %%RH" % humidity)
print("Temperature in Celsius : %.2f C" % cTemp)
print("Temperature in Fahrenheit : %.2f F" % fTemp)
|
import json
import unittest
from lakey_finicity.responses import TransactionsListResponse
EXAMPLE_TRANSACTIONS_RESPONSE = '''
{
"found": 250,
"displaying": 2,
"moreAvailable": true,
"fromDate": 1417045583,
"toDate": 1422316026,
"sort": "desc",
"transactions": [
{
"id": 805353,
"amount": -59.56,
"accountId": 98684,
"customerId": 41442,
"status": "active",
"description": "VERIZON WIRELESS PAYMENTS",
"memo": "VERIZON WIRELESS PAYMENTS",
"type": "directDebit",
"postedDate": 1450852000,
"createdDate": 1460621294,
"categorization": {
"normalizedPayeeName": "Verizon Wireless",
"category": "Mobile Phone",
"bestRepresentation": "Verizon Wireless PMT",
"country": "USA"
}
},
{
"id": 805350,
"amount": 647.96,
"accountId": 98689,
"customerId": 41442,
"status": "active",
"description": "Square Inc 168P2",
"memo": "Square Inc 168P2",
"type": "directDeposit",
"postedDate": 1450152000,
"createdDate": 1460621294,
"categorization": {
"normalizedPayeeName": "Deposit Square Type",
"category": "Income",
"bestRepresentation": "Square Inc",
"country": "USA"
}
}
]
}
'''
EMPTY_RESULT = """
{"transactions":[]}
"""
TEST_TRANSACTION_ONLY = """
{"found":1,"displaying":1,"moreAvailable":"false","fromDate":"1583085842","toDate":"1583949842","sort":"asc","transactions":[{"id":100803586565,"amount":42.01,"accountId":1007861929,"customerId":1002249444,"status":"active","description":"test transaction","postedDate":1583906400,"transactionDate":1583906400,"createdDate":1583949620,"lastUpdatedDate":1583949620}]}
"""
class TestTransactionsResponse(unittest.TestCase):
def test_transactions_response(self):
response_dict = json.loads(EXAMPLE_TRANSACTIONS_RESPONSE)
response = TransactionsListResponse.from_dict(response_dict)
self.assertEqual({}, response._unused_fields)
for transaction in response.transactions:
self.assertEqual({}, transaction._unused_fields)
self.assertEqual({}, transaction.categorization._unused_fields)
def test_empty_response(self):
response_dict = json.loads(EMPTY_RESULT)
response = TransactionsListResponse.from_dict(response_dict)
self.assertEqual({}, response._unused_fields)
for transaction in response.transactions:
self.assertEqual({}, transaction._unused_fields)
self.assertEqual({}, transaction.categorization._unused_fields)
    def test_single_test_transaction(self):
response_dict = json.loads(TEST_TRANSACTION_ONLY)
response = TransactionsListResponse.from_dict(response_dict)
self.assertEqual({}, response._unused_fields)
for transaction in response.transactions:
self.assertEqual({}, transaction._unused_fields)
if transaction.categorization:
self.assertEqual({}, transaction.categorization._unused_fields)
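# Allow running this test module directly; this is the standard unittest entry
# point and relies only on the `unittest` import above.
if __name__ == '__main__':
    unittest.main()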
|
#! /usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
# $Author$
"""
Warning: do not use this library. It is unstable and most of the routines
here have been superseded by other libraries (e.g. rospkg). These
routines will likely be *deleted* in future releases.
"""
import sys
import os
import getopt
import roslib.packages
MANIFEST_FILE = 'manifest.xml'
import roslib.manifestlib
# re-export symbols for backwards compatibility
from roslib.manifestlib import ManifestException, Depend, Export, ROSDep, VersionControl
class Manifest(roslib.manifestlib._Manifest):
"""
Object representation of a ROS manifest file
"""
__slots__ = []
def __init__(self):
"""
Initialize new empty manifest.
"""
super(Manifest, self).__init__('package')
def get_export(self, tag, attr):
"""
        @return: values of the specified attribute for all exports matching the given tag, e.g. tag='python', attr='path'
        @rtype: [str]
"""
return [e.get(attr) for e in self.exports if e.tag == tag if e.get(attr) is not None]
def _manifest_file_by_dir(package_dir, required=True, env=None):
"""
@param package_dir: path to package directory
@type package_dir: str
@param env: environment dictionary
@type env: dict
@param required: require that the directory exist
@type required: bool
@return: path to manifest file of package
@rtype: str
@raise InvalidROSPkgException: if required is True and manifest file cannot be located
"""
if env is None:
env = os.environ
try:
p = os.path.join(package_dir, MANIFEST_FILE)
if not required and not os.path.exists(p):
return p
if not os.path.isfile(p):
raise roslib.packages.InvalidROSPkgException("""
Package '%(package_dir)s' is improperly configured: no manifest file is present.
"""%locals())
return p
except roslib.packages.InvalidROSPkgException as e:
if required:
raise
def manifest_file(package, required=True, env=None):
"""
@param package str: package name
@type package: str
@param env: override os.environ dictionary
@type env: dict
@param required: require that the directory exist
@type required: bool
@return: path to manifest file of package
@rtype: str
@raise InvalidROSPkgException: if required is True and manifest file cannot be located
"""
# ros_root needs to be determined from the environment or else
# everything breaks when trying to launch nodes via ssh where the
# path isn't setup correctly.
if env is None:
env = os.environ
d = roslib.packages.get_pkg_dir(package, required, ros_root=env['ROS_ROOT'])
return _manifest_file_by_dir(d, required=required, env=env)
def load_manifest(package):
"""
Load manifest for specified package.
    @param package: package name
@type package: str
@return: Manifest instance
@rtype: L{Manifest}
@raise InvalidROSPkgException: if package is unknown
"""
return parse_file(manifest_file(package))
def parse_file(file):
"""
Parse manifest.xml file
@param file: manifest.xml file path
@type file: str
@return: Manifest instance
@rtype: L{Manifest}
"""
return roslib.manifestlib.parse_file(Manifest(), file)
def parse(string, filename='string'):
"""
Parse manifest.xml string contents
@param string: manifest.xml contents
@type string: str
@return: Manifest instance
@rtype: L{Manifest}
"""
v = roslib.manifestlib.parse(Manifest(), string, filename)
if v.version:
raise ManifestException("<version> tag is not valid in a package manifest.xml file")
return v
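# A minimal sketch of parsing a manifest from a string, using only functions
# defined in this module. The XML below is a hypothetical package manifest
# (assumed to be accepted by roslib.manifestlib), not one of the standard ROS
# packages.
if __name__ == '__main__':
    EXAMPLE_MANIFEST = """<package>
  <description brief="example">Example package</description>
  <license>BSD</license>
  <depend package="roslib"/>
  <export>
    <python path="src"/>
  </export>
</package>"""
    m = parse(EXAMPLE_MANIFEST)
    # Expected to print the path attribute of the <python> export, i.e. ['src']
    print(m.get_export('python', 'path'))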
|
import asyncio
import aiohttp
from pathlib import Path


class Client:
    def __init__(self, addr):
        self.addr = addr

    async def post(self, path, data):
        # XXX: consider a proper URL join (e.g. urllib.parse.urljoin or yarl)
        url = f"{self.addr}/{path}"
        async with aiohttp.ClientSession() as session:
            # NOTE: the response body must be consumed before the session closes
            return await session.post(url, data=data)

    async def filedata(self, filenames):
        # Build a multipart form from one or more YAML files.
        data = aiohttp.FormData()
        if isinstance(filenames, str):
            filenames = [filenames]
        for fn in filenames:
            fn = Path(fn)
            fh = open(fn, "rb")  # binary mode: no encoding argument
            data.add_field(fn.name, fh, filename=fn.name, content_type="text/x-yaml")
        return data
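# A minimal, hypothetical usage sketch: the server address, endpoint path and
# file name below are assumptions for illustration only, not part of the
# original module.
async def _example():
    client = Client("http://localhost:8080")
    form = await client.filedata("config.yaml")
    response = await client.post("upload", form)
    print(response.status)

# asyncio.run(_example())  # uncomment to run against a real server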
|