import types
import logging
import datetime
from google.appengine.ext import db
from google.appengine.ext import blobstore
from google.appengine.ext.db.metadata import Kind
from bottle import Bottle, run, template, abort, request, response, redirect
from cork import get_flash, set_flash, SUCCESS, ERROR, stripslashes
import settings
import models
app = Bottle()
def get_kinds():
return [x.__name__ for x in models.__dict__.values()
if type(x) == db.PropertiedClass
and issubclass(x, db.Model)
and not x.__name__ == 'Model']
def is_kind_name(kindname):
return kindname in get_kinds()
def check_kind_name(kindname):
if not is_kind_name(kindname):
abort(404, u"No such kind %s" % kindname)
def get_kind(kindname):
check_kind_name(kindname)
return getattr(models, kindname)
@app.get('/admin/')
def index():
kinds = get_kinds()
flash = get_flash()
return template('admin_index', **locals())
@app.get('/admin/<kindname>/')
def kind_index(kindname):
kind = get_kind(kindname)
flash = get_flash()
return template('admin_kind_index', **locals())
@app.get('/admin/<kindname>/add/')
def kind_add(kindname, values=None, flash=None):
if values is None:
values = {}
kind = get_kind(kindname)
flash = flash or get_flash()
edit = False
return template('admin_kind_add', **locals())
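# getproperty converts a single submitted form value into the Python/datastore
# type expected by the corresponding model property; key parts are always
# treated as plain strings, and empty submissions become None.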
def getproperty(kind, p, key=False):
if key:
input_name = 'input__p__key__%s' % p
else:
input_name = 'input__p__%s' % p
v = getattr(request.forms, input_name)
if not key:
property_class = kind._properties[p]
else:
property_class = db.StringProperty()
logging.info("p = %s" % p)
logging.info("v = %s" % v)
logging.info("property_class = %s" % property_class)
if not v:
v = None
else:
if isinstance(property_class, db.BooleanProperty):
if v.lower() in ['false', 'no']:
v = False
else:
v = bool(v)
elif isinstance(property_class, db.IntegerProperty):
v = long(v)
elif isinstance(property_class, db.FloatProperty):
v = float(v)
elif isinstance(property_class, db.DateTimeProperty):
v = datetime.datetime.strptime(v, '%Y-%m-%d %H:%M:%S.%f')
elif isinstance(property_class, db.LinkProperty):
v = db.Link(v)
elif isinstance(property_class, db.TextProperty):
v = db.Text(v)
elif isinstance(property_class, db.BlobProperty):
v = db.Blob(v)
elif isinstance(property_class, db.EmailProperty):
v = db.Email(v)
elif isinstance(property_class, db.GeoPtProperty):
            lat, lon = [float(x.strip()) for x in v.split(',', 1)]
v = db.GeoPt(lat, lon)
elif isinstance(property_class, db.RatingProperty):
v = db.Rating(int(v))
elif isinstance(property_class, db.CategoryProperty):
v = db.Category(v)
elif isinstance(property_class, (db.ListProperty, db.StringListProperty)):
# todo assumes list of strings
            v = [part.strip() for part in v.split(",")]
elif isinstance(property_class, db.ReferenceProperty):
kindname = property_class.reference_class.__name__
            v = db.Key.from_path(kindname, v)
elif isinstance(property_class, blobstore.BlobReferenceProperty):
v = blobstore.BlobKey(v)
elif isinstance(property_class, (
db.IMProperty,
db.PhoneNumberProperty,
db.PostalAddressProperty
)):
abort(500, 'Unsupported property type %s for model %s' % (property_class, kind.__name__))
if key and v is None:
abort(400, 'Property %s is part of the key for model %s so is required' % (p, kind.__name__))
return v
@app.post('/admin/<kindname>/add/')
def kind_add_do(kindname):
kind = get_kind(kindname)
key_parts = [getproperty(kind, p, True) for p in kind.Meta.key_parts]
key = u"/".join(key_parts)
kind.abort_if_exists(key)
try:
do_put(kindname, kind, key)
except db.BadValueError, e:
return kind_add(kindname, values=request.forms, flash=(ERROR, e.message))
@app.get('/admin/<kindname>/edit/<name:path>')
def kind_edit(kindname, name, values=None, flash=None):
if values is None:
values = {}
name = stripslashes(name)
kind = get_kind(kindname)
obj = kind.get_by_key_name_or_abort(name)
key_name = obj.key().name()
assert key_name is not None
    key_parts = key_name.split(u"/")
    assert len(key_parts) == len(kind.Meta.key_parts)
    for k, v in zip(kind.Meta.key_parts, key_parts):
        values[k] = v
for p in kind.properties():
        if p not in values:
values[p] = str(getattr(obj, p))
del obj
flash = flash or get_flash()
edit = True
return template('admin_kind_add', **locals())
@app.post('/admin/<kindname>/edit/<name>/')
def kind_edit_do(kindname, name):
kind = get_kind(kindname)
obj = kind.get_by_key_name_or_abort(name)
key = str(obj.key())
try:
do_put(kindname, kind, key, instance=obj)
except db.BadValueError, e:
        return kind_edit(kindname, name, values=request.forms, flash=(ERROR, e.message))
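# do_put builds a property dict from the submitted form (skipping empty values),
# creates a new entity or updates the given instance, saves it, and redirects
# back to the kind's listing page.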
def do_put(kindname, kind, key, instance=None):
properties = dict(
[(k,v) for k,v in
[(p,getproperty(kind, p)) for p in kind.properties()]
if v is not None]
)
if instance is None:
properties['key_name'] = key
key_name = key
instance = kind(**properties)
else:
key_name = db.Key(key).name()
for k, v in properties.iteritems():
setattr(instance, k, v)
instance.put()
set_flash(SUCCESS, '%s was saved' % key_name)
redirect('/admin/%s/' % kindname)
@app.get('/admin/<kindname>/delete/<key:path>/')
def kind_delete(kindname, key):
kind = get_kind(kindname)
flash = get_flash()
return template('admin_kind_delete', **locals())
@app.post('/admin/<kindname>/delete/<key:path>/')
def kind_delete_do(kindname, key):
kind = get_kind(kindname)
flash = get_flash()
instance = kind.get_by_key_name_or_abort(key)
instance.delete()
set_flash(SUCCESS, '%s was deleted' % key)
redirect('/admin/%s/' % kindname)
|
# Generated by Django 3.1.dev20200305084444 on 2020-04-21 15:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Project', '0015_auto_20200421_2009'),
]
operations = [
migrations.RenameField(
model_name='employee',
old_name='Confirm_Password',
new_name='Designation',
),
migrations.RemoveField(
model_name='employee',
name='Password',
),
]
|
import sys
import clang.cindex
from dataclasses import dataclass, field
from mako.template import Template
@dataclass
class Struct:
namespace:list = field(default_factory=list)
name:str = ""
fields:list = field(default_factory=list)
def collectStructs(node, namespace=None):
    # Avoid a mutable default argument; start with an empty namespace path.
    if namespace is None:
        namespace = []
    structs = []
    for child in node.get_children():
        if child.kind == clang.cindex.CursorKind.STRUCT_DECL \
                and child.location.file.name.startswith("/app/data"):
            struct = Struct()
            struct.name = child.spelling
            struct.fields = [(f.type.spelling, f.spelling) for f in child.get_children()
                             if f.kind == clang.cindex.CursorKind.FIELD_DECL]
            struct.namespace = list(namespace)
            structs.append(struct)
        elif child.kind == clang.cindex.CursorKind.NAMESPACE:
            # Recurse into the namespace, extending the namespace path with its name.
            structs += collectStructs(child, namespace + [child.spelling])
        elif node.kind == clang.cindex.CursorKind.TRANSLATION_UNIT:
            structs += collectStructs(child, namespace)
    return structs
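# Usage: python <this script> <header file> [<output file>]
# Configure libclang, parse the given file as C++17, collect the structs, and
# render them through the Mako template. The Index is built directly via
# conf.lib.clang_createIndex(False, True) so that parser diagnostics are shown.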
clang.cindex.Config.set_library_path("/usr/lib/llvm-11/lib")
index = clang.cindex.Index(clang.cindex.conf.lib.clang_createIndex(False, True))
translation_unit = index.parse(sys.argv[1], ['-x', 'c++', '--std', 'c++17'])
structs = collectStructs( translation_unit.cursor )
tpl = Template(filename='/app/templates/struct.mako')
string = tpl.render( structs=structs )
if len(sys.argv) > 2:
    with open(sys.argv[2], 'w') as out:
        out.write(string)
else:
    print(string)
|
import pydash
import pytest
from unittest.mock import patch
from io import StringIO
from azure.ai.ml import command, MpiDistribution, dsl, Input, Output
from azure.ai.ml._restclient.v2022_05_01.models import TensorFlow
from azure.ai.ml._utils.utils import load_yaml
from azure.ai.ml.entities import (
Component,
CommandComponent,
ResourceConfiguration,
CommandJobLimits,
load_component,
)
from azure.ai.ml.sweep import Choice
from azure.ai.ml.entities._job.pipeline._exceptions import UnexpectedKeywordError
from azure.ai.ml.entities._job.pipeline._io import PipelineInput
from azure.ai.ml.entities._builders import Command, Sweep
@pytest.mark.unittest
class TestCommandComponentEntity:
def test_component_load(self):
# code is specified in yaml, value is respected
component_yaml = "./tests/test_configs/components/basic_component_code_local_path.yml"
command_component = Component.load(
path=component_yaml,
)
assert command_component.code == "./helloworld_components_with_env"
component_yaml = "./tests/test_configs/components/basic_component_code_arm_id.yml"
command_component = Component.load(
path=component_yaml,
)
expected_code = (
"/subscriptions/4faaaf21-663f-4391-96fd-47197c630979/resourceGroups/"
"test-rg-centraluseuap-v2-2021W10/providers/Microsoft.MachineLearningServices/"
"workspaces/sdk_vnext_cli/codes/e736692c-8542-11eb-b746-6c2b59f8af4d/versions/1"
)
assert command_component.code == expected_code
def test_command_component_to_dict(self):
# Test optional params exists in component dict
yaml_path = "./tests/test_configs/components/basic_component_code_arm_id.yml"
yaml_dict = load_yaml(yaml_path)
yaml_dict["mock_option_param"] = {"mock_key": "mock_val"}
command_component = CommandComponent._load(data=yaml_dict, yaml_path=yaml_path)
assert command_component._other_parameter.get("mock_option_param") == yaml_dict["mock_option_param"]
yaml_dict["version"] = str(yaml_dict["version"])
yaml_dict["inputs"] = {}
component_dict = command_component._to_dict()
component_dict.pop("is_deterministic")
assert yaml_dict == component_dict
def test_command_component_entity(self):
code = (
"/subscriptions/4faaaf21-663f-4391-96fd-47197c630979/resourceGroups/"
"test-rg-centraluseuap-v2-2021W10/providers/Microsoft.MachineLearningServices/"
"workspaces/sdk_vnext_cli/codes/e736692c-8542-11eb-b746-6c2b59f8af4d/versions/1"
)
component = CommandComponent(
name="sample_command_component_basic",
display_name="CommandComponentBasic",
description="This is the basic command component",
tags={"tag": "tagvalue", "owner": "sdkteam"},
version="1",
outputs={"component_out_path": {"type": "uri_folder"}},
command="echo Hello World",
code=code,
environment="AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:1",
)
component_dict = component._to_rest_object().as_dict()
component_dict = pydash.omit(component_dict, "properties.component_spec.$schema")
yaml_path = "./tests/test_configs/components/basic_component_code_arm_id.yml"
yaml_component = Component.load(path=yaml_path)
yaml_component_dict = yaml_component._to_rest_object().as_dict()
yaml_component_dict = pydash.omit(yaml_component_dict, "properties.component_spec.$schema")
assert component_dict == yaml_component_dict
def test_command_component_instance_count(self):
component = CommandComponent(
name="microsoftsamples_command_component_tensor_flow",
display_name="CommandComponentTensorFlow",
description="This is the TensorFlow command component",
tags={"tag": "tagvalue", "owner": "sdkteam"},
inputs={
"component_in_number": {"description": "A number", "type": "number", "default": 10.99},
"component_in_path": {"description": "A path", "type": "uri_folder"},
},
outputs={"component_out_path": {"type": "uri_folder"}},
command="echo Hello World & echo ${{inputs.component_in_number}} & echo ${{inputs.component_in_path}} "
"& echo ${{outputs.component_out_path}}",
environment="AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:1",
distribution=TensorFlow(
parameter_server_count=1,
worker_count=2,
                # No effect because the TensorFlow object does not allow extra fields
added_property=7,
),
instance_count=2,
)
component_dict = component._to_rest_object().as_dict()
yaml_path = "./tests/test_configs/components/helloworld_component_tensorflow.yml"
yaml_component = Component.load(path=yaml_path)
yaml_component_dict = yaml_component._to_rest_object().as_dict()
component_dict = pydash.omit(
component_dict,
"properties.component_spec.$schema",
"properties.component_spec.distribution.added_property",
"properties.component_spec.resources.properties",
)
yaml_component_dict = pydash.omit(
yaml_component_dict,
"properties.component_spec.$schema",
"properties.component_spec.distribution.added_property",
"properties.component_spec.resources.properties",
)
assert component_dict == yaml_component_dict
def test_command_component_code(self):
component = CommandComponent(
name="SampleCommandComponentBasic",
display_name="CommandComponentBasic",
description="This is the basic command component",
version="1",
tags={"tag": "tagvalue", "owner": "sdkteam"},
outputs={"component_out_path": {"type": "path"}},
command="echo Hello World",
environment="AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:1",
code="./helloworld_components_with_env",
)
yaml_path = "./tests/test_configs/components/basic_component_code_local_path.yml"
yaml_component = Component.load(path=yaml_path)
assert component.code == yaml_component.code
def test_command_component_version_as_a_function(self):
expected_rest_component = {
"componentId": "fake_component",
"computeId": None,
"display_name": None,
"distribution": None,
"environment_variables": {},
"inputs": {},
"limits": None,
"name": None,
"outputs": {},
"resources": None,
"tags": {},
}
yaml_path = "./tests/test_configs/components/basic_component_code_local_path.yml"
yaml_component_version = Component.load(path=yaml_path)
assert isinstance(yaml_component_version, CommandComponent)
yaml_component = yaml_component_version()
assert isinstance(yaml_component, Command)
yaml_component._component = "fake_component"
rest_yaml_component = yaml_component._to_rest_object()
assert rest_yaml_component == expected_rest_component
        # assert that positional args are not supported
with pytest.raises(TypeError) as error_info:
yaml_component_version(1)
assert "[component] CommandComponentBasic() takes 0 positional arguments but 1 was given" in str(error_info)
# unknown kw arg
with pytest.raises(UnexpectedKeywordError) as error_info:
yaml_component_version(unknown=1)
assert "[component] CommandComponentBasic() got an unexpected keyword argument 'unknown'." in str(error_info)
def test_command_component_version_as_a_function_with_inputs(self):
expected_rest_component = {
"componentId": "fake_component",
"computeId": None,
"display_name": None,
"distribution": None,
"environment_variables": {},
"inputs": {
"component_in_number": {"job_input_type": "Literal", "value": "10"},
"component_in_path": {"job_input_type": "Literal", "value": "${{parent.inputs.pipeline_input}}"},
},
"limits": None,
"name": None,
"outputs": {},
"resources": None,
"tags": {},
}
yaml_path = "./tests/test_configs/components/helloworld_component.yml"
yaml_component_version = Component.load(path=yaml_path)
pipeline_input = PipelineInput(name="pipeline_input", owner="pipeline", meta=None)
yaml_component = yaml_component_version(component_in_number=10, component_in_path=pipeline_input)
yaml_component._component = "fake_component"
rest_yaml_component = yaml_component._to_rest_object()
print(rest_yaml_component)
assert expected_rest_component == rest_yaml_component
def test_command_component_help_function(self):
download_unzip_component = CommandComponent(
name="download_and_unzip",
version="0.0.1",
# this component has no code, just a simple unzip command
command="curl -o local_archive.zip ${{inputs.url}} && "
"unzip local_archive.zip -d ${{outputs.extracted_data}}",
# I/O specifications, each using a specific key and type
inputs={
# 'url' is the key of this input string
"url": {"type": "string"}
},
outputs={
# 'extracted_data' will be the key to link this output to other steps in the pipeline
"extracted_data": {"type": "path"}
},
# we're using a curated environment
environment="AzureML-sklearn-0.24-ubuntu18.04-py37-cpu:9",
)
basic_component = load_component(
yaml_file="./tests/test_configs/components/basic_component_code_local_path.yml"
)
sweep_component = load_component(yaml_file="./tests/test_configs/components/helloworld_component_for_sweep.yml")
with patch("sys.stdout", new=StringIO()) as std_out:
help(download_unzip_component._func)
help(basic_component._func)
help(sweep_component._func)
assert "name: download_and_unzip" in std_out.getvalue()
assert "name: sample_command_component_basic" in std_out.getvalue()
assert "name: microsoftsamples_command_component_for_sweep" in std_out.getvalue()
with patch("sys.stdout", new=StringIO()) as std_out:
print(basic_component)
print(download_unzip_component)
print(sweep_component)
assert (
"name: sample_command_component_basic\nversion: '1'\ndisplay_name: CommandComponentBasic\n"
in std_out.getvalue()
)
assert (
"name: download_and_unzip\nversion: 0.0.1\ntype: command\ninputs:\n url:\n type: string\n"
in std_out.getvalue()
)
assert "name: microsoftsamples_command_component_for_sweep\nversion: 0.0.1\n" in std_out.getvalue()
def test_command_help_function(self):
test_command = command(
name="my-job",
display_name="my-fancy-job",
description="This is a fancy job",
tags=dict(),
command="python train.py --input-data ${{inputs.input_data}} --lr ${{inputs.learning_rate}}",
code="./src",
compute="cpu-cluster",
environment="my-env:1",
distribution=MpiDistribution(process_count_per_instance=4),
environment_variables=dict(foo="bar"),
# Customers can still do this:
resources=ResourceConfiguration(instance_count=2, instance_type="STANDARD_D2"),
limits=CommandJobLimits(timeout=300),
inputs={
"float": 0.01,
"integer": 1,
"string": "str",
"boolean": False,
"uri_folder": Input(type="uri_folder", path="https://my-blob/path/to/data", mode="ro_mount"),
"uri_file": dict(type="uri_file", path="https://my-blob/path/to/data", mode="download"),
},
outputs={"my_model": Output(type="mlflow_model", mode="rw_mount")},
)
with patch("sys.stdout", new=StringIO()) as std_out:
print(test_command)
outstr = std_out.getvalue()
assert (
"outputs:\n my_model:\n mode: rw_mount\n type: mlflow_model\ncommand: python train.py --input-data ${{inputs.input_data}} --lr ${{inputs.learning_rate}}\n"
in outstr
)
def test_sweep_help_function(self):
yaml_file = "./tests/test_configs/components/helloworld_component.yml"
component_to_sweep: CommandComponent = load_component(yaml_file=yaml_file)
cmd_node1: Command = component_to_sweep(
component_in_number=Choice([2, 3, 4, 5]), component_in_path=Input(path="/a/path/on/ds")
)
sweep_job1: Sweep = cmd_node1.sweep(
primary_metric="AUC", # primary_metric,
goal="maximize",
sampling_algorithm="random",
)
sweep_job1.set_limits(max_total_trials=10) # max_total_trials
with patch("sys.stdout", new=StringIO()) as std_out:
print(sweep_job1)
assert (
"name: microsoftsamples_command_component_basic\n version: 0.0.1\n display_name: CommandComponentBasi"
in std_out.getvalue()
)
|
# -*- coding: utf-8 -*-
#
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.quantum.v1alpha1 QuantumEngineService API."""
import functools
import pkg_resources
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union, TYPE_CHECKING
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.page_iterator
import google.api_core.protobuf_helpers
import google.api_core.retry
import grpc
from cirq_google.engine.client.quantum_v1alpha1 import types as pb_types
from cirq_google.engine.client.quantum_v1alpha1.gapic import enums
from cirq_google.engine.client.quantum_v1alpha1.gapic import quantum_engine_service_client_config
from cirq_google.engine.client.quantum_v1alpha1.gapic.transports import (
quantum_engine_service_grpc_transport,
)
from cirq_google.engine.client.quantum_v1alpha1.proto import engine_pb2
from cirq_google.engine.client.quantum_v1alpha1.proto import engine_pb2_grpc
from cirq_google.engine.client.quantum_v1alpha1.proto import quantum_pb2
from google.protobuf import duration_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
_GAPIC_LIBRARY_VERSION = '0.1'
QUANTUM_ENGINE_SERVICE_GRPC_TRANSPORT_LIKE = Union[
quantum_engine_service_grpc_transport.QuantumEngineServiceGrpcTransport,
Callable[..., quantum_engine_service_grpc_transport.QuantumEngineServiceGrpcTransport],
]
class QuantumEngineServiceClient(object):
"""-"""
SERVICE_ADDRESS = 'quantum.googleapis.com:443'
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = 'google.cloud.quantum.v1alpha1.QuantumEngineService'
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
QuantumEngineServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
def __init__(
self,
transport: QUANTUM_ENGINE_SERVICE_GRPC_TRANSPORT_LIKE = None,
channel: Optional[grpc.Channel] = None,
credentials: Optional[service_account.Credentials] = None,
client_config: Optional[Dict[str, Any]] = None,
client_info: Optional[google.api_core.gapic_v1.client_info.ClientInfo] = None,
client_options: Union[Dict[str, Any], google.api_core.client_options.ClientOptions] = None,
):
"""Constructor.
Args:
transport (Union[~.QuantumEngineServiceGrpcTransport,
Callable[[~.Credentials, type], ~.QuantumEngineServiceGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
'The `client_config` argument is deprecated.',
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = quantum_engine_service_client_config.config
if channel:
warnings.warn(
'The `channel` argument is deprecated; use `transport` instead.',
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(client_options)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=quantum_engine_service_grpc_transport.QuantumEngineServiceGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
'Received both a transport instance and '
'credentials; these are mutually exclusive.'
)
self.transport = transport
else:
self.transport = (
quantum_engine_service_grpc_transport.QuantumEngineServiceGrpcTransport(
address=api_endpoint,
channel=channel,
credentials=credentials,
)
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION,
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config['interfaces'][self._INTERFACE_NAME],
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls: Dict = {}
# Service calls
def create_quantum_program(
self,
parent: Optional[str] = None,
quantum_program: Union[Dict[str, Any], pb_types.QuantumProgram] = None,
overwrite_existing_source_code: Optional[bool] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> response = client.create_quantum_program()
Args:
parent (str): -
quantum_program (Union[dict, ~cirq_google.engine.client.quantum_v1alpha1.types.QuantumProgram]): -
If a dict is provided, it must be of the same form as the protobuf
message :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumProgram`
overwrite_existing_source_code (bool): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumProgram` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'create_quantum_program' not in self._inner_api_calls:
self._inner_api_calls[
'create_quantum_program'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_quantum_program,
default_retry=self._method_configs['CreateQuantumProgram'].retry,
default_timeout=self._method_configs['CreateQuantumProgram'].timeout,
client_info=self._client_info,
)
request = engine_pb2.CreateQuantumProgramRequest(
parent=parent,
quantum_program=quantum_program,
overwrite_existing_source_code=overwrite_existing_source_code,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('parent', parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls['create_quantum_program'](
request, retry=retry, timeout=timeout, metadata=metadata
)
def get_quantum_program(
self,
name: Optional[str] = None,
return_code: Optional[bool] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> response = client.get_quantum_program()
Args:
name (str): -
return_code (bool): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumProgram` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_quantum_program' not in self._inner_api_calls:
self._inner_api_calls[
'get_quantum_program'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_quantum_program,
default_retry=self._method_configs['GetQuantumProgram'].retry,
default_timeout=self._method_configs['GetQuantumProgram'].timeout,
client_info=self._client_info,
)
request = engine_pb2.GetQuantumProgramRequest(
name=name,
return_code=return_code,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('name', name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls['get_quantum_program'](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_quantum_programs(
self,
parent: Optional[str] = None,
page_size: Optional[int] = None,
filter_: Optional[str] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> # Iterate over all results
>>> for element in client.list_quantum_programs():
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_quantum_programs().pages:
... for element in page:
... # process element
... pass
Args:
parent (str): -
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
filter_ (str): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumProgram` instances.
You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_quantum_programs' not in self._inner_api_calls:
self._inner_api_calls[
'list_quantum_programs'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_quantum_programs,
default_retry=self._method_configs['ListQuantumPrograms'].retry,
default_timeout=self._method_configs['ListQuantumPrograms'].timeout,
client_info=self._client_info,
)
request = engine_pb2.ListQuantumProgramsRequest(
parent=parent,
page_size=page_size,
filter=filter_,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('parent', parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_quantum_programs'],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field='programs',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def delete_quantum_program(
self,
name: Optional[str] = None,
delete_jobs: Optional[bool] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> client.delete_quantum_program()
Args:
name (str): -
delete_jobs (bool): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'delete_quantum_program' not in self._inner_api_calls:
self._inner_api_calls[
'delete_quantum_program'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_quantum_program,
default_retry=self._method_configs['DeleteQuantumProgram'].retry,
default_timeout=self._method_configs['DeleteQuantumProgram'].timeout,
client_info=self._client_info,
)
request = engine_pb2.DeleteQuantumProgramRequest(
name=name,
delete_jobs=delete_jobs,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('name', name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
self._inner_api_calls['delete_quantum_program'](
request, retry=retry, timeout=timeout, metadata=metadata
)
def update_quantum_program(
self,
name: Optional[str] = None,
quantum_program: Union[Dict[str, Any], pb_types.QuantumProgram] = None,
update_mask: Union[Dict[str, Any], field_mask_pb2.FieldMask] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> response = client.update_quantum_program()
Args:
name (str): -
quantum_program (Union[dict, ~cirq_google.engine.client.quantum_v1alpha1.types.QuantumProgram]): -
If a dict is provided, it must be of the same form as the protobuf
message :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumProgram`
update_mask (Union[dict, ~cirq_google.engine.client.quantum_v1alpha1.types.FieldMask]): -
If a dict is provided, it must be of the same form as the protobuf
message :class:`~cirq_google.engine.client.quantum_v1alpha1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumProgram` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'update_quantum_program' not in self._inner_api_calls:
self._inner_api_calls[
'update_quantum_program'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_quantum_program,
default_retry=self._method_configs['UpdateQuantumProgram'].retry,
default_timeout=self._method_configs['UpdateQuantumProgram'].timeout,
client_info=self._client_info,
)
request = engine_pb2.UpdateQuantumProgramRequest(
name=name,
quantum_program=quantum_program,
update_mask=update_mask,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('name', name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls['update_quantum_program'](
request, retry=retry, timeout=timeout, metadata=metadata
)
def create_quantum_job(
self,
parent: Optional[str] = None,
quantum_job: Union[Dict[str, Any], pb_types.QuantumJob] = None,
overwrite_existing_run_context: Optional[bool] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> response = client.create_quantum_job()
Args:
parent (str): -
quantum_job (Union[dict, ~cirq_google.engine.client.quantum_v1alpha1.types.QuantumJob]): -
If a dict is provided, it must be of the same form as the protobuf
message :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumJob`
overwrite_existing_run_context (bool): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumJob` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'create_quantum_job' not in self._inner_api_calls:
self._inner_api_calls[
'create_quantum_job'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_quantum_job,
default_retry=self._method_configs['CreateQuantumJob'].retry,
default_timeout=self._method_configs['CreateQuantumJob'].timeout,
client_info=self._client_info,
)
request = engine_pb2.CreateQuantumJobRequest(
parent=parent,
quantum_job=quantum_job,
overwrite_existing_run_context=overwrite_existing_run_context,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('parent', parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls['create_quantum_job'](
request, retry=retry, timeout=timeout, metadata=metadata
)
def get_quantum_job(
self,
name: Optional[str] = None,
return_run_context: Optional[bool] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> response = client.get_quantum_job()
Args:
name (str): -
return_run_context (bool): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumJob` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_quantum_job' not in self._inner_api_calls:
self._inner_api_calls['get_quantum_job'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_quantum_job,
default_retry=self._method_configs['GetQuantumJob'].retry,
default_timeout=self._method_configs['GetQuantumJob'].timeout,
client_info=self._client_info,
)
request = engine_pb2.GetQuantumJobRequest(
name=name,
return_run_context=return_run_context,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('name', name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls['get_quantum_job'](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_quantum_jobs(
self,
parent: Optional[str] = None,
page_size: Optional[int] = None,
filter_: Optional[str] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> # Iterate over all results
>>> for element in client.list_quantum_jobs():
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_quantum_jobs().pages:
... for element in page:
... # process element
... pass
Args:
parent (str): -
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
filter_ (str): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumJob` instances.
You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_quantum_jobs' not in self._inner_api_calls:
self._inner_api_calls[
'list_quantum_jobs'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_quantum_jobs,
default_retry=self._method_configs['ListQuantumJobs'].retry,
default_timeout=self._method_configs['ListQuantumJobs'].timeout,
client_info=self._client_info,
)
request = engine_pb2.ListQuantumJobsRequest(
parent=parent,
page_size=page_size,
filter=filter_,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('parent', parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_quantum_jobs'],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field='jobs',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def delete_quantum_job(
self,
name: Optional[str] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> client.delete_quantum_job()
Args:
name (str): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'delete_quantum_job' not in self._inner_api_calls:
self._inner_api_calls[
'delete_quantum_job'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_quantum_job,
default_retry=self._method_configs['DeleteQuantumJob'].retry,
default_timeout=self._method_configs['DeleteQuantumJob'].timeout,
client_info=self._client_info,
)
request = engine_pb2.DeleteQuantumJobRequest(
name=name,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('name', name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
self._inner_api_calls['delete_quantum_job'](
request, retry=retry, timeout=timeout, metadata=metadata
)
def update_quantum_job(
self,
name: Optional[str] = None,
quantum_job: Union[Dict[str, Any], pb_types.QuantumJob] = None,
update_mask: Union[Dict[str, Any], field_mask_pb2.FieldMask] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> response = client.update_quantum_job()
Args:
name (str): -
quantum_job (Union[dict, ~cirq_google.engine.client.quantum_v1alpha1.types.QuantumJob]): -
If a dict is provided, it must be of the same form as the protobuf
message :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumJob`
update_mask (Union[dict, ~cirq_google.engine.client.quantum_v1alpha1.types.FieldMask]): -
If a dict is provided, it must be of the same form as the protobuf
message :class:`~cirq_google.engine.client.quantum_v1alpha1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumJob` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'update_quantum_job' not in self._inner_api_calls:
self._inner_api_calls[
'update_quantum_job'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_quantum_job,
default_retry=self._method_configs['UpdateQuantumJob'].retry,
default_timeout=self._method_configs['UpdateQuantumJob'].timeout,
client_info=self._client_info,
)
request = engine_pb2.UpdateQuantumJobRequest(
name=name,
quantum_job=quantum_job,
update_mask=update_mask,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('name', name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls['update_quantum_job'](
request, retry=retry, timeout=timeout, metadata=metadata
)
def cancel_quantum_job(
self,
name: Optional[str] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> client.cancel_quantum_job()
Args:
name (str): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'cancel_quantum_job' not in self._inner_api_calls:
self._inner_api_calls[
'cancel_quantum_job'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.cancel_quantum_job,
default_retry=self._method_configs['CancelQuantumJob'].retry,
default_timeout=self._method_configs['CancelQuantumJob'].timeout,
client_info=self._client_info,
)
request = engine_pb2.CancelQuantumJobRequest(
name=name,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('name', name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
self._inner_api_calls['cancel_quantum_job'](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_quantum_job_events(
self,
parent: Optional[str] = None,
page_size: Optional[int] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> # Iterate over all results
>>> for element in client.list_quantum_job_events():
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_quantum_job_events().pages:
... for element in page:
... # process element
... pass
Args:
parent (str): -
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumJobEvent` instances.
You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_quantum_job_events' not in self._inner_api_calls:
self._inner_api_calls[
'list_quantum_job_events'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_quantum_job_events,
default_retry=self._method_configs['ListQuantumJobEvents'].retry,
default_timeout=self._method_configs['ListQuantumJobEvents'].timeout,
client_info=self._client_info,
)
request = engine_pb2.ListQuantumJobEventsRequest(
parent=parent,
page_size=page_size,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('parent', parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_quantum_job_events'],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field='events',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def get_quantum_result(
self,
parent: Optional[str] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> response = client.get_quantum_result()
Args:
parent (str): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumResult` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_quantum_result' not in self._inner_api_calls:
self._inner_api_calls[
'get_quantum_result'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_quantum_result,
default_retry=self._method_configs['GetQuantumResult'].retry,
default_timeout=self._method_configs['GetQuantumResult'].timeout,
client_info=self._client_info,
)
request = engine_pb2.GetQuantumResultRequest(
parent=parent,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('parent', parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls['get_quantum_result'](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_quantum_processors(
self,
parent: Optional[str] = None,
page_size: Optional[int] = None,
filter_: Optional[str] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> # Iterate over all results
>>> for element in client.list_quantum_processors():
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_quantum_processors().pages:
... for element in page:
... # process element
... pass
Args:
parent (str): -
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
filter_ (str): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumProcessor` instances.
You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_quantum_processors' not in self._inner_api_calls:
self._inner_api_calls[
'list_quantum_processors'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_quantum_processors,
default_retry=self._method_configs['ListQuantumProcessors'].retry,
default_timeout=self._method_configs['ListQuantumProcessors'].timeout,
client_info=self._client_info,
)
request = engine_pb2.ListQuantumProcessorsRequest(
parent=parent,
page_size=page_size,
filter=filter_,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('parent', parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_quantum_processors'],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field='processors',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def get_quantum_processor(
self,
name: Optional[str] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> response = client.get_quantum_processor()
Args:
name (str): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumProcessor` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_quantum_processor' not in self._inner_api_calls:
self._inner_api_calls[
'get_quantum_processor'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_quantum_processor,
default_retry=self._method_configs['GetQuantumProcessor'].retry,
default_timeout=self._method_configs['GetQuantumProcessor'].timeout,
client_info=self._client_info,
)
request = engine_pb2.GetQuantumProcessorRequest(
name=name,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('name', name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls['get_quantum_processor'](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_quantum_calibrations(
self,
parent: Optional[str] = None,
page_size: Optional[int] = None,
filter_: Optional[str] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> # Iterate over all results
>>> for element in client.list_quantum_calibrations():
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_quantum_calibrations().pages:
... for element in page:
... # process element
... pass
Args:
parent (str): -
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
filter_ (str): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumCalibration` instances.
You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_quantum_calibrations' not in self._inner_api_calls:
self._inner_api_calls[
'list_quantum_calibrations'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_quantum_calibrations,
default_retry=self._method_configs['ListQuantumCalibrations'].retry,
default_timeout=self._method_configs['ListQuantumCalibrations'].timeout,
client_info=self._client_info,
)
request = engine_pb2.ListQuantumCalibrationsRequest(
parent=parent,
page_size=page_size,
filter=filter_,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('parent', parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_quantum_calibrations'],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field='calibrations',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def get_quantum_calibration(
self,
name: Optional[str] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> response = client.get_quantum_calibration()
Args:
name (str): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumCalibration` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_quantum_calibration' not in self._inner_api_calls:
self._inner_api_calls[
'get_quantum_calibration'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_quantum_calibration,
default_retry=self._method_configs['GetQuantumCalibration'].retry,
default_timeout=self._method_configs['GetQuantumCalibration'].timeout,
client_info=self._client_info,
)
request = engine_pb2.GetQuantumCalibrationRequest(
name=name,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('name', name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls['get_quantum_calibration'](
request, retry=retry, timeout=timeout, metadata=metadata
)
def create_quantum_reservation(
self,
parent: Optional[str] = None,
quantum_reservation: Union[Dict[str, Any], pb_types.QuantumReservation] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> response = client.create_quantum_reservation()
Args:
parent (str): -
quantum_reservation (Union[dict, ~cirq_google.engine.client.quantum_v1alpha1.types.QuantumReservation]): -
If a dict is provided, it must be of the same form as the protobuf
message :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumReservation`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumReservation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'create_quantum_reservation' not in self._inner_api_calls:
self._inner_api_calls[
'create_quantum_reservation'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_quantum_reservation,
default_retry=self._method_configs['CreateQuantumReservation'].retry,
default_timeout=self._method_configs['CreateQuantumReservation'].timeout,
client_info=self._client_info,
)
request = engine_pb2.CreateQuantumReservationRequest(
parent=parent,
quantum_reservation=quantum_reservation,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('parent', parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls['create_quantum_reservation'](
request, retry=retry, timeout=timeout, metadata=metadata
)
def cancel_quantum_reservation(
self,
name: Optional[str] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> response = client.cancel_quantum_reservation()
Args:
name (str): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumReservation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'cancel_quantum_reservation' not in self._inner_api_calls:
self._inner_api_calls[
'cancel_quantum_reservation'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.cancel_quantum_reservation,
default_retry=self._method_configs['CancelQuantumReservation'].retry,
default_timeout=self._method_configs['CancelQuantumReservation'].timeout,
client_info=self._client_info,
)
request = engine_pb2.CancelQuantumReservationRequest(
name=name,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('name', name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls['cancel_quantum_reservation'](
request, retry=retry, timeout=timeout, metadata=metadata
)
def delete_quantum_reservation(
self,
name: Optional[str] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> client.delete_quantum_reservation()
Args:
name (str): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'delete_quantum_reservation' not in self._inner_api_calls:
self._inner_api_calls[
'delete_quantum_reservation'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_quantum_reservation,
default_retry=self._method_configs['DeleteQuantumReservation'].retry,
default_timeout=self._method_configs['DeleteQuantumReservation'].timeout,
client_info=self._client_info,
)
request = engine_pb2.DeleteQuantumReservationRequest(
name=name,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('name', name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
self._inner_api_calls['delete_quantum_reservation'](
request, retry=retry, timeout=timeout, metadata=metadata
)
def get_quantum_reservation(
self,
name: Optional[str] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> response = client.get_quantum_reservation()
Args:
name (str): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumReservation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_quantum_reservation' not in self._inner_api_calls:
self._inner_api_calls[
'get_quantum_reservation'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_quantum_reservation,
default_retry=self._method_configs['GetQuantumReservation'].retry,
default_timeout=self._method_configs['GetQuantumReservation'].timeout,
client_info=self._client_info,
)
request = engine_pb2.GetQuantumReservationRequest(
name=name,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('name', name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls['get_quantum_reservation'](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_quantum_reservations(
self,
parent: Optional[str] = None,
page_size: Optional[int] = None,
filter_: Optional[str] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> # Iterate over all results
>>> for element in client.list_quantum_reservations():
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_quantum_reservations().pages:
... for element in page:
... # process element
... pass
Args:
parent (str): -
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
filter_ (str): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumReservation` instances.
You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_quantum_reservations' not in self._inner_api_calls:
self._inner_api_calls[
'list_quantum_reservations'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_quantum_reservations,
default_retry=self._method_configs['ListQuantumReservations'].retry,
default_timeout=self._method_configs['ListQuantumReservations'].timeout,
client_info=self._client_info,
)
request = engine_pb2.ListQuantumReservationsRequest(
parent=parent,
page_size=page_size,
filter=filter_,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('parent', parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_quantum_reservations'],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field='reservations',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def update_quantum_reservation(
self,
name: Optional[str] = None,
quantum_reservation: Union[Dict[str, Any], pb_types.QuantumReservation] = None,
update_mask: Union[Dict[str, Any], field_mask_pb2.FieldMask] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> response = client.update_quantum_reservation()
Args:
name (str): -
quantum_reservation (Union[dict, ~cirq_google.engine.client.quantum_v1alpha1.types.QuantumReservation]): -
If a dict is provided, it must be of the same form as the protobuf
message :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumReservation`
update_mask (Union[dict, ~cirq_google.engine.client.quantum_v1alpha1.types.FieldMask]): -
If a dict is provided, it must be of the same form as the protobuf
message :class:`~cirq_google.engine.client.quantum_v1alpha1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumReservation` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'update_quantum_reservation' not in self._inner_api_calls:
self._inner_api_calls[
'update_quantum_reservation'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_quantum_reservation,
default_retry=self._method_configs['UpdateQuantumReservation'].retry,
default_timeout=self._method_configs['UpdateQuantumReservation'].timeout,
client_info=self._client_info,
)
request = engine_pb2.UpdateQuantumReservationRequest(
name=name,
quantum_reservation=quantum_reservation,
update_mask=update_mask,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('name', name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls['update_quantum_reservation'](
request, retry=retry, timeout=timeout, metadata=metadata
)
def quantum_run_stream(
self,
requests,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> request = {}
>>>
>>> requests = [request]
>>> for element in client.quantum_run_stream(requests):
... # process element
... pass
Args:
requests (iterator[dict|cirq_google.engine.client.quantum_v1alpha1.proto.engine_pb2.QuantumRunStreamRequest]): The input objects. If a dict is provided, it must be of the
same form as the protobuf message :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumRunStreamRequest`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
Iterable[~cirq_google.engine.client.quantum_v1alpha1.types.QuantumRunStreamResponse].
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'quantum_run_stream' not in self._inner_api_calls:
self._inner_api_calls[
'quantum_run_stream'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.quantum_run_stream,
default_retry=self._method_configs['QuantumRunStream'].retry,
default_timeout=self._method_configs['QuantumRunStream'].timeout,
client_info=self._client_info,
)
return self._inner_api_calls['quantum_run_stream'](
requests, retry=retry, timeout=timeout, metadata=metadata
)
def list_quantum_reservation_grants(
self,
parent: Optional[str] = None,
page_size: Optional[int] = None,
filter_: Optional[str] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> # Iterate over all results
>>> for element in client.list_quantum_reservation_grants():
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_quantum_reservation_grants().pages:
... for element in page:
... # process element
... pass
Args:
parent (str): -
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
filter_ (str): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumReservationGrant` instances.
You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_quantum_reservation_grants' not in self._inner_api_calls:
self._inner_api_calls[
'list_quantum_reservation_grants'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_quantum_reservation_grants,
default_retry=self._method_configs['ListQuantumReservationGrants'].retry,
default_timeout=self._method_configs['ListQuantumReservationGrants'].timeout,
client_info=self._client_info,
)
request = engine_pb2.ListQuantumReservationGrantsRequest(
parent=parent,
page_size=page_size,
filter=filter_,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('parent', parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_quantum_reservation_grants'],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field='reservation_grants',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def reallocate_quantum_reservation_grant(
self,
name: Optional[str] = None,
source_project_id: Optional[str] = None,
target_project_id: Optional[str] = None,
duration: Union[Dict[str, Any], duration_pb2.Duration] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> response = client.reallocate_quantum_reservation_grant()
Args:
name (str): -
source_project_id (str): -
target_project_id (str): -
duration (Union[dict, ~cirq_google.engine.client.quantum_v1alpha1.types.Duration]): -
If a dict is provided, it must be of the same form as the protobuf
message :class:`~cirq_google.engine.client.quantum_v1alpha1.types.Duration`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumReservationGrant` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'reallocate_quantum_reservation_grant' not in self._inner_api_calls:
self._inner_api_calls[
'reallocate_quantum_reservation_grant'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.reallocate_quantum_reservation_grant,
default_retry=self._method_configs['ReallocateQuantumReservationGrant'].retry,
default_timeout=self._method_configs['ReallocateQuantumReservationGrant'].timeout,
client_info=self._client_info,
)
request = engine_pb2.ReallocateQuantumReservationGrantRequest(
name=name,
source_project_id=source_project_id,
target_project_id=target_project_id,
duration=duration,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('name', name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls['reallocate_quantum_reservation_grant'](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_quantum_reservation_budgets(
self,
parent: Optional[str] = None,
page_size: Optional[int] = None,
filter_: Optional[str] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> # Iterate over all results
>>> for element in client.list_quantum_reservation_budgets():
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_quantum_reservation_budgets().pages:
... for element in page:
... # process element
... pass
Args:
parent (str): -
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
filter_ (str): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumReservationBudget` instances.
You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_quantum_reservation_budgets' not in self._inner_api_calls:
self._inner_api_calls[
'list_quantum_reservation_budgets'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_quantum_reservation_budgets,
default_retry=self._method_configs['ListQuantumReservationBudgets'].retry,
default_timeout=self._method_configs['ListQuantumReservationBudgets'].timeout,
client_info=self._client_info,
)
request = engine_pb2.ListQuantumReservationBudgetsRequest(
parent=parent,
page_size=page_size,
filter=filter_,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('parent', parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_quantum_reservation_budgets'],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field='reservation_budgets',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
def list_quantum_time_slots(
self,
parent: Optional[str] = None,
page_size: Optional[int] = None,
filter_: Optional[str] = None,
retry: Optional[google.api_core.retry.Retry] = google.api_core.gapic_v1.method.DEFAULT,
timeout: Optional[float] = google.api_core.gapic_v1.method.DEFAULT,
metadata: Optional[Sequence[Tuple[str, str]]] = None,
):
"""
-
Example:
>>> from cirq_google.engine.client import quantum_v1alpha1
>>>
>>> client = quantum_v1alpha1.QuantumEngineServiceClient()
>>>
>>> # Iterate over all results
>>> for element in client.list_quantum_time_slots():
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_quantum_time_slots().pages:
... for element in page:
... # process element
... pass
Args:
parent (str): -
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
filter_ (str): -
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~cirq_google.engine.client.quantum_v1alpha1.types.QuantumTimeSlot` instances.
You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_quantum_time_slots' not in self._inner_api_calls:
self._inner_api_calls[
'list_quantum_time_slots'
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_quantum_time_slots,
default_retry=self._method_configs['ListQuantumTimeSlots'].retry,
default_timeout=self._method_configs['ListQuantumTimeSlots'].timeout,
client_info=self._client_info,
)
request = engine_pb2.ListQuantumTimeSlotsRequest(
parent=parent,
page_size=page_size,
filter=filter_,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [('parent', parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls['list_quantum_time_slots'],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field='time_slots',
request_token_field='page_token',
response_token_field='next_page_token',
)
return iterator
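# Illustrative usage sketch for the paginated list helpers above. The project
# id and the 'projects/<id>' parent format are assumptions, and page_size /
# timeout are arbitrary example values.
if __name__ == '__main__':
    from cirq_google.engine.client import quantum_v1alpha1
    client = quantum_v1alpha1.QuantumEngineServiceClient()
    # The returned GRPCIterator lazily follows next_page_token across pages.
    for processor in client.list_quantum_processors(
            parent='projects/example-project',  # hypothetical project
            page_size=50,
            timeout=30.0):
        print(processor.name)  # each element is a QuantumProcessor message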
|
from .env import WorldEnv
def make_env(args):
return WorldEnv()
|
import pathlib, sys
home_path = pathlib.Path('.').resolve()
while home_path.name != "membership_inference_attack":
home_path = home_path.parent
utils_path = home_path/'src'/'utils'
if utils_path.as_posix() not in sys.path:
sys.path.insert(0, utils_path.as_posix())
import torch
import torch.utils.data
import torch.optim as optim
from copy import deepcopy
from utils_modules import weight_init, train, test
from miscellaneous import progress_bar, fixed_random_split, fixed_random_subset
from torch.utils.data import TensorDataset
def split_shadow_dataset(train_set, test_set, shadow_number, data_size = 5000):
shadow_datasets_in = []
shadow_datasets_out = []
for i in range(shadow_number):
shadow_datasets_in.append(fixed_random_subset(train_set, data_size, i))
shadow_datasets_out.append(fixed_random_subset(test_set, data_size, i))
return shadow_datasets_in, shadow_datasets_out
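def _demo_split_shadow_dataset():
    # Small illustrative sketch of the helper above on toy data; the tensor
    # shapes and the shadow_number/data_size values are arbitrary, and
    # fixed_random_subset is assumed to return `data_size` samples per shadow.
    toy_train = TensorDataset(torch.randn(100, 4), torch.randint(0, 2, (100,)))
    toy_test = TensorDataset(torch.randn(100, 4), torch.randint(0, 2, (100,)))
    ins, outs = split_shadow_dataset(toy_train, toy_test, shadow_number = 3, data_size = 10)
    # One "in" split (members) and one "out" split (non-members) per shadow model.
    assert len(ins) == len(outs) == 3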
def get_mia_train_dataset(dataset = None,
test_set = None,
shadow_number = None,
shadow_model = None,
use_cuda = False,
custom_shadow_optim_args = None,
shadow_model_base_path = None,
mia_dataset_path = None,
class_number = None,
stats = None,
shadow_train_epochs = None,
shadow_batch_size = None):
"""get_mia_train_dataset() create training datasets for the MIA models.
First it trains shadow models, which are usually copies of the target
model, then samples of the provided dataset are used to generate MIA
in and out sample by using the trained shadow models.
Args:
dataset (torch Dataset): a dataset for training the shadow models and generate in and out samples.
shadow_number (int): the number of shadow models.
shadow_model (torch Module): the shadow model to use.
use_cuda (bool): whether to use cuda or not. False by default.
custom_shadow_optim_args (Dict): custom options for the Adam optimizer.
shadow_model_base_path (string/Path): path of the shadow model dir.
mia_dataset_path (string/Path): path of the MIA training dataset dir.
class_number (int): number of classes in the problem solved by the target model.
stats (Statistics): statistics object that records the data of the shadow training.
shadow_train_epochs (int): number of epoch for the shadow training.
shadow_batch_size (int): batch size of the shadow training.
Returns:
list(TensorDataset): a list of datasets to train the MIA model.
"""
if (dataset is None) or \
(shadow_number is None) or \
(shadow_model is None) or \
(mia_dataset_path is None) or \
(class_number is None) or \
(shadow_model_base_path is None):
raise ValueError("get_mia_train_dataset: one of the requiered "
"argument is not set")
    # Test whether the shadow models still need to be trained or whether the
    # work has already been done: if a shadow model with the last index exists,
    # the shadow models have already been trained.
# TODO(PI) deal with the case where the shadow model differs
last_shadow_model_path = pathlib.Path(shadow_model_base_path + "_{}.pt".format(shadow_number - 1))
more_shadow_model_path = pathlib.Path(shadow_model_base_path + "_{}.pt".format(shadow_number))
cuda_args = { 'num_workers' : 1, 'pin_memory' : False } if use_cuda else {}
device = torch.device('cuda' if use_cuda else 'cpu')
dataset_size = len(dataset)
shadow_models = []
mia_datasets_dir = pathlib.Path(mia_dataset_path)
if not mia_datasets_dir.exists():
mia_datasets_dir.mkdir()
if last_shadow_model_path.exists() and (not more_shadow_model_path.exists()):
need_to_work = False
for i in range(class_number):
if not (mia_datasets_dir/f"class_{i}.pt").exists():
                # some of the MIA training datasets are missing, so they have
                # to be built
need_to_work = True
break
if not need_to_work:
print("loading the MIA train datasets")
mia_datasets = list()
for i in range(class_number):
mia_datasets.append(torch.load((mia_datasets_dir/f"class_{i}.pt").as_posix()))
return mia_datasets
# the MIA dataset creation has not been done, load the shadow models
print("\nloading shadow models")
for i in range(shadow_number):
shadow_models.append(torch.load(f"{shadow_model_base_path}_{i}.pt"))
else:
# nothing has been done, train the shadow models
print("\ntraining shadow models")
shadow_dir = pathlib.Path(shadow_model_base_path).parent
if not shadow_dir.exists():
shadow_dir.mkdir()
shadow_train_datasets, shadow_test_datasets = split_shadow_dataset(dataset, test_set, shadow_number)
for i in range(shadow_number):
            # copy the model architecture; the weights are re-randomized per
            # shadow at training time (see the weight_init call below)
model = deepcopy(shadow_model).to(device)
shadow_models.append(model)
# remove old models if necessary
if more_shadow_model_path.exists():
base_file = pathlib.Path(shadow_model_base_path)
base_dir = base_file.parent
base_file_name = base_file.name
old_shadow_models = list(base_dir.glob(base_file_name + '_*.pt'))
for i in range(len(old_shadow_models)):
old_shadow_models[i].unlink()
# shadow swarm training
for i in range(shadow_number):
model = shadow_models[i].to(device)
model.apply(weight_init)
train_loader = torch.utils.data.DataLoader(shadow_train_datasets[i], batch_size = shadow_batch_size,
shuffle = True, **cuda_args)
test_loader = torch.utils.data.DataLoader(shadow_test_datasets[i], batch_size = 1000,
shuffle = True, **cuda_args)
optim_args = {}
if custom_shadow_optim_args is not None:
optim_args = custom_shadow_optim_args
optimizer = optim.Adam(model.parameters(), **optim_args)
stats.new_train(label = "shadow-model")
for epoch in range(shadow_train_epochs):
train(model.to(device), device, train_loader, optimizer, epoch, verbose = False, train_stats = stats)
if epoch == shadow_train_epochs - 1:
stats.new_epoch()
test(model.to(device), device, test_loader, test_stats = stats, verbose = False)
torch.save(model, shadow_model_base_path + "_{}.pt".format(i))
progress_bar(iteration = i, total = shadow_number - 1)
# set all shadow models in evaluation mode
print("\nbuilding the MIA train datasets")
shadow_datasets_in, shadow_datasets_out = split_shadow_dataset(dataset, test_set, shadow_number)
# build the MIA datasets
input_tensor_lists = [list() for i in range(class_number)]
output_tensor_lists = [list() for i in range(class_number)]
for i in range(shadow_number):
current_shadow = shadow_models[i]
current_shadow.eval()
data_in_loader = torch.utils.data.DataLoader(shadow_datasets_in[i],
batch_size = 1000, shuffle = True, **cuda_args)
with torch.no_grad():
for batch in data_in_loader:
data = batch[0:-1]
targets = batch[-1]
data = [e.to(device) for e in data]
targets = targets.to(device)
outputs = current_shadow(*data)
for target, output in zip(targets, outputs):
# ~ input_tensor_lists[target].append(output)
# ~ output_tensor_lists[target].append(torch.tensor(1))
if torch.argmax(output).item() == target.item():
input_tensor_lists[target].append(output)
output_tensor_lists[target].append(torch.tensor(1))
data_out_loader = torch.utils.data.DataLoader(shadow_datasets_out[i],
batch_size = 1000, shuffle = True, **cuda_args)
with torch.no_grad():
for batch in data_out_loader:
data = batch[0:-1]
targets = batch[-1]
data = [e.to(device) for e in data]
targets = targets.to(device)
outputs = current_shadow(*data)
for target, output in zip(targets, outputs):
# ~ input_tensor_lists[target].append(output)
# ~ output_tensor_lists[target].append(torch.tensor(0))
if torch.argmax(output).item() == target.item():
input_tensor_lists[target].append(output)
output_tensor_lists[target].append(torch.tensor(0))
i = 0
mia_datasets = list()
for inputs, labels in zip(input_tensor_lists, output_tensor_lists):
mia_datasets.append(TensorDataset(torch.stack(inputs), torch.stack(labels)))
torch.save(mia_datasets[-1], (mia_datasets_dir/f"class_{i}.pt").as_posix())
i += 1
return mia_datasets
def get_mia_test_dataset(train_dataset = None,
test_dataset = None,
target_model = None,
use_cuda = False,
mia_dataset_path = None,
class_number = None):
"""get_mia_test_dataset() generate test datasets for the MIA models.
It executes, with the target model, samples coming from the train and
test dataset of the target model.
Args:
train_dataset (torch Dataset): dataset used to train the target model.
test_dataset (torch Dataset): dataset that was not used to train the target model.
target_model (torch Module): the trained target model.
use_cuda (bool): whether to use cuda or not. False by default.
mia_dataset_path (string/Path): path for saving the MIA test datasets.
class_number (int): number of classes in the problem solved by the target model.
Returns:
list(TensorDataset): a list of datasets to test the MIA model.
"""
if (mia_dataset_path is None) or (class_number is None):
raise ValueError("get_mia_test_dataset: one of the required "
"argument is not set")
mia_datasets_dir = pathlib.Path(mia_dataset_path)
need_to_work = False
for i in range(class_number):
if not (mia_datasets_dir/f"class_{i}.pt").exists():
            # some of the MIA test datasets are missing, so they have to be
            # built
need_to_work = True
break
if not need_to_work:
print("loading the MIA test datasets")
mia_datasets = list()
for i in range(class_number):
mia_datasets.append(torch.load((mia_datasets_dir/f"class_{i}.pt").as_posix()))
return mia_datasets
if not mia_datasets_dir.exists():
mia_datasets_dir.mkdir()
print('building the MIA test datasets')
cuda_args = { 'num_workers' : 1, 'pin_memory' : False } if use_cuda else {}
device = torch.device('cuda' if use_cuda else 'cpu')
input_tensor_lists = [list() for i in range(class_number)]
output_tensor_lists = [list() for i in range(class_number)]
data_in_loader = torch.utils.data.DataLoader(train_dataset, batch_size = 1000,
shuffle = True, **cuda_args)
with torch.no_grad():
for batch in data_in_loader:
data = batch[0:-1]
targets = batch[-1]
data = [e.to(device) for e in data]
targets = targets.to(device)
outputs = target_model(*data)
for target, output in zip(targets, outputs):
# ~ input_tensor_lists[target].append(output)
# ~ output_tensor_lists[target].append(torch.tensor(1))
if torch.argmax(output).item() == target.item():
input_tensor_lists[target].append(output)
output_tensor_lists[target].append(torch.tensor(1))
data_out_loader = torch.utils.data.DataLoader(test_dataset, batch_size = 1000,
shuffle = True, **cuda_args)
with torch.no_grad():
for batch in data_out_loader:
data = batch[0:-1]
targets = batch[-1]
data = [e.to(device) for e in data]
targets = targets.to(device)
outputs = target_model(*data)
for target, output in zip(targets, outputs):
# ~ input_tensor_lists[target].append(output)
# ~ output_tensor_lists[target].append(torch.tensor(0))
if torch.argmax(output).item() == target.item():
input_tensor_lists[target].append(output)
output_tensor_lists[target].append(torch.tensor(0))
i = 0
mia_datasets = list()
for inputs, labels in zip(input_tensor_lists, output_tensor_lists):
mia_datasets.append(TensorDataset(torch.stack(inputs), torch.stack(labels)))
torch.save(mia_datasets[-1], (mia_datasets_dir/f"class_{i}.pt").as_posix())
i += 1
return mia_datasets
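def _example_mia_pipeline(target_model, shadow_model, train_set, test_set, stats):
    # Illustrative wiring of the two helpers above. The paths, class_number,
    # epoch count and batch size are placeholder values, and `stats` is assumed
    # to be the statistics object expected by train()/test().
    mia_train_sets = get_mia_train_dataset(dataset = train_set,
                                           test_set = test_set,
                                           shadow_number = 5,
                                           shadow_model = shadow_model,
                                           use_cuda = torch.cuda.is_available(),
                                           shadow_model_base_path = "shadow_models/shadow",
                                           mia_dataset_path = "mia_train_datasets",
                                           class_number = 10,
                                           stats = stats,
                                           shadow_train_epochs = 10,
                                           shadow_batch_size = 64)
    mia_test_sets = get_mia_test_dataset(train_dataset = train_set,
                                         test_dataset = test_set,
                                         target_model = target_model,
                                         use_cuda = torch.cuda.is_available(),
                                         mia_dataset_path = "mia_test_datasets",
                                         class_number = 10)
    return mia_train_sets, mia_test_sets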
|
import csv
try:
    import importlib.resources as importlib_resources
except ImportError:  # Python < 3.7: fall back to the importlib_resources backport
    import importlib_resources
class IANAList:
def __init__(self):
self.used_ports = set()
        raw = importlib_resources.read_text('str2port', 'iana.csv').strip().split('\n')
records = csv.DictReader(raw)
for record in records:
port = record['Port Number'].strip()
if port:
try:
self.used_ports.add(int(port))
except ValueError:
port_from, port_to = port.split('-')
self.used_ports.update(range(int(port_from), int(port_to) + 1))
self.available_ports = sorted(set(range(1024, 65536)) - self.used_ports)
def __len__(self):
return len(self.available_ports)
def is_free(self, i):
return i not in self.used_ports
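if __name__ == '__main__':
    # Usage sketch; assumes the 'str2port' package data (iana.csv) is importable
    # in the running environment.
    ports = IANAList()
    print(len(ports), "unassigned ports between 1024 and 65535")
    print("8080 free?", ports.is_free(8080))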
|
print (lambda Ru,Ro,Iu,Io,IM,Sx,Sy:reduce(lambda x,y:x+y,map(lambda y,Iu=Iu,Io=Io,Ru=Ru,Ro=Ro,Sy=Sy,L=lambda yc,Iu=Iu,Io=Io,Ru=Ru,Ro=Ro,i=IM,Sx=Sx,Sy=Sy:reduce(lambda x,y:x+y,map(lambda x,xc=Ru,yc=yc,Ru=Ru,Ro=Ro,i=i,Sx=Sx,F=lambda xc,yc,x,y,k,f=lambda xc,yc,x,y,k,f:(k<=0)or (x*x+y*y>=4.0) or 1+f(xc,yc,x*x-y*y+xc,2.0*x*y+yc,k-1,f):f(xc,yc,x,y,k,f):chr(64+F(Ru+x*(Ro-Ru)/Sx,yc,0,0,i)),range(Sx))):L(Iu+y*(Io-Iu)/Sy),range(Sy))))(-2.1, 0.7, -1.2, 1.2, 30, 85, 25)
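# The one-liner above relies on Python 2 (print statement, builtin reduce). A
# readable Python 3 sketch of the same escape-time iteration: for each character
# cell, iterate z -> z*z + c until |z|^2 >= 4 or the budget runs out, then map
# the escape count to a letter. The defaults mirror the (-2.1, 0.7, -1.2, 1.2,
# 30, 85, 25) call; character offsets may differ by one from the obfuscated form.
def mandelbrot_ascii(r_min=-2.1, r_max=0.7, i_min=-1.2, i_max=1.2,
                     max_iter=30, width=85, height=25):
    rows = []
    for yi in range(height):
        yc = i_min + yi * (i_max - i_min) / height
        row = []
        for xi in range(width):
            xc = r_min + xi * (r_max - r_min) / width
            x = y = 0.0
            count = 1
            while count < max_iter and x * x + y * y < 4.0:
                x, y = x * x - y * y + xc, 2.0 * x * y + yc
                count += 1
            row.append(chr(64 + count))
        rows.append(''.join(row))
    return '\n'.join(rows)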
|
from flask import Flask, request, jsonify, make_response
import sqlite3
import time
import os
import sys
database_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "database.db")
app = Flask(__name__)
@app.route('/', methods=['GET'])
def get_api_version():
return 'API V1'
@app.route('/api/get/all', methods=['GET', 'OPTIONS'])
def get_all_data():
if request.method == "OPTIONS": # CORS preflight
return _build_cors_prelight_response()
try:
with sqlite3.connect(database_dir) as db:
cur = db.cursor()
cur.execute("SELECT * FROM sensor_measurements")
return _corsify_actual_response(jsonify(cur.fetchall()))
except Exception as exception:
print(exception, file=sys.stderr)
db.rollback()
finally:
db.close()
return 'OK'
@app.route('/api/get/latest', methods=['GET', 'OPTIONS'])
def get_latest_data():
if request.method == "OPTIONS": # CORS preflight
return _build_cors_prelight_response()
try:
with sqlite3.connect(database_dir) as db:
cur = db.cursor()
cur.execute(("SELECT sm.* from sensor_measurements sm inner join "
"(select sensor, max(timestamp) as maxdate from sensor_measurements group by sensor) t "
"on t.sensor=sm.sensor and t.maxdate=sm.timestamp"))
return _corsify_actual_response(jsonify(cur.fetchall()))
except Exception as exception:
print(exception, file=sys.stderr)
db.rollback()
finally:
db.close()
return 'OK'
@app.route('/api', methods=['POST'])
def store_data():
data = request.get_json()
try:
with sqlite3.connect(database_dir) as db:
cur = db.cursor()
cur.execute("INSERT INTO sensor_measurements"
"(timestamp, sensor, humidity, temperature, pressure)"
"VALUES(?,?,?,?,?)",
(int(time.time()),
data.get("sensor"),
data.get("humidity"),
data.get("temperature"),
data.get("pressure")
)
)
db.commit()
except Exception as exception:
print(exception, file=sys.stderr)
db.rollback()
finally:
db.close()
return 'OK'
def _build_cors_prelight_response():
response = make_response()
response.headers.add("Access-Control-Allow-Origin", "*")
response.headers.add('Access-Control-Allow-Headers', "*")
response.headers.add('Access-Control-Allow-Methods', "*")
return response
def _corsify_actual_response(response):
response.headers.add("Access-Control-Allow-Origin", "*")
return response
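if __name__ == '__main__':
    # Run the API with the Flask development server (host/port are the common
    # defaults). The sensor_measurements table is assumed to already exist in
    # database.db with the columns used above.
    #
    # Example request against the running server (keys match store_data; the
    # sensor name and readings are placeholder values):
    #   curl -X POST http://localhost:5000/api -H 'Content-Type: application/json' \
    #        -d '{"sensor": "sensor-1", "humidity": 41.2, "temperature": 21.5, "pressure": 1013.2}'
    app.run(host='0.0.0.0', port=5000)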
|
import codecs
import io
from datetime import datetime
from pathlib import Path
import pandas as pd
import spacy
# nlp = spacy.load("../CustomNERData")
nlp = spacy.load("el_core_news_lg")
# with open("zeroUrlText.txt", "r", encoding='utf8') as text_file:
# text = text_file.read()
with open("zeroUrlSubject.txt", "r", encoding='utf8') as text_file:
text = text_file.read()
doc = nlp(text)
for entity in doc.ents:
print(entity.text,'--- ',entity.label_)
# from matplotlib.ticker import EngFormatter
# import numpy as np
# import matplotlib.pyplot as plt
#
#
# example_dict = {"A random very long string to showcase the example": 50000000,
# "Another random smaller string": 3500000000,
# "A small string": 700000000,
# "String": 100000000,
# "Another larger than usual example string that will get sadly cut": 70000000}
#
#
# def categorical_horizontal_bar_numbers1(dataset, fig_dim=(10, 5), title="", x_label="",
# y_label=""):
# fmt = EngFormatter(places=0)
#
# fig, ax = plt.subplots(figsize=fig_dim)
# width = 0.75 # the width of the bars
# ind = np.arange(len(dataset.values())) # the x locations for the groups
# ax.barh(ind, dataset.values(), width, color="blue")
# ax.set_yticks(ind + width / 2)
# ax.set_yticklabels(dataset.keys(), minor=False)
#
# plt.grid(False)
# plt.title(title)
# plt.xlabel(x_label)
# plt.ylabel(y_label)
# ax.xaxis.set_major_formatter(fmt)
#
# for i, v in enumerate(dataset.values()):
# ax.text(v + 500, i, s=fmt.format_eng(v), color='blue', va='center')
#
# plt.show()
#
# categorical_horizontal_bar_numbers1(example_dict,fig_dim=(30,10))
|
import pymysql
import json
def insertsql_from_json():
#connect
conn = pymysql.connect(
host="",
port=3306,
user="root",
password="",
database="",
charset="utf8"
)
curs = conn.cursor()
with open("foul_of.json","r",encoding="utf8") as json_file:
contents = json_file.read()
json_data = json.loads(contents)
sql="insert into foul_of(match_id, club_id, player_id,foul_time, sent_off) values(%s,%s,%s,%s,%s)"
for i in range(len(json_data["foul_of"])):
curs.execute(sql,(json_data["foul_of"][i]["match_id"],
json_data["foul_of"][i]["club_id"],
json_data["foul_of"][i]["player_id"],
json_data["foul_of"][i]["foul_time"],
json_data["foul_of"][i]["sent_off"]
)
)
rows=curs.fetchall()
conn.commit()
conn.close()
insertsql_from_json()
# Reference: https://thalals.tistory.com/37
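# Hedged note: foul_of.json is not included here; judging from the keys read
# above, the file is assumed to look roughly like the following (values are
# illustrative only):
#
#   {
#     "foul_of": [
#       {"match_id": 1, "club_id": 10, "player_id": 7,
#        "foul_time": "23:15", "sent_off": 0}
#     ]
#   }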
|
from django.contrib import admin
from django import forms
from . import models
class procedure_s9AdminForm(forms.ModelForm):
class Meta:
model = models.procedure_s9
fields = "__all__"
class procedure_s9Admin(admin.ModelAdmin):
form = procedure_s9AdminForm
list_display = [
"N_prog_jour",
"nom_phase",
"trx_nuit",
"DFV",
"continuite_elec",
"created",
"active",
"h_fin",
"date_debut_trx",
"pk_fin",
"Ct_Tx",
"etat_surface_rail",
"pk_debut",
"h_debut",
"GEq",
"RCT",
"indice_CtTx",
"Voie",
"last_updated",
]
readonly_fields = [
"N_prog_jour",
"nom_phase",
"trx_nuit",
"DFV",
"continuite_elec",
"created",
"active",
"h_fin",
"date_debut_trx",
"pk_fin",
"Ct_Tx",
"etat_surface_rail",
"pk_debut",
"h_debut",
"GEq",
"RCT",
"indice_CtTx",
"Voie",
"last_updated",
]
class chantier_seAdminForm(forms.ModelForm):
class Meta:
model = models.chantier_se
fields = "__all__"
class chantier_seAdmin(admin.ModelAdmin):
form = chantier_seAdminForm
list_display = [
"created",
"pk_depose_debut",
"pk_depose_fin",
"last_updated",
]
readonly_fields = [
"created",
"pk_depose_debut",
"pk_depose_fin",
"last_updated",
]
class ttxAdminForm(forms.ModelForm):
class Meta:
model = models.ttx
fields = "__all__"
class ttxAdmin(admin.ModelAdmin):
form = ttxAdminForm
list_display = [
"last_updated",
"pk_fin",
"fonction",
"pk_deput",
"nom_ttx",
"created",
"pk_engagment",
"pk_degagment",
]
readonly_fields = [
"last_updated",
"pk_fin",
"fonction",
"pk_deput",
"nom_ttx",
"created",
"pk_engagment",
"pk_degagment",
]
class personelAdminForm(forms.ModelForm):
class Meta:
model = models.personel
fields = "__all__"
class personelAdmin(admin.ModelAdmin):
form = personelAdminForm
list_display = [
"created",
"telephone",
"prenom",
"last_updated",
"habilitation",
"nom",
"date_depart",
"date_arrive",
"fonction",
]
readonly_fields = [
"created",
"telephone",
"prenom",
"last_updated",
"habilitation",
"nom",
"date_depart",
"date_arrive",
"fonction",
]
class info_chantierAdminForm(forms.ModelForm):
class Meta:
model = models.info_chantier
fields = "__all__"
class info_chantierAdmin(admin.ModelAdmin):
form = info_chantierAdminForm
list_display = [
"numero_ligne",
"nbr_voies_travail",
"created",
"last_updated",
"date_debut_chantier",
"date_fin_chantier",
]
readonly_fields = [
"numero_ligne",
"nbr_voies_travail",
"created",
"last_updated",
"date_debut_chantier",
"date_fin_chantier",
]
admin.site.register(models.procedure_s9, procedure_s9Admin)
admin.site.register(models.chantier_se, chantier_seAdmin)
admin.site.register(models.ttx, ttxAdmin)
admin.site.register(models.personel, personelAdmin)
admin.site.register(models.info_chantier, info_chantierAdmin)
|
from aif360.algorithms.inprocessing.adversarial_debiasing import AdversarialDebiasing
from aif360.algorithms.inprocessing.adversarial_error_debiasing import AdversarialErrorDebiasing
from aif360.algorithms.inprocessing.art_classifier import ARTClassifier
from aif360.algorithms.inprocessing.prejudice_remover import PrejudiceRemover
from aif360.algorithms.inprocessing.meta_fair_classifier import MetaFairClassifier
from aif360.algorithms.inprocessing.gerryfair_classifier import GerryFairClassifier
from aif360.algorithms.inprocessing.exponentiated_gradient_reduction import ExponentiatedGradientReduction
from aif360.algorithms.inprocessing.grid_search_reduction import GridSearchReduction
|
"""
Before going through the code, have a look at the following article about AVL Trees:
https://en.wikipedia.org/wiki/AVL_tree
@author: lashuk1729
"""
class Node():
def __init__(self, val):
self.val = val
self.left = None
self.right = None
self.height = 1
class AVL_Tree():
def get_height(self, root):
if root is None:
return 0
return root.height
def get_balance(self, root):
"""
Checks whether the given tree is balanced or not, i.e.,
difference between height of left_subtree and right_subtree should lie between -1 & 1
"""
if root is None:
return 0
return self.get_height(root.left) - self.get_height(root.right)
def inorder(self, root):
if root:
self.inorder(root.left)
print(root.val, end = " -> ")
self.inorder(root.right)
def preorder(self, root):
if root:
print(root.val, end = " -> ")
self.preorder(root.left)
self.preorder(root.right)
def right_rotate(self, z):
y = z.left
T3 = y.right
y.right = z
z.left = T3
z.height = 1 + max(self.get_height(z.left), self.get_height(z.right))
y.height = 1 + max(self.get_height(y.left), self.get_height(y.right))
return y
def left_rotate(self, z):
y = z.right
T3 = y.left
y.left = z
z.right = T3
z.height = 1 + max(self.get_height(z.left), self.get_height(z.right))
y.height = 1 + max(self.get_height(y.left), self.get_height(y.right))
return y
def get_min(self, root):
while root.left is not None:
root = root.left
return root
def insert(self, root, key):
if root is None:
return Node(key)
elif root.val > key:
root.left = self.insert(root.left, key)
else:
root.right = self.insert(root.right, key)
root.height = 1 + max(self.get_height(root.left), self.get_height(root.right))
balance = self.get_balance(root)
# Left-left case
if balance > 1 and key < root.left.val:
return self.right_rotate(root)
# Right-right case
if balance < -1 and key > root.right.val:
return self.left_rotate(root)
# Left-right case
if balance > 1 and key > root.left.val:
root.left = self.left_rotate(root.left)
return self.right_rotate(root)
# Right-left case
if balance < -1 and key < root.right.val:
root.right = self.right_rotate(root.right)
return self.left_rotate(root)
return root
def delete(self, root, key):
if root is None:
return root
elif root.val > key:
root.left = self.delete(root.left, key)
elif root.val < key:
root.right = self.delete(root.right, key)
else:
if root.left is None:
temp = root.right
root = None
return temp
elif root.right is None:
temp = root.left
root = None
return temp
temp = self.get_min(root.right)
            root.val = temp.val
            root.right = self.delete(root.right, temp.val)
if root is None:
return root
root.height = 1 + max(self.get_height(root.left), self.get_height(root.right))
balance = self.get_balance(root)
# Left-left case
if balance > 1 and self.get_balance(root.left) >= 0:
return self.right_rotate(root)
# Right-right case
if balance < -1 and self.get_balance(root.right) <= 0:
            return self.left_rotate(root)
# Left-right case
if balance > 1 and self.get_balance(root.left) < 0:
root.left = self.left_rotate(root.left)
return self.right_rotate(root)
# Right-left case
if balance < -1 and self.get_balance(root.right) > 0:
root.right = self.right_rotate(root.right)
return self.left_rotate(root)
return root
if __name__=='__main__':
myTree = AVL_Tree()
root = None
nums = [9, 5, 10, 0, 6, 11, -1, 1, 2]
for num in nums:
root = myTree.insert(root, num)
# Preorder Traversal
print("AVL tree after insertion is: ", end = "")
myTree.preorder(root)
print('X')
key = 10
root = myTree.delete(root, key)
# Preorder Traversal
print("AVL tree after deletion is: ", end = "")
myTree.preorder(root)
print('X')
|
import numpy as np
from scipy.integrate import simps
def cog(input_data,wave,wave_axis,lande_factor=0,cpos = False):
'''
Calculates the velocity [and longitudinal field] of a profile or set of profiles using CoG technique.
    input_data: the input data may have dimensions of:
[wave] = 1D the program assumes Stokes I profile (one profile) - returns Vlos
[wave, Stokes] = 2D the program assumes Stokes I,Q,U,V [length 4] (one profile) - return Vlos + Blos
[wave, X , Y] = 3D the program assumes Stokes I only in an X and Y image - return Vlos
[wave, Stokes, X , Y] = 4D the program assumes Stokes I,Q,U,V in an X and Y image - return Vlos + Blos
wave: line central wavelength in Angstrom.
wave_axis = wavelength axis in Angstrom.
cpos: position where the continuum of the line is located. Default is last position.
    In case Stokes parameters are given, the user has to provide the lande_factor of the transition.
'''
# check dimensions of input
    # possibilities are:
ndim = input_data.ndim
if ndim == 1:
        l = input_data.shape[0]
print('Single Stokes I profile')
#check continuum position
try:
lpos = int(cpos)
except:
lpos = l
Ic = input_data[lpos-1]
t1 = wave_axis * ( Ic - input_data )
tc = ( Ic - input_data )
Itn = simps(t1, x=wave_axis) / simps(tc, x=wave_axis)
vlos = -(wave - Itn ) * 2.99792458e+5 / wave
return vlos
elif ndim == 2:
l,s = input_data.shape
print('Single Full Stokes profile')
if lande_factor==0:
            print('Warning: lande factor is zero')
#check continuum position
try:
lpos = int(cpos)
except:
lpos = l
Ic = input_data[lpos-1,0]
t1 = wave_axis * ( Ic - input_data[:,0] )
tc = ( Ic - input_data[:,0] )
Itn = simps(t1, x=wave_axis) / simps(tc, x=wave_axis)
vlos = -(wave - Itn ) * 2.99792458e+5 / wave
t_plus = input_data[:,0] + input_data[:,3]
t_minus = input_data[:,0] - input_data[:,3]
t1 = wave_axis * ( Ic - t_plus ) #Ic*0.5 ??
tc1 = Ic - t_plus
t2 = wave_axis * ( Ic - t_minus ) #Ic*0.5 ??
tc2 = Ic - t_minus
l_plus = simps(t1, x=wave_axis) / simps(tc1, x=wave_axis)
l_minus = simps(t2, x=wave_axis) / simps(tc2, x=wave_axis)
blos = (l_plus - l_minus) / 2 / 4.67e-13/ (lande_factor * wave**2)
return vlos,blos
elif ndim == 3:
l,sy,sx = input_data.shape
print('Multiple Stokes I profiles [l,x,y]: ', l, sx, sy )
#check continuum position
try:
lpos = int(cpos)
except:
lpos = l
Ic = input_data[lpos-1,:,:]
t1 = wave_axis[:,np.newaxis,np.newaxis] * ( Ic[np.newaxis,:,:] - input_data )
tc = ( Ic[np.newaxis,:,:] - input_data )
Itn = simps(t1, x=wave_axis,axis=0) / simps(tc, x=wave_axis,axis=0)
vlos = -(wave - Itn ) * 2.99792458e+5 / wave
return vlos
elif ndim == 4:
l,s,sy,sx = input_data.shape
print('Multiple Full Stokes profiles [l,stokes,x,y]: ', l, s, sx, sy )
if lande_factor==0:
            print('Warning: lande factor is zero')
#check continuum position
try:
lpos = int(cpos)
except:
lpos = l
Ic = input_data[lpos-1,0,:,:]
t1 = wave_axis[:,np.newaxis,np.newaxis] * ( Ic[np.newaxis,:,:] - input_data[:,0,:,:] )
tc = ( Ic[np.newaxis,:,:] - input_data[:,0,:,:] )
Itn = simps(t1, x=wave_axis,axis=0) / simps(tc, x=wave_axis,axis=0)
vlos = -(wave - Itn ) * 2.99792458e+5 / wave
t_plus = input_data[:,0,:,:] + input_data[:,3,:,:]
t_minus = input_data[:,0,:,:] - input_data[:,3,:,:]
t1 = wave_axis[:,np.newaxis,np.newaxis] * ( Ic[np.newaxis,:,:] - t_plus ) #Ic*0.5 ??
tc1 = Ic[np.newaxis,:,:] - t_plus
t2 = wave_axis[:,np.newaxis,np.newaxis] * ( Ic[np.newaxis,:,:] - t_minus ) #Ic*0.5 ??
tc2 = Ic[np.newaxis,:,:] - t_minus
l_plus = simps(t1, x=wave_axis,axis=0) / simps(tc1, x=wave_axis,axis=0)
l_minus = simps(t2, x=wave_axis,axis=0) / simps(tc2, x=wave_axis,axis=0)
blos = (l_plus - l_minus) / 2 / 4.67e-13/ (lande_factor * wave**2)
return vlos,blos
else:
        print('No input data or wrong dimensions', ndim)
return
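if __name__ == '__main__':
    # Hedged usage sketch: the 6173.34 A line, the 0.05 A red shift and the
    # Gaussian absorption profile below are illustrative values, not taken
    # from the original code; they should yield a vlos of a few km/s.
    wave_axis = np.linspace(6173.0, 6173.7, 50)
    profile = 1.0 - 0.5 * np.exp(-((wave_axis - 6173.39) / 0.05) ** 2)
    print('vlos [km/s]:', cog(profile, 6173.34, wave_axis))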
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request,FormRequest
class LonginspiderSpider(scrapy.Spider):
name = 'loginSpider'
allowed_domains = ['example.webscraping.com']
    # Crawl data that can only be fetched after the user has logged in
start_urls = ['http://example.webscraping.com/places/default/user/profile']
def parse(self, response):
        # Here `response` is the response returned for the URL in start_urls
if 'Welcome' in response.text:
            self.logger.info('Login successful')
yield {
'name':response.xpath("//tr[@id='auth_user_first_name__row']/td[2]/text()").get()+
response.xpath("//tr[@id='auth_user_last_name__row']/td[2]/text()").get(),
'email':response.xpath("//tr[@id='auth_user_email__row']/td[2]/text()").get()
}
    # -----------------------------Login----------------------------
    login_url = 'http://example.webscraping.com/places/default/user/login'  # page that requires login
    # Override the parent method so that login runs first, instead of the requests for start_urls
def start_requests(self):
yield Request(self.login_url,callback=self.login)
def login(self,response):
        # Parse callback for the login page: build a FormRequest object and submit the form
fd = {'email':'xzlmark@126.com','password':'123456'}
yield FormRequest.from_response(response,formdata=fd,callback=self.parse_login)
    # After a successful login, continue crawling the pages in start_urls.
def parse_login(self,response):
        # Note: here `response` is the page redirected to after login, http://example.webscraping.com/places/default/index#
        # The request below has to be issued again in order to reach the pages we want to scrape
yield from super().start_requests()
|
from django.contrib.sites.models import Site
from django.core.management.base import BaseCommand
from accounts.models import get_sentinel_user
from django.conf import settings
from core.models import SiteCustomization
class Command(BaseCommand):
def _create_site(self):
example_site = Site.objects.get(pk=1)
example_site.domain = settings.ALLOWED_HOSTS[0]
example_site.name = settings.CORE_DEFAULT_SITE_NAME
site_customization = SiteCustomization.objects.get_or_create(site=example_site)
example_site.save()
def _create_sentinel_user(self):
get_sentinel_user()
def handle(self, *args, **options):
self._create_site()
self._create_sentinel_user()
|
#!/usr/bin/env python
'''
Naive parallel algorithm of prefix sum
http://people.cs.vt.edu/yongcao/teaching/cs5234/spring2013/slides/Lecture10.pdf
'''
import threading
# NOTE: the multiprocessing module may be a better fit than threads here
import TestFunction
test_data = [2,6,2,3,5]
'''
Generic sum function
'''
def accumulate(in_list, amount, out_list, out_index):
sum = 0
for i in range(0, amount):
sum += in_list[i]
out_list[out_index] = sum
'''
What is prefix sum?
A = [2,6,2,3,5], then R = prefixSum(A) = [2,8,10,13,18] (inclusive prefix sums)
'''
def prefixSum(num_list):
# create new output holder of the same size
out = [0] * len(num_list)
jobs = []
for i in range(0, len(num_list)):
        # pass the callable and its arguments separately so the work actually runs in the thread
        thread = threading.Thread(target=accumulate, args=(num_list, i + 1, out, i))
jobs.append(thread)
for job in jobs:
job.start()
for job in jobs:
job.join()
return out
result = prefixSum(test_data)
# print(result)
TestFunction.Test(prefixSum, 64)
TestFunction.Test(prefixSum, 128)
TestFunction.Test(prefixSum, 256)
TestFunction.Test(prefixSum, 256)
TestFunction.Test(prefixSum, 512)
TestFunction.Test(prefixSum, 1024)
TestFunction.Test(prefixSum, 2048)
TestFunction.Test(prefixSum, 4096)
TestFunction.Test(prefixSum, 8192)
|
from __future__ import absolute_import
from .models import UEditor, UEditorField
from .widgets import AdminUEditor
__all__ = ['UEditor', 'UEditorField', 'AdminUEditor']
|
"""A launcher script for running algorithms."""
if __name__ == '__main__':
import argparse
import gym
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, required=True)
parser.add_argument('--alg', type=str, required=True)
known_args, unknown_args = parser.parse_known_args()
|
from enum import Enum
class KillType(Enum):
NONE = 0
CURRENT = 1
ALL = 2
def __bool__(self):
return self != KillType.NONE
# Python2 compatibility
__nonzero__ = __bool__
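if __name__ == '__main__':
    # Hedged usage sketch: the __bool__ override makes the enum usable directly
    # in conditionals -- NONE is falsy, CURRENT and ALL are truthy.
    assert not KillType.NONE
    assert KillType.CURRENT and KillType.ALL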
|
import numpy as np
import paddle
import paddle.nn.functional as F
from paddle import ParamAttr
from paddle.nn import Layer
from paddle.nn import Conv2D
from paddle.nn.initializer import XavierUniform
from paddle.regularizer import L2Decay
from ppdet.core.workspace import register, serializable
@register
@serializable
class FPN(Layer):
def __init__(self,
in_channels,
out_channel,
min_level=0,
max_level=4,
spatial_scale=[0.25, 0.125, 0.0625, 0.03125],
has_extra_convs=False,
use_c5=True,
relu_before_extra_convs=True):
super(FPN, self).__init__()
self.min_level = min_level
self.max_level = max_level
self.spatial_scale = spatial_scale
self.has_extra_convs = has_extra_convs
self.use_c5 = use_c5
self.relu_before_extra_convs = relu_before_extra_convs
self.lateral_convs = []
self.fpn_convs = []
fan = out_channel * 3 * 3
self.num_backbone_stages = len(spatial_scale)
self.num_outs = self.max_level - self.min_level + 1
self.highest_backbone_level = self.min_level + self.num_backbone_stages - 1
for i in range(self.min_level, self.highest_backbone_level + 1):
if i == 3:
lateral_name = 'fpn_inner_res5_sum'
else:
lateral_name = 'fpn_inner_res{}_sum_lateral'.format(i + 2)
in_c = in_channels[i]
lateral = self.add_sublayer(
lateral_name,
Conv2D(
in_channels=in_c,
out_channels=out_channel,
kernel_size=1,
weight_attr=ParamAttr(
initializer=XavierUniform(fan_out=in_c)),
bias_attr=ParamAttr(
learning_rate=2., regularizer=L2Decay(0.))))
self.lateral_convs.append(lateral)
fpn_name = 'fpn_res{}_sum'.format(i + 2)
fpn_conv = self.add_sublayer(
fpn_name,
Conv2D(
in_channels=out_channel,
out_channels=out_channel,
kernel_size=3,
padding=1,
weight_attr=ParamAttr(
initializer=XavierUniform(fan_out=fan)),
bias_attr=ParamAttr(
learning_rate=2., regularizer=L2Decay(0.))))
self.fpn_convs.append(fpn_conv)
# add extra conv levels for RetinaNet(use_c5)/FCOS(use_p5)
if self.has_extra_convs and self.num_outs > self.num_backbone_stages:
for lvl in range(self.highest_backbone_level + 1, self.max_level + 1): # P6 P7 ...
if lvl == self.highest_backbone_level + 1 and self.use_c5:
in_c = in_channels[self.highest_backbone_level]
else:
in_c = out_channel
extra_fpn_name = 'fpn_{}'.format(lvl + 2)
extra_fpn_conv = self.add_sublayer(
extra_fpn_name,
Conv2D(
in_channels=in_c,
out_channels=out_channel,
kernel_size=3,
stride=2,
padding=1,
weight_attr=ParamAttr(
initializer=XavierUniform(fan_out=fan)),
bias_attr=ParamAttr(
learning_rate=2., regularizer=L2Decay(0.))))
self.fpn_convs.append(extra_fpn_conv)
def forward(self, body_feats):
laterals = []
used_backbone_levels = len(self.spatial_scale)
for i in range(used_backbone_levels):
laterals.append(self.lateral_convs[i](body_feats[i]))
used_backbone_levels = len(self.spatial_scale)
for i in range(used_backbone_levels - 1, 0, -1):
upsample = F.interpolate(
laterals[i],
scale_factor=2.,
mode='nearest', )
laterals[i - 1] += upsample
fpn_output = []
for lvl in range(self.min_level, self.highest_backbone_level + 1):
i = lvl - self.min_level
fpn_output.append(self.fpn_convs[i](laterals[i]))
spatial_scale = self.spatial_scale
if self.num_outs > len(fpn_output):
# use max pool to get more levels on top of outputs (Faster R-CNN, Mask R-CNN)
if not self.has_extra_convs:
fpn_output.append(F.max_pool2d(fpn_output[-1], 1, stride=2))
spatial_scale = spatial_scale + [spatial_scale[-1] * 0.5]
# add extra conv levels for RetinaNet(use_c5)/FCOS(use_p5)
else:
if self.use_c5:
extra_source = body_feats[-1]
else:
extra_source = fpn_output[-1]
fpn_output.append(self.fpn_convs[used_backbone_levels](extra_source))
spatial_scale = spatial_scale + [spatial_scale[-1] * 0.5]
for i in range(used_backbone_levels + 1, self.num_outs):
if self.relu_before_extra_convs:
fpn_output.append(self.fpn_convs[i](F.relu(fpn_output[-1])))
else:
fpn_output.append(self.fpn_convs[i](fpn_output[-1]))
spatial_scale = spatial_scale + [spatial_scale[-1] * 0.5]
return fpn_output, spatial_scale
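# --- Hedged usage sketch (channel counts and feature-map sizes are illustrative) ---
# With the default levels (min_level=0, max_level=4) the module expects four
# backbone feature maps whose spatial size halves at each stage, e.g. for a
# ResNet-style backbone:
#
#   fpn = FPN(in_channels=[256, 512, 1024, 2048], out_channel=256)
#   feats = [paddle.rand([1, c, s, s])
#            for c, s in [(256, 64), (512, 32), (1024, 16), (2048, 8)]]
#   outputs, scales = fpn(feats)  # five maps: four FPN levels plus a max-pooled extra level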
|
def __chooseObjectFromList(ctx, objects, attribute):
# TODO: use entropy
random = ctx.random
temperature = ctx.temperature
weights = [
temperature.getAdjustedValue(
getattr(o, attribute)
)
for o in objects
]
return random.weighted_choice(objects, weights)
def chooseUnmodifiedObject(ctx, attribute, inObjects):
workspace = ctx.workspace
objects = [o for o in inObjects if o.string != workspace.modified]
return __chooseObjectFromList(ctx, objects, attribute)
def chooseNeighbor(ctx, source):
workspace = ctx.workspace
objects = [o for o in workspace.objects if o.beside(source)]
return __chooseObjectFromList(ctx, objects, "intraStringSalience")
def chooseDirectedNeighbor(ctx, source, direction):
slipnet = ctx.slipnet
workspace = ctx.workspace
if direction == slipnet.left:
objects = [o for o in workspace.objects
if o.string == source.string
and source.leftIndex == o.rightIndex + 1]
else:
objects = [o for o in workspace.objects
if o.string == source.string
and o.leftIndex == source.rightIndex + 1]
return __chooseObjectFromList(ctx, objects, 'intraStringSalience')
|
# -*- coding: utf-8 -*-
# python.
import datetime
# ------------------------------------------------------------
# django.
from django.shortcuts import render_to_response
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
# django::decorators
from django.contrib.auth.decorators import login_required
from django.views.generic import DetailView
# ------------------------------------------------------------
# 3rd party.
# ------------------------------------------------------------
# ddtcms.
from news.models import News
from news.forms import NewsForm
from news.models import Attachment
# ------------------------------------------------------------
# config.
#
# ------------------------------------------------------------
class NewsDetailView(DetailView):
model = News
context_object_name = 'news'
template_name = 'news/news_detail.html'
def get_object(self):
# Call the superclass
object = super(NewsDetailView, self).get_object()
        # Increment the view counter
object.views += 1
object.save()
# Return the object
return object
from xheditor.forms import ImageForm
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
@login_required
def post(request, category = "", success_url=None,
form_class=NewsForm,
template_name='news/news_post.html',
extra_context=None):
if request.method == 'POST':
form = form_class(data=request.POST, files=request.FILES)
if form.is_valid():
instance = form.save(commit=False)
instance.deliverer = request.user
instance.save()
form.save_m2m()
instance.check_content_attachments() # check attachments in the content, and save to attachment table
return HttpResponseRedirect(instance.get_absolute_url())
else:
initial={
'deliverer':request.user.id,
}
form = form_class(initial=initial)
if extra_context is None:
extra_context = {}
context = RequestContext(request)
for key, value in extra_context.items():
context[key] = callable(value) and value() or value
return render_to_response(template_name,
{ 'form': form,
},
context_instance=context)
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
This file is used to define the node of graph and associated base types.
"""
from enum import Enum
class NodeTypeEnum(Enum):
"""Node type enum. The following types are new to our custom."""
NAME_SCOPE = 'name_scope'
POLYMERIC_SCOPE = 'polymeric_scope'
PARAMETER = 'Parameter'
CONST = 'Const'
class Node:
"""
Define a node object.
Args:
name (str): Name of new node.
node_id (str): The id of this node, and node id is unique in graph.
"""
def __init__(self, name, node_id):
self._node_id = node_id
self._name = name
self._type = ""
self._attr = dict()
self._input = dict()
self._output_i = -1
self._output = {}
self._polymeric_input = {}
self._polymeric_output = {}
self._polymeric_scope_name = ""
self._subnode_count = 0
self._name_scope = ""
self.shape = []
def to_dict(self):
"""Converts the node object to dictionary format."""
return {
'name': self._name,
'type': self._type,
'attr': self._attr,
'input': self._input,
'output_i': self._output_i,
'output': self._output,
'polymeric_input': self._polymeric_input,
'polymeric_output': self._polymeric_output,
'subnode_count': self._subnode_count,
'polymeric_scope_name': self._polymeric_scope_name
}
@property
def node_id(self):
"""The id of this node, and id is unique in graph."""
return self._node_id
@property
def name(self):
"""Get node name."""
return self._name
@name.setter
def name(self, name):
"""Set node name."""
self._name = name
@property
def node_type(self):
"""Get node type."""
return self._type
@node_type.setter
def node_type(self, node_type):
"""Set node type."""
self._type = node_type
@property
def attr(self):
"""Get node attr."""
return self._attr
def update_attr(self, attr_dict):
"""
Update node attr.
Args:
attr_dict (dict[str, str]): Format is {'<key>': '<value>'}.
"""
self._attr.update(attr_dict)
@property
def input(self):
"""
Get all input of current node.
Returns:
dict[str, dict], format is {'<src_name>': {'shape': [], 'edge_type', 'scope'}}.
"""
return self._input
def update_input(self, input_dict):
"""
Update input.
Args:
input_dict (dict[str, dict]): Format is {'<src_name>': {'shape': [], 'edge_type', 'scope'}}.
"""
self._input.update(input_dict)
@property
def output_i(self):
"""The memory address of this node when it is in run time."""
return self._output_i
@output_i.setter
def output_i(self, output_i):
"""Set memory address."""
self._output_i = output_i
@property
def polymeric_input(self):
"""
The polymeric input is the input of the polymeric nodes.
Returns:
dict[str, dict], format is {'<src_name>': {'edge_type': '<value>'}}.
"""
return self._polymeric_input
def update_polymeric_input(self, polymeric_input):
"""The polymeric input is the input of the polymeric nodes."""
self._polymeric_input.update(polymeric_input)
@property
def output(self):
"""The output node of this node."""
return self._output
def update_output(self, output):
"""
Update output node.
Args:
output (dict[str, TypedDict('NodeType', {'type': str})]): Format
is {"<node_name>": {"type": "<node type>"}}.
"""
self._output.update(output)
@property
def polymeric_output(self):
"""Get polymeric output."""
return self._polymeric_output
def update_polymeric_output(self, polymeric_output):
"""
Update polymeric output.
Args:
            polymeric_output (dict[str, dict]): Format is {dst_node.polymeric_scope_name:
{'edge_type': EdgeTypeEnum.data}}).
"""
self._polymeric_output.update(polymeric_output)
@property
def polymeric_scope_name(self):
"""Get polymeric scope name."""
return self._polymeric_scope_name
@polymeric_scope_name.setter
def polymeric_scope_name(self, name):
"""Set polymeric scope name."""
self._polymeric_scope_name = name
@property
def subnode_count(self):
"""The sub node count of this node, if this node is a scope node, this count will not be zero."""
return self._subnode_count
@subnode_count.setter
def subnode_count(self, count):
"""Set sub node count."""
self._subnode_count = count
@property
def name_scope(self):
"""Get name scope of this node."""
return self._name_scope
@name_scope.setter
def name_scope(self, name_scope):
"""Set name scope."""
self._name_scope = name_scope
def __str__(self):
return f'<Node, name: {self._name}, type: {self._type}>'
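if __name__ == '__main__':
    # Hedged usage sketch: the node name, id and attribute below are
    # illustrative only.
    node = Node(name='conv1', node_id='1')
    node.node_type = NodeTypeEnum.NAME_SCOPE.value
    node.update_attr({'kernel_size': '3x3'})
    print(node)            # <Node, name: conv1, type: name_scope>
    print(node.to_dict())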
|
"""
Slixmpp: The Slick XMPP Library
Copyright (C) 2010 Nathanael C. Fritz, Lance J.T. Stout
This file is part of Slixmpp.
See the file LICENSE for copying permission.
"""
import logging
import threading
from slixmpp import Iq
from slixmpp.exceptions import XMPPError, IqError, IqTimeout
from slixmpp.xmlstream import JID
from slixmpp.plugins.xep_0030 import DiscoInfo, DiscoItems
log = logging.getLogger(__name__)
class StaticDisco(object):
"""
While components will likely require fully dynamic handling
of service discovery information, most clients and simple bots
only need to manage a few disco nodes that will remain mostly
static.
StaticDisco provides a set of node handlers that will store
static sets of disco info and items in memory.
Attributes:
nodes -- A dictionary mapping (JID, node) tuples to a dict
containing a disco#info and a disco#items stanza.
xmpp -- The main Slixmpp object.
"""
def __init__(self, xmpp, disco):
"""
Create a static disco interface. Sets of disco#info and
disco#items are maintained for every given JID and node
combination. These stanzas are used to store disco
information in memory without any additional processing.
Arguments:
xmpp -- The main Slixmpp object.
"""
self.nodes = {}
self.xmpp = xmpp
self.disco = disco
self.lock = threading.RLock()
def add_node(self, jid=None, node=None, ifrom=None):
"""
Create a new set of stanzas for the provided
JID and node combination.
Arguments:
jid -- The JID that will own the new stanzas.
node -- The node that will own the new stanzas.
"""
with self.lock:
if jid is None:
jid = self.xmpp.boundjid.full
if node is None:
node = ''
if ifrom is None:
ifrom = ''
if isinstance(ifrom, JID):
ifrom = ifrom.full
if (jid, node, ifrom) not in self.nodes:
self.nodes[(jid, node, ifrom)] = {'info': DiscoInfo(),
'items': DiscoItems()}
self.nodes[(jid, node, ifrom)]['info']['node'] = node
self.nodes[(jid, node, ifrom)]['items']['node'] = node
def get_node(self, jid=None, node=None, ifrom=None):
with self.lock:
if jid is None:
jid = self.xmpp.boundjid.full
if node is None:
node = ''
if ifrom is None:
ifrom = ''
if isinstance(ifrom, JID):
ifrom = ifrom.full
if (jid, node, ifrom) not in self.nodes:
self.add_node(jid, node, ifrom)
return self.nodes[(jid, node, ifrom)]
def node_exists(self, jid=None, node=None, ifrom=None):
with self.lock:
if jid is None:
jid = self.xmpp.boundjid.full
if node is None:
node = ''
if ifrom is None:
ifrom = ''
if isinstance(ifrom, JID):
ifrom = ifrom.full
if (jid, node, ifrom) not in self.nodes:
return False
return True
# =================================================================
# Node Handlers
#
# Each handler accepts four arguments: jid, node, ifrom, and data.
# The jid and node parameters together determine the set of info
# and items stanzas that will be retrieved or added. Additionally,
# the ifrom value allows for cached results when results vary based
# on the requester's JID. The data parameter is a dictionary with
# additional parameters that will be passed to other calls.
#
# This implementation does not allow different responses based on
# the requester's JID, except for cached results. To do that,
# register a custom node handler.
def supports(self, jid, node, ifrom, data):
"""
Check if a JID supports a given feature.
The data parameter may provide:
feature -- The feature to check for support.
local -- If true, then the query is for a JID/node
combination handled by this Slixmpp instance and
no stanzas need to be sent.
Otherwise, a disco stanza must be sent to the
                       remote JID to retrieve the info.
cached -- If true, then look for the disco info data from
the local cache system. If no results are found,
send the query as usual. The self.use_cache
setting must be set to true for this option to
be useful. If set to false, then the cache will
be skipped, even if a result has already been
                       cached. Defaults to true.
"""
feature = data.get('feature', None)
data = {'local': data.get('local', False),
'cached': data.get('cached', True)}
if not feature:
return False
try:
info = self.disco.get_info(jid=jid, node=node,
ifrom=ifrom, **data)
info = self.disco._wrap(ifrom, jid, info, True)
features = info['disco_info']['features']
return feature in features
except IqError:
return False
except IqTimeout:
return None
def has_identity(self, jid, node, ifrom, data):
"""
Check if a JID has a given identity.
The data parameter may provide:
category -- The category of the identity to check.
itype -- The type of the identity to check.
lang -- The language of the identity to check.
local -- If true, then the query is for a JID/node
combination handled by this Slixmpp instance and
no stanzas need to be sent.
Otherwise, a disco stanza must be sent to the
                       remote JID to retrieve the info.
cached -- If true, then look for the disco info data from
the local cache system. If no results are found,
send the query as usual. The self.use_cache
setting must be set to true for this option to
be useful. If set to false, then the cache will
be skipped, even if a result has already been
                       cached. Defaults to true.
"""
identity = (data.get('category', None),
data.get('itype', None),
data.get('lang', None))
data = {'local': data.get('local', False),
'cached': data.get('cached', True)}
try:
info = self.disco.get_info(jid=jid, node=node,
ifrom=ifrom, **data)
info = self.disco._wrap(ifrom, jid, info, True)
trunc = lambda i: (i[0], i[1], i[2])
return identity in map(trunc, info['disco_info']['identities'])
except IqError:
return False
except IqTimeout:
return None
def get_info(self, jid, node, ifrom, data):
"""
Return the stored info data for the requested JID/node combination.
The data parameter is not used.
"""
with self.lock:
if not self.node_exists(jid, node):
if not node:
return DiscoInfo()
else:
raise XMPPError(condition='item-not-found')
else:
return self.get_node(jid, node)['info']
def set_info(self, jid, node, ifrom, data):
"""
Set the entire info stanza for a JID/node at once.
The data parameter is a disco#info substanza.
"""
with self.lock:
self.add_node(jid, node)
self.get_node(jid, node)['info'] = data
def del_info(self, jid, node, ifrom, data):
"""
Reset the info stanza for a given JID/node combination.
The data parameter is not used.
"""
with self.lock:
if self.node_exists(jid, node):
self.get_node(jid, node)['info'] = DiscoInfo()
def get_items(self, jid, node, ifrom, data):
"""
Return the stored items data for the requested JID/node combination.
The data parameter is not used.
"""
with self.lock:
if not self.node_exists(jid, node):
if not node:
return DiscoItems()
else:
raise XMPPError(condition='item-not-found')
else:
return self.get_node(jid, node)['items']
def set_items(self, jid, node, ifrom, data):
"""
Replace the stored items data for a JID/node combination.
The data parameter may provide:
items -- A set of items in tuple format.
"""
with self.lock:
items = data.get('items', set())
self.add_node(jid, node)
self.get_node(jid, node)['items']['items'] = items
def del_items(self, jid, node, ifrom, data):
"""
Reset the items stanza for a given JID/node combination.
The data parameter is not used.
"""
with self.lock:
if self.node_exists(jid, node):
self.get_node(jid, node)['items'] = DiscoItems()
def add_identity(self, jid, node, ifrom, data):
"""
        Add a new identity to the JID/node combination.
The data parameter may provide:
category -- The general category to which the agent belongs.
          itype    -- A more specific designation within the category.
name -- Optional human readable name for this identity.
lang -- Optional standard xml:lang value.
"""
with self.lock:
self.add_node(jid, node)
self.get_node(jid, node)['info'].add_identity(
data.get('category', ''),
data.get('itype', ''),
data.get('name', None),
data.get('lang', None))
def set_identities(self, jid, node, ifrom, data):
"""
Add or replace all identities for a JID/node combination.
The data parameter should include:
identities -- A list of identities in tuple form:
(category, type, name, lang)
"""
with self.lock:
identities = data.get('identities', set())
self.add_node(jid, node)
self.get_node(jid, node)['info']['identities'] = identities
def del_identity(self, jid, node, ifrom, data):
"""
Remove an identity from a JID/node combination.
The data parameter may provide:
category -- The general category to which the agent belonged.
          itype    -- A more specific designation within the category.
name -- Optional human readable name for this identity.
lang -- Optional, standard xml:lang value.
"""
with self.lock:
if self.node_exists(jid, node):
self.get_node(jid, node)['info'].del_identity(
data.get('category', ''),
data.get('itype', ''),
data.get('name', None),
data.get('lang', None))
def del_identities(self, jid, node, ifrom, data):
"""
Remove all identities from a JID/node combination.
The data parameter is not used.
"""
with self.lock:
if self.node_exists(jid, node):
del self.get_node(jid, node)['info']['identities']
def add_feature(self, jid, node, ifrom, data):
"""
Add a feature to a JID/node combination.
The data parameter should include:
feature -- The namespace of the supported feature.
"""
with self.lock:
self.add_node(jid, node)
self.get_node(jid, node)['info'].add_feature(
data.get('feature', ''))
def set_features(self, jid, node, ifrom, data):
"""
Add or replace all features for a JID/node combination.
The data parameter should include:
features -- The new set of supported features.
"""
with self.lock:
features = data.get('features', set())
self.add_node(jid, node)
self.get_node(jid, node)['info']['features'] = features
def del_feature(self, jid, node, ifrom, data):
"""
Remove a feature from a JID/node combination.
The data parameter should include:
feature -- The namespace of the removed feature.
"""
with self.lock:
if self.node_exists(jid, node):
self.get_node(jid, node)['info'].del_feature(
data.get('feature', ''))
def del_features(self, jid, node, ifrom, data):
"""
Remove all features from a JID/node combination.
The data parameter is not used.
"""
with self.lock:
if not self.node_exists(jid, node):
return
del self.get_node(jid, node)['info']['features']
def add_item(self, jid, node, ifrom, data):
"""
Add an item to a JID/node combination.
The data parameter may include:
ijid -- The JID for the item.
inode -- Optional additional information to reference
non-addressable items.
name -- Optional human readable name for the item.
"""
with self.lock:
self.add_node(jid, node)
self.get_node(jid, node)['items'].add_item(
data.get('ijid', ''),
node=data.get('inode', ''),
name=data.get('name', ''))
def del_item(self, jid, node, ifrom, data):
"""
Remove an item from a JID/node combination.
The data parameter may include:
ijid -- JID of the item to remove.
inode -- Optional extra identifying information.
"""
with self.lock:
if self.node_exists(jid, node):
self.get_node(jid, node)['items'].del_item(
data.get('ijid', ''),
node=data.get('inode', None))
def cache_info(self, jid, node, ifrom, data):
"""
Cache disco information for an external JID.
The data parameter is the Iq result stanza
containing the disco info to cache, or
the disco#info substanza itself.
"""
with self.lock:
if isinstance(data, Iq):
data = data['disco_info']
self.add_node(jid, node, ifrom)
self.get_node(jid, node, ifrom)['info'] = data
def get_cached_info(self, jid, node, ifrom, data):
"""
Retrieve cached disco info data.
The data parameter is not used.
"""
with self.lock:
if not self.node_exists(jid, node, ifrom):
return None
else:
return self.get_node(jid, node, ifrom)['info']
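# --- Hedged usage sketch (the JID, node and feature below are illustrative) ---
# When an explicit JID is passed, the static node store can be exercised
# without a running Slixmpp client:
#
#   static = StaticDisco(xmpp=None, disco=None)
#   static.add_node(jid='user@example.com', node='commands')
#   static.get_node(jid='user@example.com', node='commands')['info'].add_feature(
#       'http://jabber.org/protocol/commands')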
|
def parameters(net, base_lr):
total_length = 0
default_lr_param_group = []
lr_mult_param_groups = {}
for m in net.modules():
# print(type(m), len(list(m.named_parameters(recurse=False))))
# print(list(m.named_parameters(recurse=False)))
total_length += len(list(m.parameters(recurse=False)))
if hasattr(m, 'lr_mult'):
lr_mult_param_groups.setdefault(m.lr_mult, [])
lr_mult_param_groups[m.lr_mult] += list(
m.parameters(recurse=False))
else:
default_lr_param_group += list(m.parameters(recurse=False))
param_list = [{
'params': default_lr_param_group
}] + [{
'params': p,
'lr': base_lr * lm
} for lm, p in lr_mult_param_groups.items()]
_total_length = len(list(net.parameters()))
assert total_length == _total_length, '{} vs {}'.format(
total_length, _total_length)
_total_length = sum([len(p['params']) for p in param_list])
assert total_length == _total_length, '{} vs {}'.format(
total_length, _total_length)
return param_list
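if __name__ == '__main__':
    # Hedged usage sketch: the two-layer net and the 0.1 multiplier are
    # illustrative; any sub-module tagged with an `lr_mult` attribute ends up
    # in its own optimizer group with a scaled learning rate.
    import torch
    net = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.Linear(4, 2))
    net[0].lr_mult = 0.1  # first layer trains at base_lr * 0.1
    optimizer = torch.optim.SGD(parameters(net, base_lr=0.01), lr=0.01)
    print([group['lr'] for group in optimizer.param_groups])  # [0.01, 0.001]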
|
##############################################################################
# Copyright 2018 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import logging
import sys
from pyquil.api._logger import logger
def exception_handler(exception_type, exception, traceback, debug_hook=sys.excepthook):
"""
This allows us to suppress tracebacks for UserMessageError outside of debug mode
by overriding the default exception handler.
"""
if logger.level <= logging.DEBUG or exception_type is not UserMessageError:
debug_hook(exception_type, exception, traceback)
sys.excepthook = exception_handler
class ApiError(RuntimeError):
def __init__(self, server_status, explanation):
super(ApiError, self).__init__(self, server_status)
self.server_status = server_status
self.explanation = explanation
def __repr__(self):
return repr(str(self))
def __str__(self):
return "{}\n{}".format(self.server_status, self.explanation)
class CancellationError(ApiError):
def __init__(self, server_status):
explanation = "Please try resubmitting the job again."
super(CancellationError, self).__init__(server_status, explanation)
class DeviceOfflineError(ApiError):
def __init__(self, server_status):
explanation = """
The device you requested is offline. Use the following code to check for the
currently available devices:
from pyquil.api import get_devices
print(get_devices())"""
super(DeviceOfflineError, self).__init__(server_status, explanation)
class DeviceRetuningError(ApiError):
def __init__(self, server_status):
explanation = """
The device you requested is temporarily down for retuning. Use the following
code to check for the currently available devices:
from pyquil.api import get_devices
print(get_devices())"""
super(DeviceRetuningError, self).__init__(server_status, explanation)
class InvalidInputError(ApiError):
def __init__(self, server_status):
explanation = """
The server returned the above error because something was wrong with the HTTP
request sent to it. This could be due to a bug in the server or a bug in your
code. If you suspect this to be a bug in pyQuil or Rigetti Forest, then please
describe the problem in a GitHub issue at:
https://github.com/rigetti/pyquil/issues"""
super(InvalidInputError, self).__init__(server_status, explanation)
class InvalidUserError(ApiError):
def __init__(self, server_status):
explanation = """
There was an issue validating your Forest account!
Have you run the `pyquil-config-setup` command yet?
If you do not yet have a Forest account then sign up for one at:
https://forest.rigetti.com"""
super(InvalidUserError, self).__init__(server_status, explanation)
class JobNotFoundError(ApiError):
def __init__(self, server_status):
explanation = """
The above job may have been deleted manually or due to some bug in the server.
If you suspect this to be a bug then please describe the problem in a Github
issue at:
https://github.com/rigetti/pyquil/issues"""
super(JobNotFoundError, self).__init__(server_status, explanation)
class MissingPermissionsError(ApiError):
def __init__(self, server_status):
explanation = """
Your account may not be whitelisted for QPU access. To request the appropriate
permissions please read the information located at:
https://forest.rigetti.com"""
super(MissingPermissionsError, self).__init__(server_status, explanation)
class QPUError(ApiError):
def __init__(self, server_status):
explanation = """
The QPU returned the above error. This could be due to a bug in the server or a
bug in your code. If you suspect this to be a bug in pyQuil or Rigetti Forest,
then please describe the problem in a GitHub issue at:
https://github.com/rigetti/pyquil/issues"""
super(QPUError, self).__init__(server_status, explanation)
class QVMError(ApiError):
def __init__(self, server_status):
explanation = """
The QVM returned the above error. This could be due to a bug in the server or a
bug in your code. If you suspect this to be a bug in pyQuil or Rigetti Forest,
then please describe the problem in a GitHub issue at:
https://github.com/rigetti/pyquil/issues"""
super(QVMError, self).__init__(server_status, explanation)
class QUILCError(ApiError):
def __init__(self, server_status):
explanation = """
QUILC returned the above error. This could be due to a bug in the server or a
bug in your code. If you suspect this to be a bug in pyQuil or Rigetti Forest,
then please describe the problem in a GitHub issue at:
https://github.com/rigetti/pyquil/issues"""
super(QUILCError, self).__init__(server_status, explanation)
class TooManyQubitsError(ApiError):
def __init__(self, server_status):
explanation = """
You requested too many qubits on the QVM. More qubits are available when you use
the queue. Pass the use_queue parameter to QVMConnection to enable additional
qubits (however, each program will take longer to run). For example:
qvm = QVMConnection(use_queue=True)
qvm.run(twenty_qubit_program)
See https://go.rigetti.com/connections for more info."""
super(TooManyQubitsError, self).__init__(server_status, explanation)
class UserMessageError(Exception):
"""
A special class of error which only displays its traceback when the program
is run in debug mode (eg, with `LOG_LEVEL=DEBUG`).
The purpose of this is to improve the user experience, reducing noise in
the case of errors for which the cause is known.
"""
def __init__(self, message):
if logger.level <= logging.DEBUG:
super().__init__(message)
else:
logger.error(message)
class UnknownApiError(ApiError):
def __init__(self, server_status):
explanation = """
The server has failed to return a proper response. Please describe the problem
and copy the above message into a GitHub issue at:
https://github.com/rigetti/pyquil/issues"""
super(UnknownApiError, self).__init__(server_status, explanation)
# NB: Some errors are not included here if they are only returned by async endpoints
# The source of truth for this mapping is the _errors.py file on the server
error_mapping = {
'device_offline': DeviceOfflineError,
'device_retuning': DeviceRetuningError,
'invalid_input': InvalidInputError,
'invalid_user': InvalidUserError,
'job_not_found': JobNotFoundError,
'missing_permissions': MissingPermissionsError,
'quilc_error': QUILCError,
'qvm_error': QVMError,
}
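# --- Hedged usage sketch (the 'qvm_error' key and message are illustrative) ---
# A server response that carries one of the keys above could be turned into
# the matching exception roughly like this:
#
#   error_cls = error_mapping.get('qvm_error', UnknownApiError)
#   raise error_cls('QVM execution failed')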
|
import ray
@ray.remote(num_cpus=8)
def f():
print('blah')
return True
ray.init(address="auto")
[f.remote() for _ in range(100)]
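# Hedged usage note: the list comprehension above only submits the tasks; to
# block until all 100 tasks finish and collect their return values, one would
# normally wrap it in ray.get, e.g.
#
#   results = ray.get([f.remote() for _ in range(100)])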
|
import logging
import os
class Utility():
"""This class is the utility for named entity recognition."""
def __init__(self, debug_mode=False, verbose_mode=False, callback_status=None, callback_progress=None):
"""Creates Utility instance.
Args:
            bool *debug_mode*: toggle logger level to DEBUG
            bool *verbose_mode*: toggle logger level to INFO
function *callback_status*: callback function that message about status can be passed to
function *callback_progress*: callback function that message about progress can be passed to
"""
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s')
self.debug = debug_mode
self.verbose = verbose_mode
self.logger = logging.info if self.verbose else logging.debug
if self.verbose:
logging.root.setLevel(logging.INFO)
if self.debug:
logging.root.setLevel(logging.DEBUG)
self.logger('Debug mode is on')
self.callback_status = callback_status
self.callback_progress = callback_progress
logging.debug('Utility class has been initialized')
def __del__(self):
"""Destructor."""
pass
def __enter__(self):
"""Enter `with`."""
return self
def __exit__(self, ex_type, ex_value, ex_traceback):
"""Exit `with`."""
pass
def push_message(self, message, callback_function):
"""Passes message to callback_function.
Args:
str *message*: message to pass
function *callback_function*: function to take *message* as an argument
"""
if callback_function is not None:
callback_function(message)
def compile_dict_specs(self, fields):
"""Reshapes list of fields' specifications into dict used by other members of Utility class.
Returns new dict with specifications.
Args:
list *fields*: list of fields (columns)
Each member of *fields* list must be a dict as follows: {
'name': 'str name of attribute',
'include': bool True for including this column else False,
'delimiter': 'str delimiter in case column stores concatenated lists',
'id_flag': bool True if column stores entity ID else False,
'normalizer_flag': bool True if column stores string normalizer tag else False,
'value_flag': bool True if column stores string label to recognize else False
}
"""
logging.debug('Compiling specs')
specs = {'fields': {}, 'id': None, 'tokenizer': None, 'value': None}
for i in range(0, len(fields)):
field = fields[i]
if not field['include']:
continue
specs['fields'][field['name']] = (i, field['delimiter'], field['normalizer_flag'], field['value_flag'])
if field['id_flag']:
specs['id'] = specs['fields'][field['name']]
if field['normalizer_flag']:
specs['tokenizer'] = specs['fields'][field['name']]
if field['value_flag']:
specs['value'] = specs['fields'][field['name']]
logging.debug('Done compiling specs')
return specs
def insert_node(self, label, label_id, entity_id, subtrie, specs, columns, model):
"""Inserts string into trie structure represented by dict object.
Args:
str *label*: string to insert
int *label_id*: ID of the label
int *entity_id*: ID of the entity given label belongs to
dict *subtrie*: object representing the trie
dict *specs*: dictionary specifications
list *columns*: list of values associated with the entity
*model*: instance of Model class handling the trie and metadata
NB: only works with uncompressed trie.
"""
for character in label:
if character not in subtrie:
subtrie[character] = {}
subtrie = subtrie[character]
model.store_attributes(label_id, entity_id, subtrie, specs, columns)
def remove_node(self, model, label, subtrie, prev_length=0):
"""Removes string from trie structure represented by dict object.
Args:
Model *model*: instance of Model class handling the trie and metadata
str *label*: string to remove
dict *subtrie*: object representing the trie
            int *prev_length*: length of substring found in the trie (used with recursion)
NB: only works with uncompressed trie.
"""
if label:
head, tail = label[0], label[1:]
current_length = int(len(subtrie))
next_length, bottom = self.remove_node(model, tail, subtrie=subtrie[head], prev_length=current_length)
if bottom and next_length > 1:
bottom = False
elif bottom and (current_length > 1 or not prev_length):
del subtrie[head]
bottom = False
return current_length, bottom
else:
del subtrie[model.ENTITY_KEY]
return len(subtrie) + 1, True
def ignore_node(self, model, label):
"""Looks up *label* in a given *model* and hooks a special tag to the leaf in case *label* is found.
        A tagged label will not be recognized by the Utility.spot_entities() function.
Args:
Model *model*: Model instance to look up
str *label*: string to tag
"""
label_length = int(len(label))
string_so_far = ''
character_index = 0
for section in model[model.DICTIONARY_KEY]:
content = section[model.CONTENT_KEY]
for tokenizer_key in content:
trie = content[tokenizer_key]
for character_index in range(0, label_length):
string_so_far += label[character_index]
if string_so_far in trie:
trie = trie[string_so_far]
string_so_far = ''
if character_index == label_length - 1 and model.ENTITY_KEY in trie and string_so_far == '':
trie[model.IGNORE_KEY] = []
def make_recognizer(self, model, filename, specs, word_separator, item_limit, compressed, column_separator, column_enclosure, tokenizer_option):
"""Reads tab-delimited text file, populates dict objects representing tries, and fills database associated with a given Model instance according to provided specs.
Returns tuple(list *tries*, dict *line_numbers*) where *tries* are populated dicts representing tries, *line_numbers* is dict that maps line numbers from the text file to internally generated entity IDs.
Args:
Model *model*: Model instance to populate
str *filename*: path and name of tab-delimited text file with the content
dict *specs*: specifications for columns in the text file
str *word_separator*: string considered to be the word delimiter
int *item_limit*: maximum number of rows to stuff in a single trie of a model
bool *compressed*: whether given tries must be compressed
str *column_separator*: delimiter to split columns
str *column_enclosure*: any string that columns are supposed to be trimmed of
int *tokenizer_option*: tokenizer mode (see documentation for normalization for details)
"""
# TODO: review for refactoring
self.logger('Making recognizer using %s' % (filename))
self.push_message('Making recognizer using %s' % (filename), self.callback_status)
entity_ids = {}
line_numbers = {}
total_bytes = os.path.getsize(filename) + 1
increment_bytes = int(total_bytes / 100) if total_bytes > 100 else total_bytes
this_progress_position = 0
last_progress_position = 0
if model.connection is not None:
rows = model.cursor.execute('select 0 where not exists (select name from sqlite_master where type = \'table\' and name = \'attrs\');')
for _ in rows:
model.create_recognizer_schema(model.cursor)
break
with open(filename, mode='r', encoding='utf8') as f:
ret = []
line_count = 0
line_number = 0
chars_read = 0
trie = model.next_trie(specs, compressed, tokenizer_option, word_separator)
for line in f:
chars_read += int(len(line))
this_progress_position = int(chars_read / increment_bytes)
if this_progress_position != last_progress_position:
last_progress_position = this_progress_position
self.push_message(int(100 * chars_read / total_bytes), self.callback_progress)
if item_limit > 0 and line_count == item_limit:
packed = model.pack_trie(trie, compressed)
ret.append(packed)
trie = model.next_trie(specs, compressed, tokenizer_option, word_separator)
self.logger('Lines read: %d' % (line_count))
line_count = 0
columns, internal_id = model.get_dictionary_line(specs, entity_ids, line_numbers, line_number, line, column_separator, column_enclosure)
synonym, normalizer_name = model.get_dictionary_synonym(columns, specs, word_separator, tokenizer_option)
subtrie = trie[model.CONTENT_KEY][normalizer_name]
self.insert_node(synonym, line_number, internal_id, subtrie, specs, columns, model)
line_count += 1
line_number += 1
if line_count > 0 and len(trie) > 3:
packed = model.pack_trie(trie, compressed)
ret.append(packed)
self.logger('Lines read: %d' % (line_count))
if model.connection is not None:
model.connection.commit()
model.cursor.execute('create index ix_attrs_n_attr_name_attr_value on attrs (n asc, attr_name asc, attr_value asc);')
self.logger('Recognizer completed.')
return ret, line_numbers
def make_keywords(self, model, filename, specs, line_numbers, word_separator, disambiguate_all, column_separator, column_enclosure, tokenizer_option):
"""Generates dictionary of keywords for a given model using tab-delimited text file that contains entity IDs and synonyms. Typically, for a given model it is the same file `make_recognizer()` function is processing.
        Returns a dict object that can be plugged into the model.
Args:
Model *model*: Model instance to use
str *filename*: path and name of tab-delimited text file with the content
dict *specs*: specifications for columns in the text file
dict *line_numbers*: dict that maps line numbers from the text file to internally generated entity IDs
str *word_separator*: string considered to be the word delimiter
bool *disambiguate_all*: whether generate keywords for all entities or only for those having conflicting synonyms
str *column_separator*: delimiter to split columns
str *column_enclosure*: any string that columns are supposed to be trimmed of
int *tokenizer_option*: tokenizer mode (see documentation for normalization for details)
"""
self.logger('Making keywords using %s... ' % (filename))
self.push_message('Making keywords from {0}'.format(filename), self.callback_status)
total_bytes = os.path.getsize(filename) + 1
increment_bytes = int(total_bytes / 100) if total_bytes > 100 else total_bytes
this_progress_position = 0
last_progress_position = 0
entity_ids = {}
internal_id_map = {}
synonyms = {}
with open(filename, mode='r', encoding='utf8') as f:
line_count = 0
chars_read = 0
for line in f:
chars_read += int(len(line))
this_progress_position = int(chars_read / increment_bytes)
if this_progress_position != last_progress_position:
last_progress_position = this_progress_position
self.push_message(int(100 * chars_read / total_bytes), self.callback_progress)
columns, internal_id = model.get_dictionary_line(specs, entity_ids, line_numbers, line_count, line, column_separator, column_enclosure)
internal_id_map[line_count] = internal_id
synonym = model.get_dictionary_synonym(columns, specs, word_separator, tokenizer_option)[0]
if synonym not in synonyms:
synonyms[synonym] = set()
synonyms[synonym].add(internal_id)
line_count += 1
overlapping_ids = {}
for s in synonyms:
if len(synonyms[s]) > 1 or disambiguate_all:
for internal_id in synonyms[s]:
overlapping_ids[internal_id] = set()
synonyms.clear()
entity_ids.clear()
with open(filename, mode='r', encoding='utf8') as f:
line_count = 0
for line in f:
columns, internal_id = model.get_dictionary_line(specs, entity_ids, line_numbers, line_count, line, column_separator, column_enclosure)
if internal_id in overlapping_ids:
synonym = model.get_dictionary_synonym(columns, specs, word_separator, tokenizer_option)[0]
tokens = synonym.split(word_separator)
overlapping_ids[internal_id] = overlapping_ids[internal_id].union(set(tokens))
line_count += 1
# TODO: only leave tokens unique for a given internal_id (?)
keywords = {model.CONTENT_KEY: overlapping_ids, model.INTERNAL_ID_KEY: internal_id_map}
self.logger('Done compiling keywords.')
return keywords
def compile_model(self, model, filename, fields, word_separator, column_separator, column_enclosure, compressed=True, item_limit=0, tokenizer_option=0, include_keywords=False, disambiguate_all=False):
"""Populates given Model instance with tries and keywords.
Args:
Model *model*: Model instance to populate
str *filename*: path and name of tab-delimited text file with the content
list *fields*: list of dict objects defining the columns in the text file
str *word_separator*: string considered to be the word delimiter
str *column_separator*: delimiter to split columns
str *column_enclosure*: string that column values are trimmed of
bool *compressed*: whether given tries must be compressed
int *item_limit*: maximum number of rows to stuff in a single trie of a model
int *tokenizer_option*: tokenizer mode (see documentation for normalization for details)
bool *include_keywords*: whether to generate keywords at all
bool *disambiguate_all*: whether to generate keywords for all entities or only for those having conflicting synonyms
Data structure for *fields* argument (also see compile_dict_specs() function):
[
{
'name': 'str name of attribute',
'include': bool True for including this column else False,
'delimiter': 'str delimiter in case column stores concatenated lists',
'id_flag': bool True if column stores entity ID else False,
'normalizer_flag': bool True if column stores string normalizer tag else False,
'value_flag': bool True if column stores string label to recognize else False
}
]
"""
specs = self.compile_dict_specs(fields)
tries, line_numbers = self.make_recognizer(model, filename, specs, word_separator, item_limit, compressed, column_separator, column_enclosure, tokenizer_option)
keywords = {model.CONTENT_KEY: {}, model.INTERNAL_ID_KEY: {}}
if include_keywords:
keywords = self.make_keywords(model, filename, specs, line_numbers, word_separator, disambiguate_all, column_separator, column_enclosure, tokenizer_option)
model[model.DICTIONARY_KEY] = tries
model[model.KEYWORDS_KEY] = keywords
return True
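# Illustrative sketch (not part of the original code): a minimal *fields*
# specification and compile call could look like the following, assuming
# `compiler` is an instance of this class, `model` is a Model instance, and
# 'dictionary.txt' is a hypothetical tab-delimited file with an ID column
# and a label column:
#
#   fields = [
#       {'name': 'entity_id', 'include': True, 'delimiter': None,
#        'id_flag': True, 'normalizer_flag': False, 'value_flag': False},
#       {'name': 'label', 'include': True, 'delimiter': None,
#        'id_flag': False, 'normalizer_flag': False, 'value_flag': True},
#   ]
#   compiler.compile_model(model, 'dictionary.txt', fields,
#                          word_separator=' ', column_separator='\t',
#                          column_enclosure='\n', include_keywords=True)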
def unpack_trie(self, model, packed_trie, compressed):
"""Unpacks compressed trie.
Returns dict object representing unpacked trie.
Args:
Model *model*: Model instance to use
dict *packed_trie*: trie to process
bool *compressed*: whether the given trie is compressed
"""
if not compressed or len(packed_trie) != 1:
return packed_trie
branches = [k for k in packed_trie.keys() if k not in [model.ENTITY_KEY]]
if not branches:
return packed_trie
radix = branches[0]
if len(radix) <= 1:
return packed_trie
unpacked_trie = {}
unpacked_trie_pointer = unpacked_trie
for character in radix[:-1]:
unpacked_trie_pointer[character] = {}
unpacked_trie_pointer = unpacked_trie_pointer[character]
unpacked_trie_pointer[radix[-1:]] = packed_trie[radix]
return unpacked_trie
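# Illustrative note (not part of the original code): a compressed node that
# stores a multi-character radix as a single key is expanded into a chain of
# single-character nodes, e.g.
#   {'abc': {...}}  ->  {'a': {'b': {'c': {...}}}}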
def unpack_attributes(self, model, cur, leaf_ids, include_query, exclude_query, process_exclude, attrs_out_query):
"""Loads attributes for internal IDs found in a leaf of a trie from a model's database using associated sqlite3.connect.cursor object.
Returns a dict object that maps internal IDs to their attributes.
Args:
sqlite3.connect.cursor *cur*: cursor to use for throwing queries
list *leaf_ids*: internal IDs found in a trie leaf
str *include_query*: part of SQL query to filter something in
str *exclude_query*: part of SQL query to filter something out
bool *process_exclude*: whether to use *exclude_query* at all
str *attrs_out_query*: part of SQL query that specifies which attributes to eventually return
"""
attributes = {}
if cur is None:
for n in leaf_ids:
if n not in attributes:
attributes[n] = {}
if 'ID' not in attributes[n]:
attributes[n]['ID'] = []
attributes[n]['ID'].append(model[model.INTERNAL_ID_KEY][n])
return attributes
include_attrs = set()
exclude_attrs = set()
for n in leaf_ids:
rows = cur.execute('select distinct n from attrs where n = %d %s;' % (n, include_query))
for row in rows:
include_attrs.add(int(row[0]))
if process_exclude:
for n in leaf_ids:
rows = cur.execute('select distinct n from attrs where n = %d %s;' % (n, exclude_query))
for row in rows:
exclude_attrs.add(int(row[0]))
ns = include_attrs - exclude_attrs
for n in ns:
rows = cur.execute('select attr_name, attr_value from attrs where n = %d%s;' % (n, attrs_out_query))
if n not in attributes:
attributes[n] = {}
for row in rows:
attr_name, attr_value = str(row[0]), str(row[1])
if attr_name not in attributes[n]:
attributes[n][attr_name] = []
attributes[n][attr_name].append(attr_value)
return attributes
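# Illustrative shape of the returned mapping (values are hypothetical):
#   {42: {'ID': ['GENE:1017'], 'type': ['gene']}}
# i.e. internal ID -> attribute name -> list of attribute values.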
def check_attrs(self, model, trie_leaf, cur, include_query, exclude_query, process_exclude, attrs_out_query):
"""Attaches attributes to a given trie leaf and returns it.
Args:
Model *model*: Model instance to use
dict *trie_leaf*: terminal node of a trie to attach attributes to
sqlite3.connect.cursor *cur*: cursor to use for throwing queries
str *include_query*: part of SQL query to filter something in
str *exclude_query*: part of SQL query to filter something out
bool *process_exclude*: whether to use *exclude_query* at all
str *attrs_out_query*: part of SQL query that specifies which attributes to eventually return
"""
this_trie_leaf = dict(trie_leaf)
this_trie_leaf[model.ATTRS_KEY] = self.unpack_attributes(model, cur, trie_leaf[model.ENTITY_KEY], include_query, exclude_query, process_exclude, attrs_out_query)
if int(len(this_trie_leaf[model.ATTRS_KEY])) == 0:
return {}
return this_trie_leaf
def spot_entities(self, model, source_string, normalizer_name, include_query='', exclude_query='', process_exclude=False, attrs_out_query='', progress_from=0, progress_to=100):
"""Zooms through a string, finds boundaries of synonyms stored in model's trie, and pulls associated attributes from the storage.
Returns list(list(tuple *datapoint*)) where datapoint is tuple(list *ids*, dict *attributes*, str *found_synonym*, int *begin*, int *end*) where *ids* are internal IDs of entities, *attributes* is dict {id_entity: {attribute: [value]}}, *found_synonym* is identified substring, *begin* and *end* are indexes of first and last character of recognized substring.
Args:
Model *model*: Model instance to use
str *source_string*: string to parse
str *normalizer_name*: name of normalization unit (used to pick the right trie from the model; supposed to match normalization unit applied to *source_string*)
str *include_query*: part of SQL query to filter something in
str *exclude_query*: part of SQL query to filter something out
bool *process_exclude*: whether to use *exclude_query* at all
str *attrs_out_query*: part of SQL query that specifies which attributes to eventually return
int *progress_from*: initial progress value to report
int *progress_to*: maximum progress value to report
Data structure for returned value:
[
(
[int internal_ids],
{
int internal_id: {str attribute_name: [str attribute_value]}
},
str identified_label,
int unmapped_begin,
int unmapped_end
)
]
"""
# TODO: review for refactoring
self.logger('Analyzing "%s"... ' % (source_string))
rets = []
this_progress_position = 0
last_progress_position = 0
total_tries = int(len(model[model.DICTIONARY_KEY]))
if total_tries == 0:
return rets
progress_share = progress_to - progress_from
trie_increment = int(progress_share / total_tries)
current_trie_index = 0
for trie in model[model.DICTIONARY_KEY]:
ret = []
word_separator = trie[model.WORD_SEPARATOR_KEY]
start_index, end_index, string_so_far = -1, 0, ''
reading_entity = source_string[0:1] != word_separator
trie_is_compressed = bool(trie[model.COMPRESSED_KEY])
subtrie = trie[model.CONTENT_KEY][normalizer_name]
shorter_alternative = None
current_index = 0
temporary_index = -1
total_length = int(len(source_string))
increment_chars = int(total_length / progress_share) if total_length > progress_share else total_length - 1
while current_index < total_length:
this_progress_position = int(current_index / increment_chars / total_tries)
if this_progress_position != last_progress_position:
last_progress_position = this_progress_position
self.push_message(int(progress_share * current_index / total_length / total_tries) + current_trie_index * trie_increment + progress_from, self.callback_progress)
if len(ret) > 0 and current_index < ret[-1][-1]:
current_index = ret[-1][-1]
if not reading_entity: # wait for word separator
character = source_string[current_index]
start_index = current_index
if character == word_separator:
reading_entity = True
end_index = start_index
else: # reading entity
end_index = current_index
character = source_string[current_index]
if character == word_separator and model.ENTITY_KEY in subtrie and model.IGNORE_KEY not in subtrie:
found_object = self.check_attrs(model, subtrie, model.cursor, include_query, exclude_query, process_exclude, attrs_out_query)
if found_object:
identified = found_object[model.ENTITY_KEY], found_object[model.ATTRS_KEY]
shorter_alternative = (identified[0], identified[1], string_so_far, start_index + 1, end_index)
if character in subtrie:
if character == word_separator and temporary_index == -1:
temporary_index = current_index
string_so_far += character
subtrie = self.unpack_trie(model, subtrie[character], trie_is_compressed)
else:
#if everything_or_nothing and current_index == total_length: return []
if character == word_separator or current_index == total_length: # - 1:
if model.ENTITY_KEY in subtrie and model.IGNORE_KEY not in subtrie:
found_object = self.check_attrs(model, subtrie, model.cursor, include_query, exclude_query, process_exclude, attrs_out_query)
if found_object:
identified = found_object[model.ENTITY_KEY], found_object[model.ATTRS_KEY]
ret.append((identified[0], identified[1], string_so_far, start_index + 1, end_index))
shorter_alternative = None
else:
if shorter_alternative:
ret.append(shorter_alternative)
shorter_alternative = None
else:
if shorter_alternative:
ret.append(shorter_alternative)
shorter_alternative = None
start_index = current_index
else:
if shorter_alternative:
ret.append(shorter_alternative)
shorter_alternative = None
if temporary_index == -1:
reading_entity = False
else:
current_index = temporary_index
temporary_index = -1
reading_entity = True
string_so_far = ''
start_index = current_index
subtrie = trie[model.CONTENT_KEY][normalizer_name]
current_index += 1
if model.ENTITY_KEY in subtrie and model.IGNORE_KEY not in subtrie:
found_object = self.check_attrs(model, subtrie, model.cursor, include_query, exclude_query, process_exclude, attrs_out_query)
if found_object:
identified = found_object[model.ENTITY_KEY], found_object[model.ATTRS_KEY]
ret.append((identified[0], identified[1], string_so_far, start_index + 1, current_index - 1))
elif shorter_alternative:
ret.append(shorter_alternative)
elif shorter_alternative:
ret.append(shorter_alternative)
rets += ret
current_trie_index += 1
self.push_message(progress_to, self.callback_progress)
self.logger('Done.')
return rets
def disambiguate(self, model, recognized, srcs, word_separator):
"""For a list of identified datapoints, weighs context of identified labels that belong to more than 1 entity and keeps heaviest ones.
Returns filtered list of identified datapoints.
Args:
Model *model*: Model instance to use
list *recognized*: identified datapoints
list *srcs*: list of all normalized values of original string (using all normalization units applied)
str *word_separator*: string to be considered a word separator
Data structure for *recognized* (input) and for returned value:
[
(
[int internal_ids],
{
int internal_id: {str attribute_name: [str attribute_value]}
},
int mapped_begin,
int mapped_end,
[int indexes_in_srcs],
[
(int unmapped_begin, int unmapped_end)
]
)
]
"""
_recognized = sorted(recognized, key=lambda x: x[2])
id_list = [[model[model.KEYWORDS_KEY][model.INTERNAL_ID_KEY][x] for x in rec[0] if x in model[model.KEYWORDS_KEY][model.INTERNAL_ID_KEY]] for rec in _recognized]
for k in range(len(id_list)):
ids = id_list[k]
if len(ids) < 2:
continue
si = {}
src = {}
ei = {}
tokens = {}
s_tokens = {}
for j in range(len(ids)):
si[ids[j]] = 0
src[ids[j]] = srcs[_recognized[k][4][j]]
ei[ids[j]] = len(src[ids[j]])
if k > 0:
# take map from normalizer [k-1] and remap location on map of normalizer[k] as a boundary
if _recognized[k][7][ids[j]][_recognized[k-1][3]][1] > si[ids[j]]:
si[ids[j]] = _recognized[k][7][ids[j]][_recognized[k-1][3]][1]
if k < len(id_list) - 1:
# take map from normalizer [k+1] and remap location on map of normalizer[k] as a boundary
if _recognized[k][7][ids[j]][_recognized[k+1][2]][0] < ei[ids[j]]:
ei[ids[j]] = _recognized[k][7][ids[j]][_recognized[k+1][2]][0]
tokens[ids[j]] = src[ids[j]][si[ids[j]]:ei[ids[j]]]
s_tokens[ids[j]] = set(tokens[ids[j]].split(word_separator))
tmp = {i: model[model.KEYWORDS_KEY][model.CONTENT_KEY][i] if i in model[model.KEYWORDS_KEY][model.CONTENT_KEY] else set() for i in ids}
kwd = {i: tmp[i] - tmp[j] for i in tmp for j in tmp if j != i}
winner_score = 0
winner_id = set()
kwd_score = {}
for i in kwd:
kwd_score[i] = len(kwd[i].intersection(s_tokens[i]))
if kwd_score[i] > winner_score:
winner_score = kwd_score[i]
winner_id.clear()
if kwd_score[i] == winner_score:
winner_id.add(i)
_recognized[k] = tuple([[x for x in _recognized[k][0] if model[model.KEYWORDS_KEY][model.INTERNAL_ID_KEY][x] in winner_id]] + [{x: _recognized[k][1][x] for x in _recognized[k][1] if model[model.KEYWORDS_KEY][model.INTERNAL_ID_KEY][x] in winner_id}] + list(_recognized[k])[2:])
return _recognized
def flatten_layers(self, model, layers):
"""Flattens list of lists of identified datapoints, invokes disambiguation, remaps label locations to the original string, reshapes the output.
Returns list(tuple *datapoint*) where *datapoint* is tuple(list *ids*, dict *attributes*, int *begin*, int *end*).
Args:
Model *model*: Model instance to use
list *layers*: list of identified datapoints
Data structure for *layers* (input):
[
(
(
[int normalized_positions], # indexes are original positions
[[int min_original_position, int max_original_position]], # indexes are normalized positions
),
[
(
[int internal_ids],
{int internal_id: {str attribute_name: [str attribute_value]}},
str identified_label,
int unmapped_begin,
int unmapped_end
)
],
str parsed_normalized_string
)
]
Returned data structure:
[
(
[int internal_ids],
{int internal_id: {str attribute_name: [str attribute_value]}},
int mapped_begin,
int mapped_end
)
]
"""
spans = {}
srcs = []
for i in range(0, len(layers)):
layer = layers[i]
_map = layer[0][0]
_r_map = layer[0][1]
_recognized = layer[1]
_src = layer[2]
srcs.append(_src)
for span in _recognized:
location = tuple([_map[span[3]], _map[span[4]]])
if location not in spans:
spans[location] = []
spans[location].append(tuple([span[0], span[1], [i] * len(span[0]), span[3], span[4], _map, _r_map]))
new_layers = []
for location in spans:
new_left = location[0]
new_right = location[1]
new_ids = []
new_attrs = {}
new_srcids = []
new_locations = []
new_map = {}
new_r_map = {}
for item in spans[location]:
new_ids += item[0]
new_attrs = {**new_attrs, **item[1]}
if model[model.KEYWORDS_KEY][model.INTERNAL_ID_KEY]:
new_srcids += item[2]
new_locations.append(tuple([item[3], item[4]]))
new_map.update({model[model.KEYWORDS_KEY][model.INTERNAL_ID_KEY][k]: item[5] for k in item[0]})
new_r_map.update({model[model.KEYWORDS_KEY][model.INTERNAL_ID_KEY][k]: item[6] for k in item[0]})
new_layers.append(tuple([new_ids, new_attrs, new_left, new_right, new_srcids, new_locations, new_map, new_r_map]))
if model[model.KEYWORDS_KEY][model.INTERNAL_ID_KEY]:
new_layers = self.disambiguate(model, new_layers, srcs, ' ')
ret = [x[0:4] for x in new_layers]
return ret
def flatten_spans(self, spans):
"""Transforms list of normalized tuples into one dict.
Returns dict {(int *begin*, int *end*): {str *attribute_name*: {str *attribute_value*}}}.
Args:
list *spans*: list of identified datapoints, as returned by flatten_layers() function
"""
ret = {}
all_entries = []
for span in spans:
_ids, _content, _left, _right = span[0], span[1], span[2], span[3]
for _id in _ids:
_attrs = _content[_id]
for _attr_name in _attrs:
for _attr_value in _attrs[_attr_name]:
all_entries.append(tuple([_left, _right, _attr_name, _attr_value]))
if len(all_entries) > 0:
all_entries = sorted(sorted(all_entries, key=lambda x: -x[1]), key=lambda x: x[0])
filtered_entries = [all_entries[0]]
for i in range(1, len(all_entries)):
q = all_entries[i]
if (filtered_entries[-1][0] <= q[0] < filtered_entries[-1][1] and filtered_entries[-1][0] < q[1] < filtered_entries[-1][1]) or (filtered_entries[-1][0] < q[0] < filtered_entries[-1][1] and filtered_entries[-1][0] < q[1] <= filtered_entries[-1][1]):
continue
filtered_entries.append(q)
for entry in filtered_entries:
_location, _attr_name, _attr_value = tuple([int(entry[0]), int(entry[1])]), str(entry[2]), str(entry[3])
if _location not in ret:
ret[_location] = {}
if _attr_name not in ret[_location]:
ret[_location][_attr_name] = set()
ret[_location][_attr_name].add(_attr_value)
return ret
def reduce_spans(self, segments):
"""Reduces overlapping segments by keeping longer ones or leftmost ones in case of equal length.
Returnes reduced list of tuples [(int *begin*, int *end*)].
Args:
set *segments*: set of tuples(int *begin*, int *end*)
"""
def intersects(segment1, segment2):
return segment2[0] >= segment1[0] and segment2[0] <= segment1[1]
def length(segment):
return segment[1] - segment[0]
sorted_segments = [[x] for x in sorted(sorted(segments, key=lambda x: x[1] - x[0]), key=lambda x: x[0])]
for i in range(len(sorted_segments) - 1):
if len(sorted_segments[i]) == 0:
continue
if intersects(sorted_segments[i][0], sorted_segments[i+1][0]):
if length(sorted_segments[i][0]) >= length(sorted_segments[i+1][0]):
sorted_segments[i+1] = sorted_segments[i]
sorted_segments[i] = []
elif length(sorted_segments[i][0]) < length(sorted_segments[i+1][0]):
recovered = False
for j in range(1, len(sorted_segments[i])):
if not intersects(sorted_segments[i][j], sorted_segments[i+1][0]):
sorted_segments[i][0] = sorted_segments[i][j]
recovered = True
break
if not recovered:
sorted_segments[i+1] += sorted_segments[i]
sorted_segments[i] = []
ret = [x[0] for x in sorted_segments if len(x) > 0]
return ret
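# Illustrative example (not part of the original code): overlapping segments
# keep the longer one (or, on a tie, the leftmost one), e.g.
#   reduce_spans({(0, 5), (3, 7), (10, 12)})  ->  [(0, 5), (10, 12)]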
def parse(self, model, source_string, attrs_where=None, attrs_out=None):
"""Wraps around all functions that normalize string, spot entities, disambiguate, and post-process the output.
Returns dict {(int *begin*, int *end*): {str *attribute_name*: {str attribute_value}}}.
Args:
Model *model*: Model instance to use
str *source_string*: source string to parse
dict *attrs_where*: specifications for filtering model's data used for recognition
list *attrs_out*: list of attribute names to output
Data structure for *attrs_where*:
{
'+': {str attribute_name: {str attribute_value}}, # if indicated, only entities that have these attributes will be considered
'-': {str attribute_name: {str attribute_value}} # if indicated, entities that have these attributes will not be considered
}
"""
attributes = attrs_where
if attributes is None:
attributes = {}
for action in ['+', '-']:
if action not in attributes:
attributes[action] = {}
process_exclude = False
include_set, include_query = set(), ''
for attr_name in attributes['+']:
for attr_value in attributes['+'][attr_name]:
include_set.add('(attr_name = \'' + attr_name.replace('\'', '\'\'') + '\' and attr_value = \'' + attr_value.replace('\'', '\'\'') + '\')')
if len(include_set) > 0:
include_query = 'and (' + ' or '.join(include_set) + ')'
exclude_set, exclude_query = set(), ''
for attr_name in attributes['-']:
for attr_value in attributes['-'][attr_name]:
exclude_set.add('(attr_name = \'' + attr_name.replace('\'', '\'\'') + '\' and attr_value = \'' + attr_value.replace('\'', '\'\'') + '\')')
if len(exclude_set) > 0:
exclude_query = 'and (' + ' or '.join(exclude_set) + ')'
process_exclude = True
attrs_out_query = ''
if attrs_out is not None and len(attrs_out) > 0:
attrs_out_query = ' and attr_name in (\'%s\')' % ('\', \''.join([x.replace('\'', '\'\'') for x in attrs_out]))
self.logger('Parsing text...')
self.push_message('Parsing text', self.callback_status)
rets = []
total_normalizers = int(len(model[model.NORMALIZER_KEY]))
try:
assert total_normalizers > 0, 'Model does not have normalization units'
except Exception as e:
model.destroy()
raise e
spot_progress_share = int(100 / total_normalizers)
current_normalizer_index = 0
for normalizer_name in model[model.NORMALIZER_KEY]:
normalized_string = model[model.NORMALIZER_KEY][normalizer_name].normalize(source_string, model[model.WORD_SEPARATOR_KEY], model[model.TOKENIZER_OPTION_KEY])
character_map = model[model.NORMALIZER_KEY][normalizer_name].result['map']
r_character_map = model[model.NORMALIZER_KEY][normalizer_name].result['r_map']
progress_from = current_normalizer_index * spot_progress_share
progress_to = (current_normalizer_index + 1) * spot_progress_share
parsed = self.spot_entities(model, normalized_string, normalizer_name, include_query, exclude_query, process_exclude, attrs_out_query, progress_from=progress_from, progress_to=progress_to)
rets.append(((character_map, r_character_map), parsed, normalized_string))
current_normalizer_index += 1
layers = self.flatten_layers(model, rets)
spans = self.flatten_spans(layers)
locations = self.reduce_spans(set(spans.keys()))
ret = {location: spans[location] for location in locations}
self.logger('Done parsing text.')
return ret
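# Illustrative usage sketch (names and values are hypothetical, not part of
# the original code): assuming `engine` is an instance of this class and
# `model` is a compiled Model, a filtered parse could look like:
#
#   where = {'+': {'type': {'drug'}}, '-': {'status': {'obsolete'}}}
#   spans = engine.parse(model, 'Aspirin 100 mg daily',
#                        attrs_where=where, attrs_out=['ID'])
#   for (begin, end), attrs in spans.items():
#       print(begin, end, attrs)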
|
def invert(image):
for row in range(image.shape[0]):
for col in range(image.shape[1]):
for c in range(3):
image[row][col][c] = -image[row][col][c]
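# Note (added comment): the triple loop above negates the first three channels
# of `image` in place, one element at a time; with a NumPy array the same
# effect could be achieved with a vectorized expression such as
#   image[..., :3] = -image[..., :3]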
|
'''
TMC, XTI, tsproj parsing utilities
'''
import collections
import logging
import os
import pathlib
import re
import types
import lxml
import lxml.etree
from .code import (get_pou_call_blocks, program_name_from_declaration,
variables_from_declaration, determine_block_type)
# Registry of all TwincatItem-based classes
TWINCAT_TYPES = {}
USE_NAME_AS_PATH = object()
logger = logging.getLogger(__name__)
SLN_PROJECT_RE = re.compile(
r"^Project.*?=\s*\"(.*?)\",\s*\"(.*?)\"\s*,\s*(.*?)\"\s*$",
re.MULTILINE
)
def parse(fn, *, parent=None):
'''
Parse a given tsproj, xti, or tmc file.
Returns
-------
item : TwincatItem
'''
fn = case_insensitive_path(fn)
with open(fn, 'rb') as f:
tree = lxml.etree.parse(f)
root = tree.getroot()
return TwincatItem.parse(root, filename=fn, parent=parent)
def projects_from_solution(fn, *, exclude=None):
'''
Find project filenames from a solution.
Parameters
----------
fn : str, pathlib.Path
Solution filename
exclude : list or None
Exclude certain extensions. Defaults to excluding .tcmproj
'''
with open(fn, 'rt') as f:
solution_text = f.read()
if exclude is None:
exclude = ('.tcmproj', )
projects = [
pathlib.PureWindowsPath(match[1])
for match in SLN_PROJECT_RE.findall(solution_text)
]
solution_path = pathlib.Path(fn).parent
return [(solution_path / pathlib.Path(project)).absolute()
for project in projects
if project.suffix not in exclude
]
def element_to_class_name(element, *, parent=None):
'''
Determine the Python class name for an element
Parameters
----------
element : lxml.etree.Element
Returns
-------
class_name : str
base_class : class
'''
tag = strip_namespace(element.tag)
extension = os.path.splitext(element.base)[-1].lower()
if tag == 'Project':
if isinstance(parent, TcSmProject):
return 'TopLevelProject', TwincatItem
if 'File' in element.attrib:
# File to be loaded will contain PrjFilePath
return 'Plc', TwincatItem
if 'PrjFilePath' in element.attrib:
return 'Plc', TwincatItem
if isinstance(parent, (Plc, TcSmItem)):
return 'PlcProject', TwincatItem
return 'Project', TwincatItem
if tag == 'Plc':
return 'TopLevelPlc', TwincatItem
if tag == 'Symbol':
base_type, = element.xpath('BaseType')
return f'{tag}_' + base_type.text, Symbol
if extension == '.tmc':
return tag, _TmcItem
return tag, TwincatItem
def _determine_path(base_path, name, class_hint):
'''
Determine the path to load child XTI items from, given a base path and the
class load path hint.
Parameters
----------
base_path : pathlib.Path
The path from which to start, e.g., the child_load_path of the parent
object
name : str
The name of the parent object, to be used when USE_NAME_AS_PATH is
specified
class_hint : pathlib.Path or USE_NAME_AS_PATH
A hint path as to where to load child objects from
'''
if not class_hint:
return base_path
path = base_path / (name
if class_hint is USE_NAME_AS_PATH
else class_hint)
if path.exists() and path.is_dir():
return path
return base_path # the fallback
class TwincatItem:
_load_path_hint = ''
def __init__(self, element, *, parent=None, name=None, filename=None):
'''
Represents a single TwinCAT project XML Element, for either tsproj,
xti, tmc, etc.
Parameters
----------
element : lxml.etree.Element
parent : TwincatItem, optional
name : str, optional
filename : pathlib.Path, optional
'''
self.child_load_path = _determine_path(
filename.parent, name, self._load_path_hint)
self.attributes = dict(element.attrib)
self._children = []
self.children = None # populated later
self.comments = []
self.element = element
self.filename = filename
self.name = name
self.parent = parent
self.tag = element.tag
self.text = element.text.strip() if element.text else None
self._add_children(element)
self.post_init()
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
TWINCAT_TYPES[cls.__name__] = cls
def post_init(self):
'Hook for subclasses; called after __init__'
...
@property
def root(self):
'The top-level TwincatItem (likely TcSmProject)'
parent = self
while parent.parent is not None:
parent = parent.parent
return parent
@property
def path(self):
'Path of classes required to get to this instance'
hier = [self]
parent = self.parent
while parent:
hier.append(parent)
parent = parent.parent
return '/'.join(item.__class__.__name__ for item in reversed(hier))
def find_ancestor(self, cls):
'''
Find an ancestor of this instance
Parameters
----------
cls : TwincatItem
'''
parent = self.parent
while parent and not isinstance(parent, cls):
parent = parent.parent
return parent
def get_relative_path(self, path):
'''
Get an absolute path relative to this item
Returns
-------
path : pathlib.Path
'''
root = pathlib.Path(self.filename).parent
rel_path = pathlib.PureWindowsPath(path)
return (root / rel_path).resolve()
def find(self, cls, *, recurse=True):
'''
Find any descendents that are instances of cls
Parameters
----------
cls : TwincatItem
'''
for child in self._children:
if isinstance(child, cls):
yield child
if not recurse:
continue
yield from child.find(cls, recurse=recurse)
def _add_children(self, element):
'A hook for adding all children'
for child_element in element.iterchildren():
if isinstance(child_element, lxml.etree._Comment):
self.comments.append(child_element.text)
continue
self._add_child(child_element)
by_tag = separate_by_classname(self._children)
self.children = types.SimpleNamespace(**by_tag)
for key, value in by_tag.items():
if not hasattr(self, key):
setattr(self, key, value)
def _add_child(self, element):
child = self.parse(element, parent=self, filename=self.filename)
if child is None:
return
self._children.append(child)
if not hasattr(child, '_squash_children'):
return
for grandchild in list(child._children):
if any(isinstance(grandchild, squashed_type)
for squashed_type in child._squash_children):
self._children.append(grandchild)
grandchild.container = child
grandchild.parent = self
child._children.remove(grandchild)
@staticmethod
def parse(element, parent=None, filename=None):
'''
Parse an XML element and return a TwincatItem
Parameters
----------
element : lxml.etree.Element
parent : TwincatItem, optional
The parent to assign to the new element
filename : str, optional
The filename the element originates from
Returns
-------
item : TwincatItem
'''
classname, base = element_to_class_name(element, parent=parent)
try:
cls = TWINCAT_TYPES[classname]
except KeyError:
# Dynamically create and register new TwincatItem-based types!
cls = type(classname, (base, ), {})
if 'File' in element.attrib:
# This is defined directly in the file. Instantiate it as-is:
filename = element.attrib['File']
return cls.from_file(filename, parent=parent)
# Two ways for names to come in:
# 1. a child has a tag of 'Name', with its text being our name
names = [child.text for child in element.iterchildren()
if child.tag == 'Name' and child.text]
name = names[0] if names else None
# 2. the child has an attribute key 'Name'
try:
name = element.attrib['Name'].strip()
except KeyError:
...
# A special identifier __FILENAME__ means to replace the name
if name == '__FILENAME__':
name = filename.stem
return cls(element, parent=parent, filename=filename, name=name)
def _repr_info(self):
'__repr__ information'
return {
'name': self.name,
'attributes': self.attributes,
'children': self._children,
'text': self.text,
}
def __repr__(self):
info = ' '.join(f'{key}={value!r}'
for key, value in self._repr_info().items()
if value)
return f'<{self.__class__.__name__} {info}>'
@classmethod
def from_file(cls, filename, parent):
base_path = _determine_path(
base_path=parent.child_load_path,
name=parent.name,
class_hint=cls._load_path_hint
)
return parse(base_path / filename, parent=parent)
class _TwincatProjectSubItem(TwincatItem):
'[XTI/TMC/...] A base class for items that appear in virtual PLC projects'
@property
def plc(self):
'The nested project (virtual PLC project) associated with the item'
return self.find_ancestor(Plc)
class TcModuleClass(_TwincatProjectSubItem):
'[TMC] The top-level TMC file'
DataTypes: list
def get_data_type(self, type_name):
data_types = self.DataTypes[0].types
try:
return data_types[type_name]
except KeyError:
return BuiltinDataType(type_name)
class OwnerA(TwincatItem):
'[XTI] For a Link between VarA and VarB, this is the parent of VarA'
class OwnerB(TwincatItem):
'[XTI] For a Link between VarA and VarB, this is the parent of VarB'
class Link(TwincatItem):
'[XTI] Links between NC/PLC/IO'
def post_init(self):
self.a = (self.find_ancestor(OwnerA).name, self.attributes.get('VarA'))
self.b = (self.find_ancestor(OwnerB).name, self.attributes.get('VarB'))
self.link = [self.a, self.b]
def __repr__(self):
return f'<Link a={self.a} b={self.b}>'
class TopLevelProject(TwincatItem):
'[tsproj] Containing Io, System, Motion, TopLevelPlc, etc.'
@property
def ams_id(self):
'''
The AMS ID of the configured target
'''
return self.attributes.get('TargetNetId', '')
@property
def target_ip(self):
'''
A guess of the target IP, based on the AMS ID
'''
ams_id = self.ams_id
if ams_id.endswith('.1.1'):
return ams_id[:-4]
return ams_id # :(
class PlcProject(TwincatItem):
...
class TcSmProject(TwincatItem):
'[tsproj] A top-level TwinCAT tsproj'
def post_init(self):
self.top_level_plc, = list(self.find(TopLevelPlc, recurse=False))
@property
def plcs(self):
'The virtual PLC projects contained in this TcSmProject'
yield from self.top_level_plc.projects.values()
@property
def plcs_by_name(self):
'The virtual PLC projects in a dictionary keyed by name'
return {plc.name: plc for plc in self.plcs}
@property
def plcs_by_link_name(self):
'The virtual PLC projects in a dictionary keyed by link name'
return {plc.link_name: plc for plc in self.plcs}
class TcSmItem(TwincatItem):
'''
[XTI] Top-level container for XTI files
Visual Studio-level configuration changes the project layout significantly,
with individual XTI files being created for axes, PLCs, etc. instead of
updating the original tsproj file.
The additional, optional level of indirection here can make walking the
tree frustrating. So, we squash these TcSmItems - skipping over them in the
hierarchy - and push their children into their parents.
The original container `TcSmItem` is accessible in those items through the
`.container` attribute.
'''
_squash_children = [TwincatItem]
class TopLevelPlc(TwincatItem):
'[XTI] Top-level PLC, contains one or more projects'
PlcProjectContainer: list
def post_init(self):
# TODO: this appears to cover all bases, but perhaps it could be
# refactored out
if hasattr(self, 'Plc'):
projects = self.Plc
elif hasattr(self, 'TcSmItem'):
projects = self.TcSmItem[0].PlcProject
else:
raise RuntimeError('Unable to find project?')
self.projects = {
project.name: project
for project in projects
}
self.projects_by_link_name = {
project.link_name: project
for project in projects
}
# Fix to squash hack: squashed Mappings belong to the individual
# projects, not this TopLevelPlc
for mapping in getattr(self, 'Mappings', []):
for project in projects:
if project.filename == mapping.filename:
self._children.remove(mapping)
project.Mappings = [mapping]
project._children.append(mapping)
continue
class Plc(TwincatItem):
'[tsproj] A project which contains Plc, Io, Mappings, etc.'
_load_path_hint = pathlib.Path('_Config') / 'PLC'
def post_init(self):
self.link_name = (self.Instance[0].name
if hasattr(self, 'Instance')
else self.name)
self.namespaces = {}
self.project_path = self.get_relative_path(
self.attributes['PrjFilePath'])
self.tmc_path = self.get_relative_path(
self.attributes['TmcFilePath'])
self.project = (parse(self.project_path, parent=self)
if self.project_path.exists()
else None)
self.tmc = (parse(self.tmc_path, parent=self)
if self.tmc_path.exists()
else None)
self.source_filenames = [
self.project.get_relative_path(compile.attributes['Include'])
for compile in self.find(Compile)
if 'Include' in compile.attributes
]
self.source = {
str(fn.relative_to(self.project.filename.parent)):
parse(fn, parent=self)
for fn in self.source_filenames
}
def get_source_items(attr):
for plc_obj in self.source.values():
try:
source_obj = getattr(plc_obj, attr, [None])[0]
except IndexError:
continue
if source_obj and source_obj.name:
yield (source_obj.name, source_obj)
self.pou_by_name = dict(sorted(get_source_items('POU')))
self.gvl_by_name = dict(sorted(get_source_items('GVL')))
self.dut_by_name = dict(sorted(get_source_items('DUT')))
self.namespaces.update(self.pou_by_name)
self.namespaces.update(self.gvl_by_name)
self.namespaces.update(self.dut_by_name)
@property
def links(self):
return [link
for mapping in self.Mappings
for link in mapping.find(Link, recurse=False)
]
@property
def port(self):
'''
The ADS port for the project
'''
return self.attributes.get('AmsPort', '')
@property
def ams_id(self):
'''
The AMS ID of the configured target
'''
return self.find_ancestor(TopLevelProject).ams_id
@property
def target_ip(self):
'''
A guess of the target IP, based on the AMS ID
'''
return self.find_ancestor(TopLevelProject).target_ip
def find(self, cls, *, recurse=True):
yield from super().find(cls, recurse=recurse)
if self.project is not None:
yield from self.project.find(cls, recurse=recurse)
for _, ns in self.namespaces.items():
if isinstance(ns, cls):
yield ns
if self.tmc is not None:
yield from self.tmc.find(cls, recurse=recurse)
def get_source_code(self):
'Get the full source code, DUTs, GVLs, and then POUs'
source_items = (
list(self.dut_by_name.items()) +
list(self.gvl_by_name.items()) +
list(self.pou_by_name.items())
)
return '\n'.join(
item.get_source_code()
for item in source_items
if hasattr(item, 'get_source_code')
)
class Compile(TwincatItem):
'''
[XTI] A code entry in a nested/virtual PLC project
File to load is marked with 'Include'
May be TcTTO, TcPOU, TcDUT, GVL, etc.
'''
class _TmcItem(_TwincatProjectSubItem):
'[TMC] Any item found in a TMC file'
@property
def tmc(self):
'The TcModuleClass (TMC) associated with the item'
return self.find_ancestor(TcModuleClass)
class DataTypes(_TmcItem):
'[TMC] Container of DataType'
def post_init(self):
self.types = {
dtype.qualified_type: dtype
for dtype in self.find(DataType)
}
self.types['Tc2_System.T_MaxString'] = T_MaxString()
class Type(_TmcItem):
'[TMC] DataTypes/DataType/SubItem/Type'
@property
def qualified_type(self):
'The base type, including the namespace'
namespace = self.attributes.get("Namespace", None)
return f'{namespace}.{self.text}' if namespace else self.text
class EnumInfo(_TmcItem):
'[TMC] Enum values, strings, and associated comments'
Text: list
Enum: list
Comment: list
@property
def enum_text(self):
return self.Text[0].text
@property
def enum_value(self):
try:
return self.Enum[0].text
except AttributeError:
...
logger.warning(
'Encountered a known issue with the TwinCAT-generated TMC file: '
'%s is missing an Enum value in section %s; this may cause '
'database generation errors.', self.parent.name, self.path
)
return ''
@property
def enum_comment(self):
return self.Comment[0].text if hasattr(self, 'Comment') else ''
class ArrayInfo(_TmcItem):
'[TMC] Array information for a DataType or Symbol'
LBound: list
UBound: list
Elements: list
def post_init(self):
lbound = (int(self.LBound[0].text)
if hasattr(self, 'LBound')
else 0)
elements = (int(self.Elements[0].text)
if hasattr(self, 'Elements')
else 1)
ubound = (int(self.UBound[0].text)
if hasattr(self, 'UBound')
else lbound + elements - 1)
self.bounds = (lbound, ubound)
self.elements = elements
class ExtendsType(_TmcItem):
'[TMC] A marker of inheritance / extension, found on DataType'
@property
def qualified_type(self):
if 'Namespace' in self.attributes:
return f'{self.attributes["Namespace"]}.{self.text}'
return self.text
class DataType(_TmcItem):
'[TMC] A DataType with SubItems, likely representing a structure'
Name: list
EnumInfo: list
SubItem: list
@property
def qualified_type(self):
name_attrs = self.Name[0].attributes
if 'Namespace' in name_attrs:
return f'{name_attrs["Namespace"]}.{self.name}'
return self.name
@property
def is_complex_type(self):
return True
def walk(self, condition=None):
if self.is_enum:
# Ensure something is yielded for this type - it doesn't
# appear possible to have SubItems or use ExtendsType
# in this case.
yield []
return
extends_types = [
self.tmc.get_data_type(ext_type.qualified_type)
for ext_type in getattr(self, 'ExtendsType', [])
]
for extend_type in extends_types:
yield from extend_type.walk(condition=condition)
if hasattr(self, 'SubItem'):
for subitem in self.SubItem:
for item in subitem.walk(condition=condition):
yield [subitem] + item
@property
def enum_dict(self):
return {int(item.enum_value): item.enum_text
for item in getattr(self, 'EnumInfo', [])}
@property
def is_enum(self):
return len(getattr(self, 'EnumInfo', [])) > 0
@property
def is_array(self):
return len(getattr(self, 'ArrayInfo', [])) > 0
@property
def is_string(self):
return False
@property
def array_info(self):
try:
return self.ArrayInfo[0]
except (AttributeError, IndexError):
return None
@property
def length(self):
array_info = self.array_info
return array_info.elements if array_info else 1
class SubItem(_TmcItem):
'[TMC] One element of a DataType'
Type: list
@property
def data_type(self):
return self.tmc.get_data_type(self.qualified_type_name)
@property
def array_info(self):
try:
return self.ArrayInfo[0]
except (AttributeError, IndexError):
return None
@property
def type(self):
'The base type'
return self.Type[0].text
@property
def qualified_type_name(self):
'The base type, including the namespace'
type_ = self.Type[0]
namespace = type_.attributes.get("Namespace", None)
return f'{namespace}.{type_.text}' if namespace else type_.text
def walk(self, condition=None):
if condition is None or condition(self):
yield from self.data_type.walk(condition=condition)
class Module(_TmcItem):
'''
[TMC] A Module
Contains generated symbols, data areas, and miscellaneous properties.
'''
@property
def ads_port(self):
'The ADS port assigned to the Virtual PLC'
try:
return self._ads_port
except AttributeError:
app_prop, = [prop for prop in self.find(Property)
if prop.name == 'ApplicationName']
port_text = app_prop.value
self._ads_port = int(port_text.split('Port_')[1])
return self._ads_port
class Property(_TmcItem):
'''
[TMC] A property containing a key/value pair
Examples of TMC properties::
ApplicationName (used for the ADS port)
ChangeDate
GeneratedCodeSize
GlobalDataSize
'''
Value: list
@property
def key(self):
'The property key name'
return self.name
@property
def value(self):
'The property value text'
return self.Value[0].text if hasattr(self, 'Value') else self.text
def __repr__(self):
return f'<Property {self.key}={self.value!r}>'
class BuiltinDataType:
'[TMC] A built-in data type such as STRING, INT, REAL, etc.'
def __init__(self, typename, *, length=1):
if '(' in typename:
typename, length = typename.split('(')
length = int(length.rstrip(')'))
self.name = typename
self.length = length
@property
def is_complex_type(self):
return False
@property
def enum_dict(self):
return {int(item.enum_value): item.enum_text
for item in getattr(self, 'EnumInfo', [])}
@property
def is_enum(self):
return len(getattr(self, 'EnumInfo', [])) > 0
@property
def is_string(self):
return self.name == 'STRING'
@property
def is_array(self):
# TODO: you can have an array of STRING(80), for example
# the length would be reported as 80 here, and the DataType would have
# ArrayInfo
return self.length > 1 and not self.is_string
def walk(self, condition=None):
yield []
class T_MaxString(BuiltinDataType):
def __init__(self):
super().__init__(typename='STRING', length=255)
class Symbol(_TmcItem):
'''
[TMC] A basic Symbol type
This is dynamically subclassed into new classes for ease of implementation
and searching. For example, a function block defined as `FB_MotionStage`
will become `Symbol_FB_MotionStage`.
'''
BitOffs: list
BitSize: list
BaseType: list
@property
def type_name(self):
'The base type'
return self.BaseType[0].text
@property
def qualified_type_name(self):
'The base type, including the namespace'
type_ = self.BaseType[0]
namespace = type_.attributes.get("Namespace", None)
return f'{namespace}.{type_.text}' if namespace else type_.text
@property
def data_type(self):
return self.tmc.get_data_type(self.qualified_type_name)
@property
def module(self):
'The TMC Module containing the Symbol'
return self.find_ancestor(Module)
@property
def info(self):
return dict(name=self.name,
bit_size=self.BitSize[0].text,
type=self.type_name,
qualified_type_name=self.qualified_type_name,
bit_offs=self.BitOffs[0].text,
module=self.module.name,
is_pointer=self.is_pointer,
array_bounds=self.array_bounds,
summary_type_name=self.summary_type_name,
)
def walk(self, condition=None):
if condition is None or condition(self):
for item in self.data_type.walk(condition=condition):
yield [self] + item
@property
def array_info(self):
try:
return self.ArrayInfo[0]
except (AttributeError, IndexError):
return None
@property
def array_bounds(self):
try:
return self.array_info.bounds
except (AttributeError, IndexError):
return None
def get_links(self, *, strict=False):
sym_name = '^' + self.name.lower()
dotted_name = sym_name + '.'
plc = self.plc
plc_name = plc.link_name
for link in plc.links:
if any(owner == plc_name and
(var.lower().endswith(sym_name) or
not strict and dotted_name in var.lower())
for owner, var in link.link):
yield link
@property
def is_pointer(self):
type_ = self.BaseType[0]
pointer_info = type_.attributes.get("PointerTo", None)
return bool(pointer_info)
@property
def summary_type_name(self):
summary = self.qualified_type_name
if self.is_pointer:
summary = 'POINTER TO ' + summary
array_bounds = self.array_bounds
if array_bounds:
summary = 'ARRAY[{}..{}] OF '.format(*array_bounds) + summary
return summary
class Symbol_DUT_MotionStage(Symbol):
'[TMC] A customized Symbol, representing only DUT_MotionStage'
def _repr_info(self):
'__repr__ information'
repr_info = super()._repr_info()
# Add on the NC axis name
try:
repr_info['nc_axis'] = self.nc_axis.name
except Exception as ex:
repr_info['nc_axis'] = repr(ex)
return repr_info
@property
def program_name(self):
'`Main` of `Main.M1`'
return self.name.split('.')[0]
@property
def motor_name(self):
'`M1` of `Main.M1`'
return self.name.split('.')[1]
@property
def nc_to_plc_link(self):
'''
The Link for NcToPlc
That is, how the NC axis is connected to the DUT_MotionStage
'''
expected = '^' + self.name.lower() + '.axis.nctoplc'
links = [link
for link in self.plc.find(Link, recurse=False)
if expected in link.a[1].lower()
]
if not links:
raise RuntimeError(f'No NC link to DUT_MotionStage found for '
f'{self.name!r}')
link, = links
return link
@property
def nc_axis(self):
'The NC `Axis` associated with the DUT_MotionStage'
link = self.nc_to_plc_link
parent_name = link.parent.name.split('^')
if parent_name[0] == 'TINC':
parent_name = parent_name[1:]
task_name, axis_section, axis_name = parent_name
nc, = list(nc for nc in self.root.find(NC, recurse=False)
if nc.SafTask[0].name == task_name)
nc_axis = nc.axis_by_name[axis_name]
# link nc_axis and FB_MotionStage?
return nc_axis
class GVL(_TwincatProjectSubItem):
'[TcGVL] A Global Variable List'
@property
def declaration(self):
'The declaration code; i.e., the top portion in visual studio'
return self.Declaration[0].text
def get_source_code(self, *, close_block=True):
'The full source code - declaration only in the case of a GVL'
return self.declaration
class ST(_TwincatProjectSubItem):
'[TcDUT/TcPOU] Structured text'
class Implementation(_TwincatProjectSubItem):
'[TcDUT/TcPOU] Code implementation'
class Declaration(_TwincatProjectSubItem):
'[TcDUT/TcPOU/TcGVL] Code declaration'
class DUT(_TwincatProjectSubItem):
'[TcDUT] Data unit type (DUT)'
@property
def declaration(self):
'The declaration code; i.e., the top portion in visual studio'
return self.Declaration[0].text
def get_source_code(self, *, close_block=True):
'The full source code - declaration only in the case of a DUT'
return self.declaration
class Action(_TwincatProjectSubItem):
'[TcPOU] Code declaration for actions'
@property
def source_code(self):
return f'''\
ACTION {self.name}:
{self.implementation or ''}
END_ACTION'''
@property
def implementation(self):
'The implementation code; i.e., the bottom portion in visual studio'
impl = self.Implementation[0]
if hasattr(impl, 'ST'):
# NOTE: only ST for now
return impl.ST[0].text
class POU(_TwincatProjectSubItem):
'[XTI] A Program Organization Unit'
# TODO: may fail when mixed with ladder logic?
Declaration: list
Implementation: list
def get_fully_qualified_name(self, name):
if '.' in name:
first, rest = name.split('.', 1)
if (first == self.name or first in self.project.namespaces):
return name
return f'{self.name}.{name}'
@property
def declaration(self):
'The declaration code; i.e., the top portion in visual studio'
return self.Declaration[0].text
@property
def implementation(self):
'The implementation code; i.e., the bottom portion in visual studio'
impl = self.Implementation[0]
if hasattr(impl, 'ST'):
return impl.ST[0].text
@property
def actions(self):
'The action implementations (zero or more)'
return list(getattr(self, 'Action', []))
def get_source_code(self, *, close_block=True):
'The full source code - declaration, implementation, and actions'
source_code = [self.declaration or '',
self.implementation or '',
]
if close_block:
source_code.append('')
closing = {
'function_block': 'END_FUNCTION_BLOCK',
'program': 'END_PROGRAM',
'function': 'END_FUNCTION',
'action': 'END_ACTION',
}
source_code.append(
closing.get(determine_block_type(self.declaration),
'# pytmc: unknown block type')
)
# TODO: actions defined outside of the block?
for action in self.actions:
source_code.append(action.source_code)
return '\n'.join(source_code)
@property
def call_blocks(self):
'A dictionary of all implementation call blocks'
return get_pou_call_blocks(self.declaration, self.implementation)
@property
def program_name(self):
'The program name, determined from the declaration'
return program_name_from_declaration(self.declaration)
@property
def variables(self):
'A dictionary of variables defined in the POU'
return variables_from_declaration(self.declaration)
class AxisPara(TwincatItem):
'''
[XTI] Axis Parameters
Has information on units, acceleration, deadband, etc.
'''
class NC(TwincatItem):
'[tsproj or XTI] Top-level NC'
_load_path_hint = pathlib.Path('_Config') / 'NC'
def post_init(self):
# Axes can be stored directly in the tsproj:
self.axes = getattr(self, 'Axis', [])
self.axis_by_id = {
int(axis.attributes['Id']): axis
for axis in self.axes
}
self.axis_by_name = {
axis.name: axis
for axis in self.axes
}
class Axis(TwincatItem):
'[XTI] A single NC axis'
_load_path_hint = pathlib.Path('Axes')
@property
def axis_number(self):
return int(self.attributes['Id'])
@property
def units(self):
try:
for axis_para in getattr(self, 'AxisPara', []):
for general in getattr(axis_para, 'General', []):
if 'UnitName' in general.attributes:
return general.attributes['UnitName']
except Exception:
logger.exception('Unable to determine EGU for Axis %s', self)
# 'mm' is the default in twincat if unspecified. defaults are not saved
# in the xti files:
return 'mm'
def summarize(self):
yield from self.attributes.items()
for param in self.find(AxisPara, recurse=False):
yield from param.attributes.items()
for child in param._children:
for key, value in child.attributes.items():
yield f'{child.tag}:{key}', value
for encoder in getattr(self, 'Encoder', []):
for key, value in encoder.summarize():
yield f'Enc:{key}', value
class EncPara(TwincatItem):
'''
[XTI] Encoder parameters
Includes such parameters as ScaleFactorNumerator, ScaleFactorDenominator,
and so on.
'''
class Encoder(TwincatItem):
'''
[XTI] Encoder
Contains EncPara, Vars, Mappings, etc.
'''
def summarize(self):
yield 'EncType', self.attributes['EncType']
for param in self.find(EncPara, recurse=False):
yield from param.attributes.items()
for child in param._children:
for key, value in child.attributes.items():
yield f'{child.tag}:{key}', value
class Device(TwincatItem):
'[XTI] Top-level IO device container'
_load_path_hint = pathlib.Path('_Config') / 'IO'
def __init__(self, element, *, parent=None, name=None, filename=None):
super().__init__(element, parent=parent, name=name, filename=filename)
class Box(TwincatItem):
'[XTI] A box / module'
_load_path_hint = USE_NAME_AS_PATH
class RemoteConnections(TwincatItem):
'[StaticRoutes] Routes contained in the TwinCat configuration'
def post_init(self):
def to_dict(child):
return {
item.tag: item.text
for item in child._children
}
def keyed_on(key):
return {
getattr(child, key)[0].text: to_dict(child)
for child in self._children
if hasattr(child, key)
}
self.by_name = keyed_on('Name')
self.by_address = keyed_on('Address')
self.by_ams_id = keyed_on('NetId')
class _ArrayItemProxy:
'''
A TwincatItem proxy that represents a single element of an array value.
Adjusts 'name' such that access from EPICS will refer to the correct index.
Parameters
----------
item : TwincatItem
The item to mirror
index : int
The array index to use
'''
def __init__(self, item, index):
self.__dict__.update(
name=f'{item.name}[{index}]',
item=item,
_index=index,
)
def __getattr__(self, attr):
return getattr(self.__dict__['item'], attr)
def __setattr__(self, attr, value):
return setattr(self.__dict__['item'], attr, value)
def case_insensitive_path(path):
'''
Match a path in a case-insensitive manner, returning the actual filename as
it exists on the host machine
Required on Linux to find files in a case-insensitive way. Not required on
OSX/Windows, but platform checks are not done here.
Parameters
----------
path : pathlib.Path or str
The case-insensitive path
Returns
-------
path : pathlib.Path or str
The case-corrected path
Raises
------
FileNotFoundError
When the file can't be found
'''
path = pathlib.Path(path)
if path.exists():
return path.resolve()
new_path = pathlib.Path(path.parts[0])
for part in path.parts[1:]:
if not (new_path / part).exists():
all_files = {fn.lower(): fn
for fn in os.listdir(new_path)}
try:
part = all_files[part.lower()]
except KeyError:
raise FileNotFoundError(
f'{path} does not exist ({part!r} not in {new_path!r})'
) from None
new_path = new_path / part
return new_path.resolve()
def separate_by_classname(children):
'''
Take in a list of `TwincatItem`, categorize each by their class name (based
on XML tag), and return a dictionary keyed on that.
For example::
<a> <a> <b> <b>
Would become::
{'a': [<a>, <a>],
'b': [<b>, <b>]
}
Parameters
----------
children : list
list of TwincatItem
Returns
-------
dict
Categorized children
'''
d = collections.defaultdict(list)
for child in children:
d[child.__class__.__name__].append(child)
return d
def strip_namespace(tag):
'Strip off {{namespace}} from: {{namespace}}tag'
return lxml.etree.QName(tag).localname
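# Illustrative usage sketch (file name is hypothetical, not part of the
# original module): loading a TwinCAT project and walking its PLC symbols
# could look like
#
#   project = parse('plant.tsproj')
#   for plc in project.plcs:
#       for symbol in plc.find(Symbol):
#           print(symbol.name, symbol.qualified_type_name)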
|
import sublime
import sys, os
def plugin_loaded():
editor_dir = sys.path[0]
if editor_dir in os.environ['PATH']:
return
settings = sublime.load_settings('PortableInstallationHelper.sublime-settings')
if settings.get('subdir') is None:
print("="*86)
print("PortableInstallationHelper: Error loading settings. Please restart Sublime Text after installation")
print("="*86)
return
subdir = settings.get('subdir').replace('/', os.path.sep)
bindir = os.path.sep.join([editor_dir, subdir]) if len(subdir) else editor_dir
os.environ['PATH'] = os.pathsep.join([bindir, os.environ['PATH']])
print("PortableInstallationHelper: PATH altered to " + os.environ['PATH'])
if int(sublime.version()) < 3000:
plugin_loaded()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Schooner - Course Management System
# University of Turku / Faculty of Technology / Department of Computing
# (c) 2021, Jani Tammi <jasata@utu.fi>
#
# AssistantWorkqueue.py - Data object for assistant workqueue
# 2021-09-04 Initial version.
# 2021-09-25 Support for NULL and NOT NULL column criterion.
#
#
class AssistantWorkqueue(list):
"""List of dictionaries. All unevaluated HUBBOT type submissions for course(s) in which the assistant uid is registered as an active assistant."""
def __init__(self, cursor, uid:str, **kwargs):
"""Queries all unevaluated HUBBOT type submissions from course(s) in which the specified uid (assistant) is registered in, and is in active status. kwargs may specify column (key) = values that will be used to filter the results. Value may be a single value or list of values."""
self.SQL = """
SELECT *
FROM assistant.workqueue(%(uid)s)
"""
where = []
for k, v in kwargs.items():
if v is None or (isinstance(v, bool) and v is False):
where.append(f" {k} IS NULL")
elif (isinstance(v, bool) and v is True):
where.append(f" {k} IS NOT NULL")
else:
if not isinstance(v, list):
kwargs[k] = [v]
where.append(f" {k} = ANY(%({k})s) ")
# Create WHERE clause
if where:
self.SQL += f" WHERE {' AND '.join(where)}"
kwargs['uid'] = uid
self.args = kwargs
if cursor.execute(self.SQL, kwargs).rowcount:
super().__init__(
[dict(zip([k[0] for k in cursor.description], row)) for row in cursor]
)
def sort(self, key, desc: bool = False):
super().sort(key=lambda k : k[key], reverse = desc)
return self
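# Illustrative usage sketch (uid and column names are hypothetical, not part
# of the original module): fetch the workqueue for one assistant, restricted
# to a single course and to rows where 'evaluated' IS NULL:
#
#   wq = AssistantWorkqueue(cursor, 'assistant_uid',
#                           course_id='DTEK0000', evaluated=False)
#   wq.sort('submitted', desc=True)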
# EOF
|
"""
Mask R-CNN
Train on my occ5000 dataset
Copyright (c) 2018 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Kidd Liu
"""
import os
import sys
import datetime
import numpy as np
import skimage.draw
import skimage.io
# Root directory of the project
ROOT_DIR = os.getcwd()
if ROOT_DIR.endswith("samples/occ5000"):
# Go up two levels to the repo root
ROOT_DIR = os.path.dirname(os.path.dirname(ROOT_DIR))
# Import Mask RCNN
sys.path.append(ROOT_DIR)
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, 'mask_rcnn_coco.h5')
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
############################################################
# Configurations
############################################################
class Occ5000Config(Config):
"""
Configuration for training on the occ5000 dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "occ5000"
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
# The default image size is 1024x1024px.
IMAGES_PER_GPU = 2
# Number of classes (including background)
# Background + hair + face + upper clothes + left arm
# + right arm + left hand + right hand + left leg
# + right leg + left feet + right feet + accessory
NUM_CLASSES = 12 + 1
# Number of training steps per epoch
STEPS_PER_EPOCH = 1250
# Class names and accessory can be ignored
CLASS_NAMES = ['hair', 'face', 'upper_clothes', 'left_arm',
'right_arm', 'left_hand', 'right_hand',
'left_leg', 'right_leg', 'left_feet', 'right_feet',
'accessory']
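# Note: the annotation PNGs are expected to encode these classes as pixel
# values 1..12 in the order above, with 0 reserved for background (see
# load_mask below, which builds one mask channel per value found).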
IMAGE_MIN_DIM = 512
IMAGE_MAX_DIM = 1024
############################################################
# Dataset
############################################################
class Occ5000Dataset(utils.Dataset):
def load_occ5000(self, dataset_dir, subset):
"""
Load a subset of the occ5000 dataset.
dataset_dir: The root directory of the occ5000 dataset (like /home/kidd/kidd1/occ5000)
subset: What to load (train_all2500, val_all2500, see in occ5000/list)
TODO: resize the train val to 4000(3200occ + 800unOcc) and 1000(800occ+200unocc)
which is 80% of the whole dataset
"""
# Add classes, can ignore some class here
classNames = Occ5000Config.CLASS_NAMES
for className, i in zip(classNames, np.arange(len(Occ5000Config.CLASS_NAMES))):
self.add_class('occ5000', i+1, Occ5000Config.CLASS_NAMES[i])
# Train or validation dataset
assert subset in ['train_all2500', 'val_all2500']
#dateset_dir = os.path.join(dataset_dir, sub)
# Get image and annotation path in the format 'impath annpath'
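# A hypothetical list entry would look like:
#   /images/train/0001.png /annotations/train/0001.png
# (the image path ends at '.png'; everything after the following space is the
# annotation path, which is how the split below works)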
lines = []
with open(os.path.join(dataset_dir, 'list', subset+'.txt'), 'r') as list_file:
while True:
line = list_file.readline()
if not line:
break
line = line.strip('\n')
lines.append(line)
# Separate image and annotation paths
im_paths = []
ann_paths = []
for line in lines:
im_path = line[0:line.find('.png')+4]
ann_path = line[line.find('.png')+5:]
im_paths.append(im_path)
ann_paths.append(ann_path)
# Read image and annotation from png and add images
for im_path, ann_path in zip(im_paths, ann_paths):
im = skimage.io.imread(dataset_dir + im_path)
height, width = im.shape[:2]
_, image_id = os.path.split(im_path)
path = dataset_dir + im_path
ann = skimage.io.imread(dataset_dir + ann_path)
self.add_image('occ5000',
image_id = image_id,
path = path,
width = width, height = height,
annotation = ann)
def load_mask(self, image_id):
"""
Generate instance masks for an image.
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# If not an occ5000 dataset image, delegate to parent class.
info = self.image_info[image_id]
if info["source"] != 'occ5000':
return super(self.__class__, self).load_mask(image_id)
# Build mask of shape [height, width, instance_count] and list
# of class IDs that correspond to each channel of the mask.
masks = []
classID = []
ann = info['annotation']
for i in np.arange(1, len(Occ5000Config.CLASS_NAMES) + 1):
mask = np.where(ann == i,
np.ones(ann.shape, dtype=np.uint8),
np.zeros(ann.shape, dtype=np.uint8))
if np.sum(mask) != 0:
masks.append(mask)
classID.append(i)
masks = np.stack(masks, axis = 2)
#classID = np.arange(1,len(Occ5000Config.CLASS_NAMES) + 1)
return masks, np.array(classID).astype(np.int32)
def image_reference(self, image_id):
"""Return the path of the image."""
info = self.image_info[image_id]
if info['source'] == 'occ5000':
return info['path']
else:
return super(self.__class__, self).image_reference(image_id)
############################################################
# Training
############################################################
def train(model):
"""Train the model."""
# Training dataset.
dataset_train = Occ5000Dataset()
dataset_train.load_occ5000(args.dataset, "train_all2500")
dataset_train.prepare()
# Validation dataset
dataset_val = Occ5000Dataset()
dataset_val.load_occ5000(args.dataset, "val_all2500")
dataset_val.prepare()
# *** This training schedule is an example. Update to your needs ***
# Since we're using a very small dataset, and starting from
# COCO trained weights, we don't need to train too long. Also,
# no need to train all layers, just the heads should do it.
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=30,
layers='heads')
############################################################
# Evaluation
###########################################################
#def evaluate_occ5000(model, dataset, coco, eval_type="bbox", limit=0, image_ids=None):
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Train Mask R-CNN on occ5000.')
parser.add_argument('command',
metavar="<command>",
help="'train'")
parser.add_argument('--dataset', required=False,
metavar='/path/to/occ5000',
help='Directory of the occ5000 dataset')
parser.add_argument('--weights', required=True,
metavar="/path/to/weights.h5",
help="Path to weights .h5 file or 'coco'")
parser.add_argument('--logs', required=False,
default=DEFAULT_LOGS_DIR,
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--image', required=False,
metavar="path or URL to image",
help='Image to apply the color splash effect on')
args = parser.parse_args()
if args.command == "train":
assert args.dataset, "Argument --dataset is required for training"
print("Weights: ", args.weights)
print("Dataset: ", args.dataset)
print("Logs: ", args.logs)
# Configurations
if args.command == "train":
config = Occ5000Config()
else:
class InferenceConfig(Occ5000Config):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
# Create model
if args.command == "train":
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=args.logs)
else:
model = modellib.MaskRCNN(mode="inference", config=config,
model_dir=args.logs)
# Select weights file to load
if args.weights.lower() == "coco":
weights_path = COCO_WEIGHTS_PATH
# Download weights file
if not os.path.exists(weights_path):
utils.download_trained_weights(weights_path)
elif args.weights.lower() == "last":
# Find last trained weights
weights_path = model.find_last()
elif args.weights.lower() == "imagenet":
# Start from ImageNet trained weights
weights_path = model.get_imagenet_weights()
else:
weights_path = args.weights
# Load weights
print("Loading weights ", weights_path)
if args.weights.lower() == "coco":
# Exclude the last layers because they require a matching
# number of classes
model.load_weights(weights_path, by_name=True, exclude=[
"mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
else:
model.load_weights(weights_path, by_name=True)
# Train or evaluate
if args.command == "train":
train(model)
else:
print("'{}' is not recognized. "
"Use 'train'".format(args.command))
|
class LocationConfig():
def __init__(self, core_config: dict):
self.core_config = core_config
self.county = None
self.county_code = None
@property
def city(self):
"""The current value of the city name in the device configuration."""
return self.core_config["location"]["city"]["name"]
@property
def country(self):
"""The current value of the country name in the device configuration."""
return self.core_config["location"]["city"]["state"]["country"]["name"]
@property
def latitude(self):
"""The current value of the latitude location configuration"""
return self.core_config["location"]["coordinate"]["latitude"]
@property
def longitude(self):
"""The current value of the longitude location configuration"""
return self.core_config["location"]["coordinate"]["longitude"]
@property
def state(self):
"""The current value of the state name in the device configuration."""
return self.core_config["location"]["city"]["state"]["name"]
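# Minimal usage sketch (the nested dict below is a hypothetical core_config that
# matches the keys read by the properties above):
#   cfg = LocationConfig({
#       "location": {
#           "city": {"name": "Lawrence",
#                    "state": {"name": "Kansas",
#                              "country": {"name": "United States"}}},
#           "coordinate": {"latitude": 38.97, "longitude": -95.23},
#       }
#   })
#   cfg.city, cfg.state, cfg.country, cfg.latitude, cfg.longitude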
|
"""
Let's see how a class actually gets created:
define the body of the class (constructor and methods) as a string,
prepare a dictionary and execute the body into it so that it holds the
constructor and the methods, then create a new class named Spam by calling
type() with the class name as the first argument, the tuple of base classes
it inherits from as the second, and clsdict (the dictionary of attributes
and methods/functions) as the third.
"""
class Base:
pass
# Class Body
body = '''
def __init__(self, name):
self.name = name
def bar(self):
print("I am in Spam.bar")
'''
# Class dictionary
clsdict = type.__prepare__("Spam", (Base,))
exec(body, globals(), clsdict)
print(clsdict)
# Create a class named Spam
Spam = type("Spam", (Base,), clsdict)
print(Spam)
s = Spam("Mat") # call __init__
s.bar() # call bar method
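# Running this prints the populated clsdict, the new class object, and then
# "I am in Spam.bar" from the bar() call.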
|
from django.conf.urls import url, include
from rest_framework.urlpatterns import format_suffix_patterns
from rest_framework.authtoken.views import obtain_auth_token
from .views import FrameCreateView, FrameDetailsView, SpaceDetailsView, SpaceCreateView, AgentCreateView, AgentDetailsView
urlpatterns = [
url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^frames/$', FrameCreateView.as_view(), name="frame_create"),
url(r'^frames/(?P<pk>[0-9a-f]+)/$', FrameDetailsView.as_view(), name="frame_details"),
url(r'^spaces/$', SpaceCreateView.as_view(), name="space_create"),
url(r'^spaces/(?P<pk>[0-9a-f]+)/$', SpaceDetailsView.as_view(), name="space_details"),
url(r'^agents/$', AgentCreateView.as_view(), name="agent_create"),
url(r'^agents/(?P<pk>[0-9a-f]+)/$', AgentDetailsView.as_view(), name="agent_details"),
url(r'^get-token/', obtain_auth_token),
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
from unittest.mock import patch
from phoenix.integration.datadog import get_all_slack_channels
@patch("phoenix.integration.datadog.api.Monitor.get_all")
def test_get_all_slack_channels(mocked_get_all):
mocked_get_all.return_value = (
"@slack-alerts{{/is_warning}}{{#is_warning_"
"recovery}}@slack-alerts{{/is_warning_recov"
"ery}}{{#is_alert}}@slack-alerts-a{{/is_alert"
"}}{{#is_alert_recovery}}@slack-alerts-test{{"
"/is_alert_recovery}}"
)
data = get_all_slack_channels()
test_data = ["alerts-test", "alerts", "alerts-a"]
assert sorted(data) == sorted(test_data)
|
#!/usr/bin/env python3
scores = {
"John": 75,
"Ronald": 99,
"Clarck": 78,
"Mark": 69,
"Newton": 82,
}
grades = {}
for name in scores:
score = scores[name]
if score > 90:
grades[name] = "Outstanding"
elif score > 80:
grades[name] = "Exceeds Expectations"
elif score > 70:
grades[name] = "Acceptable"
else:
grades[name] = "Fail"
for key in grades:
print("{:s}: {:s}".format(key, grades[key]))
|
"""Task that gives reward at regular intervals during task."""
from . import abstract_task
class StayAlive(abstract_task.AbstractTask):
"""StayAlive task. In this task a reward is given at regular intervals."""
def __init__(self, reward_period, reward_value=1.):
"""Constructor.
Args:
reward_period: Int. Number of steps between each reward.
reward_value: Scalar. Value of reward given.
"""
self._reward_period = reward_period
self._reward_value = reward_value
def reset(self, state, meta_state):
pass
def reward(self, state, meta_state, step_count):
"""Compute reward."""
del state
del meta_state
if (step_count + 1) % self._reward_period == 0:
reward = self._reward_value
else:
reward = 0
return reward, False
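# Minimal usage sketch (state/meta_state are ignored by this task, so None is
# passed purely for illustration):
#   task = StayAlive(reward_period=10, reward_value=0.5)
#   task.reward(None, None, step_count=9)    # -> (0.5, False): (9 + 1) % 10 == 0
#   task.reward(None, None, step_count=10)   # -> (0, False)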
|
from pathhack import pkg_path
import os
import pickle
import numpy as np
from os.path import join
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib import gridspec
import matplotlib.patches as patches
from matplotlib.animation import FuncAnimation
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
class VisualizationMIF:
def __init__(self, map_info, particle_num_per_intent, infos, sample_true):
# map_info: contains map information that helps draw the map.
self.map_info = map_info
self.particle_num_per_intent = particle_num_per_intent
(_, _, self.intention_dist_hist, \
self.particle_intention_hist, self.long_term_pred_hist, self.long_term_true_hist) = infos
self.intention_dist_hist = np.vstack(self.intention_dist_hist)
self.sample_true = sample_true
self.fig = plt.figure(figsize=map_info['fig_size'])
gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
self.axs = []
for i in range(2):
self.axs.append(plt.subplot(gs[i]))
self.num_samples = self.particle_num_per_intent * self.map_info['intent_num']
plt.tight_layout()
self.init_plot()
def init_plot(self):
# plot intents
for i in range(self.map_info['intent_num']):
intent_sq = patches.Rectangle(self.map_info['intent_mean'][i]-self.map_info['intent_radius'], \
2.*self.map_info['intent_radius'], 2.*self.map_info['intent_radius'], linewidth=1, ls='--', edgecolor=self.map_info['intent_color'][i], facecolor='none')
self.axs[0].add_patch(intent_sq)
self.axs[0].set_aspect('equal', 'box')
self.axs[0].set(xlim=(-1, 18), ylim=(-1, 14))
self.axs[0].set_yticks([0, 4, 8, 12])
self.axs[0].set_xticks([0, 4, 8, 12, 16])
# set up probability distribution plot
self.axs[1].set_xlim([0, self.map_info['intent_num']+1]) # look symmetric
self.axs[1].set_ylim([0, 1])
self.axs[1].set_yticks([0, 0.5, 1])
self.axs[1].set_xticklabels([])
self.prob_dist = self.axs[1].bar(range(1, self.map_info['intent_num']+1),\
1. / self.map_info['intent_num'] * np.ones(self.map_info['intent_num']))
for j, b in enumerate(self.prob_dist): # j indexes the intentions (0, 1, 2)
b.set_color(self.map_info['intent_color'][j])
self.prediction_samples = []
for i in range(self.num_samples):
particle, = self.axs[0].plot([], [])
self.prediction_samples.append(particle)
self.last_obs_point, = self.axs[0].plot([], [], '.', c='k', ms=30)
self.ped_track, = self.axs[0].plot(self.sample_true[:, 0], self.sample_true[:, 1], 'k-')
print('Visualization is initialized.')
def anime_intention_prob_dist(self, interval=20):
def init():
pass
def update(ts):
sample_full_pred = self.long_term_pred_hist[ts]
x_true_remained = self.long_term_true_hist[ts]
particle_intention = self.particle_intention_hist[ts]
intention_dist = self.intention_dist_hist[ts]
max_prob_intention = np.argmax(intention_dist)
for j, b in enumerate(self.prob_dist): # j indexes the intentions (0, 1, 2)
b.set_height(intention_dist[j])
for i in range(self.num_samples):
self.prediction_samples[i].set_data([], [])
for i, (xy_pred, intention) in enumerate(zip(sample_full_pred, particle_intention)):
if intention == max_prob_intention:
xy_pred = xy_pred - xy_pred[0] + x_true_remained[0] # refine the start
self.prediction_samples[i].set_data(xy_pred[:,0], xy_pred[:,1])
self.prediction_samples[i].set_color(self.map_info['intent_color'][intention])
self.last_obs_point.set_data(x_true_remained[0, 0], x_true_remained[0, 1])
ani = FuncAnimation(self.fig, update, frames=self.intention_dist_hist.shape[0],
init_func=init, interval=interval, repeat=True)
plt.show()
def intention_hist_plot(intention_dist_hist):
hues = [[215, 25, 28], [253, 174, 97], [44, 123, 182]]
for i in range(len(hues)):
hues[i] = np.array(hues[i])/255.
intention_dist_hist = np.stack(intention_dist_hist)
fig, ax = plt.subplots()
fig.set_tight_layout(True)
ax.plot(intention_dist_hist[:, 0], c=hues[0], lw=3, label='intention 0')
ax.plot(intention_dist_hist[:, 1], c=hues[1], lw=3, label='intention 1')
ax.plot(intention_dist_hist[:, 2], c=hues[2], lw=3, label='intention 2')
ax.axvline(x=30, c='k', ls='--')
ax.axvline(x=86-12-2, c='k', ls='--')
ax.axvline(x=150, c='k', ls='--')
intent_gt_1 = patches.Rectangle((0, 1.05), 86-12-2, 1.1, facecolor=hues[1])
ax.add_patch(intent_gt_1)
intent_gt_2 = patches.Rectangle((86-12-2, 1.05), len(intention_dist_hist)-(86-12-2)-0.5, 1.1, facecolor=hues[0])
ax.add_patch(intent_gt_2)
ax.set(xlim=(-5, 205), ylim=(-0.1, 1.1))
ax.set_yticks([0, 0.25, 0.5, 0.75, 1])
vals = ax.get_yticks()
ax.set_yticklabels(['{:,.0%}'.format(x) for x in vals])
fig_filename = 'intention_estimation_history.pdf'
fig_filepath = join(pkg_path, 'results/visual', fig_filename)
plt.savefig(fig_filepath)
print(fig_filepath+' is created.')
def filtering_prediction_plot(infos):
hues = [[215, 25, 28], [253, 174, 97], [44, 123, 182]]
for i in range(len(hues)):
hues[i] = np.array(hues[i])/255.
dataset_filename = 'original_traj_intention_changing_animation.p'
dataset_filepath = join(pkg_path, 'results/visual', dataset_filename)
with open(dataset_filepath, 'rb') as f:
abnormal_traj_data = pickle.load(f)
print(dataset_filepath+' is loaded.')
num_abnormal_samples = 0
for data_i in abnormal_traj_data:
num_abnormal_samples = num_abnormal_samples + len(data_i)
print('Number of abnormal samples: ', num_abnormal_samples)
abnormal_traj_data_total = abnormal_traj_data[0]+abnormal_traj_data[1]+abnormal_traj_data[2]
sample_true_abnormal = abnormal_traj_data_total[0]
(correct_intention, percentage_hist, intention_dist_hist, \
particle_intention_hist, long_term_pred_hist, long_term_true_hist) = infos
for time_idx in [30, 150]:
sample_full_pred = long_term_pred_hist[time_idx]
x_true_remained = long_term_true_hist[time_idx]
particle_intention = particle_intention_hist[time_idx]
intention_dist = intention_dist_hist[time_idx]
max_prob_intention = np.argmax(intention_dist)
for xy_pred, intention in zip(sample_full_pred, particle_intention):
if intention == max_prob_intention:
xy_pred = xy_pred - xy_pred[0] + x_true_remained[0] # refine the start
plt.plot(xy_pred[:,0], xy_pred[:,1], c=hues[intention])
plt.plot(x_true_remained[0, 0], x_true_remained[0, 1], '.', c='k', ms=15, label='last observation') # int
plt.plot(sample_true_abnormal[:, 0], sample_true_abnormal[:, 1],'k-')
time_intention_changing_idx = 86-12-2
x_true_remained = long_term_true_hist[time_intention_changing_idx]
plt.plot(x_true_remained[0, 0], x_true_remained[0, 1], '.', c='k', ms=15, label='last observation') # int
plt.xlim(-1, 18)
plt.ylim(-1, 14)
plt.yticks([0, 4, 8, 12])
plt.xticks([0, 4, 8, 12, 16])
plt.tight_layout()
fig_filename = 'trajectory_prediction_snapshots_with_top_probability_intention.pdf'
fig_filepath = join(pkg_path, 'results/visual', fig_filename)
plt.savefig(fig_filepath)
print(fig_filepath+' is created.')
def main_animation():
r"""Animate the filtering process."""
# initialize visualization settings
map_info = {}
map_info['intent_num'] = 3
map_info['intent_mean'] = np.array([[ 7.6491633 , 11.74338086],
[ 3.00575615, 0.77987421],
[15.72789116, 7.75681342],])
map_info['intent_color'] = [[215, 25, 28], [253, 174, 97], [44, 123, 182]]
for i in range(len(map_info['intent_color'])):
map_info['intent_color'][i] = np.array(map_info['intent_color'][i])/255.
map_info['intent_radius'] = 0.8
map_info['fig_size'] = (12, 7)
particle_num_per_intent = 200
# load filtering data
filtering_results_filename = 'infos_intention_changing_animation.p'
filtering_results_filepath = join(pkg_path, 'results/visual', filtering_results_filename)
with open(filtering_results_filepath, 'rb') as f:
infos_test = pickle.load(f)
print(filtering_results_filepath + ' is loaded.')
dataset_filename = 'original_traj_intention_changing_animation.p'
dataset_filepath = join(pkg_path, 'results/visual', dataset_filename)
with open(dataset_filepath, 'rb') as f:
abnormal_traj_data = pickle.load(f)
print(dataset_filepath+' is loaded.')
sample_true_abnormal = abnormal_traj_data[0][0]
vis_mif = VisualizationMIF(map_info, particle_num_per_intent, infos_test['200_12_True'], sample_true_abnormal)
vis_mif.anime_intention_prob_dist()
def main_plot():
r"""Create snapshots of trajectory prediction with top probability intention and intention estimation history."""
filtering_results_filename = 'infos_intention_changing_animation.p'
filtering_results_filepath = join(pkg_path, 'results/visual', filtering_results_filename)
with open(filtering_results_filepath, 'rb') as f:
infos_test = pickle.load(f)
print(filtering_results_filepath + ' is loaded.')
filtering_prediction_plot(infos_test['200_12_False'])
(correct_intention, percentage_hist, intention_dist_hist, \
particle_intention_hist, long_term_pred_hist, long_term_true_hist) = infos_test['200_12_False']
intention_dist_hist = np.vstack(intention_dist_hist)
intention_hist_plot(intention_dist_hist)
if __name__ == '__main__':
main_animation() # animate the filtering process
# main_plot() # create snapshots of trajectory prediction with top probability intention and intention estimation history
|
"""
Creates a new pytplot variable as the time average of original.
Notes
-----
Similar to avg_data.pro in IDL SPEDAS.
"""
import numpy as np
import pyspedas
import pytplot
def avg_data(names, dt=None, width=60, noremainder=False,
new_names=None, suffix=None, overwrite=None):
"""
Get a new tplot variable with averaged data.
Parameters
----------
names: str/list of str
List of pytplot names.
dt: float, optional
Time window in seconds for averaging data. It can be less than 1 sec.
width: int, optional
Number of values for the averaging window.
Default is 60 points (usually this means 60 seconds).
If dt is set, then width is ignored.
noremainder: boolean, optional
If True, the remainder (last part of the data) will not be included.
If False, the remainder will be included.
new_names: str/list of str, optional
List of new_names for pytplot variables.
If not given, then a suffix is applied.
suffix: str, optional
A suffix to apply. Default is '-avg'.
overwrite: bool, optional
Replace the existing tplot name.
Returns
-------
None.
"""
old_names = pyspedas.tnames(names)
if len(old_names) < 1:
print('avg_data error: No pytplot names were provided.')
return
if suffix is None:
suffix = '-avg'
if overwrite is not None:
n_names = old_names
elif new_names is None:
n_names = [s + suffix for s in old_names]
else:
n_names = new_names
if isinstance(n_names, str):
n_names = [n_names]
if len(n_names) != len(old_names):
n_names = [s + suffix for s in old_names]
for old_idx, old in enumerate(old_names):
new = n_names[old_idx]
d = pytplot.data_quants[old].copy()
data = d.values
time = d.time.values
dim = data.shape
dim0 = dim[0]
if len(dim) < 2:
dim1 = 1
else:
dim1 = dim[1]
new_data = []
new_time = []
if dt is None:
# Use width
width = int(width)
# print(dim0, width)
for i in range(0, dim0, width):
last = (i + width) if (i + width) < dim0 else dim0
# idx = int(i + width/2) # redefined below before it's ever used?
if (i + width > dim0) and noremainder:
continue # Skip the last part of data.
else:
idx = int((i + last - 1)/2) # Include the last part.
new_time.append(time[idx])
if dim1 < 2:
nd0 = np.average(data[i:last])
else:
nd0 = []
for j in range(dim1):
nd0.append(np.average(data[i:last, j]))
new_data.append(nd0)
else:
# Use dt
dt = float(dt)
timedbl = np.array(pyspedas.time_float(time))
alldt = timedbl[-1] - timedbl[0]
if not dt > 0.0:
print("avg_data: Time interval dt<=0.0. Exiting.")
return
if dt > alldt:
print("avg_data: Time interval dt is too large. Exiting.")
return
# Find bins for time: equal bins of length dt.
bincount = int(alldt/dt)
if alldt % dt > 0.0 and not noremainder: # residual bin
# Include the last bin which might not be the same size.
bincount += 1
time0 = timedbl[0]
maxtime = timedbl[-1]
for i in range(bincount):
time1 = time0 + dt
bintime = time0 + dt/2.0
if bintime > maxtime:
bintime = maxtime
new_time.append(bintime)
# Find all indexes between time0 and time1.
idx = np.where((timedbl >= time0) & (timedbl < time1))
# Check if idx is empty, ie. there is a gap in data.
idx_is_empty = False
if not idx:
idx_is_empty = True
elif len(idx) == 1:
if len(idx[0]) == 0:
idx_is_empty = True
if dim1 < 2:
if idx_is_empty: # Empty list.
nd0 = np.nan
else:
nd0 = np.average(data[idx])
else:
nd0 = []
for j in range(dim1):
if idx_is_empty: # Empty list.
nd0.append(np.nan)
else:
nd0.append(np.average(data[idx, j]))
new_data.append(nd0)
time0 = time1
pytplot.store_data(new, data={'x': new_time, 'y': new_data})
# copy attributes
pytplot.data_quants[new].attrs = d.attrs.copy()
print('avg_data was applied to: ' + new)
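# Example usage (assuming a tplot variable named 'bx_gse' has already been stored):
#   avg_data('bx_gse', width=5)                  # -> new variable 'bx_gse-avg', 5-point averages
#   avg_data('bx_gse', dt=60.0, overwrite=True)  # -> 60-second bins, replacing the original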
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from intersight.model.asset_device_registration_relationship import AssetDeviceRegistrationRelationship
from intersight.model.display_names import DisplayNames
from intersight.model.fc_physical_port import FcPhysicalPort
from intersight.model.inventory_device_info_relationship import InventoryDeviceInfoRelationship
from intersight.model.mo_base_mo_relationship import MoBaseMoRelationship
from intersight.model.mo_mo_ref import MoMoRef
from intersight.model.mo_tag import MoTag
from intersight.model.mo_version_context import MoVersionContext
from intersight.model.port_group_relationship import PortGroupRelationship
from intersight.model.port_sub_group_relationship import PortSubGroupRelationship
globals()['AssetDeviceRegistrationRelationship'] = AssetDeviceRegistrationRelationship
globals()['DisplayNames'] = DisplayNames
globals()['FcPhysicalPort'] = FcPhysicalPort
globals()['InventoryDeviceInfoRelationship'] = InventoryDeviceInfoRelationship
globals()['MoBaseMoRelationship'] = MoBaseMoRelationship
globals()['MoMoRef'] = MoMoRef
globals()['MoTag'] = MoTag
globals()['MoVersionContext'] = MoVersionContext
globals()['PortGroupRelationship'] = PortGroupRelationship
globals()['PortSubGroupRelationship'] = PortSubGroupRelationship
class FcPhysicalPortRelationship(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'MO.MOREF': "mo.MoRef",
},
('object_type',): {
'AAA.AUDITRECORD': "aaa.AuditRecord",
'AAA.RETENTIONCONFIG': "aaa.RetentionConfig",
'AAA.RETENTIONPOLICY': "aaa.RetentionPolicy",
'ACCESS.POLICY': "access.Policy",
'ADAPTER.CONFIGPOLICY': "adapter.ConfigPolicy",
'ADAPTER.EXTETHINTERFACE': "adapter.ExtEthInterface",
'ADAPTER.HOSTETHINTERFACE': "adapter.HostEthInterface",
'ADAPTER.HOSTFCINTERFACE': "adapter.HostFcInterface",
'ADAPTER.HOSTISCSIINTERFACE': "adapter.HostIscsiInterface",
'ADAPTER.UNIT': "adapter.Unit",
'ADAPTER.UNITEXPANDER': "adapter.UnitExpander",
'APPLIANCE.APPSTATUS': "appliance.AppStatus",
'APPLIANCE.AUTORMAPOLICY': "appliance.AutoRmaPolicy",
'APPLIANCE.BACKUP': "appliance.Backup",
'APPLIANCE.BACKUPPOLICY': "appliance.BackupPolicy",
'APPLIANCE.CERTIFICATESETTING': "appliance.CertificateSetting",
'APPLIANCE.DATAEXPORTPOLICY': "appliance.DataExportPolicy",
'APPLIANCE.DEVICECERTIFICATE': "appliance.DeviceCertificate",
'APPLIANCE.DEVICECLAIM': "appliance.DeviceClaim",
'APPLIANCE.DEVICEUPGRADEPOLICY': "appliance.DeviceUpgradePolicy",
'APPLIANCE.DIAGSETTING': "appliance.DiagSetting",
'APPLIANCE.EXTERNALSYSLOGSETTING': "appliance.ExternalSyslogSetting",
'APPLIANCE.FILEGATEWAY': "appliance.FileGateway",
'APPLIANCE.FILESYSTEMSTATUS': "appliance.FileSystemStatus",
'APPLIANCE.GROUPSTATUS': "appliance.GroupStatus",
'APPLIANCE.IMAGEBUNDLE': "appliance.ImageBundle",
'APPLIANCE.NODEINFO': "appliance.NodeInfo",
'APPLIANCE.NODESTATUS': "appliance.NodeStatus",
'APPLIANCE.RELEASENOTE': "appliance.ReleaseNote",
'APPLIANCE.REMOTEFILEIMPORT': "appliance.RemoteFileImport",
'APPLIANCE.RESTORE': "appliance.Restore",
'APPLIANCE.SETUPINFO': "appliance.SetupInfo",
'APPLIANCE.SYSTEMINFO': "appliance.SystemInfo",
'APPLIANCE.SYSTEMSTATUS': "appliance.SystemStatus",
'APPLIANCE.UPGRADE': "appliance.Upgrade",
'APPLIANCE.UPGRADEPOLICY': "appliance.UpgradePolicy",
'ASSET.CLUSTERMEMBER': "asset.ClusterMember",
'ASSET.DEPLOYMENT': "asset.Deployment",
'ASSET.DEPLOYMENTDEVICE': "asset.DeploymentDevice",
'ASSET.DEVICECLAIM': "asset.DeviceClaim",
'ASSET.DEVICECONFIGURATION': "asset.DeviceConfiguration",
'ASSET.DEVICECONNECTORMANAGER': "asset.DeviceConnectorManager",
'ASSET.DEVICECONTRACTINFORMATION': "asset.DeviceContractInformation",
'ASSET.DEVICECONTRACTNOTIFICATION': "asset.DeviceContractNotification",
'ASSET.DEVICEREGISTRATION': "asset.DeviceRegistration",
'ASSET.SUBSCRIPTION': "asset.Subscription",
'ASSET.SUBSCRIPTIONACCOUNT': "asset.SubscriptionAccount",
'ASSET.SUBSCRIPTIONDEVICECONTRACTINFORMATION': "asset.SubscriptionDeviceContractInformation",
'ASSET.TARGET': "asset.Target",
'BIOS.BOOTDEVICE': "bios.BootDevice",
'BIOS.BOOTMODE': "bios.BootMode",
'BIOS.POLICY': "bios.Policy",
'BIOS.SYSTEMBOOTORDER': "bios.SystemBootOrder",
'BIOS.TOKENSETTINGS': "bios.TokenSettings",
'BIOS.UNIT': "bios.Unit",
'BIOS.VFSELECTMEMORYRASCONFIGURATION': "bios.VfSelectMemoryRasConfiguration",
'BOOT.CDDDEVICE': "boot.CddDevice",
'BOOT.DEVICEBOOTMODE': "boot.DeviceBootMode",
'BOOT.DEVICEBOOTSECURITY': "boot.DeviceBootSecurity",
'BOOT.HDDDEVICE': "boot.HddDevice",
'BOOT.ISCSIDEVICE': "boot.IscsiDevice",
'BOOT.NVMEDEVICE': "boot.NvmeDevice",
'BOOT.PCHSTORAGEDEVICE': "boot.PchStorageDevice",
'BOOT.PRECISIONPOLICY': "boot.PrecisionPolicy",
'BOOT.PXEDEVICE': "boot.PxeDevice",
'BOOT.SANDEVICE': "boot.SanDevice",
'BOOT.SDDEVICE': "boot.SdDevice",
'BOOT.UEFISHELLDEVICE': "boot.UefiShellDevice",
'BOOT.USBDEVICE': "boot.UsbDevice",
'BOOT.VMEDIADEVICE': "boot.VmediaDevice",
'BULK.EXPORT': "bulk.Export",
'BULK.EXPORTEDITEM': "bulk.ExportedItem",
'BULK.MOCLONER': "bulk.MoCloner",
'BULK.MOMERGER': "bulk.MoMerger",
'BULK.REQUEST': "bulk.Request",
'BULK.SUBREQUESTOBJ': "bulk.SubRequestObj",
'CAPABILITY.ADAPTERUNITDESCRIPTOR': "capability.AdapterUnitDescriptor",
'CAPABILITY.CATALOG': "capability.Catalog",
'CAPABILITY.CHASSISDESCRIPTOR': "capability.ChassisDescriptor",
'CAPABILITY.CHASSISMANUFACTURINGDEF': "capability.ChassisManufacturingDef",
'CAPABILITY.CIMCFIRMWAREDESCRIPTOR': "capability.CimcFirmwareDescriptor",
'CAPABILITY.EQUIPMENTPHYSICALDEF': "capability.EquipmentPhysicalDef",
'CAPABILITY.EQUIPMENTSLOTARRAY': "capability.EquipmentSlotArray",
'CAPABILITY.FANMODULEDESCRIPTOR': "capability.FanModuleDescriptor",
'CAPABILITY.FANMODULEMANUFACTURINGDEF': "capability.FanModuleManufacturingDef",
'CAPABILITY.IOCARDCAPABILITYDEF': "capability.IoCardCapabilityDef",
'CAPABILITY.IOCARDDESCRIPTOR': "capability.IoCardDescriptor",
'CAPABILITY.IOCARDMANUFACTURINGDEF': "capability.IoCardManufacturingDef",
'CAPABILITY.PORTGROUPAGGREGATIONDEF': "capability.PortGroupAggregationDef",
'CAPABILITY.PSUDESCRIPTOR': "capability.PsuDescriptor",
'CAPABILITY.PSUMANUFACTURINGDEF': "capability.PsuManufacturingDef",
'CAPABILITY.SERVERMODELSCAPABILITYDEF': "capability.ServerModelsCapabilityDef",
'CAPABILITY.SERVERSCHEMADESCRIPTOR': "capability.ServerSchemaDescriptor",
'CAPABILITY.SIOCMODULECAPABILITYDEF': "capability.SiocModuleCapabilityDef",
'CAPABILITY.SIOCMODULEDESCRIPTOR': "capability.SiocModuleDescriptor",
'CAPABILITY.SIOCMODULEMANUFACTURINGDEF': "capability.SiocModuleManufacturingDef",
'CAPABILITY.SWITCHCAPABILITY': "capability.SwitchCapability",
'CAPABILITY.SWITCHDESCRIPTOR': "capability.SwitchDescriptor",
'CAPABILITY.SWITCHMANUFACTURINGDEF': "capability.SwitchManufacturingDef",
'CERTIFICATEMANAGEMENT.POLICY': "certificatemanagement.Policy",
'CHASSIS.CONFIGCHANGEDETAIL': "chassis.ConfigChangeDetail",
'CHASSIS.CONFIGIMPORT': "chassis.ConfigImport",
'CHASSIS.CONFIGRESULT': "chassis.ConfigResult",
'CHASSIS.CONFIGRESULTENTRY': "chassis.ConfigResultEntry",
'CHASSIS.IOMPROFILE': "chassis.IomProfile",
'CHASSIS.PROFILE': "chassis.Profile",
'CLOUD.AWSBILLINGUNIT': "cloud.AwsBillingUnit",
'CLOUD.AWSKEYPAIR': "cloud.AwsKeyPair",
'CLOUD.AWSNETWORKINTERFACE': "cloud.AwsNetworkInterface",
'CLOUD.AWSORGANIZATIONALUNIT': "cloud.AwsOrganizationalUnit",
'CLOUD.AWSSECURITYGROUP': "cloud.AwsSecurityGroup",
'CLOUD.AWSSUBNET': "cloud.AwsSubnet",
'CLOUD.AWSVIRTUALMACHINE': "cloud.AwsVirtualMachine",
'CLOUD.AWSVOLUME': "cloud.AwsVolume",
'CLOUD.AWSVPC': "cloud.AwsVpc",
'CLOUD.COLLECTINVENTORY': "cloud.CollectInventory",
'CLOUD.REGIONS': "cloud.Regions",
'CLOUD.SKUCONTAINERTYPE': "cloud.SkuContainerType",
'CLOUD.SKUDATABASETYPE': "cloud.SkuDatabaseType",
'CLOUD.SKUINSTANCETYPE': "cloud.SkuInstanceType",
'CLOUD.SKUNETWORKTYPE': "cloud.SkuNetworkType",
'CLOUD.SKUREGIONRATECARDS': "cloud.SkuRegionRateCards",
'CLOUD.SKUVOLUMETYPE': "cloud.SkuVolumeType",
'CLOUD.TFCAGENTPOOL': "cloud.TfcAgentpool",
'CLOUD.TFCORGANIZATION': "cloud.TfcOrganization",
'CLOUD.TFCWORKSPACE': "cloud.TfcWorkspace",
'COMM.HTTPPROXYPOLICY': "comm.HttpProxyPolicy",
'COMPUTE.BIOSPOSTPOLICY': "compute.BiosPostPolicy",
'COMPUTE.BLADE': "compute.Blade",
'COMPUTE.BLADEIDENTITY': "compute.BladeIdentity",
'COMPUTE.BOARD': "compute.Board",
'COMPUTE.MAPPING': "compute.Mapping",
'COMPUTE.PHYSICALSUMMARY': "compute.PhysicalSummary",
'COMPUTE.RACKUNIT': "compute.RackUnit",
'COMPUTE.RACKUNITIDENTITY': "compute.RackUnitIdentity",
'COMPUTE.SERVERPOWERPOLICY': "compute.ServerPowerPolicy",
'COMPUTE.SERVERSETTING': "compute.ServerSetting",
'COMPUTE.VMEDIA': "compute.Vmedia",
'COND.ALARM': "cond.Alarm",
'COND.ALARMAGGREGATION': "cond.AlarmAggregation",
'COND.HCLSTATUS': "cond.HclStatus",
'COND.HCLSTATUSDETAIL': "cond.HclStatusDetail",
'COND.HCLSTATUSJOB': "cond.HclStatusJob",
'CONNECTORPACK.CONNECTORPACKUPGRADE': "connectorpack.ConnectorPackUpgrade",
'CONNECTORPACK.UPGRADEIMPACT': "connectorpack.UpgradeImpact",
'CONVERGEDINFRA.HEALTHCHECKDEFINITION': "convergedinfra.HealthCheckDefinition",
'CONVERGEDINFRA.HEALTHCHECKEXECUTION': "convergedinfra.HealthCheckExecution",
'CONVERGEDINFRA.POD': "convergedinfra.Pod",
'CRD.CUSTOMRESOURCE': "crd.CustomResource",
'DEVICECONNECTOR.POLICY': "deviceconnector.Policy",
'EQUIPMENT.CHASSIS': "equipment.Chassis",
'EQUIPMENT.CHASSISIDENTITY': "equipment.ChassisIdentity",
'EQUIPMENT.CHASSISOPERATION': "equipment.ChassisOperation",
'EQUIPMENT.DEVICESUMMARY': "equipment.DeviceSummary",
'EQUIPMENT.EXPANDERMODULE': "equipment.ExpanderModule",
'EQUIPMENT.FAN': "equipment.Fan",
'EQUIPMENT.FANCONTROL': "equipment.FanControl",
'EQUIPMENT.FANMODULE': "equipment.FanModule",
'EQUIPMENT.FEX': "equipment.Fex",
'EQUIPMENT.FEXIDENTITY': "equipment.FexIdentity",
'EQUIPMENT.FEXOPERATION': "equipment.FexOperation",
'EQUIPMENT.FRU': "equipment.Fru",
'EQUIPMENT.IDENTITYSUMMARY': "equipment.IdentitySummary",
'EQUIPMENT.IOCARD': "equipment.IoCard",
'EQUIPMENT.IOCARDOPERATION': "equipment.IoCardOperation",
'EQUIPMENT.IOEXPANDER': "equipment.IoExpander",
'EQUIPMENT.LOCATORLED': "equipment.LocatorLed",
'EQUIPMENT.PSU': "equipment.Psu",
'EQUIPMENT.PSUCONTROL': "equipment.PsuControl",
'EQUIPMENT.RACKENCLOSURE': "equipment.RackEnclosure",
'EQUIPMENT.RACKENCLOSURESLOT': "equipment.RackEnclosureSlot",
'EQUIPMENT.SHAREDIOMODULE': "equipment.SharedIoModule",
'EQUIPMENT.SWITCHCARD': "equipment.SwitchCard",
'EQUIPMENT.SYSTEMIOCONTROLLER': "equipment.SystemIoController",
'EQUIPMENT.TPM': "equipment.Tpm",
'EQUIPMENT.TRANSCEIVER': "equipment.Transceiver",
'ETHER.HOSTPORT': "ether.HostPort",
'ETHER.NETWORKPORT': "ether.NetworkPort",
'ETHER.PHYSICALPORT': "ether.PhysicalPort",
'ETHER.PORTCHANNEL': "ether.PortChannel",
'EXTERNALSITE.AUTHORIZATION': "externalsite.Authorization",
'FABRIC.APPLIANCEPCROLE': "fabric.AppliancePcRole",
'FABRIC.APPLIANCEROLE': "fabric.ApplianceRole",
'FABRIC.CONFIGCHANGEDETAIL': "fabric.ConfigChangeDetail",
'FABRIC.CONFIGRESULT': "fabric.ConfigResult",
'FABRIC.CONFIGRESULTENTRY': "fabric.ConfigResultEntry",
'FABRIC.ELEMENTIDENTITY': "fabric.ElementIdentity",
'FABRIC.ESTIMATEIMPACT': "fabric.EstimateImpact",
'FABRIC.ETHNETWORKCONTROLPOLICY': "fabric.EthNetworkControlPolicy",
'FABRIC.ETHNETWORKGROUPPOLICY': "fabric.EthNetworkGroupPolicy",
'FABRIC.ETHNETWORKPOLICY': "fabric.EthNetworkPolicy",
'FABRIC.FCNETWORKPOLICY': "fabric.FcNetworkPolicy",
'FABRIC.FCSTORAGEROLE': "fabric.FcStorageRole",
'FABRIC.FCUPLINKPCROLE': "fabric.FcUplinkPcRole",
'FABRIC.FCUPLINKROLE': "fabric.FcUplinkRole",
'FABRIC.FCOEUPLINKPCROLE': "fabric.FcoeUplinkPcRole",
'FABRIC.FCOEUPLINKROLE': "fabric.FcoeUplinkRole",
'FABRIC.FLOWCONTROLPOLICY': "fabric.FlowControlPolicy",
'FABRIC.LINKAGGREGATIONPOLICY': "fabric.LinkAggregationPolicy",
'FABRIC.LINKCONTROLPOLICY': "fabric.LinkControlPolicy",
'FABRIC.MULTICASTPOLICY': "fabric.MulticastPolicy",
'FABRIC.PCMEMBER': "fabric.PcMember",
'FABRIC.PCOPERATION': "fabric.PcOperation",
'FABRIC.PORTMODE': "fabric.PortMode",
'FABRIC.PORTOPERATION': "fabric.PortOperation",
'FABRIC.PORTPOLICY': "fabric.PortPolicy",
'FABRIC.SERVERROLE': "fabric.ServerRole",
'FABRIC.SWITCHCLUSTERPROFILE': "fabric.SwitchClusterProfile",
'FABRIC.SWITCHCONTROLPOLICY': "fabric.SwitchControlPolicy",
'FABRIC.SWITCHPROFILE': "fabric.SwitchProfile",
'FABRIC.SYSTEMQOSPOLICY': "fabric.SystemQosPolicy",
'FABRIC.UPLINKPCROLE': "fabric.UplinkPcRole",
'FABRIC.UPLINKROLE': "fabric.UplinkRole",
'FABRIC.VLAN': "fabric.Vlan",
'FABRIC.VSAN': "fabric.Vsan",
'FAULT.INSTANCE': "fault.Instance",
'FC.PHYSICALPORT': "fc.PhysicalPort",
'FC.PORTCHANNEL': "fc.PortChannel",
'FCPOOL.FCBLOCK': "fcpool.FcBlock",
'FCPOOL.LEASE': "fcpool.Lease",
'FCPOOL.POOL': "fcpool.Pool",
'FCPOOL.POOLMEMBER': "fcpool.PoolMember",
'FCPOOL.UNIVERSE': "fcpool.Universe",
'FEEDBACK.FEEDBACKPOST': "feedback.FeedbackPost",
'FIRMWARE.BIOSDESCRIPTOR': "firmware.BiosDescriptor",
'FIRMWARE.BOARDCONTROLLERDESCRIPTOR': "firmware.BoardControllerDescriptor",
'FIRMWARE.CHASSISUPGRADE': "firmware.ChassisUpgrade",
'FIRMWARE.CIMCDESCRIPTOR': "firmware.CimcDescriptor",
'FIRMWARE.DIMMDESCRIPTOR': "firmware.DimmDescriptor",
'FIRMWARE.DISTRIBUTABLE': "firmware.Distributable",
'FIRMWARE.DISTRIBUTABLEMETA': "firmware.DistributableMeta",
'FIRMWARE.DRIVEDESCRIPTOR': "firmware.DriveDescriptor",
'FIRMWARE.DRIVERDISTRIBUTABLE': "firmware.DriverDistributable",
'FIRMWARE.EULA': "firmware.Eula",
'FIRMWARE.FIRMWARESUMMARY': "firmware.FirmwareSummary",
'FIRMWARE.GPUDESCRIPTOR': "firmware.GpuDescriptor",
'FIRMWARE.HBADESCRIPTOR': "firmware.HbaDescriptor",
'FIRMWARE.IOMDESCRIPTOR': "firmware.IomDescriptor",
'FIRMWARE.MSWITCHDESCRIPTOR': "firmware.MswitchDescriptor",
'FIRMWARE.NXOSDESCRIPTOR': "firmware.NxosDescriptor",
'FIRMWARE.PCIEDESCRIPTOR': "firmware.PcieDescriptor",
'FIRMWARE.PSUDESCRIPTOR': "firmware.PsuDescriptor",
'FIRMWARE.RUNNINGFIRMWARE': "firmware.RunningFirmware",
'FIRMWARE.SASEXPANDERDESCRIPTOR': "firmware.SasExpanderDescriptor",
'FIRMWARE.SERVERCONFIGURATIONUTILITYDISTRIBUTABLE': "firmware.ServerConfigurationUtilityDistributable",
'FIRMWARE.STORAGECONTROLLERDESCRIPTOR': "firmware.StorageControllerDescriptor",
'FIRMWARE.SWITCHUPGRADE': "firmware.SwitchUpgrade",
'FIRMWARE.UNSUPPORTEDVERSIONUPGRADE': "firmware.UnsupportedVersionUpgrade",
'FIRMWARE.UPGRADE': "firmware.Upgrade",
'FIRMWARE.UPGRADEIMPACT': "firmware.UpgradeImpact",
'FIRMWARE.UPGRADEIMPACTSTATUS': "firmware.UpgradeImpactStatus",
'FIRMWARE.UPGRADESTATUS': "firmware.UpgradeStatus",
'FORECAST.CATALOG': "forecast.Catalog",
'FORECAST.DEFINITION': "forecast.Definition",
'FORECAST.INSTANCE': "forecast.Instance",
'GRAPHICS.CARD': "graphics.Card",
'GRAPHICS.CONTROLLER': "graphics.Controller",
'HCL.COMPATIBILITYSTATUS': "hcl.CompatibilityStatus",
'HCL.DRIVERIMAGE': "hcl.DriverImage",
'HCL.EXEMPTEDCATALOG': "hcl.ExemptedCatalog",
'HCL.HYPERFLEXSOFTWARECOMPATIBILITYINFO': "hcl.HyperflexSoftwareCompatibilityInfo",
'HCL.OPERATINGSYSTEM': "hcl.OperatingSystem",
'HCL.OPERATINGSYSTEMVENDOR': "hcl.OperatingSystemVendor",
'HCL.SUPPORTEDDRIVERNAME': "hcl.SupportedDriverName",
'HYPERFLEX.ALARM': "hyperflex.Alarm",
'HYPERFLEX.APPCATALOG': "hyperflex.AppCatalog",
'HYPERFLEX.AUTOSUPPORTPOLICY': "hyperflex.AutoSupportPolicy",
'HYPERFLEX.BACKUPCLUSTER': "hyperflex.BackupCluster",
'HYPERFLEX.CAPABILITYINFO': "hyperflex.CapabilityInfo",
'HYPERFLEX.CLUSTER': "hyperflex.Cluster",
'HYPERFLEX.CLUSTERBACKUPPOLICY': "hyperflex.ClusterBackupPolicy",
'HYPERFLEX.CLUSTERBACKUPPOLICYDEPLOYMENT': "hyperflex.ClusterBackupPolicyDeployment",
'HYPERFLEX.CLUSTERBACKUPPOLICYINVENTORY': "hyperflex.ClusterBackupPolicyInventory",
'HYPERFLEX.CLUSTERHEALTHCHECKEXECUTIONSNAPSHOT': "hyperflex.ClusterHealthCheckExecutionSnapshot",
'HYPERFLEX.CLUSTERNETWORKPOLICY': "hyperflex.ClusterNetworkPolicy",
'HYPERFLEX.CLUSTERPROFILE': "hyperflex.ClusterProfile",
'HYPERFLEX.CLUSTERREPLICATIONNETWORKPOLICY': "hyperflex.ClusterReplicationNetworkPolicy",
'HYPERFLEX.CLUSTERREPLICATIONNETWORKPOLICYDEPLOYMENT': "hyperflex.ClusterReplicationNetworkPolicyDeployment",
'HYPERFLEX.CLUSTERSTORAGEPOLICY': "hyperflex.ClusterStoragePolicy",
'HYPERFLEX.CONFIGRESULT': "hyperflex.ConfigResult",
'HYPERFLEX.CONFIGRESULTENTRY': "hyperflex.ConfigResultEntry",
'HYPERFLEX.DATAPROTECTIONPEER': "hyperflex.DataProtectionPeer",
'HYPERFLEX.DATASTORESTATISTIC': "hyperflex.DatastoreStatistic",
'HYPERFLEX.DEVICEPACKAGEDOWNLOADSTATE': "hyperflex.DevicePackageDownloadState",
'HYPERFLEX.DRIVE': "hyperflex.Drive",
'HYPERFLEX.EXTFCSTORAGEPOLICY': "hyperflex.ExtFcStoragePolicy",
'HYPERFLEX.EXTISCSISTORAGEPOLICY': "hyperflex.ExtIscsiStoragePolicy",
'HYPERFLEX.FEATURELIMITEXTERNAL': "hyperflex.FeatureLimitExternal",
'HYPERFLEX.FEATURELIMITINTERNAL': "hyperflex.FeatureLimitInternal",
'HYPERFLEX.HEALTH': "hyperflex.Health",
'HYPERFLEX.HEALTHCHECKDEFINITION': "hyperflex.HealthCheckDefinition",
'HYPERFLEX.HEALTHCHECKEXECUTION': "hyperflex.HealthCheckExecution",
'HYPERFLEX.HEALTHCHECKEXECUTIONSNAPSHOT': "hyperflex.HealthCheckExecutionSnapshot",
'HYPERFLEX.HEALTHCHECKPACKAGECHECKSUM': "hyperflex.HealthCheckPackageChecksum",
'HYPERFLEX.HXDPVERSION': "hyperflex.HxdpVersion",
'HYPERFLEX.LICENSE': "hyperflex.License",
'HYPERFLEX.LOCALCREDENTIALPOLICY': "hyperflex.LocalCredentialPolicy",
'HYPERFLEX.NODE': "hyperflex.Node",
'HYPERFLEX.NODECONFIGPOLICY': "hyperflex.NodeConfigPolicy",
'HYPERFLEX.NODEPROFILE': "hyperflex.NodeProfile",
'HYPERFLEX.PROTECTEDCLUSTER': "hyperflex.ProtectedCluster",
'HYPERFLEX.PROXYSETTINGPOLICY': "hyperflex.ProxySettingPolicy",
'HYPERFLEX.SERVERFIRMWAREVERSION': "hyperflex.ServerFirmwareVersion",
'HYPERFLEX.SERVERFIRMWAREVERSIONENTRY': "hyperflex.ServerFirmwareVersionEntry",
'HYPERFLEX.SERVERMODEL': "hyperflex.ServerModel",
'HYPERFLEX.SERVICEAUTHTOKEN': "hyperflex.ServiceAuthToken",
'HYPERFLEX.SOFTWAREDISTRIBUTIONCOMPONENT': "hyperflex.SoftwareDistributionComponent",
'HYPERFLEX.SOFTWAREDISTRIBUTIONENTRY': "hyperflex.SoftwareDistributionEntry",
'HYPERFLEX.SOFTWAREDISTRIBUTIONVERSION': "hyperflex.SoftwareDistributionVersion",
'HYPERFLEX.SOFTWAREVERSIONPOLICY': "hyperflex.SoftwareVersionPolicy",
'HYPERFLEX.STORAGECONTAINER': "hyperflex.StorageContainer",
'HYPERFLEX.SYSCONFIGPOLICY': "hyperflex.SysConfigPolicy",
'HYPERFLEX.UCSMCONFIGPOLICY': "hyperflex.UcsmConfigPolicy",
'HYPERFLEX.VCENTERCONFIGPOLICY': "hyperflex.VcenterConfigPolicy",
'HYPERFLEX.VMBACKUPINFO': "hyperflex.VmBackupInfo",
'HYPERFLEX.VMIMPORTOPERATION': "hyperflex.VmImportOperation",
'HYPERFLEX.VMRESTOREOPERATION': "hyperflex.VmRestoreOperation",
'HYPERFLEX.VMSNAPSHOTINFO': "hyperflex.VmSnapshotInfo",
'HYPERFLEX.VOLUME': "hyperflex.Volume",
'HYPERFLEX.WITNESSCONFIGURATION': "hyperflex.WitnessConfiguration",
'IAAS.CONNECTORPACK': "iaas.ConnectorPack",
'IAAS.DEVICESTATUS': "iaas.DeviceStatus",
'IAAS.DIAGNOSTICMESSAGES': "iaas.DiagnosticMessages",
'IAAS.LICENSEINFO': "iaas.LicenseInfo",
'IAAS.MOSTRUNTASKS': "iaas.MostRunTasks",
'IAAS.SERVICEREQUEST': "iaas.ServiceRequest",
'IAAS.UCSDINFO': "iaas.UcsdInfo",
'IAAS.UCSDMANAGEDINFRA': "iaas.UcsdManagedInfra",
'IAAS.UCSDMESSAGES': "iaas.UcsdMessages",
'IAM.ACCOUNT': "iam.Account",
'IAM.ACCOUNTEXPERIENCE': "iam.AccountExperience",
'IAM.APIKEY': "iam.ApiKey",
'IAM.APPREGISTRATION': "iam.AppRegistration",
'IAM.BANNERMESSAGE': "iam.BannerMessage",
'IAM.CERTIFICATE': "iam.Certificate",
'IAM.CERTIFICATEREQUEST': "iam.CertificateRequest",
'IAM.DOMAINGROUP': "iam.DomainGroup",
'IAM.ENDPOINTPRIVILEGE': "iam.EndPointPrivilege",
'IAM.ENDPOINTROLE': "iam.EndPointRole",
'IAM.ENDPOINTUSER': "iam.EndPointUser",
'IAM.ENDPOINTUSERPOLICY': "iam.EndPointUserPolicy",
'IAM.ENDPOINTUSERROLE': "iam.EndPointUserRole",
'IAM.IDP': "iam.Idp",
'IAM.IDPREFERENCE': "iam.IdpReference",
'IAM.IPACCESSMANAGEMENT': "iam.IpAccessManagement",
'IAM.IPADDRESS': "iam.IpAddress",
'IAM.LDAPGROUP': "iam.LdapGroup",
'IAM.LDAPPOLICY': "iam.LdapPolicy",
'IAM.LDAPPROVIDER': "iam.LdapProvider",
'IAM.LOCALUSERPASSWORD': "iam.LocalUserPassword",
'IAM.LOCALUSERPASSWORDPOLICY': "iam.LocalUserPasswordPolicy",
'IAM.OAUTHTOKEN': "iam.OAuthToken",
'IAM.PERMISSION': "iam.Permission",
'IAM.PRIVATEKEYSPEC': "iam.PrivateKeySpec",
'IAM.PRIVILEGE': "iam.Privilege",
'IAM.PRIVILEGESET': "iam.PrivilegeSet",
'IAM.QUALIFIER': "iam.Qualifier",
'IAM.RESOURCELIMITS': "iam.ResourceLimits",
'IAM.RESOURCEPERMISSION': "iam.ResourcePermission",
'IAM.RESOURCEROLES': "iam.ResourceRoles",
'IAM.ROLE': "iam.Role",
'IAM.SECURITYHOLDER': "iam.SecurityHolder",
'IAM.SERVICEPROVIDER': "iam.ServiceProvider",
'IAM.SESSION': "iam.Session",
'IAM.SESSIONLIMITS': "iam.SessionLimits",
'IAM.SYSTEM': "iam.System",
'IAM.TRUSTPOINT': "iam.TrustPoint",
'IAM.USER': "iam.User",
'IAM.USERGROUP': "iam.UserGroup",
'IAM.USERPREFERENCE': "iam.UserPreference",
'INVENTORY.DEVICEINFO': "inventory.DeviceInfo",
'INVENTORY.DNMOBINDING': "inventory.DnMoBinding",
'INVENTORY.GENERICINVENTORY': "inventory.GenericInventory",
'INVENTORY.GENERICINVENTORYHOLDER': "inventory.GenericInventoryHolder",
'INVENTORY.REQUEST': "inventory.Request",
'IPMIOVERLAN.POLICY': "ipmioverlan.Policy",
'IPPOOL.BLOCKLEASE': "ippool.BlockLease",
'IPPOOL.IPLEASE': "ippool.IpLease",
'IPPOOL.POOL': "ippool.Pool",
'IPPOOL.POOLMEMBER': "ippool.PoolMember",
'IPPOOL.SHADOWBLOCK': "ippool.ShadowBlock",
'IPPOOL.SHADOWPOOL': "ippool.ShadowPool",
'IPPOOL.UNIVERSE': "ippool.Universe",
'IQNPOOL.BLOCK': "iqnpool.Block",
'IQNPOOL.LEASE': "iqnpool.Lease",
'IQNPOOL.POOL': "iqnpool.Pool",
'IQNPOOL.POOLMEMBER': "iqnpool.PoolMember",
'IQNPOOL.UNIVERSE': "iqnpool.Universe",
'IWOTENANT.TENANTSTATUS': "iwotenant.TenantStatus",
'KUBERNETES.ACICNIAPIC': "kubernetes.AciCniApic",
'KUBERNETES.ACICNIPROFILE': "kubernetes.AciCniProfile",
'KUBERNETES.ACICNITENANTCLUSTERALLOCATION': "kubernetes.AciCniTenantClusterAllocation",
'KUBERNETES.ADDONDEFINITION': "kubernetes.AddonDefinition",
'KUBERNETES.ADDONPOLICY': "kubernetes.AddonPolicy",
'KUBERNETES.ADDONREPOSITORY': "kubernetes.AddonRepository",
'KUBERNETES.BAREMETALNODEPROFILE': "kubernetes.BaremetalNodeProfile",
'KUBERNETES.CATALOG': "kubernetes.Catalog",
'KUBERNETES.CLUSTER': "kubernetes.Cluster",
'KUBERNETES.CLUSTERADDONPROFILE': "kubernetes.ClusterAddonProfile",
'KUBERNETES.CLUSTERPROFILE': "kubernetes.ClusterProfile",
'KUBERNETES.CONFIGRESULT': "kubernetes.ConfigResult",
'KUBERNETES.CONFIGRESULTENTRY': "kubernetes.ConfigResultEntry",
'KUBERNETES.CONTAINERRUNTIMEPOLICY': "kubernetes.ContainerRuntimePolicy",
'KUBERNETES.DAEMONSET': "kubernetes.DaemonSet",
'KUBERNETES.DEPLOYMENT': "kubernetes.Deployment",
'KUBERNETES.INGRESS': "kubernetes.Ingress",
'KUBERNETES.NETWORKPOLICY': "kubernetes.NetworkPolicy",
'KUBERNETES.NODE': "kubernetes.Node",
'KUBERNETES.NODEGROUPPROFILE': "kubernetes.NodeGroupProfile",
'KUBERNETES.POD': "kubernetes.Pod",
'KUBERNETES.SERVICE': "kubernetes.Service",
'KUBERNETES.STATEFULSET': "kubernetes.StatefulSet",
'KUBERNETES.SYSCONFIGPOLICY': "kubernetes.SysConfigPolicy",
'KUBERNETES.TRUSTEDREGISTRIESPOLICY': "kubernetes.TrustedRegistriesPolicy",
'KUBERNETES.VERSION': "kubernetes.Version",
'KUBERNETES.VERSIONPOLICY': "kubernetes.VersionPolicy",
'KUBERNETES.VIRTUALMACHINEINFRACONFIGPOLICY': "kubernetes.VirtualMachineInfraConfigPolicy",
'KUBERNETES.VIRTUALMACHINEINFRASTRUCTUREPROVIDER': "kubernetes.VirtualMachineInfrastructureProvider",
'KUBERNETES.VIRTUALMACHINEINSTANCETYPE': "kubernetes.VirtualMachineInstanceType",
'KUBERNETES.VIRTUALMACHINENODEPROFILE': "kubernetes.VirtualMachineNodeProfile",
'KVM.POLICY': "kvm.Policy",
'KVM.SESSION': "kvm.Session",
'KVM.TUNNEL': "kvm.Tunnel",
'LICENSE.ACCOUNTLICENSEDATA': "license.AccountLicenseData",
'LICENSE.CUSTOMEROP': "license.CustomerOp",
'LICENSE.IKSCUSTOMEROP': "license.IksCustomerOp",
'LICENSE.IKSLICENSECOUNT': "license.IksLicenseCount",
'LICENSE.IWOCUSTOMEROP': "license.IwoCustomerOp",
'LICENSE.IWOLICENSECOUNT': "license.IwoLicenseCount",
'LICENSE.LICENSEINFO': "license.LicenseInfo",
'LICENSE.LICENSERESERVATIONOP': "license.LicenseReservationOp",
'LICENSE.SMARTLICENSETOKEN': "license.SmartlicenseToken",
'LS.SERVICEPROFILE': "ls.ServiceProfile",
'MACPOOL.IDBLOCK': "macpool.IdBlock",
'MACPOOL.LEASE': "macpool.Lease",
'MACPOOL.POOL': "macpool.Pool",
'MACPOOL.POOLMEMBER': "macpool.PoolMember",
'MACPOOL.UNIVERSE': "macpool.Universe",
'MANAGEMENT.CONTROLLER': "management.Controller",
'MANAGEMENT.ENTITY': "management.Entity",
'MANAGEMENT.INTERFACE': "management.Interface",
'MEMORY.ARRAY': "memory.Array",
'MEMORY.PERSISTENTMEMORYCONFIGRESULT': "memory.PersistentMemoryConfigResult",
'MEMORY.PERSISTENTMEMORYCONFIGURATION': "memory.PersistentMemoryConfiguration",
'MEMORY.PERSISTENTMEMORYNAMESPACE': "memory.PersistentMemoryNamespace",
'MEMORY.PERSISTENTMEMORYNAMESPACECONFIGRESULT': "memory.PersistentMemoryNamespaceConfigResult",
'MEMORY.PERSISTENTMEMORYPOLICY': "memory.PersistentMemoryPolicy",
'MEMORY.PERSISTENTMEMORYREGION': "memory.PersistentMemoryRegion",
'MEMORY.PERSISTENTMEMORYUNIT': "memory.PersistentMemoryUnit",
'MEMORY.UNIT': "memory.Unit",
'META.DEFINITION': "meta.Definition",
'NETWORK.ELEMENT': "network.Element",
'NETWORK.ELEMENTSUMMARY': "network.ElementSummary",
'NETWORK.FCZONEINFO': "network.FcZoneInfo",
'NETWORK.VLANPORTINFO': "network.VlanPortInfo",
'NETWORKCONFIG.POLICY': "networkconfig.Policy",
'NIAAPI.APICCCOPOST': "niaapi.ApicCcoPost",
'NIAAPI.APICFIELDNOTICE': "niaapi.ApicFieldNotice",
'NIAAPI.APICHWEOL': "niaapi.ApicHweol",
'NIAAPI.APICLATESTMAINTAINEDRELEASE': "niaapi.ApicLatestMaintainedRelease",
'NIAAPI.APICRELEASERECOMMEND': "niaapi.ApicReleaseRecommend",
'NIAAPI.APICSWEOL': "niaapi.ApicSweol",
'NIAAPI.DCNMCCOPOST': "niaapi.DcnmCcoPost",
'NIAAPI.DCNMFIELDNOTICE': "niaapi.DcnmFieldNotice",
'NIAAPI.DCNMHWEOL': "niaapi.DcnmHweol",
'NIAAPI.DCNMLATESTMAINTAINEDRELEASE': "niaapi.DcnmLatestMaintainedRelease",
'NIAAPI.DCNMRELEASERECOMMEND': "niaapi.DcnmReleaseRecommend",
'NIAAPI.DCNMSWEOL': "niaapi.DcnmSweol",
'NIAAPI.FILEDOWNLOADER': "niaapi.FileDownloader",
'NIAAPI.NIAMETADATA': "niaapi.NiaMetadata",
'NIAAPI.NIBFILEDOWNLOADER': "niaapi.NibFileDownloader",
'NIAAPI.NIBMETADATA': "niaapi.NibMetadata",
'NIAAPI.VERSIONREGEX': "niaapi.VersionRegex",
'NIATELEMETRY.AAALDAPPROVIDERDETAILS': "niatelemetry.AaaLdapProviderDetails",
'NIATELEMETRY.AAARADIUSPROVIDERDETAILS': "niatelemetry.AaaRadiusProviderDetails",
'NIATELEMETRY.AAATACACSPROVIDERDETAILS': "niatelemetry.AaaTacacsProviderDetails",
'NIATELEMETRY.APICAPPPLUGINDETAILS': "niatelemetry.ApicAppPluginDetails",
'NIATELEMETRY.APICCOREFILEDETAILS': "niatelemetry.ApicCoreFileDetails",
'NIATELEMETRY.APICDBGEXPRSEXPORTDEST': "niatelemetry.ApicDbgexpRsExportDest",
'NIATELEMETRY.APICDBGEXPRSTSSCHEDULER': "niatelemetry.ApicDbgexpRsTsScheduler",
'NIATELEMETRY.APICFANDETAILS': "niatelemetry.ApicFanDetails",
'NIATELEMETRY.APICFEXDETAILS': "niatelemetry.ApicFexDetails",
'NIATELEMETRY.APICFLASHDETAILS': "niatelemetry.ApicFlashDetails",
'NIATELEMETRY.APICNTPAUTH': "niatelemetry.ApicNtpAuth",
'NIATELEMETRY.APICPSUDETAILS': "niatelemetry.ApicPsuDetails",
'NIATELEMETRY.APICREALMDETAILS': "niatelemetry.ApicRealmDetails",
'NIATELEMETRY.APICSNMPCLIENTGRPDETAILS': "niatelemetry.ApicSnmpClientGrpDetails",
'NIATELEMETRY.APICSNMPCOMMUNITYACCESSDETAILS': "niatelemetry.ApicSnmpCommunityAccessDetails",
'NIATELEMETRY.APICSNMPCOMMUNITYDETAILS': "niatelemetry.ApicSnmpCommunityDetails",
'NIATELEMETRY.APICSNMPTRAPDETAILS': "niatelemetry.ApicSnmpTrapDetails",
'NIATELEMETRY.APICSNMPTRAPFWDSERVERDETAILS': "niatelemetry.ApicSnmpTrapFwdServerDetails",
'NIATELEMETRY.APICSNMPVERSIONTHREEDETAILS': "niatelemetry.ApicSnmpVersionThreeDetails",
'NIATELEMETRY.APICSYSLOGGRP': "niatelemetry.ApicSysLogGrp",
'NIATELEMETRY.APICSYSLOGSRC': "niatelemetry.ApicSysLogSrc",
'NIATELEMETRY.APICTRANSCEIVERDETAILS': "niatelemetry.ApicTransceiverDetails",
'NIATELEMETRY.APICUIPAGECOUNTS': "niatelemetry.ApicUiPageCounts",
'NIATELEMETRY.APPDETAILS': "niatelemetry.AppDetails",
'NIATELEMETRY.COMMONPOLICIES': "niatelemetry.CommonPolicies",
'NIATELEMETRY.DCNMFANDETAILS': "niatelemetry.DcnmFanDetails",
'NIATELEMETRY.DCNMFEXDETAILS': "niatelemetry.DcnmFexDetails",
'NIATELEMETRY.DCNMMODULEDETAILS': "niatelemetry.DcnmModuleDetails",
'NIATELEMETRY.DCNMPSUDETAILS': "niatelemetry.DcnmPsuDetails",
'NIATELEMETRY.DCNMTRANSCEIVERDETAILS': "niatelemetry.DcnmTransceiverDetails",
'NIATELEMETRY.EPG': "niatelemetry.Epg",
'NIATELEMETRY.FABRICMODULEDETAILS': "niatelemetry.FabricModuleDetails",
'NIATELEMETRY.FABRICPODPROFILE': "niatelemetry.FabricPodProfile",
'NIATELEMETRY.FABRICPODSS': "niatelemetry.FabricPodSs",
'NIATELEMETRY.FAULT': "niatelemetry.Fault",
'NIATELEMETRY.HTTPSACLCONTRACTDETAILS': "niatelemetry.HttpsAclContractDetails",
'NIATELEMETRY.HTTPSACLCONTRACTFILTERMAP': "niatelemetry.HttpsAclContractFilterMap",
'NIATELEMETRY.HTTPSACLEPGCONTRACTMAP': "niatelemetry.HttpsAclEpgContractMap",
'NIATELEMETRY.HTTPSACLEPGDETAILS': "niatelemetry.HttpsAclEpgDetails",
'NIATELEMETRY.HTTPSACLFILTERDETAILS': "niatelemetry.HttpsAclFilterDetails",
'NIATELEMETRY.LC': "niatelemetry.Lc",
'NIATELEMETRY.MSOCONTRACTDETAILS': "niatelemetry.MsoContractDetails",
'NIATELEMETRY.MSOEPGDETAILS': "niatelemetry.MsoEpgDetails",
'NIATELEMETRY.MSOSCHEMADETAILS': "niatelemetry.MsoSchemaDetails",
'NIATELEMETRY.MSOSITEDETAILS': "niatelemetry.MsoSiteDetails",
'NIATELEMETRY.MSOTENANTDETAILS': "niatelemetry.MsoTenantDetails",
'NIATELEMETRY.NEXUSDASHBOARDCONTROLLERDETAILS': "niatelemetry.NexusDashboardControllerDetails",
'NIATELEMETRY.NEXUSDASHBOARDDETAILS': "niatelemetry.NexusDashboardDetails",
'NIATELEMETRY.NEXUSDASHBOARDMEMORYDETAILS': "niatelemetry.NexusDashboardMemoryDetails",
'NIATELEMETRY.NEXUSDASHBOARDS': "niatelemetry.NexusDashboards",
'NIATELEMETRY.NIAFEATUREUSAGE': "niatelemetry.NiaFeatureUsage",
'NIATELEMETRY.NIAINVENTORY': "niatelemetry.NiaInventory",
'NIATELEMETRY.NIAINVENTORYDCNM': "niatelemetry.NiaInventoryDcnm",
'NIATELEMETRY.NIAINVENTORYFABRIC': "niatelemetry.NiaInventoryFabric",
'NIATELEMETRY.NIALICENSESTATE': "niatelemetry.NiaLicenseState",
'NIATELEMETRY.PASSWORDSTRENGTHCHECK': "niatelemetry.PasswordStrengthCheck",
'NIATELEMETRY.PODCOMMPOLICIES': "niatelemetry.PodCommPolicies",
'NIATELEMETRY.PODSNMPPOLICIES': "niatelemetry.PodSnmpPolicies",
'NIATELEMETRY.PODTIMESERVERPOLICIES': "niatelemetry.PodTimeServerPolicies",
'NIATELEMETRY.SITEINVENTORY': "niatelemetry.SiteInventory",
'NIATELEMETRY.SNMPSRC': "niatelemetry.SnmpSrc",
'NIATELEMETRY.SSHVERSIONTWO': "niatelemetry.SshVersionTwo",
'NIATELEMETRY.SUPERVISORMODULEDETAILS': "niatelemetry.SupervisorModuleDetails",
'NIATELEMETRY.SYSLOGREMOTEDEST': "niatelemetry.SyslogRemoteDest",
'NIATELEMETRY.SYSLOGSYSMSG': "niatelemetry.SyslogSysMsg",
'NIATELEMETRY.SYSLOGSYSMSGFACFILTER': "niatelemetry.SyslogSysMsgFacFilter",
'NIATELEMETRY.SYSTEMCONTROLLERDETAILS': "niatelemetry.SystemControllerDetails",
'NIATELEMETRY.TENANT': "niatelemetry.Tenant",
'NOTIFICATION.ACCOUNTSUBSCRIPTION': "notification.AccountSubscription",
'NTP.POLICY': "ntp.Policy",
'OAUTH.ACCESSTOKEN': "oauth.AccessToken",
'OAUTH.AUTHORIZATION': "oauth.Authorization",
'OPRS.DEPLOYMENT': "oprs.Deployment",
'OPRS.SYNCTARGETLISTMESSAGE': "oprs.SyncTargetListMessage",
'ORGANIZATION.ORGANIZATION': "organization.Organization",
'OS.BULKINSTALLINFO': "os.BulkInstallInfo",
'OS.CATALOG': "os.Catalog",
'OS.CONFIGURATIONFILE': "os.ConfigurationFile",
'OS.DISTRIBUTION': "os.Distribution",
'OS.INSTALL': "os.Install",
'OS.OSSUPPORT': "os.OsSupport",
'OS.SUPPORTEDVERSION': "os.SupportedVersion",
'OS.TEMPLATEFILE': "os.TemplateFile",
'OS.VALIDINSTALLTARGET': "os.ValidInstallTarget",
'PCI.COPROCESSORCARD': "pci.CoprocessorCard",
'PCI.DEVICE': "pci.Device",
'PCI.LINK': "pci.Link",
'PCI.SWITCH': "pci.Switch",
'PORT.GROUP': "port.Group",
'PORT.MACBINDING': "port.MacBinding",
'PORT.SUBGROUP': "port.SubGroup",
'POWER.CONTROLSTATE': "power.ControlState",
'POWER.POLICY': "power.Policy",
'PROCESSOR.UNIT': "processor.Unit",
'RACK.UNITPERSONALITY': "rack.UnitPersonality",
'RECOMMENDATION.CAPACITYRUNWAY': "recommendation.CapacityRunway",
'RECOMMENDATION.PHYSICALITEM': "recommendation.PhysicalItem",
'RECOVERY.BACKUPCONFIGPOLICY': "recovery.BackupConfigPolicy",
'RECOVERY.BACKUPPROFILE': "recovery.BackupProfile",
'RECOVERY.CONFIGRESULT': "recovery.ConfigResult",
'RECOVERY.CONFIGRESULTENTRY': "recovery.ConfigResultEntry",
'RECOVERY.ONDEMANDBACKUP': "recovery.OnDemandBackup",
'RECOVERY.RESTORE': "recovery.Restore",
'RECOVERY.SCHEDULECONFIGPOLICY': "recovery.ScheduleConfigPolicy",
'RESOURCE.GROUP': "resource.Group",
'RESOURCE.GROUPMEMBER': "resource.GroupMember",
'RESOURCE.LICENSERESOURCECOUNT': "resource.LicenseResourceCount",
'RESOURCE.MEMBERSHIP': "resource.Membership",
'RESOURCE.MEMBERSHIPHOLDER': "resource.MembershipHolder",
'RESOURCE.RESERVATION': "resource.Reservation",
'RESOURCEPOOL.LEASE': "resourcepool.Lease",
'RESOURCEPOOL.LEASERESOURCE': "resourcepool.LeaseResource",
'RESOURCEPOOL.POOL': "resourcepool.Pool",
'RESOURCEPOOL.POOLMEMBER': "resourcepool.PoolMember",
'RESOURCEPOOL.UNIVERSE': "resourcepool.Universe",
'RPROXY.REVERSEPROXY': "rproxy.ReverseProxy",
'SDCARD.POLICY': "sdcard.Policy",
'SDWAN.PROFILE': "sdwan.Profile",
'SDWAN.ROUTERNODE': "sdwan.RouterNode",
'SDWAN.ROUTERPOLICY': "sdwan.RouterPolicy",
'SDWAN.VMANAGEACCOUNTPOLICY': "sdwan.VmanageAccountPolicy",
'SEARCH.SEARCHITEM': "search.SearchItem",
'SEARCH.TAGITEM': "search.TagItem",
'SECURITY.UNIT': "security.Unit",
'SERVER.CONFIGCHANGEDETAIL': "server.ConfigChangeDetail",
'SERVER.CONFIGIMPORT': "server.ConfigImport",
'SERVER.CONFIGRESULT': "server.ConfigResult",
'SERVER.CONFIGRESULTENTRY': "server.ConfigResultEntry",
'SERVER.PROFILE': "server.Profile",
'SERVER.PROFILETEMPLATE': "server.ProfileTemplate",
'SMTP.POLICY': "smtp.Policy",
'SNMP.POLICY': "snmp.Policy",
'SOFTWARE.APPLIANCEDISTRIBUTABLE': "software.ApplianceDistributable",
'SOFTWARE.DOWNLOADHISTORY': "software.DownloadHistory",
'SOFTWARE.HCLMETA': "software.HclMeta",
'SOFTWARE.HYPERFLEXBUNDLEDISTRIBUTABLE': "software.HyperflexBundleDistributable",
'SOFTWARE.HYPERFLEXDISTRIBUTABLE': "software.HyperflexDistributable",
'SOFTWARE.RELEASEMETA': "software.ReleaseMeta",
'SOFTWARE.SOLUTIONDISTRIBUTABLE': "software.SolutionDistributable",
'SOFTWARE.UCSDBUNDLEDISTRIBUTABLE': "software.UcsdBundleDistributable",
'SOFTWARE.UCSDDISTRIBUTABLE': "software.UcsdDistributable",
'SOFTWAREREPOSITORY.AUTHORIZATION': "softwarerepository.Authorization",
'SOFTWAREREPOSITORY.CACHEDIMAGE': "softwarerepository.CachedImage",
'SOFTWAREREPOSITORY.CATALOG': "softwarerepository.Catalog",
'SOFTWAREREPOSITORY.CATEGORYMAPPER': "softwarerepository.CategoryMapper",
'SOFTWAREREPOSITORY.CATEGORYMAPPERMODEL': "softwarerepository.CategoryMapperModel",
'SOFTWAREREPOSITORY.CATEGORYSUPPORTCONSTRAINT': "softwarerepository.CategorySupportConstraint",
'SOFTWAREREPOSITORY.DOWNLOADSPEC': "softwarerepository.DownloadSpec",
'SOFTWAREREPOSITORY.OPERATINGSYSTEMFILE': "softwarerepository.OperatingSystemFile",
'SOFTWAREREPOSITORY.RELEASE': "softwarerepository.Release",
'SOL.POLICY': "sol.Policy",
'SSH.POLICY': "ssh.Policy",
'STORAGE.CONTROLLER': "storage.Controller",
'STORAGE.DISKGROUP': "storage.DiskGroup",
'STORAGE.DISKSLOT': "storage.DiskSlot",
'STORAGE.DRIVEGROUP': "storage.DriveGroup",
'STORAGE.ENCLOSURE': "storage.Enclosure",
'STORAGE.ENCLOSUREDISK': "storage.EnclosureDisk",
'STORAGE.ENCLOSUREDISKSLOTEP': "storage.EnclosureDiskSlotEp",
'STORAGE.FLEXFLASHCONTROLLER': "storage.FlexFlashController",
'STORAGE.FLEXFLASHCONTROLLERPROPS': "storage.FlexFlashControllerProps",
'STORAGE.FLEXFLASHPHYSICALDRIVE': "storage.FlexFlashPhysicalDrive",
'STORAGE.FLEXFLASHVIRTUALDRIVE': "storage.FlexFlashVirtualDrive",
'STORAGE.FLEXUTILCONTROLLER': "storage.FlexUtilController",
'STORAGE.FLEXUTILPHYSICALDRIVE': "storage.FlexUtilPhysicalDrive",
'STORAGE.FLEXUTILVIRTUALDRIVE': "storage.FlexUtilVirtualDrive",
'STORAGE.HITACHIARRAY': "storage.HitachiArray",
'STORAGE.HITACHICONTROLLER': "storage.HitachiController",
'STORAGE.HITACHIDISK': "storage.HitachiDisk",
'STORAGE.HITACHIHOST': "storage.HitachiHost",
'STORAGE.HITACHIHOSTLUN': "storage.HitachiHostLun",
'STORAGE.HITACHIPARITYGROUP': "storage.HitachiParityGroup",
'STORAGE.HITACHIPOOL': "storage.HitachiPool",
'STORAGE.HITACHIPORT': "storage.HitachiPort",
'STORAGE.HITACHIVOLUME': "storage.HitachiVolume",
'STORAGE.HYPERFLEXSTORAGECONTAINER': "storage.HyperFlexStorageContainer",
'STORAGE.HYPERFLEXVOLUME': "storage.HyperFlexVolume",
'STORAGE.ITEM': "storage.Item",
'STORAGE.NETAPPAGGREGATE': "storage.NetAppAggregate",
'STORAGE.NETAPPBASEDISK': "storage.NetAppBaseDisk",
'STORAGE.NETAPPCLUSTER': "storage.NetAppCluster",
'STORAGE.NETAPPETHERNETPORT': "storage.NetAppEthernetPort",
'STORAGE.NETAPPEXPORTPOLICY': "storage.NetAppExportPolicy",
'STORAGE.NETAPPFCINTERFACE': "storage.NetAppFcInterface",
'STORAGE.NETAPPFCPORT': "storage.NetAppFcPort",
'STORAGE.NETAPPINITIATORGROUP': "storage.NetAppInitiatorGroup",
'STORAGE.NETAPPIPINTERFACE': "storage.NetAppIpInterface",
'STORAGE.NETAPPLICENSE': "storage.NetAppLicense",
'STORAGE.NETAPPLUN': "storage.NetAppLun",
'STORAGE.NETAPPLUNMAP': "storage.NetAppLunMap",
'STORAGE.NETAPPNODE': "storage.NetAppNode",
'STORAGE.NETAPPNTPSERVER': "storage.NetAppNtpServer",
'STORAGE.NETAPPSENSOR': "storage.NetAppSensor",
'STORAGE.NETAPPSTORAGEVM': "storage.NetAppStorageVm",
'STORAGE.NETAPPVOLUME': "storage.NetAppVolume",
'STORAGE.NETAPPVOLUMESNAPSHOT': "storage.NetAppVolumeSnapshot",
'STORAGE.PHYSICALDISK': "storage.PhysicalDisk",
'STORAGE.PHYSICALDISKEXTENSION': "storage.PhysicalDiskExtension",
'STORAGE.PHYSICALDISKUSAGE': "storage.PhysicalDiskUsage",
'STORAGE.PUREARRAY': "storage.PureArray",
'STORAGE.PURECONTROLLER': "storage.PureController",
'STORAGE.PUREDISK': "storage.PureDisk",
'STORAGE.PUREHOST': "storage.PureHost",
'STORAGE.PUREHOSTGROUP': "storage.PureHostGroup",
'STORAGE.PUREHOSTLUN': "storage.PureHostLun",
'STORAGE.PUREPORT': "storage.PurePort",
'STORAGE.PUREPROTECTIONGROUP': "storage.PureProtectionGroup",
'STORAGE.PUREPROTECTIONGROUPSNAPSHOT': "storage.PureProtectionGroupSnapshot",
'STORAGE.PUREREPLICATIONSCHEDULE': "storage.PureReplicationSchedule",
'STORAGE.PURESNAPSHOTSCHEDULE': "storage.PureSnapshotSchedule",
'STORAGE.PUREVOLUME': "storage.PureVolume",
'STORAGE.PUREVOLUMESNAPSHOT': "storage.PureVolumeSnapshot",
'STORAGE.SASEXPANDER': "storage.SasExpander",
'STORAGE.SASPORT': "storage.SasPort",
'STORAGE.SPAN': "storage.Span",
'STORAGE.STORAGEPOLICY': "storage.StoragePolicy",
'STORAGE.VDMEMBEREP': "storage.VdMemberEp",
'STORAGE.VIRTUALDRIVE': "storage.VirtualDrive",
'STORAGE.VIRTUALDRIVECONTAINER': "storage.VirtualDriveContainer",
'STORAGE.VIRTUALDRIVEEXTENSION': "storage.VirtualDriveExtension",
'STORAGE.VIRTUALDRIVEIDENTITY': "storage.VirtualDriveIdentity",
'SYSLOG.POLICY': "syslog.Policy",
'TAM.ADVISORYCOUNT': "tam.AdvisoryCount",
'TAM.ADVISORYDEFINITION': "tam.AdvisoryDefinition",
'TAM.ADVISORYINFO': "tam.AdvisoryInfo",
'TAM.ADVISORYINSTANCE': "tam.AdvisoryInstance",
'TAM.SECURITYADVISORY': "tam.SecurityAdvisory",
'TASK.HITACHISCOPEDINVENTORY': "task.HitachiScopedInventory",
'TASK.HYPERFLEXSCOPEDINVENTORY': "task.HyperflexScopedInventory",
'TASK.IWESCOPEDINVENTORY': "task.IweScopedInventory",
'TASK.NETAPPSCOPEDINVENTORY': "task.NetAppScopedInventory",
'TASK.PUBLICCLOUDSCOPEDINVENTORY': "task.PublicCloudScopedInventory",
'TASK.PURESCOPEDINVENTORY': "task.PureScopedInventory",
'TASK.SERVERSCOPEDINVENTORY': "task.ServerScopedInventory",
'TECHSUPPORTMANAGEMENT.COLLECTIONCONTROLPOLICY': "techsupportmanagement.CollectionControlPolicy",
'TECHSUPPORTMANAGEMENT.DOWNLOAD': "techsupportmanagement.Download",
'TECHSUPPORTMANAGEMENT.TECHSUPPORTBUNDLE': "techsupportmanagement.TechSupportBundle",
'TECHSUPPORTMANAGEMENT.TECHSUPPORTSTATUS': "techsupportmanagement.TechSupportStatus",
'TERMINAL.AUDITLOG': "terminal.AuditLog",
'TERRAFORM.EXECUTOR': "terraform.Executor",
'THERMAL.POLICY': "thermal.Policy",
'TOP.SYSTEM': "top.System",
'UCSD.BACKUPINFO': "ucsd.BackupInfo",
'UUIDPOOL.BLOCK': "uuidpool.Block",
'UUIDPOOL.POOL': "uuidpool.Pool",
'UUIDPOOL.POOLMEMBER': "uuidpool.PoolMember",
'UUIDPOOL.UNIVERSE': "uuidpool.Universe",
'UUIDPOOL.UUIDLEASE': "uuidpool.UuidLease",
'VIRTUALIZATION.CISCOHYPERVISORMANAGER': "virtualization.CiscoHypervisorManager",
'VIRTUALIZATION.ESXICONSOLE': "virtualization.EsxiConsole",
'VIRTUALIZATION.HOST': "virtualization.Host",
'VIRTUALIZATION.IWECLUSTER': "virtualization.IweCluster",
'VIRTUALIZATION.IWEDATACENTER': "virtualization.IweDatacenter",
'VIRTUALIZATION.IWEDVUPLINK': "virtualization.IweDvUplink",
'VIRTUALIZATION.IWEDVSWITCH': "virtualization.IweDvswitch",
'VIRTUALIZATION.IWEHOST': "virtualization.IweHost",
'VIRTUALIZATION.IWEHOSTINTERFACE': "virtualization.IweHostInterface",
'VIRTUALIZATION.IWEHOSTVSWITCH': "virtualization.IweHostVswitch",
'VIRTUALIZATION.IWENETWORK': "virtualization.IweNetwork",
'VIRTUALIZATION.IWEVIRTUALDISK': "virtualization.IweVirtualDisk",
'VIRTUALIZATION.IWEVIRTUALMACHINE': "virtualization.IweVirtualMachine",
'VIRTUALIZATION.IWEVIRTUALMACHINENETWORKINTERFACE': "virtualization.IweVirtualMachineNetworkInterface",
'VIRTUALIZATION.VIRTUALDISK': "virtualization.VirtualDisk",
'VIRTUALIZATION.VIRTUALMACHINE': "virtualization.VirtualMachine",
'VIRTUALIZATION.VIRTUALNETWORK': "virtualization.VirtualNetwork",
'VIRTUALIZATION.VMWARECLUSTER': "virtualization.VmwareCluster",
'VIRTUALIZATION.VMWAREDATACENTER': "virtualization.VmwareDatacenter",
'VIRTUALIZATION.VMWAREDATASTORE': "virtualization.VmwareDatastore",
'VIRTUALIZATION.VMWAREDATASTORECLUSTER': "virtualization.VmwareDatastoreCluster",
'VIRTUALIZATION.VMWAREDISTRIBUTEDNETWORK': "virtualization.VmwareDistributedNetwork",
'VIRTUALIZATION.VMWAREDISTRIBUTEDSWITCH': "virtualization.VmwareDistributedSwitch",
'VIRTUALIZATION.VMWAREFOLDER': "virtualization.VmwareFolder",
'VIRTUALIZATION.VMWAREHOST': "virtualization.VmwareHost",
'VIRTUALIZATION.VMWAREKERNELNETWORK': "virtualization.VmwareKernelNetwork",
'VIRTUALIZATION.VMWARENETWORK': "virtualization.VmwareNetwork",
'VIRTUALIZATION.VMWAREPHYSICALNETWORKINTERFACE': "virtualization.VmwarePhysicalNetworkInterface",
'VIRTUALIZATION.VMWAREUPLINKPORT': "virtualization.VmwareUplinkPort",
'VIRTUALIZATION.VMWAREVCENTER': "virtualization.VmwareVcenter",
'VIRTUALIZATION.VMWAREVIRTUALDISK': "virtualization.VmwareVirtualDisk",
'VIRTUALIZATION.VMWAREVIRTUALMACHINE': "virtualization.VmwareVirtualMachine",
'VIRTUALIZATION.VMWAREVIRTUALMACHINESNAPSHOT': "virtualization.VmwareVirtualMachineSnapshot",
'VIRTUALIZATION.VMWAREVIRTUALNETWORKINTERFACE': "virtualization.VmwareVirtualNetworkInterface",
'VIRTUALIZATION.VMWAREVIRTUALSWITCH': "virtualization.VmwareVirtualSwitch",
'VMEDIA.POLICY': "vmedia.Policy",
'VMRC.CONSOLE': "vmrc.Console",
'VNC.CONSOLE': "vnc.Console",
'VNIC.ETHADAPTERPOLICY': "vnic.EthAdapterPolicy",
'VNIC.ETHIF': "vnic.EthIf",
'VNIC.ETHNETWORKPOLICY': "vnic.EthNetworkPolicy",
'VNIC.ETHQOSPOLICY': "vnic.EthQosPolicy",
'VNIC.FCADAPTERPOLICY': "vnic.FcAdapterPolicy",
'VNIC.FCIF': "vnic.FcIf",
'VNIC.FCNETWORKPOLICY': "vnic.FcNetworkPolicy",
'VNIC.FCQOSPOLICY': "vnic.FcQosPolicy",
'VNIC.ISCSIADAPTERPOLICY': "vnic.IscsiAdapterPolicy",
'VNIC.ISCSIBOOTPOLICY': "vnic.IscsiBootPolicy",
'VNIC.ISCSISTATICTARGETPOLICY': "vnic.IscsiStaticTargetPolicy",
'VNIC.LANCONNECTIVITYPOLICY': "vnic.LanConnectivityPolicy",
'VNIC.LCPSTATUS': "vnic.LcpStatus",
'VNIC.SANCONNECTIVITYPOLICY': "vnic.SanConnectivityPolicy",
'VNIC.SCPSTATUS': "vnic.ScpStatus",
'VRF.VRF': "vrf.Vrf",
'WORKFLOW.ANSIBLEBATCHEXECUTOR': "workflow.AnsibleBatchExecutor",
'WORKFLOW.BATCHAPIEXECUTOR': "workflow.BatchApiExecutor",
'WORKFLOW.BUILDTASKMETA': "workflow.BuildTaskMeta",
'WORKFLOW.BUILDTASKMETAOWNER': "workflow.BuildTaskMetaOwner",
'WORKFLOW.CATALOG': "workflow.Catalog",
'WORKFLOW.CUSTOMDATATYPEDEFINITION': "workflow.CustomDataTypeDefinition",
'WORKFLOW.ERRORRESPONSEHANDLER': "workflow.ErrorResponseHandler",
'WORKFLOW.PENDINGDYNAMICWORKFLOWINFO': "workflow.PendingDynamicWorkflowInfo",
'WORKFLOW.ROLLBACKWORKFLOW': "workflow.RollbackWorkflow",
'WORKFLOW.SOLUTIONACTIONDEFINITION': "workflow.SolutionActionDefinition",
'WORKFLOW.SOLUTIONACTIONINSTANCE': "workflow.SolutionActionInstance",
'WORKFLOW.SOLUTIONDEFINITION': "workflow.SolutionDefinition",
'WORKFLOW.SOLUTIONINSTANCE': "workflow.SolutionInstance",
'WORKFLOW.SOLUTIONOUTPUT': "workflow.SolutionOutput",
'WORKFLOW.SSHBATCHEXECUTOR': "workflow.SshBatchExecutor",
'WORKFLOW.TASKDEBUGLOG': "workflow.TaskDebugLog",
'WORKFLOW.TASKDEFINITION': "workflow.TaskDefinition",
'WORKFLOW.TASKINFO': "workflow.TaskInfo",
'WORKFLOW.TASKMETADATA': "workflow.TaskMetadata",
'WORKFLOW.TASKNOTIFICATION': "workflow.TaskNotification",
'WORKFLOW.TEMPLATEEVALUATION': "workflow.TemplateEvaluation",
'WORKFLOW.TEMPLATEFUNCTIONMETA': "workflow.TemplateFunctionMeta",
'WORKFLOW.WORKFLOWDEFINITION': "workflow.WorkflowDefinition",
'WORKFLOW.WORKFLOWINFO': "workflow.WorkflowInfo",
'WORKFLOW.WORKFLOWMETA': "workflow.WorkflowMeta",
'WORKFLOW.WORKFLOWMETADATA': "workflow.WorkflowMetadata",
'WORKFLOW.WORKFLOWNOTIFICATION': "workflow.WorkflowNotification",
},
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_id': (str,), # noqa: E501
'moid': (str,), # noqa: E501
'selector': (str,), # noqa: E501
'link': (str,), # noqa: E501
'account_moid': (str,), # noqa: E501
'create_time': (datetime,), # noqa: E501
'domain_group_moid': (str,), # noqa: E501
'mod_time': (datetime,), # noqa: E501
'owners': ([str], none_type,), # noqa: E501
'shared_scope': (str,), # noqa: E501
'tags': ([MoTag], none_type,), # noqa: E501
'version_context': (MoVersionContext,), # noqa: E501
'ancestors': ([MoBaseMoRelationship], none_type,), # noqa: E501
'parent': (MoBaseMoRelationship,), # noqa: E501
'permission_resources': ([MoBaseMoRelationship], none_type,), # noqa: E501
'display_names': (DisplayNames,), # noqa: E501
'device_mo_id': (str,), # noqa: E501
'dn': (str,), # noqa: E501
'rn': (str,), # noqa: E501
'oper_state': (str,), # noqa: E501
'oper_state_qual': (str,), # noqa: E501
'port_id': (int,), # noqa: E501
'role': (str,), # noqa: E501
'slot_id': (int,), # noqa: E501
'switch_id': (str,), # noqa: E501
'admin_speed': (str,), # noqa: E501
'admin_state': (str,), # noqa: E501
'b2b_credit': (int,), # noqa: E501
'max_speed': (str,), # noqa: E501
'mode': (str,), # noqa: E501
'oper_speed': (str,), # noqa: E501
'peer_dn': (str,), # noqa: E501
'port_channel_id': (int,), # noqa: E501
'transceiver_type': (str,), # noqa: E501
'vsan': (int,), # noqa: E501
'wwn': (str,), # noqa: E501
'inventory_device_info': (InventoryDeviceInfoRelationship,), # noqa: E501
'port_group': (PortGroupRelationship,), # noqa: E501
'port_sub_group': (PortSubGroupRelationship,), # noqa: E501
'registered_device': (AssetDeviceRegistrationRelationship,), # noqa: E501
'object_type': (str,), # noqa: E501
}
@cached_property
def discriminator():
lazy_import()
val = {
'fc.PhysicalPort': FcPhysicalPort,
'mo.MoRef': MoMoRef,
}
if not val:
return None
return {'class_id': val}
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'moid': 'Moid', # noqa: E501
'selector': 'Selector', # noqa: E501
'link': 'link', # noqa: E501
'account_moid': 'AccountMoid', # noqa: E501
'create_time': 'CreateTime', # noqa: E501
'domain_group_moid': 'DomainGroupMoid', # noqa: E501
'mod_time': 'ModTime', # noqa: E501
'owners': 'Owners', # noqa: E501
'shared_scope': 'SharedScope', # noqa: E501
'tags': 'Tags', # noqa: E501
'version_context': 'VersionContext', # noqa: E501
'ancestors': 'Ancestors', # noqa: E501
'parent': 'Parent', # noqa: E501
'permission_resources': 'PermissionResources', # noqa: E501
'display_names': 'DisplayNames', # noqa: E501
'device_mo_id': 'DeviceMoId', # noqa: E501
'dn': 'Dn', # noqa: E501
'rn': 'Rn', # noqa: E501
'oper_state': 'OperState', # noqa: E501
'oper_state_qual': 'OperStateQual', # noqa: E501
'port_id': 'PortId', # noqa: E501
'role': 'Role', # noqa: E501
'slot_id': 'SlotId', # noqa: E501
'switch_id': 'SwitchId', # noqa: E501
'admin_speed': 'AdminSpeed', # noqa: E501
'admin_state': 'AdminState', # noqa: E501
'b2b_credit': 'B2bCredit', # noqa: E501
'max_speed': 'MaxSpeed', # noqa: E501
'mode': 'Mode', # noqa: E501
'oper_speed': 'OperSpeed', # noqa: E501
'peer_dn': 'PeerDn', # noqa: E501
'port_channel_id': 'PortChannelId', # noqa: E501
'transceiver_type': 'TransceiverType', # noqa: E501
'vsan': 'Vsan', # noqa: E501
'wwn': 'Wwn', # noqa: E501
'inventory_device_info': 'InventoryDeviceInfo', # noqa: E501
'port_group': 'PortGroup', # noqa: E501
'port_sub_group': 'PortSubGroup', # noqa: E501
'registered_device': 'RegisteredDevice', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""FcPhysicalPortRelationship - a model defined in OpenAPI
Args:
Keyword Args:
class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.. defaults to "mo.MoRef", must be one of ["mo.MoRef", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
moid (str): The Moid of the referenced REST resource.. [optional] # noqa: E501
selector (str): An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. 1. If 'moid' is set this field is ignored. 1. If 'selector' is set and 'moid' is empty/absent from the request, Intersight determines the Moid of the resource matching the filter expression and populates it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.. [optional] # noqa: E501
link (str): A URL to an instance of the 'mo.MoRef' class.. [optional] # noqa: E501
account_moid (str): The Account ID for this managed object.. [optional] # noqa: E501
create_time (datetime): The time when this managed object was created.. [optional] # noqa: E501
domain_group_moid (str): The DomainGroup ID for this managed object.. [optional] # noqa: E501
mod_time (datetime): The time when this managed object was last modified.. [optional] # noqa: E501
owners ([str], none_type): [optional] # noqa: E501
shared_scope (str): Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.. [optional] # noqa: E501
tags ([MoTag], none_type): [optional] # noqa: E501
version_context (MoVersionContext): [optional] # noqa: E501
ancestors ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501
parent (MoBaseMoRelationship): [optional] # noqa: E501
permission_resources ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional] # noqa: E501
display_names (DisplayNames): [optional] # noqa: E501
device_mo_id (str): The database identifier of the registered device of an object.. [optional] # noqa: E501
dn (str): The Distinguished Name unambiguously identifies an object in the system.. [optional] # noqa: E501
rn (str): The Relative Name uniquely identifies an object within a given context.. [optional] # noqa: E501
oper_state (str): Operational state of this port (enabled/disabled).. [optional] # noqa: E501
oper_state_qual (str): Reason for this port's Operational state.. [optional] # noqa: E501
port_id (int): Switch physical port identifier.. [optional] # noqa: E501
role (str): The role assigned to this port.. [optional] # noqa: E501
slot_id (int): Switch expansion slot module identifier.. [optional] # noqa: E501
switch_id (str): Switch Identifier that is local to a cluster.. [optional] # noqa: E501
admin_speed (str): Administrator configured Speed applied on the port.. [optional] # noqa: E501
admin_state (str): Administratively configured state (enabled/disabled) for this port.. [optional] # noqa: E501
b2b_credit (int): Buffer to Buffer credits of FC port.. [optional] # noqa: E501
max_speed (str): Maximum Speed with which the port operates.. [optional] # noqa: E501
mode (str): Mode information N_proxy, F or E associated to the Fibre Channel port.. [optional] # noqa: E501
oper_speed (str): Operational Speed with which the port operates.. [optional] # noqa: E501
peer_dn (str): PeerDn for fibre channel physical port.. [optional] # noqa: E501
port_channel_id (int): Port channel id of FC port channel created on FI switch.. [optional] # noqa: E501
transceiver_type (str): Transceiver type of a Fibre Channel port.. [optional] # noqa: E501
vsan (int): Virtual San that is associated to the port.. [optional] # noqa: E501
wwn (str): World Wide Name of a Fibre Channel port.. [optional] # noqa: E501
inventory_device_info (InventoryDeviceInfoRelationship): [optional] # noqa: E501
port_group (PortGroupRelationship): [optional] # noqa: E501
port_sub_group (PortSubGroupRelationship): [optional] # noqa: E501
registered_device (AssetDeviceRegistrationRelationship): [optional] # noqa: E501
object_type (str): The fully-qualified name of the remote type referred by this relationship.. [optional] # noqa: E501
"""
class_id = kwargs.get('class_id', "mo.MoRef")
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
required_args = {
'class_id': class_id,
}
model_args = {}
model_args.update(required_args)
model_args.update(kwargs)
composed_info = validate_get_composed_info(
constant_args, model_args, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
unused_args = composed_info[3]
for var_name, var_value in required_args.items():
setattr(self, var_name, var_value)
for var_name, var_value in kwargs.items():
if var_name in unused_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
not self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
],
'oneOf': [
FcPhysicalPort,
MoMoRef,
none_type,
],
}
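# --- Hedged usage sketch (not part of the generated module) ---
# Building a reference to a Fibre Channel physical port, either by Moid or by an
# OData selector (the selector example is taken from the field's own docstring):
#     ref = FcPhysicalPortRelationship(moid="<target-moid>")
#     ref = FcPhysicalPortRelationship(selector="Serial eq '3AA8B7T11'")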
|
import os
import hashlib
import unittest
from scrapy.http import Request, Response
from scrapy.spider import Spider
from scrapy.utils.test import get_crawler
from scrapy.exceptions import NotConfigured
from hubstorage import HubstorageClient
from scrapy_hcf import HcfMiddleware
HS_ENDPOINT = os.getenv('HS_ENDPOINT', 'http://localhost:8003')
HS_AUTH = os.getenv('HS_AUTH')
@unittest.skipUnless(HS_AUTH, 'No valid hubstorage credentials set')
class HcfTestCase(unittest.TestCase):
hcf_cls = HcfMiddleware
projectid = '2222222'
spidername = 'hs-test-spider'
frontier = 'test'
slot = '0'
number_of_slots = 1
@classmethod
def setUpClass(cls):
cls.endpoint = HS_ENDPOINT
cls.auth = HS_AUTH
cls.hsclient = HubstorageClient(auth=cls.auth, endpoint=cls.endpoint)
cls.project = cls.hsclient.get_project(cls.projectid)
cls.fclient = cls.project.frontier
@classmethod
def tearDownClass(cls):
cls.project.frontier.close()
cls.hsclient.close()
def setUp(self):
class TestSpider(Spider):
name = self.spidername
start_urls = [
'http://www.example.com/'
]
self.spider = TestSpider()
self.hcf_settings = {'HS_ENDPOINT': self.endpoint,
'HS_AUTH': self.auth,
'HS_PROJECTID': self.projectid,
'HS_FRONTIER': self.frontier,
'HS_CONSUME_FROM_SLOT': self.slot,
'HS_NUMBER_OF_SLOTS': self.number_of_slots}
self._delete_slot()
def tearDown(self):
self._delete_slot()
def _delete_slot(self):
self.fclient.delete_slot(self.frontier, self.slot)
def _build_response(self, url, meta=None):
return Response(url, request=Request(url="http://www.example.com/parent.html", meta=meta))
def _get_crawler(self, settings=None):
crawler = get_crawler(settings_dict=settings)
# simulate crawler engine
class Engine():
def __init__(self):
self.requests = []
def schedule(self, request, spider):
self.requests.append(request)
crawler.engine = Engine()
return crawler
def test_not_loaded(self):
crawler = self._get_crawler({})
self.assertRaises(NotConfigured, self.hcf_cls.from_crawler, crawler)
def test_start_requests(self):
crawler = self._get_crawler(self.hcf_settings)
hcf = self.hcf_cls.from_crawler(crawler)
# first time should be empty
start_urls = self.spider.start_urls
new_urls = list(hcf.process_start_requests(start_urls, self.spider))
self.assertEqual(new_urls, ['http://www.example.com/'])
# now try to store some URLs in the hcf and retrieve them
fps = [{'fp': 'http://www.example.com/index.html'},
{'fp': 'http://www.example.com/index2.html'}]
self.fclient.add(self.frontier, self.slot, fps)
self.fclient.flush()
new_urls = [r.url for r in hcf.process_start_requests(start_urls, self.spider)]
expected_urls = [r['fp'] for r in fps]
self.assertEqual(new_urls, expected_urls)
self.assertEqual(len(hcf.batch_ids), 1)
def test_spider_output(self):
crawler = self._get_crawler(self.hcf_settings)
hcf = self.hcf_cls.from_crawler(crawler)
# process new GET request
response = self._build_response("http://www.example.com/qxg1231")
request = Request(url="http://www.example.com/product/?qxp=12&qxg=1231", meta={'use_hcf': True})
outputs = list(hcf.process_spider_output(response, [request], self.spider))
self.assertEqual(outputs, [])
expected_links = {'0': set(['http://www.example.com/product/?qxp=12&qxg=1231'])}
self.assertEqual(dict(hcf.new_links), expected_links)
# process new POST request (don't add it to the hcf)
response = self._build_response("http://www.example.com/qxg456")
request = Request(url="http://www.example.com/product/?qxp=456", method='POST')
outputs = list(hcf.process_spider_output(response, [request], self.spider))
self.assertEqual(outputs, [request])
expected_links = {'0': set(['http://www.example.com/product/?qxp=12&qxg=1231'])}
self.assertEqual(dict(hcf.new_links), expected_links)
# process new GET request (without the use_hcf meta key)
response = self._build_response("http://www.example.com/qxg1231")
request = Request(url="http://www.example.com/product/?qxp=789")
outputs = list(hcf.process_spider_output(response, [request], self.spider))
self.assertEqual(outputs, [request])
expected_links = {'0': set(['http://www.example.com/product/?qxp=12&qxg=1231'])}
self.assertEqual(dict(hcf.new_links), expected_links)
# Simulate close spider
hcf.close_spider(self.spider, 'finished')
def test_close_spider(self):
crawler = self._get_crawler(self.hcf_settings)
hcf = self.hcf_cls.from_crawler(crawler)
# Save 2 batches in the HCF
fps = [{'fp': 'http://www.example.com/index_%s.html' % i} for i in range(0, 200)]
self.fclient.add(self.frontier, self.slot, fps)
self.fclient.flush()
# Read the first batch
start_urls = self.spider.start_urls
new_urls = [r.url for r in hcf.process_start_requests(start_urls, self.spider)]
expected_urls = [r['fp'] for r in fps]
self.assertEqual(new_urls, expected_urls)
# Simulate extracting some new urls
response = self._build_response("http://www.example.com/parent.html")
new_fps = ["http://www.example.com/child_%s.html" % i for i in range(0, 50)]
for fp in new_fps:
request = Request(url=fp, meta={'use_hcf': True})
list(hcf.process_spider_output(response, [request], self.spider))
self.assertEqual(len(hcf.new_links[self.slot]), 50)
# Simulate emptying the scheduler
crawler.engine.requests = []
# Simulate close spider
hcf.close_spider(self.spider, 'finished')
self.assertEqual(len(hcf.new_links[self.slot]), 0)
self.assertEqual(len(hcf.batch_ids), 0)
        # HCF must have 1 new batch
batches = [b for b in self.fclient.read(self.frontier, self.slot)]
self.assertEqual(len(batches), 1)
def test_hcf_params(self):
crawler = self._get_crawler(self.hcf_settings)
hcf = self.hcf_cls.from_crawler(crawler)
# Simulate extracting some new urls and adding them to the HCF
response = self._build_response("http://www.example.com/parent.html")
new_fps = ["http://www.example.com/child_%s.html" % i for i in range(0, 5)]
new_requests = []
for fp in new_fps:
hcf_params = {'qdata': {'a': '1', 'b': '2', 'c': '3'},
'fdata': {'x': '1', 'y': '2', 'z': '3'},
'p': 1}
request = Request(url=fp, meta={'use_hcf': True, "hcf_params": hcf_params})
new_requests.append(request)
list(hcf.process_spider_output(response, [request], self.spider))
expected = set(['http://www.example.com/child_4.html',
'http://www.example.com/child_1.html',
'http://www.example.com/child_0.html',
'http://www.example.com/child_3.html',
'http://www.example.com/child_2.html'])
self.assertEqual(hcf.new_links[self.slot], expected)
# Simulate close spider
hcf.close_spider(self.spider, 'finished')
        # Simulate running another spider
start_urls = self.spider.start_urls
stored_requests = list(hcf.process_start_requests(start_urls, self.spider))
for a, b in zip(new_requests, stored_requests):
self.assertEqual(a.url, b.url)
self.assertEqual(a.meta.get('qdata'), b.meta.get('qdata'))
# Simulate emptying the scheduler
crawler.engine.requests = []
# Simulate close spider
hcf.close_spider(self.spider, 'finished')
def test_spider_output_override_slot(self):
crawler = self._get_crawler(self.hcf_settings)
hcf = self.hcf_cls.from_crawler(crawler)
def get_slot_callback(request):
md5 = hashlib.md5()
md5.update(request.url)
digest = md5.hexdigest()
return str(int(digest, 16) % 5)
self.spider.slot_callback = get_slot_callback
# process new GET request
response = self._build_response("http://www.example.com/qxg1231")
request = Request(url="http://www.example.com/product/?qxp=12&qxg=1231",
meta={'use_hcf': True})
outputs = list(hcf.process_spider_output(response, [request], self.spider))
self.assertEqual(outputs, [])
expected_links = {'4': set(['http://www.example.com/product/?qxp=12&qxg=1231'])}
self.assertEqual(dict(hcf.new_links), expected_links)
# Simulate close spider
hcf.close_spider(self.spider, 'finished')
|
from ctypes import (
string_at,
addressof,
sizeof,
create_string_buffer,
cast,
pointer,
POINTER,
)
# converts a ctypes instance into a byte string (its raw in-memory representation)
def pack(ctype_instance):
return string_at(addressof(ctype_instance), sizeof(ctype_instance))
# converts a byte string back into a ctypes instance of the given type
def unpack(ctype, string):
buf = b""
for i in string:
buf += i.to_bytes(1, "little")
cstring = create_string_buffer(buf)
ctype_instance = cast(pointer(cstring), POINTER(ctype)).contents
return ctype_instance
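
# --- Hedged usage sketch (not part of the original module) ---
# Round-trips a small ctypes Structure through pack()/unpack(); the Point type
# below is a hypothetical example defined purely for illustration.
if __name__ == "__main__":
    from ctypes import Structure, c_int

    class Point(Structure):
        _fields_ = [("x", c_int), ("y", c_int)]

    original = Point(1, 2)
    raw = pack(original)           # bytes snapshot of the struct's memory
    restored = unpack(Point, raw)  # rebuild a Point from those bytes
    assert (restored.x, restored.y) == (1, 2)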
|
## -*- coding: UTF-8 -*-
## init.py
##
## Copyright (c) 2019 libcommon
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
from sqlalchemy import (
Column,
DateTime,
ForeignKey,
Integer,
Text,
)
from sqlalchemy.ext.declarative import declarative_base
__author__ = "libcommon"
# NOTE: Dynamic base classes (returned from a function) are not
# handled by MyPy, and thus type checking must be ignored here
# See: https://github.com/python/mypy/issues/4284
BaseTable = declarative_base() # type: ignore
class User(BaseTable): # type: ignore
__tablename__ = "user"
id = Column(Integer, primary_key=True)
first_name = Column(Text, nullable=False)
last_name = Column(Text, nullable=False)
email = Column(Text, nullable=False)
class Post(BaseTable): # type: ignore
__tablename__ = "post"
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey("user.id"), nullable=False)
content = Column(Text, nullable=False)
created_at = Column(DateTime, nullable=False)
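
# --- Hedged usage sketch (not part of the original module; assumes an in-memory SQLite engine) ---
if __name__ == "__main__":
    from datetime import datetime
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine("sqlite:///:memory:")
    BaseTable.metadata.create_all(engine)   # create the user/post tables
    session = sessionmaker(bind=engine)()

    user = User(first_name="Ada", last_name="Lovelace", email="ada@example.com")
    session.add(user)
    session.flush()                         # assigns user.id so the post can reference it
    session.add(Post(user_id=user.id, content="Hello", created_at=datetime.utcnow()))
    session.commit()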
|
from django.contrib.auth import get_user_model, logout
from django.contrib.auth.forms import UserCreationForm
from django.conf import settings
from django import forms
from django.forms.utils import ErrorDict
from django.test import override_settings
from django.core.cache import cache
from django.core.exceptions import SuspiciousOperation
from django.contrib.auth.hashers import make_password, check_password
from ..utils import SecurePickling, send_template_email
UserModel = get_user_model()
secure_pickle = SecurePickling()
override_setting_decorator = override_settings(PASSWORD_HASHERS=settings.REGISTRATION_HASHERS)
hash_data = override_setting_decorator(make_password)
check_data = override_setting_decorator(check_password)
class RegistrationForm(UserCreationForm):
error_messages = {
'password_mismatch': "The two password fields didn't match.",
}
first_name = forms.CharField(max_length=30, required=False, help_text='Optional.')
last_name = forms.CharField(max_length=30, required=False, help_text='Optional.')
email = forms.EmailField(max_length=254, help_text='Required. Inform a valid email address.')
if settings.SEND_CONFIRMATION_EMAIL:
uid = forms.CharField(max_length=256, required=False)
if settings.ENABLE_AGREEMENT_TERMS:
agreement = forms.BooleanField(required=False)
class Meta(UserCreationForm.Meta):
model = UserModel
fields = [
'username',
'first_name',
'last_name',
'email',
'password1',
'password2',
]
def build_confirmation_url(self, uid):
# pylint: disable=no-member
return self.request.build_absolute_uri(f'?uid={uid}')
def get_email_context(self):
return {}
def register_with_confirmation(self, commit):
cache_key = self.cleaned_data.get('uid', None)
if cache_key in (None, ''):
cache_key = hash_data(self.cleaned_data['email'])
self.cleaned_data['uid'] = cache_key
secured_data = secure_pickle.dumps(self.cleaned_data)
cache.set(cache_key, secured_data)
context_data = self.get_email_context()
context_data['action_url'] = self.build_confirmation_url(cache_key)
if context_data.get('application_name', None) is None:
context_data['application_name'] = settings.PROJECT_GUI_NAME
send_template_email(
subject='Registration Confirmation.',
template_name='registration/confirm_email.html',
email=self.cleaned_data['email'],
context_data=context_data
)
return super().save(commit=False)
if not check_data(self.cleaned_data['email'], cache_key):
raise SuspiciousOperation('Invalid registration email send.')
return super().save(commit)
def save(self, commit=True):
if settings.SEND_CONFIRMATION_EMAIL:
return self.register_with_confirmation(commit)
return super(RegistrationForm, self).save(commit)
def _clean_fields(self):
super()._clean_fields()
if settings.SEND_CONFIRMATION_EMAIL:
uid = self.cleaned_data.get('uid', None)
if self.errors and uid not in (None, ''):
self.cleaned_data.update(secure_pickle.loads(cache.get(uid)))
self._errors = ErrorDict()
# method clean is not needed?
def clean(self):
super().clean()
if settings.ENABLE_AGREEMENT_TERMS:
agreement = self.cleaned_data.get('agreement', None)
if not agreement:
self.add_error('agreement', 'To continue, need to accept the terms agreement.')
class TwoFaForm(forms.Form):
pin = forms.CharField(required=True)
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(TwoFaForm, self).__init__(*args, **kwargs)
def clean(self):
pin = self.cleaned_data.get('pin')
if self.request.user.twofa.verify(pin):
self.request.session['2fa'] = True
else:
self.add_error('pin', 'Invalid authentication code')
attempts = self.request.session.get('2fa_attempts', 0) + 1
if attempts >= settings.MAX_TFA_ATTEMPTS:
logout(self.request)
else:
self.request.session['2fa_attempts'] = attempts
return super().clean()
def get_user(self):
return self.request.user
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('robocrm', '0045_machine_toolbox_id'),
]
operations = [
migrations.AlterField(
model_name='machine',
name='toolbox_id',
field=models.PositiveIntegerField(default=None, blank=True, null=True, unique=True),
preserve_default=True,
),
]
|
class Solution(object):
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
        ans = list()
        nums = sorted(nums)
        for i, num_0 in enumerate(nums):
            # skip duplicate anchor values to avoid emitting repeated triplets
            if i > 0 and nums[i] == nums[i-1]:
                continue
            # two-pointer scan over the sorted suffix to the right of the anchor
            left = i + 1
            right = len(nums) - 1
            while left < right:
                total = num_0 + nums[left] + nums[right]
                if total < 0:
                    left += 1
                elif total > 0:
                    right -= 1
                else:
                    ans.append([num_0, nums[left], nums[right]])
                    # skip over duplicates on both sides before moving inward
                    while left < right and nums[left] == nums[left+1]:
                        left += 1
                    while left < right and nums[right] == nums[right-1]:
                        right -= 1
                    left += 1
                    right -= 1
        return ans
def test():
nums = [-1, 0, 1, 2, -1, -4]
solution = Solution()
ans = solution.threeSum(nums)
print(ans)
if __name__=='__main__':
test()
|
# Question 2: Loop structures
""" Use the len(string) function to find the length of a text (number of characters). """
class Contagem:
    def escreverTexto(self):
        texto = input("Enter the text: ")
        contagem = len(texto)
        print(f"The text has {contagem} characters.")
a = Contagem()
a.escreverTexto()
|
from tests.testcase import TestCase
from edmunds.localization.localizationmanager import LocalizationManager
from edmunds.localization.location.drivers.googleappengine import GoogleAppEngine
class TestLocalization(TestCase):
"""
Test the Localization
"""
def test_not_enabled(self):
"""
Test not enabled
:return: void
"""
# Write config
self.write_config([
"from edmunds.localization.location.drivers.googleappengine import GoogleAppEngine \n",
"APP = { \n",
" 'localization': { \n",
" 'enabled': False, \n",
" 'locale': { \n",
" 'fallback': 'en', \n",
" 'supported': ['en'], \n",
" }, \n",
" }, \n",
"} \n",
])
# Create app
app = self.create_application()
self.assert_is_none(app.localization())
def test_enabled(self):
"""
Test enabled
:return: void
"""
rule = '/' + self.rand_str(20)
# Write config
self.write_config([
"from edmunds.localization.location.drivers.googleappengine import GoogleAppEngine \n",
"APP = { \n",
" 'localization': { \n",
" 'enabled': True, \n",
" 'locale': { \n",
" 'fallback': 'en', \n",
" 'supported': ['en'], \n",
" }, \n",
" 'location': { \n",
" 'enabled': True, \n",
" 'instances': [ \n",
" { \n",
" 'name': 'gae',\n",
" 'driver': GoogleAppEngine,\n",
" }, \n",
" ], \n",
" }, \n",
" }, \n",
"} \n",
])
# Create app
app = self.create_application()
# Test session
with app.test_request_context(rule):
self.assert_is_not_none(app.localization())
self.assert_is_instance(app.localization(), LocalizationManager)
self.assert_is_not_none(app.localization().location())
self.assert_is_instance(app.localization().location(), GoogleAppEngine)
self.assert_is_not_none(app.localization().location('gae'))
self.assert_is_instance(app.localization().location('gae'), GoogleAppEngine)
with self.assert_raises_regexp(RuntimeError, 'No instance declared'):
app.localization().location('gae2')
self.assert_is_none(app.localization().location('gae2', no_instance_error=True))
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import unittest
import time
from TestUtils import TestUtilsMixin, ROOT, ROOT_PASSWORD
log = logging.getLogger('test.auto')
N = 100000
COUNT = 5
class SimpleBulkTest(TestUtilsMixin, unittest.TestCase):
"Start a clean accumulo, make some bulk data and import it"
order = 25
def testIngest(self, host, args, **kwargs):
return self.runClassOn(host,
'org.apache.accumulo.server.test.TestIngest',
args,
**kwargs)
def bulkLoad(self, host, dir):
handle = self.runClassOn(
self.masterHost(),
'org.apache.accumulo.server.test.BulkImportDirectory',
[ROOT, ROOT_PASSWORD,
'test_ingest', dir, '/testBulkFail'])
self.wait(handle)
self.assert_(handle.returncode == 0)
def createRFiles(self):
args = '-rFile /testrf/rf%02d -timestamp 1 -size 50 -random 56 %1d %ld 1'
log.info('creating rfiles')
handles = []
for i in range(COUNT):
handles.append(self.testIngest(
self.hosts[i%len(self.hosts)],
(args % (i, N, (N * i))).split()))
        # create an rfile with one entry; there was a bug with this
handles.append(self.testIngest(self.hosts[0], (args % (COUNT, 1, COUNT * N)).split()))
log.info('waiting to finish')
for h in handles:
h.communicate()
self.assert_(h.returncode == 0)
log.info('done')
def execute(self, host, cmd, **opts):
handle = self.runOn(host, cmd, **opts)
out, err = handle.communicate()
return out, err, handle.returncode
def runTest(self):
# initialize the database
self.createTable('test_ingest')
self.execute(self.masterHost(), 'hadoop dfs -rmr /testrf'.split())
self.execute(self.masterHost(), 'hadoop dfs -rmr /testBulkFail'.split())
self.execute(self.masterHost(), 'hadoop dfs -mkdir /testBulkFail'.split())
# insert some data
self.createRFiles()
self.bulkLoad(self.masterHost(), '/testrf')
log.info("Verifying Ingestion")
handles = []
for i in range(COUNT):
handles.append(self.verify(self.hosts[i%len(self.hosts)], N, i * N))
handles.append(self.verify(self.hosts[0], 1, COUNT * N))
for h in handles:
out, err = h.communicate()
self.assert_(h.returncode == 0)
self.shutdown_accumulo()
def suite():
result = unittest.TestSuite()
result.addTest(SimpleBulkTest())
return result
|
# -*- coding: utf-8 -*-
class BaseConfig(object):
DEBUG = True
|
import collections
from typing import Dict, Any, Tuple, List
import tensorflow as tf
from dpu_utils.mlutils import Vocabulary
import encoders.utils.tree_processing
from utils import data_pipeline
from .encoder import Encoder, QueryType
def _try_to_queue_node(
node: encoders.utils.tree_processing.TreeNode,
queue: collections.deque,
nodes_queued: int,
max_nodes: int) -> bool:
if max_nodes == -1 or nodes_queued < max_nodes:
queue.append(node)
return True
else:
return False
def _get_tree_elements_seq(
root: encoders.utils.tree_processing.TreeNode,
max_nodes: int = -1) -> Tuple[List[str], List[str]]:
node_types: List[str] = []
node_tokens: List[str] = []
node_queue = collections.deque()
nodes_queued = 0
nodes_queued += _try_to_queue_node(root, node_queue, nodes_queued, max_nodes)
while node_queue:
node = node_queue.popleft()
for child in node['children']:
if _try_to_queue_node(child, node_queue, nodes_queued, max_nodes):
nodes_queued += 1
node_types.append(node['type'])
node_tokens.append(node['string'])
return node_types, node_tokens
class ASTEncoder(Encoder):
@classmethod
def get_default_hyperparameters(cls) -> Dict[str, Any]:
encoder_hypers = {
'type_vocab_size': 10000,
'type_vocab_count_threshold': 10,
'type_embedding_size': 128,
'token_vocab_size': 10000,
'token_vocab_count_threshold': 10,
'token_embedding_size': 128,
'token_use_bpe': True,
'token_pct_bpe': 0.5,
'max_num_nodes': 200,
'max_num_tokens': 200,
'max_children': 200
}
hypers = super().get_default_hyperparameters()
hypers.update(encoder_hypers)
return hypers
def __init__(self, label: str, hyperparameters: Dict[str, Any], metadata: Dict[str, Any]):
super().__init__(label, hyperparameters, metadata)
@property
def output_representation_size(self) -> int:
return self.get_hyper('type_embedding_size')
def make_model(self, is_train: bool = False) -> tf.Tensor:
raise NotImplementedError()
def embedding_layer(self, input_ids: tf.Tensor) -> tf.Tensor:
type_embeddings = tf.get_variable(
name='type_embeddings',
initializer=tf.glorot_uniform_initializer(),
shape=[len(self.metadata['type_vocab']), self.get_hyper('type_embedding_size')])
self.__embeddings = type_embeddings
type_embeddings = tf.nn.dropout(
type_embeddings,
keep_prob=self.placeholders['dropout_keep_rate'])
return tf.nn.embedding_lookup(params=type_embeddings, ids=input_ids)
@classmethod
def init_metadata(cls) -> Dict[str, Any]:
raw_metadata = super().init_metadata()
raw_metadata['type_counter'] = collections.Counter()
return raw_metadata
@classmethod
def load_metadata_from_sample(cls, data_to_load: Any, raw_metadata: Dict[str, Any], use_subtokens: bool = False,
mark_subtoken_end: bool = False) -> None:
default_hypers = cls.get_default_hyperparameters()
node_types, node_tokens = _get_tree_elements_seq(data_to_load, default_hypers['max_num_nodes'])
raw_metadata['type_counter'].update(node_types)
@classmethod
def finalise_metadata(cls, encoder_label: str, hyperparameters: Dict[str, Any],
raw_metadata_list: List[Dict[str, Any]]) -> Dict[str, Any]:
final_metadata = super().finalise_metadata(encoder_label, hyperparameters, raw_metadata_list)
merged_type_counter = collections.Counter()
for raw_metadata in raw_metadata_list:
merged_type_counter += raw_metadata['type_counter']
type_vocabulary = Vocabulary.create_vocabulary(
tokens=merged_type_counter,
max_size=hyperparameters[f'{encoder_label}_type_vocab_size'],
count_threshold=hyperparameters[f'{encoder_label}_type_vocab_count_threshold'])
final_metadata['type_vocab'] = type_vocabulary
print('Total type vocabulary words:', len(final_metadata['type_vocab'].id_to_token))
return final_metadata
def init_minibatch(self, batch_data: Dict[str, Any]) -> None:
super().init_minibatch(batch_data)
batch_data['node_type_ids'] = []
batch_data['children'] = []
def extend_minibatch_by_sample(self, batch_data: Dict[str, Any], sample: Dict[str, Any], is_train: bool = False,
query_type: QueryType = QueryType.DOCSTRING.value) -> bool:
current_sample = {}
current_sample['node_type_ids'] = sample[f'{self.label}_node_type_ids']
current_sample['children'] = sample[f'{self.label}_children']
for key, value in current_sample.items():
if key in batch_data:
batch_data[key].append(value)
return False
def get_token_embeddings(self) -> Tuple[tf.Tensor, List[str]]:
return self.__embeddings, list(self.metadata['type_vocab'].id_to_token)
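
# --- Hedged usage sketch (not part of the original module) ---
# Shows _get_tree_elements_seq on a tiny hand-built tree; the node layout
# ({'type', 'string', 'children'}) is inferred from how the function reads nodes.
if __name__ == "__main__":
    tiny_tree = {
        'type': 'Module', 'string': '', 'children': [
            {'type': 'Name', 'string': 'x', 'children': []},
            {'type': 'Num', 'string': '1', 'children': []},
        ],
    }
    node_types, node_tokens = _get_tree_elements_seq(tiny_tree, max_nodes=10)
    print(node_types)   # ['Module', 'Name', 'Num']
    print(node_tokens)  # ['', 'x', '1']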
|
_class_registry_cache = {}
_field_list_cache = []
def _import_class(cls_name):
"""Cache mechanism for imports.
Due to complications of circular imports mongoengine needs to do lots of
inline imports in functions. This is inefficient as classes are
    imported repeatedly throughout the mongoengine code. This is
compounded by some recursive functions requiring inline imports.
:mod:`mongoengine.common` provides a single point to import all these
classes. Circular imports aren't an issue as it dynamically imports the
class when first needed. Subsequent calls to the
:func:`~mongoengine.common._import_class` can then directly retrieve the
class from the :data:`mongoengine.common._class_registry_cache`.
"""
if cls_name in _class_registry_cache:
return _class_registry_cache.get(cls_name)
doc_classes = ('Document', 'DynamicEmbeddedDocument', 'EmbeddedDocument',
'MapReduceDocument')
# Field Classes
if not _field_list_cache:
from mongoengine.fields import __all__ as fields
_field_list_cache.extend(fields)
from mongoengine.base.fields import __all__ as fields
_field_list_cache.extend(fields)
field_classes = _field_list_cache
deref_classes = ('DeReference',)
if cls_name == 'BaseDocument':
from mongoengine.base import document as module
import_classes = ['BaseDocument']
elif cls_name in doc_classes:
from mongoengine import document as module
import_classes = doc_classes
elif cls_name in field_classes:
from mongoengine import fields as module
import_classes = field_classes
elif cls_name in deref_classes:
from mongoengine import dereference as module
import_classes = deref_classes
else:
raise ValueError('No import set for: %s' % cls_name)
for cls in import_classes:
_class_registry_cache[cls] = getattr(module, cls)
return _class_registry_cache.get(cls_name)
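
# --- Hedged usage sketch (not part of the original module) ---
# Inside mongoengine internals this replaces an inline import, e.g.:
#     Document = _import_class('Document')
#     ListField = _import_class('ListField')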
|
from bs4 import BeautifulSoup
from rawdata.EN_F14 import s
def is_course(c):
if c.count("-") == 2 and len(c) == 12:
return True
def is_teacher(c):
if c.replace(" ", "").replace("-", "").isalpha():
return True
soup = BeautifulSoup(s)
td = soup.get_text().split("\n")
all_data = []
index = 0
while index < len(td):
if "TEXAS" in td[index]:
index += 99
if "REPORT" in td[index]:
index += 98
if "." in td[index] and "%" not in td[index]:
all_data.append(td[index])
index += 7
if not td[index].isspace() and td[index] and "%" not in td[index]:
all_data.append(td[index])
index += 1
base_course = ""
temp_grades = []
courses_combined = {}
courses_profs = {}
for x in range(len(all_data)):
if is_course(all_data[x]):
base_course = all_data[x]
temp_grades = [all_data[x+1], all_data[x+2], all_data[x+3],all_data[x+4], all_data[x+5], all_data[x+7]]
if "COURSE" in all_data[x]:
courses_combined[base_course[0:8]] = [all_data[x+1], all_data[x+2], all_data[x+3],
all_data[x+4], all_data[x+5], all_data[x+7]]
if is_teacher(all_data[x]):
courses_profs[base_course] = [all_data[x-7], all_data[x-6], all_data[x-5],
all_data[x-4], all_data[x-3], all_data[x-1], all_data[x]]
def search_course(c):
course = c.upper()
try:
c_avg = courses_combined[course]
c_allprofs = []
for key in sorted(courses_profs):
if course in key:
temp = []
for stat in courses_profs.get(key):
temp.append(stat)
temp.insert(0, key)
c_allprofs.append(temp)
return c_avg, c_allprofs
except KeyError:
return "Course not found.", "Course not found."
|
alph = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ_.")
while True:
try:
n, mes = input().split(" ")
n = int(n)
except ValueError:
break
mes = list(mes[::-1])
for i in range(len(mes)):
ind = alph.index(mes[i])
mes[i] = alph[(ind + n) % len(alph)]
final = ""
for char in mes:
final += char
print(final)
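
# --- Hedged worked example (not part of the original program) ---
# Input "2 AB": the message is reversed to "BA", then each character is shifted
# forward by 2 within the alphabet "ABC...Z_.", producing the output "DC".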
|
from functools import wraps
import google.oauth2.id_token
from flask import current_app, g, request
from google.auth.transport import requests
from werkzeug.exceptions import Unauthorized
google_requests = requests.Request()
def jwt_validate(f):
@wraps(f)
def decorated_function(*args, **kwargs):
id_token = request.cookies.get("token", None) or request.headers.get(
"X-API-KEY", None
)
if not id_token:
raise Unauthorized("Token is missing")
try:
claims = google.oauth2.id_token.verify_oauth2_token(
id_token, google_requests, # TODO add audience
)
g.email = claims["email"]
except Exception as e:
current_app.logger.info(f"login failed: {e}")
raise Unauthorized(f"Auth failed {e}")
return f(*args, **kwargs)
return decorated_function
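
# --- Hedged usage sketch (not part of the original module) ---
# A minimal Flask route guarded by the decorator; the app and route names are
# illustrative only:
#     from flask import Flask, jsonify
#
#     app = Flask(__name__)
#
#     @app.route("/me")
#     @jwt_validate
#     def me():
#         # g.email was populated by the decorator after token verification
#         return jsonify(email=g.email)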
|
#!/usr/bin/python
# coding=utf-8
import sys
import time
import sqlite3
import telepot
from pprint import pprint
from datetime import date, datetime
import re
import traceback
ROOT = '/root/git/stock-analyzer/'
def sendMessage(id, msg):
try:
bot.sendMessage(id, msg)
except:
print str(datetime.now()).split('.')[0]
traceback.print_exc(file=sys.stdout)
def help(id):
sendMessage(id, """< Stock analyzer 명령어 >
/sub : 구독
/unsub : 구독해제
""")
def handle(msg):
content_type, chat_type, chat_id = telepot.glance(msg)
if content_type != 'text':
        sendMessage(chat_id, '난 텍스트 이외의 메시지는 처리하지 못해요.')  # "I can only handle text messages."
return
#pprint(msg["from"])
try:
name = msg["from"]["last_name"] + msg["from"]["first_name"]
except:
name = ""
text = msg['text'].lower()
args = text.split(' ')
if text.startswith('/'):
if text.startswith('/sub'):
conn = sqlite3.connect(ROOT+'subscribe.db')
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS subscribe( user TEXT, name TEXT, PRIMARY KEY(user) )')
conn.commit()
            try:
                # Parameterized query avoids SQL injection via the user-supplied name.
                c.execute('INSERT INTO subscribe (user, name) VALUES (?, ?)', (str(chat_id), name))
            except sqlite3.IntegrityError:
                # Primary-key conflict: this chat is already subscribed.
                sendMessage(chat_id, "동일한 신청목록이 존재합니다.")  # "An identical subscription already exists."
            else:
                # Insert succeeded.
                conn.commit()
                sendMessage(chat_id, "성공적으로 추가되었습니다.")  # "Successfully added."
elif text.startswith('/unsub'):
conn = sqlite3.connect(ROOT+'subscribe.db')
c = conn.cursor()
            try:
                c.execute('DELETE FROM subscribe WHERE user=?', (str(chat_id),))
            except sqlite3.IntegrityError:
                # Delete failed.
                sendMessage(chat_id, "삭제가 실패했습니다.")  # "Failed to delete."
            else:
                # Delete succeeded.
                conn.commit()
                sendMessage(chat_id, "성공적으로 삭제 되었습니다.")  # "Successfully removed."
else:
help(chat_id)
else:
help(chat_id)
TOKEN = sys.argv[1]
print 'received token :', TOKEN
bot = telepot.Bot(TOKEN)
pprint( bot.getMe() )
bot.notifyOnMessage(handle)
print 'Listening...'
while 1:
time.sleep(10)
|
import os
HUClist = ["1002", "1003", "1004"] # HUC4 geospatial tiles to search over.
inDir = "../data/cov/static" # Source parameter grid folder.
FCPGdir = "../FCPGs" # Output FCPG folder.
covList = [] #Initialize list of parameter grids.
# iterate through all source parameter grids.
if os.path.isdir(inDir):
for path, subdirs, files in os.walk(inDir):
for name in files:
#Check if file is .tif or .vrt file, and if so add it to covariate list
if os.path.splitext(name)[1] == ".tif" or os.path.splitext(name)[1] == ".vrt":
covList.append(os.path.join(path, name))
print("The following covariate files were located in the specified directory:")
print(covList)
missingList = [] #Initialize list of missing files
# iterate through source parameter grids and test if FCPGs have been created.
for cov in covList:
covname = os.path.splitext(os.path.basename(cov))[0] #Get the name of the parameter grid
if os.path.isdir(FCPGdir):
for HUC in HUClist:
#Create the file name corresponding to the HUC and parameter grid
FCPGFile = os.path.join(FCPGdir, HUC,covname + "_HUC" + HUC +"_FCPG.tif") #Create filepath for parameter FCPG file
if not os.path.isfile(FCPGFile):
print("Missing File: {0}".format(FCPGFile))
missingList.append(FCPGFile)
else:
print("Error FCPG directory does not exist: {0}".format(FCPGdir))
print("{0} missing files found".format(len(missingList)))
else:
print("Error input directory does not exist: {0}".format(inDir))
|
# TODO: Mock test file read/write
from app import view, list
def test_export_view_json(runner):
"""
Tests exporting single article to json
"""
res = runner.invoke(view, ["new_wangonya", "-e", "json"])
assert res.exit_code == 0
def test_export_list_json(runner):
"""
Tests exporting articles list to json
"""
res = runner.invoke(list, ["-e", "json"])
assert res.exit_code == 0
def test_export_list_csv(runner):
"""
Tests exporting articles list to csv
"""
res = runner.invoke(list, ["-e", "csv"])
assert res.exit_code == 0
def test_export_list_sqlite(runner):
"""
Tests exporting articles list to sqlite
"""
res = runner.invoke(list, ["-e", "sqlite"])
assert res.exit_code == 0
|
# coding=utf-8
# Copyright 2021 rinna Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from tqdm import tqdm
from corpus.jp_cc100.config import Config
config = Config()
N_LINES_PER_FILE = 1e6
if __name__ == "__main__":
if not os.path.exists(f"{config.doc_data_dir}"):
os.makedirs(f"{config.doc_data_dir}")
output_file_id = 0
output_file = open(f"{config.doc_data_dir}/{output_file_id}.txt", "w+", encoding="utf-8")
cur_n_lines = 0
with open(f"{config.raw_data_dir}/ja.txt", "r", encoding="utf-8") as input_file:
with tqdm() as pbar:
for line in input_file:
line = line.strip()
output_file.write(line + "\n")
cur_n_lines += 1
if line == "":
output_file.write
if cur_n_lines >= N_LINES_PER_FILE:
output_file.flush()
output_file.close()
output_file_id += 1
cur_n_lines = 0
output_file = open(f"{config.doc_data_dir}/{output_file_id}.txt", "w+", encoding="utf-8")
pbar.set_description(f"created {output_file_id} files")
|
import pya
from pya import *
from SiEPIC.utils import get_technology, get_technology_by_name
from SiEPIC.utils import arc, arc_wg, arc_to_waveguide, points_per_circle#,layout
import math
class swg_fc_test(pya.PCellDeclarationHelper):
"""
Sub-wavelength-grating fibre coupler PCell litho test structure.
2017/07/12: Lukas Chrostowski, initial version, based on swg_fc by Tim
Input:
"""
def __init__(self):
# Important: initialize the super class
super(swg_fc_test, self).__init__()
# declare the parameters
self.param("wavelength", self.TypeDouble, "Design Wavelength (micron)", default = 2.9)
self.param("n_t", self.TypeDouble, "Fiber Mode", default = 1.0)
self.param("n_e", self.TypeDouble, "Grating Index Parameter", default = 3.1)
self.param("angle_e", self.TypeDouble, "Taper Angle (deg)", default = 20.0)
self.param("grating_length", self.TypeDouble, "Grating Length (micron)", default = 3.0)
self.param("taper_length", self.TypeDouble, "Taper Length (micron)", default = 32.0)
self.param("dc", self.TypeDouble, "Duty Cycle", default = 0.488193)
self.param("period", self.TypeDouble, "Grating Period", default = 1.18939)
self.param("ff", self.TypeDouble, "Fill Factor", default = 0.244319)
self.param("t", self.TypeDouble, "Waveguide Width (micron)", default = 1.0)
self.param("theta_c", self.TypeDouble, "Insertion Angle (deg)", default = 8.0)
self.param("fab_error", self.TypeDouble, "Fab Process error max (micron)", default = 0.05)
# Layer parameters
TECHNOLOGY = get_technology_by_name('EBeam')
self.param("layer", self.TypeLayer, "Layer", default = TECHNOLOGY['Waveguide'])
self.param("pinrec", self.TypeLayer, "PinRec Layer", default = TECHNOLOGY['PinRec'])
self.param("devrec", self.TypeLayer, "DevRec Layer", default = TECHNOLOGY['DevRec'])
self.param("textl", self.TypeLayer, "Text Layer", default = TECHNOLOGY['Text'])
def display_text_impl(self):
# Provide a descriptive text for the cell
return "swg_fc_test_%.1f-%.2f-%.2f-%.2f-%.2f-%.2f-%.2f-%.2f" % \
(self.wavelength, self.theta_c, self.period, self.dc, self.ff, self.angle_e, self.taper_length, self.t)
# return "temporary placeholder"
def coerce_parameters_impl(self):
pass
def can_create_from_shape(self, layout, shape, layer):
return False
def produce_impl(self):
# fetch the parameters
dbu = self.layout.dbu
ly = self.layout
shapes = self.cell.shapes
LayerSi = self.layer
LayerSiN = ly.layer(self.layer)
LayerPinRecN = ly.layer(self.pinrec)
LayerDevRecN = ly.layer(self.devrec)
LayerTextN = ly.layer(self.textl)
from math import pi, cos, sin, log, sqrt, tan
lambda_0 = self.wavelength ##um wavelength of light
pin_length = 0.5 ##um extra nub for the waveguid attachment
# Geometry
wh = self.period*self.dc ##thick grating
wl = self.ff*(self.period - wh) ## thin grating
spacing = (self.period - wh - wl)/2 ##space between thick and thin
gc_number = int(round(self.grating_length/self.period)) ##number of periods
gc_number = 3
e = self.n_t*sin((pi/180)*self.theta_c)/self.n_e
N = round(self.taper_length*(1+e)*self.n_e/lambda_0) ##allows room for the taper
start = (pi - (pi/180)*self.angle_e/2)
stop = (pi + (pi/180)*self.angle_e/2)
# Draw coupler grating.
for j in range(gc_number):
# number of points in the arcs:
# calculate such that the vertex & edge placement error is < 0.5 nm.
# see "SiEPIC_EBeam_functions - points_per_circle" for more details
radius = N*lambda_0 / ( self.n_e*( 1 - e )) + j*self.period + spacing
seg_points = int(points_per_circle(radius/dbu)/360.*self.angle_e) # number of points grating arc
theta_up = []
for m in range(seg_points+1):
theta_up = theta_up + [start + m*(stop-start)/seg_points]
theta_down = theta_up[::-1]
##small one
r_up = []
r_down = []
rng = range(len(theta_up))
# find the divider to get desired fab error:
th = min(theta_up)
div = (2*sin(th)/self.fab_error)*(N*lambda_0 / ( self.n_e*( 1 - e*cos(th) )) + j*self.period + spacing)
err = (2*sin(th)/div)*(N*lambda_0 / ( self.n_e*( 1 - e*cos(th) )) + j*self.period + spacing)
# print("div %s, err (double check) %s" % (div, err))
for k in rng:
th = theta_up[k]
# print("%s, %s, %s" % (th, sin(th), 1+sin(th)/10.) )
r_up = r_up + [(1-sin(th)/div) *(N*lambda_0 / ( self.n_e*( 1 - e*cos(th) )) + j*self.period + spacing)]
for k in rng[::-1]:
th = theta_up[k]
# print("%s, %s, %s" % (th, sin(th), 1+sin(th)/10.) )
r_down = r_down + [(1+sin(th)/div) *(N*lambda_0 / ( self.n_e*( 1 - e*cos(th) )) + j*self.period + spacing)]
xr = []
yr = []
for k in range(len(theta_up)):
xr = xr + [r_up[k]*cos(theta_up[k])]
yr = yr + [r_up[k]*sin(theta_up[k])]
xl = []
yl = []
for k in range(len(theta_down)):
xl = xl + [(r_down[k] + wl)*cos(theta_down[k])]
yl = yl + [(r_down[k] + wl)*sin(theta_down[k])]
x = xr + xl
y = yr + yl
pts = []
for i in range(len(x)):
pts.append(Point.from_dpoint(pya.DPoint(x[i]/dbu, y[i]/dbu)))
#small_one = core.Boundary(points)
polygon = Polygon(pts)
shapes(LayerSiN).insert(polygon)
if j==1:
# text label dimensions, for minor grating:
# top
shapes(LayerTextN).insert(Text("%0.0f"%((wl+self.fab_error)*1000), Trans(Trans.R0, xl[0]/dbu,yl[0]/dbu))).text_size = 0.2/dbu
# btm
shapes(LayerTextN).insert(Text("%0.0f"%((wl-self.fab_error)*1000), Trans(Trans.R0, xl[-1]/dbu,yl[-1]/dbu))).text_size = 0.2/dbu
# mid
shapes(LayerTextN).insert(Text("%0.0f"%((wl)*1000), Trans(Trans.R0, xl[int(len(theta_up)/2)]/dbu,yl[int(len(theta_up)/2)]/dbu))).text_size = 0.2/dbu
##big one
r_up = []
r_down = []
# find the divider to get desired fab error:
th = min(theta_up)
div = (2*sin(th)/self.fab_error)*(N*lambda_0 / ( self.n_e*( 1 - e*cos(th) )) + j*self.period + 2*spacing+wl)
err = (2*sin(th)/div)*(N*lambda_0 / ( self.n_e*( 1 - e*cos(th) )) + j*self.period + 2*spacing+wl)
# print("div %s, err (double check) %s" % (div, err))
rng = range(len(theta_up))
for k in rng:
th = theta_up[k]
r_up = r_up + [(1-sin(th)/div) *(N*lambda_0 / ( self.n_e*( 1 - e*cos(th) )) + j*self.period + 2*spacing+wl)]
for k in rng[::-1]:
th = theta_up[k]
r_down = r_down + [(1+sin(th)/div) *(N*lambda_0 / ( self.n_e*( 1 - e*cos(th) )) + j*self.period + 2*spacing+wl)]
xr = []
yr = []
for k in range(len(theta_up)):
xr = xr + [r_up[k]*cos(theta_up[k])]
yr = yr + [r_up[k]*sin(theta_up[k])]
xl = []
yl = []
for k in range(len(theta_down)):
xl = xl + [(r_down[k] + wh)*cos(theta_down[k])]
yl = yl + [(r_down[k] + wh)*sin(theta_down[k])]
x = xr + xl
y = yr + yl
pts = []
for i in range(len(x)):
pts.append(Point.from_dpoint(pya.DPoint(x[i]/dbu, y[i]/dbu)))
polygon = Polygon(pts)
shapes(LayerSiN).insert(polygon)
if j==1:
# text label dimensions, for major grating:
# top
shapes(LayerTextN).insert(Text("%0.0f"%((wh+self.fab_error)*1000), Trans(Trans.R0, xl[0]/dbu,yl[0]/dbu))).text_size = 0.2/dbu
# btm
shapes(LayerTextN).insert(Text("%0.0f"%((wh-self.fab_error)*1000), Trans(Trans.R0, xl[-1]/dbu,yl[-1]/dbu))).text_size = 0.2/dbu
# mid
shapes(LayerTextN).insert(Text("%0.0f"%((wh)*1000), Trans(Trans.R0, xl[int(len(theta_up)/2)]/dbu,yl[int(len(theta_up)/2)]/dbu))).text_size = 0.2/dbu
|
from bot_logger import logger
from cogs.modules.coin_market import CurrencyException, FiatException
from collections import defaultdict
from discord.errors import Forbidden
import discord
import json
CMB_ADMIN = "CMB ADMIN"
ADMIN_ONLY = "ADMIN_ONLY"
ALERT_DISABLED = "ALERT_DISABLED"
class AlertFunctionality:
"""Handles Alert Command functionality"""
def __init__(self, bot, coin_market, alert_capacity, server_data):
self.bot = bot
self.server_data = server_data
self.coin_market = coin_market
self.alert_capacity = alert_capacity
self.market_list = ""
self.acronym_list = ""
self.supported_operators = ["<", ">", "<=", ">="]
self.alert_data = self._check_alert_file()
self._save_alert_file(self.alert_data, backup=True)
def update(self, market_list=None, acronym_list=None, server_data=None):
"""
Updates utilities with new coin market and server data
"""
if server_data:
self.server_data = server_data
if market_list:
self.market_list = market_list
if acronym_list:
self.acronym_list = acronym_list
def _check_permission(self, ctx):
"""
Checks if user contains the correct permissions to use these
commands
"""
try:
user_roles = ctx.message.author.roles
server_id = ctx.message.server.id
if server_id not in self.server_data:
return True
elif (ADMIN_ONLY in self.server_data[server_id]
or ALERT_DISABLED in self.server_data[server_id]):
if CMB_ADMIN not in [role.name for role in user_roles]:
return False
return True
except:
return True
def _check_alert_file(self):
"""
Checks to see if there's a valid alerts.json file
"""
try:
with open('alerts.json') as alerts:
return json.load(alerts)
except FileNotFoundError:
self._save_alert_file()
return json.loads('{}')
except Exception as e:
print("An error has occured. See error.log.")
logger.error("Exception: {}".format(str(e)))
def _translate_operation(self, operator):
"""
Translates the supported operations for alerts
into english
@param operator - operator condition to notify the channel
"""
if operator in self.supported_operators:
if operator == "<":
operator_translation = "less than"
elif operator == "<=":
operator_translation = "less than or equal to"
elif operator == ">":
operator_translation = "greater than"
elif operator == ">=":
operator_translation = "greater than or equal to"
return operator_translation
else:
raise Exception("Unable to translate operation.")
def _check_alert(self, currency, operator, user_value, fiat, kwargs=None):
"""
        Checks whether the alert condition has NOT been met yet
@param currency - cryptocurrency to set an alert of
@param operator - operator condition to notify the channel
@param user_value - price or percent for condition to compare
@param fiat - desired fiat currency (i.e. 'EUR', 'USD')
        @return - True if the condition has not been met, False if it has (or the currency is unknown)
"""
if self.market_list is None:
return True
if currency in self.market_list:
if kwargs:
if "btc" in kwargs:
market_value = float(self.market_list[currency]["price_btc"])
elif "hour" in kwargs:
market_value = float(self.market_list[currency]["percent_change_1h"])
elif "day" in kwargs:
market_value = float(self.market_list[currency]["percent_change_24h"])
elif "week" in kwargs:
market_value = float(self.market_list[currency]["percent_change_7d"])
else:
raise Exception("Unsupported percent change format.")
else:
market_value = float(self.market_list[currency]["price_usd"])
market_value = float(self.coin_market.format_price(market_value,
fiat,
False))
if operator in self.supported_operators:
if operator == "<":
if market_value < float(user_value):
return False
elif operator == "<=":
if market_value <= float(user_value):
return False
elif operator == ">":
if market_value > float(user_value):
return False
elif operator == ">=":
if market_value >= float(user_value):
return False
return True
else:
raise Exception("Operator not supported: {}".format(operator))
else:
return False
async def _say_msg(self, msg=None, channel=None, emb=None):
"""
Bot will say msg if given correct permissions
@param msg - msg to say
@param channel - channel to send msg to
@param emb - embedded msg to say
"""
try:
if channel:
if emb:
await self.bot.send_message(channel, embed=emb)
else:
await self.bot.send_message(channel, msg)
else:
if emb:
await self.bot.say(embed=emb)
else:
await self.bot.say(msg)
except:
pass
async def add_alert(self, ctx, currency, operator, user_value, fiat, **kwargs):
"""
Adds an alert to alerts.json
@param currency - cryptocurrency to set an alert of
@param operator - operator condition to notify the channel
@param user_value - price or percent for condition to compare
@param fiat - desired fiat currency (i.e. 'EUR', 'USD')
"""
try:
if not self._check_permission(ctx):
return
alert_num = None
ucase_fiat = self.coin_market.fiat_check(fiat)
if currency.upper() in self.acronym_list:
currency = self.acronym_list[currency.upper()]
if "Duplicate" in currency:
await self._say_msg(currency)
return
if currency not in self.market_list:
raise CurrencyException("Currency is invalid: ``{}``".format(currency))
try:
if not self._check_alert(currency, operator, user_value, ucase_fiat, kwargs):
await self._say_msg("Failed to create alert. Current price "
"of **{}** already meets the condition."
"".format(currency.title()))
return
except Exception:
await self._say_msg("Invalid operator: **{}**".format(operator))
return
user_id = ctx.message.author.id
if user_id not in self.alert_data:
self.alert_data[user_id] = {}
for i in range(1, len(self.alert_data[user_id]) + 2):
if str(i) not in self.alert_data[user_id]:
alert_num = str(i)
if alert_num is None:
raise Exception("Something went wrong with adding alert.")
alert_cap = int(self.alert_capacity)
if int(alert_num) > alert_cap:
await self.bot.say("Unable to add alert, user alert capacity of"
" **{}** has been reached.".format(alert_cap))
return
alert_list = self.alert_data[user_id]
alert_list[alert_num] = {}
channel_alert = alert_list[alert_num]
channel_alert["currency"] = currency
channel_alert["channel"] = ctx.message.channel.id
if operator in self.supported_operators:
channel_alert["operation"] = operator
else:
await self._say_msg("Invalid operator: {}. Your choices are **<*"
"*, **<=**, **>**, or **>=**"
"".format(operator))
return
if kwargs:
if "btc" in kwargs:
user_value = "{:,.8f}".format(user_value).rstrip('0')
if user_value.endswith('.'):
user_value = user_value.replace('.', '')
channel_alert["unit"] = {"btc": "{}".format(user_value).rstrip('0')}
else:
channel_alert["percent"] = ("{}".format(user_value)).rstrip('0')
for arg in kwargs:
channel_alert["percent_change"] = arg
else:
channel_alert["price"] = ("{:.6f}".format(user_value)).rstrip('0')
if channel_alert["price"].endswith('.'):
channel_alert["price"] = channel_alert["price"].replace('.', '')
channel_alert["fiat"] = ucase_fiat
self._save_alert_file(self.alert_data)
await self._say_msg("Alert has been set. This bot will post the "
"alert in this specific channel.")
except CurrencyException as e:
logger.error("CurrencyException: {}".format(str(e)))
await self._say_msg(str(e))
except FiatException as e:
logger.error("FiatException: {}".format(str(e)))
await self._say_msg(str(e))
except Exception as e:
print("Failed to add alert. See error.log.")
logger.error("Exception: {}".format(str(e)))
def _save_alert_file(self, alert_data={}, backup=False):
"""
Saves alerts.json file
"""
if backup:
alert_filename = "alerts_backup.json"
else:
alert_filename = "alerts.json"
with open(alert_filename, 'w') as outfile:
json.dump(alert_data,
outfile,
indent=4)
async def remove_alert(self, ctx, alert_num):
"""
Removes an alert from the user's list of alerts
@param ctx - context of the command sent
@param alert_num - number of the specific alert to remove
"""
try:
if not self._check_permission(ctx):
return
user_id = ctx.message.author.id
user_list = self.alert_data
alert_list = user_list[user_id]
if alert_num in alert_list:
removed_alert = alert_num
alert_setting = alert_list[alert_num]
alert_currency = alert_setting["currency"]
alert_operation = self._translate_operation(alert_setting["operation"])
if "unit" in alert_setting:
if "btc" in alert_setting["unit"]:
alert_btc = alert_setting["unit"]["btc"]
if alert_btc.endswith('.'):
alert_btc = alert_btc.replace('.', '')
alert_value = "{} BTC".format(alert_btc)
elif "percent" in alert_setting:
alert_percent = alert_setting["percent"]
if alert_percent.endswith('.'):
alert_percent = alert_percent.replace('.', '')
alert_value = "{}%".format(alert_percent)
if "hour" == alert_setting["percent_change"]:
alert_value += " (1H)"
elif "day" == alert_setting["percent_change"]:
alert_value += " (24H)"
elif "week" == alert_setting["percent_change"]:
alert_value += " (7D)"
else:
alert_value = alert_setting["price"]
alert_fiat = alert_setting["fiat"]
alert_list.pop(str(alert_num))
self._save_alert_file(self.alert_data)
msg = ("Alert **{}** where **{}** is **{}** **{}** "
"".format(removed_alert,
alert_currency.title(),
alert_operation,
alert_value))
if "price" in alert_setting:
msg += "**{}** ".format(alert_fiat)
msg += "was successfully removed."
await self._say_msg(msg)
else:
await self._say_msg("The number you've entered does not exist "
"in the alert list. Use `$geta` to receive "
"a list of ongoing alerts.")
except Forbidden:
pass
except CurrencyException as e:
logger.error("CurrencyException: {}".format(str(e)))
await self._say_msg(str(e))
except Exception as e:
print("Failed to remove alert. See error.log.")
logger.error("Exception: {}".format(str(e)))
async def get_alert_list(self, ctx):
"""
Gets the list of alerts and displays them
@param ctx - context of the command sent
"""
try:
if not self._check_permission(ctx):
return
user_id = ctx.message.author.id
user_list = self.alert_data
msg = {}
result_msg = ""
if user_id in user_list:
alert_list = user_list[user_id]
if len(alert_list) != 0:
for alert in alert_list:
currency = alert_list[alert]["currency"].title()
operation = self._translate_operation(alert_list[alert]["operation"])
if "unit" in alert_list[alert]:
if "btc" in alert_list[alert]["unit"]:
alert_btc = alert_list[alert]["unit"]["btc"]
if alert_btc.endswith('.'):
alert_btc = alert_btc.replace('.', '')
alert_value = "{}".format(alert_btc)
elif "percent" in alert_list[alert]:
alert_percent = alert_list[alert]["percent"]
if alert_percent.endswith('.'):
alert_percent = alert_percent.replace('.', '')
alert_value = "{}%".format(alert_percent)
else:
alert_value = alert_list[alert]["price"]
msg[int(alert)] = ("[**{}**] Alert when **{}** is "
"**{}** **{}** "
"".format(alert,
currency,
operation,
alert_value))
if "unit" in alert_list[alert]:
if "btc" in alert_list[alert]["unit"]:
msg[int(alert)] += "**BTC**\n"
elif "percent_change" in alert_list[alert]:
if "hour" == alert_list[alert]["percent_change"]:
msg[int(alert)] += "(**1H**)\n"
elif "day" == alert_list[alert]["percent_change"]:
msg[int(alert)] += "(**24H**)\n"
elif "week" == alert_list[alert]["percent_change"]:
msg[int(alert)] += "(**7D**)\n"
else:
msg[int(alert)] += ("**{}**\n"
"".format(alert_list[alert]["fiat"]))
for line in sorted(msg):
result_msg += msg[line]
color = 0x00FF00
else:
result_msg = "Channel does not have any alerts to display."
color = 0xD14836
else:
result_msg = "User never created any alerts."
color = 0xD14836
em = discord.Embed(title="Alerts",
description=result_msg,
colour=color)
await self.bot.say(embed=em)
except Forbidden:
pass
except CurrencyException as e:
logger.error("CurrencyException: {}".format(str(e)))
await self._say_msg(str(e))
except Exception as e:
print("Failed to create alert list. See error.log.")
logger.error("Exception: {}".format(str(e)))
async def alert_user(self):
"""
Checks and displays alerts that have met the condition of the
cryptocurrency price
"""
try:
kwargs = {}
raised_alerts = defaultdict(list)
for user in self.alert_data:
alert_list = self.alert_data[str(user)]
for alert in alert_list:
alert_currency = alert_list[alert]["currency"]
operator_symbol = alert_list[alert]["operation"]
if "unit" in alert_list[alert]:
if "btc" in alert_list[alert]["unit"]:
alert_value = alert_list[alert]["unit"]["btc"]
kwargs["btc"] = True
elif "percent_change" in alert_list[alert]:
alert_value = alert_list[alert]["percent"]
if alert_value.endswith('.'):
alert_value = alert_value.replace('.', '')
kwargs[alert_list[alert]["percent_change"]] = True
else:
alert_value = alert_list[alert]["price"]
alert_fiat = alert_list[alert]["fiat"]
if not self._check_alert(alert_currency, operator_symbol,
alert_value, alert_fiat,
kwargs):
alert_operator = self._translate_operation(operator_symbol)
raised_alerts[user].append(alert)
if "channel" not in alert_list[alert]:
channel_obj = await self.bot.get_user_info(user)
else:
channel_obj = alert_list[alert]["channel"]
channel_obj = self.bot.get_channel(channel_obj)
if not channel_obj:
channel_obj = await self.bot.get_user_info(user)
if alert_currency in self.market_list:
msg = ("**{}** is **{}** **{}**"
"".format(alert_currency.title(),
alert_operator,
alert_value))
if "unit" in alert_list[alert]:
if "btc" in alert_list[alert]["unit"]:
msg += " **BTC**\n"
elif "percent_change" in alert_list[alert]:
if "hour" == alert_list[alert]["percent_change"]:
msg += "% (**1H**)\n"
elif "day" == alert_list[alert]["percent_change"]:
msg += "% (**24H**)\n"
elif "week" == alert_list[alert]["percent_change"]:
msg += "% (**7D**)\n"
else:
msg += " **{}**\n".format(alert_fiat)
msg += "<@{}>".format(user)
else:
msg = ("**{}** is no longer a valid currency "
"according to the coinmarketapi api. Alerts "
"related to this currency will be removed."
"".format(alert_currency.title()))
em = discord.Embed(title="Alert **{}**".format(alert),
description=msg,
colour=0xFF9900)
await self._say_msg(channel=channel_obj, emb=em)
kwargs.clear()
if raised_alerts:
for user in raised_alerts:
for alert_num in raised_alerts[user]:
self.alert_data[user].pop(str(alert_num))
self._save_alert_file(self.alert_data)
except Exception as e:
print("Failed to alert user. See error.log.")
logger.error("Exception: {}".format(str(e)))
|
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import csv
import os
from os import path
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--file", type=str)
parser.add_argument("--max", type=float)
parser.add_argument("--target", type=int, default=-1)
parser.add_argument("--text", type=int, default=12)
args = parser.parse_args()
line_cnt = 0
step = 0
# s, b, a, s', b', empty
matplotlib.rc('xtick', labelsize=args.text)
matplotlib.rc('ytick', labelsize=args.text)
n_size = 14
with open(args.file, newline='\n') as csvfile:
csvReader = csv.reader(csvfile)
for row in csvReader:
line_index = line_cnt % 3
# state
if (line_index == 2):
[cart_pos, angle, lb_pos, rb_pos] = row[0].split(";")
angle = int(angle)
cart_pos = int(cart_pos)
lb_pos = int(lb_pos)
rb_pos = int(rb_pos)
# Plotting
# belief
plt.ylim(0, n_size)
plt.xlim(0, n_size)
plt.xlabel('Right bump position', fontsize=args.text + 2)
plt.ylabel('Left bump position', fontsize=args.text + 2)
plt.imshow(b)
# Left bump
plt.plot(lb_pos, lb_pos, 'ro', markersize=10)
# Right bump
plt.plot(rb_pos, rb_pos, 'go', markersize=10)
            # Cart position
if (a == 0 or a == 2):
plt.plot(cart_pos, cart_pos, 'yx', markersize=10)
else:
plt.plot(cart_pos, cart_pos, 'bx', markersize=10)
for dot in range(n_size + 1):
if dot != cart_pos and dot != rb_pos and dot != lb_pos:
plt.plot(dot, dot, 'wo', markersize=5)
plt.grid()
plt.clim(0, args.max)
plt.colorbar()
saved_path = args.file + '-' + 'true'
if not path.exists(saved_path):
os.mkdir(saved_path)
if step == args.target:
plt.savefig(saved_path + '/' + str(step) + ".png", bbox_inches='tight', dpi=600)
plt.close()
step += 1
# current belief
elif (line_index == 0):
b = row[0].split(";")
b = [float(el) for el in b]
b = np.array(b)
b = b.reshape(n_size + 1, n_size + 1)
# action
elif (line_index == 1):
a = int(row[0])
line_cnt += 1
line_cnt = 0
step = 0
|
"""Test the Admin functionality of the organizer App"""
from django.contrib.auth import get_user_model
from test_plus import TestCase
from config.test_utils import get_instance_data, omit_keys
from ..models import NewsLink, Startup, Tag
from .factories import (
NewsLinkFactory,
StartupFactory,
TagFactory,
)
def get_startup_data(startup):
"""Strip unchecked fields from Startup"""
return omit_keys(
"id", "tags", get_instance_data(startup)
)
def get_newslink_data(newslink):
"""Strip unchecked fields from NewsLink"""
return omit_keys(
"id", "startup", get_instance_data(newslink)
)
class AdminSetupMixin:
"""Utility class to provide common setup pattern"""
@classmethod
def setUpTestData(cls):
"""Generate test data for entire suite"""
User = get_user_model()
cls.test_user = User.objects.create_superuser(
email="admin@example.com", password="password"
)
class TagAdminTests(AdminSetupMixin, TestCase):
"""Test suite for TagAdmin class"""
@classmethod
def setUpTestData(cls):
"""Generate test data for entire suite"""
super().setUpTestData()
cls.t1_pk = TagFactory().pk
def test_list_get(self):
"""Is the admin list of Tags available?"""
with self.login(self.test_user):
self.get_check_200(
"admin:organizer_tag_changelist"
)
def test_add_get(self):
"""Is the admin add form for Tags available?"""
with self.login(self.test_user):
self.get_check_200("admin:organizer_tag_add")
def test_add_post(self):
"""Can new Tags be created?"""
self.assertEqual(Tag.objects.count(), 1)
data = get_instance_data(TagFactory.build())
with self.login(self.test_user):
self.post("admin:organizer_tag_add", data=data)
self.assertEqual(Tag.objects.count(), 2)
def test_change_get(self):
"""Is the admin Tag change-form available?"""
# the Tag is created in setUpTestData
with self.login(self.test_user):
self.get_check_200(
"admin:organizer_tag_change",
object_id=self.t1_pk,
)
def test_change_post(self):
"""Can existing Tags be modified?"""
t2 = TagFactory()
data = dict(name="a new tag name")
self.assertNotEqual(get_instance_data(t2), data)
with self.login(self.test_user):
self.post(
"admin:organizer_tag_change",
data=data,
object_id=t2.pk,
)
self.response_302()
t2.refresh_from_db()
self.assertEqual(
omit_keys("id", "slug", get_instance_data(t2)),
data,
)
self.assertEqual(Tag.objects.count(), 2)
def test_delete_get(self):
"""Is the admin Tag delete-form available?"""
# the Tag is created in setUpTestData
with self.login(self.test_user):
self.get_check_200(
"admin:organizer_tag_delete",
object_id=self.t1_pk,
)
def test_delete_post(self):
"""Can Tags be deleted?"""
# the Tag is created in setUpTestData
t2_pk = TagFactory().pk
with self.login(self.test_user):
self.post(
"admin:organizer_tag_delete",
object_id=t2_pk,
data=dict(post="yes"),
)
self.response_302()
self.assertFalse(
Tag.objects.filter(id=t2_pk).exists()
)
def test_history_get(self):
"""Is a Tag's history available?"""
# the Tag is created in setUpTestData
with self.login(self.test_user):
self.get_check_200(
"admin:organizer_tag_history",
object_id=self.t1_pk,
)
class StartupAdminTests(AdminSetupMixin, TestCase):
"""Test suite for StartupAdmin class"""
@classmethod
def setUpTestData(cls): # noqa: N802
"""Generate test data for entire suite"""
super().setUpTestData()
cls.s1_pk = StartupFactory().pk
cls.t1_pk = TagFactory().pk
def test_list_get(self):
"""Is the admin list of Startups available?"""
with self.login(self.test_user):
self.get_check_200(
"admin:organizer_startup_changelist"
)
def test_add_get(self):
"""Is the admin add form for Startups available?"""
with self.login(self.test_user):
self.get_check_200(
"admin:organizer_startup_add"
)
def test_add_post(self):
"""Can new Startups be created?"""
self.assertEqual(Startup.objects.count(), 1)
startup_data = get_startup_data(
StartupFactory.build()
)
# the tag below is created in setUpTestData
data = dict(tags=[self.t1_pk], **startup_data)
with self.login(self.test_user):
self.post(
"admin:organizer_startup_add", data=data
)
self.assertEqual(Startup.objects.count(), 2)
def test_change_get(self):
"""Is the admin Startup change-form available?"""
# the Startup is created in setUpTestData
with self.login(self.test_user):
self.get_check_200(
"admin:organizer_startup_change",
object_id=self.s1_pk,
)
def test_change_post(self):
"""Can existing Startups be modified?"""
s2 = StartupFactory()
startup_data = get_startup_data(
StartupFactory.build()
)
self.assertNotEqual(
get_startup_data(s2), startup_data
)
# the tag below is created in setUpTestData
data = dict(tags=[self.t1_pk], **startup_data)
with self.login(self.test_user):
self.post(
"admin:organizer_startup_change",
data=data,
object_id=s2.pk,
)
self.response_302()
s2.refresh_from_db()
self.assertEqual(get_startup_data(s2), startup_data)
self.assertEqual(Startup.objects.count(), 2)
def test_delete_get(self):
"""Is the admin Startup delete-form available?"""
# the Startup is created in setUpTestData
with self.login(self.test_user):
self.get_check_200(
"admin:organizer_startup_delete",
object_id=self.s1_pk,
)
def test_delete_post(self):
"""Can Startups be deleted?"""
# the Startup is created in setUpTestData
s2_pk = StartupFactory().pk
with self.login(self.test_user):
self.post(
"admin:organizer_startup_delete",
object_id=s2_pk,
data=dict(post="yes"),
)
self.response_302()
self.assertFalse(
Startup.objects.filter(id=s2_pk).exists()
)
def test_history_get(self):
"""Is a Startup's history available?"""
# the Startup is created in setUpTestData
with self.login(self.test_user):
self.get_check_200(
"admin:organizer_startup_history",
object_id=self.s1_pk,
)
class NewsLinkAdminTests(AdminSetupMixin, TestCase):
"""Test suite for NewsLinkAdmin class"""
@classmethod
def setUpTestData(cls): # noqa: N802
"""Generate test data for entire suite"""
super().setUpTestData()
cls.nl1_pk = NewsLinkFactory().pk
cls.s1_pk = StartupFactory().pk
def test_list_get(self):
"""Is the admin list of NewsLinks available?"""
with self.login(self.test_user):
self.get_check_200(
"admin:organizer_newslink_changelist"
)
def test_add_get(self):
"""Is the admin add form for NewsLinks available?"""
with self.login(self.test_user):
self.get_check_200(
"admin:organizer_newslink_add"
)
def test_add_post(self):
"""Can new NewsLinks be created?"""
self.assertEqual(NewsLink.objects.count(), 1)
newslink_data = get_newslink_data(
NewsLinkFactory.build()
)
# the startup below is created in setUpTestData
data = dict(startup=self.s1_pk, **newslink_data)
with self.login(self.test_user):
self.post(
"admin:organizer_newslink_add", data=data
)
self.assertEqual(NewsLink.objects.count(), 2)
def test_change_get(self):
"""Is the admin NewsLink change-form available?"""
# the NewsLink is created in setUpTestData
with self.login(self.test_user):
self.get_check_200(
"admin:organizer_newslink_change",
object_id=self.nl1_pk,
)
def test_change_post(self):
"""Can existing NewsLinks be modified?"""
nl2 = NewsLinkFactory()
newslink_data = get_newslink_data(
NewsLinkFactory.build()
)
self.assertNotEqual(
get_newslink_data(nl2), newslink_data
)
# the startup below is created in setUpTestData
data = dict(startup=self.s1_pk, **newslink_data)
with self.login(self.test_user):
self.post(
"admin:organizer_newslink_change",
data=data,
object_id=nl2.pk,
)
self.response_302()
nl2.refresh_from_db()
self.assertEqual(
get_newslink_data(nl2), newslink_data
)
self.assertEqual(nl2.startup.pk, self.s1_pk)
self.assertEqual(NewsLink.objects.count(), 2)
def test_delete_get(self):
"""Is the admin NewsLink delete-form available?"""
# the NewsLink is created in setUpTestData
with self.login(self.test_user):
self.get_check_200(
"admin:organizer_newslink_delete",
object_id=self.nl1_pk,
)
def test_delete_post(self):
"""Can NewsLinks be deleted?"""
nl2_pk = NewsLinkFactory().pk
with self.login(self.test_user):
self.post(
"admin:organizer_newslink_delete",
object_id=nl2_pk,
data=dict(post="yes"),
)
self.response_302()
self.assertFalse(
NewsLink.objects.filter(id=nl2_pk).exists()
)
def test_history_get(self):
"""Is a NewsLink's history available?"""
# the NewsLink is created in setUpTestData
with self.login(self.test_user):
self.get_check_200(
"admin:organizer_newslink_history",
object_id=self.nl1_pk,
)
|
"""Support for Xiaomi Yeelight WiFi color bulb."""
from __future__ import annotations
import asyncio
import logging
from yeelight import BulbException
from yeelight.aio import KEY_CONNECTED
from homeassistant.const import CONF_ID, CONF_NAME
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_call_later
from .const import (
ACTIVE_COLOR_FLOWING,
ACTIVE_MODE_NIGHTLIGHT,
DATA_UPDATED,
STATE_CHANGE_TIME,
UPDATE_REQUEST_PROPERTIES,
)
from .scanner import YeelightScanner
_LOGGER = logging.getLogger(__name__)
@callback
def async_format_model(model: str) -> str:
"""Generate a more human readable model."""
return model.replace("_", " ").title()
@callback
def async_format_id(id_: str) -> str:
"""Generate a more human readable id."""
return hex(int(id_, 16)) if id_ else "None"
@callback
def async_format_model_id(model: str, id_: str) -> str:
"""Generate a more human readable name."""
return f"{async_format_model(model)} {async_format_id(id_)}"
@callback
def _async_unique_name(capabilities: dict) -> str:
"""Generate name from capabilities."""
model_id = async_format_model_id(capabilities["model"], capabilities["id"])
return f"Yeelight {model_id}"
def update_needs_bg_power_workaround(data):
"""Check if a push update needs the bg_power workaround.
Some devices will push the incorrect state for bg_power.
To work around this any time we are pushed an update
with bg_power, we force poll state which will be correct.
"""
return "bg_power" in data
class YeelightDevice:
"""Represents single Yeelight device."""
def __init__(self, hass, host, config, bulb):
"""Initialize device."""
self._hass = hass
self._config = config
self._host = host
self._bulb_device = bulb
self.capabilities = {}
self._device_type = None
self._available = True
self._initialized = False
self._name = None
@property
def bulb(self):
"""Return bulb device."""
return self._bulb_device
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def config(self):
"""Return device config."""
return self._config
@property
def host(self):
"""Return hostname."""
return self._host
@property
def available(self):
"""Return true is device is available."""
return self._available
@callback
def async_mark_unavailable(self):
"""Set unavailable on api call failure due to a network issue."""
self._available = False
@property
def model(self):
"""Return configured/autodetected device model."""
return self._bulb_device.model or self.capabilities.get("model")
@property
def fw_version(self):
"""Return the firmware version."""
return self.capabilities.get("fw_ver")
@property
def is_nightlight_supported(self) -> bool:
"""
Return true / false if nightlight is supported.
Uses brightness as it appears to be supported in both ceiling and other lights.
"""
return self._nightlight_brightness is not None
@property
def is_nightlight_enabled(self) -> bool:
"""Return true / false if nightlight is currently enabled."""
# Only ceiling lights have active_mode, from SDK docs:
# active_mode 0: daylight mode / 1: moonlight mode (ceiling light only)
if self._active_mode is not None:
return int(self._active_mode) == ACTIVE_MODE_NIGHTLIGHT
if self._nightlight_brightness is not None:
return int(self._nightlight_brightness) > 0
return False
@property
def is_color_flow_enabled(self) -> bool:
"""Return true / false if color flow is currently running."""
return self._color_flow and int(self._color_flow) == ACTIVE_COLOR_FLOWING
@property
def _active_mode(self):
return self.bulb.last_properties.get("active_mode")
@property
def _color_flow(self):
return self.bulb.last_properties.get("flowing")
@property
def _nightlight_brightness(self):
return self.bulb.last_properties.get("nl_br")
@property
def type(self):
"""Return bulb type."""
if not self._device_type:
self._device_type = self.bulb.bulb_type
return self._device_type
async def _async_update_properties(self):
"""Read new properties from the device."""
try:
await self.bulb.async_get_properties(UPDATE_REQUEST_PROPERTIES)
self._available = True
if not self._initialized:
self._initialized = True
except OSError as ex:
if self._available: # just inform once
_LOGGER.error(
"Unable to update device %s, %s: %s", self._host, self.name, ex
)
self._available = False
except asyncio.TimeoutError as ex:
_LOGGER.debug(
"timed out while trying to update device %s, %s: %s",
self._host,
self.name,
ex,
)
except BulbException as ex:
_LOGGER.debug(
"Unable to update device %s, %s: %s", self._host, self.name, ex
)
async def async_setup(self):
"""Fetch capabilities and setup name if available."""
scanner = YeelightScanner.async_get(self._hass)
self.capabilities = await scanner.async_get_capabilities(self._host) or {}
if self.capabilities:
self._bulb_device.set_capabilities(self.capabilities)
if name := self._config.get(CONF_NAME):
# Override default name when name is set in config
self._name = name
elif self.capabilities:
# Generate name from model and id when capabilities is available
self._name = _async_unique_name(self.capabilities)
elif self.model and (id_ := self._config.get(CONF_ID)):
self._name = f"Yeelight {async_format_model_id(self.model, id_)}"
else:
self._name = self._host # Default name is host
async def async_update(self, force=False):
"""Update device properties and send data updated signal."""
if not force and self._initialized and self._available:
# No need to poll unless force, already connected
return
await self._async_update_properties()
async_dispatcher_send(self._hass, DATA_UPDATED.format(self._host))
async def _async_forced_update(self, _now):
"""Call a forced update."""
await self.async_update(True)
@callback
def async_update_callback(self, data):
"""Update push from device."""
_LOGGER.debug("Received callback: %s", data)
was_available = self._available
self._available = data.get(KEY_CONNECTED, True)
if update_needs_bg_power_workaround(data) or (
not was_available and self._available
):
# On reconnect the properties may be out of sync
#
# If the device drops the connection right away, we do not want to
# do a property resync via async_update since its about
# to be called when async_setup_entry reaches the end of the
# function
#
async_call_later(self._hass, STATE_CHANGE_TIME, self._async_forced_update)
async_dispatcher_send(self._hass, DATA_UPDATED.format(self._host))
|
#!/usr/bin/env python2.7
# coding=utf-8
"""
@date = '8/9/16'
@author = 'chenjian'
@email = 'chenjian@cvte.com'
"""
import time
import cv2
import os
if __name__ == '__main__':
os.environ['GENICAM_ROOT_V2_4'] = '/opt/imperx/bobcat_gev/lib/genicam'
print(os.environ['GENICAM_ROOT_V2_4'])
import CameraBobcat
camera = CameraBobcat.CameraBobcat()
camera.open()
time.sleep(5)
print('set shutter', camera.set_shutter(400))
print('get shutter', camera.get_shutter())
print('set wb', camera.set_wb(200, 200, 200))
print('get wb', camera.get_white_balance_red(), camera.get_white_balance_green(), camera.get_white_balance_blue())
print('get_camera_id', camera.get_camera_id())
print('get_camera_temperature', camera.get_camera_temperature())
print('get_firmware_version', camera.get_firmware_version())
# for i in range(500):
# img = camera.get_image_in_numpy()
# # print 'frame rate: ', camera.get_frame_rate()
# if img.size > 1:
# cv2.imshow("tests", img)
# cv2.waitKey(1)
# # print img
# # cv2.waitKey(100)
# print 'get_frame_rate', camera.get_frame_rate()
# print 'end.'
|
import torch
from torch_scatter import segment_cpu, gather_cpu
from torch_scatter.helpers import min_value, max_value
if torch.cuda.is_available():
from torch_scatter import segment_cuda, gather_cuda
def seg(is_cuda):
return segment_cuda if is_cuda else segment_cpu
def gat(is_cuda):
return gather_cuda if is_cuda else gather_cpu
class SegmentCOO(torch.autograd.Function):
@staticmethod
def forward(ctx, src, index, out, dim_size, reduce):
assert reduce in ['add', 'mean', 'min', 'max']
if out is not None:
ctx.mark_dirty(out)
ctx.reduce = reduce
ctx.src_size = list(src.size())
fill_value = 0
if out is None:
dim_size = index.max().item() + 1 if dim_size is None else dim_size
size = list(src.size())
size[index.dim() - 1] = dim_size
if reduce == 'min':
fill_value = max_value(src.dtype)
elif reduce == 'max':
fill_value = min_value(src.dtype)
out = src.new_full(size, fill_value)
out, arg_out = seg(src.is_cuda).segment_coo(src, index, out, reduce)
if fill_value != 0:
out.masked_fill_(out == fill_value, 0)
ctx.save_for_backward(index, arg_out)
if reduce == 'min' or reduce == 'max':
return out, arg_out
else:
return out
@staticmethod
def backward(ctx, grad_out, *args):
(index, arg_out), src_size = ctx.saved_tensors, ctx.src_size
grad_src = None
if ctx.needs_input_grad[0]:
if ctx.reduce == 'add':
grad_src = gat(grad_out.is_cuda).gather_coo(
grad_out, index, grad_out.new_empty(src_size))
elif ctx.reduce == 'mean':
grad_src = gat(grad_out.is_cuda).gather_coo(
grad_out, index, grad_out.new_empty(src_size))
count = arg_out # Gets pre-computed on GPU but not on CPU.
if count is None:
size = list(index.size())
size[-1] = grad_out.size(index.dim() - 1)
count = segment_cpu.segment_coo(
torch.ones_like(index, dtype=grad_out.dtype), index,
grad_out.new_zeros(size), 'add')[0].clamp_(min=1)
count = gat(grad_out.is_cuda).gather_coo(
count, index, count.new_empty(src_size[:index.dim()]))
for _ in range(grad_out.dim() - index.dim()):
count = count.unsqueeze(-1)
grad_src.div_(count)
elif ctx.reduce == 'min' or ctx.reduce == 'max':
src_size[index.dim() - 1] += 1
grad_src = grad_out.new_zeros(src_size).scatter_(
index.dim() - 1, arg_out, grad_out)
grad_src = grad_src.narrow(index.dim() - 1, 0,
src_size[index.dim() - 1] - 1)
return grad_src, None, None, None, None
class SegmentCSR(torch.autograd.Function):
@staticmethod
def forward(ctx, src, indptr, out, reduce):
assert reduce in ['add', 'mean', 'min', 'max']
if out is not None:
ctx.mark_dirty(out)
ctx.reduce = reduce
ctx.src_size = list(src.size())
out, arg_out = seg(src.is_cuda).segment_csr(src, indptr, out, reduce)
ctx.save_for_backward(indptr, arg_out)
return out if arg_out is None else (out, arg_out)
@staticmethod
def backward(ctx, grad_out, *args):
(indptr, arg_out), src_size = ctx.saved_tensors, ctx.src_size
grad_src = None
if ctx.needs_input_grad[0]:
if ctx.reduce == 'add':
grad_src = gat(grad_out.is_cuda).gather_csr(
grad_out, indptr, grad_out.new_empty(src_size))
elif ctx.reduce == 'mean':
grad_src = gat(grad_out.is_cuda).gather_csr(
grad_out, indptr, grad_out.new_empty(src_size))
indptr1 = indptr.narrow(-1, 0, indptr.size(-1) - 1)
indptr2 = indptr.narrow(-1, 1, indptr.size(-1) - 1)
count = (indptr2 - indptr1).to(grad_src.dtype)
count = gat(grad_out.is_cuda).gather_csr(
count, indptr, count.new_empty(src_size[:indptr.dim()]))
for _ in range(grad_out.dim() - indptr.dim()):
count = count.unsqueeze(-1)
grad_src.div_(count)
elif ctx.reduce == 'min' or ctx.reduce == 'max':
src_size[indptr.dim() - 1] += 1
grad_src = grad_out.new_zeros(src_size).scatter_(
indptr.dim() - 1, arg_out, grad_out)
grad_src = grad_src.narrow(indptr.dim() - 1, 0,
src_size[indptr.dim() - 1] - 1)
return grad_src, None, None, None
def segment_coo(src, index, out=None, dim_size=None, reduce="add"):
r"""
|
.. image:: https://raw.githubusercontent.com/rusty1s/pytorch_scatter/
master/docs/source/_figures/segment_coo.svg?sanitize=true
:align: center
:width: 400px
|
Reduces all values from the :attr:`src` tensor into :attr:`out` at the
indices specified in the :attr:`index` tensor along the last dimension of
:attr:`index`.
For each value in :attr:`src`, its output index is specified by its index
in :attr:`src` for dimensions outside of :obj:`index.dim() - 1` and by the
corresponding value in :attr:`index` for dimension :obj:`index.dim() - 1`.
The applied reduction is defined via the :attr:`reduce` argument.
Formally, if :attr:`src` and :attr:`index` are :math:`n`-dimensional and
:math:`m`-dimensional tensors with
size :math:`(x_0, ..., x_{m-1}, x_m, x_{m+1}, ..., x_{n-1})` and
:math:`(x_0, ..., x_{m-1}, x_m)`, respectively, then :attr:`out` must be an
:math:`n`-dimensional tensor with size
:math:`(x_0, ..., x_{m-1}, y, x_{m+1}, ..., x_{n-1})`.
Moreover, the values of :attr:`index` must be between :math:`0` and
:math:`y - 1` in ascending order.
The :attr:`index` tensor supports broadcasting in case its dimensions do
not match with :attr:`src`.
For one-dimensional tensors with :obj:`reduce="add"`, the operation
computes
.. math::
\mathrm{out}_i = \mathrm{out}_i + \sum_j~\mathrm{src}_j
where :math:`\sum_j` is over :math:`j` such that
:math:`\mathrm{index}_j = i`.
In contrast to :meth:`scatter`, this method expects values in :attr:`index`
**to be sorted** along dimension :obj:`index.dim() - 1`.
Due to the use of sorted indices, :meth:`segment_coo` is usually faster
than the more general :meth:`scatter` operation.
For reductions :obj:`"min"` and :obj:`"max"`, this operation returns a
second tensor representing the :obj:`argmin` and :obj:`argmax`,
respectively.
.. note::
This operation is implemented via atomic operations on the GPU and is
therefore **non-deterministic** since the order of parallel operations
to the same value is undetermined.
For floating-point variables, this results in a source of variance in
the result.
Args:
src (Tensor): The source tensor.
index (LongTensor): The sorted indices of elements to segment.
The number of dimensions of :attr:`index` needs to be less than or
equal to :attr:`src`.
out (Tensor, optional): The destination tensor. (default: :obj:`None`)
dim_size (int, optional): If :attr:`out` is not given, automatically
create output with size :attr:`dim_size` at dimension
:obj:`index.dim() - 1`.
If :attr:`dim_size` is not given, a minimal sized output tensor
according to :obj:`index.max() + 1` is returned.
(default: :obj:`None`)
reduce (string, optional): The reduce operation (:obj:`"add"`,
:obj:`"mean"`, :obj:`"min"` or :obj:`"max"`).
(default: :obj:`"add"`)
:rtype: :class:`Tensor`, :class:`LongTensor` *(optional)*
.. code-block:: python
from torch_scatter import segment_coo
src = torch.randn(10, 6, 64)
index = torch.tensor([0, 0, 1, 1, 1, 2])
index = index.view(1, -1) # Broadcasting in the first and last dim.
out = segment_coo(src, index, reduce="add")
print(out.size())
.. code-block::
torch.Size([10, 3, 64])
"""
return SegmentCOO.apply(src, index, out, dim_size, reduce)
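# Note (illustrative): as described in the docstring above, reduce="min"/"max" also
# returns the arg-indices, e.g.:
#   out, argmax = segment_coo(src, index, reduce="max")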
def segment_csr(src, indptr, out=None, reduce="add"):
r"""
Reduces all values from the :attr:`src` tensor into :attr:`out` within the
ranges specified in the :attr:`indptr` tensor along the last dimension of
:attr:`indptr`.
For each value in :attr:`src`, its output index is specified by its index
in :attr:`src` for dimensions outside of :obj:`indptr.dim() - 1` and by the
corresponding range index in :attr:`indptr` for dimension
:obj:`indptr.dim() - 1`.
The applied reduction is defined via the :attr:`reduce` argument.
Formally, if :attr:`src` and :attr:`indptr` are :math:`n`-dimensional and
:math:`m`-dimensional tensors with
size :math:`(x_0, ..., x_{m-1}, x_m, x_{m+1}, ..., x_{n-1})` and
:math:`(x_0, ..., x_{m-1}, y)`, respectively, then :attr:`out` must be an
:math:`n`-dimensional tensor with size
:math:`(x_0, ..., x_{m-1}, y - 1, x_{m+1}, ..., x_{n-1})`.
Moreover, the values of :attr:`indptr` must be between :math:`0` and
:math:`x_m` in ascending order.
The :attr:`indptr` tensor supports broadcasting in case its dimensions do
not match with :attr:`src`.
For one-dimensional tensors with :obj:`reduce="add"`, the operation
computes
.. math::
\mathrm{out}_i =
        \sum_{j = \mathrm{indptr}[i]}^{\mathrm{indptr}[i+1]}~\mathrm{src}_j.
Due to the use of index pointers, :meth:`segment_csr` is the fastest
method to apply for grouped reductions.
For reductions :obj:`"min"` and :obj:`"max"`, this operation returns a
second tensor representing the :obj:`argmin` and :obj:`argmax`,
respectively.
.. note::
In contrast to :meth:`scatter()` and :meth:`segment_coo`, this
operation is **fully-deterministic**.
Args:
src (Tensor): The source tensor.
indptr (LongTensor): The index pointers between elements to segment.
            The number of dimensions of :attr:`indptr` needs to be less than or
equal to :attr:`src`.
out (Tensor, optional): The destination tensor. (default: :obj:`None`)
reduce (string, optional): The reduce operation (:obj:`"add"`,
:obj:`"mean"`, :obj:`"min"` or :obj:`"max"`).
(default: :obj:`"add"`)
:rtype: :class:`Tensor`, :class:`LongTensor` *(optional)*
.. code-block:: python
from torch_scatter import segment_csr
src = torch.randn(10, 6, 64)
indptr = torch.tensor([0, 2, 5, 6])
indptr = indptr.view(1, -1) # Broadcasting in the first and last dim.
out = segment_csr(src, indptr, reduce="add")
print(out.size())
.. code-block::
torch.Size([10, 3, 64])
"""
return SegmentCSR.apply(src, indptr, out, reduce)
|
#AUTOGENERATED! DO NOT EDIT! File to edit: dev/91_notebook_export.ipynb (unless otherwise specified).
__all__ = ['read_nb', 'check_re', 'is_export', 'find_default_export', 'export_names', 'extra_add', 'notebook2script',
'get_name', 'qual_name', 'source_nb', 'script2notebook', 'diff_nb_script']
#Cell
from ..imports import *
from .core import *
import nbformat,inspect
from nbformat.sign import NotebookNotary
#Cell
def read_nb(fname):
"Read the notebook in `fname`."
with open(Path(fname),'r') as f: return nbformat.reads(f.read(), as_version=4)
#Cell
def check_re(cell, pat, code_only=True):
"Check if `cell` contains a line with regex `pat`"
if code_only and cell['cell_type'] != 'code': return
if isinstance(pat, str): pat = re.compile(pat, re.IGNORECASE | re.MULTILINE)
return pat.search(cell['source'])
#Cell
_re_blank_export = re.compile(r"""
# Matches any line with #export or #exports without any module name:
^ # beginning of line (since re.MULTILINE is passed)
\s* # any number of whitespace
\#\s* # # then any number of whitespace
exports? # export or exports
\s* # any number of whitespace
$ # end of line (since re.MULTILINE is passed)
""", re.IGNORECASE | re.MULTILINE | re.VERBOSE)
#Cell
_re_mod_export = re.compile(r"""
# Matches any line with #export or #exports with a module name and catches it in group 1:
^ # beginning of line (since re.MULTILINE is passed)
\s* # any number of whitespace
\#\s* # # then any number of whitespace
exports? # export or exports
\s* # any number of whitespace
(\S+) # catch a group with any non-whitespace chars
\s* # any number of whitespace
$ # end of line (since re.MULTILINE is passed)
""", re.IGNORECASE | re.MULTILINE | re.VERBOSE)
#Cell
def is_export(cell, default):
"Check if `cell` is to be exported and returns the name of the module."
if check_re(cell, _re_blank_export):
if default is None:
print(f"This cell doesn't have an export destination and was ignored:\n{cell['source'][1]}")
return default
tst = check_re(cell, _re_mod_export)
return os.path.sep.join(tst.groups()[0].split('.')) if tst else None
#Cell
_re_default_exp = re.compile(r"""
# Matches any line with #default_exp with a module name and catches it in group 1:
^ # beginning of line (since re.MULTILINE is passed)
\s* # any number of whitespace
\#\s* # # then any number of whitespace
default_exp # the default_exp marker
\s* # any number of whitespace
(\S+) # catch a group with any non-whitespace chars
\s* # any number of whitespace
$ # end of line (since re.MULTILINE is passed)
""", re.IGNORECASE | re.MULTILINE | re.VERBOSE)
#Cell
def find_default_export(cells):
"Find in `cells` the default export module."
for cell in cells:
tst = check_re(cell, _re_default_exp)
if tst: return tst.groups()[0]
#Cell
def _create_mod_file(fname, nb_path):
"Create a module file for `fname`."
fname.parent.mkdir(parents=True, exist_ok=True)
with open(fname, 'w') as f:
f.write(f"#AUTOGENERATED! DO NOT EDIT! File to edit: dev/{nb_path.name} (unless otherwise specified).")
f.write('\n\n__all__ = []')
#Cell
_re_patch_func = re.compile(r"""
# Catches any function decorated with @patch, its name in group 1 and the patched class in group 2
@patch # At any place in the cell, something that begins with @patch
\s*def # Any number of whitespace (including a new line probably) followed by def
\s+ # One whitespace or more
([^\(\s]*) # Catch a group composed of anything but whitespace or an opening parenthesis (name of the function)
\s*\( # Any number of whitespace followed by an opening parenthesis
[^:]* # Any number of character different of : (the name of the first arg that is type-annotated)
:\s* # A column followed by any number of whitespace
(?: # Non-catching group with either
([^,\s\(\)]*) # a group composed of anything but a comma, a parenthesis or whitespace (name of the class)
| # or
(\([^\)]*\))) # a group composed of something between parenthesis (tuple of classes)
\s* # Any number of whitespace
(?:,|\)) # Non-catching group with either a comma or a closing parenthesis
""", re.VERBOSE)
#Cell
_re_class_func_def = re.compile(r"""
# Catches any 0-indented function or class definition with its name in group 1
^ # Beginning of a line (since re.MULTILINE is passed)
(?:def|class) # Non-catching group for def or class
\s+ # One whitespace or more
([^\(\s]*) # Catching group with any character except an opening parenthesis or a whitespace (name)
\s* # Any number of whitespace
(?:\(|:) # Non-catching group with either an opening parenthesis or a : (classes don't need ())
""", re.MULTILINE | re.VERBOSE)
#Cell
_re_obj_def = re.compile(r"""
# Catches any 0-indented object definition (bla = thing) with its name in group 1
^ # Beginning of a line (since re.MULTILINE is passed)
([^=\s]*) # Catching group with any character except a whitespace or an equal sign
\s*= # Any number of whitespace followed by an =
""", re.MULTILINE | re.VERBOSE)
#Cell
def _not_private(n):
for t in n.split('.'):
if t.startswith('_'): return False
return '\\' not in t and '^' not in t and '[' not in t
def export_names(code, func_only=False):
"Find the names of the objects, functions or classes defined in `code` that are exported."
#Format monkey-patches with @patch
def _f(gps):
nm, cls, t = gps.groups()
if cls is not None: return f"def {cls}.{nm}():"
return '\n'.join([f"def {c}.{nm}():" for c in re.split(', *', t[1:-1])])
code = _re_patch_func.sub(_f, code)
names = _re_class_func_def.findall(code)
if not func_only: names += _re_obj_def.findall(code)
return [n for n in names if _not_private(n)]
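# Illustrative sketch (not in the original source) of what export_names finds. Given code
# containing "@patch\ndef add(x:Tensor): return x", "class Foo(): pass" and "bar = 3",
# the @patch-decorated def is first rewritten as "def Tensor.add():", so the result is
# ['Tensor.add', 'Foo', 'bar']; names starting with an underscore are filtered out by
# _not_private, and func_only=True would drop 'bar'. The Tensor class here is only an example.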
#Cell
_re_all_def = re.compile(r"""
# Catches a cell that defines _all_ = [...] and captures the bracket contents in group 1:
^_all_ # Beginning of line (since re.MULTILINE is passed)
\s*=\s* # Any number of whitespace, =, any number of whitespace
\[ # Opening [
([^\n\]]*) # Catching group with anything except a ] or newline
\] # Closing ]
""", re.MULTILINE | re.VERBOSE)
#Same with __all__
_re__all__def = re.compile(r'^__all__\s*=\s*\[([^\]]*)\]', re.MULTILINE)
#Cell
def extra_add(code):
"Catch adds to `__all__` required by a cell with `_all_=`"
if _re_all_def.search(code):
names = _re_all_def.search(code).groups()[0]
        names = re.sub(r'\s*,\s*', ',', names)
names = names.replace('"', "'")
code = _re_all_def.sub('', code)
code = re.sub(r'([^\n]|^)\n*$', r'\1', code)
return names.split(','),code
return [],code
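# Illustrative sketch (not in the original source): for a cell whose code contains
# _all_ = ["foo", 'bar']
# extra_add returns (["'foo'", "'bar'"], code), where double quotes have been normalised
# to single quotes and the `_all_ = [...]` line has been stripped from the returned code.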
#Cell
def _add2add(fname, names, line_width=120):
if len(names) == 0: return
with open(fname, 'r') as f: text = f.read()
    tw = TextWrapper(width=line_width, initial_indent='', subsequent_indent=' '*11, break_long_words=False)
re_all = _re__all__def.search(text)
start,end = re_all.start(),re_all.end()
text_all = tw.wrap(f"{text[start:end-1]}{'' if text[end-2]=='[' else ', '}{', '.join(names)}]")
with open(fname, 'w') as f: f.write(text[:start] + '\n'.join(text_all) + text[end:])
#Cell
def _relative_import(name, fname):
mods = name.split('.')
splits = str(fname).split(os.path.sep)
if mods[0] not in splits: return name
splits = splits[splits.index(mods[0]):]
while len(mods)>0 and splits[0] == mods[0]: splits,mods = splits[1:],mods[1:]
return '.' * (len(splits)) + '.'.join(mods)
#Cell
#Catches any `from local.bla import something`; group 1 is the leading whitespace, group 2 is local.bla, group 3 is the imported thing(s).
_re_import = re.compile(r'^(\s*)from (local.\S*) import (.*)$')
#Cell
def _deal_import(code_lines, fname):
pat = re.compile(r'from (local.\S*) import (\S*)$')
lines = []
def _replace(m):
sp,mod,obj = m.groups()
return f"{sp}from {_relative_import(mod, fname)} import {obj}"
for line in code_lines:
        line = re.sub('_'+'file_', '__'+'file__', line) #Restore __file__; it's written in two pieces so this line isn't itself treated by the conversion
lines.append(_re_import.sub(_replace,line))
return lines
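# Illustrative sketch (not in the original source): with fname pointing at local/data/core.py,
# the line "from local.data.external import download" is rewritten to
# "from .external import download", while imports that don't start with "local." pass through
# unchanged. The module names here are examples only.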
#Cell
def _get_index():
if not (Path(__file__).parent/'index.txt').exists(): return {}
return json.load(open(Path(__file__).parent/'index.txt', 'r'))
def _save_index(index):
fname = Path(__file__).parent/'index.txt'
fname.parent.mkdir(parents=True, exist_ok=True)
json.dump(index, open(fname, 'w'), indent=2)
def _reset_index():
if (Path(__file__).parent/'index.txt').exists():
os.remove(Path(__file__).parent/'index.txt')
#Cell
def _notebook2script(fname, silent=False, to_pkl=False):
"Finds cells starting with `#export` and puts them into a new module"
if os.environ.get('IN_TEST',0): return # don't export if running tests
fname = Path(fname)
nb = read_nb(fname)
default = find_default_export(nb['cells'])
if default is not None:
default = os.path.sep.join(default.split('.'))
if not to_pkl: _create_mod_file(Path.cwd()/'local'/f'{default}.py', fname)
index = _get_index()
exports = [is_export(c, default) for c in nb['cells']]
cells = [(i,c,e) for i,(c,e) in enumerate(zip(nb['cells'],exports)) if e is not None]
for (i,c,e) in cells:
fname_out = Path.cwd()/'local'/f'{e}.py'
orig = ('#C' if e==default else f'#Comes from {fname.name}, c') + 'ell\n'
code = '\n\n' + orig + '\n'.join(_deal_import(c['source'].split('\n')[1:], fname_out))
# remove trailing spaces
names = export_names(code)
extra,code = extra_add(code)
if not to_pkl: _add2add(fname_out, [f"'{f}'" for f in names if '.' not in f and len(f) > 0] + extra)
index.update({f: fname.name for f in names})
code = re.sub(r' +$', '', code, flags=re.MULTILINE)
if code != '\n\n' + orig[:-1]:
if to_pkl: _update_pkl(fname_out, (i, fname, code))
else:
with open(fname_out, 'a') as f: f.write(code)
_save_index(index)
if not silent: print(f"Converted {fname}.")
#Cell
def _get_sorted_files(all_fs: Union[bool,str], up_to=None):
"Return the list of files corresponding to `g` in the current dir."
if (all_fs==True): ret = glob.glob('*.ipynb') # Checks both that is bool type and that is True
else: ret = glob.glob(all_fs) if isinstance(g,str) else []
if len(ret)==0: print('WARNING: No files found')
ret = [f for f in ret if not f.startswith('_')]
if up_to is not None: ret = [f for f in ret if str(f)<=str(up_to)]
return sorted(ret)
#Cell
def notebook2script(fname=None, all_fs=None, up_to=None, silent=False, to_pkl=False):
"Convert `fname` or all the notebook satisfying `all_fs`."
# initial checks
if os.environ.get('IN_TEST',0): return # don't export if running tests
assert fname or all_fs
if all_fs: _reset_index()
    if (all_fs is None) and (up_to is not None): all_fs=True # Enable all_fs if up_to is given
fnames = _get_sorted_files(all_fs, up_to=up_to) if all_fs else [fname]
[_notebook2script(f, silent=silent, to_pkl=to_pkl) for f in fnames]
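# Illustrative usage (not in the original source): convert a single notebook, or every
# *.ipynb in the current directory whose name doesn't start with an underscore;
# the notebook name below is only an example.
#   notebook2script('00_core.ipynb')
#   notebook2script(all_fs=True)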
#Cell
def _get_property_name(p):
"Get the name of property `p`"
if hasattr(p, 'fget'):
return p.fget.func.__qualname__ if hasattr(p.fget, 'func') else p.fget.__qualname__
else: return next(iter(re.findall(r'\'(.*)\'', str(p)))).split('.')[-1]
def get_name(obj):
"Get the name of `obj`"
if hasattr(obj, '__name__'): return obj.__name__
elif getattr(obj, '_name', False): return obj._name
elif hasattr(obj,'__origin__'): return str(obj.__origin__).split('.')[-1] #for types
elif type(obj)==property: return _get_property_name(obj)
else: return str(obj).split('.')[-1]
#Cell
def qual_name(obj):
"Get the qualified name of `obj`"
if hasattr(obj,'__qualname__'): return obj.__qualname__
    if inspect.ismethod(obj): return f"{get_name(obj.__self__)}.{get_name(obj)}"
return get_name(obj)
#Cell
def source_nb(func, is_name=None, return_all=False):
"Return the name of the notebook where `func` was defined"
is_name = is_name or isinstance(func, str)
index = _get_index()
name = func if is_name else qual_name(func)
while len(name) > 0:
if name in index: return (name,index[name]) if return_all else index[name]
name = '.'.join(name.split('.')[:-1])
#Cell
_re_default_nb = re.compile(r'File to edit: dev/(\S+)\s+')
_re_cell = re.compile(r'^#Cell|^#Comes from\s+(\S+), cell')
#Cell
def _split(code):
lines = code.split('\n')
default_nb = _re_default_nb.search(lines[0]).groups()[0]
s,res = 1,[]
while _re_cell.search(lines[s]) is None: s += 1
e = s+1
while e < len(lines):
while e < len(lines) and _re_cell.search(lines[e]) is None: e += 1
grps = _re_cell.search(lines[s]).groups()
nb = grps[0] or default_nb
content = lines[s+1:e]
while len(content) > 1 and content[-1] == '': content = content[:-1]
res.append((nb, '\n'.join(content)))
s,e = e,e+1
return res
#Cell
def _relimport2name(name, mod_name):
if mod_name.endswith('.py'): mod_name = mod_name[:-3]
mods = mod_name.split(os.path.sep)
mods = mods[mods.index('local'):]
i = 0
while name[i] == '.': i += 1
return '.'.join(mods[:-i] + [name[i:]])
#Cell
#Catches any `from .bla import something`; group 1 is the leading whitespace, group 2 is .bla, group 3 is the imported thing(s).
_re_loc_import = re.compile(r'(^\s*)from (\.\S*) import (.*)$')
#Cell
def _deal_loc_import(code, fname):
lines = []
def _replace(m):
sp,mod,obj = m.groups()
return f"{sp}from {_relimport2name(mod, fname)} import {obj}"
for line in code.split('\n'):
        line = re.sub('__'+'file__', '_'+'file_', line) #Need to break __file__ or that line would be treated by the conversion
lines.append(_re_loc_import.sub(_replace,line))
return '\n'.join(lines)
#Cell
def _update_pkl(fname, cell):
dic = pickle.load(open((Path.cwd()/'lib.pkl'), 'rb')) if (Path.cwd()/'lib.pkl').exists() else collections.defaultdict(list)
dic[fname].append(cell)
pickle.dump(dic, open((Path.cwd()/'lib.pkl'), 'wb'))
#Cell
def _script2notebook(fname, dic, silent=False):
"Put the content of `fname` back in the notebooks it came from."
if os.environ.get('IN_TEST',0): return # don't export if running tests
fname = Path(fname)
with open(fname) as f: code = f.read()
splits = _split(code)
assert len(splits) == len(dic[fname]), f"Exported file from notebooks should have {len(dic[fname])} cells but has {len(splits)}."
    assert np.all([c1[0]==c2[1] for c1,c2 in zip(splits, dic[fname])])
splits = [(c2[0],c1[0],c1[1]) for c1,c2 in zip(splits, dic[fname])]
nb_fnames = {s[1] for s in splits}
for nb_fname in nb_fnames:
nb = read_nb(nb_fname)
for i,f,c in splits:
c = _deal_loc_import(c, str(fname))
if f == nb_fname:
l = nb['cells'][i]['source'].split('\n')[0]
nb['cells'][i]['source'] = l + '\n' + c
NotebookNotary().sign(nb)
nbformat.write(nb, nb_fname, version=4)
if not silent: print(f"Converted {fname}.")
#Cell
_manual_mods = '__init__.py imports.py torch_imports.py all.py torch_basics.py fp16_utils.py test_utils.py basics.py'.split()
#Cell
def script2notebook(folder='local', silent=False):
if (Path.cwd()/'lib.pkl').exists(): os.remove(Path.cwd()/'lib.pkl')
notebook2script(all_fs=True, silent=True, to_pkl=True)
dic = pickle.load(open(Path.cwd()/'lib.pkl', 'rb'))
os.remove(Path.cwd()/'lib.pkl')
if os.environ.get('IN_TEST',0): return # don't export if running tests
for f in (Path.cwd()/folder).glob('**/*.py'):
if f.name not in _manual_mods: _script2notebook(f, dic, silent=silent)
#Cell
import subprocess
#Cell
def _print_diff(code1, code2, fname):
diff = difflib.ndiff(code1, code2)
sys.stdout.writelines(diff)
#for l in difflib.context_diff(code1, code2): print(l)
#_print_diff_py(code1, code2, fname) if fname.endswith('.py') else _print_diff_txt(code1, code2, fname)
#Cell
def diff_nb_script(lib_folder='local'):
"Print the diff between the notebooks and the library in `lib_folder`"
tmp_path1,tmp_path2 = Path.cwd()/'tmp_lib',Path.cwd()/'tmp_lib1'
shutil.copytree(Path.cwd()/lib_folder, tmp_path1)
try:
notebook2script(all_fs=True, silent=True)
shutil.copytree(Path.cwd()/lib_folder, tmp_path2)
shutil.rmtree(Path.cwd()/lib_folder)
shutil.copytree(tmp_path1, Path.cwd()/lib_folder)
res = subprocess.run(['diff', '-ru', 'tmp_lib1', lib_folder], stdout=subprocess.PIPE)
print(res.stdout.decode('utf-8'))
finally:
shutil.rmtree(tmp_path1)
shutil.rmtree(tmp_path2)
|
from sauronlab.core.core_imports import *
from sauronlab.extras.addon_tools import AddonTools
from sauronlab.extras.video_core import VideoCore
from sauronlab.extras.videos import SauronxVideo, SauronxVideos
from sauronlab.model.cache_interfaces import AVideoCache
DEFAULT_SHIRE_STORE = PurePath(sauronlab_env.shire_path) / "store"
class VideoDownloadError(DownloadError):
""" """
pass
@abcd.auto_eq()
@abcd.auto_repr_str()
class VideoCache(AVideoCache):
"""
A cache for videos for runs.
    Downloads videos from the Shire, saves the native h265 video files, and loads them with moviepy.
"""
def __init__(
self,
cache_dir: PathLike = sauronlab_env.video_cache_dir,
shire_store: PathLike = DEFAULT_SHIRE_STORE,
):
"""
Constructor.
Args:
cache_dir: The directory to save video files under.
shire_store: The local or remote path to the Shire.
If local, will copy the files.
If remote, will download with SCP on Windows and rsync on other systems.
"""
self._cache_dir = Tools.prepped_dir(cache_dir)
self.shire_store = PurePath(shire_store)
@property
def cache_dir(self) -> Path:
""" """
return self._cache_dir
@abcd.overrides
def path_of(self, run: RunLike) -> Path:
"""
Args:
run: RunLike:
Returns:
"""
run = Tools.run(run)
return self.cache_dir / str(run.id) / (str(run.id) + VideoCore.video_ext)
@abcd.overrides
def key_from_path(self, path: PathLike) -> RunLike:
"""
Args:
path: PathLike:
Returns:
"""
path = Path(path).relative_to(self.cache_dir)
return int(re.compile(r"^([0-9]+)\..+$").fullmatch(path.name).group(1))
@abcd.overrides
def load(self, run: RunLike) -> SauronxVideo:
"""
        Loads from the cache, downloading first if necessary.
Args:
run: A run ID, instance, name, tag, or submission hash or instance
Returns:
A SauronxVideo
"""
self.download(run)
return self._load(run)
@abcd.overrides
def download(self, *runs: RunLike) -> None:
"""
Args:
*runs: RunLike:
"""
for run in Tools.runs(runs):
video_path = self.path_of(run)
t0 = time.monotonic()
if video_path.exists():
logger.debug(f"Run {run.id} is already at {video_path}")
else:
generation = ValarTools.generation_of(run)
logger.minor(
f"Downloading {generation.name} video of r{run.id} to {video_path} ..."
)
remote_path = self.shire_store / VideoCore.get_remote_path(run)
self._copy_from_shire(remote_path, video_path)
# TODO check for properties file
logger.notice(
f"Downloaded video of r{run.id}. Took {round(time.monotonic() - t0, 1)}s."
)
def _load(self, run: RunLike) -> SauronxVideo:
"""
Loads from the cache. Will raise an error if the video is not already in the cache.
Args:
run: A run ID, instance, name, tag, or submission hash or instance
Returns:
A SauronxVideo
"""
return SauronxVideos.of(self.path_of(run), run)
def validate(self, run: RunLike) -> None:
"""
Raises a HashValidationFailedException if the hash doesn't validate.
Args:
run: RunLike:
"""
path = self.path_of(run)
if not VideoCore.video_hasher.check_hash(path):
raise HashValidationFailedError(f"Video at {path} did not validate")
def _copy_from_shire(self, remote_path, local_path) -> None:
"""
Args:
remote_path:
local_path:
"""
try:
AddonTools.download_file(remote_path, local_path, False)
AddonTools.download_file(
str(remote_path) + ".sha256", str(local_path) + ".sha256", False
)
except Exception as e:
raise VideoDownloadError(f"Failed to copy from the Shire at path {remote_path}") from e
if not Path(local_path).exists():
raise VideoDownloadError(
f"Video directory was downloaded to {local_path}, but the video does not exist"
)
__all__ = ["VideoCache"]
|
from flask import Flask, render_template, url_for, redirect, request
import pprint
import json
import requests
places = {
"Chitrakala Parishath": (12.9794, 77.5910),
"Jawaharlal Nehru Planetarium": (12.984731000000099, 77.589573000000001),
"Cafe Coffee Day": (12.926442000000099, 77.680487000000099),
"Chai Point": (12.87615, 77.59551),
"Truffles": (12.97187, 77.600900),
"Shanti Sagar": (12.9537, 77.56732),
"Pothy's": (12.8084, 77.509169),
"Gnana Bharathi": (12.94374, 77.50707)
}
times = {
"Chitrakala Parishath": 60,
"Jawaharlal Nehru Planetarium": 90,
"Cafe Coffee Day": 20,
"Chai Point": 15,
"Truffles": 60,
"Shanti Sagar": 40,
"Pothy's": 60,
"Gnana Bharathi": 50
}
maps = {
'tea': 'Chai Point',
'coffee': 'Cafe Coffee Day',
'North Indian': 'Shanti Sagar',
'Continental': 'Truffles',
'Art': 'Chitrakala Parishath',
'Dance': "Gnana Bharathi",
'Clothing': "Pothy's",
'Science': "Jawaharlal Nehru Planetarium"
}
app = Flask("__app__")
app.config['SECRET_KEY'] = 'a551d32359baf371b9095f28d45347c8b8621830'
@app.route('/', methods=['GET', 'POST'])
def home():
return render_template('index.html', title='Home')
@app.route('/trip', methods=['GET', 'POST'])
def trips():
return render_template('trips.html', title='Trips')
@app.route('/POIs', methods=['GET', 'POST'])
def pois():
if 'location' not in request.args.keys():
return render_template('pois.html', title='POIs', location='Vidhana Soudha')
else:
return render_template('pois.html', title='POIs', location=request.args['location'])
@app.route('/References', methods=['GET', 'POST'])
def refer():
return render_template('references.html', title='References')
@app.route('/userinput', methods=['GET', 'POST'])
def user_input():
if request.args is None or len(request.args) == 0:
return render_template('userInput.html')
else:
mapval = maps[request.args['touring']]
timeval = times[mapval]
placeval = places[mapval]
j = json.dumps({'mapval':mapval, 'timeval':timeval, 'placeval':placeval})
pprint.pprint(j)
return j
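# Illustrative note (not in the original source): a request such as
#   GET /userinput?touring=coffee
# returns a JSON string along the lines of
#   {"mapval": "Cafe Coffee Day", "timeval": 20, "placeval": [12.926442000000099, 77.680487000000099]}
# since json.dumps serialises the coordinate tuple as a list.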
@app.route('/jsonParse', methods=['GET', 'POST'])
def printJson():
js = json.dumps(request.json)
pprint.pprint(js)
return '', 200
@app.route('/timeinput', methods=['GET', 'POST'])
def user_time_input():
return render_template("userTimeInput.html")
@app.route('/oauth', methods=['GET', 'POST'])
def oauth():
r = requests.post(url="https://outpost.mapmyindia.com/api/security/oauth/token", params={
'grant_type': 'client_credentials',
'client_id': '5DAD84w4c-D9yPWId0GNzmW-RRJdm0awDtySdkT-NdBZkq1AdBiNsw==',
'client_secret': '7I5-OsHoU1hf8uyUl049idx1-L7sLzdAoPrapP8cfvjQVK3vTTb1hY16mIcO8ATR'
}, headers={'content-type': 'application/x-www-form-urlencoded'})
r2 = requests.get(url="https://atlas.mapmyindia.com/api/places/nearby/json", headers={
'Authorization': r.json()['token_type'] + ' ' + r.json()['access_token'],
'content-type': 'text/plain',
'dataType': 'json'
}, data={
'keywords': 'coffee;beer',
'refLocation': '28.631460,77.217423'
})
return str(r2.status_code), 200
@app.route('/chat')
def chat():
return render_template('chatbot.html')
app.run(debug=True)
|
from lib.actions import BaseAction
__all__ = [
'ListVMsAction'
]
NODE_ATTRIBUTES = [
'id',
'name',
'state',
'public_ips',
'private_ips'
]
class ListVMsAction(BaseAction):
api_type = 'compute'
def run(self, credentials):
driver = self._get_driver_for_credentials(credentials=credentials)
vms = driver.list_nodes()
result = []
for vm in vms:
values = vm.__dict__
item = dict([(k, v) for k, v in values.items()
if k in NODE_ATTRIBUTES])
result.append(item)
return result
|
"""
The new Google ReCaptcha implementation for Flask without Flask-WTF
Can be used standalone
"""
__NAME__ = "Flask-ReCaptcha"
__version__ = "0.4.2"
__license__ = "MIT"
__author__ = "Mardix"
__copyright__ = "(c) 2015 Mardix"
try:
from flask import request
from jinja2 import Markup
import requests
except ImportError as ex:
print("Missing dependencies")
class DEFAULTS(object):
IS_ENABLED = True
THEME = "light"
TYPE = "image"
SIZE = "normal"
TABINDEX = 0
class ReCaptcha(object):
VERIFY_URL = "https://www.google.com/recaptcha/api/siteverify"
site_key = None
secret_key = None
is_enabled = False
def __init__(self, app=None, site_key=None, secret_key=None, is_enabled=True, **kwargs):
if site_key:
self.site_key = site_key
self.secret_key = secret_key
self.is_enabled = is_enabled
self.theme = kwargs.get('theme', DEFAULTS.THEME)
self.type = kwargs.get('type', DEFAULTS.TYPE)
self.size = kwargs.get('size', DEFAULTS.SIZE)
self.tabindex = kwargs.get('tabindex', DEFAULTS.TABINDEX)
elif app:
self.init_app(app=app)
def init_app(self, app=None):
self.__init__(site_key=app.config.get("RECAPTCHA_SITE_KEY"),
secret_key=app.config.get("RECAPTCHA_SECRET_KEY"),
is_enabled=app.config.get("RECAPTCHA_ENABLED", DEFAULTS.IS_ENABLED),
theme=app.config.get("RECAPTCHA_THEME", DEFAULTS.THEME),
type=app.config.get("RECAPTCHA_TYPE", DEFAULTS.TYPE),
size=app.config.get("RECAPTCHA_SIZE", DEFAULTS.SIZE),
tabindex=app.config.get("RECAPTCHA_TABINDEX", DEFAULTS.TABINDEX))
@app.context_processor
def get_code():
return dict(recaptcha=Markup(self.get_code()))
def get_code(self):
"""
Returns the new ReCaptcha code
:return:
"""
return "" if not self.is_enabled else ("""
<script src='//www.google.com/recaptcha/api.js'></script>
<div class="g-recaptcha" data-sitekey="{SITE_KEY}" data-theme="{THEME}" data-type="{TYPE}" data-size="{SIZE}"\
data-tabindex="{TABINDEX}"></div>
""".format(SITE_KEY=self.site_key, THEME=self.theme, TYPE=self.type, SIZE=self.size, TABINDEX=self.tabindex))
def verify(self, response=None, remote_ip=None):
if self.is_enabled:
data = {
"secret": self.secret_key,
"response": response or request.form.get('g-recaptcha-response'),
"remoteip": remote_ip or request.environ.get('REMOTE_ADDR')
}
r = requests.get(self.VERIFY_URL, params=data)
return r.json()["success"] if r.status_code == 200 else False
return True
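# Illustrative usage sketch (not part of the original module); the key values are placeholders:
#   app.config["RECAPTCHA_SITE_KEY"] = "<site key>"
#   app.config["RECAPTCHA_SECRET_KEY"] = "<secret key>"
#   recaptcha = ReCaptcha(app=app)          # or recaptcha = ReCaptcha(); recaptcha.init_app(app)
#   # in a POST view: recaptcha.verify() reads 'g-recaptcha-response' from the form by default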
|
from django.apps import AppConfig
class CurlyConfig(AppConfig):
name = 'curly'
|
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class WorkspaceUser(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, account_id=None, account_name=None, active_since=None, created=None, created_by_id=None, email=None, error_details=None, invitation_email_blurb=None, invitation_email_subject=None, last_modified=None, last_modified_by_id=None, status=None, type=None, user_id=None, user_name=None, workspace_id=None, workspace_user_base_url=None, workspace_user_id=None, workspace_user_uri=None):
"""
WorkspaceUser - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'account_id': 'str',
'account_name': 'str',
'active_since': 'str',
'created': 'str',
'created_by_id': 'str',
'email': 'str',
'error_details': 'ErrorDetails',
'invitation_email_blurb': 'str',
'invitation_email_subject': 'str',
'last_modified': 'str',
'last_modified_by_id': 'str',
'status': 'str',
'type': 'str',
'user_id': 'str',
'user_name': 'str',
'workspace_id': 'str',
'workspace_user_base_url': 'str',
'workspace_user_id': 'str',
'workspace_user_uri': 'str'
}
self.attribute_map = {
'account_id': 'accountId',
'account_name': 'accountName',
'active_since': 'activeSince',
'created': 'created',
'created_by_id': 'createdById',
'email': 'email',
'error_details': 'errorDetails',
'invitation_email_blurb': 'invitationEmailBlurb',
'invitation_email_subject': 'invitationEmailSubject',
'last_modified': 'lastModified',
'last_modified_by_id': 'lastModifiedById',
'status': 'status',
'type': 'type',
'user_id': 'userId',
'user_name': 'userName',
'workspace_id': 'workspaceId',
'workspace_user_base_url': 'workspaceUserBaseUrl',
'workspace_user_id': 'workspaceUserId',
'workspace_user_uri': 'workspaceUserUri'
}
self._account_id = account_id
self._account_name = account_name
self._active_since = active_since
self._created = created
self._created_by_id = created_by_id
self._email = email
self._error_details = error_details
self._invitation_email_blurb = invitation_email_blurb
self._invitation_email_subject = invitation_email_subject
self._last_modified = last_modified
self._last_modified_by_id = last_modified_by_id
self._status = status
self._type = type
self._user_id = user_id
self._user_name = user_name
self._workspace_id = workspace_id
self._workspace_user_base_url = workspace_user_base_url
self._workspace_user_id = workspace_user_id
self._workspace_user_uri = workspace_user_uri
@property
def account_id(self):
"""
Gets the account_id of this WorkspaceUser.
The account ID associated with the envelope.
:return: The account_id of this WorkspaceUser.
:rtype: str
"""
return self._account_id
@account_id.setter
def account_id(self, account_id):
"""
Sets the account_id of this WorkspaceUser.
The account ID associated with the envelope.
:param account_id: The account_id of this WorkspaceUser.
:type: str
"""
self._account_id = account_id
@property
def account_name(self):
"""
Gets the account_name of this WorkspaceUser.
The name of the account that the workspace user belongs to.
:return: The account_name of this WorkspaceUser.
:rtype: str
"""
return self._account_name
@account_name.setter
def account_name(self, account_name):
"""
Sets the account_name of this WorkspaceUser.
The name of the account that the workspace user belongs to.
:param account_name: The account_name of this WorkspaceUser.
:type: str
"""
self._account_name = account_name
@property
def active_since(self):
"""
Gets the active_since of this WorkspaceUser.
:return: The active_since of this WorkspaceUser.
:rtype: str
"""
return self._active_since
@active_since.setter
def active_since(self, active_since):
"""
Sets the active_since of this WorkspaceUser.
:param active_since: The active_since of this WorkspaceUser.
:type: str
"""
self._active_since = active_since
@property
def created(self):
"""
Gets the created of this WorkspaceUser.
The UTC DateTime when the workspace user was created.
:return: The created of this WorkspaceUser.
:rtype: str
"""
return self._created
@created.setter
def created(self, created):
"""
Sets the created of this WorkspaceUser.
The UTC DateTime when the workspace user was created.
:param created: The created of this WorkspaceUser.
:type: str
"""
self._created = created
@property
def created_by_id(self):
"""
Gets the created_by_id of this WorkspaceUser.
:return: The created_by_id of this WorkspaceUser.
:rtype: str
"""
return self._created_by_id
@created_by_id.setter
def created_by_id(self, created_by_id):
"""
Sets the created_by_id of this WorkspaceUser.
:param created_by_id: The created_by_id of this WorkspaceUser.
:type: str
"""
self._created_by_id = created_by_id
@property
def email(self):
"""
Gets the email of this WorkspaceUser.
:return: The email of this WorkspaceUser.
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""
Sets the email of this WorkspaceUser.
:param email: The email of this WorkspaceUser.
:type: str
"""
self._email = email
@property
def error_details(self):
"""
Gets the error_details of this WorkspaceUser.
:return: The error_details of this WorkspaceUser.
:rtype: ErrorDetails
"""
return self._error_details
@error_details.setter
def error_details(self, error_details):
"""
Sets the error_details of this WorkspaceUser.
:param error_details: The error_details of this WorkspaceUser.
:type: ErrorDetails
"""
self._error_details = error_details
@property
def invitation_email_blurb(self):
"""
Gets the invitation_email_blurb of this WorkspaceUser.
:return: The invitation_email_blurb of this WorkspaceUser.
:rtype: str
"""
return self._invitation_email_blurb
@invitation_email_blurb.setter
def invitation_email_blurb(self, invitation_email_blurb):
"""
Sets the invitation_email_blurb of this WorkspaceUser.
:param invitation_email_blurb: The invitation_email_blurb of this WorkspaceUser.
:type: str
"""
self._invitation_email_blurb = invitation_email_blurb
@property
def invitation_email_subject(self):
"""
Gets the invitation_email_subject of this WorkspaceUser.
:return: The invitation_email_subject of this WorkspaceUser.
:rtype: str
"""
return self._invitation_email_subject
@invitation_email_subject.setter
def invitation_email_subject(self, invitation_email_subject):
"""
Sets the invitation_email_subject of this WorkspaceUser.
:param invitation_email_subject: The invitation_email_subject of this WorkspaceUser.
:type: str
"""
self._invitation_email_subject = invitation_email_subject
@property
def last_modified(self):
"""
Gets the last_modified of this WorkspaceUser.
Utc date and time the comment was last updated (can only be done by creator.)
:return: The last_modified of this WorkspaceUser.
:rtype: str
"""
return self._last_modified
@last_modified.setter
def last_modified(self, last_modified):
"""
Sets the last_modified of this WorkspaceUser.
Utc date and time the comment was last updated (can only be done by creator.)
:param last_modified: The last_modified of this WorkspaceUser.
:type: str
"""
self._last_modified = last_modified
@property
def last_modified_by_id(self):
"""
Gets the last_modified_by_id of this WorkspaceUser.
:return: The last_modified_by_id of this WorkspaceUser.
:rtype: str
"""
return self._last_modified_by_id
@last_modified_by_id.setter
def last_modified_by_id(self, last_modified_by_id):
"""
Sets the last_modified_by_id of this WorkspaceUser.
:param last_modified_by_id: The last_modified_by_id of this WorkspaceUser.
:type: str
"""
self._last_modified_by_id = last_modified_by_id
@property
def status(self):
"""
Gets the status of this WorkspaceUser.
Indicates the envelope status. Valid values are: * sent - The envelope is sent to the recipients. * created - The envelope is saved as a draft and can be modified and sent later.
:return: The status of this WorkspaceUser.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this WorkspaceUser.
Indicates the envelope status. Valid values are: * sent - The envelope is sent to the recipients. * created - The envelope is saved as a draft and can be modified and sent later.
:param status: The status of this WorkspaceUser.
:type: str
"""
self._status = status
@property
def type(self):
"""
Gets the type of this WorkspaceUser.
Type of the user. Valid values: type_owner, type_participant.
:return: The type of this WorkspaceUser.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this WorkspaceUser.
Type of the user. Valid values: type_owner, type_participant.
:param type: The type of this WorkspaceUser.
:type: str
"""
self._type = type
@property
def user_id(self):
"""
Gets the user_id of this WorkspaceUser.
:return: The user_id of this WorkspaceUser.
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""
Sets the user_id of this WorkspaceUser.
:param user_id: The user_id of this WorkspaceUser.
:type: str
"""
self._user_id = user_id
@property
def user_name(self):
"""
Gets the user_name of this WorkspaceUser.
:return: The user_name of this WorkspaceUser.
:rtype: str
"""
return self._user_name
@user_name.setter
def user_name(self, user_name):
"""
Sets the user_name of this WorkspaceUser.
:param user_name: The user_name of this WorkspaceUser.
:type: str
"""
self._user_name = user_name
@property
def workspace_id(self):
"""
Gets the workspace_id of this WorkspaceUser.
:return: The workspace_id of this WorkspaceUser.
:rtype: str
"""
return self._workspace_id
@workspace_id.setter
def workspace_id(self, workspace_id):
"""
Sets the workspace_id of this WorkspaceUser.
:param workspace_id: The workspace_id of this WorkspaceUser.
:type: str
"""
self._workspace_id = workspace_id
@property
def workspace_user_base_url(self):
"""
Gets the workspace_user_base_url of this WorkspaceUser.
The relative URI that may be used to access a workspace user.
:return: The workspace_user_base_url of this WorkspaceUser.
:rtype: str
"""
return self._workspace_user_base_url
@workspace_user_base_url.setter
def workspace_user_base_url(self, workspace_user_base_url):
"""
Sets the workspace_user_base_url of this WorkspaceUser.
The relative URI that may be used to access a workspace user.
:param workspace_user_base_url: The workspace_user_base_url of this WorkspaceUser.
:type: str
"""
self._workspace_user_base_url = workspace_user_base_url
@property
def workspace_user_id(self):
"""
Gets the workspace_user_id of this WorkspaceUser.
:return: The workspace_user_id of this WorkspaceUser.
:rtype: str
"""
return self._workspace_user_id
@workspace_user_id.setter
def workspace_user_id(self, workspace_user_id):
"""
Sets the workspace_user_id of this WorkspaceUser.
:param workspace_user_id: The workspace_user_id of this WorkspaceUser.
:type: str
"""
self._workspace_user_id = workspace_user_id
@property
def workspace_user_uri(self):
"""
Gets the workspace_user_uri of this WorkspaceUser.
:return: The workspace_user_uri of this WorkspaceUser.
:rtype: str
"""
return self._workspace_user_uri
@workspace_user_uri.setter
def workspace_user_uri(self, workspace_user_uri):
"""
Sets the workspace_user_uri of this WorkspaceUser.
:param workspace_user_uri: The workspace_user_uri of this WorkspaceUser.
:type: str
"""
self._workspace_user_uri = workspace_user_uri
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
# -*- coding: utf-8 -*-
from django.db import models
from django.template.defaultfilters import slugify
class Module(models.Model):
name = models.CharField(max_length=50)
slug = models.SlugField(max_length=50, blank=True, unique=True)
def __unicode__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super(Module, self).save(*args, **kwargs)
@models.permalink
def get_absolute_url(self):
path = 'admininterface.{0}'.format(self.slug)
details = {
'id': self.id,
'slug': self.slug
}
return (path, (), details)
|
"""Minimal test for testing test-runner"""
from toxtest import __version__
def test_version() -> None:
"""Test the version string"""
assert __version__ == "0.1.0"
|
import torch
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import torchvision.datasets as datasets
from tqdm import tqdm
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
train_set = datasets.CIFAR10(root="ds/", transform=transforms.ToTensor(), download=True)
train_loader = DataLoader(dataset=train_set, batch_size=64, shuffle=True)
def get_mean_std(loader):
# var[X] = E[X**2] - E[X]**2
channels_sum, channels_sqrd_sum, num_batches = 0, 0, 0
for data, _ in tqdm(loader):
channels_sum += torch.mean(data, dim=[0, 2, 3])
channels_sqrd_sum += torch.mean(data ** 2, dim=[0, 2, 3])
num_batches += 1
mean = channels_sum / num_batches
std = (channels_sqrd_sum / num_batches - mean ** 2) ** 0.5
return mean, std
mean, std = get_mean_std(train_loader)
print(mean)
print(std)
|
# This file is part of the Data Cleaning Library (openclean).
#
# Copyright (c) 2018-2021 New York University.
#
# openclean is released under the Revised BSD License. See file LICENSE for
# full license details.
"""Unit tests for the openclean API extensions of the openclean engine."""
import pkg_resources
import pytest
from openclean_notebook.engine import DB
# -- Mock pkg_resources.resource_string ---------------------------------------
@pytest.fixture
def mock_pkg_resources(monkeypatch):
"""Mock reading the build Javascript files."""
def mock_read_file(*args, **kwargs):
if args[1].endswith('.html'):
return """<html>
<head>
</head>
<body>
<script>
{bundle}
</script>
<div id="{id}">
</div>
<script>
opencleanVis.renderOpencleanVisBundle("#{id}", {data});
</script>
</body>
</html>
""".encode('utf-8')
else:
return 'function(){};'.encode('utf-8')
monkeypatch.setattr(pkg_resources, "resource_string", mock_read_file)
def test_edit_dataset(mock_pkg_resources, dataset, tmpdir):
"""Test to ensure that we can call the edit function without an error."""
engine = DB(str(tmpdir))
engine.create(source=dataset, name='DS', primary_key='A')
engine.edit('DS')
engine.edit('DS', n=1)
def test_registry_id_collision():
"""Test to ensure that collisions for engine identifier during registration
are handled properly.
"""
class IDGenerator:
def __init__(self):
self._count = 0
def __call__(self, length=None):
self._count += 1
if self._count < 10:
return '0'
else:
return '1'
uid = IDGenerator()
engine_1 = DB(uid=uid)
assert engine_1.identifier == '0'
engine_2 = DB(uid=uid)
assert engine_2.identifier == '1'
|
from pypresence import Presence
from pypresence.exceptions import InvalidPipe
import time
def run_persence(current_version: str):
try:
RPC = Presence("EDIT-HERE") # add your RPC ID
RPC.connect()
RPC.update(
state=f"Version {current_version}",
details="python-bot-hyper-boilerplate",
start=time.time(),
large_image="EDIT-HERE", # add your RPC image name
large_text="EDIT-HERE",
small_image="EDIT-HERE",
small_text="EDIT-HERE"
)
except InvalidPipe:
pass
|
import glob,sys
success=False
in_ironpython="IronPython" in sys.version
if in_ironpython:
try:
from ironpython_console import *
success=True
except ImportError:
raise
else:
try:
from console import *
success=True
except ImportError:
pass
if not success:
raise ImportError("Could not find a console implementation for your platform")
|
# UCI Electronics for Scientists
# https://github.com/dkirkby/E4S
#
# Blink an external LED connected to D12.
import board
import digitalio
import time
led = digitalio.DigitalInOut(board.D12)
led.direction = digitalio.Direction.OUTPUT
while True:
led.value = not led.value # toggle on/off
time.sleep(0.5) # seconds
|
''' Break statements and infinite loops '''
'''cont = 1
while cont <= 10:  # to count up to 10
    print(cont, '-> ', end='')
    cont += 1
print('Done')'''
'''while True:  -> statement for an infinite loop
    print(cont, '-> ', end='')
    cont += 1
print('Done')'''
# example:
'''n = 0
while n != 999:  # -> only stops counting when 999 is typed; a loop using a flag value as the stopping point
    n = int(input('Type a number: '))'''
# Example:
'''n = cont = 0
while cont < 3:  # -> to read 3 numbers
    n = int(input('Type a number: '))
    cont += 1'''
'''n = s = 0
while n != 999:
    n = int(input('Type a number: '))
    s += n
s -= 999  # workaround (gambiarra)
print('The sum is {}.'.format(s))'''
'''n = s = 0
while n != 999:
    n = int(input('Type a number: '))
    if n == 999:
        break
    s += n
print('The sum is {}.'.format(s))'''
|
import discord
from discord.ext import commands
from discord import utils
class Moderation(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name='ping')
async def ping(self, ctx):
"""Pings to someone."""
await ctx.send(f'{ctx.author.mention} :ping_pong:')
@commands.has_permissions(kick_members=True)
@commands.command(name='kick', aliases=['пнуть'])
async def kick(self, ctx, member: discord.Member, *, reason=None):
"""Kicks an user."""
await member.kick(reason=reason)
@commands.has_permissions(ban_members=True)
@commands.command(name='ban', aliases=['бан'])
async def ban(self, ctx, member: discord.Member, *, reason=None):
"""Bans an user."""
await member.ban(reason=reason)
def setup(bot):
bot.add_cog(Moderation(bot))
|
from django.apps import AppConfig
class ArtConfig(AppConfig):
name = 'Calture'
verbose_name = 'ماژول آثار هنری'
|
import time
from collections import OrderedDict
from typing import TypeVar, Generic, Optional, Callable, Dict, Any, List
T = TypeVar("T")
class ExpirationQueue(Generic[T]):
"""
    Handles a queue of items that need to expire and be removed from the queue over time
"""
time_to_live_sec: int
# NOTE: this cannot be annotated as an collections.OrderedDict
queue: Dict[T, float]
def __init__(self, time_to_live_sec: int) -> None:
if time_to_live_sec < 0:
raise ValueError("Time to live cannot be negative.")
self.time_to_live_sec = time_to_live_sec
self.queue = OrderedDict()
def __len__(self) -> int:
return len(self.queue)
def __bool__(self) -> bool:
return len(self) > 0
def add(self, item: T) -> None:
"""
Adds item to the queue
:param item: item
"""
self.queue[item] = time.time()
def remove(self, item: T) -> None:
"""
Removes item from expiration queue
:param item: item to remove
"""
if item in self.queue:
del self.queue[item]
def remove_expired(
self,
current_time: Optional[float] = None,
remove_callback: Optional[Callable[[T], Any]] = None,
limit: Optional[int] = None,
**kwargs
) -> List[T]:
"""
Removes expired items from the queue
:param current_time: time to use as current time for expiration
:param remove_callback: reference to a callback function that is being called when item is removed
:param limit: max number of entries to remove in one call
:param kwargs: keyword args to pass into the callback method
"""
if current_time is None:
current_time = time.time()
if limit is None:
limit = len(self.queue)
iterations = 0
removed = []
while (
len(self.queue) > 0 and
iterations < limit and
# pyre-fixme[6]: Expected `float` for 1st param but got `Optional[float]`.
current_time - self.get_oldest_item_timestamp() > self.time_to_live_sec
):
# noinspection PyArgumentList
# pyre-fixme[28]: Unexpected keyword argument `last`.
item, _timestamp = self.queue.popitem(last=False)
if remove_callback is not None:
remove_callback(item, **kwargs)
removed.append(item)
iterations += 1
return removed
def get_oldest(self) -> Optional[T]:
"""
Returns the value of oldest item in the queue
:return: value of oldest item
"""
if not self.queue:
return None
return next(iter(self.queue.keys()))
def get_oldest_item_timestamp(self) -> Optional[float]:
"""
Returns timestamp of the oldest item
:return: timestamp of the oldest item
"""
if not self.queue:
return None
oldest_item = self.get_oldest()
assert oldest_item is not None
return self.queue[oldest_item]
def remove_oldest(
self, remove_callback: Optional[Callable[[T], None]] = None, **kwargs
) -> None:
"""
Remove one oldest item from the queue
:param remove_callback: reference to a callback function that is being called when item is removed
"""
if len(self.queue) > 0:
# pyre-fixme[28]: Unexpected keyword argument `last`.
item, _timestamp = self.queue.popitem(last=False)
if remove_callback is not None:
remove_callback(item, **kwargs)
def clear(self) -> None:
self.queue.clear()
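# Illustrative usage sketch (not part of the original module):
#   q: ExpirationQueue[str] = ExpirationQueue(60)
#   q.add("some-item")
#   expired = q.remove_expired()   # returns the items that were dropped on this call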
|
import graphene
from graphene_django import DjangoObjectType
from .models import Pitstop
class PitstopType(DjangoObjectType):
class Meta:
model = Pitstop
class Query(graphene.ObjectType):
pitstops = graphene.List(PitstopType)
def resolve_pitstops(self, info, **kwargs):
return Pitstop.objects.all()
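# Illustrative sketch (not part of the original module): wiring this query type into a schema
# and running a query against it; the selected field ("id") depends on the Pitstop model and
# is an assumption here.
#   schema = graphene.Schema(query=Query)
#   result = schema.execute("{ pitstops { id } }")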
|
from datetime import datetime
from dataclasses import dataclass
import requests
class UmbrellaClient:
def __init__(self, integration_key, secret_key, organizationid, hostname="https://reports.api.umbrella.com/v2/organizations", limit=300):
self.limit = limit
self.hostname = hostname
self.secret_key = secret_key
self.organizationid = organizationid
self.integration_key = integration_key
self.valid_types = ["dns", "proxy", "firewall", "ip", "amp-retrospective"]
self.token_validity_time = 0
self.token = self.authenticate()
if not self.token:
raise Exception(f"Could not obtain authentication token")
def timestamp(self, timestamp=None):
if timestamp:
timestamp = timestamp + 1000
else:
timestamp = datetime.timestamp(datetime.fromisoformat(datetime.utcnow().strftime("%Y-%m-%dT00:00:00+00:00"))).__int__()
timestamp = timestamp * 1000
return (timestamp)
def authenticate(self, url="https://management.api.umbrella.com/auth/v2/oauth2/token"):
'''
        Grabs a Bearer token from the API. A dictionary is returned that can be used directly as headers
        with the requests module.
        This function is used by __init__, which stores the returned dictionary as self.token.
https://developer.cisco.com/docs/cloud-security/#!reporting-v2-getting-started/create-api-access-token
'''
r = requests.post(url, timeout=30, auth=requests.auth.HTTPBasicAuth(self.integration_key, self.secret_key))
if r.ok:
token = r.json()["access_token"]
auth_header = {
"Authorization": f"Bearer {token}",
"Content-Type": "application/json"
}
now = datetime.timestamp(datetime.now()).__int__()
self.token_validity_time = now + r.json()["expires_in"]
return (auth_header)
else:
return False
def send_request(self,url):
'''
Main function for interacting with the API
'''
now = datetime.timestamp(datetime.now()).__int__()
if now >= self.token_validity_time:
self.token = self.authenticate()
r = requests.get(f"{url}", timeout=30, allow_redirects=False, headers=self.token)
if not r.ok:
raise Exception(f"Could not connect to {url}. {r.json()}")
'''
The umbrella API tends to redirect (HTTP 302) the request. So we will check if the domain is the same.
We tell requests that we do not allow redirects so we can run logic against is_redirect and is_permanent_redirect
If you plan to use this in production, you may want to consider if this check is secure enough.
'''
if r.is_redirect or r.is_permanent_redirect:
new_hostname = r.next.url.split("/")[2].split(".")[-2:]
old_hostname = r.url.split("/")[2].split(".")[-2:]
if new_hostname != old_hostname:
raise Exception(f"Old and new hostname does not have matching domains: {old_hostname} / {new_hostname}")
r = requests.get(f"{r.next.url}", timeout=30, allow_redirects=False, headers=self.token)
if not r.ok:
raise Exception(f"Error in connecting to re-directed url {r.url}. {r.json()}")
return (r.json())
def validate_parameters(self, valid_parameters_list=[], parameters_list=[]):
'''
Function used to validate the kwargs from the other functions. The valid list is stored within the function
itself, and this function expects a list of valid parameters and the kwargs to be passed.
'''
parameters = []
for key, value in parameters_list.items():
if key in valid_parameters_list:
parameters.append(f"{key}={value}")
if parameters:
            # We insert an empty element in the list so that when "&".join() is used, an & is inserted at the beginning of the URL parameters
parameters.insert(0, "")
return parameters
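    # Illustrative sketch (not in the original source): with valid_parameters_list=["domains", "ip"]
    # and parameters_list={"domains": "cisco.com", "foo": 1}, this returns ["", "domains=cisco.com"],
    # so "&".join(parameters) yields "&domains=cisco.com" ready to append to a URL; unknown keys
    # like "foo" are silently dropped.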
def get_categories(self):
'''
Function to get the current categories from umbrella.
This can later be used as a parameter in the get_something functions
'''
r = self.send_request(f"{self.hostname}/{self.organizationid}/categories")
data = UmbrellaCategories(r)
return data
def get_activity(self, timestamp=None, type=None, **kwargs):
'''
        Function to retrieve "activity" data from the API
https://developer.cisco.com/docs/cloud-security/#!reporting-v2-endpoints
'''
timestamp = self.timestamp(timestamp)
valid_parameters = [
"domains", "urls", "categories", "policycategories",
"ip", "ports", "identityids", "identitytypes", "applicationid",
"verdict", "ruleid", "filename", "securityoverridden", "bundleid",
"threats", "threattypes", "ampdisposition", "antivirusthreats",
"x-traffic-type", "isolatedstate", "isolatedfileaction",
"datalosspreventionstate", "filternoisydomains", "httperrors"
]
parameters = self.validate_parameters(valid_parameters, kwargs)
if not type:
url = f"{self.hostname}/{self.organizationid}/activity?from={timestamp}&to=now&limit={self.limit}{'&'.join(parameters)}"
elif type.lower() in self.valid_types:
url = f"{self.hostname}/{self.organizationid}/activity/{type.lower()}?from={timestamp}&to=now&limit={self.limit}{'&'.join(parameters)}"
else:
raise Exception(f"{type} not a valid activity type: Valid types: {','.join(self.valid_types)}")
data = self.send_request(url)
return data
def get_top_identities(self, timestamp=None, type=None, **kwargs):
timestamp = self.timestamp(timestamp)
valid_parameters = [
"domains", "urls", "categories", "policycategories", "ip", "ports",
"identityids", "identitytypes", "applicationid", "verdict",
"securityoverridden", "bundleid", "threats", "threattypes",
"ampdisposition", "antivirusthreats", "datalosspreventionstate",
"filternoisydomains"
]
parameters = self.validate_parameters(valid_parameters, kwargs)
if not type:
url = f"{self.hostname}/{self.organizationid}/top-identities?from={timestamp}&to=now&limit={self.limit}&offset=0{'&'.join(parameters)}"
elif type.lower() in self.valid_types:
url = f"{self.hostname}/{self.organizationid}/top-identities/{type.lower()}?from={timestamp}&to=now&limit={self.limit}&offset=0{'&'.join(parameters)}"
else:
raise Exception(f"{type} not a valid identity type: Valid types: {','.join(self.valid_types)}")
data = self.send_request(url)
return data
def get_top_destinations(self, timestamp=None, destination_type=None, **kwargs):
timestamp = self.timestamp(timestamp)
valid_parameters = [
"domains", "urls", "categories", "policycategories", "ip", "ports",
"identityids", "identitytypes", "applicationid", "verdict", "sha256",
"securityoverridden", "bundleid", "threats", "threattypes",
"ampdisposition", "antivirusthreats", "datalosspreventionstate",
"filternoisydomains"
]
parameters = self.validate_parameters(valid_parameters, kwargs)
if not destination_type:
raise Exception(f"Identity type is required for this function")
elif destination_type in self.valid_types:
url = f"{self.hostname}/{self.organizationid}/top-destinations/{destination_type.lower()}?from={timestamp}&to=now&limit={self.limit}&offset=0{'&'.join(parameters)}"
else:
raise Exception(f"{destination_type} is not a valid destination type. Valid types: {','.join(self.valid_types)}")
data = self.send_request(url)
return data
def get_top_categories(self, timestamp=None, type=None, **kwargs):
timestamp = self.timestamp(timestamp)
valid_parameters = [
"domains", "urls", "categories", "policycategories", "ip", "identityids",
"identitytypes", "applicationid", "verdict", "sha256", "securityoverridden",
"bundleid", "threats", "threattypes", "ampdisposition", "antivirusthreats",
"datalosspreventionstate", "filternoisydomains"
]
parameters = self.validate_parameters(valid_parameters, kwargs)
if not type:
url = f"{self.hostname}/{self.organizationid}/top-categories?from={timestamp}&to=now&limit={self.limit}&offset=0{'&'.join(parameters)}"
elif type in self.valid_types:
url = f"{self.hostname}/{self.organizationid}/top-categories/{type.lower()}?from={timestamp}&to=now&limit={self.limit}&offset=0{'&'.join(parameters)}"
else:
raise Exception(f"{type} not a valid type. Valid types: {','.join(self.valid_types)}")
data = self.send_request(url)
return data
def get_top_event_types(self, timestamp=None, **kwargs):
timestamp = self.timestamp(timestamp)
valid_parameters = [
"organizationid", "from", "to", "domains", "urls", "categories",
"policycategories", "ip", "identityids", "identitytypes",
"applicationid", "verdict", "securityoverridden", "bundleid",
"threats", "threattypes", "ampdisposition", "antivirusthreats",
"datalosspreventionstate", "filternoisydomains"
]
parameters = self.validate_parameters(valid_parameters, kwargs)
url = f"{self.hostname}/{self.organizationid}/top-eventtypes?from={timestamp}&to=now&limit={self.limit}{'&'.join(parameters)}"
data = self.send_request(url)
return data
def get_top_dns_query_types(self, timestamp=None, **kwargs):
timestamp = self.timestamp(timestamp)
valid_parameters = [
"order", "domains", "categories", "policycategories", "ip",
"identityids", "identitytypes", "applicationid", "verdict",
"threats", "threattypes", "filternoisydomains"
]
parameters = self.validate_parameters(valid_parameters, kwargs)
url = f"{self.hostname}/{self.organizationid}/top-dns-query-types?from={timestamp}&to=now&limit={self.limit}{'&'.join(parameters)}"
data = self.send_request(url)
return data
def get_organization_requests_by_hour(self, timestamp=None, type=None, **kwargs):
timestamp = self.timestamp(timestamp)
valid_parameters = [
"domains", "urls", "categories", "policycategories", "ip",
"ports", "identityids", "identitytypes", "applicationid",
"verdict", "sha256", "securityoverridden", "bundleid", "threats",
"threattypes", "ampdisposition", "antivirusthreats",
"datalosspreventionstate", "filternoisydomains"
]
parameters = self.validate_parameters(valid_parameters, kwargs)
if type in self.valid_types:
url = f"{self.hostname}/{self.organizationid}/requests-by-hour/{type.lower()}?from={timestamp}&to=now&limit={self.limit}{'&'.join(parameters)}"
else:
url = f"{self.hostname}/{self.organizationid}/requests-by-hour?from={timestamp}&to=now&limit={self.limit}{'&'.join(parameters)}"
data = self.send_request(url)
return data
def get_organization_requests_by_hour_and_category(self, timestamp=None, type=None, **kwargs):
timestamp = self.timestamp(timestamp)
valid_parameters = [
"domains", "urls", "categories", "policycategories", "ip",
"ports", "identityids", "identitytypes", "applicationid",
"verdict", "sha256", "securityoverridden", "bundleid",
"threats", "threattypes", "ampdisposition",
"antivirusthreats", "datalosspreventionstate",
"filternoisydomains"
]
parameters = self.validate_parameters(valid_parameters, kwargs)
if type in self.valid_types:
url = f"{self.hostname}/{self.organizationid}/categories-by-timerange/{type.lower()}?from={timestamp}&to=now&limit={self.limit}{'&'.join(parameters)}"
else:
url = f"{self.hostname}/{self.organizationid}/categories-by-timerange?from={timestamp}&to=now&limit={self.limit}{'&'.join(parameters)}"
data = self.send_request(url)
return data
def get_depolyment_status(self, timestamp=None, **kwargs):
timestamp = self.timestamp(timestamp)
valid_parameters = ["threats", "threattypes"]
parameters = self.validate_parameters(valid_parameters, kwargs)
url = f"{self.hostname}/{self.organizationid}/deployment-status?from={timestamp}&to=now&limit={self.limit}{'&'.join(parameters)}"
data = self.send_request(url)
return data
def get_bandwidth_by_hour(self, timestamp=None, **kwargs):
timestamp = self.timestamp(timestamp)
valid_parameters = [
"domains", "urls", "categories", "policycategories", "ip",
"identityids", "identitytypes", "applicationid", "verdict",
"sha256", "securityoverridden", "bundleid", "ampdisposition",
"antivirusthreats", "datalosspreventionstate", "filternoisydomains",
]
parameters = self.validate_parameters(valid_parameters, kwargs)
url = f"{self.hostname}/{self.organizationid}/bandwidth-by-hour?from={timestamp}&to=now&limit={self.limit}{'&'.join(parameters)}"
data = self.send_request(url)
return data
def get_bandwidth_by_timerange(self, timestamp=None, **kwargs):
timestamp = self.timestamp(timestamp)
valid_parameters = [
"domains", "urls", "categories", "policycategories", "ip", "identityids",
"identitytypes", "applicationid", "verdict", "sha256",
"securityoverridden", "bundleid", "ampdisposition", "antivirusthreats",
"timerange", "datalosspreventionstate", "filternoisydomains"
]
parameters = self.validate_parameters(valid_parameters, kwargs)
url = f"{self.hostname}/{self.organizationid}/bandwidth-by-timerange?from={timestamp}&to=now&limit={self.limit}{'&'.join(parameters)}"
data = self.send_request(url)
return data
def get_top_files(self, timestamp=None, **kwargs):
timestamp = self.timestamp(timestamp)
valid_parameters = [
"domains", "urls", "categories", "policycategories", "ip", "identityids",
"identitytypes", "applicationid", "verdict", "sha256",
"securityoverridden", "bundleid", "ampdisposition", "antivirusthreats",
"datalosspreventionstate", "filternoisydomains"
]
parameters = self.validate_parameters(valid_parameters, kwargs)
url = f"{self.hostname}/{self.organizationid}/top-files?from={timestamp}&to=now&limit={self.limit}{'&'.join(parameters)}"
data = self.send_request(url)
return data
def get_total_requests(self, timestamp=None, **kwargs):
timestamp = self.timestamp(timestamp)
valid_parameters = [
"domains", "urls", "categories", "policycategories", "ip", "ports",
"identityids", "identitytypes", "applicationid", "verdict", "ruleid",
"sha256", "securityoverridden", "bundleid", "threats", "threattypesp",
"ampdisposition", "antivirusthreats", "datalosspreventionstate",
"filternoisydomains"
]
parameters = self.validate_parameters(valid_parameters, kwargs)
url = f"{self.hostname}/{self.organizationid}/total-requests?from={timestamp}&to=now{'&'.join(parameters)}"
data = self.send_request(url)
return data
def get_top_threats(self, timestamp=None, type=None, **kwargs):
timestamp = self.timestamp(timestamp)
valid_parameters = [
"domains", "categories", "policyCategories", "ip", "identityids",
"identitytypes", "applicationid", "verdict", "threats",
"threattypes", "filternoisydomains"
]
parameters = self.validate_parameters(valid_parameters, kwargs)
if type in self.valid_types:
url = f"{self.hostname}/{self.organizationid}/top-threats/{type.lower()}?from={timestamp}&to=now&limit={self.limit}{'&'.join(parameters)}"
else:
url = f"{self.hostname}/{self.organizationid}/top-threats?from={timestamp}&to=now&limit={self.limit}{'&'.join(parameters)}"
data = self.send_request(url)
return data
def get_top_threat_types(self, timestamp=None, type=None, **kwargs):
timestamp = self.timestamp(timestamp)
valid_parameters = [
"domains", "categories", "policyCategories", "ip", "identityids",
"identitytypes", "applicationid", "verdict", "threats",
"threattypes", "filternoisydomains"
]
parameters = self.validate_parameters(valid_parameters, kwargs)
if type in self.valid_types:
url = f"{self.hostname}/{self.organizationid}/top-threat-types/{type.lower()}?from={timestamp}&to=now&limit={self.limit}{'&'.join(parameters)}"
else:
url = f"{self.hostname}/{self.organizationid}/top-threat-types?from={timestamp}&to=now&limit={self.limit}{'&'.join(parameters)}"
data = self.send_request(url)
return data
def get_top_ips(self, timestamp=None, **kwargs):
timestamp = self.timestamp(timestamp)
valid_parameters = [
"domains", "categories", "policycategories", "ip", "identityids",
"identitytypes", "applicationid", "verdict", "threats",
"threattypes", "filternoisynomains"
]
parameters = self.validate_parameters(valid_parameters, kwargs)
url = f"{self.hostname}/{self.organizationid}/top-ips?from={timestamp}&to=now&limit={self.limit}{'&'.join(parameters)}"
data = self.send_request(url)
return data
def get_summary(self, timestamp=None, type=None, **kwargs):
timestamp = self.timestamp(timestamp)
valid_parameters = [
"domains", "urls", "categories", "policycategories", "ip", "ports",
"identityids", "identitytypes", "applicationid", "verdict", "ruleid",
"filename", "securityoverridden", "bundleid", "threats", "threattypes",
"ampdisposition", "antivirusthreats", "datalosspreventionstate",
"filternoisydomains"
]
parameters = self.validate_parameters(valid_parameters, kwargs)
if type in self.valid_types:
url = f"{self.hostname}/{self.organizationid}/summary/{type.lower()}?from={timestamp}&to=now&offset=0&limit={self.limit}{'&'.join(parameters)}"
else:
url = f"{self.hostname}/{self.organizationid}/summary?from={timestamp}&to=now&offset=0&limit={self.limit}{'&'.join(parameters)}"
data = self.send_request(url)
return data
def get_summaries_by_category(self, timestamp=None, type=None, **kwargs):
timestamp = self.timestamp(timestamp)
valid_parameters = [
"domains", "urls", "categories", "policycategories", "ip",
"identityids", "identitytypes", "applicationid", "verdict",
"ruleid", "filename", "securityoverridden", "bundleid", "threats",
"threattypes", "ampdisposition", "antivirusthreats",
"datalosspreventionstate", "filternoisydomains"
]
parameters = self.validate_parameters(valid_parameters, kwargs)
if type in self.valid_types:
url = f"{self.hostname}/{self.organizationid}/summaries-by-category/{type.lower()}?from={timestamp}&to=now&offset=0&limit={self.limit}{'&'.join(parameters)}"
else:
url = f"{self.hostname}/{self.organizationid}/summaries-by-category?from={timestamp}&to=now&offset=0&limit={self.limit}{'&'.join(parameters)}"
data = self.send_request(url)
return data
def get_summaries_by_destination(self, timestamp=None, type=None, **kwargs):
timestamp = self.timestamp(timestamp)
valid_parameters = [
"domains", "urls", "categories", "policycategories", "ip",
"identityids", "identitytypes", "applicationid", "verdict",
"ruleid", "filename", "securityoverridden", "bundleid", "threats",
"threattypes", "ampdisposition", "antivirusthreats",
"datalosspreventionstate", "filternoisydomain"
]
parameters = self.validate_parameters(valid_parameters, kwargs)
if type in self.valid_types:
url = f"{self.hostname}/{self.organizationid}/summaries-by-destination/{type}?from={timestamp}&to=now&offset=0&limit={self.limit}{'&'.join(parameters)}"
else:
url = f"{self.hostname}/{self.organizationid}/summaries-by-destination?from={timestamp}&to=now&offset=0&limit={self.limit}{'&'.join(parameters)}"
data = self.send_request(url)
return data
def get_security_activity(self, timestamp=None):
        '''
        Ready-made helper that fetches activity logs containing events in the "security" category.
        '''
timestamp = self.timestamp(timestamp)
categories = self.get_categories()
security_categories = []
for i in categories.category_by_type["security"]:
security_categories.append(str(i["id"]))
data = self.get_activity(timestamp=timestamp, categories=','.join(security_categories))
return data
@dataclass
class UmbrellaCategories:
    type_list: list = None
    category_by_type: dict = None
    category_by_id: dict = None
    category_by_legacy_id: dict = None
    def __init__(self, data=""):
        # Start from empty containers so instances never share mutable class-level defaults.
        self.clear_data()
        if data:
            self.populate_data(data)
    def populate_data(self, data):
        self.clear_data()
        for i in data["data"]:
            if i["type"] not in self.type_list:
                self.type_list.append(i["type"])
            self.category_by_id[i["id"]] = i
            self.category_by_legacy_id[i["legacyid"]] = i
            if i["type"] not in self.category_by_type:
                self.category_by_type[i["type"]] = []
            # Append every category, including the first one seen for each type.
            self.category_by_type[i["type"]].append(i)
    def clear_data(self):
        self.type_list = []
        self.category_by_id = {}
        self.category_by_legacy_id = {}
        self.category_by_type = {}
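# Hypothetical usage sketch (the names below are illustrative, not part of this module):
#     categories = UmbrellaCategories(raw_categories_response)
#     security = categories.category_by_type.get("security", [])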
|
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib as mpl
import numpy as np
import pandas as pd
import seaborn as sns  # seaborn.apionly was removed in newer seaborn releases
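# Plot two observables (energy and a correlation measure) from three CSV runs on shared
# log-scaled axes, shifting the Monte Carlo step ('mc') axis so the continuation runs
# follow the direct run. Assumes a locally installed matplotlib style named 'custom'.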
def cm2inch(value): return value/2.54
plt.style.use('custom')
dfA = pd.read_csv('/tmp/A.csv')
dfB = pd.read_csv('/tmp/B.csv')
dfB['mc'] += 10
dfC = pd.read_csv('/tmp/C.csv')
dfC['mc'] += 110
dfA['kind'] = "Direct"
dfB['kind'] = "Continuation A"
dfC['kind'] = "Continuation B"
alldf = pd.concat([dfA, dfB, dfC])  # DataFrame.append was removed in pandas 2.0
alldfE = alldf[alldf['observable'] == 'Energy']
alldfC = alldf[alldf['observable'] == 'Correlation']
fig = plt.figure( figsize=(cm2inch(10), cm2inch(5)) )
axE = fig.add_subplot(211)
axC = fig.add_subplot(212)
axE.set_xscale('log')
axE.set_xlabel('mc')
axE.set_ylabel('E')
axC.set_xscale('log')
axC.set_xlabel('mc')
axC.set_ylabel('β(1-C)')
for k, kdf in alldfE.groupby('kind'):
    axE.plot(kdf['mc'], kdf['value'], label=k, lw=1)
axE.legend()
for k, kdf in alldfC.groupby('kind'):
    axC.scatter(kdf['mc'], kdf['value'], label=k, s=1)
axC.legend()
plt.show()
|
import cv2
import os.path
import xlsxwriter
import datetime
workbook_one = xlsxwriter.Workbook("report/img_one.xlsx")
worksheet_one = workbook_one.add_worksheet()
img_one_list = []
workbook_two = xlsxwriter.Workbook("report/img_two.xlsx")
worksheet_two = workbook_two.add_worksheet()
img_two_list = []
workbook_three = xlsxwriter.Workbook("report/img_three.xlsx")
worksheet_three = workbook_three.add_worksheet()
img_three_list = []
workbook_plaque_img_name = xlsxwriter.Workbook("report/plaque_img_name.xlsx")
worksheet_plaque_img_name = workbook_plaque_img_name.add_worksheet()
plaque_img_name = []
background = cv2.createBackgroundSubtractorMOG2()
roads = cv2.VideoCapture('videos/roadsvideo.mkv')
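# Frame loop: background-subtract each frame, locate the dominant moving blob via image
# moments, and when its centroid falls inside one of three lanes, save a snapshot and log
# the file name and timestamp to the matching worksheet.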
total_car = 0
minArea = 2600
# Keep the worksheet cursor outside the frame loop so earlier rows are not overwritten.
row = 0
column = 0
while True:
    _, frame = roads.read()
    if frame is None:
        # End of the video stream.
        break
    fgmask = background.apply(frame, None, 0.1)
erode = cv2.erode(fgmask, None, iterations=4)
moments = cv2.moments(erode, True)
    # incoming road lines
    # x axis (horizontal lines)
cv2.line(frame, (0, 300), (10000, 300), (0, 255, 0), 2)
cv2.line(frame, (0, 500), (10000, 500), (0, 255, 0), 2)
    # y axis (vertical lane boundaries)
cv2.line(frame, (150, 0), (150, 10000), (255, 255, 0), 2)
cv2.line(frame, (500, 0), (500, 10000), (255, 255, 0), 2)
cv2.line(frame, (770, 0), (770, 10000), (255, 0, 0), 2)
cv2.line(frame, (1150, 0), (1150, 10000), (255, 0, 0), 2)
cv2.line(frame, (1300, 0), (1300, 10000), (0, 0, 0), 2)
cv2.line(frame, (1700, 0), (1700, 10000), (0, 0, 0), 2)
if moments['m00'] >= minArea:
x = int(moments['m10']/moments['m00'])
y = int(moments['m01'] / moments['m00'])
#print('moment :' + str(moments['m00'])+" x : " + str(x) + " y :" + str(y))
        # Compare the blob centroid's x position against the vertical lane boundaries.
        if 150 < x < 500:
            total_car = total_car + 1
            img_name = "images_one/image_{}.png".format(total_car)
            time = str(datetime.datetime.now())
            worksheet_one.write(row, column, img_name)
            worksheet_one.write(row, column + 1, time)
            row += 1
            cv2.imwrite(img_name, frame)
            print(img_name)
            print(time)
        elif 770 < x < 1150:
            total_car = total_car + 1
            img_name = "images_two/image_{}.png".format(total_car)
            time = str(datetime.datetime.now())
            worksheet_two.write(row, column, img_name)
            worksheet_two.write(row, column + 1, time)
            row += 1
            cv2.imwrite(img_name, frame)
            print('total = ' + str(total_car))
            print(img_name)
            print(time)
        elif 1300 < x < 1700:
            total_car = total_car + 1
            img_name = "images_three/image_{}.png".format(total_car)
            time = str(datetime.datetime.now())
            # Third lane entries belong in the third worksheet.
            worksheet_three.write(row, column, img_name)
            worksheet_three.write(row, column + 1, time)
            row += 1
            cv2.imwrite(img_name, frame)
            print('total = ' + str(total_car))
            print(img_name)
            print(time)
cv2.putText(frame, 'Sayi: %r' % total_car, (200, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
cv2.imshow('video',frame)
key = cv2.waitKey(25)
if key==ord('q'):
break
roads.release()
cv2.destroyAllWindows()
# Close the workbooks only after the capture loop has finished.
workbook_one.close()
workbook_two.close()
workbook_three.close()
|
import os
import json
from dvc.main import main
from tests.basic_env import TestDvc
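# Tests for `dvc metrics`: each branch commits the same value in several formats
# (raw text, JSON, TSV, headed TSV) and the tests read it back per branch.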
class TestMetrics(TestDvc):
def setUp(self):
super(TestMetrics, self).setUp()
self.dvc.scm.commit('init')
for branch in ['foo', 'bar', 'baz']:
self.dvc.scm.checkout(branch, create_new=True)
with open('metric', 'w+') as fd:
fd.write(branch)
with open('metric_json', 'w+') as fd:
json.dump({'branch': branch}, fd)
with open('metric_tsv', 'w+') as fd:
fd.write(branch)
with open('metric_htsv', 'w+') as fd:
fd.write('branch\n')
fd.write(branch)
self.dvc.scm.add(['metric', 'metric_json', 'metric_tsv', 'metric_htsv'])
self.dvc.scm.commit('metric')
self.dvc.scm.checkout('master')
    def test(self):
        ret = self.dvc.metrics('metric')
        self.assertEqual(len(ret), 4)
        self.assertIsNone(ret['master'])
        self.assertEqual(ret['foo'], 'foo')
        self.assertEqual(ret['bar'], 'bar')
        self.assertEqual(ret['baz'], 'baz')
        ret = self.dvc.metrics('metric_json', json_path='branch')
        self.assertEqual(len(ret), 4)
        self.assertIsNone(ret['master'])
        self.assertEqual(ret['foo'], ['foo'])
        self.assertEqual(ret['bar'], ['bar'])
        self.assertEqual(ret['baz'], ['baz'])
        ret = self.dvc.metrics('metric_tsv', tsv_path='0,0')
        self.assertEqual(len(ret), 4)
        self.assertIsNone(ret['master'])
        self.assertEqual(ret['foo'], ['foo'])
        self.assertEqual(ret['bar'], ['bar'])
        self.assertEqual(ret['baz'], ['baz'])
        ret = self.dvc.metrics('metric_htsv', htsv_path='branch,0')
        self.assertEqual(len(ret), 4)
        self.assertIsNone(ret['master'])
        self.assertEqual(ret['foo'], ['foo'])
        self.assertEqual(ret['bar'], ['bar'])
        self.assertEqual(ret['baz'], ['baz'])
class TestMetricsCLI(TestMetrics):
def test(self):
#FIXME enable on windows
if os.name == 'nt':
return
#FIXME check output
ret = main(['metrics', 'metric', '-v'])
self.assertEqual(ret, 0)
ret = main(['metrics', 'metric_json', '--json-path', 'branch'])
self.assertEqual(ret, 0)
ret = main(['metrics', 'metric_tsv', '--tsv-path', '0,0'])
self.assertEqual(ret, 0)
ret = main(['metrics', 'metric_htsv', '--htsv-path', 'branch,0'])
self.assertEqual(ret, 0)
|
# Sets, similar to Java's Set data structure
num = {1, 2, 3, 4, 5}
print(type(num))
# Elements are unique, so adding an existing value changes nothing
num.add(1)
print(num)  # {1, 2, 3, 4, 5}
# Immutable set
set1 = frozenset({1, 2, 3, 4, 5})
# set1.add(6)  would raise an AttributeError because frozenset has no add method
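# frozenset still supports non-mutating set operations
print(set1 | {6})  # {1, 2, 3, 4, 5, 6}
print(set1 & {2, 3})  # {2, 3}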
|
import pickle
import numpy
import time
import math
from sklearn.linear_model import LinearRegression
aqstations = {'BL0':0, 'CD1':1, 'CD9':2, 'GN0':3, 'GN3':4, 'GR4':5, 'GR9':6, 'HV1':7, 'KF1':8, 'LW2':9,
'ST5':10, 'TH4':11, 'MY7':12}
ngrid = 861
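# Per-station OLS fit for 24-hour pollutant differences: px packs four weather channels per
# grid cell, a diurnal sine term and the previous 18 hourly differences of the target;
# getCoef relies on the module-level px, tmpdata and res and stores [coef_, intercept_] per fit.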
def getCoef(idx):
py = tmpdata[24:]
linear = LinearRegression()
linear.fit(px, py)
res.append([linear.coef_, linear.intercept_])
if __name__ == "__main__":
with open("../data/london_data.pkl", "rb") as f:
dat = pickle.load(f)
l = len(dat[0])
px = numpy.zeros(((l - 48), (ngrid * 4 + 19)), dtype = numpy.float32)
for i in range(48, l):
for j in range(ngrid):
px[i - 48][j * 4] = dat[72 + j * 5][i - 24]
px[i - 48][j * 4 + 1] = dat[72 + j * 5 + 1][i - 24]
px[i - 48][j * 4 + 2] = dat[72 + j * 5 + 2][i - 24]
px[i - 48][j * 4 + 3] = dat[72 + j * 5 + 4][i - 24]
res = []
for st in aqstations:
print(st)
print("PM2.5")
idx = aqstations[st] * 3 + 0
tmpdata = numpy.zeros(l - 24)
for i in range(24, l):
tmpdata[i - 24] = dat[idx][i] - dat[idx][i - 24]
for i in range(48, l):
px[i - 48][ngrid * 4] = math.sin(i / 12 * math.pi)
for j in range(1, 19):
px[i - 48][ngrid * 4 + j] = tmpdata[i - 24 - j]
getCoef(idx)
print("PM10")
idx = aqstations[st] * 3 + 1
tmpdata = numpy.zeros(l - 24)
for i in range(24, l):
tmpdata[i - 24] = dat[idx][i] - dat[idx][i - 24]
for i in range(48, l):
px[i - 48][ngrid * 4] = math.sin(i / 12 * math.pi)
for j in range(1, 19):
px[i - 48][ngrid * 4 + j] = tmpdata[i - 24 - j]
getCoef(idx)
with open("../data/ldols_res.pkl", "wb") as f:
pickle.dump(res, f)
|
# -*- coding: utf8 -*-
import itertools
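# Minimal hex-dump helper: formatLine renders one 16-byte row as hex and ASCII columns,
# hexDump yields one formatted line per 16-byte chunk of the input.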
def formatLine(r):
r = list(r)
l1 = ' '.join('{:02x}'.format(c) for c in r)
l2 = ''.join(chr(c) if 32 <= c < 127 else '.' for c in r)
return l1, l2
def hexDump(data):
size, over = divmod(len(data), 16)
if over:
size += 1
offsets = range(0, size * 16, 16)
for o in offsets:
row = itertools.islice(data, o, o + 16)
yield '{:010X}: {:48} {:16}'.format(o, *formatLine(row))
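# Example usage (illustrative input):
#     for line in hexDump(b"hello world, this is a hex dump demo"):
#         print(line)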
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# noinspection PyBroadException
try:
# import eventlet
# eventlet.monkey_patch(all=True, thread=False)
pass
except:
pass
import decimal
import logging
import logging.config
import os
import eventlet.wsgi
import sqlalchemy_utils
import yaml
from flask import Flask, request
from flask.json import JSONEncoder
from flask_babel import Babel, get_locale, gettext
from flask_cors import CORS
from flask_migrate import Migrate
from flask_restful import Api
from seed import rq
from seed.client_api import ClientDetailApi, ClientListApi
from seed.deployment_api import DeploymentDetailApi, DeploymentListApi
from seed.deployment_image_api import (DeploymentImageDetailApi,
DeploymentImageListApi)
from seed.deployment_log_api import (DeploymentLogDetailApi,
DeploymentLogListApi)
from seed.deployment_metric_api import (DeploymentMetricDetailApi,
DeploymentMetricListApi)
from seed.deployment_target_api import (DeploymentTargetDetailApi,
DeploymentTargetListApi)
from seed.models import db
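# Flask application factory for the seed service: a Decimal-aware JSON encoder, Babel i18n,
# permissive CORS, and flask_restful resources for deployments, images, targets, clients,
# logs and metrics.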
class JsonEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, decimal.Decimal):
return float(obj)
return JSONEncoder.default(self, obj)
def create_babel(app):
return Babel(app)
def create_app():
sqlalchemy_utils.i18n.get_locale = get_locale
# eventlet.monkey_patch(all=True)
app = Flask(__name__)
app.json_encoder = JsonEncoder
babel = create_babel(app)
logging.config.fileConfig('logging_config.ini')
app.secret_key = 'l3m0n4d1'
# CORS
CORS(app, resources={r"/*": {"origins": "*"}})
api = Api(app)
mappings = {
'/deployments': DeploymentListApi,
'/deployments/<int:deployment_id>': DeploymentDetailApi,
'/images/<int:deployment_image_id>': DeploymentImageDetailApi,
'/images': DeploymentImageListApi,
'/targets/<int:deployment_target_id>': DeploymentTargetDetailApi,
'/targets': DeploymentTargetListApi,
'/clients': ClientListApi,
'/clients/<int:client_id>': ClientDetailApi,
'/logs': DeploymentLogListApi,
'/logs/<int:deployment_log_id>': DeploymentLogDetailApi,
'/metrics': DeploymentMetricListApi,
'/metrics/<int:deployment_metric_id>': DeploymentMetricDetailApi,
}
for path, view in list(mappings.items()):
api.add_resource(view, path)
return app
app = create_app()
babel = create_babel(app)
@babel.localeselector
def get_locale():
return request.args.get('lang') or \
request.accept_languages.best_match(['pt', 'en']) or 'pt'
# @requires_auth
# def check_auth():
# if flask_globals.user.id != 1:
# abort(401)
def marshmallow_errors():
"""
    Static list of validation error keys used by marshmallow, kept here so that
    pybabel can extract the messages for translation
"""
gettext('Missing data for required field.')
gettext('Not a valid integer.')
gettext('Not a valid datetime.')
#
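# Entry point: load the YAML config referenced by SEED_CONFIG, configure SQLAlchemy, RQ and
# Babel from it, then serve with Flask's debug server in dev or an eventlet WSGI server otherwise.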
def main(is_main_module):
config_file = os.environ.get('SEED_CONFIG')
os.chdir(os.environ.get('SEED_HOME', '.'))
logger = logging.getLogger(__name__)
if config_file:
with open(config_file) as f:
config = yaml.load(f, Loader=yaml.FullLoader)['seed']
app.config["RESTFUL_JSON"] = {"cls": app.json_encoder}
server_config = config.get('servers', {})
app.config['SQLALCHEMY_DATABASE_URI'] = server_config.get(
'database_url')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_POOL_SIZE'] = 10
app.config['SQLALCHEMY_POOL_RECYCLE'] = 240
app.config['RQ_REDIS_URL'] = config['servers']['redis_url']
app.config['BABEL_TRANSLATION_DIRECTORIES'] = 'i18n/locales'
        app.config['BABEL_DEFAULT_TIMEZONE'] = 'UTC'
app.config.update(config.get('config', {}))
app.config['SEED_CONFIG'] = config
db.init_app(app)
rq.init_app(app)
# RQ Dashboard
# app.config.from_object(rq_dashboard.default_settings)
# app.config.from_object(app.config)
# app.register_blueprint(rq_dashboard.blueprint, url_prefix='/dashboard')
#migrate = Migrate(app, db)
migrate = Migrate(app, db, compare_type=True)
port = int(config.get('port', 5000))
logger.debug('Running in %s mode', config.get('environment'))
if is_main_module:
if config.get('environment', 'dev') == 'dev':
app.run(debug=True, port=port)
else:
eventlet.wsgi.server(eventlet.listen(('', port)), app)
else:
logger.error('Please, set SEED_CONFIG environment variable')
exit(1)
main(__name__ == '__main__')
|
import sched as s
import signal, os
if __name__ == '__main__':
    nb = 0
    error = 0
    try:
        nb, error = s.scheduler()
    except Exception as inst:
        print(inst)
|
import smart_imports
smart_imports.all()
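# Tests for the game turn counter and its linguistics date wrapper: turn numbers are persisted
# via dext settings, and calendar feasts contribute linguistic restriction ids.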
class TimeTest(utils_testcase.TestCase):
def test_creation(self):
dext_settings_models.Setting.objects.all().delete()
dext_settings.settings.refresh()
settings_number = dext_settings_models.Setting.objects.all().count()
self.assertEqual(game_turn.number(), 0)
self.assertEqual(dext_settings_models.Setting.objects.all().count(), settings_number)
game_turn.increment()
self.assertEqual(game_turn.number(), 1)
self.assertEqual(dext_settings_models.Setting.objects.all().count(), settings_number + 1)
def test_get_current_time(self):
self.assertEqual(game_turn.number(), 0)
def test_increment_turn(self):
self.assertEqual(game_turn.number(), 0)
game_turn.increment()
self.assertEqual(game_turn.number(), 1)
def test_ui_info(self):
game_turn.increment()
self.assertEqual(game_turn.ui_info()['number'], 1)
def test_game_time(self):
self.assertEqual(game_turn.game_datetime(), tt_calendar.DateTime(0, 0, 0, 0, 0, 0))
game_turn.increment()
self.assertEqual(game_turn.game_datetime(), tt_calendar.DateTime(0, 0, 0, 0, 2, 0))
class LinguisticsDateTests(utils_testcase.TestCase):
def setUp(self):
super(LinguisticsDateTests, self).setUp()
linguistics_logic.sync_static_restrictions()
self.date = game_turn.LinguisticsDate(tt_calendar.Date(year=1, month=2, day=3))
def test_utg_name_form(self):
self.assertEqual(self.date.utg_name_form, utg_words.WordForm(utg_words.Word(type=utg_relations.WORD_TYPE.TEXT, forms=('4 юного квинта сырого месяца 2 года',), properties=utg_words.Properties())))
def test_linguistics_restrictions__no_feasts(self):
now = datetime.datetime(year=34, month=2, day=28, hour=0, minute=0, second=0)
for feast in tt_calendar.REAL_FEAST.records:
restriction_id = linguistics_restrictions.get(feast)
self.assertNotIn(restriction_id, self.date.linguistics_restrictions(now))
def test_linguistics_restrictions__has_feast(self):
for feast in tt_calendar.REAL_FEAST.records:
restriction_id = linguistics_restrictions.get(feast)
for interval in feast.intervals:
self.assertIn(restriction_id, self.date.linguistics_restrictions(interval[0] + (interval[1] - interval[0]) / 2))
|