import os
import unittest
import paramiko
from shutil import copyfile
from paramiko.client import RejectPolicy, WarningPolicy
from tests.utils import make_tests_data_path
from webssh.policy import (
AutoAddPolicy, get_policy_dictionary, load_host_keys,
get_policy_class, check_policy_setting
)
class TestPolicy(unittest.TestCase):
def test_get_policy_dictionary(self):
classes = [AutoAddPolicy, RejectPolicy, WarningPolicy]
dic = get_policy_dictionary()
for cls in classes:
val = dic[cls.__name__.lower()]
self.assertIs(cls, val)
def test_load_host_keys(self):
path = '/path-not-exists'
host_keys = load_host_keys(path)
self.assertFalse(host_keys)
path = '/tmp'
host_keys = load_host_keys(path)
self.assertFalse(host_keys)
path = make_tests_data_path('known_hosts_example')
host_keys = load_host_keys(path)
self.assertEqual(host_keys, paramiko.hostkeys.HostKeys(path))
def test_get_policy_class(self):
keys = ['autoadd', 'reject', 'warning']
vals = [AutoAddPolicy, RejectPolicy, WarningPolicy]
for key, val in zip(keys, vals):
cls = get_policy_class(key)
self.assertIs(cls, val)
key = 'non-exists'
with self.assertRaises(ValueError):
get_policy_class(key)
def test_check_policy_setting(self):
host_keys_filename = make_tests_data_path('host_keys_test.db')
host_keys_settings = dict(
host_keys=paramiko.hostkeys.HostKeys(),
system_host_keys=paramiko.hostkeys.HostKeys(),
host_keys_filename=host_keys_filename
)
with self.assertRaises(ValueError):
check_policy_setting(RejectPolicy, host_keys_settings)
try:
os.unlink(host_keys_filename)
except OSError:
pass
check_policy_setting(AutoAddPolicy, host_keys_settings)
self.assertEqual(os.path.exists(host_keys_filename), True)
def test_is_missing_host_key(self):
client = paramiko.SSHClient()
file1 = make_tests_data_path('known_hosts_example')
file2 = make_tests_data_path('known_hosts_example2')
client.load_host_keys(file1)
client.load_system_host_keys(file2)
autoadd = AutoAddPolicy()
for f in [file1, file2]:
entry = paramiko.hostkeys.HostKeys(f)._entries[0]
hostname = entry.hostnames[0]
key = entry.key
self.assertIsNone(
autoadd.is_missing_host_key(client, hostname, key)
)
for f in [file1, file2]:
entry = paramiko.hostkeys.HostKeys(f)._entries[0]
hostname = entry.hostnames[0][1:]
key = entry.key
self.assertTrue(
autoadd.is_missing_host_key(client, hostname, key)
)
file3 = make_tests_data_path('known_hosts_example3')
entry = paramiko.hostkeys.HostKeys(file3)._entries[0]
hostname = entry.hostnames[0]
key = entry.key
with self.assertRaises(paramiko.BadHostKeyException):
autoadd.is_missing_host_key(client, hostname, key)
def test_missing_host_key(self):
client = paramiko.SSHClient()
file1 = make_tests_data_path('known_hosts_example')
file2 = make_tests_data_path('known_hosts_example2')
filename = make_tests_data_path('known_hosts')
copyfile(file1, filename)
client.load_host_keys(filename)
n1 = len(client._host_keys)
autoadd = AutoAddPolicy()
entry = paramiko.hostkeys.HostKeys(file2)._entries[0]
hostname = entry.hostnames[0]
key = entry.key
autoadd.missing_host_key(client, hostname, key)
self.assertEqual(len(client._host_keys), n1 + 1)
self.assertEqual(paramiko.hostkeys.HostKeys(filename),
client._host_keys)
os.unlink(filename)
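# A minimal usage sketch (not part of the test suite), assuming webssh.policy
# behaves as the tests above exercise it; the host-keys path is illustrative.
def _example_policy_usage():
    policy_class = get_policy_class('autoadd')  # resolves to AutoAddPolicy
    settings = dict(
        host_keys=paramiko.hostkeys.HostKeys(),
        system_host_keys=paramiko.hostkeys.HostKeys(),
        host_keys_filename='/tmp/webssh_host_keys.db',  # created if missing
    )
    check_policy_setting(policy_class, settings)
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(policy_class())
    return client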
|
#!/usr/bin/python
#
# Copyright 2019 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: v1/agent.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='v1/agent.proto',
package='v1',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0ev1/agent.proto\x12\x02v1\x1a\x1fgoogle/protobuf/timestamp.proto\"\xf9\x01\n\x05\x41gent\x12\x0c\n\x04uuid\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x11\n\tnamespace\x18\x03 \x01(\t\x12.\n\x0bversion_api\x18\x04 \x03(\x0b\x32\x19.v1.Agent.VersionApiEntry\x12.\n\ncreated_at\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\nupdated_at\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a\x31\n\x0fVersionApiEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\";\n\x10\x41gentBodyRequest\x12\r\n\x05owner\x18\x01 \x01(\t\x12\x18\n\x05\x61gent\x18\x02 \x01(\x0b\x32\t.v1.Agent\"_\n\x12ListAgentsResponse\x12\r\n\x05\x63ount\x18\x01 \x01(\x05\x12\x1a\n\x07results\x18\x02 \x03(\x0b\x32\t.v1.Agent\x12\x10\n\x08previous\x18\x03 \x01(\t\x12\x0c\n\x04next\x18\x04 \x01(\t\"\xb9\x01\n\x05Queue\x12\x0c\n\x04uuid\x18\x01 \x01(\t\x12\r\n\x05\x61gent\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x10\n\x08priority\x18\x04 \x01(\x05\x12\x13\n\x0b\x63oncurrency\x18\x05 \x01(\x05\x12.\n\ncreated_at\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12.\n\nupdated_at\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"J\n\x10QueueBodyRequest\x12\r\n\x05owner\x18\x01 \x01(\t\x12\r\n\x05\x61gent\x18\x02 \x01(\t\x12\x18\n\x05queue\x18\x03 \x01(\x0b\x32\t.v1.Queue\"_\n\x12ListQueuesResponse\x12\r\n\x05\x63ount\x18\x01 \x01(\x05\x12\x1a\n\x07results\x18\x02 \x03(\x0b\x32\t.v1.Queue\x12\x10\n\x08previous\x18\x03 \x01(\t\x12\x0c\n\x04next\x18\x04 \x01(\tb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,])
_AGENT_VERSIONAPIENTRY = _descriptor.Descriptor(
name='VersionApiEntry',
full_name='v1.Agent.VersionApiEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='v1.Agent.VersionApiEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='v1.Agent.VersionApiEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=256,
serialized_end=305,
)
_AGENT = _descriptor.Descriptor(
name='Agent',
full_name='v1.Agent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='v1.Agent.uuid', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='v1.Agent.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='namespace', full_name='v1.Agent.namespace', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version_api', full_name='v1.Agent.version_api', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='created_at', full_name='v1.Agent.created_at', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='updated_at', full_name='v1.Agent.updated_at', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_AGENT_VERSIONAPIENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=56,
serialized_end=305,
)
_AGENTBODYREQUEST = _descriptor.Descriptor(
name='AgentBodyRequest',
full_name='v1.AgentBodyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='owner', full_name='v1.AgentBodyRequest.owner', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='agent', full_name='v1.AgentBodyRequest.agent', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=307,
serialized_end=366,
)
_LISTAGENTSRESPONSE = _descriptor.Descriptor(
name='ListAgentsResponse',
full_name='v1.ListAgentsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='count', full_name='v1.ListAgentsResponse.count', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='results', full_name='v1.ListAgentsResponse.results', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='previous', full_name='v1.ListAgentsResponse.previous', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='next', full_name='v1.ListAgentsResponse.next', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=368,
serialized_end=463,
)
_QUEUE = _descriptor.Descriptor(
name='Queue',
full_name='v1.Queue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uuid', full_name='v1.Queue.uuid', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='agent', full_name='v1.Queue.agent', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='v1.Queue.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='priority', full_name='v1.Queue.priority', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='concurrency', full_name='v1.Queue.concurrency', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='created_at', full_name='v1.Queue.created_at', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='updated_at', full_name='v1.Queue.updated_at', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=466,
serialized_end=651,
)
_QUEUEBODYREQUEST = _descriptor.Descriptor(
name='QueueBodyRequest',
full_name='v1.QueueBodyRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='owner', full_name='v1.QueueBodyRequest.owner', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='agent', full_name='v1.QueueBodyRequest.agent', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='queue', full_name='v1.QueueBodyRequest.queue', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=653,
serialized_end=727,
)
_LISTQUEUESRESPONSE = _descriptor.Descriptor(
name='ListQueuesResponse',
full_name='v1.ListQueuesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='count', full_name='v1.ListQueuesResponse.count', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='results', full_name='v1.ListQueuesResponse.results', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='previous', full_name='v1.ListQueuesResponse.previous', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='next', full_name='v1.ListQueuesResponse.next', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=729,
serialized_end=824,
)
_AGENT_VERSIONAPIENTRY.containing_type = _AGENT
_AGENT.fields_by_name['version_api'].message_type = _AGENT_VERSIONAPIENTRY
_AGENT.fields_by_name['created_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_AGENT.fields_by_name['updated_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_AGENTBODYREQUEST.fields_by_name['agent'].message_type = _AGENT
_LISTAGENTSRESPONSE.fields_by_name['results'].message_type = _AGENT
_QUEUE.fields_by_name['created_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_QUEUE.fields_by_name['updated_at'].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_QUEUEBODYREQUEST.fields_by_name['queue'].message_type = _QUEUE
_LISTQUEUESRESPONSE.fields_by_name['results'].message_type = _QUEUE
DESCRIPTOR.message_types_by_name['Agent'] = _AGENT
DESCRIPTOR.message_types_by_name['AgentBodyRequest'] = _AGENTBODYREQUEST
DESCRIPTOR.message_types_by_name['ListAgentsResponse'] = _LISTAGENTSRESPONSE
DESCRIPTOR.message_types_by_name['Queue'] = _QUEUE
DESCRIPTOR.message_types_by_name['QueueBodyRequest'] = _QUEUEBODYREQUEST
DESCRIPTOR.message_types_by_name['ListQueuesResponse'] = _LISTQUEUESRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Agent = _reflection.GeneratedProtocolMessageType('Agent', (_message.Message,), {
'VersionApiEntry' : _reflection.GeneratedProtocolMessageType('VersionApiEntry', (_message.Message,), {
'DESCRIPTOR' : _AGENT_VERSIONAPIENTRY,
'__module__' : 'v1.agent_pb2'
# @@protoc_insertion_point(class_scope:v1.Agent.VersionApiEntry)
})
,
'DESCRIPTOR' : _AGENT,
'__module__' : 'v1.agent_pb2'
# @@protoc_insertion_point(class_scope:v1.Agent)
})
_sym_db.RegisterMessage(Agent)
_sym_db.RegisterMessage(Agent.VersionApiEntry)
AgentBodyRequest = _reflection.GeneratedProtocolMessageType('AgentBodyRequest', (_message.Message,), {
'DESCRIPTOR' : _AGENTBODYREQUEST,
'__module__' : 'v1.agent_pb2'
# @@protoc_insertion_point(class_scope:v1.AgentBodyRequest)
})
_sym_db.RegisterMessage(AgentBodyRequest)
ListAgentsResponse = _reflection.GeneratedProtocolMessageType('ListAgentsResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTAGENTSRESPONSE,
'__module__' : 'v1.agent_pb2'
# @@protoc_insertion_point(class_scope:v1.ListAgentsResponse)
})
_sym_db.RegisterMessage(ListAgentsResponse)
Queue = _reflection.GeneratedProtocolMessageType('Queue', (_message.Message,), {
'DESCRIPTOR' : _QUEUE,
'__module__' : 'v1.agent_pb2'
# @@protoc_insertion_point(class_scope:v1.Queue)
})
_sym_db.RegisterMessage(Queue)
QueueBodyRequest = _reflection.GeneratedProtocolMessageType('QueueBodyRequest', (_message.Message,), {
'DESCRIPTOR' : _QUEUEBODYREQUEST,
'__module__' : 'v1.agent_pb2'
# @@protoc_insertion_point(class_scope:v1.QueueBodyRequest)
})
_sym_db.RegisterMessage(QueueBodyRequest)
ListQueuesResponse = _reflection.GeneratedProtocolMessageType('ListQueuesResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTQUEUESRESPONSE,
'__module__' : 'v1.agent_pb2'
# @@protoc_insertion_point(class_scope:v1.ListQueuesResponse)
})
_sym_db.RegisterMessage(ListQueuesResponse)
_AGENT_VERSIONAPIENTRY._options = None
# @@protoc_insertion_point(module_scope)
|
# users/views.py
from rest_framework import viewsets
from . import models
from . import serializers
from rest_framework import permissions
class UserViewSet(viewsets.ModelViewSet):
queryset = models.CustomUser.objects.all()
serializer_class = serializers.UserSerializer
permission_classes = (permissions.IsAuthenticated,)
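# Not part of the original file: a minimal sketch of how this ViewSet is
# typically exposed through a DRF router; the 'users' prefix and the separate
# users/urls.py module are assumptions.
#
# from rest_framework.routers import DefaultRouter
# from . import views
#
# router = DefaultRouter()
# router.register(r'users', views.UserViewSet, basename='user')
# urlpatterns = router.urls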
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.signup, name='signup'),
]
|
# 133. Clone Graph
# ttungl@gmail.com
# Definition for an undirected graph node
# class UndirectedGraphNode:
# def __init__(self, x):
# self.label = x
# self.neighbors = []
class Solution:
    def __init__(self):
        self.visited = {}
    # @param node, an undirected graph node
    # @return an undirected graph node
    def cloneGraph(self, node):
        # sol 1
        # use BFS (queue); note that pop() takes from the tail, so the actual
        # traversal order is LIFO, which still clones the graph correctly.
        # time O(n^2)
        # space O(n)
        # runtime: 72ms
        if not node:
            return node
        root = UndirectedGraphNode(node.label)
        queue, visit = [node], {}  # add nodes to queue.
        visit[node.label] = root  # init value for dict.
        while queue:
            top = queue.pop()
            for n in top.neighbors:  # check its neighbors if visited.
                if n.label not in visit:  # add node.
                    queue.append(n)
                    visit[n.label] = UndirectedGraphNode(n.label)
                visit[top.label].neighbors.append(visit[n.label])
        return root
    def cloneGraphDFS(self, node):
        # sol 2
        # use DFS (recursive, memoized in self.visited)
        # runtime: 79ms
        # time: O(n^2)
        # space: O(n)
        if not node:
            return node
        if node.label in self.visited:
            return self.visited[node.label]
        clone = UndirectedGraphNode(node.label)  # init graph node
        self.visited[node.label] = clone  # init value for dict.
        for n in node.neighbors:
            clone.neighbors.append(self.cloneGraphDFS(n))
        return clone
    def cloneGraphDFS2(self, node):
        # sol 3
        # use DFS with an explicit visited dict keyed by node
        # runtime: 122ms
        # time: O(n^2)
        # space: O(n)
        def DFS(node, visited):
            if node in visited:
                return visited[node]
            clone = UndirectedGraphNode(node.label)
            visited[node] = clone
            for x in node.neighbors:
                clone.neighbors.append(DFS(x, visited))
            return clone
        if not node:
            return node
        return DFS(node, {})
    def cloneGraphBFS(self, node):
        # sol 4
        # use BFS (queue)
        # runtime: 95ms
        # time: O(n^2)
        # space: O(n)
        def BFS(node, visited, queue):
            clone = UndirectedGraphNode(node.label)
            visited[node] = clone
            while queue:
                top = queue.pop()
                for nb in top.neighbors:
                    if nb not in visited:  # neighbor not in visited dictionary
                        clonecp = UndirectedGraphNode(nb.label)
                        visited[nb] = clonecp
                        visited[top].neighbors.append(clonecp)
                        queue.append(nb)
                    else:
                        visited[top].neighbors.append(visited[nb])
            return clone
        if not node:
            return node
        visited, queue = {}, [node]
        return BFS(node, visited, queue)
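# Quick sanity sketch (not part of the original solution): exercises cloneGraph
# on a two-node cycle, using the UndirectedGraphNode class described in the
# header comment above.
if __name__ == '__main__':
    class UndirectedGraphNode:
        def __init__(self, x):
            self.label = x
            self.neighbors = []
    a, b = UndirectedGraphNode(0), UndirectedGraphNode(1)
    a.neighbors.append(b)
    b.neighbors.append(a)
    copy = Solution().cloneGraph(a)
    assert copy is not a and copy.label == 0
    assert copy.neighbors[0].label == 1 and copy.neighbors[0].neighbors[0] is copy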
|
from .Bible import *
class BibleParserBase:
name = "Base"
fileEndings = []
def __init__(self, file_name):
self.file_name = file_name
self.bible = Bible()
    def isValidFileEnding(self, file_name):
        # Match on the file extension itself rather than anywhere in the name.
        for ending in self.fileEndings:
            if file_name.endswith('.' + ending):
                return True
        return False
    def getParserName(self):
        return self.name
    def getParserEndings(self):
        return self.fileEndings
def loadAll(self):
pass
def loadInfo(self):
pass
def check_extention(type_class, file_name):
return type_class.isValidFileEnding(type_class, file_name)
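# Illustrative only (not in the original module): a hypothetical subclass
# showing the extension points BibleParserBase expects: a concrete parser sets
# `name` and `fileEndings` and overrides loadAll()/loadInfo() to fill
# self.bible from self.file_name.
class ExampleXmlParser(BibleParserBase):
    name = "ExampleXML"
    fileEndings = ["xml"]
    def loadAll(self):
        # A real parser would read self.file_name and populate self.bible here.
        pass
    def loadInfo(self):
        pass
# check_extention(ExampleXmlParser, "genesis.xml") would then return True.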
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('api', '0050_remove_userpreferences_accept_friend_requests'),
]
operations = [
migrations.AddField(
model_name='userpreferences',
name='mal',
field=models.CharField(validators=[django.core.validators.RegexValidator(b'^[0-9a-zA-Z-_\\.]*$', b'Only alphanumeric and - _ characters are allowed.')], max_length=20, blank=True, help_text='Write your username only, no URL.', null=True, verbose_name=b'MyAnimeList'),
preserve_default=True,
),
migrations.AddField(
model_name='userpreferences',
name='otonokizaka',
field=models.CharField(validators=[django.core.validators.RegexValidator(b'^[0-9a-zA-Z-_\\.]*$', b'Only alphanumeric and - _ characters are allowed.')], max_length=20, blank=True, help_text='Write your username only, no URL.', null=True, verbose_name=b'Otonokizaka.org Forum'),
preserve_default=True,
),
migrations.AddField(
model_name='userpreferences',
name='twitch',
field=models.CharField(blank=True, max_length=20, null=True, help_text='Write your username only, no URL.', validators=[django.core.validators.RegexValidator(b'^[0-9a-zA-Z-_\\.]*$', b'Only alphanumeric and - _ characters are allowed.')]),
preserve_default=True,
),
]
|
""" Unit Conversion Agent for Whyis
Uses <http://tetherless-world.github.io/whyis/inference>
as a template.
"""
from __future__ import division
from past.utils import old_div
import nltk, re, pprint
from rdflib import *
from rdflib.resource import Resource
from time import time
from whyis import autonomic
from whyis import nanopub
from whyis.namespace import sioc_types, sioc, sio, dc, prov, whyis
from .attr_converter import convert_attr_to_units
class UnitConverter(autonomic.GlobalChangeService):
activity_class = URIRef("http://nanomine.org/ns/WhyisUnitConverterV002")
def getInputClass(self):
return sio.Entity
def getOutputClass(self):
return URIRef("StandardizedConversionEntity")
def get_query(self):
query = '''SELECT ?s WHERE {
?s <http://semanticscience.org/resource/hasAttribute> ?attr.
?attr <http://semanticscience.org/resource/hasUnit> [];
<http://semanticscience.org/resource/hasValue> [];
a [ <http://nanomine.org/ns/hasPreferredUnit> ?prefUnit ].
}'''
return query
def process(self, i, o):
for attr in i.objects(sio.hasAttribute):
converted = convert_attr_to_units(attr)
if converted:
activity = BNode()
for new_meas in converted:
# Add new measurement to graph
o.add(sio.hasAttribute, new_meas)
# note provenance of new data--SUPERSEDED by superclass's explain() function
# o.graph.add((new_meas.identifier, prov.wasGeneratedBy, activity))
# o.graph.add((activity, prov.used, attr.identifier))
# o.graph.add((activity, prov.generated, new_meas.identifier))
# o.graph.add((activity, prov.atTime, Literal(util.date_time(t=time()))))
# o.graph.add((activity, prov.wasAssociatedWith, URIRef("http://nanomine.org/ns/WhyisUnitConverterV002")))
# Add all triples for the measurement
for p_, o_ in new_meas.predicate_objects():
if isinstance(o_, Resource):
o.graph.add((new_meas.identifier, p_.identifier, o_.identifier))
else:
o.graph.add((new_meas.identifier, p_.identifier, o_))
|
import discord
from discord.commands import slash_command
from discord.ext import commands
class BonkV1(commands.Cog):
def __init__(self, bot):
self.bot = bot
@slash_command(name="bonk", description="bonkkk")
async def bonk_user(self, ctx):
embedVar = discord.Embed()
embedVar.description = f"Bonked {ctx.author.mention}"
file = discord.File("./Bot/Cogs/images/bonk.gif")
embedVar.set_image(url="attachment://bonk.gif")
await ctx.respond(embed=embedVar, file=file)
def setup(bot):
bot.add_cog(BonkV1(bot))
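# Not part of the original cog: it is loaded as an extension from the bot's
# entry point; the dotted module path below is an assumption based on the
# "./Bot/Cogs/images/bonk.gif" path used above.
#
# bot.load_extension("Bot.Cogs.bonk")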
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
import six
from six.moves import http_client as http
import webob.exc
from wsme.rest import json
from glance.api import policy
from glance.api.v2 import metadef_namespaces as namespaces
from glance.api.v2.model.metadef_namespace import Namespace
from glance.api.v2.model.metadef_property_type import PropertyType
from glance.api.v2.model.metadef_property_type import PropertyTypes
from glance.common import exception
from glance.common import wsgi
import glance.db
import glance.gateway
from glance.i18n import _
import glance.notifier
import glance.schema
LOG = logging.getLogger(__name__)
class NamespacePropertiesController(object):
def __init__(self, db_api=None, policy_enforcer=None, notifier=None):
self.db_api = db_api or glance.db.get_api()
self.policy = policy_enforcer or policy.Enforcer()
self.notifier = notifier or glance.notifier.Notifier()
self.gateway = glance.gateway.Gateway(db_api=self.db_api,
notifier=self.notifier,
policy_enforcer=self.policy)
def _to_dict(self, model_property_type):
# Convert the model PropertyTypes dict to a JSON encoding
db_property_type_dict = dict()
db_property_type_dict['schema'] = json.tojson(
PropertyType, model_property_type)
db_property_type_dict['name'] = model_property_type.name
return db_property_type_dict
def _to_model(self, db_property_type):
# Convert the persisted json schema to a dict of PropertyTypes
property_type = json.fromjson(
PropertyType, db_property_type.schema)
property_type.name = db_property_type.name
return property_type
def index(self, req, namespace):
try:
filters = dict()
filters['namespace'] = namespace
prop_repo = self.gateway.get_metadef_property_repo(req.context)
db_properties = prop_repo.list(filters=filters)
property_list = Namespace.to_model_properties(db_properties)
namespace_properties = PropertyTypes()
namespace_properties.properties = property_list
except exception.Forbidden as e:
LOG.debug("User not permitted to retrieve metadata properties "
"within '%s' namespace", namespace)
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except Exception as e:
LOG.error(encodeutils.exception_to_unicode(e))
raise webob.exc.HTTPInternalServerError()
return namespace_properties
def show(self, req, namespace, property_name, filters=None):
try:
if filters and filters['resource_type']:
rs_repo = self.gateway.get_metadef_resource_type_repo(
req.context)
db_resource_type = rs_repo.get(filters['resource_type'],
namespace)
prefix = db_resource_type.prefix
if prefix and property_name.startswith(prefix):
property_name = property_name[len(prefix):]
else:
msg = (_("Property %(property_name)s does not start "
"with the expected resource type association "
"prefix of '%(prefix)s'.")
% {'property_name': property_name,
'prefix': prefix})
raise exception.NotFound(msg)
prop_repo = self.gateway.get_metadef_property_repo(req.context)
db_property = prop_repo.get(namespace, property_name)
property = self._to_model(db_property)
except exception.Forbidden as e:
LOG.debug("User not permitted to show metadata property '%s' "
"within '%s' namespace", property_name, namespace)
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except Exception as e:
LOG.error(encodeutils.exception_to_unicode(e))
raise webob.exc.HTTPInternalServerError()
return property
def create(self, req, namespace, property_type):
prop_factory = self.gateway.get_metadef_property_factory(req.context)
prop_repo = self.gateway.get_metadef_property_repo(req.context)
try:
new_property_type = prop_factory.new_namespace_property(
namespace=namespace, **self._to_dict(property_type))
prop_repo.add(new_property_type)
except exception.Forbidden as e:
LOG.debug("User not permitted to create metadata property within "
"'%s' namespace", namespace)
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.Invalid as e:
msg = (_("Couldn't create metadata property: %s")
% encodeutils.exception_to_unicode(e))
raise webob.exc.HTTPBadRequest(explanation=msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Duplicate as e:
raise webob.exc.HTTPConflict(explanation=e.msg)
except Exception as e:
LOG.error(encodeutils.exception_to_unicode(e))
raise webob.exc.HTTPInternalServerError()
return self._to_model(new_property_type)
def update(self, req, namespace, property_name, property_type):
prop_repo = self.gateway.get_metadef_property_repo(req.context)
try:
db_property_type = prop_repo.get(namespace, property_name)
db_property_type._old_name = db_property_type.name
db_property_type.name = property_type.name
db_property_type.schema = (self._to_dict(property_type))['schema']
updated_property_type = prop_repo.save(db_property_type)
except exception.Invalid as e:
msg = (_("Couldn't update metadata property: %s")
% encodeutils.exception_to_unicode(e))
raise webob.exc.HTTPBadRequest(explanation=msg)
except exception.Forbidden as e:
LOG.debug("User not permitted to update metadata property '%s' "
"within '%s' namespace", property_name, namespace)
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except exception.Duplicate as e:
raise webob.exc.HTTPConflict(explanation=e.msg)
except Exception as e:
LOG.error(encodeutils.exception_to_unicode(e))
raise webob.exc.HTTPInternalServerError()
return self._to_model(updated_property_type)
def delete(self, req, namespace, property_name):
prop_repo = self.gateway.get_metadef_property_repo(req.context)
try:
property_type = prop_repo.get(namespace, property_name)
property_type.delete()
prop_repo.remove(property_type)
except exception.Forbidden as e:
LOG.debug("User not permitted to delete metadata property '%s' "
"within '%s' namespace", property_name, namespace)
raise webob.exc.HTTPForbidden(explanation=e.msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.msg)
except Exception as e:
LOG.error(encodeutils.exception_to_unicode(e))
raise webob.exc.HTTPInternalServerError()
class RequestDeserializer(wsgi.JSONRequestDeserializer):
_disallowed_properties = ['created_at', 'updated_at']
def __init__(self, schema=None):
super(RequestDeserializer, self).__init__()
self.schema = schema or get_schema()
def _get_request_body(self, request):
output = super(RequestDeserializer, self).default(request)
if 'body' not in output:
msg = _('Body expected in request.')
raise webob.exc.HTTPBadRequest(explanation=msg)
return output['body']
@classmethod
def _check_allowed(cls, image):
for key in cls._disallowed_properties:
if key in image:
msg = _("Attribute '%s' is read-only.") % key
raise webob.exc.HTTPForbidden(explanation=msg)
def create(self, request):
body = self._get_request_body(request)
self._check_allowed(body)
try:
self.schema.validate(body)
except exception.InvalidObject as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
property_type = json.fromjson(PropertyType, body)
return dict(property_type=property_type)
def update(self, request):
body = self._get_request_body(request)
self._check_allowed(body)
try:
self.schema.validate(body)
except exception.InvalidObject as e:
raise webob.exc.HTTPBadRequest(explanation=e.msg)
property_type = json.fromjson(PropertyType, body)
return dict(property_type=property_type)
def show(self, request):
params = request.params.copy()
query_params = {
'filters': params
}
return query_params
class ResponseSerializer(wsgi.JSONResponseSerializer):
def __init__(self, schema=None):
super(ResponseSerializer, self).__init__()
self.schema = schema
def show(self, response, result):
property_type_json = json.tojson(PropertyType, result)
body = jsonutils.dumps(property_type_json, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
def index(self, response, result):
property_type_json = json.tojson(PropertyTypes, result)
body = jsonutils.dumps(property_type_json, ensure_ascii=False)
response.unicode_body = six.text_type(body)
response.content_type = 'application/json'
def create(self, response, result):
response.status_int = http.CREATED
self.show(response, result)
def update(self, response, result):
response.status_int = http.OK
self.show(response, result)
def delete(self, response, result):
response.status_int = http.NO_CONTENT
def _get_base_definitions():
return {
"positiveInteger": {
"type": "integer",
"minimum": 0
},
"positiveIntegerDefault0": {
"allOf": [
{"$ref": "#/definitions/positiveInteger"},
{"default": 0}
]
},
"stringArray": {
"type": "array",
"items": {"type": "string"},
"minItems": 1,
"uniqueItems": True
}
}
def _get_base_properties():
base_def = namespaces.get_schema_definitions()
return base_def['property']['additionalProperties']['properties']
def get_schema(require_name=True):
definitions = _get_base_definitions()
properties = _get_base_properties()
mandatory_attrs = PropertyType.get_mandatory_attrs()
if require_name:
# name is required attribute when use as single property type
mandatory_attrs.append('name')
schema = glance.schema.Schema(
'property',
properties,
required=mandatory_attrs,
definitions=definitions
)
return schema
def get_collection_schema():
namespace_properties_schema = get_schema()
# Property name is a dict key and not a required attribute in
# individual property schema inside property collections
namespace_properties_schema.required.remove('name')
return glance.schema.DictCollectionSchema('properties',
namespace_properties_schema)
def create_resource():
"""NamespaceProperties resource factory method"""
schema = get_schema()
deserializer = RequestDeserializer(schema)
serializer = ResponseSerializer(schema)
controller = NamespacePropertiesController()
return wsgi.Resource(controller, deserializer, serializer)
|
import os
import win32security,win32file,win32api,ntsecuritycon,win32con
from security_enums import TRUSTEE_TYPE,TRUSTEE_FORM,ACE_FLAGS,ACCESS_MODE
fname = os.path.join(win32api.GetTempPath(), "win32security_test.txt")
f=open(fname, "w")
f.write("Hello from Python\n");
f.close()
print("Testing on file", fname)
new_privs = ((win32security.LookupPrivilegeValue('',ntsecuritycon.SE_SECURITY_NAME),win32con.SE_PRIVILEGE_ENABLED),
(win32security.LookupPrivilegeValue('',ntsecuritycon.SE_SHUTDOWN_NAME),win32con.SE_PRIVILEGE_ENABLED),
(win32security.LookupPrivilegeValue('',ntsecuritycon.SE_RESTORE_NAME),win32con.SE_PRIVILEGE_ENABLED),
(win32security.LookupPrivilegeValue('',ntsecuritycon.SE_TAKE_OWNERSHIP_NAME),win32con.SE_PRIVILEGE_ENABLED),
(win32security.LookupPrivilegeValue('',ntsecuritycon.SE_CREATE_PERMANENT_NAME),win32con.SE_PRIVILEGE_ENABLED),
(win32security.LookupPrivilegeValue('','SeEnableDelegationPrivilege'),win32con.SE_PRIVILEGE_ENABLED) ##doesn't seem to be in ntsecuritycon.py ?
)
ph = win32api.GetCurrentProcess()
th = win32security.OpenProcessToken(ph,win32security.TOKEN_ALL_ACCESS) ##win32con.TOKEN_ADJUST_PRIVILEGES)
win32security.AdjustTokenPrivileges(th,0,new_privs)
all_security_info = \
win32security.OWNER_SECURITY_INFORMATION|win32security.GROUP_SECURITY_INFORMATION| \
win32security.DACL_SECURITY_INFORMATION|win32security.SACL_SECURITY_INFORMATION
sd=win32security.GetFileSecurity(fname,all_security_info)
old_sacl=sd.GetSecurityDescriptorSacl()
if old_sacl is None:
    old_sacl=win32security.ACL()
old_dacl=sd.GetSecurityDescriptorDacl()
if old_dacl is None:
    old_dacl=win32security.ACL()
my_sid = win32security.GetTokenInformation(th,ntsecuritycon.TokenUser)[0]
tmp_sid = win32security.LookupAccountName('','tmp')[0]
pwr_sid = win32security.LookupAccountName('','Power Users')[0]
## MultipleTrustee,MultipleTrusteeOperation,TrusteeForm,TrusteeType,Identifier
## first two are ignored
my_trustee = {}
my_trustee['MultipleTrustee']=None
my_trustee['MultipleTrusteeOperation']=0
my_trustee['TrusteeForm']=TRUSTEE_FORM.TRUSTEE_IS_SID
my_trustee['TrusteeType']=TRUSTEE_TYPE.TRUSTEE_IS_USER
my_trustee['Identifier']=my_sid
tmp_trustee = {}
tmp_trustee['MultipleTrustee']=None
tmp_trustee['MultipleTrusteeOperation']=0
tmp_trustee['TrusteeForm']=TRUSTEE_FORM.TRUSTEE_IS_NAME
tmp_trustee['TrusteeType']=TRUSTEE_TYPE.TRUSTEE_IS_USER
tmp_trustee['Identifier']='rupole\\tmp'
pwr_trustee = {}
pwr_trustee['MultipleTrustee']=None
pwr_trustee['MultipleTrusteeOperation']=0
pwr_trustee['TrusteeForm']=TRUSTEE_FORM.TRUSTEE_IS_SID
pwr_trustee['TrusteeType']=TRUSTEE_TYPE.TRUSTEE_IS_USER
pwr_trustee['Identifier']=pwr_sid
expl_list=[]
expl_list.append(
{
'Trustee':my_trustee,
'Inheritance':ACE_FLAGS.NO_INHERITANCE,
'AccessMode':ACCESS_MODE.SET_AUDIT_SUCCESS, ##|ACCESS_MODE.SET_AUDIT_FAILURE,
'AccessPermissions':win32con.GENERIC_ALL
}
)
expl_list.append(
{
'Trustee':my_trustee,
'Inheritance':ACE_FLAGS.NO_INHERITANCE,
'AccessMode':ACCESS_MODE.SET_AUDIT_FAILURE,
'AccessPermissions':win32con.GENERIC_ALL
}
)
expl_list.append(
{
'Trustee':tmp_trustee,
'Inheritance':ACE_FLAGS.NO_INHERITANCE,
'AccessMode':ACCESS_MODE.SET_AUDIT_SUCCESS,
'AccessPermissions':win32con.GENERIC_ALL
}
)
expl_list.append(
{
'Trustee':tmp_trustee,
'Inheritance':ACE_FLAGS.NO_INHERITANCE,
'AccessMode':ACCESS_MODE.SET_AUDIT_FAILURE,
'AccessPermissions':win32con.GENERIC_ALL
}
)
old_sacl.SetEntriesInAcl(expl_list)
expl_list=[]
expl_list.append(
{
'Trustee':tmp_trustee,
'Inheritance':ACE_FLAGS.NO_INHERITANCE,
'AccessMode':ACCESS_MODE.DENY_ACCESS,
'AccessPermissions':win32con.DELETE
}
)
expl_list.append(
{
'Trustee':tmp_trustee,
'Inheritance':ACE_FLAGS.NO_INHERITANCE,
'AccessMode':ACCESS_MODE.GRANT_ACCESS,
'AccessPermissions':win32con.WRITE_OWNER
}
)
expl_list.append(
{
'Trustee':pwr_trustee,
'Inheritance':ACE_FLAGS.NO_INHERITANCE,
'AccessMode':ACCESS_MODE.GRANT_ACCESS,
'AccessPermissions':win32con.GENERIC_READ
}
)
expl_list.append(
{
'Trustee':my_trustee,
'Inheritance':ACE_FLAGS.NO_INHERITANCE,
'AccessMode':ACCESS_MODE.GRANT_ACCESS,
'AccessPermissions':win32con.GENERIC_ALL
}
)
old_dacl.SetEntriesInAcl(expl_list)
sd.SetSecurityDescriptorSacl(1,old_sacl,1)
sd.SetSecurityDescriptorDacl(1,old_dacl,1)
sd.SetSecurityDescriptorOwner(pwr_sid,1)
win32security.SetFileSecurity(fname,
all_security_info,
sd)
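## Optional read-back check (not in the original sample): list the DACL ACEs
## that were just written. PyACL.GetAce returns ((ace_type, ace_flags), mask, sid).
check_sd = win32security.GetFileSecurity(fname, win32security.DACL_SECURITY_INFORMATION)
check_dacl = check_sd.GetSecurityDescriptorDacl()
for i in range(check_dacl.GetAceCount()):
    (ace_type, ace_flags), mask, sid = check_dacl.GetAce(i)
    acct_name, domain, sid_type = win32security.LookupAccountSid('', sid)
    print(ace_type, hex(mask), domain + '\\' + acct_name)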
|
# -*- coding:utf-8 -*-
# __author__="X1gang"
# Date:2018/12/02
import os,sys
from conf.settings import USER_BANK_FILE
from core.log_write import user_logger,bank_logger
from core.login import json_func,check_online
users_atm = json_func(USER_BANK_FILE)
def check_amount(amount):
    # Reject non-numeric or non-positive input; otherwise return the amount as a float.
    if not amount.isdigit() or not(float(amount) > 0):
        return False
    return float(amount)
def check_user(user):
    if user in users_atm:
        return True
def check_user_amount(user,amount):
    if users_atm[user]["usable"] < amount:
        return False
    return True
def check_user_status(func):
    # Decorator: only run the operation when the account status is active (1).
    def wrapper(*args,**kwargs):
        user = args[0]
        if users_atm[user]["status"] == 1:
            return func(*args,**kwargs)
        user_logger.info("%s: the account is frozen and this operation is not allowed; please contact the administrator to unfreeze it." % user)
        bank_logger.info("Frozen account %s attempted an ATM operation; administrator attention required." % user)
    return wrapper
@check_online
@check_user_status
def transfer_amount(user,in_user,amount):
    if not check_user(in_user):
        print("%s: the recipient account %s does not exist!" % (user,in_user))
        user_logger.warning("%s attempted a transfer, but the recipient account %s does not exist!" % (user,in_user))
        return False
    amount = check_amount(amount)
    if not amount:
        print("The amount entered is not a positive number")
        user_logger.warning("%s attempted a transfer with a non-positive amount" % user)
        return False
    fee = 0.05*amount
    if not check_user_amount(user,(amount+fee)):
        print("The account balance is %s yuan, which is insufficient for this transfer!" % (users_atm[user]["usable"]))
        user_logger.warning("%s attempted a transfer, but the balance %s is insufficient!" % (user,users_atm[user]["usable"]))
        return False
    users_atm[user]["usable"] -= (amount+fee)
    users_atm[in_user]["usable"] += amount
    users_atm["admin"]["usable"] += fee
    print("Successfully transferred to %s the amount of %s yuan! A service fee of %s yuan was charged!" % (in_user,amount,fee))
    user_logger.info("%s successfully transferred to %s the amount of %s yuan! Service fee: %s!" % (user,in_user,amount,fee))
    bank_logger.info("%s successfully transferred to %s the amount of %s yuan! Service fee: %s!" % (user,in_user,amount,fee))
    bank_logger.info("The admin account received a service fee of %s yuan!" % fee)
@check_online
@check_user_status
def look_info(user):
    user_logger.debug("%s checked the balance!" % user)
    print("Your credit card limit is %s yuan, and the current available balance is %s yuan" % (users_atm[user]["limit"],users_atm[user]["usable"]))
@check_online
@check_user_status
def withdraw_amount(user,amount):
    amount = check_amount(amount)
    if not amount:
        print("The withdrawal amount entered is not a positive number")
        user_logger.warning("%s attempted a withdrawal with a non-positive amount" % user)
        return False
    fee = 0.05 * amount
    if not check_user_amount(user, (amount + fee)):
        print("The account balance of %s yuan is insufficient for this withdrawal!" % (users_atm[user]["usable"]))
        user_logger.warning("%s attempted a withdrawal, but the balance %s is insufficient!" % (user, users_atm[user]["usable"]))
        return False
    users_atm[user]["usable"] -= (amount + fee)
    users_atm["admin"]["usable"] += fee
    print("Successfully withdrew %s yuan! A service fee of %s yuan was charged" % (amount,fee))
    user_logger.info("%s successfully withdrew %s yuan! Service fee: %s yuan" % (user, amount,fee))
    bank_logger.info("%s successfully withdrew %s yuan! Service fee: %s yuan" % (user, amount,fee))
    bank_logger.info("The admin account received a service fee of %s yuan!" % fee)
@check_online
@check_user_status
def repay_amount(user,amount):
    amount = check_amount(amount)
    used_amount = users_atm[user]["limit"] - users_atm[user]["usable"]
    if not amount:
        print("The repayment amount entered is not a positive number")
        user_logger.warning("%s attempted a repayment with a non-positive amount" %user)
        return False
    if amount > used_amount:
        print("The repayment amount entered is greater than the amount used; only %s yuan needs to be repaid!" %(used_amount))
        user_logger.info("%s attempted a repayment greater than the amount used; only %s yuan needs to be repaid!" %(user,used_amount))
        return False
    users_atm[user]["usable"] += amount
    users_atm["admin"]["usable"] += amount
    print("Successfully repaid %s yuan!" % (amount))
    user_logger.info("%s successfully repaid %s yuan!" % (user, amount))
    bank_logger.info("%s successfully repaid %s yuan!" % (user, amount))
    bank_logger.info("The admin account received %s's repayment of %s yuan!" % (user, amount))
@check_online
@check_user_status
def consume_amount(user,amount):
    usable_amount = users_atm[user]["usable"]
    if not amount:
        print("%s: the deduction amount is not a positive number" % user)
        user_logger.warning("%s: the deduction amount passed in is not a positive number" % user)
        return False
    if amount > usable_amount:
        print("The deduction amount %s is greater than the available balance of %s yuan! Deduction failed!" % ( amount, usable_amount))
        user_logger.warning("%s: the deduction amount %s is greater than the available balance of %s yuan! Deduction failed!" % (user, amount, usable_amount))
        return False
    users_atm[user]["usable"] -= amount
    print("Successfully deducted %s yuan from your account!" % (amount))
    user_logger.info("%s was successfully charged %s yuan!" % (user, amount))
    bank_logger.info("%s was successfully charged %s yuan!" % (user, amount))
    return True
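# Illustrative usage sketch (not part of the module); the account names are
# assumptions, and amounts are passed as strings because check_amount()
# validates user input with str.isdigit(). The @check_online decorator may
# additionally require a logged-in session.
#
# look_info("alex")                        # show credit limit and usable balance
# transfer_amount("alex", "bob", "100")    # a 5% service fee is charged to the sender
# withdraw_amount("alex", "200")
# repay_amount("alex", "50")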
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import familymemberhistory
from .fhirdate import FHIRDate
class FamilyMemberHistoryTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("FamilyMemberHistory", js["resourceType"])
return familymemberhistory.FamilyMemberHistory(js)
def testFamilyMemberHistory1(self):
inst = self.instantiate_from("familymemberhistory-example.json")
self.assertIsNotNone(inst, "Must have instantiated a FamilyMemberHistory instance")
self.implFamilyMemberHistory1(inst)
js = inst.as_json()
self.assertEqual("FamilyMemberHistory", js["resourceType"])
inst2 = familymemberhistory.FamilyMemberHistory(js)
self.implFamilyMemberHistory1(inst2)
def implFamilyMemberHistory1(self, inst):
self.assertEqual(inst.condition[0].code.coding[0].code, "315619001")
self.assertEqual(inst.condition[0].code.coding[0].display, "Myocardial Infarction")
self.assertEqual(inst.condition[0].code.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.condition[0].code.text, "Heart Attack")
self.assertTrue(inst.condition[0].contributedToDeath)
self.assertEqual(inst.condition[0].note[0].text, "Was fishing at the time. At least he went doing someting he loved.")
self.assertEqual(inst.condition[0].onsetAge.code, "a")
self.assertEqual(inst.condition[0].onsetAge.system, "http://unitsofmeasure.org")
self.assertEqual(inst.condition[0].onsetAge.unit, "yr")
self.assertEqual(inst.condition[0].onsetAge.value, 74)
self.assertEqual(inst.date.date, FHIRDate("2011-03-18").date)
self.assertEqual(inst.date.as_json(), "2011-03-18")
self.assertEqual(inst.id, "father")
self.assertEqual(inst.identifier[0].value, "12345")
self.assertEqual(inst.instantiatesUri[0], "http://example.org/family-member-history-questionnaire")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.relationship.coding[0].code, "FTH")
self.assertEqual(inst.relationship.coding[0].display, "father")
self.assertEqual(inst.relationship.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-RoleCode")
self.assertEqual(inst.sex.coding[0].code, "male")
self.assertEqual(inst.sex.coding[0].display, "Male")
self.assertEqual(inst.sex.coding[0].system, "http://hl7.org/fhir/administrative-gender")
self.assertEqual(inst.status, "completed")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Father died of a heart attack aged 74</div>")
self.assertEqual(inst.text.status, "generated")
def testFamilyMemberHistory2(self):
inst = self.instantiate_from("familymemberhistory-example-mother.json")
self.assertIsNotNone(inst, "Must have instantiated a FamilyMemberHistory instance")
self.implFamilyMemberHistory2(inst)
js = inst.as_json()
self.assertEqual("FamilyMemberHistory", js["resourceType"])
inst2 = familymemberhistory.FamilyMemberHistory(js)
self.implFamilyMemberHistory2(inst2)
def implFamilyMemberHistory2(self, inst):
self.assertEqual(inst.condition[0].code.coding[0].code, "371041009")
self.assertEqual(inst.condition[0].code.coding[0].display, "Embolic Stroke")
self.assertEqual(inst.condition[0].code.coding[0].system, "http://snomed.info/sct")
self.assertEqual(inst.condition[0].code.text, "Stroke")
self.assertEqual(inst.condition[0].onsetAge.code, "a")
self.assertEqual(inst.condition[0].onsetAge.system, "http://unitsofmeasure.org")
self.assertEqual(inst.condition[0].onsetAge.unit, "yr")
self.assertEqual(inst.condition[0].onsetAge.value, 56)
self.assertEqual(inst.id, "mother")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.relationship.coding[0].code, "MTH")
self.assertEqual(inst.relationship.coding[0].display, "mother")
self.assertEqual(inst.relationship.coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-RoleCode")
self.assertEqual(inst.status, "completed")
self.assertEqual(inst.text.div, "<div xmlns=\"http://www.w3.org/1999/xhtml\">Mother died of a stroke aged 56</div>")
self.assertEqual(inst.text.status, "generated")
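# Not part of the generated file: these tests read the FHIR example JSON files
# from the directory named by FHIR_UNITTEST_DATADIR (see instantiate_from above),
# e.g.
#   FHIR_UNITTEST_DATADIR=/path/to/fhir/examples python -m unittest discover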
|
import git
import os
import shutil
import platform
from tkinter import *
import jsonCreator
#import imp
from git import Repo,remote
import webbrowser
#import finalPrinter
import subprocess
from subprocess import Popen, PIPE, STDOUT
import stat
#try:
#imp.find_module('pyperclip')
#found = True
#except ImportError:
#found = False
#if found:
import pyperclip
devMode = False
src=os.path.dirname(os.path.realpath(__file__))
usn='Insert username here!'
pwd='Insert password here!'
abortWhenClose=True
def onerror(func, path, exc_info):
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
if(os.path.exists(f'{src}/quiz')):
shutil.rmtree(f'{src}/quiz', onerror=onerror)
git.Git(".").clone("https://github.com/cubered/quiz.git")
def clearScreen():
if(platform.system()=='Windows'):
os.system('cls')
else:
os.system('clear')
return
root=Tk()
final=[]
tempbool=False
temp=[]
name=""
root.title('Geography quiz')
root.iconphoto(True, PhotoImage(file="./Data/bolygo.png"))
NevText=Label(root, text="Name: ")
NevText.pack()
NevEntry=Entry(root)
NevEntry.pack()
index = 1
def closeAddQTab(entry, tab):
global temp
global final
temp[0]=str(entry.get())
tab.destroy()
print('tempwhenclose: ', temp)
final.append(temp)
return
def closeAddAnsTab(entry, tab, checkbx):
global temp
global tempbool
temp.append([str(entry.get()), tempbool])
tab.destroy()
return
def makeAddQTab():
global temp
temp=[]
temp.append("")
    addQTab=Tk()
    KerdesText=Label(addQTab, text="Question: ")
    entry = Entry(addQTab)
    KerdesText.pack()
    entry.pack()
    addAnsButton=Button(addQTab, text='Add answer', command= lambda: addAnswer())
    closeBTN=Button(addQTab, text='Done', command= lambda: closeAddQTab(entry, addQTab))
    addAnsButton.pack()
    closeBTN.pack()
    addQTab.title('Add question')
#addQTab.iconphoto(True, PhotoImage(file="./Data/bolygo.png"))
addQTab.mainloop()
print('temp: ', temp)
#del temp[-1]
#del temp[-1]
return
def toggle():
global tempbool
tempbool=not(tempbool)
def addAnswer():
global temp
global tempbool
tempbool=False
    addAnsTab=Tk()
    addAnsTab.title('Add answer')
    #addAnsTab.iconphoto(True, PhotoImage(file="./Data/bolygo.png"))
    ValaszText=Label(addAnsTab, text="Answer: ")
    entry = Entry(addAnsTab)
    ValaszText.pack()
    entry.pack()
    correct=Checkbutton(addAnsTab, text="Correct?", command=toggle)
    correct.pack()
    closeBTN=Button(addAnsTab, text='Done', command= lambda: closeAddAnsTab(entry, addAnsTab, correct))
closeBTN.pack()
def addQuestion():
makeAddQTab()
print('final:', final)
return
def cancelQuiz():
global root
root.destroy()
def done(entry):
global root
global name
global abortWhenClose
name=str(entry.get())
if(name==''):
name='unnamed'
print(name)
abortWhenClose=False
root.destroy()
addQButton=Button(root, text='Add question', command= lambda: addQuestion())
addQButton.pack()
doneButton=Button(root, text='Done', command= lambda: done(NevEntry))
doneButton.pack()
cancelButton=Button(root, text='Cancel', command= lambda: cancelQuiz())
cancelButton.pack()
root.mainloop()
if not(abortWhenClose):
print('final2: ', final)
jsonCreator.main(final)
clearScreen()
def callback(url):
webbrowser.open_new(url)
nameind=2
if(os.path.exists(f'{src}/quiz/{name}')):
while(True):
            if(os.path.exists(f'{src}/quiz/{name}({nameind})')):
nameind+=1
else:
break
name=f'{name}({nameind})'
os.system(f'mkdir {src}/quiz/{name}')
dirstocopy = ['img','sass']
filestocopy = ['index.html','output.css','output.css.map']
for item in dirstocopy:
shutil.copytree(f'{src}/quiz/template/{item}', f'{src}/quiz/{name}/{item}')
for item in filestocopy:
shutil.copyfile(f'{src}/quiz/template/{item}', f'{src}/quiz/{name}/{item}')
shutil.copyfile(f'{src}/final.js', f'{src}/quiz/{name}/index.js')
os.remove('final.js')
print('done!')
bf=open('push.bat','w')
bf.write(f'cd quiz && git init && git remote set-url origin https://{usn}:{pwd}@github.com/cubered/quiz.git && git add . && git commit -m "Added {name}" && git push origin master')
bf.close()
os.system('push')
'''subprocess.call(['git init'], cwd=f'{src}/quiz', shell=True)
subprocess.call([f'git remote set-url origin https://{usn}:{pwd}@github.com/cubered/quiz.git'], cwd=f'{src}/quiz', shell=True)
#subprocess.call(['git checkout gh-pages'], cwd=f'{src}/quiz', shell=True)
#subprocess.call(['git branch -u origin/gh-pages gh-pages'], cwd=f'{src}/quiz', shell=True)
subprocess.call(['git add .'], cwd=f'{src}/quiz', shell=True)
subprocess.call([f'git commit -m "Added {name}"'], cwd=f'{src}/quiz', shell=True)
subprocess.call(['git push origin master'], cwd=f'{src}/quiz', shell=True)'''
'''p = Popen(
['git push origin master'],
cwd=f'{src}/quiz',
shell=True,
stdin=PIPE,
stdout=PIPE,
stderr=PIPE)
p.stdin.write(f'{usn}')
p.stdin.write(f'{pwd}')
stdout, stderr = p.communicate()
print('---STDOUT---')
print(stdout)
print('---STDERR---')
print(stderr)'''
print('---')
clearScreen()
    '''print('Success! The quiz may only become visible in a few minutes.')
print(f'Link: https://quiz.cubered.xyz/{name}')'''
os.remove(f'{src}/push.bat')
'''if found:
pyperclip.copy(f'https://quiz.cubered.xyz/{name}')'''
if not(devMode):
shutil.rmtree(f'{src}/quiz', onerror=onerror)
'''finalfile=open('textfile.txt','w')
finalfile.write(f'Success! The quiz may only become visible in a few minutes!\nLink: https://quiz.cubered.xyz/{name}')
finalfile.close()
if devMode:
finalPrinter.main()
else:
subprocess.Popen(([r".\finalPrinter.exe"]))'''
#finalPrinter.main(f'https://quiz.cubered.xyz/{name}',True)
finalTab=Tk()
def closefinaltab():
global finalTab
finalTab.destroy()
def copytoclipboard():
pyperclip.copy(f'https://quiz.cubered.xyz/{name}')
    finalTab.title('Done!')
    finalTab.iconphoto(True, PhotoImage(file="./Data/bolygo.png"))
    finalText=Label(finalTab, text="Success! The quiz may only become visible in a few minutes!")
    Link=Label(finalTab,text=f'https://quiz.cubered.xyz/{name}',fg='blue',cursor='hand2')
    copyButton=Button(finalTab,text='Copy link',command=lambda: copytoclipboard())
    finalButton=Button(finalTab,text='Close',command=lambda: closefinaltab())
finalText.pack()
Link.pack()
copyButton.pack()
finalButton.pack()
Link.bind("<Button-1>", lambda e: callback(f'https://quiz.cubered.xyz/{name}'))
finalTab.mainloop()
print('Done!')
|
import matplotlib.pyplot as plt
class Student(object):
    def __init__(self, name, score):
        self.name = name
        self.score = score
# The constructor requires both a name and a score; the values below are illustrative.
bart = Student('Bart Simpson', 59)
|
import os,sys; sys.path.insert(0, os.path.abspath('.'))
from absl import app, flags, logging
import json
from time import sleep
import torch
import random
import pyglet
from truss_state import TrussState, BreakableTrussState
from models.config import args
from bfs import AStarNode, GreedyNode, search
from view import View
FLAGS = flags.FLAGS
def render_build(view, path, state, save_images=False, stay_open=True):
"""
run the build path actions in the simulation environment and render the results step by step
"""
for i, action in enumerate(path):
logging.debug("Doing action {}".format(action))
sleep(0.5)
state.action_update(action)
view.show(state)
if save_images:
filename = "logs/images/step_{:03d}.png".format(i)
pyglet.image.get_buffer_manager().get_color_buffer().save(filename)
if not view.window_still_open:
break
while stay_open and view.window_still_open:
view.show(state)
sleep(0.1)
def save_log_file(file_path, stats):
"""
Save the environment definition and action path for action replay visualisation
"""
if os.path.isfile(file_path):
with open(file_path, 'r') as f:
all_stats = json.load(f)
else:
all_stats = []
all_stats.append(stats)
with open(file_path, 'w') as f:
json.dump(all_stats, f)
def plan_path(start_state, greedy, heuristic, render,
checkpoint=None, model_config=None, batch_size=32,
eps=1000, save_images=False,
timeout=0, return_examples=False, pretrained_net=None, show_search=False):
"""
Runs the search to plan an action path for the truss build
Args:
start_state
greedy: use greedy search instead of A*
heuristic: type of heuristic to use, see astar.py
render: if True, render the build process graphically
checkpoint
model_config
batch_size
eps
save_images
timeout
return_examples: returns training examples if True
pretrained_net
"""
view = View() if render else None
if view:
view.show(start_state)
Node = GreedyNode if greedy else AStarNode
Node.heuristic = heuristic
Node.batch_size = batch_size
if pretrained_net is not None:
Node.nnet = pretrained_net
Node.device = 'cuda' if torch.cuda.is_available() else 'cpu'
elif (heuristic == 'HNet') or (heuristic == 'HNet_batch'):
Node.device = 'cuda' if torch.cuda.is_available() else 'cpu'
config = args[model_config]
Node.nnet = config['nnet'](config).to(Node.device)
if checkpoint is not None:
checkpoint = torch.load(checkpoint, map_location=Node.device)
Node.nnet.load_state_dict(checkpoint['state_dict'])
root = Node(state=start_state.clone())
end_state, stats = search(root, eps=eps, view=view if show_search else None)
if timeout and stats['time'] > timeout:
logging.debug("Timed out after {} seconds".format(stats['time']))
else:
logging.debug("Search took {} seconds".format(stats['time']))
logging.debug("Action path {}".format(stats['path']))
if view and view.window_still_open:
render_build(view, stats['path'], start_state.clone(), save_images=save_images)
logging.debug("Construction took {} seconds with {} steps and {} nodes explored".format(
stats['time'],
len(stats['path']),
stats['explored_nodes']
))
stats['scene_config'] = root.scene_config
if return_examples:
return root.get_train_examples(stats['path']) if stats['goal_complete'] else [], end_state._state
else:
return stats
def main(_argv):
if FLAGS.debug:
logging.set_verbosity(logging.DEBUG)
BreakableTrussState.max_unbraced_struts = FLAGS.max_unsupported_struts
if FLAGS.scene_config_file:
with open(FLAGS.scene_config_file, "r") as f:
config = json.load(f)
else:
config = random.choice(TrussState.get_start_configs(FLAGS.target_dist))
with open('logs/config.json', "w") as f:
json.dump(config, f)
checkpoint = "models/{}.pt".format(FLAGS.model_config) if FLAGS.checkpoint is None else FLAGS.checkpoint
plan_path(
start_state=BreakableTrussState.from_config(config, add_obstacles=FLAGS.add_obstacles),
greedy=FLAGS.greedy,
heuristic=FLAGS.heuristic,
render=FLAGS.render,
checkpoint=checkpoint,
model_config=FLAGS.model_config,
batch_size=FLAGS.batch_size,
eps=FLAGS.eps,
save_images=FLAGS.save_images,
show_search=FLAGS.show_search
)
if __name__ == '__main__':
flags.DEFINE_boolean('debug', False, 'show debug logging messages')
flags.DEFINE_integer('max_unsupported_struts', 1, 'maximum number of connected unsupported struts before collapse')
    flags.DEFINE_integer('eps', 0, 'number of expansions to do per stage. Unlimited if 0')
flags.DEFINE_integer('target_dist', 2, 'triangular lattice manhattan distance to target')
flags.DEFINE_string('log_file_path', "./logs/astar_log.json", 'result statistics log file')
    flags.DEFINE_string('model_config', "GIN", 'neural net configuration arguments')
flags.DEFINE_string('scene_config_file', None, 'scene configuration file')
flags.DEFINE_integer('batch_size', 32, 'network input batch size')
flags.DEFINE_boolean('render', True, 'display the build steps')
flags.DEFINE_boolean('show_search', False, 'show each search state')
flags.DEFINE_boolean('add_obstacles', False, 'add obstacles to the space')
    flags.DEFINE_string('checkpoint', None, 'neural net parameter checkpoint')
flags.DEFINE_boolean('greedy', True, 'use greedy search')
flags.DEFINE_boolean('cleanup', False, 'post processing to remove unnecessary components')
    flags.DEFINE_boolean('save_images', False, 'snapshot an image of each build step in the render')
flags.DEFINE_enum('heuristic', 'HNet_batch', ['Manhattan', 'Mean', 'HNet', 'HNet_batch', 'MeanTopK'],
'type of heuristic function to use in search')
app.run(main)
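# Example invocations (a sketch, not part of the original script; the script name
# "plan.py" and the scene file path are assumptions, and the second run additionally
# assumes a trained checkpoint at models/GIN.pt and an existing logs/ directory):
#   python plan.py --heuristic=Manhattan --render --eps=0
#   python plan.py --heuristic=HNet_batch --model_config=GIN --scene_config_file=scenes/example.json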
|
import glfw
from OpenGL.GL import *
class DisplayManager:
delta = 0
def __init__(self, width, height, title):
self.window = None
self.title = ''
self.last_time = 0
self.create_window(width, height, title)
def create_window(self, width, height, title):
if not glfw.init():
return
self.set_hints()
self.window = glfw.create_window(width, height, title, None, None)
glfw.make_context_current(self.window)
glfw.swap_interval(0)
glViewport(0, 0, width, height)
def render_window(self):
self.set_window_fps()
self.update_delta()
glfw.poll_events()
glfw.swap_buffers(self.window)
def update_delta(self):
current_time = glfw.get_time() * 1000
DisplayManager.delta = (current_time - self.last_time) / 1000
self.last_time = current_time
__nb_frames = 0
__last_time = 0
def set_window_fps(self):
current_time = glfw.get_time()
DisplayManager.__nb_frames += 1
if current_time - DisplayManager.__last_time > 1.0:
fps = str(DisplayManager.__nb_frames) + "fps"
ms = str("%.2f" % (1000/DisplayManager.__nb_frames)) + "ms"
self.title = fps + " | " + ms + " - pyGL"
DisplayManager.__nb_frames = 0
DisplayManager.__last_time += 1.0
glfw.set_window_title(self.window, self.title)
    def should_close(self):
        return bool(glfw.window_should_close(self.window))
@staticmethod
def set_hints():
glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 4)
glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 4)
glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL_TRUE)
@staticmethod
def terminate():
glfw.terminate()
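# Minimal usage sketch (not part of the original module): assumes a desktop with an
# OpenGL 4.4 capable driver; clears the screen each frame until the window is closed.
if __name__ == '__main__':
    display = DisplayManager(1280, 720, "pyGL")
    while not display.should_close():
        glClearColor(0.1, 0.1, 0.1, 1.0)  # dark grey background
        glClear(GL_COLOR_BUFFER_BIT)      # clear the colour buffer
        display.render_window()           # swap buffers, poll events, update FPS title
    DisplayManager.terminate()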
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/dialogflow_v2/proto/session.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from dialogflow_v2.proto import context_pb2 as google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_context__pb2
from dialogflow_v2.proto import intent_pb2 as google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2
from dialogflow_v2.proto import session_entity_type_pb2 as google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_session__entity__type__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
from google.type import latlng_pb2 as google_dot_type_dot_latlng__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/cloud/dialogflow_v2/proto/session.proto',
package='google.cloud.dialogflow.v2',
syntax='proto3',
serialized_pb=_b('\n.google/cloud/dialogflow_v2/proto/session.proto\x12\x1agoogle.cloud.dialogflow.v2\x1a\x1cgoogle/api/annotations.proto\x1a.google/cloud/dialogflow_v2/proto/context.proto\x1a-google/cloud/dialogflow_v2/proto/intent.proto\x1a:google/cloud/dialogflow_v2/proto/session_entity_type.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x17google/rpc/status.proto\x1a\x18google/type/latlng.proto\"\xbb\x01\n\x13\x44\x65tectIntentRequest\x12\x0f\n\x07session\x18\x01 \x01(\t\x12\x41\n\x0cquery_params\x18\x02 \x01(\x0b\x32+.google.cloud.dialogflow.v2.QueryParameters\x12;\n\x0bquery_input\x18\x03 \x01(\x0b\x32&.google.cloud.dialogflow.v2.QueryInput\x12\x13\n\x0binput_audio\x18\x05 \x01(\x0c\"\x96\x01\n\x14\x44\x65tectIntentResponse\x12\x13\n\x0bresponse_id\x18\x01 \x01(\t\x12=\n\x0cquery_result\x18\x02 \x01(\x0b\x32\'.google.cloud.dialogflow.v2.QueryResult\x12*\n\x0ewebhook_status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\"\x95\x02\n\x0fQueryParameters\x12\x11\n\ttime_zone\x18\x01 \x01(\t\x12)\n\x0cgeo_location\x18\x02 \x01(\x0b\x32\x13.google.type.LatLng\x12\x35\n\x08\x63ontexts\x18\x03 \x03(\x0b\x32#.google.cloud.dialogflow.v2.Context\x12\x16\n\x0ereset_contexts\x18\x04 \x01(\x08\x12K\n\x14session_entity_types\x18\x05 \x03(\x0b\x32-.google.cloud.dialogflow.v2.SessionEntityType\x12(\n\x07payload\x18\x06 \x01(\x0b\x32\x17.google.protobuf.Struct\"\xcb\x01\n\nQueryInput\x12\x44\n\x0c\x61udio_config\x18\x01 \x01(\x0b\x32,.google.cloud.dialogflow.v2.InputAudioConfigH\x00\x12\x35\n\x04text\x18\x02 \x01(\x0b\x32%.google.cloud.dialogflow.v2.TextInputH\x00\x12\x37\n\x05\x65vent\x18\x03 \x01(\x0b\x32&.google.cloud.dialogflow.v2.EventInputH\x00\x42\x07\n\x05input\"\xb8\x04\n\x0bQueryResult\x12\x12\n\nquery_text\x18\x01 \x01(\t\x12\x15\n\rlanguage_code\x18\x0f \x01(\t\x12%\n\x1dspeech_recognition_confidence\x18\x02 \x01(\x02\x12\x0e\n\x06\x61\x63tion\x18\x03 \x01(\t\x12+\n\nparameters\x18\x04 \x01(\x0b\x32\x17.google.protobuf.Struct\x12#\n\x1b\x61ll_required_params_present\x18\x05 \x01(\x08\x12\x18\n\x10\x66ulfillment_text\x18\x06 \x01(\t\x12H\n\x14\x66ulfillment_messages\x18\x07 \x03(\x0b\x32*.google.cloud.dialogflow.v2.Intent.Message\x12\x16\n\x0ewebhook_source\x18\x08 \x01(\t\x12\x30\n\x0fwebhook_payload\x18\t \x01(\x0b\x32\x17.google.protobuf.Struct\x12<\n\x0foutput_contexts\x18\n \x03(\x0b\x32#.google.cloud.dialogflow.v2.Context\x12\x32\n\x06intent\x18\x0b \x01(\x0b\x32\".google.cloud.dialogflow.v2.Intent\x12#\n\x1bintent_detection_confidence\x18\x0c \x01(\x02\x12\x30\n\x0f\x64iagnostic_info\x18\x0e \x01(\x0b\x32\x17.google.protobuf.Struct\"\xde\x01\n\x1cStreamingDetectIntentRequest\x12\x0f\n\x07session\x18\x01 \x01(\t\x12\x41\n\x0cquery_params\x18\x02 \x01(\x0b\x32+.google.cloud.dialogflow.v2.QueryParameters\x12;\n\x0bquery_input\x18\x03 \x01(\x0b\x32&.google.cloud.dialogflow.v2.QueryInput\x12\x18\n\x10single_utterance\x18\x04 \x01(\x08\x12\x13\n\x0binput_audio\x18\x06 \x01(\x0c\"\xf3\x01\n\x1dStreamingDetectIntentResponse\x12\x13\n\x0bresponse_id\x18\x01 \x01(\t\x12R\n\x12recognition_result\x18\x02 \x01(\x0b\x32\x36.google.cloud.dialogflow.v2.StreamingRecognitionResult\x12=\n\x0cquery_result\x18\x03 \x01(\x0b\x32\'.google.cloud.dialogflow.v2.QueryResult\x12*\n\x0ewebhook_status\x18\x04 \x01(\x0b\x32\x12.google.rpc.Status\"\x8a\x02\n\x1aStreamingRecognitionResult\x12X\n\x0cmessage_type\x18\x01 \x01(\x0e\x32\x42.google.cloud.dialogflow.v2.StreamingRecognitionResult.MessageType\x12\x12\n\ntranscript\x18\x02 \x01(\t\x12\x10\n\x08is_final\x18\x03 \x01(\x08\x12\x12\n\nconfidence\x18\x04 
\x01(\x02\"X\n\x0bMessageType\x12\x1c\n\x18MESSAGE_TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nTRANSCRIPT\x10\x01\x12\x1b\n\x17\x45ND_OF_SINGLE_UTTERANCE\x10\x02\"\x9d\x01\n\x10InputAudioConfig\x12\x41\n\x0e\x61udio_encoding\x18\x01 \x01(\x0e\x32).google.cloud.dialogflow.v2.AudioEncoding\x12\x19\n\x11sample_rate_hertz\x18\x02 \x01(\x05\x12\x15\n\rlanguage_code\x18\x03 \x01(\t\x12\x14\n\x0cphrase_hints\x18\x04 \x03(\t\"0\n\tTextInput\x12\x0c\n\x04text\x18\x01 \x01(\t\x12\x15\n\rlanguage_code\x18\x02 \x01(\t\"^\n\nEventInput\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\nparameters\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x15\n\rlanguage_code\x18\x03 \x01(\t*\xfb\x01\n\rAudioEncoding\x12\x1e\n\x1a\x41UDIO_ENCODING_UNSPECIFIED\x10\x00\x12\x1c\n\x18\x41UDIO_ENCODING_LINEAR_16\x10\x01\x12\x17\n\x13\x41UDIO_ENCODING_FLAC\x10\x02\x12\x18\n\x14\x41UDIO_ENCODING_MULAW\x10\x03\x12\x16\n\x12\x41UDIO_ENCODING_AMR\x10\x04\x12\x19\n\x15\x41UDIO_ENCODING_AMR_WB\x10\x05\x12\x1b\n\x17\x41UDIO_ENCODING_OGG_OPUS\x10\x06\x12)\n%AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE\x10\x07\x32\xd4\x02\n\x08Sessions\x12\xb4\x01\n\x0c\x44\x65tectIntent\x12/.google.cloud.dialogflow.v2.DetectIntentRequest\x1a\x30.google.cloud.dialogflow.v2.DetectIntentResponse\"A\x82\xd3\xe4\x93\x02;\"6/v2/{session=projects/*/agent/sessions/*}:detectIntent:\x01*\x12\x90\x01\n\x15StreamingDetectIntent\x12\x38.google.cloud.dialogflow.v2.StreamingDetectIntentRequest\x1a\x39.google.cloud.dialogflow.v2.StreamingDetectIntentResponse(\x01\x30\x01\x42\x9b\x01\n\x1e\x63om.google.cloud.dialogflow.v2B\x0cSessionProtoP\x01ZDgoogle.golang.org/genproto/googleapis/cloud/dialogflow/v2;dialogflow\xf8\x01\x01\xa2\x02\x02\x44\x46\xaa\x02\x1aGoogle.Cloud.Dialogflow.V2b\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_context__pb2.DESCRIPTOR,google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2.DESCRIPTOR,google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_session__entity__type__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,google_dot_type_dot_latlng__pb2.DESCRIPTOR,])
_AUDIOENCODING = _descriptor.EnumDescriptor(
name='AudioEncoding',
full_name='google.cloud.dialogflow.v2.AudioEncoding',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='AUDIO_ENCODING_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AUDIO_ENCODING_LINEAR_16', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AUDIO_ENCODING_FLAC', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AUDIO_ENCODING_MULAW', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AUDIO_ENCODING_AMR', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AUDIO_ENCODING_AMR_WB', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AUDIO_ENCODING_OGG_OPUS', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE', index=7, number=7,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2791,
serialized_end=3042,
)
_sym_db.RegisterEnumDescriptor(_AUDIOENCODING)
AudioEncoding = enum_type_wrapper.EnumTypeWrapper(_AUDIOENCODING)
AUDIO_ENCODING_UNSPECIFIED = 0
AUDIO_ENCODING_LINEAR_16 = 1
AUDIO_ENCODING_FLAC = 2
AUDIO_ENCODING_MULAW = 3
AUDIO_ENCODING_AMR = 4
AUDIO_ENCODING_AMR_WB = 5
AUDIO_ENCODING_OGG_OPUS = 6
AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE = 7
_STREAMINGRECOGNITIONRESULT_MESSAGETYPE = _descriptor.EnumDescriptor(
name='MessageType',
full_name='google.cloud.dialogflow.v2.StreamingRecognitionResult.MessageType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MESSAGE_TYPE_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TRANSCRIPT', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='END_OF_SINGLE_UTTERANCE', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2394,
serialized_end=2482,
)
_sym_db.RegisterEnumDescriptor(_STREAMINGRECOGNITIONRESULT_MESSAGETYPE)
_DETECTINTENTREQUEST = _descriptor.Descriptor(
name='DetectIntentRequest',
full_name='google.cloud.dialogflow.v2.DetectIntentRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='session', full_name='google.cloud.dialogflow.v2.DetectIntentRequest.session', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='query_params', full_name='google.cloud.dialogflow.v2.DetectIntentRequest.query_params', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='query_input', full_name='google.cloud.dialogflow.v2.DetectIntentRequest.query_input', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input_audio', full_name='google.cloud.dialogflow.v2.DetectIntentRequest.input_audio', index=3,
number=5, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=345,
serialized_end=532,
)
_DETECTINTENTRESPONSE = _descriptor.Descriptor(
name='DetectIntentResponse',
full_name='google.cloud.dialogflow.v2.DetectIntentResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='response_id', full_name='google.cloud.dialogflow.v2.DetectIntentResponse.response_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='query_result', full_name='google.cloud.dialogflow.v2.DetectIntentResponse.query_result', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='webhook_status', full_name='google.cloud.dialogflow.v2.DetectIntentResponse.webhook_status', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=535,
serialized_end=685,
)
_QUERYPARAMETERS = _descriptor.Descriptor(
name='QueryParameters',
full_name='google.cloud.dialogflow.v2.QueryParameters',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='time_zone', full_name='google.cloud.dialogflow.v2.QueryParameters.time_zone', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='geo_location', full_name='google.cloud.dialogflow.v2.QueryParameters.geo_location', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='contexts', full_name='google.cloud.dialogflow.v2.QueryParameters.contexts', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reset_contexts', full_name='google.cloud.dialogflow.v2.QueryParameters.reset_contexts', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='session_entity_types', full_name='google.cloud.dialogflow.v2.QueryParameters.session_entity_types', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload', full_name='google.cloud.dialogflow.v2.QueryParameters.payload', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=688,
serialized_end=965,
)
_QUERYINPUT = _descriptor.Descriptor(
name='QueryInput',
full_name='google.cloud.dialogflow.v2.QueryInput',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='audio_config', full_name='google.cloud.dialogflow.v2.QueryInput.audio_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='text', full_name='google.cloud.dialogflow.v2.QueryInput.text', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='event', full_name='google.cloud.dialogflow.v2.QueryInput.event', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='input', full_name='google.cloud.dialogflow.v2.QueryInput.input',
index=0, containing_type=None, fields=[]),
],
serialized_start=968,
serialized_end=1171,
)
_QUERYRESULT = _descriptor.Descriptor(
name='QueryResult',
full_name='google.cloud.dialogflow.v2.QueryResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='query_text', full_name='google.cloud.dialogflow.v2.QueryResult.query_text', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='language_code', full_name='google.cloud.dialogflow.v2.QueryResult.language_code', index=1,
number=15, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='speech_recognition_confidence', full_name='google.cloud.dialogflow.v2.QueryResult.speech_recognition_confidence', index=2,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='action', full_name='google.cloud.dialogflow.v2.QueryResult.action', index=3,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parameters', full_name='google.cloud.dialogflow.v2.QueryResult.parameters', index=4,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='all_required_params_present', full_name='google.cloud.dialogflow.v2.QueryResult.all_required_params_present', index=5,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fulfillment_text', full_name='google.cloud.dialogflow.v2.QueryResult.fulfillment_text', index=6,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fulfillment_messages', full_name='google.cloud.dialogflow.v2.QueryResult.fulfillment_messages', index=7,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='webhook_source', full_name='google.cloud.dialogflow.v2.QueryResult.webhook_source', index=8,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='webhook_payload', full_name='google.cloud.dialogflow.v2.QueryResult.webhook_payload', index=9,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_contexts', full_name='google.cloud.dialogflow.v2.QueryResult.output_contexts', index=10,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='intent', full_name='google.cloud.dialogflow.v2.QueryResult.intent', index=11,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='intent_detection_confidence', full_name='google.cloud.dialogflow.v2.QueryResult.intent_detection_confidence', index=12,
number=12, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='diagnostic_info', full_name='google.cloud.dialogflow.v2.QueryResult.diagnostic_info', index=13,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1174,
serialized_end=1742,
)
_STREAMINGDETECTINTENTREQUEST = _descriptor.Descriptor(
name='StreamingDetectIntentRequest',
full_name='google.cloud.dialogflow.v2.StreamingDetectIntentRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='session', full_name='google.cloud.dialogflow.v2.StreamingDetectIntentRequest.session', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='query_params', full_name='google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_params', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='query_input', full_name='google.cloud.dialogflow.v2.StreamingDetectIntentRequest.query_input', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='single_utterance', full_name='google.cloud.dialogflow.v2.StreamingDetectIntentRequest.single_utterance', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='input_audio', full_name='google.cloud.dialogflow.v2.StreamingDetectIntentRequest.input_audio', index=4,
number=6, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1745,
serialized_end=1967,
)
_STREAMINGDETECTINTENTRESPONSE = _descriptor.Descriptor(
name='StreamingDetectIntentResponse',
full_name='google.cloud.dialogflow.v2.StreamingDetectIntentResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='response_id', full_name='google.cloud.dialogflow.v2.StreamingDetectIntentResponse.response_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='recognition_result', full_name='google.cloud.dialogflow.v2.StreamingDetectIntentResponse.recognition_result', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='query_result', full_name='google.cloud.dialogflow.v2.StreamingDetectIntentResponse.query_result', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='webhook_status', full_name='google.cloud.dialogflow.v2.StreamingDetectIntentResponse.webhook_status', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1970,
serialized_end=2213,
)
_STREAMINGRECOGNITIONRESULT = _descriptor.Descriptor(
name='StreamingRecognitionResult',
full_name='google.cloud.dialogflow.v2.StreamingRecognitionResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message_type', full_name='google.cloud.dialogflow.v2.StreamingRecognitionResult.message_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='transcript', full_name='google.cloud.dialogflow.v2.StreamingRecognitionResult.transcript', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_final', full_name='google.cloud.dialogflow.v2.StreamingRecognitionResult.is_final', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='confidence', full_name='google.cloud.dialogflow.v2.StreamingRecognitionResult.confidence', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_STREAMINGRECOGNITIONRESULT_MESSAGETYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2216,
serialized_end=2482,
)
_INPUTAUDIOCONFIG = _descriptor.Descriptor(
name='InputAudioConfig',
full_name='google.cloud.dialogflow.v2.InputAudioConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='audio_encoding', full_name='google.cloud.dialogflow.v2.InputAudioConfig.audio_encoding', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='sample_rate_hertz', full_name='google.cloud.dialogflow.v2.InputAudioConfig.sample_rate_hertz', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='language_code', full_name='google.cloud.dialogflow.v2.InputAudioConfig.language_code', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='phrase_hints', full_name='google.cloud.dialogflow.v2.InputAudioConfig.phrase_hints', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2485,
serialized_end=2642,
)
_TEXTINPUT = _descriptor.Descriptor(
name='TextInput',
full_name='google.cloud.dialogflow.v2.TextInput',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='text', full_name='google.cloud.dialogflow.v2.TextInput.text', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='language_code', full_name='google.cloud.dialogflow.v2.TextInput.language_code', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2644,
serialized_end=2692,
)
_EVENTINPUT = _descriptor.Descriptor(
name='EventInput',
full_name='google.cloud.dialogflow.v2.EventInput',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.cloud.dialogflow.v2.EventInput.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='parameters', full_name='google.cloud.dialogflow.v2.EventInput.parameters', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='language_code', full_name='google.cloud.dialogflow.v2.EventInput.language_code', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2694,
serialized_end=2788,
)
_DETECTINTENTREQUEST.fields_by_name['query_params'].message_type = _QUERYPARAMETERS
_DETECTINTENTREQUEST.fields_by_name['query_input'].message_type = _QUERYINPUT
_DETECTINTENTRESPONSE.fields_by_name['query_result'].message_type = _QUERYRESULT
_DETECTINTENTRESPONSE.fields_by_name['webhook_status'].message_type = google_dot_rpc_dot_status__pb2._STATUS
_QUERYPARAMETERS.fields_by_name['geo_location'].message_type = google_dot_type_dot_latlng__pb2._LATLNG
_QUERYPARAMETERS.fields_by_name['contexts'].message_type = google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_context__pb2._CONTEXT
_QUERYPARAMETERS.fields_by_name['session_entity_types'].message_type = google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_session__entity__type__pb2._SESSIONENTITYTYPE
_QUERYPARAMETERS.fields_by_name['payload'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_QUERYINPUT.fields_by_name['audio_config'].message_type = _INPUTAUDIOCONFIG
_QUERYINPUT.fields_by_name['text'].message_type = _TEXTINPUT
_QUERYINPUT.fields_by_name['event'].message_type = _EVENTINPUT
_QUERYINPUT.oneofs_by_name['input'].fields.append(
_QUERYINPUT.fields_by_name['audio_config'])
_QUERYINPUT.fields_by_name['audio_config'].containing_oneof = _QUERYINPUT.oneofs_by_name['input']
_QUERYINPUT.oneofs_by_name['input'].fields.append(
_QUERYINPUT.fields_by_name['text'])
_QUERYINPUT.fields_by_name['text'].containing_oneof = _QUERYINPUT.oneofs_by_name['input']
_QUERYINPUT.oneofs_by_name['input'].fields.append(
_QUERYINPUT.fields_by_name['event'])
_QUERYINPUT.fields_by_name['event'].containing_oneof = _QUERYINPUT.oneofs_by_name['input']
_QUERYRESULT.fields_by_name['parameters'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_QUERYRESULT.fields_by_name['fulfillment_messages'].message_type = google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2._INTENT_MESSAGE
_QUERYRESULT.fields_by_name['webhook_payload'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_QUERYRESULT.fields_by_name['output_contexts'].message_type = google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_context__pb2._CONTEXT
_QUERYRESULT.fields_by_name['intent'].message_type = google_dot_cloud_dot_dialogflow__v2_dot_proto_dot_intent__pb2._INTENT
_QUERYRESULT.fields_by_name['diagnostic_info'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_STREAMINGDETECTINTENTREQUEST.fields_by_name['query_params'].message_type = _QUERYPARAMETERS
_STREAMINGDETECTINTENTREQUEST.fields_by_name['query_input'].message_type = _QUERYINPUT
_STREAMINGDETECTINTENTRESPONSE.fields_by_name['recognition_result'].message_type = _STREAMINGRECOGNITIONRESULT
_STREAMINGDETECTINTENTRESPONSE.fields_by_name['query_result'].message_type = _QUERYRESULT
_STREAMINGDETECTINTENTRESPONSE.fields_by_name['webhook_status'].message_type = google_dot_rpc_dot_status__pb2._STATUS
_STREAMINGRECOGNITIONRESULT.fields_by_name['message_type'].enum_type = _STREAMINGRECOGNITIONRESULT_MESSAGETYPE
_STREAMINGRECOGNITIONRESULT_MESSAGETYPE.containing_type = _STREAMINGRECOGNITIONRESULT
_INPUTAUDIOCONFIG.fields_by_name['audio_encoding'].enum_type = _AUDIOENCODING
_EVENTINPUT.fields_by_name['parameters'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
DESCRIPTOR.message_types_by_name['DetectIntentRequest'] = _DETECTINTENTREQUEST
DESCRIPTOR.message_types_by_name['DetectIntentResponse'] = _DETECTINTENTRESPONSE
DESCRIPTOR.message_types_by_name['QueryParameters'] = _QUERYPARAMETERS
DESCRIPTOR.message_types_by_name['QueryInput'] = _QUERYINPUT
DESCRIPTOR.message_types_by_name['QueryResult'] = _QUERYRESULT
DESCRIPTOR.message_types_by_name['StreamingDetectIntentRequest'] = _STREAMINGDETECTINTENTREQUEST
DESCRIPTOR.message_types_by_name['StreamingDetectIntentResponse'] = _STREAMINGDETECTINTENTRESPONSE
DESCRIPTOR.message_types_by_name['StreamingRecognitionResult'] = _STREAMINGRECOGNITIONRESULT
DESCRIPTOR.message_types_by_name['InputAudioConfig'] = _INPUTAUDIOCONFIG
DESCRIPTOR.message_types_by_name['TextInput'] = _TEXTINPUT
DESCRIPTOR.message_types_by_name['EventInput'] = _EVENTINPUT
DESCRIPTOR.enum_types_by_name['AudioEncoding'] = _AUDIOENCODING
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DetectIntentRequest = _reflection.GeneratedProtocolMessageType('DetectIntentRequest', (_message.Message,), dict(
DESCRIPTOR = _DETECTINTENTREQUEST,
__module__ = 'google.cloud.dialogflow_v2.proto.session_pb2'
,
__doc__ = """The request to detect user's intent.
Attributes:
session:
Required. The name of the session this query is sent to.
Format: ``projects/<Project ID>/agent/sessions/<Session ID>``.
It's up to the API caller to choose an appropriate session ID.
It can be a random number or some type of user identifier
(preferably hashed). The length of the session ID must not
exceed 36 bytes.
query_params:
Optional. The parameters of this query.
query_input:
Required. The input specification. It can be set to: 1. an
audio config which instructs the speech recognizer how to
process the speech audio, 2. a conversational query in the
form of text, or 3. an event that specifies which intent to
trigger.
input_audio:
Optional. The natural language speech audio to be processed.
This field should be populated iff ``query_input`` is set to
an input audio config. A single request can contain up to 1
minute of speech audio data.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2.DetectIntentRequest)
))
_sym_db.RegisterMessage(DetectIntentRequest)
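# Usage sketch (comments only, not part of the generated file): a typical DetectIntent
# call through the dialogflow_v2 client library, assuming valid credentials and
# hypothetical project/session IDs.
#
#   import dialogflow_v2
#   client = dialogflow_v2.SessionsClient()
#   session = client.session_path('my-project', 'my-session-id')
#   text_input = dialogflow_v2.types.TextInput(text='hi', language_code='en-US')
#   query_input = dialogflow_v2.types.QueryInput(text=text_input)
#   response = client.detect_intent(session=session, query_input=query_input)
#   print(response.query_result.fulfillment_text)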
DetectIntentResponse = _reflection.GeneratedProtocolMessageType('DetectIntentResponse', (_message.Message,), dict(
DESCRIPTOR = _DETECTINTENTRESPONSE,
__module__ = 'google.cloud.dialogflow_v2.proto.session_pb2'
,
__doc__ = """The message returned from the DetectIntent method.
Attributes:
response_id:
The unique identifier of the response. It can be used to
locate a response in the training example set or for reporting
issues.
query_result:
The results of the conversational query or event processing.
webhook_status:
Specifies the status of the webhook request.
``webhook_status`` is never populated in webhook requests.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2.DetectIntentResponse)
))
_sym_db.RegisterMessage(DetectIntentResponse)
QueryParameters = _reflection.GeneratedProtocolMessageType('QueryParameters', (_message.Message,), dict(
DESCRIPTOR = _QUERYPARAMETERS,
__module__ = 'google.cloud.dialogflow_v2.proto.session_pb2'
,
__doc__ = """Represents the parameters of the conversational query.
Attributes:
time_zone:
Optional. The time zone of this conversational query from the
`time zone database <https://www.iana.org/time-zones>`__,
e.g., America/New\_York, Europe/Paris. If not provided, the
time zone specified in agent settings is used.
geo_location:
Optional. The geo location of this conversational query.
contexts:
Optional. The collection of contexts to be activated before
this query is executed.
reset_contexts:
Optional. Specifies whether to delete all contexts in the
current session before the new ones are activated.
session_entity_types:
Optional. The collection of session entity types to replace or
extend developer entities with for this query only. The entity
synonyms apply to all languages.
payload:
Optional. This field can be used to pass custom data into the
webhook associated with the agent. Arbitrary JSON objects are
supported.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2.QueryParameters)
))
_sym_db.RegisterMessage(QueryParameters)
QueryInput = _reflection.GeneratedProtocolMessageType('QueryInput', (_message.Message,), dict(
DESCRIPTOR = _QUERYINPUT,
__module__ = 'google.cloud.dialogflow_v2.proto.session_pb2'
,
__doc__ = """Represents the query input. It can contain either:
1. An audio config which instructs the speech recognizer how to process
the speech audio.
2. A conversational query in the form of text.
3. An event that specifies which intent to trigger.
Attributes:
input:
Required. The input specification.
audio_config:
Instructs the speech recognizer how to process the speech
audio.
text:
The natural language text to be processed.
event:
The event to be processed.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2.QueryInput)
))
_sym_db.RegisterMessage(QueryInput)
QueryResult = _reflection.GeneratedProtocolMessageType('QueryResult', (_message.Message,), dict(
DESCRIPTOR = _QUERYRESULT,
__module__ = 'google.cloud.dialogflow_v2.proto.session_pb2'
,
__doc__ = """Represents the result of conversational query or event processing.
Attributes:
query_text:
The original conversational query text: - If natural language
text was provided as input, ``query_text`` contains a copy of
the input. - If natural language speech audio was provided as
input, ``query_text`` contains the speech recognition result.
If speech recognizer produced multiple alternatives, a
particular one is picked. - If an event was provided as input,
``query_text`` is not set.
language_code:
The language that was triggered during intent detection. See
`Language Support
<https://dialogflow.com/docs/reference/language>`__ for a list
of the currently supported language codes.
speech_recognition_confidence:
The Speech recognition confidence between 0.0 and 1.0. A
higher number indicates an estimated greater likelihood that
the recognized words are correct. The default of 0.0 is a
sentinel value indicating that confidence was not set. You
should not rely on this field as it isn't guaranteed to be
accurate, or even set. In particular this field isn't set in
Webhook calls and for StreamingDetectIntent since the
streaming endpoint has separate confidence estimates per
portion of the audio in StreamingRecognitionResult.
action:
The action name from the matched intent.
parameters:
The collection of extracted parameters.
all_required_params_present:
This field is set to: - ``false`` if the matched intent has
required parameters and not all of the required parameter
values have been collected. - ``true`` if all required
parameter values have been collected, or if the matched intent
doesn't contain any required parameters.
fulfillment_text:
The text to be pronounced to the user or shown on the screen.
fulfillment_messages:
The collection of rich messages to present to the user.
webhook_source:
If the query was fulfilled by a webhook call, this field is
set to the value of the ``source`` field returned in the
webhook response.
webhook_payload:
If the query was fulfilled by a webhook call, this field is
set to the value of the ``payload`` field returned in the
webhook response.
output_contexts:
The collection of output contexts. If applicable,
``output_contexts.parameters`` contains entries with name
``<parameter name>.original`` containing the original
parameter values before the query.
intent:
The intent that matched the conversational query. Some, not
all fields are filled in this message, including but not
limited to: ``name``, ``display_name`` and ``webhook_state``.
intent_detection_confidence:
The intent detection confidence. Values range from 0.0
(completely uncertain) to 1.0 (completely certain).
diagnostic_info:
The free-form diagnostic info. For example, this field could
contain webhook call latency.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2.QueryResult)
))
_sym_db.RegisterMessage(QueryResult)
StreamingDetectIntentRequest = _reflection.GeneratedProtocolMessageType('StreamingDetectIntentRequest', (_message.Message,), dict(
DESCRIPTOR = _STREAMINGDETECTINTENTREQUEST,
__module__ = 'google.cloud.dialogflow_v2.proto.session_pb2'
,
__doc__ = """The top-level message sent by the client to the
``StreamingDetectIntent`` method.
Multiple request messages should be sent in order:
1. The first message must contain ``session``, ``query_input`` plus
optionally ``query_params`` and/or ``single_utterance``. The message
must not contain ``input_audio``.
2. If ``query_input`` was set to a streaming input audio config, all
subsequent messages must contain only ``input_audio``. Otherwise,
finish the request stream.
Attributes:
session:
Required. The name of the session the query is sent to. Format
of the session name: ``projects/<Project
ID>/agent/sessions/<Session ID>``. It’s up to the API caller
to choose an appropriate session ID. It can be a random number or some
type of user identifier (preferably hashed). The length of the
session ID must not exceed 36 characters.
query_params:
Optional. The parameters of this query.
query_input:
Required. The input specification. It can be set to: 1. an
audio config which instructs the speech recognizer how to
process the speech audio, 2. a conversational query in the
form of text, or 3. an event that specifies which intent to
trigger.
single_utterance:
Optional. If ``false`` (default), recognition does not cease
until the client closes the stream. If ``true``, the
recognizer will detect a single spoken utterance in input
audio. Recognition ceases when it detects the audio's voice
has stopped or paused. In this case, once a detected intent is
received, the client should close the stream and start a new
request with a new stream as needed. This setting is ignored
when ``query_input`` is a piece of text or an event.
input_audio:
Optional. The input audio content to be recognized. Must be
sent if ``query_input`` was set to a streaming input audio
config. The complete audio over all streaming messages must
not exceed 1 minute.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2.StreamingDetectIntentRequest)
))
_sym_db.RegisterMessage(StreamingDetectIntentRequest)
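# Streaming sketch (comments only, not part of the generated file) showing the request
# ordering described above: the first request carries ``session`` and a ``query_input``
# audio config, every later request carries only ``input_audio`` chunks.
#
#   def requests(session, audio_chunks):
#       audio_config = InputAudioConfig(audio_encoding=AUDIO_ENCODING_LINEAR_16,
#                                       sample_rate_hertz=16000,
#                                       language_code='en-US')
#       yield StreamingDetectIntentRequest(session=session,
#                                          query_input=QueryInput(audio_config=audio_config),
#                                          single_utterance=True)
#       for chunk in audio_chunks:
#           yield StreamingDetectIntentRequest(input_audio=chunk)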
StreamingDetectIntentResponse = _reflection.GeneratedProtocolMessageType('StreamingDetectIntentResponse', (_message.Message,), dict(
DESCRIPTOR = _STREAMINGDETECTINTENTRESPONSE,
__module__ = 'google.cloud.dialogflow_v2.proto.session_pb2'
,
__doc__ = """The top-level message returned from the ``StreamingDetectIntent``
method.
Multiple response messages can be returned in order:
1. If the input was set to streaming audio, the first one or more
messages contain ``recognition_result``. Each ``recognition_result``
represents a more complete transcript of what the user said. The last
``recognition_result`` has ``is_final`` set to ``true``.
2. The next message contains ``response_id``, ``query_result`` and
optionally ``webhook_status`` if a WebHook was called.
Attributes:
response_id:
The unique identifier of the response. It can be used to
locate a response in the training example set or for reporting
issues.
recognition_result:
The result of speech recognition.
query_result:
The result of the conversational query or event processing.
webhook_status:
Specifies the status of the webhook request.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2.StreamingDetectIntentResponse)
))
_sym_db.RegisterMessage(StreamingDetectIntentResponse)
StreamingRecognitionResult = _reflection.GeneratedProtocolMessageType('StreamingRecognitionResult', (_message.Message,), dict(
DESCRIPTOR = _STREAMINGRECOGNITIONRESULT,
__module__ = 'google.cloud.dialogflow_v2.proto.session_pb2'
,
__doc__ = """Contains a speech recognition result corresponding to a portion of the
audio that is currently being processed or an indication that this is
the end of the single requested utterance.
Example:
1. transcript: "tube"
2. transcript: "to be a"
3. transcript: "to be"
4. transcript: "to be or not to be" is\_final: true
5. transcript: " that's"
6. transcript: " that is"
7. recognition\_event\_type:
``RECOGNITION_EVENT_END_OF_SINGLE_UTTERANCE``
8. transcript: " that is the question" is\_final: true
Only two of the responses contain final results (#4 and #8 indicated by
``is_final: true``). Concatenating these generates the full transcript:
"to be or not to be that is the question".
In each response we populate:
- for ``MESSAGE_TYPE_TRANSCRIPT``: ``transcript`` and possibly
``is_final``.
- for ``MESSAGE_TYPE_END_OF_SINGLE_UTTERANCE``: only ``event_type``.
Attributes:
message_type:
Type of the result message.
transcript:
Transcript text representing the words that the user spoke.
Populated if and only if ``event_type`` =
``RECOGNITION_EVENT_TRANSCRIPT``.
is_final:
The default of 0.0 is a sentinel value indicating
``confidence`` was not set. If ``false``, the
``StreamingRecognitionResult`` represents an interim result
that may change. If ``true``, the recognizer will not return
any further hypotheses about this piece of the audio. May only
be populated for ``event_type`` =
``RECOGNITION_EVENT_TRANSCRIPT``.
confidence:
The Speech confidence between 0.0 and 1.0 for the current
portion of audio. A higher number indicates an estimated
greater likelihood that the recognized words are correct. The
default of 0.0 is a sentinel value indicating that confidence
was not set. This field is typically only provided if
``is_final`` is true and you should not rely on it being
accurate or even set.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2.StreamingRecognitionResult)
))
_sym_db.RegisterMessage(StreamingRecognitionResult)
InputAudioConfig = _reflection.GeneratedProtocolMessageType('InputAudioConfig', (_message.Message,), dict(
DESCRIPTOR = _INPUTAUDIOCONFIG,
__module__ = 'google.cloud.dialogflow_v2.proto.session_pb2'
,
__doc__ = """Instructs the speech recognizer how to process the audio content.
Attributes:
audio_encoding:
Required. Audio encoding of the audio content to process.
sample_rate_hertz:
Required. Sample rate (in Hertz) of the audio content sent in
the query. Refer to `Cloud Speech API documentation
</speech/docs/basics>`__ for more details.
language_code:
Required. The language of the supplied audio. Dialogflow does
not do translations. See `Language Support
<https://dialogflow.com/docs/languages>`__ for a list of the
currently supported language codes. Note that queries in the
same session do not necessarily need to specify the same
language.
phrase_hints:
Optional. The collection of phrase hints which are used to
boost accuracy of speech recognition. Refer to `Cloud Speech
API documentation </speech/docs/basics#phrase-hints>`__ for
more details.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2.InputAudioConfig)
))
_sym_db.RegisterMessage(InputAudioConfig)
TextInput = _reflection.GeneratedProtocolMessageType('TextInput', (_message.Message,), dict(
DESCRIPTOR = _TEXTINPUT,
__module__ = 'google.cloud.dialogflow_v2.proto.session_pb2'
,
__doc__ = """Represents the natural language text to be processed.
Attributes:
text:
Required. The UTF-8 encoded natural language text to be
processed. Text length must not exceed 256 bytes.
language_code:
Required. The language of this conversational query. See
`Language Support <https://dialogflow.com/docs/languages>`__
for a list of the currently supported language codes. Note
that queries in the same session do not necessarily need to
specify the same language.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2.TextInput)
))
_sym_db.RegisterMessage(TextInput)
EventInput = _reflection.GeneratedProtocolMessageType('EventInput', (_message.Message,), dict(
DESCRIPTOR = _EVENTINPUT,
__module__ = 'google.cloud.dialogflow_v2.proto.session_pb2'
,
__doc__ = """Events allow for matching intents by event name instead of the natural
language input. For instance, input
``<event: { name: "welcome_event", parameters: { name: "Sam" } }>`` can
trigger a personalized welcome response. The parameter ``name`` may be
used by the agent in the response:
``"Hello #welcome_event.name! What can I do for you today?"``.
Attributes:
name:
Required. The unique identifier of the event.
parameters:
Optional. The collection of parameters associated with the
event.
language_code:
Required. The language of this query. See `Language Support
<https://dialogflow.com/docs/languages>`__ for a list of the
currently supported language codes. Note that queries in the
same session do not necessarily need to specify the same
language.
""",
# @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2.EventInput)
))
_sym_db.RegisterMessage(EventInput)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\036com.google.cloud.dialogflow.v2B\014SessionProtoP\001ZDgoogle.golang.org/genproto/googleapis/cloud/dialogflow/v2;dialogflow\370\001\001\242\002\002DF\252\002\032Google.Cloud.Dialogflow.V2'))
_SESSIONS = _descriptor.ServiceDescriptor(
name='Sessions',
full_name='google.cloud.dialogflow.v2.Sessions',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=3045,
serialized_end=3385,
methods=[
_descriptor.MethodDescriptor(
name='DetectIntent',
full_name='google.cloud.dialogflow.v2.Sessions.DetectIntent',
index=0,
containing_service=None,
input_type=_DETECTINTENTREQUEST,
output_type=_DETECTINTENTRESPONSE,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002;\"6/v2/{session=projects/*/agent/sessions/*}:detectIntent:\001*')),
),
_descriptor.MethodDescriptor(
name='StreamingDetectIntent',
full_name='google.cloud.dialogflow.v2.Sessions.StreamingDetectIntent',
index=1,
containing_service=None,
input_type=_STREAMINGDETECTINTENTREQUEST,
output_type=_STREAMINGDETECTINTENTRESPONSE,
options=None,
),
])
_sym_db.RegisterServiceDescriptor(_SESSIONS)
DESCRIPTOR.services_by_name['Sessions'] = _SESSIONS
# @@protoc_insertion_point(module_scope)
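# A minimal consumption sketch (not part of the generated code above): the
# `responses` iterator is assumed to come from a SessionsClient
# streaming_detect_intent() call, so treat the snippet as illustrative only.
#
#   for response in responses:
#       if response.HasField('recognition_result') and response.recognition_result.is_final:
#           print('final transcript:', response.recognition_result.transcript)
#       if response.HasField('query_result'):
#           print('fulfillment:', response.query_result.fulfillment_text)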
|
from typing import List
from moodle import BaseMoodle
from moodle.base.general import GeneralStatus
from .page import PagesResponse
class BasePage(BaseMoodle):
def get_pages_by_courses(self, courseids: List[int]) -> PagesResponse:
"""Returns a list of pages in a provided list of courses, if no list is provided all pages that the user can view will be returned.
Args:
courseids (List[int]): Array of course ids
Returns:
PagesResponse: Response
"""
data = self.moodle.post(
"mod_page_get_pages_by_courses",
courseids=courseids,
)
return self._tr(PagesResponse, **data)
def view_page(self, pageid: int) -> GeneralStatus:
"""Simulate the view.php web interface page: trigger events, completion, etc...
Args:
pageid (int): page instance id
Returns:
GeneralStatus: Response
"""
data = self.moodle.post(
"mod_page_view_page",
pageid=pageid,
)
return self._tr(GeneralStatus, **data)
|
import numpy as np
import cv2
class DinoResultsHandler:
def __init__(self, database):
self.database = database
def drawGreenBox(self, queryImage, segImage, kpImage):
# green box
gray = cv2.cvtColor(segImage,cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(gray,127,255,0)
(_, cnts, _) = cv2.findContours(thresh,
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
if len(cnts) > 0:
cnt = sorted(cnts, key = cv2.contourArea, reverse = True)[0]
rect = np.int32(cv2.boxPoints(cv2.minAreaRect(cnt)))
cv2.drawContours(kpImage, [rect], -1, (0,255,0),2)
return kpImage
def showTexts(self, matchedResults):
if len(matchedResults) == 0:
print("No samples are matched to the query !")
else:
for(i, (score, samplePath)) in enumerate(matchedResults):
description = self.database[samplePath[samplePath.rfind("/") + 1:]]
print("{}.{:.2f}% : {}".format(i + 1, score * 100, description))
results = cv2.imread(samplePath) # only show the highest matching image
cv2.imshow("Right: Matched Sample", results)
cv2.waitKey(5000)
|
import numpy as np
from swervedrive.icr.timescaler import TimeScaler
def assert_scaling_bounds(beta_dot_b, beta_2dot_b, phi_2dot_b, dbeta, d2beta, dphi_dot):
""" Function to ensure that the inequalities in the second paper hold,
given certain velocity/acceleeration bounds and commands. """
scaler = TimeScaler(beta_dot_b, beta_2dot_b, phi_2dot_b)
ds_lower, ds_upper, d2s_lower, d2s_upper = scaler.compute_scaling_bounds(
dbeta, d2beta, dphi_dot
)
# inequalities are reversed for negative values
(lower_beta, upper_beta) = (1, 0) if dbeta < 0 else (0, 1)
(lower_phi, upper_phi) = (1, 0) if dphi_dot < 0 else (0, 1)
ignore_beta = np.isclose(dbeta, 0, atol=0.01)
ignore_phi = np.isclose(dphi_dot, 0, atol=0.01)
if not ignore_beta:
# check that we satisfy equation 36a
assert ds_lower >= beta_dot_b[lower_beta] / dbeta
assert ds_upper <= beta_dot_b[upper_beta] / dbeta
# check that we satisfy equation 36b
assert d2s_lower >= (
(beta_2dot_b[lower_beta] - d2beta * (ds_upper ** 2)) / dbeta
)
assert d2s_upper <= (
(beta_2dot_b[upper_beta] - d2beta * (ds_upper ** 2)) / dbeta
)
if not ignore_phi:
        # check that we satisfy equation 36c
assert ds_lower >= phi_2dot_b[lower_phi] / dphi_dot
assert ds_upper <= phi_2dot_b[upper_phi] / dphi_dot
scaler.compute_scaling_parameters(ds_lower, ds_upper, d2s_lower, d2s_upper)
beta_dot, beta_2dot, phi_2dot = scaler.scale_motion(dbeta, d2beta, dphi_dot)
    assert beta_dot_b[0] <= beta_dot <= beta_dot_b[1]
    assert beta_2dot_b[0] <= beta_2dot <= beta_2dot_b[1]
    assert phi_2dot_b[0] <= phi_2dot <= phi_2dot_b[1]
def test_positive_velocities_in_range():
# angular vel/accel bounds
beta_dot_b = [-1, 1] # rad/sec
beta_2dot_b = [-1, 1] # rad/sec^2
# wheel rotation bounds
phi_2dot_b = [-1, 1]
# motion commands generated from the kinematic model for this timestep
dbeta, d2beta, dphi_dot = np.array([0.5]), np.array([0.25]), np.array([0.25])
assert_scaling_bounds(beta_dot_b, beta_2dot_b, phi_2dot_b, dbeta, d2beta, dphi_dot)
def test_negative_velocities_in_range():
# angular vel/accel bounds
beta_dot_b = [-1, 1] # rad/sec
beta_2dot_b = [-1, 1] # rad/sec^2
# wheel rotation bounds
phi_2dot_b = [-1, 1]
# motion commands generated from the kinematic model for this timestep
dbeta, d2beta, dphi_dot = np.array([-0.5]), np.array([-0.25]), np.array([-0.25])
assert_scaling_bounds(beta_dot_b, beta_2dot_b, phi_2dot_b, dbeta, d2beta, dphi_dot)
def test_positive_velocities_not_in_range():
# angular vel/accel bounds
beta_dot_b = [-1, 1] # rad/sec
beta_2dot_b = [-1, 1] # rad/sec^2
# wheel rotation bounds
phi_2dot_b = [-1, 1]
# motion commands generated from the kinematic model for this timestep
dbeta, d2beta, dphi_dot = np.array([5]), np.array([1.5]), np.array([1.5])
assert_scaling_bounds(beta_dot_b, beta_2dot_b, phi_2dot_b, dbeta, d2beta, dphi_dot)
def test_negative_velocities_not_in_range():
# angular vel/accel bounds
beta_dot_b = [-1, 1] # rad/sec
beta_2dot_b = [-1, 1] # rad/sec^2
# wheel rotation bounds
phi_2dot_b = [-1, 1]
# motion commands generated from the kinematic model for this timestep
dbeta, d2beta, dphi_dot = np.array([-5]), np.array([-1.5]), np.array([-1.5])
assert_scaling_bounds(beta_dot_b, beta_2dot_b, phi_2dot_b, dbeta, d2beta, dphi_dot)
def test_dbeta_zero():
# angular vel/accel bounds
beta_dot_b = [-1, 1] # rad/sec
beta_2dot_b = [-1, 1] # rad/sec^2
# wheel rotation bounds
phi_2dot_b = [-1, 1]
# motion commands generated from the kinematic model for this timestep
    dbeta, d2beta, dphi_dot = np.array([0.01]), np.array([-1.5]), np.array([-1.5])
assert_scaling_bounds(beta_dot_b, beta_2dot_b, phi_2dot_b, dbeta, d2beta, dphi_dot)
def test_d2beta_zero():
# angular vel/accel bounds
beta_dot_b = [-1, 1] # rad/sec
beta_2dot_b = [-1, 1] # rad/sec^2
# wheel rotation bounds
phi_2dot_b = [-1, 1]
# motion commands generated from the kinematic model for this timestep
dbeta, d2beta, dphi_dot = np.array([5]), np.array([0.01]), np.array([-1.5])
assert_scaling_bounds(beta_dot_b, beta_2dot_b, phi_2dot_b, dbeta, d2beta, dphi_dot)
def test_dphi_dot_zero():
# angular vel/accel bounds
beta_dot_b = [-1, 1] # rad/sec
beta_2dot_b = [-1, 1] # rad/sec^2
# wheel rotation bounds
phi_2dot_b = [-1, 1]
# motion commands generated from the kinematic model for this timestep
dbeta, d2beta, dphi_dot = np.array([-5]), np.array([-1.5]), np.array([0])
assert_scaling_bounds(beta_dot_b, beta_2dot_b, phi_2dot_b, dbeta, d2beta, dphi_dot)
def test_opposing_signs():
# angular vel/accel bounds
beta_dot_b = [-1, 1] # rad/sec
beta_2dot_b = [-1, 1] # rad/sec^2
# wheel rotation bounds
phi_2dot_b = [-1, 1]
# motion commands generated from the kinematic model for this timestep
    dbeta, d2beta, dphi_dot = np.array([5]), np.array([-1.5]), np.array([-5])
assert_scaling_bounds(beta_dot_b, beta_2dot_b, phi_2dot_b, dbeta, d2beta, dphi_dot)
def test_all_zero():
# angular vel/accel bounds
beta_dot_b = [-1, 1] # rad/sec
beta_2dot_b = [-1, 1] # rad/sec^2
# wheel rotation bounds
phi_2dot_b = [-1, 1]
# motion commands generated from the kinematic model for this timestep
dbeta, d2beta, dphi_dot = np.array([0]), np.array([0]), np.array([0])
assert_scaling_bounds(beta_dot_b, beta_2dot_b, phi_2dot_b, dbeta, d2beta, dphi_dot)
|
"""
This plugin is primarily useful for plugin authors who want to debug
their plugins.
It prints each hook that is called to stderr, along with details of
the event that was passed to the hook.
To do that, this plugin overrides :meth:`nose2.events.Plugin.register`
and, after registration, replaces all existing
:class:`nose2.events.Hook` instances in ``session.hooks`` with
instances of a Hook subclass that prints information about each call.
"""
import sys
from nose2 import events
INDENT = []
__unittest = True
class PrintHooks(events.Plugin):
"""Print hooks as they are called"""
configSection = 'print-hooks'
commandLineSwitch = ('P', 'print-hooks',
'Print names of hooks in order of execution')
def register(self):
"""Override to inject noisy hook instances.
Replaces Hook instances in ``self.session.hooks.hooks`` with
noisier objects.
"""
super(PrintHooks, self).register()
# now we can be sure that all other plugins have loaded
# and this plugin is active, patch in our hook class
self.session.hooks.hookClass = NoisyHook
for attr, hook in self.session.hooks.hooks.items():
newhook = NoisyHook(attr)
newhook.plugins = hook.plugins
self.session.hooks.hooks[attr] = newhook
class NoisyHook(events.Hook):
def __call__(self, event):
_report(self.method, event)
_indent()
try:
return super(NoisyHook, self).__call__(event)
finally:
_dedent()
def _report(method, event):
sys.stderr.write("\n%s%s: %s" % (''.join(INDENT), method, event))
def _indent():
INDENT.append(' ')
def _dedent():
if INDENT:
INDENT.pop()
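# Usage sketch: the plugin can be switched on from the command line with the
# switch registered above (``nose2 -P`` or ``nose2 --print-hooks``), or kept
# always on through the ``[print-hooks]`` section of a nose2 config file.
# The config file name below is the usual default and is only an assumption:
#
#   # unittest.cfg
#   [print-hooks]
#   always-on = True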
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import unittest
import paddle.fluid as fluid
from paddle.fluid.dygraph.dygraph_to_static import ProgramTranslator
from paddle.fluid.dygraph.jit import declarative
program_translator = ProgramTranslator()
# 0. for in range var.numpy()[0]
@declarative
def for_in_range(x):
z = fluid.layers.fill_constant([1], 'int32', 0)
x = fluid.dygraph.to_variable(x)
for i in range(x.numpy()[0]):
z = z + i
return z
# 1. for iter list
@declarative
def for_iter_list(x_array):
z = fluid.layers.fill_constant([1], 'int32', 0)
for x in x_array:
z = z + x
return z
# 2. for enumerate list
@declarative
def for_enumerate_list(x_array):
z = fluid.layers.fill_constant([1], 'int32', 0)
for i, x in enumerate(x_array):
z = z + x + i
return z
# 3. for iter var.numpy()
@declarative
def for_iter_var_numpy(x_array):
z = fluid.layers.fill_constant([1], 'int32', 0)
x_array = fluid.dygraph.to_variable(x_array)
for x in x_array.numpy():
z = z + x
return z
# 4. for enumerate var.numpy()
@declarative
def for_enumerate_var_numpy(x_array):
y = fluid.layers.fill_constant([1], 'int32', 0)
z = fluid.layers.fill_constant([1], 'int32', 0)
x_array = fluid.dygraph.to_variable(x_array)
for i, x in enumerate(x_array.numpy()):
y = y + i
z = z + x
return y, z
# 5. for enumerate var.numpy() with start
@declarative
def for_enumerate_var_numpy_with_start(x_array):
y = fluid.layers.fill_constant([1], 'int32', 0)
z = fluid.layers.fill_constant([1], 'int32', 0)
x_array = fluid.dygraph.to_variable(x_array)
for i, x in enumerate(x_array.numpy(), 1):
y = y + i
z = z + x
return y, z
# 6. for in range with break
@declarative
def for_in_range_with_break(x):
z = fluid.layers.fill_constant([1], 'int32', 0)
x = fluid.dygraph.to_variable(x)
for i in range(x.numpy()[0]):
z = z + i
if i > 2:
break
return z
# 7. for enumerate var.numpy() with break
@declarative
def for_enumerate_var_numpy_with_break(x_array):
y = fluid.layers.fill_constant([1], 'int32', 0)
z = fluid.layers.fill_constant([1], 'int32', 0)
x_array = fluid.dygraph.to_variable(x_array)
for i, x in enumerate(x_array.numpy()):
y = y + i
z = z + x
if i > 2:
break
return y, z
# 8. for enumerate var.numpy() with continue
@declarative
def for_enumerate_var_numpy_with_continue(x_array):
y = fluid.layers.fill_constant([1], 'int32', 0)
z = fluid.layers.fill_constant([1], 'int32', 0)
x_array = fluid.dygraph.to_variable(x_array)
for i, x in enumerate(x_array.numpy()):
y = y + i
if i > 2:
continue
z = z + x
return y, z
# 9. for enumerate var.numpy() with start & break
@declarative
def for_enumerate_var_numpy_with_start_break(x_array):
y = fluid.layers.fill_constant([1], 'int32', 0)
z = fluid.layers.fill_constant([1], 'int32', 0)
x_array = fluid.dygraph.to_variable(x_array)
for i, x in enumerate(x_array.numpy(), 1):
y = y + i
z = z + x
if i > 2:
break
return y, z
# 10. for enumerate var.numpy() with start & continue
@declarative
def for_enumerate_var_numpy_with_start_continue(x_array):
y = fluid.layers.fill_constant([1], 'int32', 0)
z = fluid.layers.fill_constant([1], 'int32', 0)
x_array = fluid.dygraph.to_variable(x_array)
for i, x in enumerate(x_array.numpy(), 1):
y = y + i
if i > 2:
continue
z = z + x
return y, z
# 11. for iter var
@declarative
def for_iter_var(x_array):
z = fluid.layers.fill_constant([1], 'int32', 0)
x_array = fluid.dygraph.to_variable(x_array)
for x in x_array:
z = z + x
return z
# 12. for enumerate var
@declarative
def for_enumerate_var(x_array):
y = fluid.layers.fill_constant([1], 'int32', 0)
z = fluid.layers.fill_constant([1], 'int32', 0)
x_array = fluid.dygraph.to_variable(x_array)
for i, x in enumerate(x_array):
y = y + i
z = z + x
return y, z
# 13. for iter list[var]
@declarative
def for_iter_var_list(x):
# 1. prepare data, ref test_list.py
x = fluid.dygraph.to_variable(x)
iter_num = fluid.layers.fill_constant(shape=[1], value=5, dtype="int32")
a = []
for i in range(iter_num):
a.append(x + i)
# 2. iter list[var]
y = fluid.layers.fill_constant([1], 'int32', 0)
for x in a:
y = y + x
return y
# 14. for enumerate list[var]
@declarative
def for_enumerate_var_list(x):
# 1. prepare data, ref test_list.py
x = fluid.dygraph.to_variable(x)
iter_num = fluid.layers.fill_constant(shape=[1], value=5, dtype="int32")
a = []
for i in range(iter_num):
a.append(x + i)
# 2. iter list[var]
y = fluid.layers.fill_constant([1], 'int32', 0)
z = fluid.layers.fill_constant([1], 'int32', 0)
for i, x in enumerate(a):
y = y + i
z = z + x
return y, z
class TestTransformBase(unittest.TestCase):
def setUp(self):
self.place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda(
) else fluid.CPUPlace()
self.set_input()
self.set_test_func()
def set_input(self):
self.input = [1, 2, 3]
def set_test_func(self):
raise NotImplementedError(
"For Enumerate test should implement set_test_func")
def _run(self, to_static):
program_translator.enable(to_static)
with fluid.dygraph.guard():
return self.dygraph_func(self.input)
def get_dygraph_output(self):
return self._run(to_static=False)
def get_static_output(self):
return self._run(to_static=True)
class TestTransform(TestTransformBase):
def transformed_result_compare(self):
dy_outs = self.get_dygraph_output()
if not isinstance(dy_outs, tuple):
dy_outs = (dy_outs, )
st_outs = self.get_static_output()
if not isinstance(st_outs, tuple):
st_outs = (st_outs, )
for x, y in zip(dy_outs, st_outs):
self.assertTrue(np.allclose(x.numpy(), y.numpy()))
class TestTransformError(TestTransformBase):
def transformed_error(self, etype):
with self.assertRaises(etype):
dy_out = self.get_dygraph_output()
st_out = self.get_static_output()
class TestForInRange(TestTransform):
def set_input(self):
self.input = np.array([5])
def set_test_func(self):
self.dygraph_func = for_in_range
def test_transformed_result_compare(self):
self.transformed_result_compare()
class TestForIterList(TestTransform):
def set_test_func(self):
self.dygraph_func = for_iter_list
def test_transformed_result_compare(self):
self.transformed_result_compare()
class TestForEnumerateSimple(TestForIterList):
def set_test_func(self):
self.dygraph_func = for_enumerate_list
class TestForInRangeWithBreak(TestForInRange):
def set_test_func(self):
self.dygraph_func = for_in_range_with_break
class TestForIterVarNumpy(TestTransform):
def set_input(self):
self.input = np.array([1, 2, 3, 4, 5])
def set_test_func(self):
self.dygraph_func = for_iter_var_numpy
def test_transformed_result_compare(self):
self.transformed_result_compare()
class TestForEnumerateVarNumpy(TestForIterVarNumpy):
def set_test_func(self):
self.dygraph_func = for_enumerate_var_numpy
class TestForEnumerateVarNumpyWithStart(TestForIterVarNumpy):
def set_test_func(self):
self.dygraph_func = for_enumerate_var_numpy_with_start
class TestForEnumerateVarNumpyWithBreak(TestForIterVarNumpy):
def set_test_func(self):
self.dygraph_func = for_enumerate_var_numpy_with_break
class TestForEnumerateVarNumpyWithContinue(TestForIterVarNumpy):
def set_test_func(self):
self.dygraph_func = for_enumerate_var_numpy_with_continue
class TestForEnumerateVarNumpyWithStartAndBreak(TestForIterVarNumpy):
def set_test_func(self):
self.dygraph_func = for_enumerate_var_numpy_with_start_break
class TestForEnumerateVarNumpyWithStartAndContinue(TestForIterVarNumpy):
def set_test_func(self):
self.dygraph_func = for_enumerate_var_numpy_with_start_continue
class TestForIterVar(TestForIterVarNumpy):
def set_test_func(self):
self.dygraph_func = for_iter_var
class TestForEnumerateVar(TestForIterVarNumpy):
def set_test_func(self):
self.dygraph_func = for_enumerate_var
class TestForIterVarList(TestForInRange):
def set_test_func(self):
self.dygraph_func = for_iter_var_list
class TestForEnumerateVarList(TestForInRange):
def set_test_func(self):
self.dygraph_func = for_enumerate_var_list
if __name__ == '__main__':
unittest.main()
|
# functions
def l():
print('+ ' * 20)
def soma(a, b):
print('+ ' * 15)
    print(f'A is {a} and B is {b}')
    s = a + b
    print(f'The sum of A and B is {s}')
# main program
soma(b=4, a=5)
soma(8, 9)
soma(2, 1)
# pack parameters
def contador(*num):
tam = len(num)
s = 0
for n in num:
s += n
    print(f'Received the values {num}; in total there are {tam} numbers and the sum is {s}')
def dobra(lst):
pos = 0
while pos < len(lst):
lst[pos] *= 2
pos += 1
# main program
l()
contador(1, 4, 3)
contador(5, 8, 3, 9, 5)
contador(12, 3)
l()
print()
l()
print('Doubling values')
lista = [1, 5, 3, 9, 6, 2]
print(f'Original list {lista}')
dobra(lista)
print(f'Doubled list {lista}')
l()
|
from flask import request
from flask_login import current_user
from oarepo_enrollment_permissions.proxies import current_enrollment_permissions
class PermissionCollection:
def __init__(self, *permissions, combining_operation='or'):
self.permissions = permissions
self.combining_operation = combining_operation
    def can(self):
        # With 'or' combining, the first permission that allows access wins;
        # with 'and' combining, the first permission that denies access wins.
        if not self.permissions:
            return False
        for perm in self.permissions:
            if perm.can():
                if self.combining_operation == 'or':
                    return True
            else:
                if self.combining_operation == 'and':
                    return False
        # Every permission was checked without an early return:
        # 'and' means none denied (allow), 'or' means none allowed (deny).
        return self.combining_operation == 'and'
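# A small illustrative sketch (p1 and p2 are hypothetical permission objects
# exposing .can(), e.g. the factories defined below):
#
#   combined = PermissionCollection(p1, p2, combining_operation='and')
#   combined.can()  # True only if both p1.can() and p2.can() return True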
def read_permission_factory(*args, **kwargs):
return current_enrollment_permissions.get_action_permission(current_user, 'read', **kwargs)
def update_permission_factory(*args, **kwargs):
return current_enrollment_permissions.get_action_permission(current_user, 'update', **kwargs)
def delete_permission_factory(*args, **kwargs):
return current_enrollment_permissions.get_action_permission(current_user, 'delete', **kwargs)
def create_permission_factory(*args, **kwargs):
return current_enrollment_permissions.get_action_permission(
current_user, 'create', data=request.json, **kwargs)
|
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from .base_camera import BaseCamera
from .perspective import PerspectiveCamera
from .panzoom import PanZoomCamera
from .arcball import ArcballCamera
from .turntable import TurntableCamera
from .fly import FlyCamera
def make_camera(cam_type, *args, **kwargs):
""" Factory function for creating new cameras using a string name.
Parameters
----------
    cam_type : str
        May be one of:
        * 'base' : Creates :class:`BaseCamera`
        * 'panzoom' : Creates :class:`PanZoomCamera`
        * 'perspective' : Creates :class:`PerspectiveCamera`
        * 'turntable' : Creates :class:`TurntableCamera`
        * 'arcball' : Creates :class:`ArcballCamera`
        * 'fly' : Creates :class:`FlyCamera`
        * None : Creates :class:`BaseCamera`
Notes
-----
All extra arguments are passed to the __init__ method of the selected
Camera class.
"""
cam_types = {None: BaseCamera}
for camType in (BaseCamera, PanZoomCamera, PerspectiveCamera,
TurntableCamera, FlyCamera, ArcballCamera):
cam_types[camType.__name__[:-6].lower()] = camType
try:
return cam_types[cam_type](*args, **kwargs)
except KeyError:
raise KeyError('Unknown camera type "%s". Options are: %s' %
(cam_type, cam_types.keys()))
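# A brief usage sketch; extra keyword arguments are simply forwarded to the
# selected camera class (the kwargs below assume the usual camera parameters):
#
#   cam = make_camera('panzoom', aspect=1)    # -> PanZoomCamera
#   cam = make_camera('turntable', fov=60)    # -> TurntableCamera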
|
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'bitpay')))
from splinter import Browser
import time
import six
import json
from bitpay_client import Client
import bitpay_key_utils as key_utils
import re
ROOT_ADDRESS = os.environ['RCROOTADDRESS']
USER_NAME = os.environ['RCTESTUSER']
PASSWORD = os.environ['RCTESTPASSWORD']
PEM = '-----BEGIN EC PRIVATE KEY-----\nMHQCAQEEICg7E4NN53YkaWuAwpoqjfAofjzKI7Jq1f532dX+0O6QoAcGBSuBBAAK\noUQDQgAEjZcNa6Kdz6GQwXcUD9iJ+t1tJZCx7hpqBuJV2/IrQBfue8jh8H7Q/4vX\nfAArmNMaGotTpjdnymWlMfszzXJhlw==\n-----END EC PRIVATE KEY-----\n'
client = Client()
invoice = None
exception = None
@given(u'the user pairs with BitPay with a valid pairing code')
def step_impl(context):
time.sleep(1)
claim_code = get_claim_code_from_server()
global client
client = Client(api_uri=ROOT_ADDRESS, insecure=True, pem=PEM)
try:
client.pair_pos_client(claim_code)
except Exception as error:
if error.args[0] == "500: Unable to create token because of too many requests.":
time.sleep(60)
client.pair_pos_client(claim_code)
assert client.tokens['pos']
@given(u'the user requests a client-side pairing')
def step_impl(context):
global pairing_code
time.sleep(1)
client = Client(api_uri=ROOT_ADDRESS, insecure=True, pem=PEM)
try:
pairing_code = client.create_token("merchant")
except Exception as error:
if error.args[0] == "500: Unable to create token because of too many requests.":
time.sleep(60)
pairing_code = client.create_token("merchant")
@then(u'they will receive a claim code')
def step_impl(context):
assert re.match("^\w{7,7}$", pairing_code) != None
@then(u'the user is paired with BitPay')
def step_impl(context):
assert client.verify_tokens()
@given(u'the user fails to pair with a semantically {valid} code {code}')
def step_impl(context, code, valid):
time.sleep(1)
try:
client.pair_pos_client(code)
except Exception as error:
global exception
exception = error
if exception.args[0] == "500: Unable to create token because of too many requests.":
time.sleep(60)
try:
client.pair_pos_client(code)
except Exception as error:
global exception
exception = error
@given(u'that a user knows an invoice id')
def step_impl(context):
global client
global invoice
client = client_from_stored_values()
create_invoice(10, "USD")
@then(u'they can retrieve that invoice')
def step_impl(context):
global client
global invoice
amount = invoice['price']
invoice_id = invoice['id']
retrieved_invoice = client.get_invoice(invoice_id)
assert amount == retrieved_invoice['price']
@then(u'they will receive a {error} matching {message}')
def step_impl(context, error, message):
assert exception.__class__.__name__ == error and exception.args[0] == message, "%s != %s" % (exception.args[0], message)
@given(u'the user is authenticated with BitPay')
def step_impl(context):
global client
client = client_from_stored_values()
assert client.verify_tokens()
@given(u'the user waits {wait:d} seconds')
def step_impl(context, wait):
time.sleep(wait)
@when(u'the user creates an invoice for {amount:f} {currency} with float input')
def step_impl(context, amount, currency):
create_invoice(amount, currency)
@when(u'the user creates an invoice for {amount:d} {currency} with integer input')
def step_impl(context, amount, currency):
create_invoice(amount, currency)
@when(u'the user creates an invoice for {amount} {currency} with string input')
def step_impl(context, amount, currency):
if amount == '""':
amount = ""
if currency == '""':
currency == ""
create_invoice(amount, currency)
@then(u'they should recieve an invoice in response for {amount:g} {currency}')
def step_impl(context, amount, currency):
global invoice
assert invoice['price'] == amount and invoice['currency'] == currency
def create_invoice(amount, currency):
global client
global invoice
try:
token = client.tokens['pos']
invoice = client.create_invoice({"price": amount, "currency": currency, "token": token })
except Exception as error:
global exception
print(error.__class__.__name__)
print(error.args[0])
exception = error
def client_from_stored_values():
for f in ["local.pem", "tokens.json"]:
try:
open("temp/" + f)
exists = True
except:
exists = False
break
if exists:
f = open("temp/local.pem", 'r')
pem = f.read()
f = open("temp/tokens.json", 'r')
token = f.read()
token = json.loads(token)
client = Client(api_uri=ROOT_ADDRESS, insecure=True, pem=pem, tokens=token)
else:
claim_code = get_claim_code_from_server()
pem = key_utils.generate_pem()
client = Client(api_uri=ROOT_ADDRESS, insecure=True, pem=pem)
token = json.dumps(client.pair_pos_client(claim_code))
if not os.path.exists("temp"):
os.makedirs("temp")
f = open("temp/local.pem", 'w')
f.write(pem)
f = open("temp/tokens.json", 'w')
f.write(token)
return client
def get_claim_code_from_server():
browser = Browser('phantomjs', service_args=['--ignore-ssl-errors=true'])
browser.visit(ROOT_ADDRESS + "/merchant-login")
time.sleep(5)
browser.fill_form({"email": USER_NAME, "password": PASSWORD})
browser.find_by_id("loginButton")[0].click()
time.sleep(1)
browser.visit(ROOT_ADDRESS + "/api-tokens")
browser.find_by_css(".token-access-new-button").find_by_css(".btn").find_by_css(".icon-plus")[0].click()
browser.find_by_id("token-new-form").find_by_css(".btn")[0].click()
return browser.find_by_css(".token-claimcode")[0].html
|
import os
import luigi
from resolving import ResolvingWorkflow
def resolve_separately(identifier, max_jobs=48, target='local'):
task = ResolvingWorkflow
path = '/g/kreshuk/data/FIB25/data.n5'
exp_path = '/g/kreshuk/data/FIB25/exp_data/mc.n5'
# objects_group = 'resolving/oracle/perfect_oracle'
objects_group = 'resolving/oracle/%s' % identifier
assignment_in_key = 'node_labels/multitcut_filtered'
assignment_out_key = 'node_labels/resolve_separately/%s' % identifier
tmp_folder = './tmp_folders/tmp_resolve_separately_%s' % identifier
os.makedirs(tmp_folder, exist_ok=True)
# TODO write to actual output
ws_key = 'volumes/segmentation/watershed'
out_key = 'volumes/segmentation/resolve_separately/%s' % identifier
t = task(tmp_folder=tmp_folder, config_dir='./configs',
max_jobs=max_jobs, target=target,
problem_path=exp_path, path=path,
objects_group=objects_group,
assignment_in_key=assignment_in_key,
assignment_out_key=assignment_out_key,
ws_key=ws_key, out_key=out_key)
ret = luigi.build([t], local_scheduler=True)
assert ret, "Resolving failed"
if __name__ == '__main__':
resolve_separately('perfect_oracle')
|
from __future__ import annotations
from dataclasses import dataclass
import bbgo_pb2
from ..enums import ChannelType
from ..enums import DepthType
@dataclass
class Subscription:
exchange: str
channel: ChannelType
symbol: str
depth: DepthType = None
interval: str = None
def to_pb(self) -> bbgo_pb2.Subscription:
subscription_pb = bbgo_pb2.Subscription(
exchange=self.exchange,
channel=self.channel.value,
symbol=self.symbol,
)
if self.depth is not None:
subscription_pb.depth = self.depth.value
if self.interval is not None:
subscription_pb.interval = self.interval
return subscription_pb
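# A small construction sketch (the enum member names below are illustrative
# assumptions; use whatever members ChannelType/DepthType actually define):
#
#   sub = Subscription(exchange='binance', channel=ChannelType.BOOK,
#                      symbol='BTCUSDT', depth=DepthType.FULL)
#   request_pb = sub.to_pb()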
|
"""Unit tests for the Robot Framework Jenkins plugin source."""
from .jenkins_plugin_test_case import JenkinsPluginTestCase, JenkinsPluginTestsMixin
class RobotFrameworkJenkinsPluginTest(JenkinsPluginTestCase, JenkinsPluginTestsMixin):
"""Unit tests for the Robot Framework Jenkins plugin metrics."""
source_type = "robot_framework_jenkins_plugin"
def setUp(self):
super().setUp()
self.jenkins_json = dict(overallTotal=2, overallFailed=1, overallPassed=1)
async def test_nr_of_tests(self):
"""Test that the number of tests is returned."""
metric = dict(type="tests", addition="sum", sources=self.sources)
response = await self.collect(metric, get_request_json_return_value=self.jenkins_json)
self.assert_measurement(response, value="2", total="2")
async def test_failed_tests(self):
"""Test that the number of failed tests is returned."""
self.sources["source_id"]["parameters"]["test_result"] = ["fail"]
metric = dict(type="tests", addition="sum", sources=self.sources)
response = await self.collect(metric, get_request_json_return_value=self.jenkins_json)
self.assert_measurement(response, value="1", total="2")
async def test_passed_tests(self):
"""Test that the number of passed tests is returned."""
self.sources["source_id"]["parameters"]["test_result"] = ["pass"]
metric = dict(type="tests", addition="sum", sources=self.sources)
response = await self.collect(metric, get_request_json_return_value=self.jenkins_json)
self.assert_measurement(response, value="1", total="2")
|
from django.conf import settings
from .utils import is_valid_ip
from . import defaults as defs
NON_PUBLIC_IP_PREFIX = tuple([ip.lower() for ip in defs.IPWARE_NON_PUBLIC_IP_PREFIX])
TRUSTED_PROXY_LIST = tuple([ip.lower() for ip in getattr(settings, 'IPWARE_TRUSTED_PROXY_LIST', [])])
def get_ip(request, real_ip_only=False, right_most_proxy=False):
"""
Returns client's best-matched ip-address, or None
@deprecated - Do not edit
"""
best_matched_ip = None
for key in defs.IPWARE_META_PRECEDENCE_ORDER:
value = request.META.get(key, request.META.get(key.replace('_', '-'), '')).strip()
if value is not None and value != '':
ips = [ip.strip().lower() for ip in value.split(',')]
if right_most_proxy and len(ips) > 1:
ips = reversed(ips)
for ip_str in ips:
if ip_str and is_valid_ip(ip_str):
if not ip_str.startswith(NON_PUBLIC_IP_PREFIX):
return ip_str
if not real_ip_only:
loopback = defs.IPWARE_LOOPBACK_PREFIX
if best_matched_ip is None:
best_matched_ip = ip_str
elif best_matched_ip.startswith(loopback) and not ip_str.startswith(loopback):
best_matched_ip = ip_str
return best_matched_ip
def get_real_ip(request, right_most_proxy=False):
"""
Returns client's best-matched `real` `externally-routable` ip-address, or None
@deprecated - Do not edit
"""
return get_ip(request, real_ip_only=True, right_most_proxy=right_most_proxy)
def get_trusted_ip(request, right_most_proxy=False, trusted_proxies=TRUSTED_PROXY_LIST):
"""
Returns client's ip-address from `trusted` proxy server(s) or None
@deprecated - Do not edit
"""
if trusted_proxies:
meta_keys = ['HTTP_X_FORWARDED_FOR', 'X_FORWARDED_FOR']
for key in meta_keys:
value = request.META.get(key, request.META.get(key.replace('_', '-'), '')).strip()
if value:
ips = [ip.strip().lower() for ip in value.split(',')]
if len(ips) > 1:
if right_most_proxy:
ips.reverse()
for proxy in trusted_proxies:
if proxy in ips[-1]:
return ips[0]
return None
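# Typical view-level usage (sketch; `request` is an ordinary Django HttpRequest):
#
#   ip = get_ip(request)                  # best match, may be non-routable
#   real_ip = get_real_ip(request)        # only externally-routable addresses
#   trusted_ip = get_trusted_ip(request)  # only via IPWARE_TRUSTED_PROXY_LIST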
|
from setuptools import setup, find_packages
setup(
name='lib',
description='Holds the demo classes',
version='0.0.1',
author='James Dooley',
author_email='xxx@yyy.com',
packages=find_packages(exclude=('test',)),
url=''
)
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 7 12:57:47 2017
@author: ayanava
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import os
iters = [1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000]
acc_8 = [48.333333333333336, 50.0, 90.83333333333333, 100.0, 68.33333333333333, 95.83333333333334, 97.5, 88.33333333333333,
75.83333333333333, 76.66666666666667]
acc_10 = [47.5, 39.166666666666664, 51.66666666666667, 61.66666666666667, 98.33333333333333, 100.0, 72.5, 80.0, 100.0, 100.0]
fig, ax = plt.subplots(1,1)
axis_font = {'fontname':'Arial', 'size':'50'}
p1, =plt.plot( iters, acc_8, label="d", linewidth=5, marker='o', markeredgewidth= '5', markerfacecolor='black', color='b')
p2, =plt.plot( iters, acc_10, label="d", linewidth=5, marker='o', markeredgewidth= '5', markerfacecolor='black', color='r')
# major ticks every 5, minor ticks every 1
major_ticks = np.arange(0, 101, 5)
minor_ticks = np.arange(-101, 101, 1)
ax.set_xticks(major_ticks)
ax.set_xticks(minor_ticks, minor=True)
ax.set_yticks(major_ticks)
ax.set_yticks(minor_ticks, minor=True)
# and a corresponding grid
ax.grid(which='both')
plt.xlim([900,10100])
plt.ylim([30,110])
plt.ylabel('Accuracy Measures', **axis_font)
plt.xlabel('Number of Iterations', **axis_font)
ax.yaxis.set_major_locator(ticker.MultipleLocator(5))
ax.xaxis.set_major_locator(ticker.MultipleLocator(1000))
ax.xaxis.set_tick_params(labelsize=30)
ax.yaxis.set_tick_params(labelsize=35)
plt.legend([p1, p2], ["B=2, M=1, C=8", "B=2, M=1, C=10"], loc='lower right', fontsize = 40, borderaxespad=0.) #
plt.show()
#save the figure
#fig.savefig('plot_small_neurons.png')
|
"""2D plots of sound fields etc."""
import matplotlib as _mpl
import matplotlib.pyplot as _plt
from mpl_toolkits import axes_grid1 as _axes_grid1
import numpy as _np
from . import default as _default
from . import util as _util
def _register_cmap_clip(name, original_cmap, alpha):
"""Create a color map with "over" and "under" values."""
from matplotlib.colors import LinearSegmentedColormap
cdata = _plt.cm.datad[original_cmap]
if isinstance(cdata, dict):
cmap = LinearSegmentedColormap(name, cdata)
else:
cmap = LinearSegmentedColormap.from_list(name, cdata)
cmap.set_over([alpha * c + 1 - alpha for c in cmap(1.0)[:3]])
cmap.set_under([alpha * c + 1 - alpha for c in cmap(0.0)[:3]])
_plt.cm.register_cmap(cmap=cmap)
# The 'coolwarm' colormap is based on the paper
# "Diverging Color Maps for Scientific Visualization" by Kenneth Moreland
# http://www.sandia.gov/~kmorel/documents/ColorMaps/
_register_cmap_clip('coolwarm_clip', 'coolwarm', 0.7)
def _register_cmap_transparent(name, color):
"""Create a color map from a given color to transparent."""
from matplotlib.colors import colorConverter, LinearSegmentedColormap
red, green, blue = colorConverter.to_rgb(color)
cdict = {'red': ((0, red, red), (1, red, red)),
'green': ((0, green, green), (1, green, green)),
'blue': ((0, blue, blue), (1, blue, blue)),
'alpha': ((0, 0, 0), (1, 1, 1))}
cmap = LinearSegmentedColormap(name, cdict)
_plt.cm.register_cmap(cmap=cmap)
_register_cmap_transparent('blacktransparent', 'black')
def virtualsource(xs, ns=None, type='point', *, ax=None):
"""Draw position/orientation of virtual source."""
xs = _np.asarray(xs)
ns = _np.asarray(ns)
if ax is None:
ax = _plt.gca()
if type == 'point':
vps = _plt.Circle(xs, .05, edgecolor='k', facecolor='k')
ax.add_artist(vps)
for n in range(1, 3):
vps = _plt.Circle(xs, .05+n*0.05, edgecolor='k', fill=False)
ax.add_artist(vps)
elif type == 'plane':
ns = 0.2 * ns
ax.arrow(xs[0], xs[1], ns[0], ns[1], head_width=0.05,
head_length=0.1, fc='k', ec='k')
def reference(xref, *, size=0.1, ax=None):
"""Draw reference/normalization point."""
xref = _np.asarray(xref)
if ax is None:
ax = _plt.gca()
ax.plot((xref[0]-size, xref[0]+size), (xref[1]-size, xref[1]+size), 'k-')
ax.plot((xref[0]-size, xref[0]+size), (xref[1]+size, xref[1]-size), 'k-')
def secondary_sources(x0, n0, *, grid=None):
"""Simple plot of secondary source locations."""
x0 = _np.asarray(x0)
n0 = _np.asarray(n0)
ax = _plt.gca()
# plot only secondary sources inside simulated area
if grid is not None:
x0, n0 = _visible_secondarysources(x0, n0, grid)
# plot symbols
for x00 in x0:
ss = _plt.Circle(x00[0:2], .05, edgecolor='k', facecolor='k')
ax.add_artist(ss)
def loudspeakers(x0, n0, a0=0.5, *, size=0.08, show_numbers=False, grid=None,
ax=None):
"""Draw loudspeaker symbols at given locations and angles.
Parameters
----------
x0 : (N, 3) array_like
Loudspeaker positions.
n0 : (N, 3) or (3,) array_like
Normal vector(s) of loudspeakers.
a0 : float or (N,) array_like, optional
Weighting factor(s) of loudspeakers.
size : float, optional
Size of loudspeakers in metres.
show_numbers : bool, optional
If ``True``, loudspeaker numbers are shown.
grid : triple of array_like, optional
If specified, only loudspeakers within the *grid* are shown.
ax : Axes object, optional
The loudspeakers are plotted into this `matplotlib.axes.Axes`
object or -- if not specified -- into the current axes.
"""
x0 = _util.asarray_of_rows(x0)
n0 = _util.asarray_of_rows(n0)
a0 = _util.asarray_1d(a0).reshape(-1, 1)
# plot only secondary sources inside simulated area
if grid is not None:
x0, n0 = _visible_secondarysources(x0, n0, grid)
# normalized coordinates of loudspeaker symbol (see IEC 60617-9)
codes, coordinates = zip(*(
(_mpl.path.Path.MOVETO, [-0.62, 0.21]),
(_mpl.path.Path.LINETO, [-0.31, 0.21]),
(_mpl.path.Path.LINETO, [0, 0.5]),
(_mpl.path.Path.LINETO, [0, -0.5]),
(_mpl.path.Path.LINETO, [-0.31, -0.21]),
(_mpl.path.Path.LINETO, [-0.62, -0.21]),
(_mpl.path.Path.CLOSEPOLY, [0, 0]),
(_mpl.path.Path.MOVETO, [-0.31, 0.21]),
(_mpl.path.Path.LINETO, [-0.31, -0.21]),
))
coordinates = _np.column_stack([coordinates, _np.zeros(len(coordinates))])
coordinates *= size
patches = []
for x00, n00 in _util.broadcast_zip(x0, n0):
# rotate and translate coordinates
R = _util.rotation_matrix([1, 0, 0], n00)
transformed_coordinates = _np.inner(coordinates, R) + x00
patches.append(_mpl.patches.PathPatch(_mpl.path.Path(
transformed_coordinates[:, :2], codes)))
# add collection of patches to current axis
p = _mpl.collections.PatchCollection(
patches, edgecolor='0', facecolor=_np.tile(1 - a0, 3))
if ax is None:
ax = _plt.gca()
ax.add_collection(p)
if show_numbers:
for idx, (x00, n00) in enumerate(_util.broadcast_zip(x0, n0)):
x, y = x00[:2] - 1.2 * size * n00[:2]
ax.text(x, y, idx + 1, horizontalalignment='center',
verticalalignment='center', clip_on=True)
def _visible_secondarysources(x0, n0, grid):
"""Determine secondary sources which lie within *grid*."""
x, y = _util.as_xyz_components(grid[:2])
    idx = _np.where((x0[:, 0] > x.min()) & (x0[:, 0] < x.max()) &
                    (x0[:, 1] > y.min()) & (x0[:, 1] < y.max()))
idx = _np.squeeze(idx)
return x0[idx, :], n0[idx, :]
def amplitude(p, grid, *, xnorm=None, cmap='coolwarm_clip',
vmin=-2.0, vmax=2.0, xlabel=None, ylabel=None,
colorbar=True, colorbar_kwargs={}, ax=None, **kwargs):
"""Two-dimensional plot of sound field (real part).
Parameters
----------
p : array_like
Sound pressure values (or any other scalar quantity if you
like). If the values are complex, the imaginary part is
ignored.
Typically, *p* is two-dimensional with a shape of *(Ny, Nx)*,
*(Nz, Nx)* or *(Nz, Ny)*. This is the case if
`sfs.util.xyz_grid()` was used with a single number for *z*,
*y* or *x*, respectively.
However, *p* can also be three-dimensional with a shape of *(Ny,
Nx, 1)*, *(1, Nx, Nz)* or *(Ny, 1, Nz)*. This is the case if
:func:`numpy.meshgrid` was used with a scalar for *z*, *y* or
*x*, respectively (and of course with the default
``indexing='xy'``).
.. note:: If you want to plot a single slice of a pre-computed
"full" 3D sound field, make sure that the slice still
has three dimensions (including one singleton
dimension). This way, you can use the original *grid*
of the full volume without changes.
This works because the grid component corresponding to
the singleton dimension is simply ignored.
grid : triple or pair of numpy.ndarray
The grid that was used to calculate *p*, see
`sfs.util.xyz_grid()`. If *p* is two-dimensional, but
*grid* has 3 components, one of them must be scalar.
xnorm : array_like, optional
Coordinates of a point to which the sound field should be
normalized before plotting. If not specified, no normalization
is used. See `sfs.util.normalize()`.
Returns
-------
AxesImage
See :func:`matplotlib.pyplot.imshow`.
Other Parameters
----------------
xlabel, ylabel : str
Overwrite default x/y labels. Use ``xlabel=''`` and
``ylabel=''`` to remove x/y labels. The labels can be changed
afterwards with :func:`matplotlib.pyplot.xlabel` and
:func:`matplotlib.pyplot.ylabel`.
colorbar : bool, optional
If ``False``, no colorbar is created.
colorbar_kwargs : dict, optional
Further colorbar arguments, see `add_colorbar()`.
ax : Axes, optional
If given, the plot is created on *ax* instead of the current
axis (see :func:`matplotlib.pyplot.gca`).
cmap, vmin, vmax, **kwargs
All further parameters are forwarded to
:func:`matplotlib.pyplot.imshow`.
See Also
--------
sfs.plot2d.level
"""
p = _np.asarray(p)
grid = _util.as_xyz_components(grid)
# normalize sound field wrt xnorm
if xnorm is not None:
p = _util.normalize(p, grid, xnorm)
if p.ndim == 3:
if p.shape[2] == 1:
p = p[:, :, 0] # first axis: y; second axis: x
plotting_plane = 'xy'
elif p.shape[1] == 1:
p = p[:, 0, :].T # first axis: z; second axis: y
plotting_plane = 'yz'
elif p.shape[0] == 1:
p = p[0, :, :].T # first axis: z; second axis: x
plotting_plane = 'xz'
else:
raise ValueError("If p is 3D, one dimension must have length 1")
elif len(grid) == 3:
if grid[2].ndim == 0:
plotting_plane = 'xy'
elif grid[1].ndim == 0:
plotting_plane = 'xz'
elif grid[0].ndim == 0:
plotting_plane = 'yz'
else:
raise ValueError(
"If p is 2D and grid is 3D, one grid component must be scalar")
else:
# 2-dimensional case
plotting_plane = 'xy'
if plotting_plane == 'xy':
x, y = grid[[0, 1]]
elif plotting_plane == 'xz':
x, y = grid[[0, 2]]
elif plotting_plane == 'yz':
x, y = grid[[1, 2]]
dx = 0.5 * x.ptp() / p.shape[0]
dy = 0.5 * y.ptp() / p.shape[1]
if ax is None:
ax = _plt.gca()
# see https://github.com/matplotlib/matplotlib/issues/10567
if _mpl.__version__.startswith('2.1.'):
p = _np.clip(p, -1e15, 1e15) # clip to float64 range
im = ax.imshow(_np.real(p), cmap=cmap, origin='lower',
extent=[x.min()-dx, x.max()+dx, y.min()-dy, y.max()+dy],
vmax=vmax, vmin=vmin, **kwargs)
if xlabel is None:
xlabel = plotting_plane[0] + ' / m'
if ylabel is None:
ylabel = plotting_plane[1] + ' / m'
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if colorbar:
add_colorbar(im, **colorbar_kwargs)
return im
def level(p, grid, *, xnorm=None, power=False, cmap=None, vmax=3, vmin=-50,
**kwargs):
"""Two-dimensional plot of level (dB) of sound field.
Takes the same parameters as `sfs.plot2d.amplitude()`.
Other Parameters
----------------
power : bool, optional
See `sfs.util.db()`.
"""
# normalize before converting to dB!
if xnorm is not None:
p = _util.normalize(p, grid, xnorm)
L = _util.db(p, power=power)
return amplitude(L, grid=grid, xnorm=None, cmap=cmap,
vmax=vmax, vmin=vmin, **kwargs)
def particles(x, *, trim=None, ax=None, xlabel='x (m)', ylabel='y (m)',
edgecolor='', marker='.', s=15, **kwargs):
"""Plot particle positions as scatter plot"""
XX, YY = [_np.real(c) for c in x[:2]]
if trim is not None:
xmin, xmax, ymin, ymax = trim
idx = _np.where((XX > xmin) & (XX < xmax) & (YY > ymin) & (YY < ymax))
XX = XX[idx]
YY = YY[idx]
if ax is None:
ax = _plt.gca()
if xlabel:
ax.set_xlabel(xlabel)
if ylabel:
ax.set_ylabel(ylabel)
return ax.scatter(XX, YY, edgecolor=edgecolor, marker=marker, s=s,
**kwargs)
def vectors(v, grid, *, cmap='blacktransparent', headlength=3,
headaxislength=2.5, ax=None, clim=None, **kwargs):
"""Plot a vector field in the xy plane.
Parameters
----------
v : triple or pair of array_like
x, y and optionally z components of vector field. The z
components are ignored.
If the values are complex, the imaginary parts are ignored.
grid : triple or pair of array_like
The grid that was used to calculate *v*, see
`sfs.util.xyz_grid()`. Any z components are ignored.
Returns
-------
Quiver
See :func:`matplotlib.pyplot.quiver`.
Other Parameters
----------------
ax : Axes, optional
If given, the plot is created on *ax* instead of the current
axis (see :func:`matplotlib.pyplot.gca`).
clim : pair of float, optional
Limits for the scaling of arrow colors.
See :func:`matplotlib.pyplot.quiver`.
cmap, headlength, headaxislength, **kwargs
All further parameters are forwarded to
:func:`matplotlib.pyplot.quiver`.
"""
v = _util.as_xyz_components(v[:2]).apply(_np.real)
X, Y = _util.as_xyz_components(grid[:2])
speed = _np.linalg.norm(v)
with _np.errstate(invalid='ignore'):
U, V = v.apply(_np.true_divide, speed)
if ax is None:
ax = _plt.gca()
if clim is None:
v_ref = 1 / (_default.rho0 * _default.c) # reference particle velocity
clim = 0, 2 * v_ref
return ax.quiver(X, Y, U, V, speed, cmap=cmap, pivot='mid', units='xy',
angles='xy', headlength=headlength,
headaxislength=headaxislength, clim=clim, **kwargs)
def add_colorbar(im, *, aspect=20, pad=0.5, **kwargs):
r"""Add a vertical color bar to a plot.
Parameters
----------
im : ScalarMappable
The output of `sfs.plot2d.amplitude()`, `sfs.plot2d.level()` or any
other `matplotlib.cm.ScalarMappable`.
aspect : float, optional
Aspect ratio of the colorbar. Strictly speaking, since the
colorbar is vertical, it's actually the inverse of the aspect
ratio.
pad : float, optional
Space between image plot and colorbar, as a fraction of the
width of the colorbar.
.. note:: The *pad* argument of
:meth:`matplotlib.figure.Figure.colorbar` has a
slightly different meaning ("fraction of original
axes")!
\**kwargs
All further arguments are forwarded to
:meth:`matplotlib.figure.Figure.colorbar`.
See Also
--------
matplotlib.pyplot.colorbar
"""
ax = im.axes
divider = _axes_grid1.make_axes_locatable(ax)
width = _axes_grid1.axes_size.AxesY(ax, aspect=1/aspect)
pad = _axes_grid1.axes_size.Fraction(pad, width)
current_ax = _plt.gca()
cax = divider.append_axes("right", size=width, pad=pad)
_plt.sca(current_ax)
return ax.figure.colorbar(im, cax=cax, orientation='vertical', **kwargs)
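# A minimal usage sketch (assuming this module is exposed as ``sfs.plot2d`` and
# that ``p`` is a sound-field array computed on ``grid`` elsewhere):
#
#   grid = sfs.util.xyz_grid([-2, 2], [-2, 2], 0, spacing=0.02)
#   sfs.plot2d.amplitude(p, grid, xnorm=[0, 0, 0])   # linear amplitude
#   sfs.plot2d.level(p, grid, xnorm=[0, 0, 0])       # level in dB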
|
import sys
import numpy as np
def minCostPath(cost,m,n):
# cost is the matrix we are trying to traverse
# m is the row idx of cost matrix
# n is the column index of the cost matrix
if (n < 0) or (m < 0):
return sys.maxsize
elif n == 0 and m == 0:
return cost[m][n]
else:
return cost[m][n] + min (minCostPath(cost,m-1,n),\
minCostPath(cost,m,n-1),minCostPath(cost,m-1,n-1))
cost = [[1, 2, 3],
[4, 7, 20],
[1, 4, 3]]
print(minCostPath(cost,2,2))
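# The recursive version above recomputes overlapping subproblems and grows
# exponentially with m + n. Below is a memoized sketch of the same recurrence,
# added purely for illustration (it reuses the `sys` import and `cost` matrix
# defined above; the function name is ours, not part of the original script).
def min_cost_path_memo(cost, m, n, memo=None):
    # memo maps (row, col) -> minimal cost of reaching that cell
    if memo is None:
        memo = {}
    if n < 0 or m < 0:
        return sys.maxsize
    if m == 0 and n == 0:
        return cost[0][0]
    if (m, n) not in memo:
        memo[(m, n)] = cost[m][n] + min(min_cost_path_memo(cost, m - 1, n, memo),
                                        min_cost_path_memo(cost, m, n - 1, memo),
                                        min_cost_path_memo(cost, m - 1, n - 1, memo))
    return memo[(m, n)]
print(min_cost_path_memo(cost, 2, 2))  # same result as the recursive call above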
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : progressHelper.py
@Time : 2018/12/28
@Author : Yaronzz
@Version : 2.0
@Contact : yaronhuang@foxmail.com
@Desc : Show ProgressBar
"""
import sys
import threading
class ProgressTool(object):
def __init__(self, maxCount, barLength=50, icon='▓', unit='', desc=''):
        self.curCount = 0                # current count
        self.maxCount = maxCount         # maximum count
        self.barLength = barLength       # progress bar length
        self.icon = icon                 # progress bar fill symbol
        self.mutex = threading.Lock()    # mutex protecting the counter
self.isFinish = False
self.unit = unit
self.desc = ''
if len(desc) > 0:
self.desc = '(' + desc + ')'
def reset(self, maxCount):
if self.mutex.acquire():
self.curCount = 0
self.maxCount = maxCount
self.isFinish = False
self.mutex.release()
def setCurCount(self, curCount):
if self.mutex.acquire():
if self.isFinish is False:
if curCount >= self.maxCount:
curCount = self.maxCount
self.isFinish = True
self.curCount = curCount
self.__show__()
self.mutex.release()
def addCurCount(self, addCount):
count = self.curCount + addCount
self.setCurCount(count)
def step(self):
count = self.curCount + 1
self.setCurCount(count)
def __show__(self):
try:
            # compute how many filled blocks to show
            numBlock = int(self.curCount * self.barLength / self.maxCount)
            # compute how many blank spaces to show
numEmpty = self.barLength - numBlock
            # compute the percentage
percent = self.curCount * 100.0 / self.maxCount
            # build the output string
process = '%3d' % percent + '%|'
process += self.icon * numBlock + ' ' * numEmpty + '| '
process += str(round(self.curCount, 2)) + '/'
process += str(round(self.maxCount, 2)) + ' ' + self.unit + self.desc
            # carriage return while in progress, newline once finished
process += '\r' if self.curCount < self.maxCount else '\n'
sys.stdout.write(process)
sys.stdout.flush()
except:
pass
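# A small usage sketch (values are illustrative):
#
#   bar = ProgressTool(maxCount=100, unit='MB', desc='downloading')
#   for _ in range(100):
#       bar.step()    # or bar.addCurCount(n) / bar.setCurCount(n)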
|
from sortedcontainers import SortedList
import numpy as np
from .utils import log_grad
def identify_turning_points(
x_raw, local_radius=17, peak_ratio=0.2, min_log_grad=0.01):
"""
Identifies the set of 'turning points' in a time series.
Time complexity is O(N log(local_radius)).
Parameters
----------
x_raw : array_like
the time series, should be convertible to a 1D numpy array
local_radius : int
a peak/trough must satisfy the condition of being the max/min of any
values within 'local_radius' time steps forwards or backwards
peak_ratio : float
a peak must satisfy the condition of being at least peak_ratio * the
value of the previous peak
min_log_grad : float
a turning point must satisfy the condition of having a log_gradient
magnitude of at least min_log_grad from the previous turning point
Returns
-------
array-like
sequence of 0-based indices representing the identified turning points.
The first turning point will be a trough, and proceed to alternate between
peak and trough.
"""
x = np.array(x_raw)
x[x<0] = 0
# Preprocess: cache right-side peak/trough neighbourhood validity, O(N logN)
# valid_peak[i] = True iff x[i] >= max(x[i+1], ..., x[i+local_radius])
# valid_trough[i] = True iff x[i] <= min(x[i+1], ..., x[i+local_radius])
valid_peak = np.full((len(x)), False)
valid_trough = np.full((len(x)), False)
next_values = SortedList([x[-1]])
valid_peak[-1] = True
valid_trough[-1] = True
for i in range(len(x)-2, -1, -1):
valid_peak[i] = x[i] >= next_values[-1]
valid_trough[i] = x[i] <= next_values[0]
if i + local_radius < len(x):
next_values.remove(x[i+local_radius]) # O(log l)
next_values.add(x[i]) # O(log l)
# For now, we assume the first TP will be a trough.
# TODO: Generalise to allow for starting at a peak.
tps = [0]
recent_values = SortedList([x[0]])
for i in range(1, len(x)):
# Update peak/trough validity based on left-side neighbourhood.
valid_peak[i] &= (x[i] >= recent_values[-1])
valid_trough[i] &= (x[i] <= recent_values[0])
if len(tps) % 2 == 1:
            # The last TP we added was a trough (odd number of turning points).
if x[i] < x[tps[-1]]:
# Replace last trough with this lower one.
tps[-1] = i
elif (x[i] > x[tps[-1]]
and valid_peak[i]
and (len(tps) < 2 or x[i] >= x[tps[-2]] * peak_ratio)
and abs(log_grad(tps[-1], x[tps[-1]], i, x[i])) >= min_log_grad):
# New peak: greater-or-equal to surrounding 'l' values and greater than
# previous trough and passes peak ratio check with prev peak and
# log_grad ratio check with prev trough.
tps.append(i)
else:
# The last TP we added was a peak.
if x[i] > x[tps[-1]]:
# Replace recent peak with this one.
tps[-1] = i
elif (x[i] < x[tps[-1]]
and valid_trough[i]
and abs(log_grad(tps[-1], x[tps[-1]], i, x[i])) >= min_log_grad):
# New trough: less-or-equal to surrounding 'l' values and less than
# previous peak and passes log_grad ratio check with prev peak.
tps.append(i)
if i >= local_radius:
recent_values.remove(x[i-local_radius])
recent_values.add(x[i])
return tps
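# A brief usage sketch (synthetic series; parameter values are illustrative):
#
#   x = [0, 1, 3, 7, 5, 2, 1, 4, 9, 12, 8, 3]
#   tps = identify_turning_points(x, local_radius=2, peak_ratio=0.2,
#                                 min_log_grad=0.01)
#   # `tps` holds indices alternating trough, peak, trough, ...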
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: convert_camrest_data.py
"""
import json
import ast
import numpy as np
def convert_text_for_sample(input_file, out_file):
kbs = []
dialogs = []
count = 0
with open(input_file, 'r') as fr, open(out_file, 'w') as fw:
for line in fr:
line = line.strip()
if line:
if 'R_' in line:
triple = line.split()[1:]
triple_str = ' '.join(triple).replace('R_', '')
kbs.append(triple_str)
elif 'api_call' in line:
usr_sent = line.split('\t')[0]
usr_sent = ' '.join(usr_sent.split()[1:])
elif '<SILENCE>' in line:
sys_sent = line.split('\t')[1]
assert usr_sent is not None
dialog = usr_sent + '\t' + sys_sent
dialogs.append(dialog)
else:
u, s = line.split('\t')
u = ' '.join(u.split()[1:])
dialog = u + '\t' + s
dialogs.append(dialog)
else:
new_kbs = []
entities = []
for triple in kbs:
subj, rel, obj = triple.split()
entities.append(subj)
entities.append(obj)
poi_triple = [subj, 'poi', subj]
poi_triple = ' '.join(poi_triple)
if poi_triple not in new_kbs:
new_kbs.append(poi_triple)
new_kbs.append(triple)
gold_ents = []
entities = set(entities)
for i, dialog in enumerate(dialogs):
u, s = dialog.split('\t')
sys_toks = s.split()
gold_entity = []
for tok in sys_toks:
if tok in entities:
gold_entity.append(tok)
gold_ents.append(gold_entity)
for triple in new_kbs:
kb_line = '0 ' + triple
fw.write(kb_line)
fw.write('\n')
assert len(gold_ents) == len(dialogs)
for i, dialog in enumerate(dialogs):
dialog_line = str(i+1) + ' ' + dialog + '\t' + str(gold_ents[i])
fw.write(dialog_line)
fw.write('\n')
fw.write('\n')
kbs = []
dialogs = []
count += 1
print("total dialogs:", count)
def convert_text_for_model(input_file, out_file):
all_samples = []
sample = {}
uid = []
dialog = []
kb = []
gold_entity = []
ptr_index = []
kb_index = []
with open(input_file, 'r') as fr:
for line in fr:
line = line.strip()
if line:
if line.startswith('0'):
triple = line.split()[1:]
kb_triple = ' '.join(triple)
kb.append(kb_triple)
else:
u, s, gold_ent = line.split('\t')
u = " ".join(u.split()[1:])
uid.append('1')
dialog.append(u)
uid.append('0')
dialog.append(s)
gold_ent = ast.literal_eval(gold_ent)
gold_entity.append(gold_ent)
ptr = [1 if (w in gold_ent and len(kb) > 0) else 0 for w in s.split()]
ptr_index.append(ptr)
if len(kb) == 0:
kb_ptr = [0]
else:
kb_ptr = []
for triple in kb:
tup = triple.split()
assert len(tup) == 3
sub, rel, obj = tup[0], tup[1], tup[2]
if obj in s.split():
kb_ptr.append(1)
else:
kb_ptr.append(0)
kb_index.append(kb_ptr)
else:
sample['task'] = 'restaurant'
sample['uid'] = uid
sample['dialog'] = dialog
sample['gold_entity'] = gold_entity
sample['ptr_index'] = ptr_index
sample['kb_index'] = kb_index
if len(kb) == 0:
sample['kb'] = ["<pad> <pad> <pad>"]
else:
sample['kb'] = kb
all_samples.append(sample)
sample = {}
uid = []
dialog = []
kb = []
gold_entity = []
ptr_index = []
kb_index = []
print("total samples:", len(all_samples))
    # rebuild the list instead of deleting while iterating, which would skip
    # the element that follows each deletion
    kept_samples = []
    for i, s in enumerate(all_samples):
        if len(s['uid']) == 0:
            print("index=%d utterance is None! filtered." % i)
        else:
            kept_samples.append(s)
    all_samples = kept_samples
print("max utterances:", max([len(s['uid']) for s in all_samples])) # 16
print("min utterances:", min([len(s['uid']) for s in all_samples])) # 4
print("avg utterances:", np.mean([len(s['uid']) for s in all_samples])) # 7.98 / 8.32 / 8.32
print("max kb triples:", max([len(s['kb']) for s in all_samples])) # 452 / 248 / 112
print("min kb triples:", min([len(s['kb']) for s in all_samples])) # 1
print("avg kb triples:", np.mean([len(s['kb']) for s in all_samples])) # 23.57 / 21.64 / 22.62
with open(out_file, 'w') as fw:
for sample in all_samples:
line = json.dumps(sample)
fw.write(line)
fw.write('\n')
if __name__ == '__main__':
data_dir = "./data/CamRest"
modes = ['train', 'dev', 'test']
for mode in modes:
input_file1 = "%s/camrest676-%s.txt" % (data_dir, mode)
out_file1 = "%s/%s.txt" % (data_dir, mode)
convert_text_for_sample(input_file1, out_file1)
for mode in modes:
input_file2 = "%s/%s.txt" % (data_dir, mode)
out_file2 = "%s/%s.data.txt" % (data_dir, mode)
convert_text_for_model(input_file2, out_file2)
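# Illustrative sketch of the data flow (example values are hypothetical, not taken from the
# CamRest files): convert_text_for_sample writes an intermediate file in which KB triples are
# prefixed with "0" and numbered dialog turns end with the list of gold entities, e.g.
#   0 pizza_hut_city_centre poi pizza_hut_city_centre
#   0 pizza_hut_city_centre food italian
#   1 i want a cheap restaurant<TAB>what kind of food would you like ?<TAB>[]
# convert_text_for_model then turns each such block into one JSON object per line with the
# keys: task, uid, dialog, kb, gold_entity, ptr_index and kb_index.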
|
#!/usr/bin/env python3
""" FormatBlock - Escape Codes
Functions to test against/strip terminal escape codes from strings.
-Christopher Welborn 2-17-18
"""
import re
from typing import (
Any,
Dict,
List,
)
_codepats = (
# Colors.
r'(([\d;]+)?m{1})',
# Cursor show/hide.
r'(\?25l)',
r'(\?25h)',
# Move position.
r'(([\d]+[;])?([\d]+[Hf]))',
# Save/restore position.
r'([su])',
# Others (move, erase).
r'([\d]+[ABCDEFGHJKST])',
)
# Used to strip escape codes from a string.
codepat = re.compile(
    '\033\\[({})'.format('|'.join(_codepats))
)
# Used to grab codes from a string.
codegrabpat = re.compile('\033\\[[\\d;]+?m')
def get_codes(s: Any) -> List[str]:
""" Grab all escape codes from a string.
Returns a list of all escape codes.
"""
return codegrabpat.findall(str(s))
def get_code_indices(s: Any) -> Dict[int, str]:
""" Retrieve a dict of {index: escape_code} for a given string.
If no escape codes are found, an empty dict is returned.
"""
indices = {}
i = 0
codes = get_codes(s)
for code in codes:
codeindex = s.index(code)
realindex = i + codeindex
indices[realindex] = code
codelen = len(code)
i = realindex + codelen
s = s[codeindex + codelen:]
return indices
def get_indices(s: Any) -> Dict[int, str]:
""" Retrieve a dict of characters and escape codes with their real index
into the string as the key.
"""
codes = get_code_indices(s)
if not codes:
# This function is not for non-escape-code stuff, but okay.
return {i: c for i, c in enumerate(s)}
indices = {}
for codeindex in sorted(codes):
code = codes[codeindex]
if codeindex == 0:
indices[codeindex] = code
continue
# Grab characters before codeindex.
start = max(indices or {0: ''}, key=int)
startcode = indices.get(start, '')
startlen = start + len(startcode)
indices.update({i: s[i] for i in range(startlen, codeindex)})
indices[codeindex] = code
if not indices:
return {i: c for i, c in enumerate(s)}
lastindex = max(indices, key=int)
lastitem = indices[lastindex]
start = lastindex + len(lastitem)
textlen = len(s)
if start < (textlen - 1):
# Grab chars after last code.
indices.update({i: s[i] for i in range(start, textlen)})
return indices
def get_indices_list(s: Any) -> List[str]:
""" Retrieve a list of characters and escape codes where each escape
code uses only one index. The indexes will not match up with the
indexes in the original string.
"""
indices = get_indices(s)
return [
indices[i] for i in sorted(indices, key=int)
]
def is_escape_code(s: Any) -> bool:
""" Returns True if `s` appears to be any kind of escape code. """
return codepat.match(str(s)) is not None
def strip_codes(s: Any) -> str:
""" Strip all color codes from a string.
Returns empty string for "falsey" inputs.
"""
return codepat.sub('', str(s) if (s or (s == 0)) else '')
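if __name__ == '__main__':
    # Minimal usage sketch (the colored example string is hypothetical, not part of the module).
    example = '\033[31mred text\033[0m plain'
    print(get_codes(example))         # ['\x1b[31m', '\x1b[0m']
    print(strip_codes(example))       # 'red text plain'
    print(is_escape_code('\033[0m'))  # True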
|
#coding=utf-8
import pandas as pd
import statsmodels.api as sm
#import pylab
import glob
def Lowess_detrend(x,y):
# z = sm.nonparametric.lowess(y, x)
# z1 = sm.nonparametric.lowess(y, x, frac=0.1)
# z45 = sm.nonparametric.lowess(y, x, frac=0.45)
z9 = sm.nonparametric.lowess(y, x, frac=0.9)
# pylab.plot(x, y, 'o')
# pylab.plot(z[:,0], z[:,1], 'r-')
# pylab.plot(z1[:,0], z1[:,1], 'g-')
# pylab.plot(z45[:,0], z45[:,1], 'b-')
# pylab.plot(z9[:,0], z9[:,1], 'y-')
# pylab.show()
return z9[:,1]
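def _demo_lowess_detrend():
    # Hedged usage sketch on synthetic data (not part of the original workflow): detrend a
    # noisy linear series with the 0.9-fraction lowess fit and return multiplicative anomalies,
    # mirroring what the main loop below does per climate factor.
    import numpy as np
    years = list(range(1901, 2017))
    values = [0.01 * y + np.random.normal(scale=0.5) for y in years]
    trend = Lowess_detrend(years, values)
    return values / trend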
if __name__ == '__main__':
base_dir = r'F:\crop-climate\crucsv\*.csv'
filelist = glob.glob(base_dir)
zero_list=[0]*116
for filename in filelist:
        df = pd.read_csv(filename)  # read the data with pandas
grid_id=filename[-10:-4]
        year_list = df['Year']  # get the values of the "Year" column
# dataframe1=pd.DataFrame({'Year':year_list})
dataframe2=pd.DataFrame({'Year':year_list})
factor=['Cld','Pre','Tmn','Tmp','Tmx']
for f in factor:
cru_list = df[f]
if len(set(cru_list))==1:
break
ys=Lowess_detrend(year_list, cru_list)
if list(ys)==zero_list:
break
# dataframe1[f]=cru_list-ys
dataframe2[f]=cru_list/ys
if len(set(cru_list))==1 or list(ys)==zero_list:
continue
# dataframe1.to_csv(r'F:\crop-climate\cru_detrend\lowess-additive/%s.csv' % (grid_id),index=False)
dataframe2.to_csv(r'F:\crop-climate\cru_detrend\lowess-multiplicative/%s.csv' % (grid_id),index=False)
|
# -*- coding: utf-8 -*-
'''
Anonymize reactions: randomize participant ids so that they do not match
the ids of the source data.
'''
import gazelib
import random
def run(input_files, output_files):
# Read reaction sequences
seqs = gazelib.io.load_json(input_files[0])
# Generate 100 random participant ids and
# consume them, one per participant. This way we avoid
# overlapping ids. Still, ensure that the sequence remains the same
# between runs by seeding the generator.
random.seed(420)
new_ids = list(map(lambda x: str(x).zfill(4), range(100,200)))
random.shuffle(new_ids)
    # Mapping from head_id to new id. If a head_id has already been seen during
    # the iteration, we do not generate a new one but reuse the one stored here.
head_id_to_new_id = {}
for seq in seqs:
if len(seq) > 0:
head_id = seq[0]['head_id']
if head_id in head_id_to_new_id:
new_id = head_id_to_new_id[head_id]
else:
# Get new id
new_id = new_ids.pop(0)
head_id_to_new_id[head_id] = new_id
# Overwrite true participant ids
for trial in seq:
trial['head_id'] = new_id
gazelib.io.write_json(output_files[0], seqs, human_readable=True)
|
__author__ = 'Thomas Kountis'
class BaseWhitelist(object):
def __init__(self):
pass
def allow(self, host, port):
pass
class DefaultWhitelist(BaseWhitelist):
def __init__(self):
BaseWhitelist.__init__(self)
def allow(self, host, port):
return True
class StaticListWhitelist(BaseWhitelist):
def __init__(self, allowed):
BaseWhitelist.__init__(self)
self.allowed = allowed
def allow(self, host, port):
return host in self.allowed
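# Illustrative usage (example hosts are hypothetical):
#   DefaultWhitelist().allow('example.com', 443)                 -> True
#   StaticListWhitelist(['example.com']).allow('other.org', 80)  -> False
# Note that BaseWhitelist.allow() implicitly returns None (falsy); subclasses override it.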
|
# Copyright (c) 2015-2020 The Botogram Authors (see AUTHORS)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from . import syntaxes
def process(bot, chains, update):
"""Process an inline update"""
for hook in chains["inline"]:
bot.logger.debug("Processing update #%s with the hook %s..." %
(update.update_id, hook.name))
result = hook.call(bot, update)
        if result == {'ok': True, 'result': True}:
bot.logger.debug("Update #%s was just processed by the %s hook."
% (update.update_id, hook.name))
return
bot.logger.debug("No hook actually processed the #%s update." %
update.update_id)
def inline_feedback_process(bot, chains, update):
"""Process a chosen inline result update"""
for hook in chains["inline_feedback"]:
bot.logger.debug("Processing update #%s with the hook %s..." %
(update.update_id, hook.name))
result = hook.call(bot, update)
        if result == {'ok': True}:
bot.logger.debug("Update #%s was just processed by the %s hook."
% (update.update_id, hook.name))
return
bot.logger.debug("No hook actually processed the #%s update." %
update.update_id)
class InlineInputMessage:
"""A factory for InputMessageContent Telegram objects"""
def __init__(self, text, syntax=None, preview=True):
self.text = text
self.syntax = syntax
self.preview = preview
def _serialize(self):
args = {
"message_text": self.text,
"disable_web_page_preview": not self.preview,
}
syntax = syntaxes.guess_syntax(self.text, self.syntax)
if syntax:
args["parse_mode"] = syntax
return args
class InlineInputLocation:
"""A factory for InputLocationMessageContent Telegram objects"""
def __init__(self, latitude, longitude, live_period=None):
self.latitude = latitude
self.longitude = longitude
self.live_period = live_period
def _serialize(self):
args = {
"latitude": self.latitude,
"longitude": self.longitude,
}
if self.live_period is not None:
args["live_period"] = self.live_period
return args
class InlineInputVenue:
"""A factory for InputVenueMessageContent Telegram objects"""
def __init__(self, latitude, longitude, title, address,
foursquare_id=None, foursquare_type=None):
self.latitude = latitude
self.longitude = longitude
self.title = title
self.address = address
self.foursquare_id = foursquare_id
self.foursquare_type = foursquare_type
def _serialize(self):
args = {
"latitude": self.latitude,
"longitude": self.longitude,
"title": self.title,
"address": self.address,
}
if self.foursquare_id is not None:
args["foursquare_id"] = self.foursquare_id
if self.foursquare_type is not None:
args["foursquare_type"] = self.foursquare_type
return args
class InlineInputContact:
"""A factory for InputContactMessageContent Telegram objects"""
def __init__(self, phone, first_name, last_name=None, vcard=None):
self.phone_number = phone
self.first_name = first_name
self.last_name = last_name
self.vcard = vcard
def _serialize(self):
args = {
"phone_number": self.phone_number,
"first_name": self.first_name,
}
if self.last_name is not None:
args["last_name"] = self.last_name
if self.vcard is not None:
args["vcard"] = self.vcard
return args
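# Illustrative sketch (hypothetical values, not from the original code): the factories above
# only serialize the optional Telegram fields that were actually provided, e.g.
#   InlineInputContact("+100000000", "Alice")._serialize()
#   -> {"phone_number": "+100000000", "first_name": "Alice"}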
|
from flask import (
Blueprint,
redirect,
render_template,
request,
flash,
)
from flask_babel import gettext
from flask_login import login_required
from app.models import EmailOut
from .forms import *
from .logic import get_emails, send_email
from app.modules.contacts.logic import get_contact
emails = Blueprint('emails', __name__,
template_folder='templates')
# List
@emails.route("/")
@login_required
def index():
emails_out = get_emails()
return render_template('emails/index.html',
emails=emails_out
)
# View
@emails.route("/<email_id>/")
@login_required
def view(email_id):
email = EmailOut.query.get(email_id)
return render_template('emails/view.html',
email=email
)
# ADD
@emails.route("/send", methods=('GET', 'POST'))
@login_required
def send():
form_email = FormEmailOut()
if form_email.validate_on_submit():
send_email(form_email)
if 'return_url' in request.args:
return redirect(request.args.get('return_url'))
return redirect('/account/emails/')
if 'contact_id' in request.args and 'return_url' in request.args:
contact = get_contact(request.args.get('contact_id'))
form_email.email_recipient.data = contact.email
flash(gettext(
'Sending email to {}'.format(contact.email)
))
return_url = request.args.get('return_url')
return render_template('emails/send.html',
form_email=form_email,
contact=contact,
return_url=return_url
)
return render_template('emails/send.html',
form_email=form_email
)
|
import argparse
import logging
import time
from collections import Counter
from pathlib import Path
import PIL
import cv2
import numpy as np
import torch
from utils.datasets import LoadImages
from constants import DEFAULT_IOU_THRESHOLD, DEFAULT_CONF_THRESHOLD, DEFAULT_DETECTED_IMAGE_DIR, \
DEFAULT_INPUT_RESOLUTION, RED, BLUE, END_COLOR, NORMALIZATION_FACTOR
from python_model.coreml_model import CoreMLModel
# Assumed import path for the ONNX backend used below, mirroring the other python_model imports.
from python_model.onnx_model import ONNXModel
from python_model.pytorch_model import PyTorchModel
from python_model.tflite_model import TFLiteModel
from python_utils.plots import plot_boxes
IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo']
class Detector:
def __init__(self, model_path, pt_input_resolution=DEFAULT_INPUT_RESOLUTION):
logging.basicConfig(format='%(asctime)s %(message)s', datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO)
self.model_path = model_path
self.pt_input_resolution = pt_input_resolution
self.__init_model()
def __init_model(self):
# Init model (TFLite, CoreML, PyTorch)
self.model_name = Path(self.model_path).name
if not Path(self.model_path).exists():
logging.info(f"{RED}Model not found:{END_COLOR} '{self.model_path}'")
            exit(1)
logging.info('SETUP: finding the type of the model...')
if self.model_name.endswith('.tflite'):
logging.info('- The model is a TFLite model.')
self.prefix = 'tflite'
try:
self.model = TFLiteModel(self.model_path)
except ValueError as e:
                raise ValueError(f"{RED}An error occurred while initializing the model:{END_COLOR} {e}")
self.do_normalize, self.img_size, self.batch_size, self.pil_image, self.channel_first = self.model.get_input_info()
self.labels = self.model.get_labels()
elif self.model_name.endswith('.mlmodel'):
logging.info('- The model is a CoreML model.')
self.prefix = 'coreml'
try:
self.model = CoreMLModel(self.model_path)
except Exception as e:
                raise Exception(f"{RED}An error occurred while initializing the model:{END_COLOR} {e}")
self.do_normalize, self.img_size, self.batch_size, self.pil_image, self.channel_first = self.model.get_input_info()
self.labels = self.model.get_labels()
elif self.model_name.endswith('.onnx'):
            logging.info('- The model is an ONNX model.')
self.prefix = 'onnx'
try:
self.model = ONNXModel(self.model_path)
except Exception as e:
raise Exception(f"{RED}An error occurred while initializing the model:{END_COLOR} {e}")
self.do_normalize, self.img_size, self.batch_size, self.pil_image, self.channel_first = self.model.get_input_info()
self.labels = self.model.get_labels()
elif self.model_name.endswith('.pt'):
logging.info('- The model is a PyTorch model.')
self.prefix = 'pytorch'
try:
self.model = PyTorchModel(self.model_path, self.pt_input_resolution)
except Exception as e:
                raise Exception(f"{RED}An error occurred while initializing the model:{END_COLOR} {e}")
self.do_normalize, self.img_size, self.batch_size, self.pil_image, self.channel_first = self.model.get_input_info()
self.labels = self.model.get_labels()
else:
logging.info(
f"{RED}Model format not supported:{END_COLOR} {self.model_name}. Supported format: .mlmodel, .onnx, .tflite, .pt.")
            exit(1)
def detect_image(self, img, iou_threshold=DEFAULT_IOU_THRESHOLD, conf_threshold=DEFAULT_CONF_THRESHOLD):
img = img.float()
if self.do_normalize:
# Normalize image
img = img.float() / NORMALIZATION_FACTOR
if not self.channel_first:
img = img.permute(1, 2, 0)
if self.pil_image:
img = PIL.Image.fromarray(img.numpy().astype(np.uint8), 'RGB')
else:
img = img.unsqueeze(0)
# Inference
start_time = time.time()
yxyx, classes, scores, nb_detected = self.model.predict(img, iou_threshold, conf_threshold)
inference_time = time.time() - start_time
yxyx = yxyx if isinstance(yxyx, torch.Tensor) else torch.from_numpy(yxyx)
classes = classes if isinstance(classes, torch.Tensor) else torch.from_numpy(classes)
scores = scores if isinstance(scores, torch.Tensor) else torch.from_numpy(scores)
return yxyx, classes, scores, nb_detected, inference_time
def detect(self, img_dir, max_img=-1, out_path=DEFAULT_DETECTED_IMAGE_DIR,
iou_threshold=DEFAULT_IOU_THRESHOLD,
conf_threshold=DEFAULT_CONF_THRESHOLD, save_img=True, return_image=False, verbose=True):
img_path = Path(img_dir)
out_path = Path(out_path)
if not img_path.exists():
logging.info(f"{RED}Directory not found:{END_COLOR} {img_dir}.")
exit(1)
dataset = LoadImages(img_dir, img_size=self.img_size, auto=False)
if not out_path.exists() and save_img:
out_path.mkdir(exist_ok=True, parents=True)
detections = []
inference_times = []
image_names = []
imgs_annotated = []
try:
if verbose:
logging.info(f"{BLUE}DETECTION START{END_COLOR}")
for i, (img_path, img, img_orig, _) in enumerate(dataset):
if max_img != -1 and (i + 1) * self.batch_size > max_img:
break
img_name = Path(img_path).name
image_names.append(img_name)
if verbose:
logging.info(
f"{BLUE}Image {i + 1}:{END_COLOR} ({img_name}: {img_orig.shape[0]}x{img_orig.shape[1]})")
img = torch.from_numpy(img)
yxyx, classes, scores, nb_detected, inference_time = self.detect_image(img, iou_threshold=iou_threshold,
conf_threshold=conf_threshold)
end_time = time.time()
inference_times.append(inference_time)
# Plot the bounding box
if save_img or return_image:
plot_boxes(self.img_size, [img_orig], yxyx, classes, scores, nb_detected, self.labels)
end_plot_time = time.time()
# Save the results
img_annotated = img_orig
                out_path_img = str(out_path / f"{self.prefix}_{img_name.rsplit('.', 1)[0]}_boxes_{self.model_name.rsplit('.', 1)[0]}.png")
if save_img:
cv2.imwrite(out_path_img, img_annotated)
if return_image:
imgs_annotated.append(img_annotated)
counter = get_counter_detections(self.labels, classes, nb_detected)
if verbose:
logging.info(f"\t- {sum([v for v in counter.values()])} detected objects")
for k, v in counter.items():
logging.info(f"\t\t{v} {k}{'s' if v > 1 else ''}")
detections.append({k: v for k, v in counter.items()})
if verbose:
logging.info(
f"\t- It took {inference_time:.3f} seconds to run the inference")
if save_img:
logging.info(f"\t- It took {end_plot_time - end_time:.3f} seconds to plot the results.")
logging.info(f"The output is saved in {out_path_img}.")
except IndexError as e:
            raise IndexError(f"An error occurred during the detection: {e}")
return detections, inference_times, image_names, imgs_annotated
def get_counter_detections(labels, classes, nb_detected):
# Get the number of detections and their label
nb_det = int(nb_detected[0])
detected_objects = [labels[int(x)] for x in classes[0][:nb_det]]
counter = Counter(detected_objects)
return counter
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str,
required=True,
help=f"The path to the converted model (tflite or coreml).")
parser.add_argument('--img-dir', type=str, required=True,
help=f"The path to the images.")
parser.add_argument('--max-img', type=int, default=-1,
help="The number of images to predict (maximum) among all the images in the directory. Default: -1 (no limit: all images in the directory will be processed).")
parser.add_argument('--out', type=str, default=DEFAULT_DETECTED_IMAGE_DIR,
help=f"The path to the output directory (where to save the results). Default: '{DEFAULT_DETECTED_IMAGE_DIR}'.")
parser.add_argument('--iou-threshold', type=float, default=DEFAULT_IOU_THRESHOLD,
help=f'IoU threshold. Default: {DEFAULT_IOU_THRESHOLD}')
parser.add_argument('--conf-threshold', type=float, default=DEFAULT_CONF_THRESHOLD,
help=f'Confidence threshold. Default: {DEFAULT_CONF_THRESHOLD}')
parser.add_argument('--no-save', action='store_true', help="If set, does not save the images.")
opt = parser.parse_args()
detector = Detector(opt.model)
detector.detect(img_dir=opt.img_dir, max_img=opt.max_img, out_path=opt.out,
iou_threshold=opt.iou_threshold, conf_threshold=opt.conf_threshold, save_img=not opt.no_save)
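# Illustrative programmatic usage (hypothetical paths, not part of the original script):
#   detector = Detector('weights/model.mlmodel')
#   detections, inference_times, image_names, _ = detector.detect(
#       'sample_images/', max_img=5, save_img=False)
#   # `detections` is a list of {label: count} dicts, one per processed image.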
|
import tensorflow as tf
from utils.bert import bert_utils
from task_module import pretrain, classifier, pretrain_albert
def get_pretrain_logits(model_config,
model_api,
features,
labels,
logits,
mode,
target,
embedding_table_adv=None,
embedding_seq_adv=None,
stop_gradient=False,
sampled_binary_mask=None,
is_training=True,
pretrain_loss_type="normal",
emb_adv_pos="emb_adv_post",
**kargs):
    model = model_api(model_config, features, labels,
                      mode, target, reuse=tf.AUTO_REUSE,
                      embedding_table_adv=embedding_table_adv,
                      embedding_seq_adv=embedding_seq_adv,
                      stop_gradient=stop_gradient,
                      emb_adv_pos=emb_adv_pos,
                      **kargs)
    # n_block, input_ori_ids and return_type are referenced below but never defined in this
    # snippet; assume (hedged reconstruction) that they come from the config, features and kwargs.
    n_block = model_config.get("n_block", 1)
    input_ori_ids = features.get("input_ori_ids", None)
    return_type = kargs.get("return_type", None)
if model_config.model_type == 'bert':
masked_lm_fn = pretrain.get_masked_lm_output
seq_masked_lm_fn = pretrain.seq_mask_masked_lm_output
print("==apply bert masked lm==")
elif model_config.model_type == 'albert':
masked_lm_fn = pretrain_albert.get_masked_lm_output
seq_masked_lm_fn = pretrain_albert.seq_mask_masked_lm_output
print("==apply albert masked lm==")
elif model_config.model_type == 'funnelbert':
masked_lm_fn = pretrain.get_masked_lm_output
seq_masked_lm_fn = pretrain.seq_mask_masked_lm_output
print("==apply funnelbert masked lm==")
else:
masked_lm_fn = pretrain.get_masked_lm_output
seq_masked_lm_fn = pretrain_albert.seq_mask_masked_lm_output
print("==apply bert masked lm==")
if model_config.get("model_type", "bert") == "funnelbert":
if n_block > 1 and model_config.get('pretrain_loss', "ae") == "ae":
seq_masked_lm_fn = pretrain.denoise_autoencoder
discriminator_mode = model_config.get('discriminator_mode', "ce_loss")
loss_converage = model_config.get("loss_converage", 'global')
tf.logging.info("***** discriminator_mode: %s *****"%(discriminator_mode))
tf.logging.info("***** loss_converage: %s *****"%(loss_converage))
tf.logging.info(seq_masked_lm_fn)
model_config.corrupted = True
tf.logging.info("*** apply reconstruction ***")
if loss_converage in ['global']:
sampled_binary_mask = tf.identity(features['input_mask'])
tf.logging.info("***** loss_converage: %s ***** with input-mask"%(loss_converage))
elif loss_converage in ['local']:
sampled_binary_mask = tf.reduce_sum(features['target_mapping'], axis=1)
tf.logging.info("***** loss_converage: %s ***** with target-mapping mask"%(loss_converage))
else:
discriminator_mode = model_config.get('discriminator_mode', "ce_loss")
loss_converage = model_config.get("loss_converage", 'global')
tf.logging.info(seq_masked_lm_fn)
else:
discriminator_mode = "ce_loss"
loss_converage = model_config.get("loss_converage", 'global')
tf.logging.info(seq_masked_lm_fn)
tf.logging.info(masked_lm_fn)
if input_ori_ids is not None and model_config.get("corrupted", True):
(masked_lm_loss,
masked_lm_example_loss,
masked_lm_log_probs,
masked_lm_mask) = seq_masked_lm_fn(model_config,
model.get_sequence_output(output_type=return_type),
model.get_embedding_table(),
features['normal_input_mask'],
features['input_ori_ids'],
features['input_ids'],
sampled_binary_mask,
reuse=tf.AUTO_REUSE,
embedding_projection=model.get_embedding_projection_table(),
pretrain_loss_type="normal",
discriminator_mode=discriminator_mode,
loss_converage=loss_converage)
masked_lm_ids = input_ori_ids
tf.logging.info("*** apply sequential mlm loss ***")
else:
masked_lm_positions = features["masked_lm_positions"]
masked_lm_ids = features["masked_lm_ids"]
masked_lm_weights = features["masked_lm_weights"]
(masked_lm_loss,
masked_lm_example_loss,
masked_lm_log_probs,
masked_lm_mask) = masked_lm_fn(
model_config,
model.get_sequence_output(output_type=return_type),
model.get_embedding_table(),
masked_lm_positions,
masked_lm_ids,
masked_lm_weights,
reuse=tf.AUTO_REUSE,
embedding_projection=model.get_embedding_projection_table(),
pretrain_loss_type="normal",
discriminator_mode=discriminator_mode,
loss_converage=loss_converage)
tf.logging.info("*** apply bert-like mlm loss ***")
return masked_lm_log_probs
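# Hedged note on the expected inputs (reconstructed from the code above, not documented in the
# original): `features` is a dict of tensors that must contain either
#   - 'input_ori_ids', 'input_ids', 'input_mask', 'normal_input_mask' (and optionally
#     'target_mapping') for the sequential / denoising MLM branch, or
#   - 'masked_lm_positions', 'masked_lm_ids', 'masked_lm_weights' for the BERT-style branch.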
|
from pelita.player import SimpleTeam
from .demo_player import KangarooPlayer
# (please use relative imports inside your package)
# The default myteam factory function, which this package must export.
# It must return an instance of `SimpleTeam` containing
# the name of the team and the respective instances for
# the first and second player.
def team():
return SimpleTeam("Kangaroo Team", KangarooPlayer(), KangarooPlayer())
# For testing purposes, one may use alternate factory functions::
#
# def alternate_team():
# return SimpleTeam("Our alternate Team",
# AlternatePlayer(), AlternatePlayer())
#
# To be used as follows::
#
# $ pelita path_to/groupN/:alternate_team
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020-2021 Ramon van der Winkel.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
from django.utils import timezone
from django.test import TestCase
from BasisTypen.models import IndivWedstrijdklasse
from Competitie.models import (Competitie, DeelCompetitie, CompetitieKlasse,
LAAG_REGIO, LAAG_RK, LAAG_BK)
import datetime
def zet_competitie_fase(comp, fase):
""" deze helper weet hoe de competitie datums gemanipuleerd moeten worden
zodat models.Competitie.zet_fase() de gevraagde fase terug zal geven
"""
if fase == 'Z':
comp.alle_bks_afgesloten = True
comp.save()
return
comp.alle_bks_afgesloten = False
now = timezone.now()
vandaag = datetime.date(year=now.year, month=now.month, day=now.day)
gister = vandaag - datetime.timedelta(days=1)
morgen = vandaag + datetime.timedelta(days=1)
if fase >= 'P':
        # BK phases
comp.alle_rks_afgesloten = True
if fase == 'P':
comp.bk_eerste_wedstrijd = morgen
comp.save()
return
comp.bk_eerste_wedstrijd = gister
if fase == 'Q':
            comp.bk_laatste_wedstrijd = morgen      # today is also allowed
comp.save()
return
        # phase R or S: establish the results + close the BK
comp.bk_laatste_wedstrijd = gister
comp.save()
return
comp.alle_rks_afgesloten = False
if fase >= 'K':
        # RK phases
comp.alle_regiocompetities_afgesloten = True
if fase == 'K':
comp.rk_eerste_wedstrijd = morgen
comp.save()
return
comp.rk_eerste_wedstrijd = gister
if fase == 'L':
            comp.rk_laatste_wedstrijd = morgen      # today is also allowed
comp.save()
return
        # phase M or N: establish the results in each rayon + close the RK
comp.rk_laatste_wedstrijd = gister
comp.save()
return
comp.alle_regiocompetities_afgesloten = False
    # phase A began when the competition was created
if fase == 'A':
comp.begin_aanmeldingen = morgen
comp.klassegrenzen_vastgesteld = False
comp.save()
return
if comp.competitieklasse_set.count() == 0: # pragma: no cover
raise NotImplementedError("Kan niet naar fase %s zonder competitie klassen!" % fase)
comp.klassegrenzen_vastgesteld = True
comp.begin_aanmeldingen = gister
if fase == 'B':
comp.einde_aanmeldingen = morgen
comp.save()
return
comp.einde_aanmeldingen = gister
if fase == 'C':
        comp.einde_teamvorming = morgen     # today is also allowed
comp.save()
return
comp.einde_teamvorming = gister
if fase == 'D':
comp.eerste_wedstrijd = morgen
comp.save()
return
comp.eerste_wedstrijd = gister
if fase == 'E':
comp.laatst_mogelijke_wedstrijd = morgen
comp.save()
return
comp.laatst_mogelijke_wedstrijd = gister
    # phase F or G: establish the results in each regio + close the regiocompetitie
comp.save()
return
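# Illustrative usage (as exercised by the tests below): force a competition into phase B before
# creating test data, e.g.
#   zet_competitie_fase(comp, 'B')
#   comp.bepaal_fase()   # comp.fase == 'B'
# Phases F and R cannot be forced by this helper (see the sequence used in the test).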
class TestCompetitieFase(TestCase):
def test_zet_fase(self):
now = timezone.now()
now = datetime.date(year=now.year, month=now.month, day=now.day)
einde_jaar = datetime.date(year=now.year, month=12, day=31)
if now == einde_jaar: # pragma: no cover
einde_jaar += datetime.timedelta(days=1) # needed once a year..
gisteren = now - datetime.timedelta(days=1)
        # create a competition and check the phase
comp = Competitie()
comp.begin_jaar = 2000
comp.uiterste_datum_lid = datetime.date(year=2000, month=1, day=1)
comp.begin_aanmeldingen = comp.einde_aanmeldingen = comp.einde_teamvorming = einde_jaar
comp.eerste_wedstrijd = comp.laatst_mogelijke_wedstrijd = einde_jaar
comp.rk_eerste_wedstrijd = comp.rk_laatste_wedstrijd = einde_jaar
comp.bk_eerste_wedstrijd = comp.bk_laatste_wedstrijd = einde_jaar
comp.save()
deelcomp_regio = DeelCompetitie(competitie=comp,
is_afgesloten=False,
laag=LAAG_REGIO)
deelcomp_regio.save()
deelcomp_rk = DeelCompetitie(competitie=comp,
is_afgesloten=False,
laag=LAAG_RK)
deelcomp_rk.save()
deelcomp_bk = DeelCompetitie(competitie=comp,
is_afgesloten=False,
laag=LAAG_BK)
deelcomp_bk.save()
comp.bepaal_fase()
self.assertEqual(comp.fase, 'A')
comp.begin_aanmeldingen = gisteren
comp.bepaal_fase()
self.assertEqual(comp.fase, 'A')
        # create the classes
indiv = IndivWedstrijdklasse.objects.all()[0]
CompetitieKlasse(competitie=comp, indiv=indiv, min_ag=0.0).save()
comp.begin_aanmeldingen = comp.einde_aanmeldingen
comp.bepaal_fase()
self.assertEqual(comp.fase, 'A')
comp.klassegrenzen_vastgesteld = True
comp.bepaal_fase()
self.assertEqual(comp.fase, 'A')
        # between the start and the end of registrations = B
comp.begin_aanmeldingen = gisteren
comp.bepaal_fase()
self.assertEqual(comp.fase, 'B')
        # after the end of registrations until einde_teamvorming = C
comp.einde_aanmeldingen = gisteren
comp.bepaal_fase()
self.assertEqual(comp.fase, 'C')
        # after the end of team forming until the first match = D
comp.einde_teamvorming = gisteren
comp.bepaal_fase()
self.assertEqual(comp.fase, 'D')
        # after the first match = E
comp.eerste_wedstrijd = gisteren
comp.bepaal_fase()
self.assertEqual(comp.fase, 'E')
        # after the last match = F
comp.laatst_mogelijke_wedstrijd = gisteren
comp.bepaal_fase()
self.assertEqual(comp.fase, 'F')
        # after closing the regio deelcomp = G
deelcomp_regio.is_afgesloten = True
deelcomp_regio.save()
comp.bepaal_fase()
self.assertEqual(comp.fase, 'G')
comp.alle_regiocompetities_afgesloten = True
comp.bepaal_fase()
self.assertEqual(comp.fase, 'K')
comp.rk_eerste_wedstrijd = gisteren
comp.bepaal_fase()
self.assertEqual(comp.fase, 'L')
comp.rk_laatste_wedstrijd = gisteren
comp.bepaal_fase()
self.assertEqual(comp.fase, 'M')
        # after closing the RK = N
deelcomp_rk.is_afgesloten = True
deelcomp_rk.save()
comp.bepaal_fase()
self.assertEqual(comp.fase, 'N')
comp.alle_rks_afgesloten = True
comp.bepaal_fase()
self.assertEqual(comp.fase, 'P')
comp.bk_eerste_wedstrijd = gisteren
comp.bepaal_fase()
self.assertEqual(comp.fase, 'Q')
comp.bk_laatste_wedstrijd = gisteren
comp.bepaal_fase()
self.assertEqual(comp.fase, 'R')
        # after closing the BK = S
deelcomp_bk.is_afgesloten = True
deelcomp_bk.save()
comp.bepaal_fase()
self.assertEqual(comp.fase, 'S')
comp.alle_bks_afgesloten = True
comp.bepaal_fase()
self.assertEqual(comp.fase, 'Z')
def test_zet_competitie_fase(self):
""" test de helper functie die de competitie fase forceert """
einde_jaar = datetime.date(year=2000, month=12, day=31)
comp = Competitie()
comp.begin_jaar = 2000
comp.uiterste_datum_lid = datetime.date(year=2000, month=1, day=1)
comp.begin_aanmeldingen = comp.einde_aanmeldingen = comp.einde_teamvorming = einde_jaar
comp.eerste_wedstrijd = comp.laatst_mogelijke_wedstrijd = einde_jaar
comp.rk_eerste_wedstrijd = comp.rk_laatste_wedstrijd = einde_jaar
comp.bk_eerste_wedstrijd = comp.bk_laatste_wedstrijd = einde_jaar
comp.save()
comp.bepaal_fase()
self.assertEqual(comp.fase, 'A')
zet_competitie_fase(comp, 'A')
comp.bepaal_fase()
self.assertEqual(comp.fase, 'A')
        # create the classes and check the phase again
indiv = IndivWedstrijdklasse.objects.all()[0]
CompetitieKlasse(competitie=comp, indiv=indiv, min_ag=0.0).save()
zet_competitie_fase(comp, 'A')
comp.bepaal_fase()
self.assertEqual(comp.fase, 'A')
comp.klassegrenzen_vastgesteld = True
zet_competitie_fase(comp, 'A')
comp.bepaal_fase()
self.assertEqual(comp.fase, 'A')
        sequence = 'BCDEGKLNPQSQPNLKGEDCBKSEBZLQC'  # note: F and R cannot be forced
for fase in sequence:
zet_competitie_fase(comp, fase)
comp.bepaal_fase()
self.assertEqual(comp.fase, fase)
# for
# end of file
|
import time
from authlib.integrations.sqla_oauth2 import OAuth2ClientMixin, OAuth2TokenMixin, OAuth2AuthorizationCodeMixin
from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import relationship
from core.models import Base, ModelMixin
class Client(Base, ModelMixin, OAuth2ClientMixin):
__tablename__ = 'oauth_clients'
user_id = Column(Integer, ForeignKey('user_users.id', ondelete='CASCADE'))
user = relationship('User')
class Token(Base, ModelMixin, OAuth2TokenMixin):
__tablename__ = 'oauth_tokens'
user_id = Column(Integer, ForeignKey('user_users.id', ondelete='CASCADE'))
user = relationship('User')
def is_refresh_token_active(self):
if self.revoked:
return False
expires_at = self.issued_at + self.expires_in * 2
return expires_at >= time.time()
class AuthorizationCode(Base, ModelMixin, OAuth2AuthorizationCodeMixin):
__tablename__ = 'oauth_codes'
user_id = Column(Integer, ForeignKey('user_users.id', ondelete='CASCADE'))
user = relationship('User')
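# Worked example for Token.is_refresh_token_active (hypothetical numbers, not from the source):
# with issued_at=1000 and expires_in=3600 the refresh token is considered active until
# 1000 + 3600 * 2 = 8200 (epoch seconds), i.e. twice the access-token lifetime.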
|
import logging
import unittest
import psycopg2
import sqlalchemy
import testing.postgresql
from pedsnetdcc.indexes import _indexes_sql, add_indexes, drop_indexes
from pedsnetdcc.utils import make_conn_str, stock_metadata
from pedsnetdcc.transform_runner import TRANSFORMS
from pedsnetdcc.db import Statement
logging.basicConfig(level=logging.DEBUG, filename="logfile")
Postgresql = None
def setUpModule():
# Generate a Postgresql class which caches the init-ed database across
# multiple ephemeral database cluster instances.
global Postgresql
Postgresql = testing.postgresql.PostgresqlFactory(
        cache_initialized_db=True)
def tearDownModule():
# Clear cached init-ed database at end of tests.
Postgresql.clear_cache()
class IndexesTest(unittest.TestCase):
def setUp(self):
self.model_version = '2.2.0'
def test_add_indexes(self):
sql = _indexes_sql(self.model_version)
sample_expected = (
'CREATE INDEX obs_otcn_89a4742c38ecb8ba35_ix ON observation (observation_type_concept_name)', # noqa
'CREATE INDEX dea_s_4906dc6995505fc71431f_ix ON death (site)',
'CREATE INDEX vis_vsaim_f1537dca8da9ab914_ix ON visit_occurrence (visit_start_age_in_months)', # noqa
)
for sample in sample_expected:
self.assertIn(sample, sql)
sample_not_expected = (
'CREATE INDEX idx_concept_vocabulary_id ON concept (vocabulary_id)', # noqa
)
for sample in sample_not_expected:
self.assertNotIn(sample, sql)
def test_drop_indexes(self):
sql = _indexes_sql(self.model_version, drop=True)
sample_expected = (
'DROP INDEX obs_otcn_89a4742c38ecb8ba35_ix',
'DROP INDEX dea_s_4906dc6995505fc71431f_ix',
'DROP INDEX vis_vsaim_f1537dca8da9ab914_ix'
)
for sample in sample_expected:
self.assertIn(sample, sql)
sample_not_expected = (
'DROP INDEX idx_concept_vocabulary_id ON concept (vocabulary_id)',
)
for sample in sample_not_expected:
self.assertNotIn(sample, sql)
def test_add_indexes_for_vocabulary(self):
sql = _indexes_sql(self.model_version, vocabulary=True)
sample_expected = (
'CREATE INDEX idx_concept_class_id ON concept (concept_class_id)',
'CREATE INDEX idx_concept_synonym_id ON concept_synonym (concept_id)' # noqa
)
for sample in sample_expected:
self.assertIn(sample, sql)
sample_not_expected = (
'CREATE INDEX con_lcn_f7a508db6a172c78291_ix ON concept_synonym (language_concept_name)', # noqa
'CREATE INDEX con_s_d9ad76e415cb919c49e49_ix ON concept_class (site)' # noqa
)
for sample in sample_not_expected:
self.assertNotIn(sample, sql)
class IndexesDatabaseTest(unittest.TestCase):
def setUp(self):
# Create a postgres database in a temp directory.
self.postgresql = Postgresql()
self.dburi = self.postgresql.url()
self.conn_str = make_conn_str(self.dburi)
self.engine = sqlalchemy.create_engine(self.dburi)
# Create transformed pedsnet metadata
self.model_version = '2.2.0'
self.metadata = stock_metadata(self.model_version)
for t in TRANSFORMS:
self.metadata = t.modify_metadata(self.metadata)
def tearDown(self):
# Destroy the postgres database.
self.postgresql.stop()
def expected_measurement_index_names(self):
# Return a set of expected measurement (non-vocab) index names.
# This may need to be modified if the PEDSnet CDM or transformations
# change.
return {'idx_measurement_concept_id',
'idx_measurement_person_id',
'idx_measurement_visit_id',
'mea_pcn_74e171086ab53fdef03_ix',
'mea_maim_fafec5cb283b981155_ix',
'mea_mcn_2396c11b8e9dc80fad6_ix',
'mea_mraim_b3652804e85e68491_ix',
'mea_ucn_a1d8526ef0526700f9b_ix',
'mea_vacn_cdbccecc93bc04359c_ix',
'mea_mtcn_0512b6f39c80e05694_ix',
'mea_ocn_adee9ca63d3ce5cf5ca_ix',
'mea_mscn_a15f3175cfbed7967a_ix',
'mea_rlocn_49286b9222656be21_ix',
'mea_s_c389be51cb02c33ef7d70_ix',
'mea_rhocn_2ddf11b3636910434_ix',
}
def expected_concept_index_names(self):
# Return a set of expected concept (vocab) index names.
return {'idx_concept_class_id',
'idx_concept_code',
'idx_concept_domain_id',
'idx_concept_vocabulary_id',
}
def test_drop(self):
# Instantiate the transformed pedsnet database structure.
self.metadata.create_all(self.engine)
# Grab the measurement table created
measurement = sqlalchemy.Table('measurement', sqlalchemy.MetaData(),
autoload=True,
autoload_with=self.engine)
index_names = [i.name for i in measurement.indexes]
# Check that the measurement table has all extra indexes.
for idx in self.expected_measurement_index_names():
self.assertIn(idx, index_names)
# Drop indexes on the non-vocabulary tables.
drop_indexes(self.conn_str, self.model_version)
# Check that the measurement table has no indexes
measurement = sqlalchemy.Table('measurement', sqlalchemy.MetaData(),
autoload=True,
autoload_with=self.engine)
self.assertEqual(len(measurement.indexes), 0)
# Check that vocab indexes were not dropped
concept = sqlalchemy.Table('concept', sqlalchemy.MetaData(),
autoload=True, autoload_with=self.engine)
concept_index_names = [i.name for i in concept.indexes]
self.assertNotEqual(self.expected_concept_index_names(),
concept_index_names)
# Check that an exception is raised when double-dropping
with self.assertRaises(psycopg2.ProgrammingError):
drop_indexes(self.conn_str, self.model_version)
def test_add(self):
# Instantiate the transformed pedsnet database structure.
self.metadata.create_all(self.engine)
# Drop indexes on the non-vocabulary tables.
drop_indexes(self.conn_str, self.model_version)
# Drop indexes on vocabulary tables.
drop_indexes(self.conn_str, self.model_version, vocabulary=True)
# Verify that the measurement table has no indexes
measurement = sqlalchemy.Table('measurement', sqlalchemy.MetaData(),
autoload=True,
autoload_with=self.engine)
self.assertEqual(len(measurement.indexes), 0)
# Verify that the concept table has no indexes
concept = sqlalchemy.Table('concept', sqlalchemy.MetaData(),
autoload=True, autoload_with=self.engine)
self.assertEqual(len(concept.indexes), 0)
# Create indexes on non-vocabulary tables.
add_indexes(self.conn_str, self.model_version)
# Check that the measurement table has the right indexes
measurement = sqlalchemy.Table('measurement', sqlalchemy.MetaData(),
autoload=True,
autoload_with=self.engine)
self.assertEqual(self.expected_measurement_index_names(),
set([i.name for i in measurement.indexes]))
# Check that the concept table has no indexes
concept = sqlalchemy.Table('concept', sqlalchemy.MetaData(),
autoload=True, autoload_with=self.engine)
self.assertEqual(len(concept.indexes), 0)
# Check that an exception is raised if we double-add
with self.assertRaises(psycopg2.ProgrammingError):
add_indexes(self.conn_str, self.model_version)
def test_add_force(self):
# Instantiate the transformed pedsnet database structure (including
# indexes)
self.metadata.create_all(self.engine)
# Create indexes on non-vocabulary tables. This should not raise
# an exception, even though the indexes already exist.
add_indexes(self.conn_str, self.model_version, force=True)
def test_drop_force(self):
# Instantiate the transformed pedsnet database structure.
self.metadata.create_all(self.engine)
# Remove an index
Statement('DROP INDEX idx_measurement_concept_id').execute(
self.conn_str)
# Verify that this index is gone
measurement = sqlalchemy.Table('measurement', sqlalchemy.MetaData(),
autoload=True,
autoload_with=self.engine)
self.assertNotIn('idx_measurement_concept_id',
[i.name for i in measurement.indexes])
# Drop indexes on the non-vocabulary tables.
# This should not raise an exception.
drop_indexes(self.conn_str, self.model_version, force=True)
|
# -----------------------------------------------------------
# Behave Step Definitions for Aries DIDComm File and MIME Types, RFC 0044:
# https://github.com/hyperledger/aries-rfcs/blob/main/features/0044-didcomm-file-and-mime-types/README.md
#
# -----------------------------------------------------------
from behave import given, then
import json
from agent_backchannel_client import agent_backchannel_POST
@given('"{agent}" is running with parameters "{parameters}"')
def step_impl(context, agent: str, parameters: str):
agent_url = context.config.userdata.get(agent)
params_json = json.loads(parameters)
data = {
"parameters": params_json
}
(resp_status, resp_text) = agent_backchannel_POST(agent_url + "/agent/command/", "agent", operation="start", data=data)
assert resp_status == 200, f'resp_status {resp_status} is not 200; {resp_text}'
@then('"{requester}" can\'t accept the invitation')
def step_impl(context, requester):
requester_url = context.config.userdata.get(requester)
data = context.responder_invitation
data["use_existing_connection"] = False
(resp_status, resp_text) = agent_backchannel_POST(requester_url + "/agent/command/", "out-of-band", operation="receive-invitation", data=data)
assert resp_status == 500, f'agent command should fail but resp_status {resp_status} is not 500; {resp_text}'
|
import ConfigSpace
def get_hyperparameter_search_space_small(seed):
"""
Small version of gradient boosting config space
Parameters
----------
seed: int
Random seed that will be used to sample random configurations
Returns
-------
cs: ConfigSpace.ConfigurationSpace
The configuration space object
"""
cs = ConfigSpace.ConfigurationSpace('sklearn.ensemble.GradientBoostingClassifier', seed)
# fixed to deviance, as exponential requires two classes
learning_rate = ConfigSpace.hyperparameters.UniformFloatHyperparameter(
name='gradientboostingclassifier__learning_rate', lower=0.01, upper=2, default_value=0.1, log=True)
n_estimators = ConfigSpace.hyperparameters.UniformIntegerHyperparameter(
name='gradientboostingclassifier__n_estimators', lower=64, upper=512, default_value=100, log=False)
    subsample = ConfigSpace.hyperparameters.UniformFloatHyperparameter(
name='gradientboostingclassifier__subsample', lower=0.0, upper=1.0, default_value=1.0)
min_samples_split = ConfigSpace.hyperparameters.UniformIntegerHyperparameter(
name='gradientboostingclassifier__min_samples_split', lower=2, upper=20, default_value=2)
max_depth = ConfigSpace.hyperparameters.UniformIntegerHyperparameter(
name='gradientboostingclassifier__max_depth', lower=1, upper=10, default_value=3)
cs.add_hyperparameters([
learning_rate,
n_estimators,
subsample,
min_samples_split,
max_depth,
])
return cs
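# Minimal usage sketch (assumes the ConfigSpace package is installed; not part of the original):
#   cs = get_hyperparameter_search_space_small(seed=42)
#   config = cs.sample_configuration()
#   params = config.get_dictionary()  # e.g. {'gradientboostingclassifier__learning_rate': 0.13, ...}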
|
# -*- coding: utf-8 -*-
import os, sys, random
import argparse
import numpy as np
import toml
import asteval
from pbpl import compton
# import Geant4 as g4
# from Geant4.hepunit import *
import h5py
import pbpl.common as common
from pbpl.common.units import *
from collections import namedtuple
def get_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Combine energy deposition',
epilog='''\
Example:
.. code-block:: sh
pbpl-compton-combine-deposition combine-deposition.toml
''')
parser.add_argument(
'config_filename', metavar='conf-file',
help='Configuration file')
return parser
def get_args():
parser = get_parser()
args = parser.parse_args()
args.conf = toml.load(args.config_filename)
return args
def get_input(conf):
edep = {}
for c in conf:
with h5py.File(c['Filename'], 'r') as fin:
run_index = tuple(c['RunIndex'])
_num_events = fin['num_events'][run_index]
gin = fin[c['Group']]
_edep = gin['edep'][run_index]*MeV
_xbin = gin['xbin'][:]*mm
_ybin = gin['ybin'][:]*mm
_zbin = gin['zbin'][:]*mm
if len(edep) == 0:
xbin = _xbin
ybin = _ybin
zbin = _zbin
num_events = _num_events
else:
assert(np.array_equal(xbin, _xbin))
assert(np.array_equal(ybin, _ybin))
assert(np.array_equal(zbin, _zbin))
assert(num_events == _num_events)
edep[c['Key']] = _edep
return edep, xbin, ybin, zbin, num_events
def main():
args = get_args()
conf = args.conf
edep, xbin, ybin, zbin, num_events = get_input(conf['Input'])
with h5py.File(conf['Output']['Filename'], 'w') as fout:
fout['num_events'] = np.array((num_events,))
fout['i0'] = np.array((np.string_('yo'),))
if 'Group' in conf['Output']:
gout = fout.create_group(conf['Output']['Group'])
else:
gout = fout
gout['edep'] = ((edep['A'] + edep['B'])/MeV).astype('float32')[np.newaxis,:]
gout['edep'].attrs.create('num_events', num_events)
gout['edep'].attrs.create('unit', np.string_('MeV'))
gout['xbin'] = xbin/mm
gout['ybin'] = ybin/mm
gout['zbin'] = zbin/mm
for dset_name in ['xbin', 'ybin', 'zbin']:
gout[dset_name].attrs.create('unit', np.string_('mm'))
fout.close()
return 0
if __name__ == '__main__':
sys.exit(main())
|
#!/usr/bin/env python
# pylint: disable=missing-docstring
#
# Copyright 2017, 2018 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import os
import pipes
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: docker_creds
short_description: Creates/updates a 'docker login' file in place of using 'docker login'
version_added: "2.4"
description:
- This module creates a docker config.json file in the directory provided by 'path'
on hosts that do not support 'docker login' but need the file present for
registry authentication purposes of various other services.
options:
path:
description:
            - The path to the directory in which the docker config.json will be created or updated.
required: true
registry:
description:
- This is the registry the credentials are for.
required: true
username:
description:
- This is the username to authenticate to the registry with.
required: true
password:
description:
- This is the password to authenticate to the registry with.
required: true
test_login:
description:
- Attempt to connect to registry with username + password provided.
default: true
required: false
test_timeout:
description:
- Timeout in seconds for each attempt to connect to registry.
        default: 60
required: false
author:
- "Michael Gugino <mgugino@redhat.com>"
'''
EXAMPLES = '''
# Pass in a message
- name: Place credentials in file
docker_creds:
path: /root/.docker
registry: registry.example.com:443
username: myuser
password: mypassword
test_login: True
test_timeout: 30
'''
def check_dest_dir_exists(module, dest):
'''Check if dest dir is present and is a directory'''
dir_exists = os.path.exists(dest)
if dir_exists:
if not os.path.isdir(dest):
msg = "{} exists but is not a directory".format(dest)
result = {'failed': True,
'changed': False,
'msg': msg,
'state': 'unknown'}
module.fail_json(**result)
else:
return 1
else:
return 0
def create_dest_dir(module, dest):
try:
os.makedirs(dest, mode=0o700)
except OSError as oserror:
result = {'failed': True,
'changed': False,
'msg': str(oserror),
'state': 'unknown'}
module.fail_json(**result)
def load_config_file(module, dest):
'''load the config.json in directory dest'''
conf_file_path = os.path.join(dest, 'config.json')
if os.path.exists(conf_file_path):
# Try to open the file and load json data
try:
with open(conf_file_path) as conf_file:
data = conf_file.read()
jdata = json.loads(data)
except IOError as ioerror:
result = {'failed': True,
'changed': False,
'msg': str(ioerror),
'state': 'unknown'}
module.fail_json(**result)
except ValueError as jsonerror:
result = {'failed': True,
'changed': False,
'msg': str(jsonerror),
'state': 'unknown'}
module.fail_json(**result)
return jdata
else:
# File doesn't exist, we just return an empty dictionary.
return {}
# pylint: disable=too-many-arguments
def gen_skopeo_cmd(registry, username, password, proxy_vars, test_timeout, test_image, tls_verify):
'''Generate skopeo command to run'''
    skopeo_temp = ("{proxy_vars} timeout {test_timeout} skopeo inspect"
                   " --tls-verify={tls_verify} {creds} docker://{registry}/{test_image}")
# this will quote the entire creds argument to account for special chars.
creds = pipes.quote('--creds={}:{}'.format(username, password))
skopeo_args = {'proxy_vars': proxy_vars, 'test_timeout': test_timeout, 'creds': creds,
'registry': registry, 'test_image': test_image,
'tls_verify': tls_verify}
return skopeo_temp.format(**skopeo_args).strip()
def validate_registry_login(module, skopeo_command):
'''Attempt to use credentials to log into registry'''
# skopeo doesn't honor docker config file proxy settings; need to specify
# proxy vars on the cli.
rtnc, _, err = module.run_command(skopeo_command, use_unsafe_shell=True)
if rtnc:
result = {'failed': True,
'changed': False,
'msg': str(err),
'state': 'unknown'}
module.fail_json(**result)
def update_config(docker_config, registry, encoded_auth):
'''Add our registry auth credentials into docker_config dict'''
# Add anything that might be missing in our dictionary
if 'auths' not in docker_config:
docker_config['auths'] = {}
if registry not in docker_config['auths']:
docker_config['auths'][registry] = {}
# check if the same value is already present for idempotency.
if 'auth' in docker_config['auths'][registry]:
if docker_config['auths'][registry]['auth'] == encoded_auth:
# No need to go further, everything is already set in file.
return False
docker_config['auths'][registry]['auth'] = encoded_auth
return True
def write_config(module, docker_config, dest):
'''Write updated credentials into dest/config.json'''
if not isinstance(docker_config, dict):
docker_config = docker_config.decode()
conf_file_path = os.path.join(dest, 'config.json')
try:
with open(conf_file_path, 'w') as conf_file:
json.dump(docker_config, conf_file, indent=8)
except IOError as ioerror:
result = {'failed': True,
'changed': False,
'msg': str(ioerror),
'state': 'unknown'}
module.fail_json(**result)
def run_module():
'''Run this module'''
module_args = dict(
path=dict(aliases=['dest', 'name'], required=True, type='path'),
registry=dict(type='str', required=True),
username=dict(type='str', required=True),
password=dict(type='str', required=True, no_log=True),
test_login=dict(type='bool', required=False, default=True),
proxy_vars=dict(type='str', required=False, default=''),
test_timeout=dict(type='int', required=False, default=60),
test_image=dict(type='str', required=True),
tls_verify=dict(type='bool', required=False, default=True)
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=False
)
# First, create our dest dir if necessary
dest = module.params['path']
registry = module.params['registry']
username = module.params['username']
password = module.params['password']
test_login = module.params['test_login']
proxy_vars = module.params['proxy_vars']
test_timeout = module.params['test_timeout']
test_image = module.params['test_image']
tls_verify = module.params['tls_verify']
if not check_dest_dir_exists(module, dest):
create_dest_dir(module, dest)
docker_config = {}
else:
# We want to scrape the contents of dest/config.json
# in case there are other registries/settings already present.
docker_config = load_config_file(module, dest)
# Test the credentials
if test_login:
skopeo_command = gen_skopeo_cmd(registry, username, password,
proxy_vars, test_timeout, test_image, tls_verify)
validate_registry_login(module, skopeo_command)
    # base64 encode our username:password string (decode back to str so it is JSON serializable)
    encoded_auth = base64.b64encode('{}:{}'.format(username, password).encode()).decode()
# Put the registry auth info into the config dict.
changed = update_config(docker_config, registry, encoded_auth)
if changed:
write_config(module, docker_config, dest)
result = {'changed': changed, 'rc': 0}
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()
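# Illustrative result (hypothetical values): after a successful run, <path>/config.json contains
# an entry of the form
#   {"auths": {"registry.example.com:443": {"auth": "<base64 of username:password>"}}}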
|
"""
Elvis main module: the golden-layout panel creator.
"""
import panel as pn
import os
from .bokeh import HoloviewsBokeh
from enum import Enum
from .themes import LayoutTheme
class Block(Enum):
stack = 'stack'
row = 'row'
column = 'column'
class GoldenPanel:
"""
Generates a jinja2 template, specifically tailored for the (slightly modified)
golden-layout that can be served using panel.
Only create golden panels in one go; use one compose method and nest the stack, row,
    column, and panel methods. Do not create panels without adding them to
the composition string.
"""
def __init__(self, theme: LayoutTheme=LayoutTheme.DARK):
"""
:param theme: use elvis.LayoutTheme.DARK or elvis.LayoutTheme.LIGHT
"""
self.theme = theme
self.panels = {}
self.counter = 0
self.app = None
def serve(self, static_dirs=None, **kwargs):
""" Wrapper for pn.serve(), with the inclusion of the required static assets.
        :param static_dirs: directories with static assets in addition to the standard elvis assets.
        :param kwargs: keyword arguments that are passed on to pn.serve
"""
static_dirs = {} if static_dirs is None else static_dirs
assets_elvis = {'assets': os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, 'assets'))}
self._set_assets("assets\\", self.theme)
return pn.serve(self.app, static_dirs={**assets_elvis, **static_dirs}, **kwargs)
def servable(self, **kwargs) -> None:
"""
!!! NOT WORKING !!!
Wrapper for pn.app.servable(), with the inclusion of the required static assets.
"""
#raise NotImplementedError
# self._set_assets("assets\\", self.theme)
# static_dirs = {} if static_dirs is None else static_dirs
# assets_elvis = {'assets': os.path.abspath(
# os.path.join(os.path.dirname(__file__), os.pardir, 'assets'))}
self._set_assets(os.path.join(os.path.dirname(__file__), os.pardir, 'assets\\'), self.theme)
self.app.servable(**kwargs)
def _set_assets(self, root: str, theme: LayoutTheme):
""" Add the static files (.css and .js) to the panel config. """
css_base = [root + 'goldenlayout-base.css',
root + 'goldenlayout-elvis.css',
root + 'panel-customizations.css']
css_theme = {LayoutTheme.LIGHT: [root + 'goldenlayout-elvis-light.css',
root + 'panel-customizations-light.css'],
LayoutTheme.DARK: [root + 'goldenlayout-elvis-dark.css',
root + 'panel-customizations-dark.css']}
        js_files = {'jquery': root + 'js/jquery-1.11.1.min.js',
                    'goldenlayout': root + 'js/goldenlayout.min.js'}
css_files = css_base + css_theme[theme]
pn.config.js_files = js_files
pn.config.css_files = css_files
def compose(self, golden_layout: str) -> None:
"""
Creates a servable template from a golden layout js code string.
Any GoldenPanel object needs to call compose exactly once to function as expected.
:param golden_layout: Result of nesting stacks, columns, rows, and panels
using the methods in this class.
"""
template = ClientSideCodeStrings.JINJA2_BASE % golden_layout
self.app = pn.Template(template=template)
for panel_ID, panel in self.panels.items():
self.app.add_panel(panel_ID, panel)
def stack(self, *args: str) -> str:
""" Adds a 'tab' element. Every argument should be a view or another nestable (stack, column, row)."""
return self._block(*args, type=Block.stack)
def column(self, *args: str) -> str:
""" Vertically aligned panels. Every argument should be a view or another nestable (stack, column, row)."""
return self._block(*args, type=Block.column)
def row(self, *args: str) -> str:
""" Horizontally aligned panels. Every argument should be a view or another nestable (stack, column, row)."""
return self._block(*args, type=Block.row)
def _block(self, *args: str, type: Block=Block.stack) -> str:
"""
        Creates nestable js code strings. Note that 'stack', 'column' and 'row' are the
        strings dictated by the golden layout js code.
"""
content = ''.join(arg for arg in args)
return ClientSideCodeStrings.NESTABLE % (type.name, content)
def view(self, view,
title: str=None,
width: int=None,
height: int=None,
scrollable=True) -> str:
"""
Adds a viewable panel.
:param view: The panel to show in this golden layout sub section.
:param title: The text to show at the top of the panel.
:param width: Initial width.
:param height: Initial height.
        :param scrollable: if True, the view will get scroll bars when the content is larger
                           than the panel size.
"""
# We need to register every panel with a unique name such that after
# composing the jinja2 template, we can perform add_panel (see compose function).
self.counter = self.counter + 1
panel_ID = "panel_" + str(self.counter)
self.panels[panel_ID] = pn.panel(view, sizing_mode='stretch_both')
title_str = "title: '%s'," % str(title) if title is not None else "title: '',"
width_str = "width: %s," % str(width) if width is not None else ""
height_str = "height: %s," % str(height) if height is not None else ""
scroll_str = "css_classes: ['not_scrollable']" if not scrollable else ""
settings = title_str + height_str + width_str + scroll_str
return ClientSideCodeStrings.VIEW % (panel_ID, settings)
def header(self, header: str, height: int=90) -> str:
""" Convenience function to make a title style view."""
return self.view(pn.pane.HTML(f"<div class='title'>{header}</div>",
sizing_mode='stretch_width'), height=height)
class ClientSideCodeStrings:
""" Namespace class to hold client size code (html, javascript and jinja2) """
JINJA2_BASE = \
"""
{%% extends base %%}
{%% block postamble %%}
<head> <link rel="icon" href="/assets/favicon.ico" type="image/x-icon"/> </head>
{%% endblock %%}
<!-- goes in body -->
{%% block contents %%}
<script type="text/javascript">
var config =
{
settings:
{
hasHeaders: true,
constrainDragToContainer: true,
reorderEnabled: true,
selectionEnabled: true,
popoutWholeStack: false,
blockedPopoutsThrowError: true,
closePopoutsOnUnload: true,
showPopoutIcon: false,
showMaximiseIcon: true,
showCloseIcon: false
},
dimensions: {
borderWidth: 5,
minItemHeight: 10,
minItemWidth: 10,
headerHeight: 30,
dragProxyWidth: 300,
dragProxyHeight: 200
},
content: [ %s ]
};
var myLayout = new GoldenLayout(config);
myLayout.registerComponent('view', function(container, componentState)
{
const {height, width, css_classes} = componentState;
if(height)
container.on('open', () => container.setSize(container.width, height));
if(width)
container.on('open', () => container.setSize(width, container.height));
if (css_classes)
css_classes.map((item) => container.getElement().addClass(item));
container.setTitle(componentState.title);
container.getElement().html(componentState.model);
container.on('resize', () => window.dispatchEvent(new Event('resize')));
});
myLayout.init();
</script>
{%% endblock %%}
"""
NESTABLE = \
"""
{
type: '%s',
content: [ %s ]
},
"""
VIEW = \
"""
{
type: 'component',
componentName: 'view',
componentState:
{
model: '{{ embed(roots.%s) }}',
%s
},
isClosable: false,
},
"""
|
import re
import random
import torch
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM
from scripts.baseline_utils import process_baseline
from collections import defaultdict
def get_tokenized_text(sen, tokenizer):
marked_text = "[CLS] " + sen + " [SEP] "
tokenized_text = tokenizer.tokenize(marked_text)
return tokenized_text[1:len(tokenized_text)-1]
def tokenize_sentences(sentences, np_to_indices, tokenizer):
for i,sentence in enumerate(sentences):
gold = sentence['aligns']
gold_filtered = []
for goldalign in gold:
            en = re.findall(r'\d+', goldalign[0])
            hu = re.findall(r'\d+', goldalign[1])
gold_filtered.append((str(en[0]), str(hu[0])))
sentence["aligns_filtered"] = gold_filtered
sentence_en = []
sentence_hu = []
np_to_indices[i]["en_sen"] = []
np_to_indices[i]["hu_sen"] = []
en_str_to_tokenize = []
for token in sentence["en_sen"]:
if type(token) == tuple:
sentence_en += get_tokenized_text(" ".join(en_str_to_tokenize), tokenizer)
en_str_to_tokenize = []
start_ind = len(sentence_en)
tokenized_np = get_tokenized_text(" ".join(token[1]), tokenizer)
end_ind = start_ind + len(tokenized_np) - 1
np_to_indices[i]["en_sen"].append((token[0], start_ind, end_ind))
sentence_en += tokenized_np
else:
en_str_to_tokenize.append(token)
sentence_en += get_tokenized_text(" ".join(en_str_to_tokenize), tokenizer)
hu_str_to_tokenize = []
for token in sentence["hu_sen"]:
if type(token) == tuple:
sentence_hu += get_tokenized_text(" ".join(hu_str_to_tokenize), tokenizer)
hu_str_to_tokenize = []
start_ind = len(sentence_hu)
tokenized_np = get_tokenized_text(" ".join(token[1]), tokenizer)
end_ind = start_ind + len(tokenized_np) - 1
np_to_indices[i]["hu_sen"].append((token[0], start_ind, end_ind))
sentence_hu += tokenized_np
else:
hu_str_to_tokenize.append(token)
sentence_hu += get_tokenized_text(" ".join(hu_str_to_tokenize), tokenizer)
sentence["sentence_hun"] = sentence_hu
sentence["sentence_en"] = sentence_en
def get_sentence_embeddings(sentences, np_to_indices, tokenizer, model):
too_long_sentences = []
for i,sentence in enumerate(sentences):
batch_i = 0
text_hu = sentences[i]["sentence_hun"]
text_en = sentences[i]["sentence_en"]
tokenized_text = []
tokenized_text.append("[CLS]")
tokenized_text += text_en
tokenized_text.append("[SEP]")
tokenized_text += text_hu
if len(tokenized_text) > 512:
too_long_sentences.append(i)
continue
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
segments_ids = [0] * (len(sentences[i]["sentence_en"]) + 2) + [1] * len(sentences[i]["sentence_hun"])
"""
print(tokenized_text)
print(len(tokenized_text))
print(len(segments_ids))
"""
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
with torch.no_grad():
encoded_layers, _ = model(tokens_tensor, segments_tensors)
token_embeddings = []
# For each token in the sentence...
for token_i in range(len(tokenized_text)):
# Holds 12 layers of hidden states for each token
hidden_layers = []
# For each of the 12 layers...
for layer_i in range(len(encoded_layers)):
# Lookup the vector for `token_i` in `layer_i`
vec = encoded_layers[layer_i][batch_i][token_i]
hidden_layers.append(vec)
token_embeddings.append(hidden_layers)
concatenated_last_4_layers = [torch.cat((layer[-1], layer[-2], layer[-3], layer[-4]), 0) for layer in token_embeddings] # [number_of_tokens, 3072]
summed_last_4_layers = [torch.sum(torch.stack(layer)[-4:], 0) for layer in token_embeddings] # [number_of_tokens, 768]
en_emb = []
hu_emb = []
for np in np_to_indices[i]["en_sen"]:
en_emb.append((np[0], summed_last_4_layers[np[1]+1:np[2]+2]))
for np in np_to_indices[i]["hu_sen"]:
hu_emb.append((np[0], summed_last_4_layers[np[1]+len(text_en)+2:np[2]+len(text_en)+3]))
np_to_indices[i]["en_emb"] = en_emb
np_to_indices[i]["hu_emb"] = hu_emb
def get_vocabulary(sentences, np_to_indices):
i = 0
word2idx = defaultdict(dict)
voc = []
for sen in sentences:
en_sen = []
hu_sen = []
indices = np_to_indices[sen['id']]
for ind in indices['en_sen']:
words = sen['sentence_en'][ind[1]:ind[2]+1]
np_i = []
for w in words:
np_i.append(str(i))
voc.append(str(i))
i+=1
en_sen.append((ind[0], np_i))
for ind in indices['hu_sen']:
words = sen['sentence_hun'][ind[1]:ind[2]+1]
np_i = []
for w in words:
np_i.append(str(i))
voc.append(str(i))
i+=1
hu_sen.append((ind[0], np_i))
word2idx[sen['id']]["sentence_en"] = en_sen
word2idx[sen['id']]["sentence_hun"] = hu_sen
return word2idx, voc
def init_bert_embeddings(np_to_indices):
bert_weights = []
for np in np_to_indices:
for emb in np_to_indices[np]['en_emb']:
for e in emb[1]:
bert_weights.append(e.tolist())
for emb in np_to_indices[np]['hu_emb']:
for e in emb[1]:
bert_weights.append(e.tolist())
return bert_weights
def process():
tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased')
# Load pre-trained model (weights)
model = BertModel.from_pretrained('bert-base-multilingual-cased')
# Put the model in "evaluation" mode, meaning feed-forward operation.
model.eval()
sentences = process_baseline("1984.sen-aligned.np-aligned.gold")
sentences[125]["en_sen"] = [(0, ['Audience'])]
sentences[125]["hu_sen"] = [(0, ['A', 'hallgatóság'])]
sentences[125]["aligns"] = [('0', '0')]
np_to_indices = defaultdict(dict)
tokenize_sentences(sentences, np_to_indices, tokenizer)
get_sentence_embeddings(sentences, np_to_indices, tokenizer, model)
word2idx, voc = get_vocabulary(sentences, np_to_indices)
voc_to_id = {}
for i in voc:
voc_to_id[i] = int(i)
bert_weights = init_bert_embeddings(np_to_indices)
return sentences, np_to_indices, word2idx, voc, voc_to_id, bert_weights
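# Minimal sketch (illustrative, guarded so it never runs on import) of how
# get_sentence_embeddings combines BERT hidden states: every token vector is the sum of
# the last four encoder layers. The dimensions below are assumed toy values, not values
# taken from the corpus processed above, and the batch dimension is omitted for brevity.
if __name__ == "__main__":
    num_layers, num_tokens, hidden_size = 12, 5, 768
    # One [num_tokens, hidden_size] tensor per layer, standing in for `encoded_layers`.
    demo_layers = [torch.randn(num_tokens, hidden_size) for _ in range(num_layers)]
    stacked = torch.stack(demo_layers)        # [num_layers, num_tokens, hidden_size]
    summed_last_4 = stacked[-4:].sum(dim=0)   # [num_tokens, hidden_size], cf. summed_last_4_layers
    print(summed_last_4.shape)                # torch.Size([5, 768])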
|
import nibabel as nib
import numpy as np
import torch
from functools import partial
from collections import defaultdict
from pairwise_measures import PairwiseMeasures
from src.utils import apply_transform, non_geometric_augmentations, generate_affine, to_var_gpu, batch_adaptation, soft_dice
def evaluate(args, preds, targets, prefix,
metrics=['dice', 'jaccard', 'sensitivity', 'specificity', 'soft_dice',
'loads', 'haus_dist', 'vol_diff', 'ppv', 'connected_elements']):
output_dict = defaultdict(list)
nifty_metrics = ['dice', 'jaccard', 'sensitivity', 'specificity',
'haus_dist', 'vol_diff', 'ppv', 'connected_elements']
for pred, target in zip(preds, targets):
seg = np.where(pred > 0.5, np.ones_like(pred, dtype=np.int64), np.zeros_like(pred, dtype=np.int64))
ref = np.where(target > 0.5, np.ones_like(target, dtype=np.int64), np.zeros_like(target, dtype=np.int64))
pairwise = PairwiseMeasures(seg, ref)
for metric in nifty_metrics:
if metric in metrics:
if metric == 'connected_elements':
TPc, FPc, FNc = pairwise.m_dict[metric][0]()
output_dict[prefix + 'TPc'].append(TPc)
output_dict[prefix + 'FPc'].append(FPc)
output_dict[prefix + 'FNc'].append(FNc)
else:
output_dict[prefix + metric].append(pairwise.m_dict[metric][0]())
if 'soft_dice' in metrics:
output_dict[prefix + 'soft_dice'].append(soft_dice(pred, ref, args.labels))
if 'loads' in metrics:
output_dict[prefix + 'loads'].append(np.sum(pred))
if 'per_pixel_diff' in metrics:
output_dict[prefix + 'per_pixel_diff'].append(np.mean(np.abs(ref - pred)))
return output_dict
def inference_tumour(args, p, model, whole_volume_dataset, iteration=0, prefix='', infer_on=None):
"""
    Run inference on a set of whole volumes, save the resulting predictions as NIfTI files, and compute Dice and related metrics.
"""
def save_img(format_spec, identifier, array):
img = nib.Nifti1Image(array, np.eye(4))
fn = format_spec.format(identifier)
nib.save(img, fn)
return fn
with torch.set_grad_enabled(False):
model.eval()
preds_0, preds_ema = [], []
preds, targets = [], []
predsAug, predsT = [], []
range_of_volumes = range(len(whole_volume_dataset)) if infer_on is None else infer_on
print('Evaluating on {} subjects'.format(len(range_of_volumes)))
        for index in range_of_volumes:
print('Evaluating on subject {}'.format(str(index)))
inputs, labels = whole_volume_dataset[index]
#TODO: inputs is of size (4, 170, 240, 160), need to change inference values accordingly.
subj_id = whole_volume_dataset.get_subject_id_from_index(index)
targetL = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[-1]))
outputS = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[-1]))
inputsS = np.zeros(shape=(inputs.shape[0], args.paddtarget, args.paddtarget, inputs.shape[-1]))
outputsT = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[-1]))
outputsAug = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[-1]))
for slice_index in np.arange(0, inputs.shape[-1], step=args.batch_size):
index_start = slice_index
index_end = min(slice_index+args.batch_size, inputs.shape[-1])
batch_input = np.einsum('ijkl->lijk', inputs[:, :, :, index_start:index_end])
batch_labels = np.einsum('ijk->kij', labels[:, :, index_start:index_end])
batch_input = torch.tensor(batch_input)
batch_labels = torch.tensor(np.expand_dims(batch_labels, axis=1))
batch_input, batch_labels = batch_adaptation(batch_input, batch_labels, args.paddtarget)
batch_input, batch_labels = to_var_gpu(batch_input), to_var_gpu(batch_labels)
outputs, _, _, _, _, _, _, _, _, _ = model(batch_input)
outputs = torch.sigmoid(outputs)
if args.method == 'A2':
Theta, Theta_inv = generate_affine(batch_input, degreeFreedom=args.affine_rot_degree,
scale=args.affine_scale,
shearingScale=args.affine_shearing)
inputstaug = apply_transform(batch_input, Theta)
outputstaug, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
outputs_t = apply_transform(outputs, Theta)
elif args.method == 'A4':
batch_trs = batch_input.cpu().numpy()
batch_trs = p.map(partial(non_geometric_augmentations, method='bias', norm_training_images=None),
np.copy(batch_trs))
batch_trs = p.map(partial(non_geometric_augmentations, method='kspace', norm_training_images=None),
np.copy(batch_trs))
inputstaug = torch.Tensor(batch_trs).cuda()
outputstaug, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
elif args.method in ['A3', 'adversarial', 'mean_teacher']:
batch_trs = batch_input.cpu().numpy()
batch_trs = p.map(partial(non_geometric_augmentations, method='bias', norm_training_images=None),
np.copy(batch_trs))
batch_trs = p.map(partial(non_geometric_augmentations, method='kspace', norm_training_images=None),
np.copy(batch_trs))
inputstaug = torch.Tensor(batch_trs).cuda()
Theta, Theta_inv = generate_affine(inputstaug, degreeFreedom=args.affine_rot_degree,
scale=args.affine_scale,
shearingScale=args.affine_shearing)
inputstaug = apply_transform(inputstaug, Theta)
outputstaug, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
outputs_t = apply_transform(outputs, Theta)
outputS[:, :, index_start:index_end] = np.einsum('ijk->jki',
np.squeeze(outputs.detach().cpu().numpy()))
targetL[:, :, index_start:index_end] = np.einsum('ijk->jki',
np.squeeze(batch_labels.detach().cpu().numpy()))
inputsS[:, :, :, index_start:index_end] = np.einsum('ijkl->jkli', np.squeeze(batch_input.detach().cpu().numpy()))
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
outputsAug[:, :, index_start:index_end] = np.einsum('ijk->jki',
np.squeeze(outputstaug.detach().cpu().numpy()))
if args.method in ['A3', 'A2', 'adversarial', 'mean_teacher']:
outputsT[:, :, index_start:index_end] = np.einsum('ijk->jki',
np.squeeze(outputs_t.detach().cpu().numpy()))
format_spec = '{}_{}_{}_{}_{}_{}_'.format(prefix, args.method, args.source, args.target, args.tag,
iteration) + \
'_{}_' + f'{str(subj_id)}.nii.gz'
ema_format_spec = '{}_{}_{}_{}_{}_{}_'.format(prefix, args.method, args.source,
args.target, args.tag, 'EMA') + \
'_{}_' + f'{str(subj_id)}.nii.gz'
if iteration == 0:
fn = save_img(format_spec=ema_format_spec, identifier='Prediction', array=outputS)
else:
pred_zero = f'{prefix}_{args.method}_{args.source}_{args.target}' \
f'_{args.tag}_0__Prediction_{str(subj_id)}.nii.gz'
outputs_0 = nib.load(pred_zero).get_data()
preds_0.append(outputs_0)
alpha = 0.9
pred_ema_filename = f'{prefix}_{args.method}_{args.source}_{args.target}' \
f'_{args.tag}_EMA__Prediction_{str(subj_id)}.nii.gz'
pred_ema_t_minus_one = nib.load(pred_ema_filename).get_data()
pred_ema = alpha * outputS + (1 - alpha) * pred_ema_t_minus_one
preds_ema.append(pred_ema)
save_img(format_spec=ema_format_spec, identifier='Prediction', array=pred_ema)
save_img(format_spec=format_spec, identifier='Prediction', array=outputS)
save_img(format_spec=format_spec, identifier='target', array=targetL)
for idx, modality in enumerate(['flair', 't1c', 't1', 't2']):
save_img(format_spec=format_spec, identifier='{}_mri'.format(modality), array=inputsS[idx, ...])
preds.append(outputS)
targets.append(targetL)
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
predsAug.append(outputsAug)
save_img(format_spec=format_spec, identifier='Aug', array=outputsAug)
if args.method in ['A2', 'A3', 'adversarial', 'mean_teacher']:
predsT.append(outputsT)
save_img(format_spec=format_spec, identifier='Transformed', array=outputsT)
performance_supervised = evaluate(args=args, preds=preds, targets=targets, prefix='supervised_')
performance_i = None
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
if args.method in ['A2', 'A3', 'adversarial', 'mean_teacher']:
performance_i = evaluate(args=args, preds=predsAug, targets=predsT, prefix='consistency_')
else:
performance_i = evaluate(args=args, preds=predsAug, targets=preds, prefix='consistency_')
if iteration == 0:
return performance_supervised, performance_i, None, None
else:
performance_compared_to_0 = evaluate(args=args, preds=preds, targets=preds_0, prefix='diff_to_0_',
metrics=['per_pixel_diff'])
performance_compared_to_ema = evaluate(args=args, preds=preds, targets=preds_ema, prefix='diff_to_ema_',
metrics=['per_pixel_diff'])
return performance_supervised, performance_i, performance_compared_to_0, performance_compared_to_ema
def inference_ms(args, p, model, whole_volume_dataset, iteration=0, prefix='', infer_on=None, eval_diff=True):
"""
    Run inference on a set of whole volumes, save the resulting predictions as NIfTI files, and compute Dice and related metrics.
"""
def save_img(format_spec, identifier, array):
img = nib.Nifti1Image(array, np.eye(4))
fn = format_spec.format(identifier)
nib.save(img, fn)
return fn
with torch.set_grad_enabled(False):
model.eval()
preds_0, preds_ema = [], []
preds, targets = [], []
predsAug, predsT = [], []
print('Evaluating on {} subjects'.format(len(whole_volume_dataset)))
range_of_volumes = range(len(whole_volume_dataset)) if infer_on is None else infer_on
for index in range_of_volumes:
print('Evaluating on subject {}'.format(str(index)))
inputs, labels = whole_volume_dataset[index]
subj_id = whole_volume_dataset.get_subject_id_from_index(index)
targetL = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
outputS = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
inputsS = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
outputsT = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
outputsAug = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
for slice_index in np.arange(0, inputs.shape[2], step=args.batch_size):
index_start = slice_index
index_end = min(slice_index+args.batch_size, inputs.shape[2])
batch_input = np.einsum('ijk->kij', inputs[:, :, index_start:index_end])
batch_labels = np.einsum('ijk->kij', labels[:, :, index_start:index_end])
batch_input = torch.tensor(np.expand_dims(batch_input, axis=1).astype(np.float32))
batch_labels = torch.tensor(np.expand_dims(batch_labels, axis=1))
batch_input, batch_labels = batch_adaptation(batch_input, batch_labels, args.paddtarget)
batch_input, batch_labels = to_var_gpu(batch_input), to_var_gpu(batch_labels)
outputs, _, _, _, _, _, _, _, _, _ = model(batch_input)
outputs = torch.sigmoid(outputs)
if args.method == 'A2':
Theta, Theta_inv = generate_affine(batch_input, degreeFreedom=args.affine_rot_degree,
scale=args.affine_scale,
shearingScale=args.affine_shearing)
inputstaug = apply_transform(batch_input, Theta)
outputstaug, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
outputs_t = apply_transform(outputs, Theta)
elif args.method == 'A4':
batch_trs = batch_input.cpu().numpy()
batch_trs = p.map(partial(non_geometric_augmentations, method='bias', norm_training_images=None),
np.copy(batch_trs))
batch_trs = p.map(partial(non_geometric_augmentations, method='kspace', norm_training_images=None),
np.copy(batch_trs))
inputstaug = torch.Tensor(batch_trs).cuda()
outputstaug, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
elif args.method in ['A3', 'adversarial', 'mean_teacher']:
batch_trs = batch_input.cpu().numpy()
batch_trs = p.map(partial(non_geometric_augmentations, method='bias', norm_training_images=None),
np.copy(batch_trs))
batch_trs = p.map(partial(non_geometric_augmentations, method='kspace', norm_training_images=None),
np.copy(batch_trs))
inputstaug = torch.Tensor(batch_trs).cuda()
Theta, Theta_inv = generate_affine(inputstaug, degreeFreedom=args.affine_rot_degree,
scale=args.affine_scale,
shearingScale=args.affine_shearing)
inputstaug = apply_transform(inputstaug, Theta)
outputstaug, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
outputs_t = apply_transform(outputs, Theta)
outputS[:, :, index_start:index_end] = np.einsum('ijk->jki', outputs.detach().cpu().numpy()[:, 0, ...])
targetL[:, :, index_start:index_end] = np.einsum('ijk->jki', batch_labels.detach().cpu().numpy()[:, 0, ...])
inputsS[:, :, index_start:index_end] = np.einsum('ijk->jki', batch_input.detach().cpu().numpy()[:, 0, ...])
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
outputsAug[:, :, index_start:index_end] = np.einsum('ijk->jki',
outputstaug.detach().cpu().numpy()[:, 0, ...])
if args.method in ['A3', 'A2', 'adversarial', 'mean_teacher']:
outputsT[:, :, index_start:index_end] = np.einsum('ijk->jki',
outputs_t.detach().cpu().numpy()[:, 0, ...])
format_spec = '{}_{}_{}_{}_{}_{}_'.format(prefix, args.method, args.source, args.target, args.tag, iteration) +\
'_{}_' + f'{str(subj_id)}.nii.gz'
ema_format_spec = '{}_{}_{}_{}_{}_{}_'.format(prefix, args.method, args.source,
args.target, args.tag, 'EMA') + \
'_{}_' + f'{str(subj_id)}.nii.gz'
if iteration == 0:
save_img(format_spec=ema_format_spec, identifier='Prediction', array=outputS)
elif eval_diff and iteration > 0:
pred_zero = f'{prefix}_{args.method}_{args.source}_{args.target}' \
f'_{args.tag}_{0}__Prediction_{str(subj_id)}.nii.gz'
outputs_0 = nib.load(pred_zero).get_data()
preds_0.append(outputs_0)
alpha = 0.9
pred_ema_filename = f'{prefix}_{args.method}_{args.source}_{args.target}' \
f'_{args.tag}_EMA__Prediction_{str(subj_id)}.nii.gz'
print(pred_ema_filename)
pred_ema_t_minus_one = nib.load(pred_ema_filename).get_data()
pred_ema = alpha * outputS + (1 - alpha) * pred_ema_t_minus_one
preds_ema.append(pred_ema)
save_img(format_spec=ema_format_spec, identifier='Prediction', array=pred_ema)
else:
print('Not computing diff')
save_img(format_spec=format_spec, identifier='Prediction', array=outputS)
save_img(format_spec=format_spec, identifier='target', array=targetL)
save_img(format_spec=format_spec, identifier='mri', array=inputsS)
preds.append(outputS)
targets.append(targetL)
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
predsAug.append(outputsAug)
save_img(format_spec=format_spec, identifier='Aug', array=outputsAug)
if args.method in ['A2', 'A3', 'adversarial', 'mean_teacher']:
predsT.append(outputsT)
save_img(format_spec=format_spec, identifier='Transformed', array=outputsT)
performance_supervised = evaluate(args=args, preds=preds, targets=targets, prefix='supervised_')
performance_i = None
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
if args.method in ['A2', 'A3', 'adversarial', 'mean_teacher']:
performance_i = evaluate(args=args, preds=predsAug, targets=predsT, prefix='consistency_')
else:
performance_i = evaluate(args=args, preds=predsAug, targets=preds, prefix='consistency_')
if iteration == 0:
return performance_supervised, performance_i, None, None
else:
performance_compared_to_0 = evaluate(args=args, preds=preds, targets=preds_0, prefix='diff_to_0_',
metrics=['per_pixel_diff'])
performance_compared_to_ema = evaluate(args=args, preds=preds, targets=preds_ema, prefix='diff_to_ema_',
metrics=['per_pixel_diff'])
return performance_supervised, performance_i, performance_compared_to_0, performance_compared_to_ema
def inference_crossmoda(args, p, model, whole_volume_dataset, iteration=0, prefix='', infer_on=None, eval_diff=True):
"""
    Run inference on a set of whole volumes, save the resulting predictions as NIfTI files, and compute Dice and related metrics.
"""
def save_img(format_spec, identifier, array):
img = nib.Nifti1Image(array, np.eye(4))
fn = format_spec.format(identifier)
nib.save(img, fn)
return fn
with torch.set_grad_enabled(False):
model.eval()
preds_0, preds_ema = [], []
preds, targets = [], []
predsAug, predsT = [], []
print('Evaluating on {} subjects'.format(len(whole_volume_dataset)))
range_of_volumes = range(len(whole_volume_dataset)) if infer_on is None else infer_on
for index in range_of_volumes:
print('Evaluating on subject {}'.format(str(index)))
inputs, labels = whole_volume_dataset[index]
subj_id = whole_volume_dataset.get_subject_id_from_index(index)
targetL = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
outputS = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
inputsS = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
outputsT = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
outputsAug = np.zeros(shape=(args.paddtarget, args.paddtarget, inputs.shape[2]))
for slice_index in np.arange(0, inputs.shape[2], step=args.batch_size):
index_start = slice_index
index_end = min(slice_index+args.batch_size, inputs.shape[2])
batch_input = np.einsum('ijk->kij', inputs[:, :, index_start:index_end])
batch_labels = np.einsum('ijk->kij', labels[:, :, index_start:index_end])
batch_input = torch.tensor(np.expand_dims(batch_input, axis=1).astype(np.float32))
batch_labels = torch.tensor(np.expand_dims(batch_labels, axis=1))
batch_input, batch_labels = batch_adaptation(batch_input, batch_labels, args.paddtarget)
batch_input, batch_labels = to_var_gpu(batch_input), to_var_gpu(batch_labels)
outputs, _, _, _, _, _, _, _, _, _, _ = model(batch_input)
outputs = torch.sigmoid(outputs)
if args.method == 'A2':
Theta, Theta_inv = generate_affine(batch_input, degreeFreedom=args.affine_rot_degree,
scale=args.affine_scale,
shearingScale=args.affine_shearing)
inputstaug = apply_transform(batch_input, Theta)
outputstaug, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
outputs_t = apply_transform(outputs, Theta)
elif args.method == 'A4':
batch_trs = batch_input.cpu().numpy()
batch_trs = p.map(partial(non_geometric_augmentations, method='bias', norm_training_images=None),
np.copy(batch_trs))
batch_trs = p.map(partial(non_geometric_augmentations, method='kspace', norm_training_images=None),
np.copy(batch_trs))
inputstaug = torch.Tensor(batch_trs).cuda()
outputstaug, _, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
elif args.method in ['A3', 'adversarial', 'mean_teacher']:
batch_trs = batch_input.cpu().numpy()
batch_trs = p.map(partial(non_geometric_augmentations, method='bias', norm_training_images=None),
np.copy(batch_trs))
batch_trs = p.map(partial(non_geometric_augmentations, method='kspace', norm_training_images=None),
np.copy(batch_trs))
inputstaug = torch.Tensor(batch_trs).cuda()
Theta, Theta_inv = generate_affine(inputstaug, degreeFreedom=args.affine_rot_degree,
scale=args.affine_scale,
shearingScale=args.affine_shearing)
inputstaug = apply_transform(inputstaug, Theta)
outputstaug, _, _, _, _, _, _, _, _, _, _ = model(inputstaug)
outputstaug = torch.sigmoid(outputstaug)
outputs_t = apply_transform(outputs, Theta)
outputS[:, :, index_start:index_end] = np.einsum('ijk->jki', outputs.detach().cpu().numpy()[:, 0, ...])
targetL[:, :, index_start:index_end] = np.einsum('ijk->jki', batch_labels.detach().cpu().numpy()[:, 0, ...])
inputsS[:, :, index_start:index_end] = np.einsum('ijk->jki', batch_input.detach().cpu().numpy()[:, 0, ...])
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
outputsAug[:, :, index_start:index_end] = np.einsum('ijk->jki',
outputstaug.detach().cpu().numpy()[:, 0, ...])
if args.method in ['A3', 'A2', 'adversarial', 'mean_teacher']:
outputsT[:, :, index_start:index_end] = np.einsum('ijk->jki',
outputs_t.detach().cpu().numpy()[:, 0, ...])
format_spec = '{}_{}_{}_{}_{}_{}_'.format(prefix, args.method, args.source, args.target, args.tag, iteration) +\
'_{}_' + f'{str(subj_id)}.nii.gz'
ema_format_spec = '{}_{}_{}_{}_{}_{}_'.format(prefix, args.method, args.source,
args.target, args.tag, 'EMA') + \
'_{}_' + f'{str(subj_id)}.nii.gz'
if iteration == 0:
save_img(format_spec=ema_format_spec, identifier='Prediction', array=outputS)
elif eval_diff and iteration > 0:
pred_zero = f'{prefix}_{args.method}_{args.source}_{args.target}' \
f'_{args.tag}_{0}__Prediction_{str(subj_id)}.nii.gz'
outputs_0 = nib.load(pred_zero).get_data()
preds_0.append(outputs_0)
alpha = 0.9
pred_ema_filename = f'{prefix}_{args.method}_{args.source}_{args.target}' \
f'_{args.tag}_EMA__Prediction_{str(subj_id)}.nii.gz'
print(pred_ema_filename)
pred_ema_t_minus_one = nib.load(pred_ema_filename).get_data()
pred_ema = alpha * outputS + (1 - alpha) * pred_ema_t_minus_one
preds_ema.append(pred_ema)
save_img(format_spec=ema_format_spec, identifier='Prediction', array=pred_ema)
else:
print('Not computing diff')
save_img(format_spec=format_spec, identifier='Prediction', array=outputS)
save_img(format_spec=format_spec, identifier='target', array=targetL)
save_img(format_spec=format_spec, identifier='mri', array=inputsS)
preds.append(outputS)
targets.append(targetL)
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
predsAug.append(outputsAug)
save_img(format_spec=format_spec, identifier='Aug', array=outputsAug)
if args.method in ['A2', 'A3', 'adversarial', 'mean_teacher']:
predsT.append(outputsT)
save_img(format_spec=format_spec, identifier='Transformed', array=outputsT)
performance_supervised = evaluate(args=args, preds=preds, targets=targets, prefix='supervised_')
performance_i = None
if args.method in ['A2', 'A3', 'A4', 'adversarial', 'mean_teacher']:
if args.method in ['A2', 'A3', 'adversarial', 'mean_teacher']:
performance_i = evaluate(args=args, preds=predsAug, targets=predsT, prefix='consistency_')
else:
performance_i = evaluate(args=args, preds=predsAug, targets=preds, prefix='consistency_')
if iteration == 0:
return performance_supervised, performance_i, None, None
else:
performance_compared_to_0 = evaluate(args=args, preds=preds, targets=preds_0, prefix='diff_to_0_',
metrics=['per_pixel_diff'])
performance_compared_to_ema = evaluate(args=args, preds=preds, targets=preds_ema, prefix='diff_to_ema_',
metrics=['per_pixel_diff'])
return performance_supervised, performance_i, performance_compared_to_0, performance_compared_to_ema
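# Minimal sketch (illustrative, guarded so it never runs on import) of the prediction
# EMA used by the inference functions above: at iteration 0 the raw prediction volume
# is stored as the EMA; afterwards the stored EMA is blended with each new prediction
# using alpha = 0.9. The array shape and values here are made up for illustration only.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    alpha = 0.9
    ema = rng.random((4, 4, 2))               # stands in for the saved EMA volume
    for _ in range(3):                        # three later iterations
        new_pred = rng.random((4, 4, 2))      # stands in for outputS at this iteration
        ema = alpha * new_pred + (1 - alpha) * ema
    print(ema.shape)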
|
# -*- coding: utf-8 -*-
import scrapy
from ..items import YangguangItem
class YgSpider(scrapy.Spider):
name = 'yg'
allowed_domains = ['wz.sun0769.com']
start_urls = ['http://wz.sun0769.com/index.php/question/questionType?type=4&page=0']
def parse(self, response):
"""提取列表页的数据"""
        # 1. Extract the data on the current page: group the rows first, then extract each field
tr_list = response.xpath("//div[@class='greyframe']/table[2]/tr/td/table/tr")
for tr in tr_list:
item = YangguangItem()
item["num"] = tr.xpath("./td[1]/text()").extract_first()
item["title"] = tr.xpath("./td[2]/a[2]/text()").extract_first()
item["href"] = tr.xpath("./td[2]/a[2]/@href").extract_first()
item["status"] = tr.xpath("./td[3]/span/text()").extract_first()
item["name"] = tr.xpath("./td[4]/text()").extract_first()
item["publish_date"] = tr.xpath("td[5]/text()").extract_first()
            # Build a Request for each row on the list page; the response is passed to parse_detail() via meta to fetch the matching detail page
yield scrapy.Request(
item["href"],
callback=self.parse_detail,
meta={"key": item}
)
        # 2. Build the request for the next page (pagination)
next_url = response.xpath("//a[text()='>']/@href").extract_first()
if next_url is not None:
yield scrapy.Request(next_url, callback=self.parse)
def parse_detail(self, response):
"""提取详情页的数据"""
item = response.meta["key"]
item["img"] = response.xpath("//div[@class='textpic']/img/@src").extract_first()
        # The text content contains several line breaks, so extract() returns a list of strings
item["content"] = response.xpath("//div[@class='c1 text14_2']//text()").extract()
yield item
|
from typing import Optional
import numbers
import dynet as dy
import numpy as np
from xnmt import logger
from xnmt.param_collections import ParamManager
from xnmt.persistence import serializable_init, Serializable
from xnmt import utils
"""
This module mostly exposes the DyNet trainers to YAML serialization, but it may also be
extended to customize optimizers / training schedules.
"""
class XnmtOptimizer(object):
"""
  A base class for trainers. Trainers are mostly simple wrappers of DyNet trainers but can add extra functionality.
Args:
optimizer: the underlying DyNet optimizer (trainer)
skip_noisy: keep track of a moving average and a moving standard deviation of the log of the gradient norm
values, and abort a step if the norm of the gradient exceeds four standard deviations of the
moving average. Reference: https://arxiv.org/pdf/1804.09849.pdf
"""
def __init__(self, optimizer: dy.Trainer, skip_noisy: bool = False) -> None:
self.optimizer = optimizer
self.skip_noisy = skip_noisy
if skip_noisy:
self.rolling_stats = utils.RollingStatistic()
def update(self) -> None:
"""
Update the parameters.
"""
try:
if not (self.skip_noisy and self._check_gradients_noisy()):
self.optimizer.update()
else:
logger.info("skipping noisy update")
except RuntimeError:
logger.warning("Failed to perform update. Skipping example and clearing gradients.")
for subcol in ParamManager.param_col.subcols.values():
for param in subcol.parameters_list():
param.scale_gradient(0)
def status(self) -> None:
"""
    Outputs information about the trainer to stderr.
(number of updates since last call, number of clipped gradients, learning rate, etc…)
"""
return self.optimizer.status()
def set_clip_threshold(self, thr: numbers.Real) -> None:
"""
    Set clipping threshold.
To deactivate clipping, set the threshold to be <=0
Args:
thr: Clipping threshold
"""
return self.optimizer.set_clip_threshold(thr)
def get_clip_threshold(self) -> numbers.Real:
"""
Get clipping threshold
Returns:
Gradient clipping threshold
"""
return self.optimizer.get_clip_threshold()
def restart(self) -> None:
"""
Restarts the optimizer
    Clears all momentum values and similar accumulated state (if applicable)
"""
return self.optimizer.restart()
@property
def learning_rate(self):
return self.optimizer.learning_rate
@learning_rate.setter
def learning_rate(self, value):
self.optimizer.learning_rate = value
def _check_gradients_noisy(self) -> bool:
sq_norm = 0
for subcol in ParamManager.param_col.subcols.values():
for param in subcol.parameters_list():
cur_grads = param.grad_as_array()
sq_norm += np.sum(np.square(cur_grads))
log_norm = np.log(np.sqrt(sq_norm))
self.rolling_stats.update(log_norm)
if self.rolling_stats.average is None: # too few statistics
return False
else:
req_min = self.rolling_stats.average - 4*self.rolling_stats.stddev
req_max = self.rolling_stats.average + 4*self.rolling_stats.stddev
return not (req_min < log_norm < req_max)
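# Standalone sketch (an addition for exposition, not used by any class in this module):
# it illustrates the `skip_noisy` rule from `_check_gradients_noisy` above with an
# explicit rolling window instead of utils.RollingStatistic, whose window length is not
# assumed here.
def _demo_is_noisy(log_norm: float, history: list, window: int = 100) -> bool:
    """Return True if `log_norm` lies more than four standard deviations away from the
    rolling average of previously seen log gradient norms."""
    past = history[-window:]
    history.append(log_norm)
    if len(past) < 2:  # too few statistics to estimate a spread
        return False
    avg, std = float(np.mean(past)), float(np.std(past))
    return not (avg - 4 * std < log_norm < avg + 4 * std)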
class SimpleSGDTrainer(XnmtOptimizer, Serializable):
"""
Stochastic gradient descent trainer
  This trainer performs stochastic gradient descent, the go-to optimization procedure for neural networks.
Args:
e0: Initial learning rate
skip_noisy: keep track of a moving average and a moving standard deviation of the log of the gradient norm
values, and abort a step if the norm of the gradient exceeds four standard deviations of the
moving average. Reference: https://arxiv.org/pdf/1804.09849.pdf
"""
yaml_tag = '!SimpleSGDTrainer'
@serializable_init
def __init__(self, e0: numbers.Real = 0.1, skip_noisy: bool = False) -> None:
super().__init__(optimizer=dy.SimpleSGDTrainer(ParamManager.global_collection(), e0),
skip_noisy=skip_noisy)
class MomentumSGDTrainer(XnmtOptimizer, Serializable):
"""
Stochastic gradient descent with momentum
  This is a modified version of the SGD algorithm with momentum to stabilize the gradient trajectory.
Args:
e0: Initial learning rate
mom: Momentum
skip_noisy: keep track of a moving average and a moving standard deviation of the log of the gradient norm
values, and abort a step if the norm of the gradient exceeds four standard deviations of the
moving average. Reference: https://arxiv.org/pdf/1804.09849.pdf
"""
yaml_tag = '!MomentumSGDTrainer'
@serializable_init
def __init__(self, e0: numbers.Real = 0.01, mom: numbers.Real = 0.9, skip_noisy: bool = False) -> None:
super().__init__(optimizer=dy.MomentumSGDTrainer(ParamManager.global_collection(), e0, mom),
skip_noisy=skip_noisy)
class AdagradTrainer(XnmtOptimizer, Serializable):
"""
Adagrad optimizer
The adagrad algorithm assigns a different learning rate to each parameter.
Args:
e0: Initial learning rate
eps: Epsilon parameter to prevent numerical instability
skip_noisy: keep track of a moving average and a moving standard deviation of the log of the gradient norm
values, and abort a step if the norm of the gradient exceeds four standard deviations of the
moving average. Reference: https://arxiv.org/pdf/1804.09849.pdf
"""
yaml_tag = '!AdagradTrainer'
@serializable_init
def __init__(self, e0: numbers.Real = 0.1, eps: numbers.Real = 1e-20, skip_noisy: bool = False) -> None:
super().__init__(optimizer=dy.AdagradTrainer(ParamManager.global_collection(), e0, eps=eps),
skip_noisy=skip_noisy)
class AdadeltaTrainer(XnmtOptimizer, Serializable):
"""
AdaDelta optimizer
The AdaDelta optimizer is a variant of Adagrad aiming to prevent vanishing learning rates.
Args:
eps: Epsilon parameter to prevent numerical instability
rho: Update parameter for the moving average of updates in the numerator
skip_noisy: keep track of a moving average and a moving standard deviation of the log of the gradient norm
values, and abort a step if the norm of the gradient exceeds four standard deviations of the
moving average. Reference: https://arxiv.org/pdf/1804.09849.pdf
"""
yaml_tag = '!AdadeltaTrainer'
@serializable_init
def __init__(self, eps: numbers.Real = 1e-6, rho: numbers.Real = 0.95, skip_noisy: bool = False) -> None:
super().__init__(optimizer=dy.AdadeltaTrainer(ParamManager.global_collection(), eps, rho),
skip_noisy=skip_noisy)
class AdamTrainer(XnmtOptimizer, Serializable):
"""
Adam optimizer
The Adam optimizer is similar to RMSProp but uses unbiased estimates of the first and second moments of the gradient
Args:
alpha: Initial learning rate
beta_1: Moving average parameter for the mean
beta_2: Moving average parameter for the variance
eps: Epsilon parameter to prevent numerical instability
skip_noisy: keep track of a moving average and a moving standard deviation of the log of the gradient norm
values, and abort a step if the norm of the gradient exceeds four standard deviations of the
moving average. Reference: https://arxiv.org/pdf/1804.09849.pdf
"""
yaml_tag = '!AdamTrainer'
@serializable_init
def __init__(self,
alpha: numbers.Real = 0.001,
beta_1: numbers.Real = 0.9,
beta_2: numbers.Real = 0.999,
eps: numbers.Real = 1e-8,
skip_noisy: bool = False) -> None:
super().__init__(optimizer=dy.AdamTrainer(ParamManager.global_collection(), alpha, beta_1, beta_2, eps),
skip_noisy=skip_noisy)
class NoamTrainer(XnmtOptimizer, Serializable):
"""
Proposed in the paper "Attention is all you need" (https://papers.nips.cc/paper/7181-attention-is-all-you-need.pdf) [Page 7, Eq. 3]
  In this schedule, the learning rate of the Adam optimizer is increased over the first warmup steps and then gradually decayed.
Args:
    alpha: learning rate passed to the underlying Adam trainer (overridden every step by the schedule in ``update``)
    dim: model dimension used by the schedule
    warmup_steps: number of warmup steps; if falsy, only the inverse-square-root decay is applied
    beta_1: Adam first-moment decay parameter
    beta_2: Adam second-moment decay parameter
    eps: Epsilon parameter to prevent numerical instability
skip_noisy: keep track of a moving average and a moving standard deviation of the log of the gradient norm
values, and abort a step if the norm of the gradient exceeds four standard deviations of the
moving average. Reference: https://arxiv.org/pdf/1804.09849.pdf
"""
yaml_tag = '!NoamTrainer'
@serializable_init
def __init__(self,
alpha: numbers.Real = 1.0,
dim: numbers.Integral = 512,
warmup_steps: Optional[numbers.Integral] = 4000,
beta_1: numbers.Real = 0.9,
beta_2: numbers.Real = 0.98,
eps: numbers.Real = 1e-9,
skip_noisy: bool = False) -> None:
super().__init__(optimizer=dy.AdamTrainer(ParamManager.global_collection(),
alpha=alpha,
beta_1=beta_1,
beta_2=beta_2,
eps=eps),
skip_noisy=skip_noisy)
self.dim = dim
self.warmup_steps = warmup_steps
self.steps = 0
def update(self) -> None:
self.steps += 1
if self.warmup_steps:
decay = (self.dim ** (-0.5)) * np.min([self.steps ** (-0.5), self.steps * (self.warmup_steps ** (-1.5))])
else:
decay = (self.dim ** (-0.5)) * self.steps ** (-0.5)
self.optimizer.learning_rate = 1. * decay
super().update()
if self.steps % 200 == 0:
logger.info('> Optimizer Logging')
logger.info(' Steps=%d, learning_rate=%.2e' % (self.steps, self.optimizer.learning_rate))
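# Illustrative helper (an addition for exposition, not used by NoamTrainer above): it
# recomputes the learning-rate factor that NoamTrainer.update assigns at a given step.
# The default dim/warmup values here are examples, not values read from any configuration.
def _noam_decay_factor(steps: int, dim: int = 512, warmup_steps: int = 4000) -> float:
    """During warmup the factor grows roughly linearly with `steps`; afterwards it
    decays as steps ** -0.5, matching the expression used in NoamTrainer.update."""
    return (dim ** (-0.5)) * min(steps ** (-0.5), steps * (warmup_steps ** (-1.5)))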
class DummyTrainer(XnmtOptimizer, Serializable):
"""
A dummy trainer that does not perform any parameter updates.
"""
yaml_tag = "!DummyTrainer"
@serializable_init
def __init__(self) -> None:
pass
def update(self) -> None:
pass
def status(self) -> None:
pass
def set_clip_threshold(self, thr) -> None:
pass
def get_clip_threshold(self) -> None:
pass
def restart(self) -> None:
pass
@property
def learning_rate(self):
return 1.0
@learning_rate.setter
def learning_rate(self, value):
pass
|
from batchgenerators.utilities.file_and_folder_operations import *
import numpy as np
if __name__ == '__main__':
# input_file = '/home/fabian/data/nnUNet_preprocessed/Task004_Hippocampus/nnUNetPlansv2.1_plans_3D.pkl'
# output_file = '/home/fabian/data/nnUNet_preprocessed/Task004_Hippocampus/nnUNetPlansv2.1_LISA_plans_3D.pkl'
# a = load_pickle(input_file)
# a['plans_per_stage'][0]['batch_size'] = int(np.floor(6 / 9 * a['plans_per_stage'][0]['batch_size']))
# save_pickle(a, output_file)
input_file = '../../data/nnUNet_preprocessed/Task100_LiTSbaseline/nnUNetPlansv2.1_plans_3D.pkl'
output_file = '../../data/nnUNet_preprocessed/Task100_LiTSbaseline/nnUNetPlansv2.1_plans_3D.pkl'
a = load_pickle(input_file)
print(a['plans_per_stage'])
# a['plans_per_stage'][0]['batch_size'] = int(np.floor(6 / 9 * a['plans_per_stage'][0]['batch_size']))
a['plans_per_stage'][0]['patch_size'] = np.array([128, 128, 128])
a['plans_per_stage'][1]['patch_size'] = np.array([128, 128, 128])
a['plans_per_stage'][0]['num_pool_per_axis'] = np.array([5, 5, 5])
a['plans_per_stage'][1]['num_pool_per_axis'] = np.array([5, 5, 5])
a['plans_per_stage'][0]['pool_op_kernel_sizes'] = [[2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]
a['plans_per_stage'][1]['pool_op_kernel_sizes'] = [[2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]
a['plans_per_stage'][0]['conv_kernel_sizes'] = [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]]
a['plans_per_stage'][1]['conv_kernel_sizes'] = [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]]
save_pickle(a, output_file)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ####################################################################################
# Copyright (c) 2016, Francesco De Carlo #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met: #
# #
# * Redistributions of source code must retain the above copyright notice, this #
# list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright notice, #
# this list of conditions and the following disclaimer in the documentation #
# and/or other materials provided with the distribution. #
# #
# * Neither the name of project nor the names of its #
# contributors may be used to endorse or promote products derived from #
# this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" #
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE #
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, #
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
# ####################################################################################
"""
Module for describing .....
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy
__authors__ = "First Name Last Name"
__copyright__ = "Copyright (c) 2016, Affiliation"
__version__ = "0.1.0"
__docformat__ = "restructuredtext en"
__all__ = ['function_03',
'function_04']
def function_03(parameter_01, parameter_02, parameter_03):
"""
Function description.
Parameters
----------
parameter_01 : type
Description.
parameter_02 : type
Description.
parameter_03 : type
Description.
Returns
-------
return_01
Description.
"""
return_01 = parameter_01 + parameter_02 + parameter_03
return return_01
def function_04(parameter_01, parameter_02, parameter_03):
"""
Function description.
Parameters
----------
parameter_01 : type
Description.
parameter_02 : type
Description.
parameter_03 : type
Description.
Returns
-------
return_01
Description.
"""
return_01 = parameter_01 + parameter_02 + parameter_03
return return_01
|
from selenium import webdriver
import constants as const
class Add_Skill:
driver = None
def __init__(self, driver):
self.driver = driver
def move_to_skills(self):
self.driver.get(const.CONNECTIONS)
def get_people(self):
xpath = '//div[contains(@class, "mn-connection-card")]'
list_of_people = self.driver.find_elements_by_xpath(xpath)
return list_of_people
|
from logics.classes.predicate.semantics import Model
class ArithmeticModel(Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fixed_denotations.update({
'0': 0,
's': lambda x: x + 1,
'+': lambda x, y: x + y,
'*': lambda x, y: x * y,
'**': lambda x, y: x ** y,
'=': lambda x, y: '1' if x == y else '0',
'>': lambda x, y: '1' if x > y else '0',
'<': lambda x, y: '1' if x < y else '0',
})
class RealNumberArithmeticModel(Model):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fixed_denotations.update({
'+': lambda x, y: x + y,
'-': lambda x, y: x - y,
'*': lambda x, y: x * y,
'/': lambda x, y: x / y,
'//': lambda x, y: x // y,
'**': lambda x, y: x ** y,
'=': lambda x, y: '1' if x == y else '0',
'>': lambda x, y: '1' if x > y else '0',
'<': lambda x, y: '1' if x < y else '0',
})
def denotation(self, term, free_variable_denotation_dict=None):
"""In real number arithmetic have every numeral as constant"""
if type(term) == str:
try:
num = int(term)
return num
except ValueError:
try:
num = float(term)
return num
except ValueError:
pass
return super().denotation(term, free_variable_denotation_dict)
|
#! /usr/bin/env python
# by caozj
# Jun 4, 2019
# 8:09:11 PM
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import time
import argparse
import numpy as np
import dca_modpp.api
import Cell_BLAST as cb
import utils
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", dest="input", type=str, required=True)
parser.add_argument("-g", "--genes", dest="genes", type=str, required=True)
parser.add_argument("-o", "--output", dest="output", type=str, required=True)
parser.add_argument("--n-latent", dest="n_latent", type=int, default=32)
parser.add_argument("--n-hidden", dest="n_hidden", type=int, default=64)
parser.add_argument("--n-layers", dest="n_layers", type=int, default=1)
parser.add_argument("--n-epochs", dest="n_epochs", type=int, default=1000)
parser.add_argument("--patience", dest="patience", type=int, default=30)
parser.add_argument("-s", "--seed", dest="seed", type=int, default=None) # Not exactly be reproducible though
parser.add_argument("-t", "--threads", dest="threads", type=int, default=None)
parser.add_argument("-d", "--device", dest="device", type=str, default=None)
parser.add_argument("--clean", dest="clean", type=str, default=None)
cmd_args = parser.parse_args()
cmd_args.output_path = os.path.dirname(cmd_args.output)
if not os.path.exists(cmd_args.output_path):
os.makedirs(cmd_args.output_path)
os.environ["CUDA_VISIBLE_DEVICES"] = utils.pick_gpu_lowest_memory() \
if cmd_args.device is None else cmd_args.device
return cmd_args
def main(cmd_args):
dataset = cb.data.ExprDataSet.read_dataset(cmd_args.input, sparsify=True)
if cmd_args.clean is not None:
dataset = utils.clean_dataset(dataset, cmd_args.clean)
if cmd_args.genes is not None:
genes = dataset.uns[cmd_args.genes]
else:
genes = None
dataset = dataset.to_anndata()
start_time = time.time()
dataset, model = dca_modpp.api.dca(
dataset, genes, mode="latent", normalize_per_cell=10000, scale=False,
hidden_size=
(cmd_args.n_hidden, ) * cmd_args.n_layers +
(cmd_args.n_latent, ) +
(cmd_args.n_hidden, ) * cmd_args.n_layers,
epochs=cmd_args.n_epochs, early_stop=cmd_args.patience,
random_state=cmd_args.seed, threads=cmd_args.threads,
return_model=True, copy=True
)
cb.data.write_hybrid_path(
time.time() - start_time,
"//".join([cmd_args.output, "time"])
)
cb.data.write_hybrid_path(
dataset.obsm["X_dca"],
"//".join([cmd_args.output, "latent"])
)
model.encoder.save(os.path.join(cmd_args.output_path, "model.h5"))
np.savetxt(os.path.join(cmd_args.output_path, "genes.txt"), genes, "%s")
if __name__ == "__main__":
main(parse_args())
print("Done!")
|
import typing as t
import os
import yaml
from loguru import logger
__all__ = ["config"]
class ConfigClass:
database: str
cogs: t.List[str]
admins: t.List[int]
token: str
invite: str
source: str
ball: t.Dict[str, t.List[str]]
log_channel: int
def __init__(self) -> None:
self.reload_config()
def load_env_var(self, name: str) -> str:
var = os.getenv(name)
if var is None:
raise KeyError(f"Enviroment var '{name}' not found")
return var
def reload_config(self):
logger.info("loading config.yaml")
with open("config.yaml") as f:
self.raw_data = yaml.load(f, Loader=yaml.BaseLoader)
self.cogs = self.raw_data["cogs"]
self.admins = [int(id_) for id_ in self.raw_data["admins"]]
self.invite = self.raw_data["invite"]
self.source = self.raw_data["source"]
self.ball = self.raw_data["8ball"]
self.log_channel = int(self.raw_data["botlogs"])
# .env vars
logger.info("loading enviroment vars")
self.token = self.load_env_var("TOKEN")
self.database = self.load_env_var("DATABASE_URL")
config = ConfigClass()
|
"""The Config class contains the general settings that we want all
environments to have by default. Other environment classes
inherit from it and can be used to set settings that are unique to
them. Additionally, the dictionary app_config is used to export the
environments we've specified.
"""
import os
class Config(object):
"""Parent configuration class"""
DEBUG = False
CSRF_ENABLED = True
SECRET_KEY = os.getenv('SECRET')
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
SQLALCHEMY_TRACK_MODIFICATIONS = False
JWT_BLACKLIST_ENABLED = True
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USERNAME = os.environ.get('EMAIL')
MAIL_PASSWORD = os.environ.get('PASSWORD')
MAIL_DEFAULT_SENDER = os.environ.get('EMAIL')
class DevelopmentConfig(Config):
"""Configurations for Development"""
DEBUG = True
MAIL_SUPPRESS_SEND = True
class TestingConfig(Config):
"""Configurations for Testing"""
DEBUG = True
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL')
class StagingConfig(Config):
"""Configuraions for Staging"""
DEBUG = True
class ProductionConfig(Config):
"""Configurations for production"""
DEBUG = False
TESTING = False
app_config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'staging': StagingConfig,
'production': ProductionConfig
}
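# Usage sketch (illustrative, guarded so it never runs on import): how the app_config
# mapping above is typically consumed in a Flask application factory. The environment
# name 'development' is just an example; the factory itself is not part of this module.
if __name__ == '__main__':
    from flask import Flask
    demo_app = Flask(__name__)
    demo_app.config.from_object(app_config['development'])
    print(demo_app.config['DEBUG'])  # True, from DevelopmentConfig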
|
""" author:
name : Do Viet Chinh
personal email: dovietchinh1998@mgail.com
personal facebook: https://www.facebook.com/profile.php?id=100005935236259
VNOpenAI team: vnopenai@gmail.com
via team :
date:
26.3.2021
"""
import numpy as np
import cv2
import os
import math
from albumentations import (
PadIfNeeded,
HorizontalFlip,
VerticalFlip,
CenterCrop,
Crop,
Compose,
Transpose,
RandomRotate90,
ElasticTransform,
GridDistortion,
OpticalDistortion,
RandomSizedCrop,
OneOf,
CLAHE,
RandomBrightnessContrast,
RandomGamma,
HueSaturationValue,
RGBShift,
RandomBrightness,
RandomContrast,
MotionBlur,
MedianBlur,
GaussianBlur,
GaussNoise,
ChannelShuffle,
CoarseDropout,
ShiftScaleRotate
)
crop_size = (256-32, 256-32)
size = (256, 256)
x_min = 10
y_min = 10
x_max = -x_min + size[0]
y_max = -y_min + size[1]
ops = {
'CenterCrop' : CenterCrop(p=1, height=crop_size[0], width=crop_size[1]),
'Crop' : Crop(p=1, x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max),
# RandomRotate90(p=1),
#Transpose(p=1),
'ElasticTransform': ElasticTransform(
p=1, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
'GridDistortion':GridDistortion(p=1),
#OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5),
#'VerticalFlip': VerticalFlip(p=1),
'HorizontalFlip': HorizontalFlip(p=1),
'RandomBrightnessContrast': RandomBrightnessContrast(p=1),
'RandomGamma' : RandomGamma(p=1),
'HueSaturationValue': HueSaturationValue(p=1),
'RGBShift': RGBShift(p=1),
'RandomBrightness': RandomBrightness(p=1),
'RandomContrast': RandomContrast(p=1),
'MotionBlur': MotionBlur(p=1, blur_limit=7),
'MedianBlur': MedianBlur(p=1, blur_limit=9),
'GaussianBlur':GaussianBlur(p=1, blur_limit=9),
'GaussNoise': GaussNoise(p=1),
'ChannelShuffle':ChannelShuffle(p=1),
'CoarseDropout': CoarseDropout(p=1, max_holes=8, max_height=32, max_width=32),
'ShiftScaleRotate': ShiftScaleRotate(p =1,shift_limit=0.1, scale_limit=0.1, rotate_limit=30, interpolation=cv2.INTER_NEAREST, border_mode=cv2.BORDER_REPLICATE)
}
get_policy_for_each_transformation = {
'CenterCrop': None,
'Crop': None,
'ElasticTransform': None,
'GridDistortion': None,
'HorizontalFlip': None,
'RandomBrightnessContrast': None,
'RandomContrast': None,
'MotionBlur': None,
'MedianBlur': None,
'GaussianBlur': None,
'ChannelShuffle': None,
'CoarseDropout': None,
'ShiftScaleRotate': None,
}
class RandAugment():
def __init__(self,N=3,M=10):
"""[summary]
Args:
N (int, optional): [numbers of transformations will apply to image seuqentially]. Defaults to 3.
M (int, optional): [policy for each transformations, see ]. Defaults to 10.
"""
self.N = N
self.M = M
def get_random_ops(self,):
"""[pick randomly N transformation functions in ops dictionary]
Returns:
[type]: [return list of transformation functions]
"""
n = np.random.randint(1,self.N+1)
ops_random = np.random.choice( list(ops.keys()), n)
return ops_random
def __call__(self,img,mask):
img_aug = img.copy()
mask_aug = mask.copy()
ops_random= self.get_random_ops()
for name in ops_random:
aug = ops[name]
augmented = aug(image=img_aug, mask=mask_aug)
img_aug = augmented['image']
mask_aug = augmented['mask']
if img_aug.shape[0] !=256:
img_aug = cv2.resize(img_aug,(256,256))
mask_aug = cv2.resize(mask_aug,(256,256))
return img_aug,mask_aug
if __name__ =='__main__':
x = cv2.imread('data/train/new_images/train_00000.jpg', cv2.IMREAD_COLOR)
y = cv2.imread('data/train/new_masks/train_00000.png', cv2.IMREAD_GRAYSCALE)
x_aug = x.copy()
y_aug = y.copy()
#ops_random = np.random.choice( ops, 4)
#for aug in ops_random:
for name in ops:
aug = ops[name]
print(aug)
augmented = aug(image=x_aug, mask=y_aug)
x_aug = augmented['image']
y_aug = augmented['mask']
if (x_aug.shape[0]!=256):
x_aug = cv2.resize(x_aug,(256,256))
y_aug = cv2.resize(y_aug,(256,256))
cv2.imshow('a',x_aug)
cv2.imshow('b',y_aug)
k = cv2.waitKey(0)
if k==ord('q'):
break
print(x_aug.shape,y_aug.shape)
cv2.destroyAllWindows()
|
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from django.views import generic
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
from starlingx_dashboard.api import fm
@urls.register
class AlarmSummary(generic.View):
"""API for retrieving alarm summaries."""
url_regex = r'fm/alarm_summary/$'
@rest_utils.ajax()
def get(self, request):
"""Get an alarm summary for the system"""
include_suppress = request.GET.get('include_suppress', False)
result = fm.alarm_summary_get(request, include_suppress)
return result.to_dict()
|
import json
from phonenumbers import NumberParseException
from utils.db_api.database import TimedBaseModel, db
from utils.db_api.models.custumers import Custumer
class Phone(TimedBaseModel):
__tablename__ = "phones"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
customer_id = db.Column(db.BigInteger, db.ForeignKey('customers.id'))
source_number = db.Column(db.String(150))
number = db.Column(db.String(50))
region = db.Column(db.String(100))
operator = db.Column(db.String(50))
old_operator = db.Column(db.String(50))
@classmethod
async def add(cls, customer_id: int, source_number: str):
import requests
from phonenumbers import parse
try:
phone_obj = parse(number=source_number, region="RU")
        except NumberParseException:
return {
'info': f"Неверный формат номера: <pre>{source_number}</pre>",
'example': ["+74959898533", "74959898533", "84959898533", "4959898533"]
}
url = "http://num.voxlink.ru/get/"
querystring = {"num": f"+{phone_obj.country_code}{phone_obj.national_number}"}
payload = ""
response = requests.request("GET", url, data=payload, params=querystring)
phone_obj = json.loads(response.text)
if phone_obj.get('info'):
            return phone_obj.get('info', '') + " - accepted formats: " + ", ".join(phone_obj.get('example', ''))
else:
obj = cls(customer_id=customer_id, source_number=source_number,
number=phone_obj.get('full_num'), region=phone_obj.get('region'),
operator=phone_obj.get('operator'), old_operator=phone_obj.get('old_operator'))
await obj.create()
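# Minimal sketch (illustrative, guarded so it never runs on import) of the phonenumbers
# parsing step used by Phone.add above, shown without the database or the HTTP lookup.
# The sample number is one of the accepted formats listed in the error message above.
if __name__ == "__main__":
    from phonenumbers import parse
    demo_phone = parse(number="84959898533", region="RU")
    print(f"+{demo_phone.country_code}{demo_phone.national_number}")  # +74959898533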
|
from fnal_column_analysis_tools import hist
from fnal_column_analysis_tools.hist import plot
from fnal_column_analysis_tools.util import numpy as np
from .systematics import jet_pt_systs,jet_weight_systs
def fill_plots_presel(dataset,gencat,systematic,leadingak8jet,weight,plots):
genW = np.sign(weight)
plots['sumw'].fill(dataset=dataset,
systematic=systematic,
sumw=genW)
plots['hjetpt'].fill(dataset=dataset,
ak8_isHadronicV=gencat.sum(),
systematic=systematic,
ak8_pt=leadingak8jet.pt.sum(),
weight=weight)
plots['hsculpt'].fill(dataset=dataset,
ak8_isHadronicV=gencat.sum(),
systematic=systematic,
ak8_pt=leadingak8jet.pt.sum(),
ak8_msd=leadingak8jet.msd_corr_8.sum(),
ak8_deepdoubleb=leadingak8jet.deepdoubleb.sum(),
ak8_deepdoublec=leadingak8jet.deepdoublec.sum(),
ak8_deepdoublecvb=leadingak8jet.deepdoublecvb.sum(),
weight=weight)
def fill_plots_sr(dataset,gencat,systematic,leadingak8jet,weight,plots):
plots['hjetpt_sr'].fill(dataset=dataset,
ak8_isHadronicV=gencat.sum(),
systematic=systematic,
ak8_pt=leadingak8jet.pt.sum(),
weight=weight)
plots['hsculpt_sr'].fill(dataset=dataset,
ak8_isHadronicV=gencat.sum(),
systematic=systematic,
ak8_pt=leadingak8jet.pt.sum(),
ak8_msd=leadingak8jet.msd_corr_8.sum(),
ak8_deepdoubleb=leadingak8jet.deepdoubleb.sum(),
ak8_deepdoublec=leadingak8jet.deepdoublec.sum(),
ak8_deepdoublecvb=leadingak8jet.deepdoublecvb.sum(),
weight=weight)
#preselection and signal region plots
def signal_region(gghbbcuts,
dataset,
gencat,
presel_weight, eventInfo, leadingak8jet,
looseMuons,looseElectrons,looseTaus,
hasTightVJet,
plots):
#for name,vari in jet_systs:
# attr = "pt"
# if len(vari) > 0: attr += "_"+vari
systematic = "central"
srweight_nomet = ( #jet selection
((leadingak8jet.pt > gghbbcuts.PTCUT).sum() > 0) &
((leadingak8jet.msd_corr_8 > gghbbcuts.MASSCUT).sum() > 0) &
((leadingak8jet.jtN2b1sdddt_8 < 0).sum() > 0) &
hasTightVJet &
#lepton vetos
(looseMuons.counts == 0) &
(looseElectrons.counts == 0) &
(looseTaus.counts == 0)
)
#met selection (remove events with large MET, fake or real)
pfmetweight = (eventInfo['pfmet'].sum() < gghbbcuts.METCUT)
jetweight = leadingak8jet.weight.sum()
#jetweight[jetweight > 0.] = 1.0 #for now
#preselection
weight = jetweight * presel_weight
fill_plots_presel(dataset,gencat,systematic,leadingak8jet,
weight,
plots)
#signal region no met cut
weight_srnomet = weight * srweight_nomet
plots['pfmet_nminus1_sr'].fill(dataset=dataset,
ak8_isHadronicV=gencat.sum(),
systematic=systematic,
ak8_pt=leadingak8jet.pt.sum(),
ak8_msd=leadingak8jet.msd_corr_8.sum(),
pfmet=eventInfo['pfmet'].sum(),
weight=weight_srnomet
)
#signal region variables
    weight_sr = weight_srnomet * pfmetweight
fill_plots_sr(dataset,gencat,systematic,leadingak8jet,
weight_sr,
plots)
|
import base64
import json
from datetime import datetime
from enum import Enum
from typing import List
from urllib.parse import urljoin
import requests
from pygrocy.utils import parse_date, parse_float, parse_int, localize_datetime
DEFAULT_PORT_NUMBER = 9192
class ShoppingListItem(object):
def __init__(self, parsed_json):
self._id = parse_int(parsed_json.get("id"))
self._product_id = parse_int(parsed_json.get("product_id", None))
self._note = parsed_json.get("note", None)
self._amount = parse_float(parsed_json.get("amount"), 0)
self._row_created_timestamp = parse_date(
parsed_json.get("row_created_timestamp", None)
)
self._shopping_list_id = parse_int(parsed_json.get("shopping_list_id"))
self._done = parse_int(parsed_json.get("done"))
@property
def id(self) -> int:
return self._id
@property
def product_id(self) -> int:
return self._product_id
@property
def note(self) -> str:
return self._note
@property
def amount(self) -> float:
return self._amount
class MealPlanResponse(object):
def __init__(self, parsed_json):
self._id = parse_int(parsed_json.get("id"))
self._day = parse_date(parsed_json.get("day"))
self._type = parsed_json.get("type")
self._recipe_id = parse_int(parsed_json.get("recipe_id"))
self._recipe_servings = parse_int(parsed_json.get("recipe_servings"))
self._note = parsed_json.get("note", None)
self._product_id = parsed_json.get("product_id")
self._product_amount = parse_float(parsed_json.get("product_amount"), 0)
self._product_qu_id = parsed_json.get("product_qu_id")
self._row_created_timestamp = parse_date(
parsed_json.get("row_created_timestamp")
)
self._userfields = parsed_json.get("userfields")
@property
def id(self) -> int:
return self._id
@property
def day(self) -> datetime:
return self._day
@property
def recipe_id(self) -> int:
return self._recipe_id
@property
def recipe_servings(self) -> int:
return self._recipe_servings
@property
def note(self) -> str:
return self._note
class RecipeDetailsResponse(object):
def __init__(self, parsed_json):
self._id = parse_int(parsed_json.get("id"))
self._name = parsed_json.get("name")
self._description = parsed_json.get("description")
self._base_servings = parse_int(parsed_json.get("base_servings"))
self._desired_servings = parse_int(parsed_json.get("desired_servings"))
self._picture_file_name = parsed_json.get("picture_file_name")
self._row_created_timestamp = parse_date(
parsed_json.get("row_created_timestamp")
)
self._userfields = parsed_json.get("userfields")
@property
def id(self) -> int:
return self._id
@property
def name(self) -> str:
return self._name
@property
def description(self) -> str:
return self._description
@property
def base_servings(self) -> int:
return self._base_servings
@property
def desired_servings(self) -> int:
return self._desired_servings
@property
def picture_file_name(self) -> str:
return self._picture_file_name
class QuantityUnitData(object):
def __init__(self, parsed_json):
self._id = parse_int(parsed_json.get("id"))
self._name = parsed_json.get("name")
self._name_plural = parsed_json.get("name_plural")
self._description = parsed_json.get("description")
self._row_created_timestamp = parse_date(
parsed_json.get("row_created_timestamp")
)
class LocationData(object):
def __init__(self, parsed_json):
self._id = parse_int(parsed_json.get("id"))
self._name = parsed_json.get("name")
self._description = parsed_json.get("description")
self._row_created_timestamp = parse_date(
parsed_json.get("row_created_timestamp")
)
@property
def id(self) -> int:
return self._id
@property
def name(self) -> str:
return self._name
@property
def description(self) -> str:
return self._description
class ProductData(object):
def __init__(self, parsed_json):
self._id = parse_int(parsed_json.get("id"))
self._name = parsed_json.get("name")
self._description = parsed_json.get("description", None)
self._location_id = parse_int(parsed_json.get("location_id", None))
self._product_group_id = parse_int(parsed_json.get("product_group_id", None))
self._qu_id_stock = parse_int(parsed_json.get("qu_id_stock", None))
        self._qu_id_purchase = parse_int(parsed_json.get("qu_id_purchase", None))
self._qu_factor_purchase_to_stock = parse_float(
parsed_json.get("qu_factor_purchase_to_stock", None)
)
self._picture_file_name = parsed_json.get("picture_file_name", None)
self._allow_partial_units_in_stock = bool(
parsed_json.get("allow_partial_units_in_stock", None) == "true"
)
self._row_created_timestamp = parse_date(
parsed_json.get("row_created_timestamp", None)
)
self._min_stock_amount = parse_int(parsed_json.get("min_stock_amount", None), 0)
self._default_best_before_days = parse_int(
parsed_json.get("default_best_before_days", None)
)
barcodes_raw = parsed_json.get("barcode", "")
if barcodes_raw is None:
self._barcodes = None
else:
self._barcodes = barcodes_raw.split(",")
@property
def id(self) -> int:
return self._id
@property
def product_group_id(self) -> int:
return self._product_group_id
@property
def name(self) -> str:
return self._name
@property
def barcodes(self) -> List[str]:
return self._barcodes
class ChoreData(object):
def __init__(self, parsed_json):
self.id = parse_int(parsed_json.get("id"))
self.name = parsed_json.get("name")
self.description = parsed_json.get("description")
self.period_type = parsed_json.get("period_type")
self.period_config = parsed_json.get("period_config")
self.period_days = parse_int(parsed_json.get("period_days"))
self.track_date_only = parsed_json.get("track_date_only")
self.rollover = parsed_json.get("rollover")
self.assignment_type = parsed_json.get("assignment_type")
self.assignment_config = parsed_json.get("assignment_config")
        self.next_execution_assigned_to_user_id = parse_int(
            parsed_json.get("next_execution_assigned_to_user_id")
        )
self.userfields = parsed_json.get("userfields")
class UserDto(object):
def __init__(self, parsed_json):
self._id = parse_int(parsed_json.get("id"))
self._username = parsed_json.get("username")
self._first_name = parsed_json.get("first_name")
self._last_name = parsed_json.get("last_name")
self._display_name = parsed_json.get("display_name")
@property
def id(self) -> int:
return self._id
@property
def username(self) -> str:
return self._username
@property
def first_name(self) -> str:
return self._first_name
@property
def last_name(self) -> str:
return self._last_name
@property
def display_name(self) -> str:
return self._display_name
class CurrentChoreResponse(object):
def __init__(self, parsed_json):
self._chore_id = parse_int(parsed_json.get("chore_id"), None)
self._last_tracked_time = parse_date(parsed_json.get("last_tracked_time"))
self._next_estimated_execution_time = parse_date(
parsed_json.get("next_estimated_execution_time")
)
@property
def chore_id(self) -> int:
return self._chore_id
@property
def last_tracked_time(self) -> datetime:
return self._last_tracked_time
@property
def next_estimated_execution_time(self) -> datetime:
return self._next_estimated_execution_time
class CurrentStockResponse(object):
def __init__(self, parsed_json):
self._product_id = parse_int(parsed_json.get("product_id"))
self._amount = parse_float(parsed_json.get("amount"))
self._best_before_date = parse_date(parsed_json.get("best_before_date"))
self._amount_opened = parse_float(parsed_json.get("amount_opened"))
self._product = ProductData(parsed_json.get("product"))
@property
def product_id(self) -> int:
return self._product_id
@property
def amount(self) -> float:
return self._amount
@property
def best_before_date(self) -> datetime:
return self._best_before_date
@property
def amount_opened(self) -> float:
return self._amount_opened
@property
def product(self) -> ProductData:
return self._product
class MissingProductResponse(object):
def __init__(self, parsed_json):
self._product_id = parse_int(parsed_json.get("id"))
self._name = parsed_json.get("name")
self._amount_missing = parse_float(parsed_json.get("amount_missing"))
self._is_partly_in_stock = bool(
parse_int(parsed_json.get("is_partly_in_stock"))
)
@property
def product_id(self) -> int:
return self._product_id
@property
def name(self) -> str:
return self._name
@property
def amount_missing(self) -> float:
return self._amount_missing
@property
def is_partly_in_stock(self) -> bool:
return self._is_partly_in_stock
class CurrentVolatilStockResponse(object):
def __init__(self, parsed_json):
self._expiring_products = [
CurrentStockResponse(product)
for product in parsed_json.get("expiring_products")
]
self._expired_products = [
CurrentStockResponse(product)
for product in parsed_json.get("expired_products")
]
self._missing_products = [
MissingProductResponse(product)
for product in parsed_json.get("missing_products")
]
@property
def expiring_products(self) -> List[CurrentStockResponse]:
return self._expiring_products
@property
def expired_products(self) -> List[CurrentStockResponse]:
return self._expired_products
@property
def missing_products(self) -> List[MissingProductResponse]:
return self._missing_products
class ProductDetailsResponse(object):
def __init__(self, parsed_json):
self._last_purchased = parse_date(parsed_json.get("last_purchased"))
self._last_used = parse_date(parsed_json.get("last_used"))
self._stock_amount = parse_int(parsed_json.get("stock_amount"))
self._stock_amount_opened = parse_int(parsed_json.get("stock_amount_opened"))
self._next_best_before_date = parse_date(
parsed_json.get("next_best_before_date")
)
self._last_price = parse_float(parsed_json.get("last_price"))
self._product = ProductData(parsed_json.get("product"))
self._quantity_unit_purchase = QuantityUnitData(
parsed_json.get("quantity_unit_purchase")
)
self._quantity_unit_stock = QuantityUnitData(
parsed_json.get("quantity_unit_stock")
)
raw_location = parsed_json.get("location")
if raw_location is None:
self._location = None
else:
self._location = LocationData(raw_location)
@property
def last_purchased(self) -> datetime:
return self._last_purchased
@property
def last_used(self) -> datetime:
return self._last_used
@property
def stock_amount(self) -> int:
return self._stock_amount
@property
def stock_amount_opened(self) -> int:
return self._stock_amount_opened
@property
def next_best_before_date(self) -> datetime:
return self._next_best_before_date
@property
def last_price(self) -> float:
return self._last_price
@property
def product(self) -> ProductData:
return self._product
class ChoreDetailsResponse(object):
def __init__(self, parsed_json):
self._chore = ChoreData(parsed_json.get("chore"))
self._last_tracked = parse_date(parsed_json.get("last_tracked"))
self._next_estimated_execution_time = parse_date(
parsed_json.get("next_estimated_execution_time")
)
self._track_count = parse_int(parsed_json.get("track_count"))
next_user = parsed_json.get("next_execution_assigned_user")
if next_user is not None:
self._next_execution_assigned_user = UserDto(next_user)
else:
self._next_execution_assigned_user = None
if self._last_tracked is None:
self._last_done_by = None
else:
self._last_done_by = UserDto(parsed_json.get("last_done_by"))
@property
def chore(self) -> ChoreData:
return self._chore
@property
def last_done_by(self) -> UserDto:
return self._last_done_by
@property
def last_tracked(self) -> datetime:
return self._last_tracked
@property
def next_estimated_execution_time(self) -> datetime:
return self._next_estimated_execution_time
@property
def track_count(self) -> int:
return self._track_count
@property
def next_execution_assigned_user(self) -> UserDto:
return self._next_execution_assigned_user
class TransactionType(Enum):
PURCHASE = "purchase"
CONSUME = "consume"
INVENTORY_CORRECTION = "inventory-correction"
PRODUCT_OPENED = "product-opened"
class TaskResponse(object):
def __init__(self, parsed_json):
self.id = parse_int(parsed_json.get("id"))
self.name = parsed_json.get("name")
self.description = parsed_json.get("description")
self.due_date = parse_date(parsed_json.get("due_date"))
self.done = parse_int(parsed_json.get("done"))
self.done_timestamp = parse_date(parsed_json.get("done_timestamp"))
self.category_id = parse_int(parsed_json.get("category_id"))
self.assigned_to_user_id = parse_int(parsed_json.get("assigned_to_user_id"))
self.userfields = parsed_json.get("userfields")
class GrocyApiClient(object):
def __init__(
self, base_url, api_key, port: int = DEFAULT_PORT_NUMBER, verify_ssl=True
):
self._base_url = "{}:{}/api/".format(base_url, port)
self._api_key = api_key
self._verify_ssl = verify_ssl
if self._api_key == "demo_mode":
self._headers = {"accept": "application/json"}
else:
self._headers = {"accept": "application/json", "GROCY-API-KEY": api_key}
def _do_get_request(self, end_url: str):
req_url = urljoin(self._base_url, end_url)
resp = requests.get(req_url, verify=self._verify_ssl, headers=self._headers)
resp.raise_for_status()
if len(resp.content) > 0:
return resp.json()
def _do_post_request(self, end_url: str, data: dict):
req_url = urljoin(self._base_url, end_url)
resp = requests.post(
req_url, verify=self._verify_ssl, headers=self._headers, json=data
)
resp.raise_for_status()
if len(resp.content) > 0:
return resp.json()
def _do_put_request(self, end_url: str, data):
req_url = urljoin(self._base_url, end_url)
up_header = self._headers.copy()
up_header["accept"] = "*/*"
if isinstance(data, dict):
up_header["Content-Type"] = "application/json"
data = json.dumps(data)
else:
up_header["Content-Type"] = "application/octet-stream"
resp = requests.put(
req_url, verify=self._verify_ssl, headers=up_header, data=data
)
resp.raise_for_status()
if len(resp.content) > 0:
return resp.json()
def get_stock(self) -> List[CurrentStockResponse]:
parsed_json = self._do_get_request("stock")
return [CurrentStockResponse(response) for response in parsed_json]
def get_volatile_stock(self) -> CurrentVolatilStockResponse:
parsed_json = self._do_get_request("stock/volatile")
return CurrentVolatilStockResponse(parsed_json)
def get_product(self, product_id) -> ProductDetailsResponse:
url = f"stock/products/{product_id}"
parsed_json = self._do_get_request(url)
if parsed_json:
return ProductDetailsResponse(parsed_json)
def get_chores(self) -> List[CurrentChoreResponse]:
parsed_json = self._do_get_request("chores")
return [CurrentChoreResponse(chore) for chore in parsed_json]
def get_chore(self, chore_id: int) -> ChoreDetailsResponse:
url = f"chores/{chore_id}"
parsed_json = self._do_get_request(url)
if parsed_json:
return ChoreDetailsResponse(parsed_json)
def execute_chore(
self,
chore_id: int,
done_by: int = None,
        tracked_time: datetime = None,
    ):
        # Resolve the default at call time; a datetime.now() default in the
        # signature would be evaluated only once, at import time.
        if tracked_time is None:
            tracked_time = datetime.now()
        localized_tracked_time = localize_datetime(tracked_time)
data = {"tracked_time": localized_tracked_time.isoformat()}
if done_by is not None:
data["done_by"] = done_by
return self._do_post_request(f"chores/{chore_id}/execute", data)
def add_product(
self,
product_id,
amount: float,
price: float,
best_before_date: datetime = None,
transaction_type: TransactionType = TransactionType.PURCHASE,
):
data = {
"amount": amount,
"transaction_type": transaction_type.value,
"price": price,
}
if best_before_date is not None:
data["best_before_date"] = best_before_date.strftime("%Y-%m-%d")
return self._do_post_request(f"stock/products/{product_id}/add", data)
def consume_product(
self,
product_id: int,
amount: float = 1,
spoiled: bool = False,
transaction_type: TransactionType = TransactionType.CONSUME,
):
data = {
"amount": amount,
"spoiled": spoiled,
"transaction_type": transaction_type.value,
}
self._do_post_request(f"stock/products/{product_id}/consume", data)
def get_shopping_list(self) -> List[ShoppingListItem]:
parsed_json = self._do_get_request("objects/shopping_list")
return [ShoppingListItem(response) for response in parsed_json]
def add_missing_product_to_shopping_list(self, shopping_list_id: int = None):
data = None
if shopping_list_id:
data = {"list_id": shopping_list_id}
self._do_post_request("stock/shoppinglist/add-missing-products", data)
def add_product_to_shopping_list(
self, product_id: int, shopping_list_id: int = 1, amount: int = 1
):
data = {
"product_id": product_id,
"list_id": shopping_list_id,
"product_amount": amount,
}
self._do_post_request("stock/shoppinglist/add-product", data)
def clear_shopping_list(self, shopping_list_id: int = 1):
data = {"list_id": shopping_list_id}
self._do_post_request("stock/shoppinglist/clear", data)
def remove_product_in_shopping_list(
self, product_id: int, shopping_list_id: int = 1, amount: int = 1
):
data = {
"product_id": product_id,
"list_id": shopping_list_id,
"product_amount": amount,
}
self._do_post_request("stock/shoppinglist/remove-product", data)
def get_product_groups(self) -> List[LocationData]:
parsed_json = self._do_get_request("objects/product_groups")
return [LocationData(response) for response in parsed_json]
def upload_product_picture(self, product_id: int, pic_path: str):
b64fn = base64.b64encode("{}.jpg".format(product_id).encode("ascii"))
req_url = "files/productpictures/" + str(b64fn, "utf-8")
with open(pic_path, "rb") as pic:
self._do_put_request(req_url, pic)
def update_product_pic(self, product_id: int):
pic_name = f"{product_id}.jpg"
data = {"picture_file_name": pic_name}
self._do_put_request(f"objects/products/{product_id}", data)
def get_userfields(self, entity: str, object_id: int):
url = f"userfields/{entity}/{object_id}"
return self._do_get_request(url)
def set_userfields(self, entity: str, object_id: int, key: str, value):
data = {key: value}
self._do_put_request(f"userfields/{entity}/{object_id}", data)
def get_last_db_changed(self):
resp = self._do_get_request("system/db-changed-time")
last_change_timestamp = parse_date(resp.get("changed_time"))
return last_change_timestamp
def get_tasks(self) -> List[TaskResponse]:
parsed_json = self._do_get_request("tasks")
return [TaskResponse(data) for data in parsed_json]
    def complete_task(self, task_id: int, done_time: datetime = None):
        url = f"tasks/{task_id}/complete"
        # Same call-time default as execute_chore: avoid a datetime.now() default
        # frozen at import time.
        if done_time is None:
            done_time = datetime.now()
        localized_done_time = localize_datetime(done_time)
data = {"done_time": localized_done_time.isoformat()}
self._do_post_request(url, data)
def get_meal_plan(self) -> List[MealPlanResponse]:
parsed_json = self._do_get_request("objects/meal_plan")
return [MealPlanResponse(data) for data in parsed_json]
def get_recipe(self, object_id: int) -> RecipeDetailsResponse:
parsed_json = self._do_get_request(f"objects/recipes/{object_id}")
if parsed_json:
return RecipeDetailsResponse(parsed_json)
def add_generic(self, entity_type: str, data: object):
self._do_post_request(f"objects/{entity_type}", data)
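# Illustrative usage sketch (not part of the client above). The base URL, API key
# and port are made-up placeholders; a reachable Grocy instance is assumed.
if __name__ == "__main__":
    client = GrocyApiClient("https://grocy.example.com", "EXAMPLE_API_KEY", port=443)
    for entry in client.get_stock():
        # Each entry is a CurrentStockResponse built from the /stock endpoint.
        print(entry.product_id, entry.amount, entry.best_before_date)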
|
# -*- coding: utf-8 -*-
u"""Recent-file history management."""
from __future__ import absolute_import, division, print_function
from maya import cmds
_RECENT_FILES_KEY = "squid_recent_fbx_files"
_RECENT_FILES_LIMIT = 10
def get_recent_files():
    u"""Return the list of recently used files.
    Returns:
        list of unicode: recently used file paths, most recent first
"""
if not cmds.optionVar(ex=_RECENT_FILES_KEY):
return []
res = list(cmds.optionVar(q=_RECENT_FILES_KEY))
res.reverse()
return res
def add_recent_file(path):
    u"""Add the given file to the recent-file history.
    Args:
        path (unicode): file path
"""
if not path:
return
if cmds.optionVar(ex=_RECENT_FILES_KEY):
files = cmds.optionVar(q=_RECENT_FILES_KEY)
for i in xrange(0, len(files)):
if path == files[i]:
cmds.optionVar(rfa=(_RECENT_FILES_KEY, i))
break
cmds.optionVar(sva=(_RECENT_FILES_KEY, path))
files = cmds.optionVar(q=_RECENT_FILES_KEY)
if len(files) <= _RECENT_FILES_LIMIT:
return
for i in xrange(0, len(files) - _RECENT_FILES_LIMIT):
cmds.optionVar(rfa=(_RECENT_FILES_KEY, 0))
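# Illustrative usage sketch (assumes a running Maya session; the path below is a
# made-up placeholder):
#
#     add_recent_file(u"D:/work/characters/hero.fbx")
#     for recent in get_recent_files():
#         print(recent)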
|
"""
ASGI spec conformance test suite.
Calling the functions with an ASGI channel layer instance will return you a
single TestCase instance that checks for conformity on that instance.
You MUST also pass along an expiry value to the sets of tests, to allow the
suite to wait correctly for expiry. It's suggested you configure the layer
for 1-second expiry during tests, and use a 1.1 second expiry delay.
The channel layers should be empty to start with, and discarded after use,
as they'll be full of test data. If a layer supports the "flush" extension,
it'll be flushed before every test.
"""
from __future__ import unicode_literals
import six
import time
import unittest
class ConformanceTestCase(unittest.TestCase):
"""
Tests that core ASGI functionality is maintained.
"""
channel_layer = None
expiry_delay = None
capacity_limit = None
receive_tries = 1
def receive(self, channels):
"""
Allows tests to automatically call channel_layer.receive() more than once.
This is necessary for testing ChannelLayer implementations that do not guarantee a response will
be returned on every receive() call, even when there are messages on the channel. This would be
the case, for example, for a channel layer designed for a multi-worker environment with multiple
backing hosts, that checks a different host on each call.
"""
for _ in range(self.receive_tries):
channel, message = self.channel_layer.receive(channels)
if channel is not None:
return channel, message
return None, None
@classmethod
def setUpClass(cls):
# Don't let this actual class run, it's abstract
if cls is ConformanceTestCase:
raise unittest.SkipTest("Skipping base class tests")
def setUp(self):
if self.channel_layer is None:
raise ValueError("You must define 'channel_layer' when subclassing the conformance tests.")
if self.expiry_delay is None:
raise ValueError("You must define 'expiry_delay' when subclassing the conformance tests.")
if "flush" in self.channel_layer.extensions:
self.channel_layer.flush()
def skip_if_no_extension(self, extension):
"""
Handy function for skipping things without an extension.
We can't use the decorators, as we need access to self.
"""
if extension not in self.channel_layer.extensions:
raise unittest.SkipTest("No %s extension" % extension)
def test_send_recv(self):
"""
Tests that channels can send and receive messages right.
"""
self.channel_layer.send("sr_test", {"value": "blue"})
self.channel_layer.send("sr_test", {"value": "green"})
self.channel_layer.send("sr_test", {"value": "yellow"})
self.channel_layer.send("sr_test2", {"value": "red"})
# Receive from the first channel twice
response_messages = []
for i in range(3):
channel, message = self.receive(["sr_test"])
response_messages.append(message)
self.assertEqual(channel, "sr_test")
for response in response_messages:
self.assertTrue("value" in response)
# Check that all messages were returned; order is not guaranteed
self.assertEqual(set([r["value"] for r in response_messages]), set(["blue", "green", "yellow"]))
# And the other channel with multi select
channel, message = self.receive(["sr_test", "sr_test2"])
self.assertEqual(channel, "sr_test2")
self.assertEqual(message, {"value": "red"})
def test_single_process_receive(self):
"""
Tests that single-process receive gets anything with the right prefix.
"""
self.channel_layer.send("spr_test!a", {"best": "ponies"})
channel, message = self.receive(["spr_test!"])
self.assertEqual(channel, "spr_test!a")
self.assertEqual(message, {"best": "ponies"})
self.channel_layer.send("spr_test!b", {"best": "pangolins"})
channel, message = self.receive(["spr_test!"])
self.assertEqual(channel, "spr_test!b")
self.assertEqual(message, {"best": "pangolins"})
def test_single_process_receive_error(self):
"""
Tests that single-process receive isn't allowed with a local part.
"""
with self.assertRaises(Exception):
self.receive(["spr_test!c"])
def test_message_expiry(self):
"""
Tests that messages expire correctly.
"""
self.channel_layer.send("me_test", {"value": "blue"})
time.sleep(self.expiry_delay)
channel, message = self.receive(["me_test"])
self.assertIs(channel, None)
self.assertIs(message, None)
def test_new_channel_single_reader(self):
"""
Tests that new single-reader channel names are made correctly.
"""
pattern = "test.foo?"
name1 = self.channel_layer.new_channel(pattern)
self.assertFalse(name1.endswith("?"))
self.assertTrue("?" in name1)
self.assertEqual(name1.find("?"), name1.rfind("?"))
self.assertIsInstance(name1, six.text_type)
# Send a message and make sure new_channel on second pass changes
self.channel_layer.send(name1, {"value": "blue"})
name2 = self.channel_layer.new_channel(pattern)
# Make sure we can consume off of that new channel
channel, message = self.receive([name1, name2])
self.assertEqual(channel, name1)
self.assertEqual(message, {"value": "blue"})
def test_new_channel_failures(self):
"""
Tests that we don't allow bad new channel names.
"""
with self.assertRaises(Exception):
self.channel_layer.new_channel("test!")
with self.assertRaises(Exception):
self.channel_layer.new_channel("test.foo")
def test_strings(self):
"""
Ensures byte strings and unicode strings both make it through
serialization properly.
"""
# Message. Double-nested to ensure serializers are recursing properly.
message = {
"values": {
# UTF-8 sequence for british pound, but we want it not interpreted into that.
"utf-bytes": b"\xc2\xa3",
# Actual unicode for british pound, should come back as 1 char
"unicode": "\u00a3",
                # Emoji, in case someone is using 3-byte-wide unicode storage
                "emoji": "\U0001F612",
# Random control characters and null
"control": b"\x01\x00\x03\x21",
}
}
# Send it and receive it
self.channel_layer.send("str_test", message)
_, received = self.receive(["str_test"])
# Compare
self.assertIsInstance(received["values"]["utf-bytes"], six.binary_type)
self.assertIsInstance(received["values"]["unicode"], six.text_type)
self.assertIsInstance(received["values"]["emoji"], six.text_type)
self.assertIsInstance(received["values"]["control"], six.binary_type)
self.assertEqual(received["values"]["utf-bytes"], message["values"]["utf-bytes"])
self.assertEqual(received["values"]["unicode"], message["values"]["unicode"])
self.assertEqual(received["values"]["emoji"], message["values"]["emoji"])
self.assertEqual(received["values"]["control"], message["values"]["control"])
def test_groups(self):
"""
Tests that basic group addition and send works
"""
self.skip_if_no_extension("groups")
# Make a group and send to it
self.channel_layer.group_add("tgroup", "tg_test")
self.channel_layer.group_add("tgroup", "tg_test2")
self.channel_layer.group_add("tgroup", "tg_test3")
self.channel_layer.group_discard("tgroup", "tg_test3")
self.channel_layer.send_group("tgroup", {"value": "orange"})
# Receive from the two channels in the group and ensure messages
channel, message = self.receive(["tg_test"])
self.assertEqual(channel, "tg_test")
self.assertEqual(message, {"value": "orange"})
channel, message = self.receive(["tg_test2"])
self.assertEqual(channel, "tg_test2")
self.assertEqual(message, {"value": "orange"})
# Make sure another channel does not get a message
channel, message = self.receive(["tg_test3"])
self.assertIs(channel, None)
self.assertIs(message, None)
def test_groups_process(self):
"""
Tests that group membership and sending works with process-specific channels.
"""
self.skip_if_no_extension("groups")
# Make a group and send to it
self.channel_layer.group_add("tgroup", "tgp!test")
self.channel_layer.group_add("tgroup", "tgp!test2")
self.channel_layer.group_add("tgroup", "tgp!test3")
self.channel_layer.group_discard("tgroup", "tgp!test2")
self.channel_layer.send_group("tgroup", {"value": "orange"})
# Receive from the two channels in the group and ensure messages
channel, message = self.receive(["tgp!"])
self.assertIn(channel, ["tgp!test", "tgp!test3"])
self.assertEqual(message, {"value": "orange"})
channel, message = self.receive(["tgp!"])
self.assertIn(channel, ["tgp!test", "tgp!test3"])
self.assertEqual(message, {"value": "orange"})
# Make sure another channel does not get a message
channel, message = self.receive(["tgp!"])
self.assertIs(channel, None)
self.assertIs(message, None)
def test_group_channels(self):
"""
Tests that group membership check works
"""
self.skip_if_no_extension("groups")
# Make a group
self.channel_layer.group_add("tgroup", "tg_test")
self.channel_layer.group_add("tgroup", "tg_test2")
self.channel_layer.group_add("tgroup", "tg_test3")
# Check group members
self.assertEqual(
set(self.channel_layer.group_channels("tgroup")),
{"tg_test", "tg_test2", "tg_test3"},
)
# Discard from group
self.channel_layer.group_discard("tgroup", "tg_test3")
self.assertEqual(
set(self.channel_layer.group_channels("tgroup")),
{"tg_test", "tg_test2"},
)
def test_flush(self):
"""
Tests that messages go away after a flush.
"""
self.skip_if_no_extension("flush")
# Send something to flush
self.channel_layer.send("fl_test", {"value": "blue"})
self.channel_layer.flush()
channel, message = self.receive(["fl_test"])
self.assertIs(channel, None)
self.assertIs(message, None)
def test_flush_groups(self):
"""
Tests that groups go away after a flush.
"""
self.skip_if_no_extension("groups")
self.skip_if_no_extension("flush")
# Add things to a group and send to it
self.channel_layer.group_add("tfg_group", "tfg_test")
self.channel_layer.send_group("tfg_group", {"value": "blue"})
self.channel_layer.flush()
channel, message = self.receive(["tfg_test"])
self.assertIs(channel, None)
self.assertIs(message, None)
def test_group_expiry(self):
"""
Tests that group expiry is provided, and test it if it's less than
20 seconds.
"""
self.skip_if_no_extension("groups")
# Check group expiry is provided, and see if we can continue
expiry = getattr(self.channel_layer, "group_expiry", None)
if expiry is None:
self.fail("group_expiry is not defined")
if expiry > 20:
raise unittest.SkipTest("Expiry too long for test")
# Add things to a group
self.channel_layer.group_add("tge_group", "tge_test")
# Wait group expiry plus one
time.sleep(expiry + 1)
# Ensure message never arrives
self.channel_layer.send_group("tge_group", {"value": "blue"})
channel, message = self.receive(["tge_test"])
self.assertIs(channel, None)
self.assertIs(message, None)
def test_capacity(self):
"""
Tests that the capacity limiter on send() raises ChannelFull
after the right number of messages. Only runs if capacity_limit is set.
"""
if self.capacity_limit is None:
raise unittest.SkipTest("No test capacity specified")
for _ in range(self.capacity_limit):
self.channel_layer.send("cap_test", {"hey": "there"})
with self.assertRaises(self.channel_layer.ChannelFull):
self.channel_layer.send("cap_test", {"hey": "there"})
def test_capacity_process(self):
"""
Tests that the capacity limiter works on process-specific channels overall
"""
if self.capacity_limit is None or self.capacity_limit < 2:
raise unittest.SkipTest("Test capacity is unspecified or too low")
for i in range(self.capacity_limit):
self.channel_layer.send("capp!%s" % i, {"hey": "there"})
with self.assertRaises(self.channel_layer.ChannelFull):
self.channel_layer.send("capp!final", {"hey": "there"})
def test_capacity_group(self):
"""
Tests that the capacity limiter on group_send() never raises
ChannelFull.
"""
self.skip_if_no_extension("groups")
self.channel_layer.group_add("tcg_group", "tcg_test")
if self.capacity_limit is None:
raise unittest.SkipTest("No test capacity specified")
for _ in range(self.capacity_limit + 1):
self.channel_layer.send_group("tcg_group", {"hey": "there"})
def test_exceptions(self):
"""
Tests that the two exception classes exist on the channel layer
"""
self.assertTrue(hasattr(self.channel_layer, "MessageTooLarge"))
self.assertTrue(hasattr(self.channel_layer, "ChannelFull"))
def test_message_alteration_after_send(self):
"""
        Tests that a message can be altered after it was sent through a channel
        without affecting the object inside the queue.
"""
message = {'value': [1, 2, 3]}
self.channel_layer.send('channel', message)
message['value'][0] = 'new value'
_, message = self.receive(['channel'])
self.assertEqual(message, {'value': [1, 2, 3]})
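# Illustrative subclass sketch: how a concrete channel layer plugs into this
# suite, per the module docstring (1-second expiry, 1.1-second delay). The
# in-memory layer import is an assumption, not part of this module; any ASGI
# channel layer instance works.
#
#     from asgiref.inmemory import ChannelLayer
#
#     class InMemoryConformanceTests(ConformanceTestCase):
#         channel_layer = ChannelLayer(expiry=1)
#         expiry_delay = 1.1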
|
import csv
import tempfile
import pytest
from datarobot_batch_scoring.batch_scoring import run_batch_predictions
from utils import PickableMock
from datarobot_batch_scoring.reader import DETECT_SAMPLE_SIZE_SLOW
def test_gzipped_csv(live_server, ui):
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
ret = run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=None,
dataset='tests/fixtures/temperatura_predict.csv.gz',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False,
max_batch_size=1000
)
assert ret is None
def test_explicit_delimiter(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
ret = run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=',',
dataset='tests/fixtures/temperatura_predict.csv',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert ret is None
def test_explicit_delimiter_gzip(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
ret = run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=',',
dataset='tests/fixtures/temperatura_predict.csv.gz',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert ret is None
def test_tab_delimiter(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
ret = run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter='\t',
dataset='tests/fixtures/temperatura_predict_tab.csv',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert ret is None
def test_empty_file(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
with pytest.raises(csv.Error) as ctx:
run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=',',
dataset='tests/fixtures/empty.csv',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert "The csv module failed to detect the CSV dialect." in str(ctx.value)
def test_no_delimiter(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
with pytest.raises(csv.Error) as ctx:
run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=';',
dataset='tests/fixtures/temperatura_predict.csv',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert str(ctx.value) == ("Could not determine delimiter")
def test_bad_newline(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=',',
dataset='tests/fixtures/diabetes_bad_newline.csv',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
    with open('out.csv', 'rb') as out_fh:
        lines = len(out_fh.readlines())
assert lines == 5
ui.warning.assert_any_call('Detected empty rows in the CSV file. '
'These rows will be discarded.')
def test_header_only(live_server):
ui = PickableMock()
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
with pytest.raises(ValueError) as ctx:
run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=None,
delimiter=',',
dataset='tests/fixtures/header_only.csv',
pred_name=None,
timeout=None,
ui=ui,
auto_sample=False,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert str(ctx.value) == ("Input file 'tests/fixtures/header_only.csv' "
"is empty.")
def test_quotechar_in_keep_cols(live_server):
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
ui = PickableMock()
with tempfile.NamedTemporaryFile(prefix='test_',
suffix='.csv',
delete=False) as fd:
head = open("tests/fixtures/quotes_input_head.csv",
"rb").read()
body_1 = open("tests/fixtures/quotes_input_first_part.csv",
"rb").read()
body_2 = open("tests/fixtures/quotes_input_bad_part.csv",
"rb").read()
fd.file.write(head)
size = 0
while size < DETECT_SAMPLE_SIZE_SLOW:
fd.file.write(body_1)
size += len(body_1)
fd.file.write(body_2)
fd.close()
ret = run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=["b", "c"],
delimiter=None,
dataset=fd.name,
pred_name=None,
timeout=None,
ui=ui,
auto_sample=True,
fast_mode=False,
dry_run=False,
encoding='',
skip_dialect=False
)
assert ret is None
    with open("out.csv", "rb") as out_fh:
        last_line = out_fh.readlines()[-1]
expected_last_line = b'1044,2,"eeeeeeee ""eeeeee"" eeeeeeeeeeee'
assert last_line[:len(expected_last_line)] == expected_last_line
def test_quoted_newline_in_keep_cols_in_fast_mode_fails(live_server):
base_url = '{webhost}/predApi/v1.0/'.format(webhost=live_server.url())
ui = PickableMock()
with tempfile.NamedTemporaryFile(prefix='test_',
suffix='.csv',
delete=False) as fd:
head = open("tests/fixtures/quotes_input_head.csv",
"rb").read()
body_1 = open("tests/fixtures/quotes_input_first_part.csv",
"rb").read()
body_2 = open("tests/fixtures/quotes_input_bad_part_with_newline.csv",
"rb").read()
fd.file.write(head)
size = 0
while size < DETECT_SAMPLE_SIZE_SLOW:
fd.file.write(body_1)
size += len(body_1)
fd.file.write(body_2)
fd.close()
ret = run_batch_predictions(
base_url=base_url,
base_headers={},
user='username',
pwd='password',
api_token=None,
create_api_token=False,
pid='56dd9570018e213242dfa93c',
lid='56dd9570018e213242dfa93d',
import_id=None,
n_retry=3,
concurrent=1,
resume=False,
n_samples=10,
out_file='out.csv',
keep_cols=["b", "c"],
delimiter=None,
dataset=fd.name,
pred_name=None,
timeout=None,
ui=ui,
auto_sample=True,
fast_mode=True,
dry_run=False,
encoding='',
skip_dialect=False
)
    assert ret == 1
|
# LIGHT:
# 1) strip the text of punctuation using string methods;
# 2) build a list of the words (split);
# 3) convert every word to lower case (map);
# 4) from the list in step 3, build a dict whose keys are the words and whose values are how many times each word occurs in the text;
# 5) print the 5 most frequent words (sort) and the number of distinct words in the text (set).
# PRO:
# 6) do the LIGHT version with one extra condition: in step 2, in addition to lowercasing, also lemmatize the words.
#
text = """Все счастливые семьи похожи друг на друга, каждая несчастливая семья несчастлива по-своему.
Все смешалось в доме Облонских. Жена узнала, что муж был в связи с бывшею в их доме француженкою-гувернанткой, и объявила мужу, что не может жить с ним в одном доме. Положение это продолжалось уже третий день и мучительно чувствовалось и самими супругами, и всеми членами семьи, и домочадцами. Все члены семьи и домочадцы чувствовали, что нет смысла в их сожительстве и что на каждом постоялом дворе случайно сошедшиеся люди более связаны между собой, чем они, члены семьи и домочадцы Облонских. Жена не выходила из своих комнат, мужа третий день не было дома. Дети бегали по всему дому, как потерянные; англичанка поссорилась с экономкой и написала записку приятельнице, прося приискать ей новое место; повар ушел вчера со двора, во время самого обеда; черная кухарка и кучер просили расчета.
На третий день после ссоры князь Степан Аркадьич Облонский — Стива, как его звали в свете, — в обычный час, то есть в восемь часов утра, проснулся не в спальне жены, а в своем кабинете, на сафьянном диване. Он повернул свое полное, выхоленное тело на пружинах дивана, как бы желая опять заснуть надолго, с другой стороны крепко обнял подушку и прижался к ней щекой; но вдруг вскочил, сел на диван и открыл глаза.
«Да, да, как это было? — думал он, вспоминая сон. — Да, как это было? Да! Алабин давал обед в Дармштадте; нет, не в Дармштадте, а что-то американское. Да, но там Дармштадт был в Америке. Да, Алабин давал обед на стеклянных столах, да, — и столы пели: Il mio tesoro 1 и не Il mio tesoro, а что-то лучше, и какие-то маленькие графинчики, и они же женщины», — вспоминал он.
Глаза Степана Аркадьича весело заблестели, и он задумался, улыбаясь. «Да, хорошо было, очень хорошо. Много еще что-то там было отличного, да не скажешь словами и мыслями даже наяву не выразишь». И, заметив полосу света, пробившуюся сбоку одной из суконных стор, он весело скинул ноги с дивана, отыскал ими шитые женой (подарок ко дню рождения в прошлом году), обделанные в золотистый сафьян туфли, и по старой, девятилетней привычке, не вставая, потянулся рукой к тому месту, где в спальне у него висел халат. И тут он вспомнил вдруг, как и почему он спит не в спальне жены, а в кабинете; улыбка исчезла с его лица, он сморщил лоб.
«Ах, ах, ах! Ааа!..» — замычал он, вспоминая все, что было. И его воображению представились опять все подробности ссоры с женою, вся безвыходность его положения и мучительнее всего собственная вина его.
«Да! она не простит и не может простить. И всего ужаснее то, что виной всему я, виной я, а не виноват. В этом-то вся драма, — думал он. — Ах, ах, ах!» — приговаривал он с отчаянием, вспоминая самые тяжелые для себя впечатления из этой ссоры.
Неприятнее всего была та первая минута, когда он, вернувшись из театра, веселый и довольный, с огромною грушей для жены в руке, не нашел жены в гостиной; к удивлению, не нашел ее и в кабинете и, наконец, увидал ее в спальне с несчастною, открывшею все, запиской в руке.
Она, эта вечно озабоченная, и хлопотливая, и недалекая, какою он считал ее, Долли, неподвижно сидела с запиской в руке и с выражением ужаса, отчаяния и гнева смотрела на него.
— Что это? это? — спрашивала она, указывая на записку.
И при этом воспоминании, как это часто бывает, мучало Степана Аркадьича не столько самое событие, сколько то, как он ответил на эти слова жены.
С ним случилось в эту минуту то, что случается с людьми, когда они неожиданно уличены в чем-нибудь слишком постыдном. Он не сумел приготовить свое лицо к тому положению, в которое он становился перед женой после открытия его вины. Вместо того чтоб оскорбиться, отрекаться, оправдываться, просить прощения, оставаться даже равнодушным — все было бы лучше того, что он сделал! — его лицо совершенно невольно («рефлексы головного мозга», — подумал Степан Аркадьич, который любил физиологию), совершенно невольно вдруг улыбнулось привычною, доброю и потому глупою улыбкой.
Эту глупую улыбку он не мог простить себе. Увидав эту улыбку, Долли вздрогнула, как от физической боли, разразилась, со свойственною ей горячностью, потоком жестоких слов и выбежала из комнаты. С тех пор она не хотела видеть мужа."""
# 1)
text1 = ' '
text = text.replace(("!"), text1)
text = text.replace(("?"), text1)
text = text.replace((";"), text1)
text = text.replace((","), text1)
text = text.replace(("."), text1)
text = text.replace(("-"), text1)
text = text.replace((":"), text1)
text = text.replace(("—"), text1)
text = text.replace(("«"), text1)
text = text.replace(("»"), text1)
text = text.replace((" "), text1)
text = text.replace((" "), text1)
text = text.replace(("\n"), text1)
print(text)
# 2)
list_1 = text.split(' ')
print(list_1)
# 3)
list_2 = list(map(lambda x: x.lower(), list_1))
print(list_2)
#4)
dict_ = {}
for i in range(len(list_2)):
sum = 0
word = list_2[i]
for y in range(len(list_2)):
if word == list_2[y]:
sum+=1
dict_[word] = sum
print(dict_)
#5)
list_3 = (list(dict_.values()))
list_3.sort()
list_3.reverse()
list_4 = list(filter(lambda x:x>16, list_3))
inv_dict_ = {value: key for key, value in dict_.items()}
print(inv_dict_[list_4[0]],inv_dict_[list_4[1]],inv_dict_[list_4[2]],inv_dict_[list_4[3]],inv_dict_[list_4[4]],)
# Note: the third most frequent "word" turned out to be blank. It is the empty
# string: replacing punctuation with spaces leaves runs of consecutive spaces,
# and str.split(' ') yields an empty string for every extra separator, so ''
# ends up in list_1 and list_2. Splitting with text.split() (no argument) avoids this.
m_1 = set(list_2)
print(len(m_1))
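# A more direct route to the same counts, shown only as a sketch: Counter from
# the standard library covers steps 4-5, and split() with no argument collapses
# consecutive whitespace so no empty strings sneak in.
from collections import Counter
words = [w.lower() for w in text.split()]
counter = Counter(words)
print(counter.most_common(5))  # the 5 most frequent words with their counts
print(len(set(words)))         # number of distinct words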
|
""" *******************************************************************************************************************
|
| Name : get_tags.py
| Description : Retrieves a list of all tags associated with a client via the RiskSense REST API.
| Copyright : (c) RiskSense, Inc.
| License : Apache-2.0 (https://www.apache.org/licenses/LICENSE-2.0.txt)
|
******************************************************************************************************************* """
import json
import os
import toml
import requests
def read_config_file(filename):
"""
Reads TOML-formatted configuration file.
:param filename: Path to file to be read.
:type filename: str
:return: Variables found in config file.
:rtype: dict
"""
# Read the config file
toml_data = open(filename).read()
# Load the definitions contained in the config file
data = toml.loads(toml_data)
return data
def get_tags(platform, key, client_id):
"""
Retrieve a list of all of the tags that are associated with
the specified client ID.
:param platform: URL of the RiskSense platform to be queried.
:type platform: str
:param key: API Key.
:type key: str
:param client_id: ID of the client to be queried.
:type client_id: int
:return: A list of all tags returned by the API.
:rtype: list
"""
# Assemble the URL for the API call
url = platform + "/api/v1/client/" + str(client_id) + "/tag/search"
# Set the page size for returned results
page_size = 100
# Set the initial page of results to retrieve
page = 0
# Define the header for the API call
header = {
"x-api-key": key,
"content-type": "application/json"
}
# Define the filters to be used in your query. You can get a list of fields
# that can be filtered on from the /client/{clientId}/tag/filter API endpoint.
filters = [
# In this case we are filtering for all tags created in 2018. You can
# stack multiple filters here, to further narrow your results, just as
# you can in the UI.
{
"field": "created",
"exclusive": False,
"operator": "LIKE",
"value": "2018"
}
]
# Define the body for your API call.
body = {
"filters": filters, # The filters you specified above
"projection": "basic",
"sort": [ # Sort results returned by tag ID (ascending)
{
"field": "id",
"direction": "ASC"
}
],
"page": page,
"size": page_size
}
# Send your request to the API, and get the number of pages of results
# that are available.
response = requests.post(url, headers=header, data=json.dumps(body))
# If the status code returned equals success...
if response and response.status_code == 200:
jsonified_result = json.loads(response.text)
else:
print("There was an error retrieving the tags from the API.")
print(f"Status Code Returned: {response.status_code}")
print(f"Response: {response.text}")
exit(1)
number_of_pages = jsonified_result['page']['totalPages']
all_tags = []
# Cycle thorough all of the pages of tag results and add them to a list to be returned.
while page < number_of_pages:
# Send the API request
print(f"Getting page {page + 1}/{number_of_pages} of tags for client id {client_id}...")
response = requests.post(url, headers=header, data=json.dumps(body))
# If the status code returned equals success...
if response and response.status_code == 200:
jsonified_result = json.loads(response.text)
else:
print(f"There was an error retrieving page {page} of the found tags.")
print(f"Status Code: {response.status_code}")
print(f"Response: {response.text}")
exit(1)
# Append the tags found to our list to be returned.
for item in jsonified_result['_embedded']['tags']:
all_tags.append(item)
# Increment the page number to retrieve in the next run.
page += 1
body['page'] = page
return all_tags
def main():
""" Main body of script """
# Define the path to the config file, and read it.
conf_file = os.path.join(os.path.abspath(os.path.dirname(os.path.dirname(__file__))), 'conf', 'config.toml')
configuration = read_config_file(conf_file)
# Set our variables based on what is read from the config file.
rs_url = configuration['platform']['url']
api_key = configuration['platform']['api_key']
client_id = configuration['platform']['client_id']
# Get a list of the tags returned.
tags = get_tags(rs_url, api_key, client_id)
# Get the length of the list that was returned. This is the number of tags found.
number_of_tags = len(tags)
# Print basic information about each tag found to the console.
print("Tags found:")
print()
for tag in tags:
print(f"Tag ID: {tag['id']}")
print(f"Tag Name: {tag['name']}")
print(f"Tag Desc: {tag['description']}")
print()
# Print the total number of tags that were found to the console.
print(f"{number_of_tags} tag(s) were retrieved from the RiskSense API.")
print()
# Execute the script
if __name__ == '__main__':
main()
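# A minimal conf/config.toml this script expects, inferred from the keys read in
# main() above; the URL, key, and client id are placeholder values.
#
#     [platform]
#     url = "https://platform.risksense.com"
#     api_key = "your-api-key-here"
#     client_id = 123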
"""
Copyright 2019 RiskSense, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.TFRecordDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import python_io
from tensorflow.python.platform import test
from tensorflow.python.util import compat
@test_util.run_all_in_graph_and_eager_modes
class TFRecordDatasetTest(test_base.DatasetTestBase):
def setUp(self):
super(TFRecordDatasetTest, self).setUp()
self._num_files = 2
self._num_records = 7
self.test_filenames = self._createFiles()
def dataset_fn(self,
filenames,
compression_type="",
num_epochs=1,
batch_size=None):
repeat_dataset = readers.TFRecordDataset(
filenames, compression_type).repeat(num_epochs)
if batch_size:
return repeat_dataset.batch(batch_size)
return repeat_dataset
def _record(self, f, r):
return compat.as_bytes("Record %d of file %d" % (r, f))
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._record(i, j))
writer.close()
return filenames
def testReadOneEpoch(self):
# Basic test: read from file 0.
dataset = self.dataset_fn(self.test_filenames[0])
self.assertDatasetProduces(
dataset,
expected_output=[self._record(0, i) for i in range(self._num_records)])
# Basic test: read from file 1.
dataset = self.dataset_fn(self.test_filenames[1])
self.assertDatasetProduces(
dataset,
expected_output=[self._record(1, i) for i in range(self._num_records)])
# Basic test: read from both files.
dataset = self.dataset_fn(self.test_filenames)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testReadTenEpochs(self):
dataset = self.dataset_fn(self.test_filenames, num_epochs=10)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(dataset, expected_output=expected_output * 10)
def testReadTenEpochsOfBatches(self):
dataset = self.dataset_fn(
self.test_filenames, num_epochs=10, batch_size=self._num_records)
expected_output = []
for j in range(self._num_files):
expected_output.append(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(dataset, expected_output=expected_output * 10)
def testReadZlibFiles(self):
zlib_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
with open(zfn, "wb") as f:
f.write(cdata)
zlib_files.append(zfn)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
dataset = self.dataset_fn(zlib_files, compression_type="ZLIB")
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testReadGzipFiles(self):
gzip_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
with gzip.GzipFile(gzfn, "wb") as gzf:
gzf.write(f.read())
gzip_files.append(gzfn)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
dataset = self.dataset_fn(gzip_files, compression_type="GZIP")
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testReadWithBuffer(self):
one_mebibyte = 2**20
dataset = readers.TFRecordDataset(
self.test_filenames, buffer_size=one_mebibyte)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testReadFromDatasetOfFiles(self):
files = dataset_ops.Dataset.from_tensor_slices(self.test_filenames)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
dataset = readers.TFRecordDataset(files)
self.assertDatasetProduces(dataset, expected_output=expected_output)
def testReadTenEpochsFromDatasetOfFilesInParallel(self):
files = dataset_ops.Dataset.from_tensor_slices(
self.test_filenames).repeat(10)
expected_output = []
for j in range(self._num_files):
expected_output.extend(
[self._record(j, i) for i in range(self._num_records)])
dataset = readers.TFRecordDataset(files, num_parallel_reads=4)
self.assertDatasetProduces(
dataset, expected_output=expected_output * 10, assert_items_equal=True)
if __name__ == "__main__":
test.main()
|
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import bibtexparser
from utility_library.constants import *
from utility_library.util import *
import collections
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.ticker as mtick
plt.rcParams['font.size'] = 22
with open('../../map.bib') as bibtex_file:
bib_db = bibtexparser.load(bibtex_file)
bib_db.entries = format_entries(bib_db)
cnt_metrics = collections.Counter()
for c, i in enumerate(bib_db.entries):
try:
if i.get('problem', None):
print(i['metrics'])
for metric in i['metrics']:
cnt_metrics[metric] += 1
except:
continue
print(len(cnt_metrics.keys()))
old_cnt_values = np.array(list(cnt_metrics.copy().values()))
for metric in cnt_metrics.copy().keys():
if metric not in PRETTY_METRIC:
print('removing', metric, 'and putting in others')
cnt_metrics['others'] += cnt_metrics.pop(metric)
cnt_metrics = {
k: v
for k, v in reversed(sorted(cnt_metrics.items(), key=lambda item: item[1]))
}
print("Number of articles", len(bib_db.entries))
cnt_metrics['others'] = cnt_metrics.pop('others')  # re-insert 'others' so it sits last in the insertion-ordered dict
fig, ax = plt.subplots(figsize=(8.4, 4.8))
xs = list(map(PRETTY_METRIC.get, cnt_metrics.keys()))
ys = np.array(list(cnt_metrics.values()))
ys = 100 * ys / len(bib_db.entries)
bars = ax.bar(xs, ys, color='k')
for x, bar in zip(xs, bars):
    if x in 'Coverage,ILD,EPC,PRg,Others,Outros'.split(','):  # exact label match rather than substring check
bar.set_color('grey')
for tick in ax.get_xticklabels():
tick.set_rotation(45)
tick.set_horizontalalignment('right')
for x, y in zip(xs, ys):
ax.annotate("%d" % (y), xy=(x, y), ha='center', va='bottom')
if LANG == 'en':
ax.set_ylabel('Percentage of Studies')
elif LANG == 'br':
ax.set_ylabel('Porcentagem de Estudos')
ax.set_ylim(top=max(ys) + 5)
ax.yaxis.set_major_formatter(mtick.PercentFormatter())
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
fig.savefig('../../results/metrics_count.png', bbox_inches='tight')
fig.savefig('../../results/metrics_count.eps', bbox_inches='tight')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
header_test_suite = {
"name": "Header tests",
"scenarios": [
{
"name": "simple header",
"args": ["-b", "basic", "--header=0,3"],
"input": '''\
1,2
3,4
5,6
7,8
9,0
1,2
3,4
''' ,
"output": '''\
+---+---+
| 1 | 2 |
+---+---+
| 3 | 4 |
| 5 | 6 |
+---+---+
| 7 | 8 |
+---+---+
| 9 | 0 |
| 1 | 2 |
| 3 | 4 |
+---+---+
'''
},
]}
|
# getter & setter
class CovidtatusVo(object):
...
|
#!/usr/bin/env python3
# ver 1.0
import requests
import os
import datetime
from time import sleep
from lxml import html
import pandas as pd
item={'気温':'temp','降水量':'rain','湿度':'humd','気圧':'pres','風速':'wind','日照時間':'sun','積雪深':'snow'}
listdf=pd.read_csv('crawl.txt',comment='#')
for i,v in listdf.iterrows():
obsnum=v['観測所番号']
url="http://www.jma.go.jp/jp/amedas_h/yesterday-"+str(obsnum)+".html"
res = requests.get(url)
res.encoding = res.apparent_encoding
dom = html.fromstring(res.text)
table_xpath = """//*[@id="tbl_list"]"""
table = dom.xpath(table_xpath)[0]
df = pd.read_html(html.tostring(table),skiprows=[1],header=0)
df0=df[0]
now=datetime.datetime.now()
yesterday=datetime.datetime(now.year,now.month,now.day,0,0,0)-datetime.timedelta(days=1)
rrdfile=str(obsnum)+".rrd"
if not os.path.exists('data/'+rrdfile):
# temp,rain,humd,pres,wind,sun,snow - 35days/1h ,105days/3h ,548days/day ,3990days/1w
os.system("/usr/bin/rrdtool create data/"+rrdfile+" --start "+str(int(yesterday.timestamp()))+" --step 3600 DS:temp:GAUGE:7200:-100:100 DS:rain:GAUGE:7200:0:1000 DS:humd:GAUGE:7200:0:100 DS:pres:GAUGE:7200:800:1100 DS:wind:GAUGE:7200:0:100 DS:sun:GAUGE:7200:0:1 DS:snow:GAUGE:7200:0:100000 RRA:AVERAGE:0.5:1:840 RRA:AVERAGE:0.5:3:840 RRA:MIN:0.5:3:840 RRA:MAX:0.5:3:840 RRA:AVERAGE:0.5:24:548 RRA:MIN:0.5:24:548 RRA:MAX:0.5:24:548 RRA:AVERAGE:0.5:168:570 RRA:MIN:0.5:168:570 RRA:MAX:0.5:168:570")
for n in range(24):
temp = rain =humd = pres = wind = sun = snow = 'nan'
time=yesterday+datetime.timedelta(hours=n+1)
for m in range(len(df0.columns)-1):
            # at module level locals() is the module's namespace, so this assigns the
            # column value to the matching variable name (temp, rain, humd, ...)
            ns=locals()
            ns[item.get(df0.columns[m+1])]=df0.loc[n,[df0.columns[m+1]]][0]
os.system("/usr/bin/rrdtool update data/"+rrdfile+" "+str(int(time.timestamp()))+":"+str(temp)+":"+str(rain)+":"+str(humd)+":"+str(pres)+":"+str(wind)+":"+str(sun)+":"+str(snow))
sleep(1)
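# The RRD files written above can be read back with rrdtool's fetch command, e.g.
# (the observation number in the file name is hypothetical):
#   rrdtool fetch data/44132.rrd AVERAGE --start -2d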
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import argparse
import sys
from imma_formats import TABLES
def log(level, message):
sys.stderr.write('[{level}] {message}\n'.format(level=level,
message=message))
def parse_line(line):
'''
Parses a line in the IMMA format
(http://icoads.noaa.gov/e-doc/imma/R2.5-imma_short.pdf)
'''
data, index = parse_attachment(line, 'c0', 0)
return data
def parse_attachment(line, table_name, start_index=0):
def convert_value(data_type, string_value):
if ' ' * len(string_value) == string_value:
return data_type(0)
else:
return data_type(string_value)
    def log_conversion(variable_name, data_type, value, error):
        log('ERROR', 'Value error for variable {name}: '
            '{data_type}({value}).\n'
            'Exception was "{error_message}"'.
            format(name=variable_name, data_type=data_type.__name__,
                   value=value, error_message=error))
data = list()
for column in TABLES[table_name]:
no, data_type, size, name, description, min_value, max_value, units =\
column
string_value = line[start_index:start_index + size]
        try:
            value = convert_value(data_type, string_value)
        except ValueError as e:
            log_conversion(name, data_type, string_value, e)
            value = None
        data.append((name, value))
start_index += size
return data, start_index
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='This tool parses text file in the '
'International Maritime Meteorological '
'Archive (IMMA) Form '
'and out them in csv format')
parser.add_argument('-in', '--infile',
help='File to read from (stdin if not specified)')
parser.add_argument('-out', '--outfile',
help='File to write to (stdout if not specified)')
parser.add_argument('-from', '--from-line', type=int,
help='Output starting from <from_line>')
parser.add_argument('-to', '--to-line', type=int,
help='Output ending at <to_line>')
args = parser.parse_args()
input_file = args.infile and open(args.infile) or sys.stdin
output_file = args.outfile and open(args.outfile, 'a+') or sys.stdout
line_number = 0
for line in input_file:
try:
line_number += 1
if args.from_line and args.from_line > line_number:
continue
if args.to_line and args.to_line < line_number:
break
data = parse_line(line)
output_file.write(','.join(
[str(element[1]) for element in data])
+ '\n')
        except Exception as e:
            log('ERROR',
                'Error parsing line "{}".\n'
                'The error message was "{}"'.format(line, e))
|
# coding: utf-8
import pprint
import six
from enum import Enum
class RestCountryState:
swagger_types = {
'code': 'str',
'country_code': 'str',
'id': 'str',
'name': 'str',
}
attribute_map = {
'code': 'code','country_code': 'countryCode','id': 'id','name': 'name',
}
_code = None
_country_code = None
_id = None
_name = None
def __init__(self, **kwargs):
self.discriminator = None
self.code = kwargs.get('code', None)
self.country_code = kwargs.get('country_code', None)
self.id = kwargs.get('id', None)
self.name = kwargs.get('name', None)
@property
def code(self):
"""Gets the code of this RestCountryState.
The code of the state identifies the state. The code is typically used within addresses. Some countries may not provide a code. For those the field is null.
:return: The code of this RestCountryState.
:rtype: str
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this RestCountryState.
The code of the state identifies the state. The code is typically used within addresses. Some countries may not provide a code. For those the field is null.
:param code: The code of this RestCountryState.
:type: str
"""
self._code = code
@property
def country_code(self):
"""Gets the country_code of this RestCountryState.
The country code in ISO two letter format (e.g. UK, DE, CH, US).
:return: The country_code of this RestCountryState.
:rtype: str
"""
return self._country_code
@country_code.setter
def country_code(self, country_code):
"""Sets the country_code of this RestCountryState.
The country code in ISO two letter format (e.g. UK, DE, CH, US).
:param country_code: The country_code of this RestCountryState.
:type: str
"""
self._country_code = country_code
@property
def id(self):
"""Gets the id of this RestCountryState.
The ID of the state corresponds to the subdivision identifier defined in ISO 3166-2. The format consists of the country code followed by a dash and a subdivision identifier.
:return: The id of this RestCountryState.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this RestCountryState.
The ID of the state corresponds to the subdivision identifier defined in ISO 3166-2. The format consists of the country code followed by a dash and a subdivision identifier.
:param id: The id of this RestCountryState.
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this RestCountryState.
The name is a human readable label of the state in the language of the region.
:return: The name of this RestCountryState.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this RestCountryState.
The name is a human readable label of the state in the language of the region.
:param name: The name of this RestCountryState.
:type: str
"""
self._name = name
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
elif isinstance(value, Enum):
result[attr] = value.value
else:
result[attr] = value
if issubclass(RestCountryState, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, RestCountryState):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
# -*- coding: utf-8 -*-
#
# infer_score.py
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import argparse
from .utils import load_model_config, load_raw_triplet_data, load_triplet_data
from .models.infer import ScoreInfer
class ArgParser(argparse.ArgumentParser):
def __init__(self):
super(ArgParser, self).__init__()
self.add_argument('--model_path', type=str, default='ckpts',
help='the place where to load the model')
self.add_argument('--format', type=str,
                          help='The format of the input data:\n' \
                               'h_r_t: all lists of head, relation and tail are provided.\n' \
                               'h_r_*: both lists of head and relation are provided and tail includes all entities.\n' \
                               'h_*_t: both lists of head and tail are provided and relation includes all kinds of relations.\n' \
                               '*_r_t: both lists of relation and tail are provided and head includes all entities.\n' \
                               'h_*_*: only a list of heads is provided and both relation and tail include all possible ones.\n' \
                               '*_r_*: only a list of relations is provided and both head and tail include all possible ones.\n' \
                               '*_*_t: only a list of tails is provided and both head and relation include all possible ones.\n')
self.add_argument('--data_files', type=str, default=None, nargs='+',
                          help='A list of data file names. This is used to provide the necessary files containing the required data ' \
'according to the format, e.g., for h_r_t, three files are required as h_data, r_data and t_data, ' \
'while for h_*_t, two files are required as h_data and t_data')
self.add_argument('--raw_data', default=False, action='store_true',
help='whether the data provided in data_files is in the raw object naming space, e.g. string name of the entity ' \
'or in DGL-KE converted integer id space \n' \
                               'If True, the data is in the original naming space and the inference program will do the id translation ' \
                               'according to the id mapping files generated during the training process. \n' \
                               'If False, the data is just integer ids and it is assumed that the user has already done the id translation')
self.add_argument('--exec_mode', type=str, default='all',
help='How to calculate scores for triplets and calculate topK: \n' \
                          'triplet_wise: head, relation and tail lists have the same length N, and we calculate the similarity triplet by triplet: ' \
                          'result = topK([score(h_i, r_i, t_i) for i in N]), the result shape will be (K,)\n' \
                          'all: three lists of head, relation and tail ids are provided as H, R and T, and we calculate all possible combinations ' \
                          'of all triplets (h_i, r_j, t_k): ' \
                          'result = topK([[[score(h_i, r_j, t_k) for each h_i in H] for each r_j in R] for each t_k in T]), the result shape will be (K,)\n' \
                          'batch_head: three lists of head, relation and tail ids are provided as H, R and T, and we calculate topK for each element in head: ' \
                          'result = topK([[score(h_i, r_j, t_k) for each r_j in R] for each t_k in T]) for each h_i in H, the result shape will be (sizeof(H), K)\n' \
                          'batch_rel: three lists of head, relation and tail ids are provided as H, R and T, and we calculate topK for each element in relation: ' \
                          'result = topK([[score(h_i, r_j, t_k) for each h_i in H] for each t_k in T]) for each r_j in R, the result shape will be (sizeof(R), K)\n' \
                          'batch_tail: three lists of head, relation and tail ids are provided as H, R and T, and we calculate topK for each element in tail: ' \
                          'result = topK([[score(h_i, r_j, t_k) for each h_i in H] for each r_j in R]) for each t_k in T, the result shape will be (sizeof(T), K)\n')
self.add_argument('--topK', type=int, default=10,
help='How many results are returned')
self.add_argument('--score_func', type=str, default='none',
help='What kind of score is used in ranking and will be output: \n' \
'none: $score = x$ \n'
'logsigmoid: $score = log(sigmoid(x))$')
self.add_argument('--output', type=str, default='result.tsv',
help='Where to store the result, should be a single file')
self.add_argument('--entity_mfile', type=str, default=None,
help='Entity ID mapping file name. Required if Raw ID is used.')
self.add_argument('--rel_mfile', type=str, default=None,
help='Relation ID mapping file name. Required if Raw ID is used.')
self.add_argument('--gpu', type=int, default=-1,
help='GPU device to use in inference, -1 means CPU')
def main():
args = ArgParser().parse_args()
config = load_model_config(os.path.join(args.model_path, 'config.json'))
emap_file = args.entity_mfile
rmap_file = args.rel_mfile
data_files = args.data_files
# parse input data first
if args.format == 'h_r_t':
if args.raw_data:
assert emap_file is not None, 'When using RAW ID through --raw_data, ' \
'entity_mfile should be provided.'
assert rmap_file is not None, 'When using RAW ID through --raw_data, ' \
'rel_mfile should be provided.'
assert len(data_files) == 3, 'When using h_r_t, head.list, rel.list and tail.list ' \
'should be provided.'
head, rel, tail, id2e_map, id2r_map = load_raw_triplet_data(head_f=data_files[0],
rel_f=data_files[1],
tail_f=data_files[2],
emap_f=emap_file,
rmap_f=rmap_file)
else:
head, rel, tail = load_triplet_data(head_f=data_files[0],
rel_f=data_files[1],
tail_f=data_files[2])
elif args.format == 'h_r_*':
if args.raw_data:
assert emap_file is not None, 'When using RAW ID through --raw_data, ' \
'entity_mfile should be provided.'
assert rmap_file is not None, 'When using RAW ID through --raw_data, ' \
'rel_mfile should be provided.'
assert len(data_files) == 2, 'When using h_r_*, head.list and rel.list ' \
'should be provided.'
head, rel, tail, id2e_map, id2r_map = load_raw_triplet_data(head_f=data_files[0],
rel_f=data_files[1],
tail_f=None,
emap_f=emap_file,
rmap_f=rmap_file)
else:
head, rel, tail = load_triplet_data(head_f=data_files[0],
rel_f=data_files[1],
tail_f=None)
elif args.format == 'h_*_t':
if args.raw_data:
assert emap_file is not None, 'When using RAW ID through --raw_data, ' \
'entity_mfile should be provided.'
assert rmap_file is not None, 'When using RAW ID through --raw_data, ' \
'rel_mfile should be provided.'
assert len(data_files) == 2, 'When using h_*_t, head.list and tail.list ' \
'should be provided.'
head, rel, tail, id2e_map, id2r_map = load_raw_triplet_data(head_f=data_files[0],
rel_f=None,
tail_f=data_files[1],
emap_f=emap_file,
rmap_f=rmap_file)
else:
head, rel, tail = load_triplet_data(head_f=data_files[0],
rel_f=None,
tail_f=data_files[1])
elif args.format == '*_r_t':
if args.raw_data:
assert emap_file is not None, 'When using RAW ID through --raw_data, ' \
'entity_mfile should be provided.'
assert rmap_file is not None, 'When using RAW ID through --raw_data, ' \
'rel_mfile should be provided.'
            assert len(data_files) == 2, 'When using *_r_t, rel.list and tail.list ' \
'should be provided.'
head, rel, tail, id2e_map, id2r_map = load_raw_triplet_data(head_f=None,
rel_f=data_files[0],
tail_f=data_files[1],
emap_f=emap_file,
rmap_f=rmap_file)
else:
head, rel, tail = load_triplet_data(head_f=None,
rel_f=data_files[0],
tail_f=data_files[1])
elif args.format == 'h_*_*':
if args.raw_data:
assert emap_file is not None, 'When using RAW ID through --raw_data, ' \
'entity_mfile should be provided.'
assert rmap_file is not None, 'When using RAW ID through --raw_data, ' \
'rel_mfile should be provided.'
assert len(data_files) == 1, 'When using h_*_*, only head.list should be provided.'
head, rel, tail, id2e_map, id2r_map = load_raw_triplet_data(head_f=data_files[0],
rel_f=None,
tail_f=None,
emap_f=emap_file,
rmap_f=rmap_file)
else:
head, rel, tail = load_triplet_data(head_f=data_files[0],
rel_f=None,
tail_f=None)
elif args.format == '*_r_*':
if args.raw_data:
assert emap_file is not None, 'When using RAW ID through --raw_data, ' \
'entity_mfile should be provided.'
assert rmap_file is not None, 'When using RAW ID through --raw_data, ' \
'rel_mfile should be provided.'
assert len(data_files) == 1, 'When using *_r_*, only rel.list should be provided.'
head, rel, tail, id2e_map, id2r_map = load_raw_triplet_data(head_f=None,
rel_f=data_files[0],
tail_f=None,
emap_f=emap_file,
rmap_f=rmap_file)
else:
head, rel, tail = load_triplet_data(head_f=None,
rel_f=data_files[0],
tail_f=None)
elif args.format == '*_*_t':
if args.raw_data:
assert emap_file is not None, 'When using RAW ID through --raw_data, ' \
'entity_mfile should be provided.'
assert rmap_file is not None, 'When using RAW ID through --raw_data, ' \
'rel_mfile should be provided.'
assert len(data_files) == 1, 'When using *_*_t, only tail.list should be provided.'
head, rel, tail, id2e_map, id2r_map = load_raw_triplet_data(head_f=None,
rel_f=None,
tail_f=data_files[0],
emap_f=emap_file,
rmap_f=rmap_file)
else:
head, rel, tail = load_triplet_data(head_f=None,
rel_f=None,
tail_f=data_files[0])
else:
assert False, "Unsupported format {}".format(args.format)
model = ScoreInfer(args.gpu, config, args.model_path, args.score_func)
model.load_model()
result = model.topK(head, rel, tail, args.exec_mode, args.topK)
with open(args.output, 'w+') as f:
f.write('head\trel\ttail\tscore\n')
for res in result:
hl, rl, tl, sl = res
hl = hl.tolist()
rl = rl.tolist()
tl = tl.tolist()
sl = sl.tolist()
for h, r, t, s in zip(hl, rl, tl, sl):
if args.raw_data:
h = id2e_map[h]
r = id2r_map[r]
t = id2e_map[t]
f.write('{}\t{}\t{}\t{}\n'.format(h, r, t, s))
print('Inference Done')
if __name__ == '__main__':
main()
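# Example invocation (file names are hypothetical; the flags are exactly the ones defined in
# ArgParser above, and the way the module is launched depends on how the package is installed):
#   python3 -m dglke.infer_score --model_path ckpts --format h_r_t \
#       --data_files head.list rel.list tail.list \
#       --raw_data --entity_mfile entities.tsv --rel_mfile relations.tsv \
#       --exec_mode triplet_wise --topK 5 --output result.tsv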
|
import typer
import hashlib
from pathlib import Path
from typing import Optional
from tqdm import tqdm
app = typer.Typer()
# Main command
@app.command()
def main(algorithm: str, path: str, compare_with: Optional[str] = typer.Argument(None)):
    hasher = Hash(algorithm, path, compare_with)  # Initialize a Hash object (avoid shadowing the builtin hash())
    hasher.display_progress()
class Hash():
def __init__(self, algorithm, path, compare_with):
''' Default constructor '''
self.algorithm = algorithm
self.path = path
self.compare_with = compare_with
def generate_file_hash(self):
''' Function to generate and return file hash as a string '''
if self.algorithm not in hashlib.algorithms_available: # If the user defined algorithm is not available
raise TypeError(f'Algorithm: {self.algorithm} not supported!')
# Opens a file, and returns it as a file object
with open(self.path, 'rb') as f:
algorithm = hashlib.new(self.algorithm)
file_size = Path(self.path).stat().st_size
with tqdm(total=file_size, unit='B', unit_scale=True) as pbar:
while True:
chunk = f.read(8192)
if not chunk: # If no more bytes to be read
break
algorithm.update(chunk)
pbar.update(len(chunk))
return algorithm.hexdigest() # hexdigest() returns a string
def display_progress(self):
'''
Calls 'generate_file_hash' function and
displays the file hash along with
algorithm, path or file hash compared with
'''
file_hash = self.generate_file_hash()
typer.secho(
f'\nFILE_HASH: {typer.style(file_hash, fg=typer.colors.BLUE)}')
dictionary = (vars(self)) # Convert class properties into a dictionary
# Iterate over the key value pairs in the dictionary and print them
for key, value in dictionary.items():
typer.secho(
f'{key.upper()}: {typer.style(value, fg=typer.colors.BLUE)}')
if self.compare_with is not None: # If the user defined a hash to be compared with
typer.secho('\nMATCH', fg=typer.colors.GREEN) if (
file_hash == self.compare_with) else typer.secho('\nDIFFERENT', fg=typer.colors.RED)
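# The module defines a Typer app but never invokes it; a typical entry-point guard is added
# below so the script can be run directly (the file name in the example command is hypothetical):
#   python hasher.py sha256 ./some_file.iso <expected_hash>
if __name__ == '__main__':
    app()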
|
# -*- coding: utf-8 -*-
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
from user_manage import models
# Serializer that returns structured JSON data
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('username', 'password')
extra_kwargs = {
'username': {
'required': True,
},
'password': {
'write_only': True,
'required': True,
},
}
    def validate(self, attrs):
        print("--------------------")
        if User.objects.filter(username=attrs['username']).count() != 0:
            raise serializers.ValidationError('User is already registered')
        return attrs
    def validate_password(self, password):
        print("===================")
        return make_password(password)
# class StudentSerializer(serializers.ModelSerializer):
# user = UserSerializer()
# class Meta:
# model = models.Student
# fields = ('name', 'class_name', 'user')
class StudentSerializer(serializers.ModelSerializer):
username = serializers.CharField(source='user.username')
password = serializers.CharField(source='user.password')
class Meta:
model = models.Student
fields = ('username', 'password', 'name', 'class_name',)
extra_kwargs = {
'password': {'write_only': True, 'required': True, },
}
    # Maps to an INSERT (create)
def create(self, validated_data):
return super(StudentSerializer, self).create(validated_data)
    # Maps to an UPDATE
def update(self, instance, validated_data):
return super(StudentSerializer, self).update(instance, validated_data)
    # Maps to update-or-insert (upsert)
def save(self, **kwargs):
return super(StudentSerializer, self).save(**kwargs)
class FieldTestSerializer(serializers.Serializer):
read_field = serializers.CharField(read_only=True)
write_field = serializers.CharField(write_only=True)
    normal_field = serializers.CharField()
|
# coding:utf-8
import maya.cmds as cmds
name = "ASSET_ribbon_PART"
length = 5
ribbon = cmds.nurbsPlane(name="surf_{}".format(name), axis=[0,1,0], degree=3, lengthRatio=length, patchesU=1, patchesV=5, constructionHistory=False)[0]
cmds.rebuildSurface(ribbon, degreeU=1, degreeV=3, spansU=0, spansV=length, constructionHistory=False)
# rebuild ribbon (make U linear, keep V cubic)
for i in range(length):
    loc = cmds.spaceLocator(name="rivet_{}_0{}".format(name, i + 1))[0]
point_on_surface_info = cmds.createNode("pointOnSurfaceInfo", name="ptOnSurfInfo_{}".format(loc))
four_by_four_matrix = cmds.createNode("fourByFourMatrix", name = "fbfMatrix_{}".format(loc))
decompose_matrix = cmds.createNode("decomposeMatrix", name="dMatrix_{}".format(loc))
joint = cmds.createNode("joint", name= "bind_{}_0{}".format(name, (i+1)))
cmds.parent(joint, loc)
# Make connection system
cmds.connectAttr("{}.worldSpace[0]".format(ribbon), "{}.inputSurface".format(point_on_surface_info))
cmds.connectAttr("{}.normalX".format(point_on_surface_info), "{}.in00".format(four_by_four_matrix))
cmds.connectAttr("{}.normalY".format(point_on_surface_info), "{}.in01".format(four_by_four_matrix))
cmds.connectAttr("{}.normalZ".format(point_on_surface_info), "{}.in02".format(four_by_four_matrix))
cmds.connectAttr("{}.tangentVx".format(point_on_surface_info), "{}.in10".format(four_by_four_matrix))
cmds.connectAttr("{}.tangentVy".format(point_on_surface_info), "{}.in11".format(four_by_four_matrix))
cmds.connectAttr("{}.tangentVz".format(point_on_surface_info), "{}.in12".format(four_by_four_matrix))
cmds.connectAttr("{}.tangentUx".format(point_on_surface_info), "{}.in20".format(four_by_four_matrix))
cmds.connectAttr("{}.tangentUy".format(point_on_surface_info), "{}.in21".format(four_by_four_matrix))
cmds.connectAttr("{}.tangentUz".format(point_on_surface_info), "{}.in22".format(four_by_four_matrix))
cmds.connectAttr("{}.positionX".format(point_on_surface_info), "{}.in30".format(four_by_four_matrix))
cmds.connectAttr("{}.positionY".format(point_on_surface_info), "{}.in31".format(four_by_four_matrix))
cmds.connectAttr("{}.positionZ".format(point_on_surface_info), "{}.in32".format(four_by_four_matrix))
cmds.connectAttr("{}.output".format(four_by_four_matrix), "{}.inputMatrix".format(decompose_matrix))
cmds.connectAttr("{}.outputRotate".format(decompose_matrix), "{}.rotate".format(loc))
cmds.connectAttr("{}.outputTranslate".format(decompose_matrix), "{}.translate".format(loc))
# Set up U, V parameters for spacing rivet on ribbon
ratio = 1.0/length
paramV = ratio*(i+1) - 0.1
cmds.setAttr("{}.turnOnPercentage".format(point_on_surface_info), 1)
cmds.setAttr("{}.parameterU".format(point_on_surface_info), 0.5)
cmds.setAttr("{}.parameterV".format(point_on_surface_info), paramV)
|
from pytest import fixture, mark
from sls.document import Document, Position
from sls.indent.indent import Indentation
from sls.services.hub import ServiceHub
from tests.e2e.utils.fixtures import hub
indent_unit = " "
@fixture
def ws(magic):
return magic()
@fixture
def indentor():
return Indentation(service_registry=ServiceHub(hub))
@mark.parametrize(
"story,expected",
[
("http server", indent_unit),
("", ""),
("$", ""),
("ams $.", ""),
("= . ", ""),
("/foo/b", ""),
("http :", ""),
('http "foo"', ""),
("function int", indent_unit),
("http fetch", ""),
("redis get", ""),
("invalidService get", ""),
("a = 1", ""),
("b = [1,2,3].length()", ""),
("when", indent_unit),
("foreach", indent_unit),
("foreach foo", indent_unit),
("foreach foo as b", indent_unit),
("if", indent_unit),
("if 2 + 2", indent_unit),
("else", indent_unit),
("while", indent_unit),
("while [1,2,3].length() > 0", indent_unit),
('while (redis get: "foo")', indent_unit),
("try", indent_unit),
("catch", indent_unit),
("function foo", indent_unit),
("function foo arg1:int", indent_unit),
("storyscript/crontab entrypoint", ""),
("noService noAction", ""),
("http noAction", ""),
(indent_unit + "a = 1", indent_unit),
(indent_unit + "redis get", indent_unit),
(indent_unit + "when", 2 * indent_unit),
(indent_unit + "foreach", 2 * indent_unit),
(indent_unit + "if", 2 * indent_unit),
(indent_unit + "else", 2 * indent_unit),
(indent_unit + "while", 2 * indent_unit),
(indent_unit + "try", 2 * indent_unit),
(indent_unit + "catch", 2 * indent_unit),
("when srv listen", indent_unit),
('when srv listen path:"/counter"', indent_unit),
('when http server listen path:"/counter"', indent_unit),
('when http server listen path:"/counter" as request', indent_unit),
("http server as srv", indent_unit),
],
)
def test_indent(indentor, ws, story, expected):
doc = Document(uri=".my.uri.", text=story)
lines = story.split("\n")
# select the last pos in the provided story
pos = Position(line=len(lines) - 1, character=len(lines[-1]))
assert (
indentor.indent(ws, doc, pos, indent_unit=" ")["indent"] == expected
)
def test_indent_options(indentor, ws):
doc = Document(uri=".my.uri.", text=" try")
pos = Position(line=0, character=8)
assert (
indentor.indent(ws, doc, pos, indent_unit=" ")["indent"] == " "
)
def test_indent_edits(indentor, ws):
doc = Document(uri=".my.uri.", text="a = 1")
pos = Position(line=0, character=5)
assert indentor.indent(ws, doc, pos, indent_unit=" ") == {
"indent": "",
"textEdits": [
{
"newText": "\n",
"range": {
"end": {"character": 5, "line": 0},
"start": {"character": 5, "line": 0},
},
}
],
}
def test_indent_edits2(indentor, ws):
doc = Document(uri=".my.uri.", text="\ntry")
pos = Position(line=1, character=3)
assert indentor.indent(ws, doc, pos, indent_unit=" ") == {
"indent": indent_unit,
"textEdits": [
{
"newText": "\n" + indent_unit,
"range": {
"end": {"character": 3, "line": 1},
"start": {"character": 3, "line": 1},
},
}
],
}
|
#!/usr/bin/env python3
# A very simple load tester
# Copyright 2018 Google
# Sebastian Weigand tdg@google.com
import poke
import os
import signal
import time
import argparse
from multiprocessing import Pool
from functools import partial
parser = argparse.ArgumentParser(
description='A simple load tester!', epilog='A Sab handy-dandy script.')
parser.add_argument(
'url',
nargs=1,
type=str,
help='The URL you wish to test.'
)
parser.add_argument(
'--fulltext',
action='store_true',
default=False,
help=
'print the full text of HTTP response, instead of just the code (this will print a lot of text).'
)
parser.add_argument(
'-i',
'--iterations',
type=int,
nargs='?',
default=20,
help='The number of iterations to run [20].'
)
parser.add_argument(
'-c',
'--chunks',
type=int,
nargs='?',
default=os.cpu_count(),
help='The number of simultaneous processes to invoke [cpu_count].'
)
args = parser.parse_args()
args.url[0] = args.url[0].rstrip('/')
if not args.url[0].startswith('http://'):
args.url[0] = 'http://' + args.url[0]
def init_worker():
'''
We establish an init_worker so that we can intercept signals,
and kill the concurrent subprocesses:
'''
signal.signal(signal.SIGINT, signal.SIG_IGN)
def concurrent_test(url, iterations, chunks, fulltext):
'''
A simple function which wraps the call, generates the collection
of URLs to test, and feeds the appropriate pool.
'''
print('Getting "{}", {} times, {} calls at a time...'.format(
url, iterations, chunks))
try:
pool = Pool(chunks, init_worker)
# We need to pass in a keyword argument to the map, so use a partial:
        mapfunc = partial(poke.poke, full_text=fulltext)
pool.map(mapfunc, (url for i in range(iterations)))
except KeyboardInterrupt:
print('\n [Abort requested] - Roger that!')
pool.terminate()
pool.join()
else:
pool.close()
pool.join()
concurrent_test(args.url[0], args.iterations, args.chunks, args.fulltext)
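# Example invocation (the script name is hypothetical; the flags are the ones defined above):
#   ./loadtest.py example.com --iterations 100 --chunks 8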
|
import datetime
import numpy as np
import os
import json
def calc_metric_score(y=None, preds=None, metric=None, threshold=None):
    if threshold is not None:
        preds = preds > threshold
return(metric(y, preds))
def calc_threshold_metric_score(y=None,
preds=None,
classes=[],
metrics=[],
thresholds=np.arange(.05, 1, 0.05)):
output = {}
for threshold_i in thresholds:
for metric_i in metrics:
# Calc overall performance
key_name = f"{metric_i.__name__}_@{threshold_i:.2f}"
output[key_name] = calc_metric_score(y=y,
preds=preds,
metric=metric_i,
threshold=threshold_i)
for class_idx in range(0, len(classes)):
key_name = f"{metric_i.__name__}_#{classes[class_idx]}_@{threshold_i:.2f}"
output[key_name] = calc_metric_score(y=y[:,class_idx],
preds=preds[:,class_idx],
metric=metric_i,
threshold=threshold_i)
return(output)
def load_json(filepath):
with open(filepath) as f:
data = json.load(f)
return(data)
def save_json(data, filepath, indent=2):
with open(filepath, 'w') as f:
json.dump(data, f, indent=indent)
def walk_dir(parent_dir):
results = []
crawl_cnt = 0
for dir_name, sub_dir_list, file_list in os.walk(parent_dir):
for file_name in file_list:
file_path = os.path.join(os.path.realpath(dir_name), file_name)
file_info = {}
file_info['path'] = file_path
file_info['filename'] = os.path.basename(file_path)
file_info['file_type'] = os.path.splitext(file_path)[1].lower()
# file_info['modified_date'] = time.strftime("%Y%m%dT%H%M%S000+10:00", time.gmtime(os.stat(file_path).st_mtime))
# file_info['created_date'] = time.strftime("%Y%m%dT%H%M%S000+10:00", time.gmtime(os.stat(file_path).st_ctime))
try:
file_info['modified_date'] = datetime.date.fromtimestamp(os.path.getmtime(file_path))
file_info['created_date'] = datetime.date.fromtimestamp(os.path.getctime(file_path))
file_info['size'] = os.path.getsize(file_path)
file_info['error'] = False
except Exception as e:
print(e)
file_info['error'] = True
file_info['crawl_cnt'] = crawl_cnt
results.append(file_info)
crawl_cnt += 1
return (results)
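# Hypothetical example of calc_threshold_metric_score (assumes scikit-learn is installed;
# the arrays, class names and threshold below are illustrative only):
#   from sklearn.metrics import accuracy_score
#   y = np.array([[1, 0], [0, 1]])
#   preds = np.array([[0.9, 0.2], [0.1, 0.8]])
#   scores = calc_threshold_metric_score(y=y, preds=preds, classes=['a', 'b'],
#                                         metrics=[accuracy_score], thresholds=[0.5])
#   # keys look like 'accuracy_score_@0.50' (overall) and 'accuracy_score_#a_@0.50' (per class)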
|
# REQUIRES: python-psutil
# llvm.org/PR33944
# REQUIRES: nowindows
# FIXME: This test is fragile because it relies on time which can
# be affected by system performance. In particular we are currently
# assuming that `short.py` can be successfully executed within 2
# seconds of wallclock time.
# Test per test timeout using external shell
# RUN: not %{lit} \
# RUN: %{inputs}/shtest-timeout/infinite_loop.py \
# RUN: %{inputs}/shtest-timeout/short.py \
# RUN: -j 1 -v --debug --timeout 2 --param external=1 > %t.extsh.out 2> %t.extsh.err
# RUN: FileCheck --check-prefix=CHECK-OUT-COMMON < %t.extsh.out %s
# RUN: FileCheck --check-prefix=CHECK-EXTSH-ERR < %t.extsh.err %s
#
# CHECK-EXTSH-ERR: Using external shell
# Test per test timeout using internal shell
# RUN: not %{lit} \
# RUN: %{inputs}/shtest-timeout/infinite_loop.py \
# RUN: %{inputs}/shtest-timeout/short.py \
# RUN: -j 1 -v --debug --timeout 2 --param external=0 > %t.intsh.out 2> %t.intsh.err
# RUN: FileCheck --check-prefix=CHECK-OUT-COMMON < %t.intsh.out %s
# RUN: FileCheck --check-prefix=CHECK-INTSH-OUT < %t.intsh.out %s
# RUN: FileCheck --check-prefix=CHECK-INTSH-ERR < %t.intsh.err %s
# CHECK-INTSH-OUT: TIMEOUT: per_test_timeout :: infinite_loop.py
# CHECK-INTSH-OUT: command output:
# CHECK-INTSH-OUT: command reached timeout: True
# CHECK-INTSH-ERR: Using internal shell
# Test per test timeout set via a config file rather than on the command line
# RUN: not %{lit} \
# RUN: %{inputs}/shtest-timeout/infinite_loop.py \
# RUN: %{inputs}/shtest-timeout/short.py \
# RUN: -j 1 -v --debug --param external=0 \
# RUN: --param set_timeout=2 > %t.cfgset.out 2> %t.cfgset.err
# RUN: FileCheck --check-prefix=CHECK-OUT-COMMON < %t.cfgset.out %s
# RUN: FileCheck --check-prefix=CHECK-CFGSET-ERR < %t.cfgset.err %s
#
# CHECK-CFGSET-ERR: Using internal shell
# CHECK-OUT-COMMON: TIMEOUT: per_test_timeout :: infinite_loop.py
# CHECK-OUT-COMMON: Timeout: Reached timeout of 2 seconds
# CHECK-OUT-COMMON: Command {{([0-9]+ )?}}Output
# CHECK-OUT-COMMON: PASS: per_test_timeout :: short.py
# CHECK-OUT-COMMON: Expected Passes{{ *}}: 1
# CHECK-OUT-COMMON: Individual Timeouts{{ *}}: 1
# Test per test timeout via a config file and on the command line.
# The value set on the command line should override the config file.
# RUN: not %{lit} \
# RUN: %{inputs}/shtest-timeout/infinite_loop.py \
# RUN: %{inputs}/shtest-timeout/short.py \
# RUN: -j 1 -v --debug --param external=0 \
# RUN: --param set_timeout=1 --timeout=2 > %t.cmdover.out 2> %t.cmdover.err
# RUN: FileCheck --check-prefix=CHECK-CMDLINE-OVERRIDE-OUT < %t.cmdover.out %s
# RUN: FileCheck --check-prefix=CHECK-CMDLINE-OVERRIDE-ERR < %t.cmdover.err %s
# CHECK-CMDLINE-OVERRIDE-ERR: Forcing timeout to be 2 seconds
# CHECK-CMDLINE-OVERRIDE-OUT: TIMEOUT: per_test_timeout :: infinite_loop.py
# CHECK-CMDLINE-OVERRIDE-OUT: Timeout: Reached timeout of 2 seconds
# CHECK-CMDLINE-OVERRIDE-OUT: Command {{([0-9]+ )?}}Output
# CHECK-CMDLINE-OVERRIDE-OUT: PASS: per_test_timeout :: short.py
# CHECK-CMDLINE-OVERRIDE-OUT: Expected Passes{{ *}}: 1
# CHECK-CMDLINE-OVERRIDE-OUT: Individual Timeouts{{ *}}: 1
|
from rubiks_cube_gym.envs.rubiks_cube_222 import RubiksCube222Env
import numpy as np
from operator import itemgetter
class RubiksCube222EnvOrtega(RubiksCube222Env):
def __init__(self):
super(RubiksCube222EnvOrtega, self).__init__()
self.FF = None
self.OLL = None
def check_FF(self):
for pos in FF_POS:
if itemgetter(*pos)(self.cube_reduced) == itemgetter(*pos)("WWWWOOGGRRBBOOGGRRBBYYYY"):
return True
return False
def check_OLL(self):
for pos in OLL_POS:
if itemgetter(*pos)(self.cube_reduced) == itemgetter(*pos)("WWWWOOGGRRBBOOGGRRBBYYYY"):
return True
return False
    def check_solved(self):
        if self.cube_reduced == "WWWWOOGGRRBBOOGGRRBBYYYY":
            return True
        return False
def reward(self):
reward = -15 * self.FF - 25 * self.OLL
done = False
if self.check_FF():
reward += 15
self.FF = True
else:
self.FF = False
if self.check_OLL():
reward += 25
self.OLL = True
else:
self.OLL = False
if self.check_solved():
reward += 60
done = True
if reward <= 0:
reward -= 1
return reward, done
def reset(self, scramble=None):
super(RubiksCube222EnvOrtega, self).reset(scramble=scramble)
self.FF = self.check_FF()
self.OLL = self.check_OLL()
return self.cube_state
FF_POS = [[0, 1, 2, 3], [4, 5, 12, 13], [6, 7, 14, 15], [8, 9, 16, 17], [10, 11, 18, 19], [20, 21, 22, 23]]
OLL_POS = [[0, 1, 2, 3, 20, 21, 22, 23], [4, 5, 8, 9, 12, 13, 16, 17], [6, 7, 10, 11, 14, 15, 18, 19], ]
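# Minimal usage sketch (assumes the parent RubiksCube222Env follows the classic gym API;
# the exact spaces and step() signature come from rubiks_cube_gym, so treat this as illustrative):
#   env = RubiksCube222EnvOrtega()
#   state = env.reset()
#   state, reward, done, info = env.step(env.action_space.sample())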
|
import threading
"""
Threaded class which monitors the health of all services, recovers from failures when possible,
reports to the server and restarts the App
TODO: implement
@param app: the application object holding the service objects to monitor
"""
class MonitorThread(threading.Thread):
def __init__(self, app):
threading.Thread.__init__(self)
self.m_app = app
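    # Hedged sketch of the monitoring loop the TODO above asks for. Every attribute used on
    # self.m_app below (services, report, restart) and on each service (is_healthy, recover)
    # is an assumption about the surrounding App API, not part of the original code.
    def run(self):
        import time
        while True:
            for service in getattr(self.m_app, 'services', []):
                if service.is_healthy():
                    continue
                try:
                    service.recover()
                except Exception as exc:
                    self.m_app.report(service, exc)
                    self.m_app.restart()
            time.sleep(5)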
|
#!/usr/bin/env python
"""Publisher node for weight sensor.
"""
import glob
import os
import rospy
import serial
import time
from std_msgs.msg import Float32MultiArray
from std_srvs.srv import Empty
class WeightPublisher(object):
"""Publisher ROS node for weight sensors.
Topics
------
weights : Float32MultiArray
The weights from each of the load cell sensors in grams,
listed in order of ID.
Services
--------
tare : Empty
Zeros the scale at the current load.
"""
def __init__(self, rate=20.0, id_mask='F1804'):
"""Initialize the weight publisher.
Parameters
----------
id_mask : str
A template for the first n digits of the device IDs for valid load cells.
"""
self._rate = rospy.Rate(rate)
self._pub = rospy.Publisher('~weights', Float32MultiArray, queue_size=10)
rospy.loginfo('Connecting serial')
self._serials = self._connect(id_mask)
if len(self._serials) == 0:
raise ValueError('Error -- No loadstar weight sensors connected to machine!')
# Tare the sensor
        rospy.loginfo('Taring')
self._tare()
# Flush the sensor's communications
self._flush()
# Set up Tare service
self._tare_service = rospy.Service('~tare', Empty, self._handle_tare)
# Main loop -- read and publish
while not rospy.is_shutdown():
weights = self._read_weights()
self._pub.publish(Float32MultiArray(data=weights))
self._rate.sleep()
def _handle_tare(self, request):
"""Handler for tare service.
"""
self._tare()
return []
def _connect(self, id_mask):
"""Connects to all of the load cells serially.
"""
# Get all devices attached as USB serial
all_devices = glob.glob('/dev/ttyUSB*')
# Identify which of the devices are LoadStar Serial Sensors
sensors = []
for device in all_devices:
try:
ser = serial.Serial(port=device,
timeout=0.5,
exclusive=True)
ser.write('ID\r')
ser.flush()
time.sleep(0.05)
resp = ser.read(13)
ser.close()
if len(resp) >= 10 and resp[:len(id_mask)] == id_mask:
sensors.append((device, resp.rstrip('\r\n')))
except:
continue
sensors = sorted(sensors, key=lambda x : x[1])
# Connect to each of the serial devices
serials = []
for device, key in sensors:
ser = serial.Serial(port=device, timeout=0.5)
serials.append(ser)
rospy.loginfo('Connected to load cell {} at {}'.format(key, device))
return serials
def _flush(self):
"""Flushes all of the serial ports.
"""
for ser in self._serials:
ser.flush()
ser.flushInput()
ser.flushOutput()
time.sleep(0.02)
def _tare(self):
"""Zeros out (tare) all of the load cells.
"""
for ser in self._serials:
ser.write('TARE\r')
ser.flush()
ser.flushInput()
ser.flushOutput()
time.sleep(0.02)
def _read_weights(self):
"""Reads weights from each of the load cells.
"""
weights = []
grams_per_pound = 453.592
# Read from each of the sensors
for ser in self._serials:
ser.write('W\r')
ser.flush()
time.sleep(0.02)
for ser in self._serials:
try:
output_str = ser.readline()
weight = float(output_str) * grams_per_pound
weights.append(weight)
except:
weights.append(0.0)
# Log the output
log_output = ''
for w in weights:
log_output +='{:.2f} '.format(w)
rospy.loginfo(log_output)
return weights
if __name__ == '__main__':
try:
rospy.init_node('weight_sensor')
id_mask = rospy.get_param('~id_mask', 'F1804')
rate = rospy.get_param('~rate', 20.0)
rospy.loginfo('Starting')
WeightPublisher(rate, id_mask)
except rospy.ROSInterruptException:
pass
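# Once the node is running, the scale can be re-zeroed from another terminal via the
# advertised service (the private name '~tare' resolves under the node name), e.g.:
#   rosservice call /weight_sensor/tare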
|
"""
This module computes the canonical HRF used in
fMRIstat, both the 2-term spectral approximation
and the Taylor series approximation, to a shifted
version of the canonical Glover HRF.
References
----------
Liao, C.H., Worsley, K.J., Poline, J-B., Aston, J.A.D., Duncan, G.H.,
Evans, A.C. (2002). \'Estimating the delay of the response in fMRI
data.\' NeuroImage, 16:593-606.
"""
import numpy as np
import numpy.linalg as L
from scipy.interpolate import interp1d
from sympy import Function
from nipy.modalities.fmri import hrf, formula
from nipy.modalities.fmri.fmristat.invert import invertR
def spectral_decomposition(hrf2decompose, ncomp=2, time=None,
delta=None):
"""
    Perform a PCA expansion of a symbolic HRF, shifted over the values in delta,
returning the first ncomp components.
This smooths out the HRF as compared to using a Taylor series
approximation.
Parameters
----------
hrf2decompose : sympy expression
An expression that can be vectorized
as a function of 't'. This is the HRF to be expanded in PCA
ncomp : int
Number of principal components to retain.
time : np.ndarray
Default value of np.linspace(-15,50,3751)
chosen to match fMRIstat implementation.
Presumed to be equally spaced.
delta : np.ndarray
Default value of np.arange(-4.5, 4.6, 0.1)
chosen to match fMRIstat implementation.
Returns
-------
hrf : [sympy expressions]
A sequence of symbolic HRFs that are the principal
components.
approx :
TODO
"""
if time is None:
time = np.linspace(-15,50,3751)
dt = time[1] - time[0]
if delta is None:
delta = np.arange(-4.5, 4.6, 0.1)
hrft = hrf.vectorize(hrf2decompose(hrf.t))
H = []
for i in range(delta.shape[0]):
H.append(hrft(time - delta[i]))
H = np.nan_to_num(np.asarray(H))
U, S, V = L.svd(H.T, full_matrices=0)
basis = []
for i in range(ncomp):
b = interp1d(time, U[:, i], bounds_error=False, fill_value=0.)
if i == 0:
d = np.fabs((b(time) * dt).sum())
b.y /= d
basis.append(b)
W = np.array([b(time) for b in basis[:ncomp]])
WH = np.dot(L.pinv(W.T), H.T)
coef = [interp1d(delta, w, bounds_error=False, fill_value=0.) for w in WH]
if coef[0](0) < 0:
coef[0].y *= -1.
basis[0].y *= -1.
def approx(time, delta):
value = 0
for i in range(ncomp):
value += coef[i](delta) * basis[i](time)
return value
approx.coef = coef
approx.components = basis
(approx.theta,
approx.inverse,
approx.dinverse,
approx.forward,
approx.dforward) = invertR(delta, approx.coef)
symbasis = []
for i, b in enumerate(basis):
symbasis.append(formula.aliased_function('%s%d' % (str(hrf2decompose), i), b))
return symbasis, approx
def taylor_approx(hrf2decompose, tmax=50, tmin=-15, dt=0.02,
delta=np.arange(-4.5, 4.6, 0.1)):
"""
    Build the Taylor-series approximation used by fMRIstat: the HRF itself and a
    scaled version of its time derivative form the basis, fitted over the shifts
    given by the values in delta.
    Parameters
    ----------
    hrf2decompose : sympy expression
        An expression that can be vectorized
        as a function of 't'. This is the HRF to be approximated.
    tmax, tmin, dt : float
        Bounds and step of the time grid on which the HRF is sampled.
    delta : np.ndarray
        Shifts over which the approximation coefficients are fitted.
References
----------
Liao, C.H., Worsley, K.J., Poline, J-B., Aston, J.A.D., Duncan, G.H.,
Evans, A.C. (2002). \'Estimating the delay of the response in fMRI
data.\' NeuroImage, 16:593-606.
"""
hrft = hrf.vectorize(hrf2decompose(hrf.t))
time = np.arange(tmin, tmax, dt)
dhrft = interp1d(time, -np.gradient(hrft(time), dt), bounds_error=False,
fill_value=0.)
dhrft.y *= 2
H = np.array([hrft(time - d) for d in delta])
W = np.array([hrft(time), dhrft(time)])
W = W.T
WH = np.dot(L.pinv(W), H.T)
coef = [interp1d(delta, w, bounds_error=False,
fill_value=0.) for w in WH]
def approx(time, delta):
value = (coef[0](delta) * hrft(time)
+ coef[1](delta) * dhrft(time))
return value
approx.coef = coef
approx.components = [hrft, dhrft]
(approx.theta,
approx.inverse,
approx.dinverse,
approx.forward,
approx.dforward) = invertR(delta, approx.coef)
dhrf = formula.aliased_function('d%s' % str(hrf2decompose), dhrft)
return [hrf2decompose, dhrf], approx
canonical, canonical_approx = taylor_approx(hrf.glover)
spectral, spectral_approx = spectral_decomposition(hrf.glover)
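# Minimal usage sketch (illustrative only; mirrors how hrf.vectorize is used above):
#   t = np.linspace(0, 30, 301)
#   glover = hrf.vectorize(hrf.glover(hrf.t))   # canonical Glover HRF sampled on t
#   shifted = spectral_approx(t, 1.0)           # 2-term spectral approximation at a 1 s shift
#   taylor = canonical_approx(t, 1.0)           # value + derivative (Taylor) approximation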
|
"""
Given an integer array nums, return true if there exists a triple of indices (i, j, k) such that i < j < k and nums[i] < nums[j] < nums[k]. If no such indices exist, return false.
Example 1:
Input: nums = [1,2,3,4,5]
Output: true
Explanation: Any triplet where i < j < k is valid.
Example 2:
Input: nums = [5,4,3,2,1]
Output: false
Explanation: No triplet exists.
Example 3:
Input: nums = [2,1,5,0,4,6]
Output: true
Explanation: The triplet (3, 4, 5) is valid because nums[3] == 0 < nums[4] == 4 < nums[5] == 6.
Constraints:
1 <= nums.length <= 10^5
-2^31 <= nums[i] <= 2^31 - 1
Follow up: Could you implement a solution that runs in O(n) time complexity and O(1) space complexity?
"""
from typing import List
class Solution:
    def increasingTriplet(self, nums: List[int]) -> bool:
small, big = None, None
for n in nums:
if small is None or n <= small:
small = n
elif big is None or n <= big:
big = n
else:
return True
return False
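# Quick check of the solution against the examples in the problem statement above
# (this __main__ driver is an addition for illustration, not part of the original file):
if __name__ == "__main__":
    s = Solution()
    assert s.increasingTriplet([1, 2, 3, 4, 5]) is True
    assert s.increasingTriplet([5, 4, 3, 2, 1]) is False
    assert s.increasingTriplet([2, 1, 5, 0, 4, 6]) is True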
|
import numpy as np
class circlequeue:
def __init__(self, _len, _dim):
self.length = _len
self.dimension = _dim
self.data = np.zeros((_len, _dim))
self.index_start = 0
self.index_end = 0
self.datasize = 0
def add(self, new_data):
self.data[self.index_end, :] = new_data
self.index_end = (self.index_end + 1) % self.length
if self.datasize < self.length:
self.datasize = self.datasize + 1
else:
self.index_start = (self.index_start + 1) % self.length
def addArray(self, new_data):
nRowNew = new_data.shape[0]
p_new_start = (self.index_end % self.length)
p_new_end = p_new_start + nRowNew
if p_new_end <= self.length:
self.index_end = p_new_end
self.data[p_new_start: p_new_end,:] = new_data
else:
nFirstPart = self.length - self.index_end
nLeftPart = nRowNew - nFirstPart
self.data[p_new_start: self.length,:] = new_data[: nFirstPart,:]
self.data[: nLeftPart,:] = new_data[nFirstPart: nRowNew,:]
self.index_end = p_new_end - self.length
if self.datasize + nRowNew <= self.length:
self.datasize = self.datasize + nRowNew
else:
self.datasize = self.length
self.index_start = (self.index_end % self.length)
def get(self, index, *args):
if len(args) < 1:
dim = np.arange(self.data.shape[1])
else:
            dim = args[0]
if index > self.length or index < 0:
raise NotImplementedError
return self.data[self.getOrignalIdx(index), dim]
def getLast(self):
return self.data[self.index_end-1,:]
def getLastN(self, n):
if self.datasize < n:
idxStart = self.index_start
idxEnd = self.index_end
else:
idxStart = self.getOrignalIdx(self.datasize - n)
idxEnd = self.index_end
if idxEnd > idxStart:
d = self.data[idxStart:idxEnd,:]
else:
mid = self.length - idxStart
d = np.zeros((n, self.dimension))
d[: mid, :] = self.data[idxStart: self.length,:]
d[mid: n, :] = self.data[:idxEnd, :]
return d
def get_fromEnd(self, index, *args):
if len(args) < 1:
dim = np.arange(self.data.shape[1])
else:
            dim = args[0]
if index >= self.length or index < 0:
raise NotImplementedError
idx = self.index_end-index
if idx < 0:
idx += self.length
return self.data[idx, dim]
# return self.get(self.index_end-index)
# idx = self.getOrignalIdx(self.index_end-1-index)
# # idx = self.getOrignalIdx(self.index_start + (self.datasize-index))
# # idx = (self.index_end - index + 1 % self.length) + 1
# d = self.data[idx, dim]
#
# return d
def pop(self):
if self.datasize == 0:
return []
d = self.data[self.index_end-1, :]
self.index_end -= 1
if self.index_end < 0:
self.index_end += self.length
self.datasize -= 1
return d
def pop_fromBeginning(self):
if self.datasize == 0:
return []
d = self.data[self.index_start,:]
self.index_start += 1
if self.index_start >= self.length:
self.index_start -= self.length
# self.index_start = (self.index_start - 1 + 1 % self.length) + 1
self.datasize = self.datasize - 1
return d
def getOrignalIdx(self, idxQueue):
return (self.index_start + idxQueue) % self.length
# idxArray = (self.index_start + idxQueue) % self.length
# print(idxArray)
# idxArray = self.index_start + idxQueue
# if idxArray >= self.length:
# idxArray = idxArray - self.length
# if idxArray >= self.length:
#
# print(idxArray)
# return idxArray
def set(self, range_start, range_end, value, *args):
# range_start, range_end, value, dim
# range_start, range_end, value = args[0], args[1], args[2]
if len(args) < 1:
dim = np.arange(self.data.shape[1])
else:
dim = args[0]
idxStart = self.getOrignalIdx(range_start)
idxEnd = self.getOrignalIdx(range_end)
if idxEnd >= idxStart:
self.data[idxStart: idxEnd, dim] = value
else:
# nTotalData = size(value, 1)
nTotalData = value.shape[0]
if nTotalData > 1:
nFirstPart = self.length - idxStart
self.data[idxStart: self.length, dim] = value[: nFirstPart,:]
self.data[: idxEnd, dim] = value[nFirstPart: nTotalData,:]
else:
self.data[idxStart: self.length, dim] = value
                self.data[: idxEnd, dim] = value  # start at 0, matching the wrap-around branch above
def getArray(self, range_start, range_end, *args):
if len(args) < 1:
dim = np.arange(self.data.shape[1])
else:
dim = args[0]
idxStart = self.getOrignalIdx(range_start)
idxEnd = self.getOrignalIdx(range_end)
if idxEnd >= idxStart:
d = self.data[idxStart:idxEnd, dim]
else:
d1 = self.data[idxStart:self.length, dim]
d2 = self.data[:idxEnd, dim]
d = np.concatenate([d1, d2])
return d
def getArrayAll(self):
return self.getLastN(self.datasize)
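# Minimal usage sketch of the ring buffer above (values are illustrative only):
if __name__ == "__main__":
    q = circlequeue(4, 2)                            # capacity 4, two columns per row
    q.addArray(np.array([[1, 1], [2, 2], [3, 3]]))   # bulk insert three rows
    q.add(np.array([4, 4]))
    q.add(np.array([5, 5]))                          # wraps around, overwriting the oldest row
    print(q.getLast())                               # -> [5. 5.]
    print(q.getArrayAll())                           # stored rows ordered oldest to newest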
|
# -*- coding: UTF-8 -*-
# ----------- IMPORTS AND LIBRARIES ----------------------------------
import numpy as np
# Commands used here: savetxt, array, loadtxt
import os as o
# Commands used here: system, name, remove, getcwd
from time import sleep
# --------------- graphical side of the gameplay -------------------------
def tablero_inicial(tablero,filas,columnas):
for y in range(filas):
if y%2 == 0:
for x in range(columnas):
if float(x%2) ==0:
tablero[y][x]='+'
else:
tablero[y][x]=' '
else:
for x in range(columnas):
tablero[y][x]=' '
def mostrar_tablero(tablero,filas,columnas): # TODO: fix the board display
auxc = int((columnas + 1)/2)
auxs = int((filas + 1)/2)
orden = 0
if auxs<9:
print(" ",end="")
else:
print(" ",end="")
for n in range(auxc):
if auxc>9:
print(n+1, end=" ")
else:
print(n+1,end=" ")
print()
for i in range(filas):
if float(i%2)== 0:
if auxs>9:
print(orden+1, end=" ")
else:
print(orden+1,end=" ")
orden+=1
else:
if auxs>9:
print(" ", end=" ")
else:
print(" ",end=" ")
orden+=1
for j in range(columnas):
print(tablero[i][j],end=" ")
print()
def despliegue_menu():
print("+","-"*21,"+")
print("+ 1) Nueva Partida +")
print("+ 2) Continuar Juego +")
print("+ 3) Jugadores +")
print("+ 4) Estadísticas +")
print("+ 5) Salir +")
print("+","-"*21,"+")
# ------------------------ game rules and internal configuration --------------------
def valido(x1,x2,y1,y2):
conf1 = (x1-x2)**2
conf2 = (y1-y2)**2
if conf1 >= 1 and conf2 >= 1:
return False
else:
return True
def libre(a,b):
if tablero[a][b]==' ':
return True
else:
return False
def completo(x,y,fila,columna,tablero):
pos = 0
# 1: / 2: / 3: /4: / 5: / 6:
if tablero[x][y]=='-' and x!=0 and x!=fila:
if (tablero[x+1][y-1]=='|' and tablero[x+1][y+1]=='|' and tablero[x+2][y]=='-')and(tablero[x-1][y-1]=='|' and tablero[x-1][y+1]=='|' and tablero[x-2][y]=='-'):
pos = 5
elif tablero[x+1][y-1]=='|' and tablero[x+1][y+1]=='|' and tablero[x+2][y]=='-':
pos = 2
elif tablero[x-1][y-1]=='|' and tablero[x-1][y+1]=='|' and tablero[x-2][y]=='-':
pos = 1
elif tablero[x][y]=='-' and x==0:
if tablero[x+1][y-1]=='|' and tablero[x+1][y+1]=='|' and tablero[x+2][y]=='-':
pos = 2
elif tablero[x][y]=='-' and x==fila:
if tablero[x-1][y-1]=='|' and tablero[x-1][y+1]=='|' and tablero[x-2][y]=='-':
pos = 1
elif tablero[x][y]=='|' and y!=0 and y!=columna:
if (tablero[x][y+2]=='|' and tablero[x+1][y+1]=='-' and tablero[x-1][y+1]=='-') and (tablero[x][y-2]=='|' and tablero[x+1][y-1]=='-' and tablero[x-1][y-1]=='-'):
pos = 6
elif tablero[x][y+2]=='|' and tablero[x+1][y+1]=='-' and tablero[x-1][y+1]=='-':
pos = 3
elif tablero[x][y-2]=='|' and tablero[x+1][y-1]=='-' and tablero[x-1][y-1]=='-':
pos = 4
elif tablero[x][y]=='|' and y==0:
if tablero[x][y+2]=='|' and tablero[x+1][y+1]=='-' and tablero[x-1][y+1]=='-':
pos = 3
elif tablero[x][y]=='|' and y == columna:
if tablero[x][y-2]=='|' and tablero[x+1][y-1]=='-' and tablero[x-1][y-1]=='-':
pos = 4
return pos
def cuadro_para(turno,jugadores,x,y,pos,tablero,puntos):
print(jugadores[turno]," ha completado un cuadro")
nombre = jugadores[turno]
inicial = nombre[0]
if pos == 1:
tablero[x-1][y]= inicial
elif pos == 2:
tablero[x+1][y]= inicial
elif pos == 3:
tablero[x][y+1]= inicial
elif pos == 4:
tablero[x][y-1]= inicial
elif pos == 5:
tablero[x+1][y]= inicial
tablero[x-1][y]= inicial
puntos[turno]+=1
elif pos == 6:
tablero[x][y+1]= inicial
tablero[x][y-1]= inicial
puntos[turno]+=1
print("El jugador ha ganado otro turno por completar un cuadro")
return
def lista_jugadores(jugadores,num,puntos):
for i in range (num):
puntos[i]=0
print("Jugador #",i,": ")
nombre = input("Digite el nombre del jugador: ")
if i>0:
for j in range (i):
aux_nom = jugadores[j]
while nombre[0] == aux_nom[0]:
nombre = input("Digite otro nombre para el jugador con inicial diferente")
jugadores[i]= nombre
else:
jugadores[0]= nombre
def clear():
if o.name == "nt":
o.system("cls")
else:
o.system("clear")
def guardado_tablero(filas,columnas,tablero,ultimo,jugadores,puntos,num):
txt = " ultimo_juego.txt"
line1 = str(filas) + " " + str(columnas) + " " + str(ultimo) + " " + str(num)+ "\n"
line2 = ""
line3 = ""
for j in range(num):
line2 += str(jugadores[j]) + " "
line3 += str(puntos[j]) + " "
copia_pausa = [[0 for i in range(columnas)]for j in range(filas)]
for x in range(filas):
for y in range(columnas):
copia_pausa[x][y]= ord(tablero[x][y])
guardado = np.array(copia_pausa)
if txt in o.listdir(".") and o.path.isfile(txt) :
o.remove(o.getcwd + "/" + txt)
print("Se ha reemplazado un archivo de partida pausada")
np.savetxt("ultimo_juego.txt",guardado)
print("Tablero guardado con éxito")
datos = open("datos_juego.txt","w")
    # data save order: fila, columna, turno, num_jugadores, jugadores, puntos
datos.write(line1)
datos.write(line2)
datos.write("\n")
datos.write(line3)
print("Datos guardados correctamente")
datos.close()
def carga_datos(jugadores,puntos,tablero):
matriz = "ultimo_juego.txt"
txt = "datos-juego.txt"
if not (txt in o.listdir(".") and o.path.isfile(txt)):
print("Error accediendo a los registros")
fila = columna = turno = num = -1
return fila,columna,turno,num
recarga = open(txt,"r")
cargados = np.loadtxt(matriz)
line1 = recarga.readline()
line1 = line1.split()
fila = int(line1[0])
columna = int(line1[1])
turno = int(line1[2])
num = int(line1[3])
line2 = recarga.readline()
line2 = line2.split()
line3 = recarga.readline()
line3 = line3.split()
for j in range(num):
jugadores[j]= line2[j]
puntos[j]= int(line3[j])
for x in range(fila):
for y in range(columna):
tablero[x][y]=chr(cargados[x][y])
recarga.close()
o.remove(o.getcwd()+ matriz)
o.remove(o.getcwd()+ txt)
print("Datos de partida cargados correctamente, recuerde al cargarlos no se podrá jugar la misma partida posteriormente")
return fila,columna,turno,num
#------------------------ gameplay and game flow ---------------------------------------
def tu_turno(fila, columna, turno, tablero, jugadores, puntos, num):
    # Plays one turn: reads the two dots to join, draws the edge, and checks whether the
    # move closed a box (the player then keeps the turn); entering -1 pauses and saves.
    print("Es el turno del jugador: ", jugadores[turno])
    x = y = x1 = x2 = y1 = y2 = con = 0
    while not (valido(x1, x2, y1, y2) and libre(x, y)):
        if con > 0:
            print("Movimiento inválido, vuelva a digitar por favor:")
            print("Recuerde que para pausar el juego debe oprimir -1")
        coordenadas = []
        for nombre_coord in ("x1", "y1", "x2", "y2"):
            valor = int(input("Digite la coordenada " + nombre_coord + ": "))
            if valor == -1:
                guardado_tablero(fila, columna, tablero, turno, jugadores, puntos, num)
                return -1
            coordenadas.append(valor)
        x1, y1, x2, y2 = coordenadas
        # Two adjacent dots (1-indexed) map to the edge cell between them on the doubled grid.
        x = x1 + x2 - 2
        y = y1 + y2 - 2
        con += 1
    if x % 2 == 0 and y % 2 != 0:
        tablero[x][y] = '-'   # horizontal edge
    else:
        tablero[x][y] = '|'   # vertical edge
    pos = completo(x, y, fila, columna, tablero)
    if pos != 0:
        puntos[turno] += 1
        cuadro_para(turno, jugadores, x, y, pos, tablero, puntos)
        sleep(1)
    else:
        turno += 1
        if turno >= num:
            turno = 0
    return turno
def disponibles(tablero, fila, columna):
    # True while at least one cell of the playing area is still free (' '), i.e. moves remain.
for i in range(fila):
for j in range(columna):
if tablero[i][j]==' ':
return True
return False
#------------------------ MAIN PROGRAM: CUADRITO (dots and boxes) ---------------------------------
jugadores=[' ',' ',' ',' ',' ']
puntos=[0,0,0,0,0]
filas = columnas = 0
turno = 0
seguir = True
print("Bienvenido al juego de cuadrito de Daniel López, versión en Python")
despliegue_menu()
opt = int(input("Digite su opción: "))
clear()
if opt==1:
    print("Cargando tablero vacío y espacio de juego, ¿está listo?")
    filas = int(input("Digite el número de filas: "))
    columnas = int(input("Digite el número de columnas: "))
    filas = filas*2-1
    columnas = columnas*2-1
    # Fixed 25x25 grid padded with '°': the neighbour checks in completo() read the
    # padding instead of indexing outside the playing area.
    tablero = [['°' for _ in range(25)] for _ in range(25)]
    num = int(input("Digite el número de jugadores: "))
    lista_jugadores(jugadores,num,puntos)
    print("El juego empieza: ")
    tablero_inicial(tablero,filas,columnas)
    while True:
mostrar_tablero(tablero,filas,columnas)
turno = tu_turno(filas,columnas,turno,tablero,jugadores,puntos,num)
if turno == -1:
print("Se ha detenido el juego, gracias por jugar")
break
seguir = disponibles(tablero,filas, columnas)
if not seguir:
print("No ha movimientos disponibles, el juego ha terminado")
break
clear()
print()
elif opt==2:
    print("Buscando en el historial de partida, recuerde verificar que el guardado del juego se realizó correctamente")
    # The board must exist before carga_datos() can fill it in (it is otherwise only created in option 1).
    tablero = [['°' for _ in range(25)] for _ in range(25)]
    filas,columnas,turno,num = carga_datos(jugadores, puntos, tablero)
    if filas != -1 and num !=-1:
        print("El juego continúa: ")
        while True:
mostrar_tablero(tablero,filas,columnas)
turno = tu_turno(filas,columnas,turno,tablero,jugadores,puntos,num)
if turno == -1:
print("Se ha detenido el juego, gracias por jugar")
break
seguir = disponibles(tablero,filas, columnas)
if not seguir:
print("No ha movimientos disponibles, el juego ha terminado")
break
clear()
else:
print("Volviendo al menú principal...")
elif opt==3:
print("A continuación se muestra las opciones disponibles para jugador")
elif opt==4:
print("Cargando datos y estadísticas")
print("-"*15)
print("Gracias por venir, espero haya disfrutado del juego")
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib
import paddle.nn as nn
from paddle.jit import to_static
from paddle.static import InputSpec
import paddle
def create_input_specs():
    # Symbolic input signatures for paddle.jit.to_static: two int64 token-id tensors of
    # shape [batch_size, seq_len], with both dimensions left dynamic.
    src_word = paddle.static.InputSpec(
        name="src_word", shape=[None, None], dtype="int64")
    trg_word = paddle.static.InputSpec(
        name="trg_word", shape=[None, None], dtype="int64")
    return [src_word, trg_word]
def apply_to_static(config, model):
    # Wraps the model with paddle.jit.to_static when config['to_static'] is truthy;
    # otherwise the model is returned unchanged.
    support_to_static = config.get('to_static', False)
    if support_to_static:
        specs = create_input_specs()
        model = to_static(model, input_spec=specs)
    return model
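# Minimal usage sketch (editor's addition, not part of the original file): SimpleTranslator
# and the config dict below are illustrative assumptions; the only requirement is that the
# model's forward() accepts the two int64 inputs declared in create_input_specs().
if __name__ == "__main__":
    class SimpleTranslator(nn.Layer):
        def __init__(self, vocab_size=100, hidden=16):
            super().__init__()
            self.src_emb = nn.Embedding(vocab_size, hidden)
            self.trg_emb = nn.Embedding(vocab_size, hidden)
        def forward(self, src_word, trg_word):
            return self.src_emb(src_word) + self.trg_emb(trg_word)
    model = apply_to_static({"to_static": True}, SimpleTranslator())
    out = model(paddle.to_tensor([[1, 2, 3]], dtype="int64"),
                paddle.to_tensor([[4, 5, 6]], dtype="int64"))
    print(out.shape)  # [1, 3, 16]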
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v4.proto.resources import keyword_view_pb2 as google_dot_ads_dot_googleads__v4_dot_proto_dot_resources_dot_keyword__view__pb2
from google.ads.google_ads.v4.proto.services import keyword_view_service_pb2 as google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_keyword__view__service__pb2
class KeywordViewServiceStub(object):
"""Proto file describing the Keyword View service.
Service to manage keyword views.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetKeywordView = channel.unary_unary(
'/google.ads.googleads.v4.services.KeywordViewService/GetKeywordView',
request_serializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_keyword__view__service__pb2.GetKeywordViewRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_resources_dot_keyword__view__pb2.KeywordView.FromString,
)
class KeywordViewServiceServicer(object):
"""Proto file describing the Keyword View service.
Service to manage keyword views.
"""
def GetKeywordView(self, request, context):
"""Returns the requested keyword view in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_KeywordViewServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetKeywordView': grpc.unary_unary_rpc_method_handler(
servicer.GetKeywordView,
request_deserializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_keyword__view__service__pb2.GetKeywordViewRequest.FromString,
response_serializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_resources_dot_keyword__view__pb2.KeywordView.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.ads.googleads.v4.services.KeywordViewService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
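# Usage sketch (editor's note, not emitted by the protoc plugin; the host, port, and
# resource name below are illustrative assumptions).
#
# Client side:
#     channel = grpc.insecure_channel("localhost:50051")
#     stub = KeywordViewServiceStub(channel)
#     request = google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_keyword__view__service__pb2.GetKeywordViewRequest()
#     request.resource_name = "customers/1234567890/keywordViews/111~222"
#     keyword_view = stub.GetKeywordView(request)
#
# Server side:
#     from concurrent import futures
#     server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
#     add_KeywordViewServiceServicer_to_server(KeywordViewServiceServicer(), server)
#     server.add_insecure_port("[::]:50051")
#     server.start()
#     server.wait_for_termination()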
|