Dataset schema:

| column | type | values / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
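The rows below are individual Python source files with per-repository star, issue, and fork metadata. As orientation only, here is a minimal sketch of how a dataset with this schema could be streamed and inspected with the `datasets` library; the dataset identifier and `data_dir` are assumptions, not taken from this page.

# Sketch only: "bigcode/the-stack-dedup" and data_dir are placeholder choices, not confirmed by this page.
from datasets import load_dataset

ds = load_dataset("bigcode/the-stack-dedup", data_dir="data/python",
                  split="train", streaming=True)
for row in ds.take(2):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"],
          row["avg_line_length"], row["alphanum_fraction"])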
hexsha: 7ceaba19b2ae0fdea8ed8a631ad36be10ed6c44e | size: 1,109 | ext: py | lang: Python
max_stars_repo: cron2/removeExpiredPremium.py @ B1N4R1/HackSkill (0910f274a462dc58d45cf6442886634d027f139b) | licenses: ["MIT"] | max_stars_count: null | stars events: null to null
max_issues_repo: cron2/removeExpiredPremium.py @ B1N4R1/HackSkill (0910f274a462dc58d45cf6442886634d027f139b) | licenses: ["MIT"] | max_issues_count: null | issues events: null to null
max_forks_repo: cron2/removeExpiredPremium.py @ B1N4R1/HackSkill (0910f274a462dc58d45cf6442886634d027f139b) | licenses: ["MIT"] | max_forks_count: null | forks events: null to null
content:
import MySQLdb
import sys
import time
start_time = time.time()
db = MySQLdb.connect(host="127.0.0.1",user="root",passwd="",db="game")
cur = db.cursor()
cur.execute(" SELECT id, boughtDate, premiumUntil, totalPaid \
FROM users_premium \
WHERE TIMESTAMPDIFF(SECOND, NOW(), premiumUntil) < 0 \
")
imported = False
for userID, bought, premium, paid in cur.fetchall():
if not imported:
imported = True
sys.path.insert(0, '/opt/lampp/htdocs/HE/python')
import badge_add
badge_add.userBadge = 'user'
badge_add.userID = userID
badge_add.badgeID = 80 #Donator badge
badge_add.badge_add()
cur.execute("INSERT INTO premium_history \
(userID, boughtDate, premiumUntil, paid)\
VALUES \
(%s, %s, %s, %s)\
", (userID, bought, premium, paid))
cur.execute(" DELETE FROM users_premium \
WHERE id = %s AND premiumUntil = %s \
", (userID, premium))
cur.execute(" UPDATE internet_webserver \
SET active = 0 \
WHERE id = %s \
", userID)
db.commit()
print time.strftime("%d/%m/%y %H:%M:%S"),' - ',__file__,' - ',round(time.time() - start_time, 4), "s"
avg_line_length: 22.632653 | max_line_length: 101 | alphanum_fraction: 0.65284
hexsha: 3a087e62ac22911da5fdb8885a38a29be59f29a3 | size: 3,390 | ext: py | lang: Python
max_stars_repo: xmrnodes/helpers.py @ lalanza808/monero.fail (f55dc9e893944ba093bda055ea5b93cd96dcf051) | licenses: ["MIT"] | max_stars_count: 17 | stars events: 2020-10-24T15:50:18.000Z to 2022-03-17T07:56:07.000Z
max_issues_repo: xmrnodes/helpers.py @ savg110/monero.fail (b88bd19578dd177503ae7e92ef22d83535dcaadb) | licenses: ["MIT"] | max_issues_count: 7 | issues events: 2020-10-18T11:30:16.000Z to 2022-03-19T04:47:51.000Z
max_forks_repo: xmrnodes/helpers.py @ savg110/monero.fail (b88bd19578dd177503ae7e92ef22d83535dcaadb) | licenses: ["MIT"] | max_forks_count: 8 | forks events: 2020-10-18T11:18:10.000Z to 2022-03-31T15:08:29.000Z
content:
import sys
import socket
import pickle
from os import path
from requests import get as r_get
from levin.section import Section
from levin.bucket import Bucket
from levin.ctypes import *
from levin.constants import LEVIN_SIGNATURE
from xmrnodes import config
def make_request(url: str, path="/get_info", data=None):
if is_onion(url):
proxies = {"http": f"socks5h://{config.TOR_HOST}:{config.TOR_PORT}"}
timeout = 18
else:
proxies = None
timeout = 6
r = r_get(url + path, timeout=timeout, proxies=proxies, json=data)
r.raise_for_status()
return r
def determine_crypto(url):
data = {"method": "get_block_header_by_height", "params": {"height": 0}}
hashes = {
"monero": [
"418015bb9ae982a1975da7d79277c2705727a56894ba0fb246adaabb1f4632e3", #mainnet
"48ca7cd3c8de5b6a4d53d2861fbdaedca141553559f9be9520068053cda8430b", #testnet
"76ee3cc98646292206cd3e86f74d88b4dcc1d937088645e9b0cbca84b7ce74eb" #stagenet
],
"wownero": [
"a3fd635dd5cb55700317783469ba749b5259f0eeac2420ab2c27eb3ff5ffdc5c", #mainnet
"d81a24c7aad4628e5c9129f8f2ec85888885b28cf468597a9762c3945e9f29aa", #testnet
]
}
try:
r = make_request(url, "/json_rpc", data)
assert "result" in r.json()
hash = r.json()["result"]["block_header"]["hash"]
crypto = "unknown"
for c, h in hashes.items():
if hash in h:
crypto = c
break
return crypto
except:
return "unknown"
def is_onion(url: str):
_split = url.split(":")
if len(_split) < 2:
return False
if _split[1].endswith(".onion"):
return True
else:
return False
# Use hacky filesystem cache since i dont feel like shipping redis
def rw_cache(key_name, data=None):
pickle_file = path.join(config.DATA_DIR, f'{key_name}.pkl')
if data:
with open(pickle_file, 'wb') as f:
f.write(pickle.dumps(data))
return data
else:
with open(pickle_file, 'rb') as f:
pickled_data = pickle.load(f)
return pickled_data
def retrieve_peers(host, port):
try:
print(f'[.] Connecting to {host}:{port}')
sock = socket.socket()
sock.settimeout(5)
sock.connect((host, int(port)))
except:
sys.stderr.write("unable to connect to %s:%d\n" % (host, int([port])))
sys.exit()
bucket = Bucket.create_handshake_request()
sock.send(bucket.header())
sock.send(bucket.payload())
buckets = []
peers = []
while 1:
buffer = sock.recv(8)
if not buffer:
sys.stderr.write("Invalid response; exiting\n")
break
if not buffer.startswith(bytes(LEVIN_SIGNATURE)):
sys.stderr.write("Invalid response; exiting\n")
break
bucket = Bucket.from_buffer(signature=buffer, sock=sock)
buckets.append(bucket)
if bucket.command == 1001:
_peers = bucket.get_peers() or []
for peer in _peers:
try:
peers.append('http://%s:%d' % (peer['ip'].ip, peer['port'].value))
except:
pass
sock.close()
break
if peers:
return peers
else:
return None
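# Illustrative sketch only (not part of the original module): how the helpers above are
# typically combined. The node URL below is a placeholder, and rw_cache() requires
# config.DATA_DIR to point at a writable directory.
def _example_check_node(url="http://node.example.org:18081"):
    """Probe a node, guess its chain from the genesis hash, and cache the result on disk."""
    info = make_request(url).json()          # GET <url>/get_info
    crypto = determine_crypto(url)           # compares the genesis block hash
    return rw_cache("example_node", {"crypto": crypto, "height": info.get("height")})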
avg_line_length: 28.25 | max_line_length: 89 | alphanum_fraction: 0.59587
hexsha: 3fbe0814e0cc979a4f22fd4d27c849268addcb19 | size: 838 | ext: py | lang: Python
max_stars_repo: python/arrays/guess_game.py @ hs634/algorithms (e411d747eb27a1c784605e44111de2703f6dde4d) | licenses: ["MIT"] | max_stars_count: null | stars events: null to null
max_issues_repo: python/arrays/guess_game.py @ hs634/algorithms (e411d747eb27a1c784605e44111de2703f6dde4d) | licenses: ["MIT"] | max_issues_count: null | issues events: null to null
max_forks_repo: python/arrays/guess_game.py @ hs634/algorithms (e411d747eb27a1c784605e44111de2703f6dde4d) | licenses: ["MIT"] | max_forks_count: null | forks events: null to null
content:
__author__ = 'harsh'
import random
inclusive_range = (1, 1000)
print("Guess my target number that is between %i and %i (inclusive).\n"
% inclusive_range)
target = random.randint(*inclusive_range)
answer, i = None, 0
while answer != target:
i += 1
try:
txt = input("Your guess(%i): " % i)
answer = int(txt)
except ValueError:
print(" I don't understand your input of '%s' ?" % txt)
continue
    except (EOFError, SyntaxError):  # valid Python 3 syntax; Ctrl-D raises EOFError here
print(" Why are you entering EOF ?")
continue
if answer < inclusive_range[0] or answer > inclusive_range[1]:
print(" Out of range!")
continue
if answer == target:
print(" Ye-Haw!!")
break
if answer < target: print(" Too low.")
if answer > target: print(" Too high.")
print("\nThanks for playing.")
avg_line_length: 26.1875 | max_line_length: 71 | alphanum_fraction: 0.591885
hexsha: 105210cb91ff98016e26321ba885a0ed1211eb33 | size: 732 | ext: py | lang: Python
max_stars_repo: nlcpy_test/247_Irecv_Waitany.py @ SX-Aurora/mpi4py-ve (aa6b1f97933196f8a485d5d808e89d5a29b58b1c) | licenses: ["BSD-2-Clause"] | max_stars_count: null | stars events: null to null
max_issues_repo: nlcpy_test/247_Irecv_Waitany.py @ SX-Aurora/mpi4py-ve (aa6b1f97933196f8a485d5d808e89d5a29b58b1c) | licenses: ["BSD-2-Clause"] | max_issues_count: null | issues events: null to null
max_forks_repo: nlcpy_test/247_Irecv_Waitany.py @ SX-Aurora/mpi4py-ve (aa6b1f97933196f8a485d5d808e89d5a29b58b1c) | licenses: ["BSD-2-Clause"] | max_forks_count: null | forks events: null to null
content:
from mpi4pyve import MPI
import numpy as np
import nlcpy as vp
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
if rank == 0:
print("rank = ",rank)
x = vp.array([1,2,3], dtype=int)
print("x = ",x)
print("type(x) = ",type(x))
comm.Send([x, MPI.INT], dest=1)
elif rank == 1:
print("rank = ",rank)
y = vp.empty(3, dtype=int)
req = comm.Irecv([y, MPI.INT])
MPI.Request.Waitany([req])
print("y = ",y)
print("type(y) = ",type(y))
import sys
try:
y
if not isinstance(y, vp.core.core.ndarray):
print("NG : ", __file__, file=sys.stderr)
except NameError:
print("Failure test case : ", __file__, file=sys.stderr)
avg_line_length: 22.875 | max_line_length: 64 | alphanum_fraction: 0.561475
hexsha: cf7a1fd452f713ff0f0feac226ee5ec1e892cd26 | size: 875 | ext: py | lang: Python
max_stars_repo: OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/_opaque.py @ JE-Chen/je_old_repo (a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5) | licenses: ["MIT"] | max_stars_count: null | stars events: null to null
max_issues_repo: OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/_opaque.py @ JE-Chen/je_old_repo (a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5) | licenses: ["MIT"] | max_issues_count: null | issues events: null to null
max_forks_repo: OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/_opaque.py @ JE-Chen/je_old_repo (a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5) | licenses: ["MIT"] | max_forks_count: null | forks events: null to null
content:
"""Data-type definitions for EGL/GLES"""
import ctypes
pointer = ctypes.pointer
class _Opaque( ctypes.Structure ):
"""An Opaque Structure reference (base class)"""
class _opaque_pointer( ctypes.POINTER( _Opaque ) ):
_type_ = _Opaque
@classmethod
def from_param( cls, value ):
return ctypes.cast( value, cls )
@property
def address( self ):
return ctypes.addressof( self.contents )
@property
def as_voidp( self ):
return ctypes.c_voidp( self.address )
def __hash__(self):
"""Allow these pointers to be used as keys in dictionaries"""
return self.address
def opaque_pointer_cls( name ):
"""Create an Opaque pointer class for the given name"""
typ = type( name, (_Opaque,), {} )
p_typ = type( name+'_pointer', (_opaque_pointer,), {'_type_':typ})
return p_typ
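# Illustrative sketch only (not part of the original module): declare an opaque handle
# type the way an EGL binding might, and adapt a raw address into it.
def _example_opaque_usage():
    EGLDisplay_p = opaque_pointer_cls('EGLDisplay')
    raw = ctypes.c_int(0)                                   # stand-in for a C-owned handle
    disp = EGLDisplay_p.from_param(ctypes.addressof(raw))   # cast an address to the opaque type
    return disp.address == ctypes.addressof(raw)            # True: wraps the same address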
avg_line_length: 33.653846 | max_line_length: 71 | alphanum_fraction: 0.637714
hexsha: 0a5ce34073548f67fcc1caf2cdf0514cf60748d5 | size: 5,347 | ext: py | lang: Python
max_stars_repo: mpilogger/__init__.py @ mbkumar/python-mpi-logger (0ec9890b0b5a1afe17c4f67dafed63d9637db023) | licenses: ["BSD-2-Clause"] | max_stars_count: null | stars events: null to null
max_issues_repo: mpilogger/__init__.py @ mbkumar/python-mpi-logger (0ec9890b0b5a1afe17c4f67dafed63d9637db023) | licenses: ["BSD-2-Clause"] | max_issues_count: null | issues events: null to null
max_forks_repo: mpilogger/__init__.py @ mbkumar/python-mpi-logger (0ec9890b0b5a1afe17c4f67dafed63d9637db023) | licenses: ["BSD-2-Clause"] | max_forks_count: null | forks events: null to null
content:
import sys
import time
import logging
import array
import atexit
import mpi4py
from mpi4py import MPI
# Maximum length of message in characters
_message_maxlen = 2048
# Interval between checking for new logging messages.
# Used instead of Waitsome to reduce CPU load.
_sleep_interval = 0.01
# Because of clashes between `mpi4py` and `logging` when python is exiting we
# need to destroy the connections to the logging proceses at the module level.
# Object level destructors, or the `Handler.close` method will not work.
@atexit.register
def _destroy_log_comm():
for lc in _log_comm_list:
if lc.rank == 0:
lc.Isend([None, MPI.INT], dest=0, tag=1)
lc.Disconnect()
# Internal variable for keeping track of the log communicators.
_log_comm_list = []
class MPILogHandler(logging.Handler):
"""A Handler which logs messages over MPI to a single process
which then write them to a file.
This uses MPI-2's Dynamic Process Management to spawn a child process
which listens for log messages and then writes them to the specified file.
It checks for new messages every 0.01s.
Note
----
This Handler also makes `rank` and `size` available in the `logger.Record`
for any formatter to use.
"""
def __init__(self, logfile, comm=None, *args, **kwargs):
"""Create the logger.
Parameters
----------
logfile : string
Name of file to write.
comm : MPI.Intracomm
MPI communicator used by this logger.
"""
super().__init__(*args, **kwargs)
self._logfile = logfile
self._comm = MPI.COMM_WORLD if comm is None else comm
# Spawn new process for logging
self._log_comm = self._comm.Spawn(sys.executable, args=[__file__, self._logfile])
# Add the communicator to the list of ones to keep track of.
_log_comm_list.append(self._log_comm)
def __del__(self):
# Note this does not get called unless the Handler has been removed
# from the logger.
_log_comm_list.remove(self._log_comm)
if self._log_comm.rank == 0:
self._log_comm.Isend([None, MPI.INT], dest=0, tag=1)
self._log_comm.Disconnect()
def emit(self, record):
"""Emit the log message.
Parameters
----------
record : logging.Record
logging record that will get written to disk.
"""
try:
record.rank = self._comm.rank
record.size = self._comm.size
msg = self.format(record)
# If message too long, truncate it
if len(msg) > _message_maxlen:
msg = msg[:_message_maxlen]
msg_buf = array.array('b', msg.encode())
# Send message to the logging process
self._request = self._log_comm.Issend([msg_buf, MPI.CHAR], dest=0, tag=0)
s = MPI.Status()
self._request.Wait(status=s)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
if __name__ == '__main__':
if len(sys.argv) < 2:
raise Exception("Too few arguments to MPI logging process.")
# Get the logfile and tag from the command line arguments
logfile = sys.argv[1]
# Open the logfile
fh = open(logfile, 'w')
# Get the parent Intracomm
comm_parent = MPI.Comm.Get_parent()
# Initialise all the buffers to receive logging messages
buffers = [(array.array('b', '\0'.encode()) * _message_maxlen) for pi in range(comm_parent.remote_size)]
requests = []
# Create a request for checking if we should exit
exit_request = comm_parent.Irecv([None, MPI.INT], source=0, tag=1)
# Establish all the initial connections for receiving messages
for pi in range(comm_parent.remote_size):
request = comm_parent.Irecv([buffers[pi], MPI.CHAR], source=pi, tag=0)
requests.append(request)
while True:
# Wait until any connection receives a message
status_list = []
ind_requests = MPI.Request.Testsome(requests, statuses=status_list)
        # Request.Waitsome() and Request.Testsome() may return None or a list from mpi4py 2.0.0
        num_requests = len(ind_requests) if ind_requests is not None else 0
# If a request has changed
if num_requests > 0:
# Iterate over changed requests and process them
for ind, s in zip(ind_requests, status_list):
# Check to see if there was an error.
if s.Get_error() != 0:
raise Exception("Logging error (code %i)." % s.Get_error())
# Write the message to disk
msg_rank = s.Get_source()
msg = buffers[msg_rank].tobytes().decode().rstrip('\0')
fh.write('%s\n' % msg)
fh.flush()
# Replace the buffer and connection
buffers[ind] = (array.array('b', '\0'.encode()) * _message_maxlen)
requests[ind] = comm_parent.Irecv([buffers[ind], MPI.CHAR], source=msg_rank, tag=0)
if MPI.Request.Test(exit_request):
# We should exit from this process.
break
time.sleep(_sleep_interval)
comm_parent.Disconnect()
fh.close()
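# Illustrative sketch only (not part of the original module): attaching the handler from
# application code run under MPI, e.g. `mpirun -n 4 python app.py`. The handler makes
# `rank` and `size` available to the formatter, as the class docstring above notes.
def _example_attach_handler(logfile="app.log"):
    logger = logging.getLogger("app")
    handler = MPILogHandler(logfile)
    handler.setFormatter(logging.Formatter("[%(rank)d/%(size)d] %(levelname)s: %(message)s"))
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    logger.info("hello from every rank")
    return logger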
avg_line_length: 29.541436 | max_line_length: 108 | alphanum_fraction: 0.614363
hexsha: 88c3a5115a388ef59e047ed01803306213048fb1 | size: 512 | ext: py | lang: Python
max_stars_repo: blog/urls.py @ flannerykj/urbanapplause (c9b6c0f9a2f65b869fe1e6fa921972e7236e4fe5) | licenses: ["MIT"] | max_stars_count: null | stars events: null to null
max_issues_repo: blog/urls.py @ flannerykj/urbanapplause (c9b6c0f9a2f65b869fe1e6fa921972e7236e4fe5) | licenses: ["MIT"] | max_issues_count: null | issues events: null to null
max_forks_repo: blog/urls.py @ flannerykj/urbanapplause (c9b6c0f9a2f65b869fe1e6fa921972e7236e4fe5) | licenses: ["MIT"] | max_forks_count: null | forks events: null to null
content:
from django.conf.urls import include, url
from . import views
from django.views.generic.base import TemplateView
from unidecode import unidecode
app_name = 'blog'
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^post/(?P<slug>[^\.]+)/$', views.DetailView.as_view(), name='post-detail'),
url(r'^category/(?P<category>[^\.]+)/$', views.CategoryIndexView.as_view(), name='category-index'),
url(r'^tag/(?P<tag>[^\.]+)/$', views.TagIndexView.as_view(), name='tag-index'),
]
avg_line_length: 42.666667 | max_line_length: 103 | alphanum_fraction: 0.666016
hexsha: 2f1716febd3efe8157a74a82dff8728a840a1eca | size: 32,517 | ext: py | lang: Python
max_stars_repo: src/oci/database/models/vm_cluster_summary.py @ xjuarez/oci-python-sdk (3c1604e4e212008fb6718e2f68cdb5ef71fd5793) | licenses: ["Apache-2.0", "BSD-3-Clause"] | max_stars_count: 3 | stars events: 2020-09-10T22:09:45.000Z to 2021-12-24T17:00:07.000Z
max_issues_repo: src/oci/database/models/vm_cluster_summary.py @ xjuarez/oci-python-sdk (3c1604e4e212008fb6718e2f68cdb5ef71fd5793) | licenses: ["Apache-2.0", "BSD-3-Clause"] | max_issues_count: null | issues events: null to null
max_forks_repo: src/oci/database/models/vm_cluster_summary.py @ xjuarez/oci-python-sdk (3c1604e4e212008fb6718e2f68cdb5ef71fd5793) | licenses: ["Apache-2.0", "BSD-3-Clause"] | max_forks_count: null | forks events: null to null
content:
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class VmClusterSummary(object):
"""
Details of the Exadata Cloud@Customer VM cluster.
"""
#: A constant which can be used with the lifecycle_state property of a VmClusterSummary.
#: This constant has a value of "PROVISIONING"
LIFECYCLE_STATE_PROVISIONING = "PROVISIONING"
#: A constant which can be used with the lifecycle_state property of a VmClusterSummary.
#: This constant has a value of "AVAILABLE"
LIFECYCLE_STATE_AVAILABLE = "AVAILABLE"
#: A constant which can be used with the lifecycle_state property of a VmClusterSummary.
#: This constant has a value of "UPDATING"
LIFECYCLE_STATE_UPDATING = "UPDATING"
#: A constant which can be used with the lifecycle_state property of a VmClusterSummary.
#: This constant has a value of "TERMINATING"
LIFECYCLE_STATE_TERMINATING = "TERMINATING"
#: A constant which can be used with the lifecycle_state property of a VmClusterSummary.
#: This constant has a value of "TERMINATED"
LIFECYCLE_STATE_TERMINATED = "TERMINATED"
#: A constant which can be used with the lifecycle_state property of a VmClusterSummary.
#: This constant has a value of "FAILED"
LIFECYCLE_STATE_FAILED = "FAILED"
#: A constant which can be used with the lifecycle_state property of a VmClusterSummary.
#: This constant has a value of "MAINTENANCE_IN_PROGRESS"
LIFECYCLE_STATE_MAINTENANCE_IN_PROGRESS = "MAINTENANCE_IN_PROGRESS"
#: A constant which can be used with the license_model property of a VmClusterSummary.
#: This constant has a value of "LICENSE_INCLUDED"
LICENSE_MODEL_LICENSE_INCLUDED = "LICENSE_INCLUDED"
#: A constant which can be used with the license_model property of a VmClusterSummary.
#: This constant has a value of "BRING_YOUR_OWN_LICENSE"
LICENSE_MODEL_BRING_YOUR_OWN_LICENSE = "BRING_YOUR_OWN_LICENSE"
def __init__(self, **kwargs):
"""
Initializes a new VmClusterSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this VmClusterSummary.
:type id: str
:param compartment_id:
The value to assign to the compartment_id property of this VmClusterSummary.
:type compartment_id: str
:param last_patch_history_entry_id:
The value to assign to the last_patch_history_entry_id property of this VmClusterSummary.
:type last_patch_history_entry_id: str
:param lifecycle_state:
The value to assign to the lifecycle_state property of this VmClusterSummary.
Allowed values for this property are: "PROVISIONING", "AVAILABLE", "UPDATING", "TERMINATING", "TERMINATED", "FAILED", "MAINTENANCE_IN_PROGRESS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param display_name:
The value to assign to the display_name property of this VmClusterSummary.
:type display_name: str
:param time_created:
The value to assign to the time_created property of this VmClusterSummary.
:type time_created: datetime
:param lifecycle_details:
The value to assign to the lifecycle_details property of this VmClusterSummary.
:type lifecycle_details: str
:param time_zone:
The value to assign to the time_zone property of this VmClusterSummary.
:type time_zone: str
:param is_local_backup_enabled:
The value to assign to the is_local_backup_enabled property of this VmClusterSummary.
:type is_local_backup_enabled: bool
:param exadata_infrastructure_id:
The value to assign to the exadata_infrastructure_id property of this VmClusterSummary.
:type exadata_infrastructure_id: str
:param is_sparse_diskgroup_enabled:
The value to assign to the is_sparse_diskgroup_enabled property of this VmClusterSummary.
:type is_sparse_diskgroup_enabled: bool
:param vm_cluster_network_id:
The value to assign to the vm_cluster_network_id property of this VmClusterSummary.
:type vm_cluster_network_id: str
:param cpus_enabled:
The value to assign to the cpus_enabled property of this VmClusterSummary.
:type cpus_enabled: int
:param ocpus_enabled:
The value to assign to the ocpus_enabled property of this VmClusterSummary.
:type ocpus_enabled: float
:param memory_size_in_gbs:
The value to assign to the memory_size_in_gbs property of this VmClusterSummary.
:type memory_size_in_gbs: int
:param db_node_storage_size_in_gbs:
The value to assign to the db_node_storage_size_in_gbs property of this VmClusterSummary.
:type db_node_storage_size_in_gbs: int
:param data_storage_size_in_tbs:
The value to assign to the data_storage_size_in_tbs property of this VmClusterSummary.
:type data_storage_size_in_tbs: float
:param data_storage_size_in_gbs:
The value to assign to the data_storage_size_in_gbs property of this VmClusterSummary.
:type data_storage_size_in_gbs: float
:param shape:
The value to assign to the shape property of this VmClusterSummary.
:type shape: str
:param gi_version:
The value to assign to the gi_version property of this VmClusterSummary.
:type gi_version: str
:param system_version:
The value to assign to the system_version property of this VmClusterSummary.
:type system_version: str
:param ssh_public_keys:
The value to assign to the ssh_public_keys property of this VmClusterSummary.
:type ssh_public_keys: list[str]
:param license_model:
The value to assign to the license_model property of this VmClusterSummary.
Allowed values for this property are: "LICENSE_INCLUDED", "BRING_YOUR_OWN_LICENSE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type license_model: str
:param db_servers:
The value to assign to the db_servers property of this VmClusterSummary.
:type db_servers: list[str]
:param freeform_tags:
The value to assign to the freeform_tags property of this VmClusterSummary.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this VmClusterSummary.
:type defined_tags: dict(str, dict(str, object))
"""
self.swagger_types = {
'id': 'str',
'compartment_id': 'str',
'last_patch_history_entry_id': 'str',
'lifecycle_state': 'str',
'display_name': 'str',
'time_created': 'datetime',
'lifecycle_details': 'str',
'time_zone': 'str',
'is_local_backup_enabled': 'bool',
'exadata_infrastructure_id': 'str',
'is_sparse_diskgroup_enabled': 'bool',
'vm_cluster_network_id': 'str',
'cpus_enabled': 'int',
'ocpus_enabled': 'float',
'memory_size_in_gbs': 'int',
'db_node_storage_size_in_gbs': 'int',
'data_storage_size_in_tbs': 'float',
'data_storage_size_in_gbs': 'float',
'shape': 'str',
'gi_version': 'str',
'system_version': 'str',
'ssh_public_keys': 'list[str]',
'license_model': 'str',
'db_servers': 'list[str]',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'id': 'id',
'compartment_id': 'compartmentId',
'last_patch_history_entry_id': 'lastPatchHistoryEntryId',
'lifecycle_state': 'lifecycleState',
'display_name': 'displayName',
'time_created': 'timeCreated',
'lifecycle_details': 'lifecycleDetails',
'time_zone': 'timeZone',
'is_local_backup_enabled': 'isLocalBackupEnabled',
'exadata_infrastructure_id': 'exadataInfrastructureId',
'is_sparse_diskgroup_enabled': 'isSparseDiskgroupEnabled',
'vm_cluster_network_id': 'vmClusterNetworkId',
'cpus_enabled': 'cpusEnabled',
'ocpus_enabled': 'ocpusEnabled',
'memory_size_in_gbs': 'memorySizeInGBs',
'db_node_storage_size_in_gbs': 'dbNodeStorageSizeInGBs',
'data_storage_size_in_tbs': 'dataStorageSizeInTBs',
'data_storage_size_in_gbs': 'dataStorageSizeInGBs',
'shape': 'shape',
'gi_version': 'giVersion',
'system_version': 'systemVersion',
'ssh_public_keys': 'sshPublicKeys',
'license_model': 'licenseModel',
'db_servers': 'dbServers',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags'
}
self._id = None
self._compartment_id = None
self._last_patch_history_entry_id = None
self._lifecycle_state = None
self._display_name = None
self._time_created = None
self._lifecycle_details = None
self._time_zone = None
self._is_local_backup_enabled = None
self._exadata_infrastructure_id = None
self._is_sparse_diskgroup_enabled = None
self._vm_cluster_network_id = None
self._cpus_enabled = None
self._ocpus_enabled = None
self._memory_size_in_gbs = None
self._db_node_storage_size_in_gbs = None
self._data_storage_size_in_tbs = None
self._data_storage_size_in_gbs = None
self._shape = None
self._gi_version = None
self._system_version = None
self._ssh_public_keys = None
self._license_model = None
self._db_servers = None
self._freeform_tags = None
self._defined_tags = None
@property
def id(self):
"""
Gets the id of this VmClusterSummary.
The `OCID`__ of the VM cluster.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The id of this VmClusterSummary.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this VmClusterSummary.
The `OCID`__ of the VM cluster.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param id: The id of this VmClusterSummary.
:type: str
"""
self._id = id
@property
def compartment_id(self):
"""
Gets the compartment_id of this VmClusterSummary.
The `OCID`__ of the compartment.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this VmClusterSummary.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this VmClusterSummary.
The `OCID`__ of the compartment.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this VmClusterSummary.
:type: str
"""
self._compartment_id = compartment_id
@property
def last_patch_history_entry_id(self):
"""
Gets the last_patch_history_entry_id of this VmClusterSummary.
The `OCID`__ of the last patch history. This value is updated as soon as a patch operation starts.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The last_patch_history_entry_id of this VmClusterSummary.
:rtype: str
"""
return self._last_patch_history_entry_id
@last_patch_history_entry_id.setter
def last_patch_history_entry_id(self, last_patch_history_entry_id):
"""
Sets the last_patch_history_entry_id of this VmClusterSummary.
The `OCID`__ of the last patch history. This value is updated as soon as a patch operation starts.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param last_patch_history_entry_id: The last_patch_history_entry_id of this VmClusterSummary.
:type: str
"""
self._last_patch_history_entry_id = last_patch_history_entry_id
@property
def lifecycle_state(self):
"""
Gets the lifecycle_state of this VmClusterSummary.
The current state of the VM cluster.
Allowed values for this property are: "PROVISIONING", "AVAILABLE", "UPDATING", "TERMINATING", "TERMINATED", "FAILED", "MAINTENANCE_IN_PROGRESS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_state of this VmClusterSummary.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this VmClusterSummary.
The current state of the VM cluster.
:param lifecycle_state: The lifecycle_state of this VmClusterSummary.
:type: str
"""
allowed_values = ["PROVISIONING", "AVAILABLE", "UPDATING", "TERMINATING", "TERMINATED", "FAILED", "MAINTENANCE_IN_PROGRESS"]
if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
lifecycle_state = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_state = lifecycle_state
@property
def display_name(self):
"""
Gets the display_name of this VmClusterSummary.
The user-friendly name for the Exadata Cloud@Customer VM cluster. The name does not need to be unique.
:return: The display_name of this VmClusterSummary.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this VmClusterSummary.
The user-friendly name for the Exadata Cloud@Customer VM cluster. The name does not need to be unique.
:param display_name: The display_name of this VmClusterSummary.
:type: str
"""
self._display_name = display_name
@property
def time_created(self):
"""
Gets the time_created of this VmClusterSummary.
The date and time that the VM cluster was created.
:return: The time_created of this VmClusterSummary.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this VmClusterSummary.
The date and time that the VM cluster was created.
:param time_created: The time_created of this VmClusterSummary.
:type: datetime
"""
self._time_created = time_created
@property
def lifecycle_details(self):
"""
Gets the lifecycle_details of this VmClusterSummary.
Additional information about the current lifecycle state.
:return: The lifecycle_details of this VmClusterSummary.
:rtype: str
"""
return self._lifecycle_details
@lifecycle_details.setter
def lifecycle_details(self, lifecycle_details):
"""
Sets the lifecycle_details of this VmClusterSummary.
Additional information about the current lifecycle state.
:param lifecycle_details: The lifecycle_details of this VmClusterSummary.
:type: str
"""
self._lifecycle_details = lifecycle_details
@property
def time_zone(self):
"""
Gets the time_zone of this VmClusterSummary.
The time zone of the Exadata infrastructure. For details, see `Exadata Infrastructure Time Zones`__.
__ https://docs.cloud.oracle.com/Content/Database/References/timezones.htm
:return: The time_zone of this VmClusterSummary.
:rtype: str
"""
return self._time_zone
@time_zone.setter
def time_zone(self, time_zone):
"""
Sets the time_zone of this VmClusterSummary.
The time zone of the Exadata infrastructure. For details, see `Exadata Infrastructure Time Zones`__.
__ https://docs.cloud.oracle.com/Content/Database/References/timezones.htm
:param time_zone: The time_zone of this VmClusterSummary.
:type: str
"""
self._time_zone = time_zone
@property
def is_local_backup_enabled(self):
"""
Gets the is_local_backup_enabled of this VmClusterSummary.
If true, database backup on local Exadata storage is configured for the VM cluster. If false, database backup on local Exadata storage is not available in the VM cluster.
:return: The is_local_backup_enabled of this VmClusterSummary.
:rtype: bool
"""
return self._is_local_backup_enabled
@is_local_backup_enabled.setter
def is_local_backup_enabled(self, is_local_backup_enabled):
"""
Sets the is_local_backup_enabled of this VmClusterSummary.
If true, database backup on local Exadata storage is configured for the VM cluster. If false, database backup on local Exadata storage is not available in the VM cluster.
:param is_local_backup_enabled: The is_local_backup_enabled of this VmClusterSummary.
:type: bool
"""
self._is_local_backup_enabled = is_local_backup_enabled
@property
def exadata_infrastructure_id(self):
"""
Gets the exadata_infrastructure_id of this VmClusterSummary.
The `OCID`__ of the Exadata infrastructure.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The exadata_infrastructure_id of this VmClusterSummary.
:rtype: str
"""
return self._exadata_infrastructure_id
@exadata_infrastructure_id.setter
def exadata_infrastructure_id(self, exadata_infrastructure_id):
"""
Sets the exadata_infrastructure_id of this VmClusterSummary.
The `OCID`__ of the Exadata infrastructure.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param exadata_infrastructure_id: The exadata_infrastructure_id of this VmClusterSummary.
:type: str
"""
self._exadata_infrastructure_id = exadata_infrastructure_id
@property
def is_sparse_diskgroup_enabled(self):
"""
Gets the is_sparse_diskgroup_enabled of this VmClusterSummary.
If true, sparse disk group is configured for the VM cluster. If false, sparse disk group is not created.
:return: The is_sparse_diskgroup_enabled of this VmClusterSummary.
:rtype: bool
"""
return self._is_sparse_diskgroup_enabled
@is_sparse_diskgroup_enabled.setter
def is_sparse_diskgroup_enabled(self, is_sparse_diskgroup_enabled):
"""
Sets the is_sparse_diskgroup_enabled of this VmClusterSummary.
If true, sparse disk group is configured for the VM cluster. If false, sparse disk group is not created.
:param is_sparse_diskgroup_enabled: The is_sparse_diskgroup_enabled of this VmClusterSummary.
:type: bool
"""
self._is_sparse_diskgroup_enabled = is_sparse_diskgroup_enabled
@property
def vm_cluster_network_id(self):
"""
Gets the vm_cluster_network_id of this VmClusterSummary.
The `OCID`__ of the VM cluster network.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The vm_cluster_network_id of this VmClusterSummary.
:rtype: str
"""
return self._vm_cluster_network_id
@vm_cluster_network_id.setter
def vm_cluster_network_id(self, vm_cluster_network_id):
"""
Sets the vm_cluster_network_id of this VmClusterSummary.
The `OCID`__ of the VM cluster network.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param vm_cluster_network_id: The vm_cluster_network_id of this VmClusterSummary.
:type: str
"""
self._vm_cluster_network_id = vm_cluster_network_id
@property
def cpus_enabled(self):
"""
Gets the cpus_enabled of this VmClusterSummary.
The number of enabled CPU cores.
:return: The cpus_enabled of this VmClusterSummary.
:rtype: int
"""
return self._cpus_enabled
@cpus_enabled.setter
def cpus_enabled(self, cpus_enabled):
"""
Sets the cpus_enabled of this VmClusterSummary.
The number of enabled CPU cores.
:param cpus_enabled: The cpus_enabled of this VmClusterSummary.
:type: int
"""
self._cpus_enabled = cpus_enabled
@property
def ocpus_enabled(self):
"""
Gets the ocpus_enabled of this VmClusterSummary.
The number of enabled OCPU cores.
:return: The ocpus_enabled of this VmClusterSummary.
:rtype: float
"""
return self._ocpus_enabled
@ocpus_enabled.setter
def ocpus_enabled(self, ocpus_enabled):
"""
Sets the ocpus_enabled of this VmClusterSummary.
The number of enabled OCPU cores.
:param ocpus_enabled: The ocpus_enabled of this VmClusterSummary.
:type: float
"""
self._ocpus_enabled = ocpus_enabled
@property
def memory_size_in_gbs(self):
"""
Gets the memory_size_in_gbs of this VmClusterSummary.
The memory allocated in GBs.
:return: The memory_size_in_gbs of this VmClusterSummary.
:rtype: int
"""
return self._memory_size_in_gbs
@memory_size_in_gbs.setter
def memory_size_in_gbs(self, memory_size_in_gbs):
"""
Sets the memory_size_in_gbs of this VmClusterSummary.
The memory allocated in GBs.
:param memory_size_in_gbs: The memory_size_in_gbs of this VmClusterSummary.
:type: int
"""
self._memory_size_in_gbs = memory_size_in_gbs
@property
def db_node_storage_size_in_gbs(self):
"""
Gets the db_node_storage_size_in_gbs of this VmClusterSummary.
The local node storage allocated in GBs.
:return: The db_node_storage_size_in_gbs of this VmClusterSummary.
:rtype: int
"""
return self._db_node_storage_size_in_gbs
@db_node_storage_size_in_gbs.setter
def db_node_storage_size_in_gbs(self, db_node_storage_size_in_gbs):
"""
Sets the db_node_storage_size_in_gbs of this VmClusterSummary.
The local node storage allocated in GBs.
:param db_node_storage_size_in_gbs: The db_node_storage_size_in_gbs of this VmClusterSummary.
:type: int
"""
self._db_node_storage_size_in_gbs = db_node_storage_size_in_gbs
@property
def data_storage_size_in_tbs(self):
"""
Gets the data_storage_size_in_tbs of this VmClusterSummary.
Size, in terabytes, of the DATA disk group.
:return: The data_storage_size_in_tbs of this VmClusterSummary.
:rtype: float
"""
return self._data_storage_size_in_tbs
@data_storage_size_in_tbs.setter
def data_storage_size_in_tbs(self, data_storage_size_in_tbs):
"""
Sets the data_storage_size_in_tbs of this VmClusterSummary.
Size, in terabytes, of the DATA disk group.
:param data_storage_size_in_tbs: The data_storage_size_in_tbs of this VmClusterSummary.
:type: float
"""
self._data_storage_size_in_tbs = data_storage_size_in_tbs
@property
def data_storage_size_in_gbs(self):
"""
Gets the data_storage_size_in_gbs of this VmClusterSummary.
Size, in gigabytes, of the DATA disk group.
:return: The data_storage_size_in_gbs of this VmClusterSummary.
:rtype: float
"""
return self._data_storage_size_in_gbs
@data_storage_size_in_gbs.setter
def data_storage_size_in_gbs(self, data_storage_size_in_gbs):
"""
Sets the data_storage_size_in_gbs of this VmClusterSummary.
Size, in gigabytes, of the DATA disk group.
:param data_storage_size_in_gbs: The data_storage_size_in_gbs of this VmClusterSummary.
:type: float
"""
self._data_storage_size_in_gbs = data_storage_size_in_gbs
@property
def shape(self):
"""
Gets the shape of this VmClusterSummary.
The shape of the Exadata infrastructure. The shape determines the amount of CPU, storage, and memory resources allocated to the instance.
:return: The shape of this VmClusterSummary.
:rtype: str
"""
return self._shape
@shape.setter
def shape(self, shape):
"""
Sets the shape of this VmClusterSummary.
The shape of the Exadata infrastructure. The shape determines the amount of CPU, storage, and memory resources allocated to the instance.
:param shape: The shape of this VmClusterSummary.
:type: str
"""
self._shape = shape
@property
def gi_version(self):
"""
Gets the gi_version of this VmClusterSummary.
The Oracle Grid Infrastructure software version for the VM cluster.
:return: The gi_version of this VmClusterSummary.
:rtype: str
"""
return self._gi_version
@gi_version.setter
def gi_version(self, gi_version):
"""
Sets the gi_version of this VmClusterSummary.
The Oracle Grid Infrastructure software version for the VM cluster.
:param gi_version: The gi_version of this VmClusterSummary.
:type: str
"""
self._gi_version = gi_version
@property
def system_version(self):
"""
Gets the system_version of this VmClusterSummary.
Operating system version of the image.
:return: The system_version of this VmClusterSummary.
:rtype: str
"""
return self._system_version
@system_version.setter
def system_version(self, system_version):
"""
Sets the system_version of this VmClusterSummary.
Operating system version of the image.
:param system_version: The system_version of this VmClusterSummary.
:type: str
"""
self._system_version = system_version
@property
def ssh_public_keys(self):
"""
Gets the ssh_public_keys of this VmClusterSummary.
The public key portion of one or more key pairs used for SSH access to the VM cluster.
:return: The ssh_public_keys of this VmClusterSummary.
:rtype: list[str]
"""
return self._ssh_public_keys
@ssh_public_keys.setter
def ssh_public_keys(self, ssh_public_keys):
"""
Sets the ssh_public_keys of this VmClusterSummary.
The public key portion of one or more key pairs used for SSH access to the VM cluster.
:param ssh_public_keys: The ssh_public_keys of this VmClusterSummary.
:type: list[str]
"""
self._ssh_public_keys = ssh_public_keys
@property
def license_model(self):
"""
Gets the license_model of this VmClusterSummary.
The Oracle license model that applies to the VM cluster. The default is LICENSE_INCLUDED.
Allowed values for this property are: "LICENSE_INCLUDED", "BRING_YOUR_OWN_LICENSE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The license_model of this VmClusterSummary.
:rtype: str
"""
return self._license_model
@license_model.setter
def license_model(self, license_model):
"""
Sets the license_model of this VmClusterSummary.
The Oracle license model that applies to the VM cluster. The default is LICENSE_INCLUDED.
:param license_model: The license_model of this VmClusterSummary.
:type: str
"""
allowed_values = ["LICENSE_INCLUDED", "BRING_YOUR_OWN_LICENSE"]
if not value_allowed_none_or_none_sentinel(license_model, allowed_values):
license_model = 'UNKNOWN_ENUM_VALUE'
self._license_model = license_model
@property
def db_servers(self):
"""
Gets the db_servers of this VmClusterSummary.
The list of Db server.
:return: The db_servers of this VmClusterSummary.
:rtype: list[str]
"""
return self._db_servers
@db_servers.setter
def db_servers(self, db_servers):
"""
Sets the db_servers of this VmClusterSummary.
The list of Db server.
:param db_servers: The db_servers of this VmClusterSummary.
:type: list[str]
"""
self._db_servers = db_servers
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this VmClusterSummary.
Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this VmClusterSummary.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this VmClusterSummary.
Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this VmClusterSummary.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
Gets the defined_tags of this VmClusterSummary.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see `Resource Tags`__.
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this VmClusterSummary.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this VmClusterSummary.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see `Resource Tags`__.
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this VmClusterSummary.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
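# Illustrative sketch only (not part of the generated model): constructing the summary from
# keyword arguments. Assumption: the init_model_state_from_kwargs decorator applies kwargs
# through the property setters, so unrecognized enum text is mapped to 'UNKNOWN_ENUM_VALUE'.
def _example_summary():
    summary = VmClusterSummary(
        display_name='vmcluster-1',
        lifecycle_state='AVAILABLE',
        license_model='SOME_FUTURE_MODEL',   # deliberately not an allowed value
        cpus_enabled=8,
    )
    # expected: ('AVAILABLE', 'UNKNOWN_ENUM_VALUE') if kwargs go through the setters
    return summary.lifecycle_state, summary.license_model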
avg_line_length: 34.666311 | max_line_length: 245 | alphanum_fraction: 0.66965
hexsha: a089540c79194b9dcc83a29076eb3387a911b327 | size: 11,500 | ext: py | lang: Python
max_stars_repo: o3seespy/command/uniaxial_material/pytz.py @ o3seespy/o3seespy (4fdd942370df1ac8d454e361f651405717b8584c) | licenses: ["MIT", "BSD-3-Clause"] | max_stars_count: 16 | stars events: 2019-10-24T17:58:46.000Z to 2022-03-01T19:48:06.000Z
max_issues_repo: o3seespy/command/uniaxial_material/pytz.py @ o3seespy/o3seespy (4fdd942370df1ac8d454e361f651405717b8584c) | licenses: ["MIT", "BSD-3-Clause"] | max_issues_count: 5 | issues events: 2020-04-17T01:39:27.000Z to 2020-12-18T05:07:58.000Z
max_forks_repo: o3seespy/command/uniaxial_material/pytz.py @ o3seespy/o3seespy (4fdd942370df1ac8d454e361f651405717b8584c) | licenses: ["MIT", "BSD-3-Clause"] | max_forks_count: 6 | forks events: 2020-02-20T02:13:11.000Z to 2021-11-01T19:08:41.000Z
content:
from o3seespy.command.uniaxial_material.base_material import UniaxialMaterialBase
class PySimple1(UniaxialMaterialBase):
"""
The PySimple1 UniaxialMaterial Class
This command is used to construct a PySimple1 uniaxial material object.
"""
op_type = 'PySimple1'
def __init__(self, osi, soil_type, pult, y50, cd, c=0.0):
"""
Initial method for PySimple1
Parameters
----------
osi: o3seespy.OpenSeesInstance
soil_type: int
Soiltype = 1 backbone of p-y curve approximates matlock (1970) soft clay relation. soiltype = 2 backbone of
p-y curve approximates api (1993) sand relation.
pult: float
Ultimate capacity of the p-y material. note that "p" or "pult" are distributed loads [force per length of
pile] in common design equations, but are both loads for this uniaxial_material [i.e., distributed load times the
tributary length of the pile].
y50: float
Displacement at which 50% of pult is mobilized in monotonic loading.
cd: float
Variable that sets the drag resistance within a fully-mobilized gap as cd*pult.
c: float, optional
The viscous damping term (dashpot) on the far-field (elastic) component of the displacement rate (velocity).
(optional default = 0.0). nonzero c values are used to represent radiation damping effects
Examples
--------
>>> import o3seespy as o3
>>> osi = o3.OpenSeesInstance(ndm=2)
>>> o3.uniaxial_material.PySimple1(osi, soil_type=1, pult=1.0, y50=1.0, cd=1.0, c=0.0)
"""
self.osi = osi
self.soil_type = int(soil_type)
self.pult = float(pult)
self.y50 = float(y50)
self.cd = float(cd)
self.c = float(c)
if osi is not None:
osi.n_mat += 1
self._tag = osi.n_mat
self._parameters = [self.op_type, self._tag, self.soil_type, self.pult, self.y50, self.cd, self.c]
if osi is None:
self.built = 0
if osi is not None:
self.to_process(osi)
class TzSimple1(UniaxialMaterialBase):
"""
The TzSimple1 UniaxialMaterial Class
This command is used to construct a TzSimple1 uniaxial material object.
"""
op_type = 'TzSimple1'
def __init__(self, osi, soil_type, tult, z50, c=0.0):
"""
Initial method for TzSimple1
Parameters
----------
osi: o3seespy.OpenSeesInstance
soil_type: int
Soiltype = 1 backbone of t-z curve approximates reese and o'neill (1987). soiltype = 2 backbone of t-z curve
approximates mosher (1984) relation.
tult: float
Ultimate capacity of the t-z material. see note 1.
z50: float
Displacement at which 50% of tult is mobilized in monotonic loading.
c: float, optional
The viscous damping term (dashpot) on the far-field (elastic) component of the displacement rate (velocity).
(optional default = 0.0). see note 2.
Examples
--------
>>> import o3seespy as o3
>>> osi = o3.OpenSeesInstance(ndm=2)
>>> o3.uniaxial_material.TzSimple1(osi, soil_type=1, tult=1.0, z50=1.0, c=0.0)
"""
self.osi = osi
self.soil_type = int(soil_type)
self.tult = float(tult)
self.z50 = float(z50)
self.c = float(c)
if osi is not None:
osi.n_mat += 1
self._tag = osi.n_mat
self._parameters = [self.op_type, self._tag, self.soil_type, self.tult, self.z50, self.c]
if osi is None:
self.built = 0
if osi is not None:
self.to_process(osi)
class QzSimple1(UniaxialMaterialBase):
"""
The QzSimple1 UniaxialMaterial Class
This command is used to construct a QzSimple1 uniaxial material object.
"""
op_type = 'QzSimple1'
def __init__(self, osi, qz_type, qult, z50, suction=0.0, c=0.0):
"""
Initial method for QzSimple1
Parameters
----------
osi: o3seespy.OpenSeesInstance
qz_type: int
Qztype = 1 backbone of q-z curve approximates reese and o'neill's (1987) relation for drilled shafts in
clay. qztype = 2 backbone of q-z curve approximates vijayvergiya's (1977) relation for piles in sand.
qult: float
Ultimate capacity of the q-z material. see note 1.
z50: float
Displacement at which 50% of qult is mobilized in monotonic loading. see note 2.
suction: float, optional
Uplift resistance is equal to suction*qult. default = 0.0. the value of suction must be 0.0 to 0.1.*
c: float, optional
The viscous damping term (dashpot) on the far-field (elastic) component of the displacement rate (velocity).
default = 0.0. nonzero c values are used to represent radiation damping effects.*
Examples
--------
>>> import o3seespy as o3
>>> osi = o3.OpenSeesInstance(ndm=2)
>>> o3.uniaxial_material.QzSimple1(osi, qz_type=1, qult=1.0, z50=1.0, suction=0.0, c=0.0)
"""
self.osi = osi
self.qz_type = int(qz_type)
self.qult = float(qult)
self.z50 = float(z50)
self.suction = float(suction)
self.c = float(c)
if osi is not None:
osi.n_mat += 1
self._tag = osi.n_mat
self._parameters = [self.op_type, self._tag, self.qz_type, self.qult, self.z50, self.suction, self.c]
if osi is None:
self.built = 0
if osi is not None:
self.to_process(osi)
class PyLiq1(UniaxialMaterialBase):
"""
The PyLiq1 UniaxialMaterial Class
"""
op_type = 'PyLiq1'
def __init__(self, osi, soil_type, pult, y50, cd, c, p_res, ele1, ele2, time_series=None):
"""
Initial method for PyLiq1
Parameters
----------
osi: o3seespy.OpenSeesInstance
soil_type: int
Soiltype = 1 backbone of p-y curve approximates matlock (1970) soft clay relation. soiltype = 2 backbone of
p-y curve approximates api (1993) sand relation.
pult: float
Ultimate capacity of the p-y material. note that "p" or "pult" are distributed loads [force per length of
pile] in common design equations, but are both loads for this uniaxial_material [i.e., distributed load times the
tributary length of the pile].
y50: float
Displacement at which 50% of pult is mobilized in monotonic loading.
cd: float
Variable that sets the drag resistance within a fully-mobilized gap as cd*pult.
c: float
The viscous damping term (dashpot) on the far-field (elastic) component of the displacement rate (velocity).
(optional default = 0.0). nonzero c values are used to represent radiation damping effects
p_res: float
Sets the minimum (or residual) peak resistance that the material retains as the adjacent solid soil elements
liquefy
ele1: float
Are the eleobject (element numbers) for the two solid elements from which pyliq1 will obtain mean effective
stresses and excess pore pressures
ele2: float
Are the eleobject (element numbers) for the two solid elements from which pyliq1 will obtain mean effective
stresses and excess pore pressures
time_series: obj, optional
Alternatively, mean effective stress can be supplied by a time series by specifying the text string
``'-timeseries'`` and the object of the series ``seriesobject``.
Examples
--------
>>> import o3seespy as o3
>>> osi = o3.OpenSeesInstance(ndm=2)
>>> o3.uniaxial_material.PyLiq1(osi, soil_type=1, pult=1.0, y50=1.0, cd=1.0, c=1.0, p_res=1.0, ele1=1.0, ele2=1.0)
"""
self.osi = osi
self.soil_type = int(soil_type)
self.pult = float(pult)
self.y50 = float(y50)
self.cd = float(cd)
self.c = float(c)
self.p_res = float(p_res)
self.ele1 = float(ele1)
self.ele2 = float(ele2)
self.time_series = time_series
if osi is not None:
osi.n_mat += 1
self._tag = osi.n_mat
self._parameters = [self.op_type, self._tag, self.soil_type, self.pult, self.y50, self.cd, self.c, self.p_res, self.ele1, self.ele2]
if getattr(self, 'time_series') is not None:
self._parameters += ['-timeSeries', self.time_series.tag]
if osi is None:
self.built = 0
if osi is not None:
self.to_process(osi)
def set_update_material_stage(self, value, ele=None, eles=None):
self.set_parameter(self.osi, 'updateMaterialStage', value, ele, eles)
class TzLiq1(UniaxialMaterialBase):
"""
The TzLiq1 UniaxialMaterial Class
"""
op_type = 'TzLiq1'
def __init__(self, osi, tz_type, tult, z50, c, ele1, ele2, time_series=None):
"""
Initial method for TzLiq1
Parameters
----------
osi: o3seespy.OpenSeesInstance
tz_type: int
Tztype = 1 backbone of t-z curve approximates reese and o'neill (1987). tztype = 2 backbone of t-z curve
approximates mosher (1984) relation.
tult: float
Ultimate capacity of the t-z material. see note 1.
z50: float
Displacement at which 50% of tult is mobilized in monotonic loading.
c: float
The viscous damping term (dashpot) on the far-field (elastic) component of the displacement rate (velocity).
ele1: float
Are the eleobject (element numbers) for the two solid elements from which pyliq1 will obtain mean effective
stresses and excess pore pressures
ele2: float
Are the eleobject (element numbers) for the two solid elements from which pyliq1 will obtain mean effective
stresses and excess pore pressures
time_series: obj, optional
Alternatively, mean effective stress can be supplied by a time series by specifying the text string
``'-timeseries'`` and the object of the seriesm ``seriesobject``.
Examples
--------
>>> import o3seespy as o3
>>> osi = o3.OpenSeesInstance(ndm=2)
>>> o3.uniaxial_material.TzLiq1(osi, tz_type=1, tult=1.0, z50=1.0, c=1.0, ele1=1.0, ele2=1.0)
"""
self.osi = osi
self.tz_type = int(tz_type)
self.tult = float(tult)
self.z50 = float(z50)
self.c = float(c)
self.ele1 = float(ele1)
self.ele2 = float(ele2)
self.time_series = time_series
if osi is not None:
osi.n_mat += 1
self._tag = osi.n_mat
self._parameters = [self.op_type, self._tag, self.tz_type, self.tult, self.z50, self.c, self.ele1, self.ele2]
if getattr(self, 'time_series') is not None:
self._parameters += ['-timeSeries', self.time_series.tag]
if osi is None:
self.built = 0
if osi is not None:
self.to_process(osi)
def set_update_material_stage(self, value, ele=None, eles=None):
self.set_parameter(self.osi, 'updateMaterialStage', value, ele, eles)
avg_line_length: 39.930556 | max_line_length: 140 | alphanum_fraction: 0.607652
hexsha: 543f1fb0cc8ee77fc6838c3d2fcab5cd2b6f2d3f | size: 158 | ext: py | lang: Python
max_stars_repo: pykp/metric/__init__.py @ VieZhong/seq2seq-keyphrase-pytorch (92a41469ddbe6d76a545c4e24f6c672b98e0b289) | licenses: ["Apache-2.0"] | max_stars_count: 152 | stars events: 2019-10-07T03:15:53.000Z to 2022-03-24T16:26:26.000Z
max_issues_repo: pykp/metric/__init__.py @ VieZhong/seq2seq-keyphrase-pytorch (92a41469ddbe6d76a545c4e24f6c672b98e0b289) | licenses: ["Apache-2.0"] | max_issues_count: 46 | issues events: 2019-11-04T09:51:51.000Z to 2022-03-06T18:40:13.000Z
max_forks_repo: pykp/metric/__init__.py @ VieZhong/seq2seq-keyphrase-pytorch (92a41469ddbe6d76a545c4e24f6c672b98e0b289) | licenses: ["Apache-2.0"] | max_forks_count: 41 | forks events: 2018-03-29T13:41:12.000Z to 2021-06-22T14:28:21.000Z
content:
# -*- coding: utf-8 -*-
"""
Python File Template
"""
import os
__author__ = "Rui Meng"
__email__ = "rui.meng@pitt.edu"
if __name__ == '__main__':
pass
avg_line_length: 13.166667 | max_line_length: 31 | alphanum_fraction: 0.620253
hexsha: acd1fbf32e2713c6a1e36d0b3d940b9ca8919618 | size: 11,668 | ext: py | lang: Python
max_stars_repo: torchdynamo/variables/lists.py @ frank-wei/torchdynamo (26c4c1b593bebf4246e566749dade38254b59ffb) | licenses: ["BSD-3-Clause"] | max_stars_count: 41 | stars events: 2021-09-14T14:58:06.000Z to 2022-02-25T12:36:22.000Z
max_issues_repo: torchdynamo/variables/lists.py @ frank-wei/torchdynamo (26c4c1b593bebf4246e566749dade38254b59ffb) | licenses: ["BSD-3-Clause"] | max_issues_count: 62 | issues events: 2022-03-11T00:12:37.000Z to 2022-03-31T20:04:27.000Z
max_forks_repo: torchdynamo/variables/lists.py @ frank-wei/torchdynamo (26c4c1b593bebf4246e566749dade38254b59ffb) | licenses: ["BSD-3-Clause"] | max_forks_count: 6 | forks events: 2021-11-19T17:50:56.000Z to 2022-02-10T22:31:58.000Z
content:
from typing import Dict
from typing import List
import torch
from .. import variables
from ..bytecode_transformation import create_instruction
from ..exc import unimplemented
from ..source import GetItemSource
from ..utils import namedtuple_fields
from .base import MutableLocal
from .base import VariableTracker
class BaseListVariable(VariableTracker):
@staticmethod
def cls_for(obj):
return {
iter: ListIteratorVariable,
list: ListVariable,
slice: SliceVariable,
torch.Size: SizeVariable,
tuple: TupleVariable,
}[obj]
def __init__(self, items: List[VariableTracker], **kwargs):
super(BaseListVariable, self).__init__(**kwargs)
assert isinstance(items, list)
assert all(isinstance(x, VariableTracker) for x in items)
self.items: List[VariableTracker] = items
def _as_proxy(self):
return [x.as_proxy() for x in self.items]
def as_python_constant(self):
return self.python_type()([x.as_python_constant() for x in self.items])
def as_proxy(self):
return self.python_type()(self._as_proxy())
def getitem_const(self, arg: VariableTracker):
index = arg.as_python_constant()
if isinstance(index, slice):
if self.source is not None:
return self.clone(
items=self.items[index],
source=GetItemSource(self.source, index),
mutable_local=None,
).add_options(arg, self)
else:
return self.clone(
items=self.items[index], mutable_local=None
).add_options(arg, self)
else:
assert isinstance(index, int)
return self.items[index].add_options(arg, self)
def unpack_var_sequence(self, tx):
return [x.add_options(self) for x in self.items]
def call_method(
self,
tx,
name,
args: "List[VariableTracker]",
kwargs: "Dict[str, VariableTracker]",
) -> "VariableTracker":
options = VariableTracker.propagate(self, args, kwargs.values())
if name == "__getitem__":
assert not kwargs and len(args) == 1
return self.getitem_const(args[0])
elif name == "__add__":
assert not kwargs and len(args) == 1
return type(self)(self.items + args[0].items, **options)
elif (
name == "__contains__"
and len(args) == 1
and args[0].is_python_constant()
and all(x.is_python_constant() for x in self.items)
):
assert not kwargs
search = args[0].as_python_constant()
result = any(x.as_python_constant() == search for x in self.items)
return variables.ConstantVariable(result, **options)
return super(BaseListVariable, self).call_method(tx, name, args, kwargs)
class RangeVariable(BaseListVariable):
def __init__(self, value, items=None, guards=None, **kwargs):
if items is None:
items = [variables.ConstantVariable(x, guards=guards) for x in value]
super().__init__(items, guards=guards, **kwargs)
self.value = value
def python_type(self):
return range
def as_python_constant(self):
return self.value
def reconstruct(self, codegen):
assert "range" not in codegen.tx.f_globals
range_fn = codegen.create_load_global("range", add=True)
if self.value.step == 1:
if self.value.start == 0:
return [
range_fn,
codegen.create_load_const(self.value.stop),
create_instruction("CALL_FUNCTION", 1),
]
return [
range_fn,
codegen.create_load_const(self.value.start),
codegen.create_load_const(self.value.stop),
create_instruction("CALL_FUNCTION", 2),
]
return [
range_fn,
codegen.create_load_const(self.value.start),
codegen.create_load_const(self.value.stop),
codegen.create_load_const(self.value.step),
create_instruction("CALL_FUNCTION", 3),
]
class ListVariable(BaseListVariable):
def python_type(self):
return list
def reconstruct(self, codegen):
codegen.foreach(self.items)
return [create_instruction("BUILD_LIST", len(self.items))]
def call_method(
self,
tx,
name,
args: "List[VariableTracker]",
kwargs: "Dict[str, VariableTracker]",
) -> "VariableTracker":
options = VariableTracker.propagate(self, args, kwargs.values())
if name == "append" and self.mutable_local:
assert not kwargs
(arg,) = args
return tx.replace_all(
self,
ListVariable(self.items + [arg], **options),
)
elif (
name in ("extend", "__iadd__")
and self.mutable_local
and args
and args[0].has_unpack_var_sequence(tx)
):
assert not kwargs
(arg,) = args
return tx.replace_all(
self,
ListVariable(
list(self.items) + list(arg.unpack_var_sequence(tx)),
**options,
),
)
elif name == "insert" and self.mutable_local:
assert not kwargs
idx, value = args
items = list(self.items)
items.insert(idx.as_python_constant(), value)
return tx.replace_all(
self,
ListVariable(items, **options),
)
elif name == "pop" and self.mutable_local:
assert not kwargs
items = list(self.items)
result = items.pop(*[a.as_python_constant() for a in args])
tx.replace_all(
self,
ListVariable(items, **options),
)
return result
elif name == "clear" and self.mutable_local:
assert not kwargs and not args
return tx.replace_all(
self,
ListVariable([], **options),
)
elif (
name == "__setitem__"
and self.mutable_local
and args
and args[0].is_python_constant()
):
assert not kwargs
key, value = args
items = list(self.items)
items[key.as_python_constant()] = value
result = ListVariable(items, **options)
return tx.replace_all(self, result)
else:
return super().call_method(tx, name, args, kwargs)
class TupleVariable(BaseListVariable):
def python_type(self):
return tuple
def reconstruct(self, codegen):
codegen.foreach(self.items)
return [create_instruction("BUILD_TUPLE", len(self.items))]
def call_method(
self,
tx,
name,
args: "List[VariableTracker]",
kwargs: "Dict[str, VariableTracker]",
) -> "VariableTracker":
options = VariableTracker.propagate(self, args, kwargs.values())
if (
name in ("__add__", "__iadd__")
and len(args) == 1
and isinstance(args[0], TupleVariable)
):
assert not kwargs
return TupleVariable(self.items + args[0].items, **options)
elif (
name in ("__add__", "__iadd__")
and len(args) == 1
and isinstance(args[0], variables.ConstantVariable)
):
assert not kwargs
return TupleVariable(
self.items + list(args[0].unpack_var_sequence(self)), **options
)
return super().call_method(tx, name, args, kwargs)
class SizeVariable(TupleVariable):
"""torch.Size(...)"""
def python_type(self):
return torch.Size
def reconstruct(self, codegen):
codegen.load_import_from("torch", "Size")
codegen.foreach(self.items)
build_torch_size = [
create_instruction("BUILD_TUPLE", len(self.items)),
create_instruction("CALL_FUNCTION", 1),
]
return build_torch_size
class NamedTupleVariable(TupleVariable):
def __init__(self, items, tuple_cls, **kwargs):
super().__init__(items, **kwargs)
self.tuple_cls = tuple_cls
def python_type(self):
return self.tuple_cls
def reconstruct(self, codegen):
create_fn = getattr(self.tuple_cls, "_make", self.tuple_cls)
codegen.append_output(codegen._create_load_const(create_fn))
codegen.foreach(self.items)
return [
create_instruction("BUILD_TUPLE", len(self.items)),
create_instruction("CALL_FUNCTION", 1),
]
def var_getattr(self, tx, name):
fields = namedtuple_fields(self.tuple_cls)
if name not in fields:
unimplemented(f"NamedTupleVariable.{name}")
return self.items[fields.index(name)].add_options(self)
def call_hasattr(self, tx, name: str) -> "VariableTracker":
options = VariableTracker.propagate(self)
fields = namedtuple_fields(self.tuple_cls)
return variables.ConstantVariable(name in fields, **options)
class SliceVariable(BaseListVariable):
def __init__(self, items, **kwargs):
start, stop, step = [variables.ConstantVariable(None)] * 3
if len(items) == 1:
(stop,) = items
elif len(items) == 2:
start, stop = items
elif len(items) == 3:
start, stop, step = items
else:
assert False
super().__init__([start, stop, step], **kwargs)
def as_proxy(self):
return slice(*self._as_proxy())
def python_type(self):
return slice
def as_python_constant(self):
return slice(*[x.as_python_constant() for x in self.items])
def reconstruct(self, codegen):
codegen.foreach(self.items)
return [create_instruction("BUILD_SLICE", len(self.items))]
def var_getattr(self, tx, name):
fields = ["start", "stop", "step"]
if name not in fields:
unimplemented(f"slice.{name}")
return self.items[fields.index(name)].add_options(self)
class ListIteratorVariable(VariableTracker):
def __init__(self, items, index: int = 0, **kwargs):
super(ListIteratorVariable, self).__init__(**kwargs)
assert isinstance(items, list)
assert all(isinstance(x, VariableTracker) for x in items)
self.items = items
self.index = index
def next_variables(self):
assert self.mutable_local
if self.index >= len(self.items):
raise StopIteration()
return self.items[self.index].add_options(self), ListIteratorVariable(
self.items,
self.index + 1,
mutable_local=MutableLocal(),
**VariableTracker.propagate([self]),
)
def as_python_constant(self):
if self.index > 0:
raise NotImplementedError()
return iter([x.as_python_constant() for x in self.items])
def unpack_var_sequence(self, tx):
return [x.add_options(self) for x in self.items[self.index :]]
def reconstruct(self, codegen):
remaining_items = self.items[self.index :]
codegen.foreach(remaining_items)
return [
create_instruction("BUILD_TUPLE", len(remaining_items)),
create_instruction("GET_ITER"),
]
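# --- Illustrative sketch (not part of torchdynamo): RangeVariable.reconstruct above
# emits range(stop), range(start, stop) or range(start, stop, step) depending on which
# defaults apply. The same argument-count decision, restated in plain Python:
def _range_call_args_sketch(start, stop, step):
    # Mirrors the branches in RangeVariable.reconstruct; 1 and 0 are the defaults it
    # checks for step and start respectively.
    if step == 1:
        if start == 0:
            return (stop,)
        return (start, stop)
    return (start, stop, step)
assert range(*_range_call_args_sketch(0, 5, 1)) == range(5)
assert range(*_range_call_args_sketch(2, 5, 1)) == range(2, 5)
assert range(*_range_call_args_sketch(2, 9, 3)) == range(2, 9, 3)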
| 33.147727
| 81
| 0.580905
|
374f4269ce4ea2819f2c0716553a24bf9355229d
| 530
|
py
|
Python
|
projects/migrations/0015_review_project_id.py
|
kiptoo-rotich/Awards
|
7206861279df86328b49e81af0429380a14676de
|
[
"MIT"
] | null | null | null |
projects/migrations/0015_review_project_id.py
|
kiptoo-rotich/Awards
|
7206861279df86328b49e81af0429380a14676de
|
[
"MIT"
] | null | null | null |
projects/migrations/0015_review_project_id.py
|
kiptoo-rotich/Awards
|
7206861279df86328b49e81af0429380a14676de
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-07-19 16:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('projects', '0014_remove_review_project_id'),
]
operations = [
migrations.AddField(
model_name='review',
name='project_id',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='projects.projects'),
preserve_default=False,
),
]
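# --- Illustrative sketch (assumption, not taken from the project): the AddField above
# implies a model declaration in projects/models.py roughly like the one below. The
# target model name is inferred from to='projects.projects', and default=1 applies only
# to the migration itself because preserve_default=False.
#
# class Review(models.Model):
#     project_id = models.ForeignKey('projects.Projects', on_delete=models.CASCADE)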
| 25.238095
| 116
| 0.645283
|
90a6ee5541cdf2baf41de31f5e91b873be0c5a34
| 39,294
|
py
|
Python
|
test/aqua/operators/test_op_construction.py
|
TDHTTTT/qiskit-aqua
|
3486ea6da7a2c799c86a6590d2813088d76c99e0
|
[
"Apache-2.0"
] | 1
|
2020-10-02T15:35:03.000Z
|
2020-10-02T15:35:03.000Z
|
test/aqua/operators/test_op_construction.py
|
vikpande/qiskit-aqua
|
b45f952e342398156e9ed61a2126a85b4d55effd
|
[
"Apache-2.0"
] | null | null | null |
test/aqua/operators/test_op_construction.py
|
vikpande/qiskit-aqua
|
b45f952e342398156e9ed61a2126a85b4d55effd
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Operator construction, including OpPrimitives and singletons. """
import unittest
from test.aqua import QiskitAquaTestCase
import itertools
import scipy
from scipy.stats import unitary_group
import numpy as np
from ddt import ddt, data
from qiskit import QiskitError
from qiskit.aqua import AquaError
from qiskit.circuit import QuantumCircuit, QuantumRegister, Instruction, Parameter, ParameterVector
from qiskit.extensions.exceptions import ExtensionError
from qiskit.quantum_info import Operator, Pauli, Statevector
from qiskit.circuit.library import CZGate, ZGate
from qiskit.aqua.operators import (
X, Y, Z, I, CX, T, H, Minus, PrimitiveOp, PauliOp, CircuitOp, MatrixOp, EvolvedOp, StateFn,
CircuitStateFn, VectorStateFn, DictStateFn, OperatorStateFn, ListOp, ComposedOp, TensoredOp,
SummedOp
)
# pylint: disable=invalid-name
@ddt
class TestOpConstruction(QiskitAquaTestCase):
"""Operator Construction tests."""
def test_pauli_primitives(self):
""" from to file test """
newop = X ^ Y ^ Z ^ I
self.assertEqual(newop.primitive, Pauli(label='XYZI'))
kpower_op = (Y ^ 5) ^ (I ^ 3)
self.assertEqual(kpower_op.primitive, Pauli(label='YYYYYIII'))
kpower_op2 = (Y ^ I) ^ 4
self.assertEqual(kpower_op2.primitive, Pauli(label='YIYIYIYI'))
# Check immutability
self.assertEqual(X.primitive, Pauli(label='X'))
self.assertEqual(Y.primitive, Pauli(label='Y'))
self.assertEqual(Z.primitive, Pauli(label='Z'))
self.assertEqual(I.primitive, Pauli(label='I'))
def test_composed_eval(self):
""" Test eval of ComposedOp """
self.assertAlmostEqual(Minus.eval('1'), -.5 ** .5)
def test_evals(self):
""" evals test """
# pylint: disable=no-member
# TODO: Think about eval names
self.assertEqual(Z.eval('0').eval('0'), 1)
self.assertEqual(Z.eval('1').eval('0'), 0)
self.assertEqual(Z.eval('0').eval('1'), 0)
self.assertEqual(Z.eval('1').eval('1'), -1)
self.assertEqual(X.eval('0').eval('0'), 0)
self.assertEqual(X.eval('1').eval('0'), 1)
self.assertEqual(X.eval('0').eval('1'), 1)
self.assertEqual(X.eval('1').eval('1'), 0)
self.assertEqual(Y.eval('0').eval('0'), 0)
self.assertEqual(Y.eval('1').eval('0'), -1j)
self.assertEqual(Y.eval('0').eval('1'), 1j)
self.assertEqual(Y.eval('1').eval('1'), 0)
with self.assertRaises(ValueError):
Y.eval('11')
with self.assertRaises(ValueError):
(X ^ Y).eval('1111')
with self.assertRaises(ValueError):
Y.eval((X ^ X).to_matrix_op())
# Check that Pauli logic eval returns same as matrix logic
self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('0').eval('0'), 1)
self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('1').eval('0'), 0)
self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('0').eval('1'), 0)
self.assertEqual(PrimitiveOp(Z.to_matrix()).eval('1').eval('1'), -1)
self.assertEqual(PrimitiveOp(X.to_matrix()).eval('0').eval('0'), 0)
self.assertEqual(PrimitiveOp(X.to_matrix()).eval('1').eval('0'), 1)
self.assertEqual(PrimitiveOp(X.to_matrix()).eval('0').eval('1'), 1)
self.assertEqual(PrimitiveOp(X.to_matrix()).eval('1').eval('1'), 0)
self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('0').eval('0'), 0)
self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('1').eval('0'), -1j)
self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('0').eval('1'), 1j)
self.assertEqual(PrimitiveOp(Y.to_matrix()).eval('1').eval('1'), 0)
pauli_op = Z ^ I ^ X ^ Y
mat_op = PrimitiveOp(pauli_op.to_matrix())
full_basis = list(map(''.join, itertools.product('01', repeat=pauli_op.num_qubits)))
for bstr1, bstr2 in itertools.product(full_basis, full_basis):
# print('{} {} {} {}'.format(bstr1, bstr2, pauli_op.eval(bstr1, bstr2),
# mat_op.eval(bstr1, bstr2)))
np.testing.assert_array_almost_equal(pauli_op.eval(bstr1).eval(bstr2),
mat_op.eval(bstr1).eval(bstr2))
gnarly_op = SummedOp([(H ^ I ^ Y).compose(X ^ X ^ Z).tensor(Z),
PrimitiveOp(Operator.from_label('+r0I')),
3 * (X ^ CX ^ T)], coeff=3 + .2j)
gnarly_mat_op = PrimitiveOp(gnarly_op.to_matrix())
full_basis = list(map(''.join, itertools.product('01', repeat=gnarly_op.num_qubits)))
for bstr1, bstr2 in itertools.product(full_basis, full_basis):
np.testing.assert_array_almost_equal(gnarly_op.eval(bstr1).eval(bstr2),
gnarly_mat_op.eval(bstr1).eval(bstr2))
def test_circuit_construction(self):
""" circuit construction test """
hadq2 = H ^ I
cz = hadq2.compose(CX).compose(hadq2)
qc = QuantumCircuit(2)
qc.append(cz.primitive, qargs=range(2))
ref_cz_mat = PrimitiveOp(CZGate()).to_matrix()
np.testing.assert_array_almost_equal(cz.to_matrix(), ref_cz_mat)
def test_io_consistency(self):
""" consistency test """
new_op = X ^ Y ^ I
label = 'XYI'
# label = new_op.primitive.to_label()
self.assertEqual(str(new_op.primitive), label)
np.testing.assert_array_almost_equal(new_op.primitive.to_matrix(),
Operator.from_label(label).data)
self.assertEqual(new_op.primitive, Pauli(label=label))
x_mat = X.primitive.to_matrix()
y_mat = Y.primitive.to_matrix()
i_mat = np.eye(2, 2)
np.testing.assert_array_almost_equal(new_op.primitive.to_matrix(),
np.kron(np.kron(x_mat, y_mat), i_mat))
hi = np.kron(H.to_matrix(), I.to_matrix())
hi2 = Operator.from_label('HI').data
hi3 = (H ^ I).to_matrix()
np.testing.assert_array_almost_equal(hi, hi2)
np.testing.assert_array_almost_equal(hi2, hi3)
xy = np.kron(X.to_matrix(), Y.to_matrix())
xy2 = Operator.from_label('XY').data
xy3 = (X ^ Y).to_matrix()
np.testing.assert_array_almost_equal(xy, xy2)
np.testing.assert_array_almost_equal(xy2, xy3)
# Check if numpy array instantiation is the same as from Operator
matrix_op = Operator.from_label('+r')
np.testing.assert_array_almost_equal(PrimitiveOp(matrix_op).to_matrix(),
PrimitiveOp(matrix_op.data).to_matrix())
# Ditto list of lists
np.testing.assert_array_almost_equal(PrimitiveOp(matrix_op.data.tolist()).to_matrix(),
PrimitiveOp(matrix_op.data).to_matrix())
# TODO make sure this works once we resolve endianness mayhem
# qc = QuantumCircuit(3)
# qc.x(2)
# qc.y(1)
# from qiskit import BasicAer, QuantumCircuit, execute
# unitary = execute(qc, BasicAer.get_backend('unitary_simulator')).result().get_unitary()
# np.testing.assert_array_almost_equal(new_op.primitive.to_matrix(), unitary)
def test_to_matrix(self):
"""to matrix text """
np.testing.assert_array_equal(X.to_matrix(), Operator.from_label('X').data)
np.testing.assert_array_equal(Y.to_matrix(), Operator.from_label('Y').data)
np.testing.assert_array_equal(Z.to_matrix(), Operator.from_label('Z').data)
op1 = Y + H
np.testing.assert_array_almost_equal(op1.to_matrix(), Y.to_matrix() + H.to_matrix())
op2 = op1 * .5
np.testing.assert_array_almost_equal(op2.to_matrix(), op1.to_matrix() * .5)
op3 = (4 - .6j) * op2
np.testing.assert_array_almost_equal(op3.to_matrix(), op2.to_matrix() * (4 - .6j))
op4 = op3.tensor(X)
np.testing.assert_array_almost_equal(op4.to_matrix(),
np.kron(op3.to_matrix(), X.to_matrix()))
op5 = op4.compose(H ^ I)
np.testing.assert_array_almost_equal(op5.to_matrix(), np.dot(op4.to_matrix(),
(H ^ I).to_matrix()))
op6 = op5 + PrimitiveOp(Operator.from_label('+r').data)
np.testing.assert_array_almost_equal(
op6.to_matrix(), op5.to_matrix() + Operator.from_label('+r').data)
def test_circuit_op_to_matrix(self):
""" test CircuitOp.to_matrix """
qc = QuantumCircuit(1)
qc.rz(1.0, 0)
qcop = CircuitOp(qc)
np.testing.assert_array_almost_equal(
qcop.to_matrix(), scipy.linalg.expm(-0.5j * Z.to_matrix()))
def test_matrix_to_instruction(self):
"""Test MatrixOp.to_instruction yields an Instruction object."""
matop = (H ^ 3).to_matrix_op()
with self.subTest('assert to_instruction returns Instruction'):
self.assertIsInstance(matop.to_instruction(), Instruction)
matop = ((H ^ 3) + (Z ^ 3)).to_matrix_op()
with self.subTest('matrix operator is not unitary'):
with self.assertRaises(ExtensionError):
matop.to_instruction()
def test_adjoint(self):
""" adjoint test """
gnarly_op = 3 * (H ^ I ^ Y).compose(X ^ X ^ Z).tensor(T ^ Z) + \
PrimitiveOp(Operator.from_label('+r0IX').data)
np.testing.assert_array_almost_equal(np.conj(np.transpose(gnarly_op.to_matrix())),
gnarly_op.adjoint().to_matrix())
def test_primitive_strings(self):
""" get primitives test """
self.assertEqual(X.primitive_strings(), {'Pauli'})
gnarly_op = 3 * (H ^ I ^ Y).compose(X ^ X ^ Z).tensor(T ^ Z) + \
PrimitiveOp(Operator.from_label('+r0IX').data)
self.assertEqual(gnarly_op.primitive_strings(), {'QuantumCircuit', 'Matrix'})
def test_to_pauli_op(self):
""" Test to_pauli_op method """
gnarly_op = 3 * (H ^ I ^ Y).compose(X ^ X ^ Z).tensor(T ^ Z) + \
PrimitiveOp(Operator.from_label('+r0IX').data)
mat_op = gnarly_op.to_matrix_op()
pauli_op = gnarly_op.to_pauli_op()
self.assertIsInstance(pauli_op, SummedOp)
for p in pauli_op:
self.assertIsInstance(p, PauliOp)
np.testing.assert_array_almost_equal(mat_op.to_matrix(), pauli_op.to_matrix())
def test_circuit_permute(self):
r""" Test the CircuitOp's .permute method """
perm = range(7)[::-1]
c_op = (((CX ^ 3) ^ X) @
(H ^ 7) @
(X ^ Y ^ Z ^ I ^ X ^ X ^ X) @
(Y ^ (CX ^ 3)) @
(X ^ Y ^ Z ^ I ^ X ^ X ^ X))
c_op_perm = c_op.permute(perm)
self.assertNotEqual(c_op, c_op_perm)
c_op_id = c_op_perm.permute(perm)
self.assertEqual(c_op, c_op_id)
def test_summed_op_reduce(self):
"""Test SummedOp"""
sum_op = (X ^ X * 2) + (Y ^ Y) # type: SummedOp
with self.subTest('SummedOp test 1'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [2, 1])
sum_op = (X ^ X * 2) + (Y ^ Y)
sum_op += Y ^ Y
with self.subTest('SummedOp test 2-a'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [2, 1, 1])
sum_op = sum_op.collapse_summands()
with self.subTest('SummedOp test 2-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [2, 2])
sum_op = (X ^ X * 2) + (Y ^ Y)
sum_op += (Y ^ Y) + (X ^ X * 2)
with self.subTest('SummedOp test 3-a'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'YY', 'XX'])
self.assertListEqual([op.coeff for op in sum_op], [2, 1, 1, 2])
sum_op = sum_op.reduce()
with self.subTest('SummedOp test 3-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [4, 2])
sum_op = SummedOp([X ^ X * 2, Y ^ Y], 2)
with self.subTest('SummedOp test 4-a'):
self.assertEqual(sum_op.coeff, 2)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [2, 1])
sum_op = sum_op.collapse_summands()
with self.subTest('SummedOp test 4-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [4, 2])
sum_op = SummedOp([X ^ X * 2, Y ^ Y], 2)
sum_op += Y ^ Y
with self.subTest('SummedOp test 5-a'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [4, 2, 1])
sum_op = sum_op.collapse_summands()
with self.subTest('SummedOp test 5-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [4, 3])
sum_op = SummedOp([X ^ X * 2, Y ^ Y], 2)
sum_op += (X ^ X) * 2 + (Y ^ Y)
with self.subTest('SummedOp test 6-a'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [4, 2, 2, 1])
sum_op = sum_op.collapse_summands()
with self.subTest('SummedOp test 6-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [6, 3])
sum_op = SummedOp([X ^ X * 2, Y ^ Y], 2)
sum_op += sum_op
with self.subTest('SummedOp test 7-a'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [4, 2, 4, 2])
sum_op = sum_op.collapse_summands()
with self.subTest('SummedOp test 7-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY'])
self.assertListEqual([op.coeff for op in sum_op], [8, 4])
sum_op = SummedOp([X ^ X * 2, Y ^ Y], 2) + SummedOp([X ^ X * 2, Z ^ Z], 3)
with self.subTest('SummedOp test 8-a'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'XX', 'ZZ'])
self.assertListEqual([op.coeff for op in sum_op], [4, 2, 6, 3])
sum_op = sum_op.collapse_summands()
with self.subTest('SummedOp test 8-b'):
self.assertEqual(sum_op.coeff, 1)
self.assertListEqual([str(op.primitive) for op in sum_op], ['XX', 'YY', 'ZZ'])
self.assertListEqual([op.coeff for op in sum_op], [10, 2, 3])
def test_compose_op_of_different_dim(self):
"""
Test if smaller operator expands to correct dim when composed with bigger operator.
Test if PrimitiveOps compose methods are consistent.
"""
# PauliOps of different dim
xy_p = (X ^ Y)
xyz_p = (X ^ Y ^ Z)
pauli_op = xy_p @ xyz_p
expected_result = (I ^ I ^ Z)
self.assertEqual(pauli_op, expected_result)
# MatrixOps of different dim
xy_m = xy_p.to_matrix_op()
xyz_m = xyz_p.to_matrix_op()
matrix_op = xy_m @ xyz_m
self.assertEqual(matrix_op, expected_result.to_matrix_op())
# CircuitOps of different dim
xy_c = xy_p.to_circuit_op()
xyz_c = xyz_p.to_circuit_op()
circuit_op = xy_c @ xyz_c
self.assertTrue(np.array_equal(pauli_op.to_matrix(), matrix_op.to_matrix()))
self.assertTrue(np.allclose(pauli_op.to_matrix(), circuit_op.to_matrix(), rtol=1e-14))
self.assertTrue(np.allclose(matrix_op.to_matrix(), circuit_op.to_matrix(), rtol=1e-14))
def test_permute_on_primitive_op(self):
""" Test if permute methods of PrimitiveOps are consistent and work as expected. """
indices = [1, 2, 4]
# PauliOp
pauli_op = (X ^ Y ^ Z)
permuted_pauli_op = pauli_op.permute(indices)
expected_pauli_op = (X ^ I ^ Y ^ Z ^ I)
self.assertEqual(permuted_pauli_op, expected_pauli_op)
# CircuitOp
circuit_op = pauli_op.to_circuit_op()
permuted_circuit_op = circuit_op.permute(indices)
expected_circuit_op = expected_pauli_op.to_circuit_op()
self.assertEqual(permuted_circuit_op.primitive.__str__(),
expected_circuit_op.primitive.__str__())
# MatrixOp
matrix_op = pauli_op.to_matrix_op()
permuted_matrix_op = matrix_op.permute(indices)
expected_matrix_op = expected_pauli_op.to_matrix_op()
equal = np.allclose(permuted_matrix_op.to_matrix(), expected_matrix_op.to_matrix())
self.assertTrue(equal)
def test_permute_on_list_op(self):
""" Test if ListOp permute method is consistent with PrimitiveOps permute methods. """
op1 = (X ^ Y ^ Z).to_circuit_op()
op2 = (Z ^ X ^ Y)
# ComposedOp
indices = [1, 2, 0]
primitive_op = op1 @ op2
primitive_op_perm = primitive_op.permute(indices) # CircuitOp.permute
composed_op = ComposedOp([op1, op2])
composed_op_perm = composed_op.permute(indices)
# reduce the ListOp to PrimitiveOp
to_primitive = composed_op_perm.oplist[0] @ composed_op_perm.oplist[1]
# compare resulting PrimitiveOps
equal = np.allclose(primitive_op_perm.to_matrix(), to_primitive.to_matrix())
self.assertTrue(equal)
# TensoredOp
indices = [3, 5, 4, 0, 2, 1]
primitive_op = op1 ^ op2
primitive_op_perm = primitive_op.permute(indices)
tensored_op = TensoredOp([op1, op2])
tensored_op_perm = tensored_op.permute(indices)
# reduce the ListOp to PrimitiveOp
composed_oplist = tensored_op_perm.oplist
to_primitive = \
composed_oplist[0] @ (composed_oplist[1].oplist[0] ^ composed_oplist[1].oplist[1]) @ \
composed_oplist[2]
# compare resulting PrimitiveOps
equal = np.allclose(primitive_op_perm.to_matrix(), to_primitive.to_matrix())
self.assertTrue(equal)
# SummedOp
primitive_op = (X ^ Y ^ Z)
summed_op = SummedOp([primitive_op])
indices = [1, 2, 0]
primitive_op_perm = primitive_op.permute(indices) # PauliOp.permute
summed_op_perm = summed_op.permute(indices)
# reduce the ListOp to PrimitiveOp
to_primitive = summed_op_perm.oplist[0] @ primitive_op @ summed_op_perm.oplist[2]
# compare resulting PrimitiveOps
equal = np.allclose(primitive_op_perm.to_matrix(), to_primitive.to_matrix())
self.assertTrue(equal)
def test_expand_on_list_op(self):
""" Test if expanded ListOp has expected num_qubits. """
add_qubits = 3
# ComposedOp
composed_op = ComposedOp([(X ^ Y ^ Z), (H ^ T), (Z ^ X ^ Y ^ Z).to_matrix_op()])
expanded = composed_op._expand_dim(add_qubits)
self.assertEqual(composed_op.num_qubits + add_qubits, expanded.num_qubits)
# TensoredOp
tensored_op = TensoredOp([(X ^ Y), (Z ^ I)])
expanded = tensored_op._expand_dim(add_qubits)
self.assertEqual(tensored_op.num_qubits + add_qubits, expanded.num_qubits)
# SummedOp
summed_op = SummedOp([(X ^ Y), (Z ^ I ^ Z)])
expanded = summed_op._expand_dim(add_qubits)
self.assertEqual(summed_op.num_qubits + add_qubits, expanded.num_qubits)
def test_expand_on_state_fn(self):
""" Test if expanded StateFn has expected num_qubits. """
num_qubits = 3
add_qubits = 2
# case CircuitStateFn, with primitive QuantumCircuit
qc2 = QuantumCircuit(num_qubits)
qc2.cx(0, 1)
cfn = CircuitStateFn(qc2, is_measurement=True)
cfn_exp = cfn._expand_dim(add_qubits)
self.assertEqual(cfn_exp.num_qubits, add_qubits + num_qubits)
# case OperatorStateFn, with OperatorBase primitive, in our case CircuitStateFn
osfn = OperatorStateFn(cfn)
osfn_exp = osfn._expand_dim(add_qubits)
self.assertEqual(osfn_exp.num_qubits, add_qubits + num_qubits)
# case DictStateFn
dsfn = DictStateFn('1'*num_qubits, is_measurement=True)
self.assertEqual(dsfn.num_qubits, num_qubits)
dsfn_exp = dsfn._expand_dim(add_qubits)
self.assertEqual(dsfn_exp.num_qubits, num_qubits + add_qubits)
# case VectorStateFn
vsfn = VectorStateFn(np.ones(2**num_qubits, dtype=complex))
self.assertEqual(vsfn.num_qubits, num_qubits)
vsfn_exp = vsfn._expand_dim(add_qubits)
self.assertEqual(vsfn_exp.num_qubits, num_qubits + add_qubits)
def test_permute_on_state_fn(self):
""" Test if StateFns permute are consistent. """
num_qubits = 4
dim = 2**num_qubits
primitive_list = [1.0/(i+1) for i in range(dim)]
primitive_dict = {format(i, 'b').zfill(num_qubits): 1.0/(i+1) for i in range(dim)}
dict_fn = DictStateFn(primitive=primitive_dict, is_measurement=True)
vec_fn = VectorStateFn(primitive=primitive_list, is_measurement=True)
# check if dict_fn and vec_fn are equivalent
equivalent = np.allclose(dict_fn.to_matrix(), vec_fn.to_matrix())
self.assertTrue(equivalent)
# permute
indices = [2, 3, 0, 1]
permute_dict = dict_fn.permute(indices)
permute_vect = vec_fn.permute(indices)
equivalent = np.allclose(permute_dict.to_matrix(), permute_vect.to_matrix())
self.assertTrue(equivalent)
def test_compose_consistency(self):
"""Test if PrimitiveOp @ ComposedOp is consistent with ComposedOp @ PrimitiveOp."""
# PauliOp
op1 = (X ^ Y ^ Z)
op2 = (X ^ Y ^ Z)
op3 = (X ^ Y ^ Z).to_circuit_op()
comp1 = op1 @ ComposedOp([op2, op3])
comp2 = ComposedOp([op3, op2]) @ op1
self.assertListEqual(comp1.oplist, list(reversed(comp2.oplist)))
        # CircuitOp
op1 = op1.to_circuit_op()
op2 = op2.to_circuit_op()
op3 = op3.to_matrix_op()
comp1 = op1 @ ComposedOp([op2, op3])
comp2 = ComposedOp([op3, op2]) @ op1
self.assertListEqual(comp1.oplist, list(reversed(comp2.oplist)))
# MatrixOp
op1 = op1.to_matrix_op()
op2 = op2.to_matrix_op()
op3 = op3.to_pauli_op()
comp1 = op1 @ ComposedOp([op2, op3])
comp2 = ComposedOp([op3, op2]) @ op1
self.assertListEqual(comp1.oplist, list(reversed(comp2.oplist)))
def test_compose_with_indices(self):
""" Test compose method using its permutation feature."""
pauli_op = (X ^ Y ^ Z)
circuit_op = (T ^ H)
matrix_op = (X ^ Y ^ H ^ T).to_matrix_op()
evolved_op = EvolvedOp(matrix_op)
# composition of PrimitiveOps
num_qubits = 4
primitive_op = pauli_op @ circuit_op @ matrix_op
composed_op = pauli_op @ circuit_op @ evolved_op
self.assertEqual(primitive_op.num_qubits, num_qubits)
self.assertEqual(composed_op.num_qubits, num_qubits)
# with permutation
num_qubits = 5
indices = [1, 4]
permuted_primitive_op = evolved_op @ circuit_op.permute(indices) @ pauli_op @ matrix_op
composed_primitive_op = \
evolved_op @ pauli_op.compose(circuit_op, permutation=indices, front=True) @ matrix_op
self.assertTrue(np.allclose(permuted_primitive_op.to_matrix(),
composed_primitive_op.to_matrix()))
self.assertEqual(num_qubits, permuted_primitive_op.num_qubits)
# ListOp
num_qubits = 6
tensored_op = TensoredOp([pauli_op, circuit_op])
summed_op = pauli_op + circuit_op.permute([2, 1])
composed_op = circuit_op @ evolved_op @ matrix_op
list_op = summed_op @ composed_op.compose(tensored_op, permutation=[1, 2, 3, 5, 4],
front=True)
self.assertEqual(num_qubits, list_op.num_qubits)
num_qubits = 4
circuit_fn = CircuitStateFn(primitive=circuit_op.primitive, is_measurement=True)
operator_fn = OperatorStateFn(primitive=circuit_op ^ circuit_op, is_measurement=True)
no_perm_op = circuit_fn @ operator_fn
self.assertEqual(no_perm_op.num_qubits, num_qubits)
indices = [0, 4]
perm_op = operator_fn.compose(circuit_fn, permutation=indices, front=True)
self.assertEqual(perm_op.num_qubits, max(indices) + 1)
# StateFn
num_qubits = 3
dim = 2**num_qubits
vec = [1.0/(i+1) for i in range(dim)]
dic = {format(i, 'b').zfill(num_qubits): 1.0/(i+1) for i in range(dim)}
is_measurement = True
op_state_fn = OperatorStateFn(matrix_op, is_measurement=is_measurement) # num_qubit = 4
vec_state_fn = VectorStateFn(vec, is_measurement=is_measurement) # 3
dic_state_fn = DictStateFn(dic, is_measurement=is_measurement) # 3
circ_state_fn = CircuitStateFn(circuit_op.to_circuit(), is_measurement=is_measurement) # 2
composed_op = op_state_fn @ vec_state_fn @ dic_state_fn @ circ_state_fn
self.assertEqual(composed_op.num_qubits, op_state_fn.num_qubits)
# with permutation
perm = [2, 4, 6]
composed = \
op_state_fn @ dic_state_fn.compose(vec_state_fn, permutation=perm, front=True) @ \
circ_state_fn
self.assertEqual(composed.num_qubits, max(perm) + 1)
def test_summed_op_equals(self):
"""Test corner cases of SummedOp's equals function."""
with self.subTest('multiplicative factor'):
self.assertEqual(2 * X, X + X)
with self.subTest('commutative'):
self.assertEqual(X + Z, Z + X)
with self.subTest('circuit and paulis'):
z = CircuitOp(ZGate())
self.assertEqual(Z + z, z + Z)
with self.subTest('matrix op and paulis'):
z = MatrixOp([[1, 0], [0, -1]])
self.assertEqual(Z + z, z + Z)
with self.subTest('matrix multiplicative'):
z = MatrixOp([[1, 0], [0, -1]])
self.assertEqual(2 * z, z + z)
with self.subTest('parameter coefficients'):
expr = Parameter('theta')
z = MatrixOp([[1, 0], [0, -1]])
self.assertEqual(expr * z, expr * z)
with self.subTest('different coefficient types'):
expr = Parameter('theta')
z = MatrixOp([[1, 0], [0, -1]])
self.assertNotEqual(expr * z, 2 * z)
with self.subTest('additions aggregation'):
z = MatrixOp([[1, 0], [0, -1]])
a = z + z + Z
b = 2 * z + Z
c = z + Z + z
self.assertEqual(a, b)
self.assertEqual(b, c)
self.assertEqual(a, c)
def test_circuit_compose_register_independent(self):
"""Test that CircuitOp uses combines circuits independent of the register.
I.e. that is uses ``QuantumCircuit.compose`` over ``combine`` or ``extend``.
"""
op = Z ^ 2
qr = QuantumRegister(2, 'my_qr')
circuit = QuantumCircuit(qr)
composed = op.compose(CircuitOp(circuit))
self.assertEqual(composed.num_qubits, 2)
def test_matrix_op_conversions(self):
"""Test to reveal QiskitError when to_instruction or to_circuit method is called on
parametrized matrix op."""
m = np.array([[0, 0, 1, 0], [0, 0, 0, -1], [1, 0, 0, 0], [0, -1, 0, 0]])
matrix_op = MatrixOp(m, Parameter('beta'))
for method in ['to_instruction', 'to_circuit']:
with self.subTest(method):
# QiskitError: multiplication of Operator with ParameterExpression isn't implemented
self.assertRaises(QiskitError, getattr(matrix_op, method))
def test_primitive_op_to_matrix(self):
"""Test to reveal TypeError: multiplication of 'complex' and 'Parameter' is not
implemented, which is raised on PrimitiveOps with parameter, when to_matrix is called. """
# MatrixOp
m = np.array([[0, 0, 1, 0], [0, 0, 0, -1], [1, 0, 0, 0], [0, -1, 0, 0]])
matrix_op = MatrixOp(m, Parameter('beta'))
# PauliOp
pauli_op = PauliOp(primitive=Pauli(label='XYZ'), coeff=Parameter('beta'))
self.assertRaises(TypeError, pauli_op.to_matrix)
# CircuitOp
qc = QuantumCircuit(2)
qc.cx(0, 1)
circuit_op = CircuitOp(qc, coeff=Parameter('alpha'))
for operator in [matrix_op, pauli_op, circuit_op]:
with self.subTest(operator):
self.assertRaises(TypeError, operator.to_matrix)
def test_list_op_to_circuit(self):
"""Test if unitary ListOps transpile to circuit. """
# generate unitary matrices of dimension 2,4,8, seed is fixed
np.random.seed(233423)
u2 = unitary_group.rvs(2)
u4 = unitary_group.rvs(4)
u8 = unitary_group.rvs(8)
# pauli matrices as numpy.arrays
x = np.array([[0.0, 1.0], [1.0, 0.0]])
y = np.array([[0.0, -1.0j], [1.0j, 0.0]])
z = np.array([[1.0, 0.0], [0.0, -1.0]])
# create MatrixOp and CircuitOp out of matrices
op2 = MatrixOp(u2)
op4 = MatrixOp(u4)
op8 = MatrixOp(u8)
c2 = op2.to_circuit_op()
# algorithm using only matrix operations on numpy.arrays
xu4 = np.kron(x, u4)
zc2 = np.kron(z, u2)
zc2y = np.kron(zc2, y)
matrix = np.matmul(xu4, zc2y)
matrix = np.matmul(matrix, u8)
matrix = np.kron(matrix, u2)
operator = Operator(matrix)
# same algorithm as above, but using PrimitiveOps
list_op = ((X ^ op4) @ (Z ^ c2 ^ Y) @ op8) ^ op2
circuit = list_op.to_circuit()
# verify that ListOp.to_circuit() outputs correct quantum circuit
self.assertTrue(operator.equiv(circuit), "ListOp.to_circuit() outputs wrong circuit!")
def test_composed_op_to_circuit(self):
"""
        Test if unitary ComposedOp transpiles to circuit and represents the expected operator.
Test if to_circuit on non-unitary ListOp raises exception.
"""
x = np.array([[0.0, 1.0], [1.0, 0.0]]) # Pauli X as numpy array
y = np.array([[0.0, -1.0j], [1.0j, 0.0]]) # Pauli Y as numpy array
m1 = np.array([[0, 0, 1, 0], [0, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0]]) # non-unitary
m2 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [0, -1, 0, 0]]) # non-unitary
m_op1 = MatrixOp(m1)
m_op2 = MatrixOp(m2)
pm1 = (X ^ Y) ^ m_op1 # non-unitary TensoredOp
pm2 = (X ^ Y) ^ m_op2 # non-unitary TensoredOp
self.assertRaises(ExtensionError, pm1.to_circuit)
self.assertRaises(ExtensionError, pm2.to_circuit)
summed_op = pm1 + pm2 # unitary SummedOp([TensoredOp, TensoredOp])
circuit = summed_op.to_circuit() # should transpile without any exception
# same algorithm that leads to summed_op above, but using only arrays and matrix operations
unitary = np.kron(np.kron(x, y), m1 + m2)
self.assertTrue(Operator(unitary).equiv(circuit))
def test_op_to_circuit_with_parameters(self):
"""On parametrized SummedOp, to_matrix_op returns ListOp, instead of MatrixOp. To avoid
the infinite recursion, AquaError is raised. """
m1 = np.array([[0, 0, 1, 0], [0, 0, 0, -1], [0, 0, 0, 0], [0, 0, 0, 0]]) # non-unitary
m2 = np.array([[0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0], [0, -1, 0, 0]]) # non-unitary
op1_with_param = MatrixOp(m1, Parameter('alpha')) # non-unitary
op2_with_param = MatrixOp(m2, Parameter('beta')) # non-unitary
summed_op_with_param = op1_with_param + op2_with_param # unitary
self.assertRaises(AquaError, summed_op_with_param.to_circuit) # should raise Aqua error
def test_permute_list_op_with_inconsistent_num_qubits(self):
"""Test if permute raises error if ListOp contains operators with different num_qubits."""
list_op = ListOp([X, X ^ X])
self.assertRaises(AquaError, list_op.permute, [0, 1])
@data(Z, CircuitOp(ZGate()), MatrixOp([[1, 0], [0, -1]]))
def test_op_hashing(self, op):
"""Regression test against faulty set comparison.
Set comparisons rely on a hash table which requires identical objects to have identical
hashes. Thus, the PrimitiveOp.__hash__ should support this requirement.
"""
self.assertEqual(set([2 * op]), set([2 * op]))
@data(Z, CircuitOp(ZGate()), MatrixOp([[1, 0], [0, -1]]))
def test_op_indent(self, op):
"""Test that indentation correctly adds INDENTATION at the beginning of each line"""
initial_str = str(op)
indented_str = op._indent(initial_str)
starts_with_indent = indented_str.startswith(op.INDENTATION)
self.assertTrue(starts_with_indent)
indented_str_content = (
indented_str[len(op.INDENTATION):]
).split("\n{}".format(op.INDENTATION))
self.assertListEqual(indented_str_content, initial_str.split("\n"))
def test_composed_op_immutable_under_eval(self):
"""Test ``ComposedOp.eval`` does not change the operator instance."""
op = 2 * ComposedOp([X])
_ = op.eval()
# previous bug: after op.eval(), op was 2 * ComposedOp([2 * X])
self.assertEqual(op, 2 * ComposedOp([X]))
def test_op_parameters(self):
"""Test that Parameters are stored correctly"""
phi = Parameter('φ')
theta = ParameterVector(name='θ',
length=2)
qc = QuantumCircuit(2)
qc.rz(phi, 0)
qc.rz(phi, 1)
for i in range(2):
qc.rx(theta[i], i)
qc.h(0)
qc.x(1)
l = Parameter('λ')
op = PrimitiveOp(qc,
coeff=l)
params = set([phi, l, *theta.params])
self.assertEqual(params, op.parameters)
self.assertEqual(params, StateFn(op).parameters)
self.assertEqual(params, StateFn(qc, coeff=l).parameters)
def test_list_op_parameters(self):
"""Test that Parameters are stored correctly in a List Operator"""
lam = Parameter('λ')
phi = Parameter('φ')
omega = Parameter('ω')
mat_op = PrimitiveOp([[0, 1],
[1, 0]],
coeff=omega)
qc = QuantumCircuit(1)
qc.rx(phi, 0)
qc_op = PrimitiveOp(qc)
op1 = SummedOp([mat_op, qc_op])
params = [phi, omega]
self.assertEqual(op1.parameters, set(params))
# check list nesting case
op2 = PrimitiveOp([[1, 0],
[0, -1]],
coeff=lam)
list_op = ListOp([op1, op2])
params.append(lam)
self.assertEqual(list_op.parameters, set(params))
@data(VectorStateFn([1, 0]),
DictStateFn({'0': 1}),
CircuitStateFn(QuantumCircuit(1)),
OperatorStateFn(I),
OperatorStateFn(MatrixOp([[1, 0], [0, 1]])),
OperatorStateFn(CircuitOp(QuantumCircuit(1))))
def test_statefn_eval(self, op):
"""Test calling eval on StateFn returns the statevector."""
expected = Statevector([1, 0])
self.assertEqual(op.eval().primitive, expected)
class TestOpMethods(QiskitAquaTestCase):
"""Basic method tests."""
def test_listop_num_qubits(self):
"""Test that ListOp.num_qubits checks that all operators have the same number of qubits."""
op = ListOp([X ^ Y, Y ^ Z])
with self.subTest('All operators have the same numbers of qubits'):
self.assertEqual(op.num_qubits, 2)
op = ListOp([X ^ Y, Y])
with self.subTest('Operators have different numbers of qubits'):
with self.assertRaises(ValueError):
op.num_qubits # pylint: disable=pointless-statement
with self.assertRaises(ValueError):
X @ op # pylint: disable=pointless-statement
class TestListOpComboFn(QiskitAquaTestCase):
"""Test combo fn is propagated."""
def setUp(self):
super().setUp()
self.combo_fn = lambda x: [x_i ** 2 for x_i in x]
self.listop = ListOp([X], combo_fn=self.combo_fn)
def assertComboFnPreserved(self, processed_op):
"""Assert the quadratic combo_fn is preserved."""
x = [1, 2, 3]
self.assertListEqual(processed_op.combo_fn(x), self.combo_fn(x))
def test_at_conversion(self):
"""Test after conversion the combo_fn is preserved."""
for method in ['to_matrix_op', 'to_pauli_op', 'to_circuit_op']:
with self.subTest(method):
converted = getattr(self.listop, method)()
self.assertComboFnPreserved(converted)
def test_after_mul(self):
"""Test after multiplication the combo_fn is preserved."""
self.assertComboFnPreserved(2 * self.listop)
def test_at_traverse(self):
"""Test after traversing the combo_fn is preserved."""
def traverse_fn(op):
return -op
traversed = self.listop.traverse(traverse_fn)
self.assertComboFnPreserved(traversed)
def test_after_adjoint(self):
"""Test after traversing the combo_fn is preserved."""
self.assertComboFnPreserved(self.listop.adjoint())
def test_after_reduce(self):
"""Test after reducing the combo_fn is preserved."""
self.assertComboFnPreserved(self.listop.reduce())
if __name__ == '__main__':
unittest.main()
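# --- Illustrative sketch (not part of the suite): the collapse_summands assertions in
# TestOpConstruction.test_summed_op_reduce amount to merging duplicate primitives by
# summing coefficients while distributing the outer SummedOp coeff. A plain-Python
# analogue of that bookkeeping:
def _collapse_sketch(terms, outer_coeff=1):
    """terms: list of (primitive_label, coeff) pairs, e.g. [('XX', 2), ('YY', 1)]."""
    merged = {}
    for label, coeff in terms:
        merged[label] = merged.get(label, 0) + outer_coeff * coeff
    return list(merged.items())
# _collapse_sketch([('XX', 2), ('YY', 1), ('YY', 1)])     -> [('XX', 2), ('YY', 2)]
# _collapse_sketch([('XX', 2), ('YY', 1)], outer_coeff=2) -> [('XX', 4), ('YY', 2)]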
| 40.803738
| 100
| 0.606505
|
5fb4dd50fc3fd4c152d26007b54559f48bae1c49
| 664
|
py
|
Python
|
tests/test.py
|
xmonader/pyvalidators
|
e4940a73fc1e9466437801df04833bdde2eb9972
|
[
"Apache-2.0"
] | null | null | null |
tests/test.py
|
xmonader/pyvalidators
|
e4940a73fc1e9466437801df04833bdde2eb9972
|
[
"Apache-2.0"
] | null | null | null |
tests/test.py
|
xmonader/pyvalidators
|
e4940a73fc1e9466437801df04833bdde2eb9972
|
[
"Apache-2.0"
] | null | null | null |
from validators.extensions.string import *
from validators.extensions.numeric import *
class Point:
def __init__(self, x, y, name):
self._x = x
self._y = y
self._name = name
@Upper
def name(self):
return self._name
@name.setter
def name(self, val):
self._name = val
@ValidatingProperty
def x(self):
return self._x
@x.setter
def x(self, val):
self._x = val
@Int
def y(self):
        return self._y
    @y.setter
    def y(self, val):
        self._y = val
p = Point(3, 4, "CIRCLE")
print(p)
p.x = "dmdm"
p.y = 9
p.name = "AHMED"
p.name = "ahmed"
| 15.809524
| 43
| 0.545181
|
2f477c6496f89f67b4136845736ec47b0b1d3bc2
| 1,160
|
py
|
Python
|
plugins/zendesk/icon_zendesk/actions/delete_membership/schema.py
|
jakub-kaluza/insightconnect-plugins
|
f36292a65439dfc61cf99f0a1e65ce9ec0703877
|
[
"MIT"
] | null | null | null |
plugins/zendesk/icon_zendesk/actions/delete_membership/schema.py
|
jakub-kaluza/insightconnect-plugins
|
f36292a65439dfc61cf99f0a1e65ce9ec0703877
|
[
"MIT"
] | null | null | null |
plugins/zendesk/icon_zendesk/actions/delete_membership/schema.py
|
jakub-kaluza/insightconnect-plugins
|
f36292a65439dfc61cf99f0a1e65ce9ec0703877
|
[
"MIT"
] | null | null | null |
# GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
DESCRIPTION = "Delete organization membership"
class Input:
MEMBERSHIP_ID = "membership_id"
class Output:
STATUS = "status"
class DeleteMembershipInput(insightconnect_plugin_runtime.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"membership_id": {
"type": "integer",
"title": "Membership ID",
"description": "ID of membership to delete E.g. 1401295821555",
"order": 1
}
},
"required": [
"membership_id"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class DeleteMembershipOutput(insightconnect_plugin_runtime.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"status": {
"type": "boolean",
"title": "Status",
"description": "Success or failure",
"order": 1
}
},
"required": [
"status"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 18.709677
| 69
| 0.609483
|
2cb1f1d4f8eca8271a6f7761ba7d7401042ab08d
| 4,330
|
py
|
Python
|
mooclet_engine/engine/migrations/0001_initial.py
|
marshallho/mooclet-engine
|
176c23299df89b624d008dc903646555c0c0cc4e
|
[
"MIT"
] | 5
|
2018-11-13T22:01:51.000Z
|
2021-05-29T02:37:20.000Z
|
mooclet_engine/engine/migrations/0001_initial.py
|
marshallho/mooclet-engine
|
176c23299df89b624d008dc903646555c0c0cc4e
|
[
"MIT"
] | 3
|
2020-02-11T22:44:25.000Z
|
2021-06-10T17:49:12.000Z
|
mooclet_engine/engine/migrations/0001_initial.py
|
marshallho/mooclet-engine
|
176c23299df89b624d008dc903646555c0c0cc4e
|
[
"MIT"
] | 7
|
2017-01-19T15:58:25.000Z
|
2018-11-10T06:47:44.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-02 17:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Environment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=200)),
],
),
migrations.CreateModel(
name='Learner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('environment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='engine.Environment')),
],
),
migrations.CreateModel(
name='Mooclet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=100)),
('environment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='engine.Environment')),
],
),
migrations.CreateModel(
name='Policy',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('environment', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='engine.Environment')),
],
options={
'verbose_name_plural': 'policies',
},
),
migrations.CreateModel(
name='Value',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.FloatField(blank=True, null=True)),
('text', models.TextField(blank=True, default='')),
('timestamp', models.DateTimeField(auto_now=True, null=True)),
('learner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='engine.Learner')),
('mooclet', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='engine.Mooclet')),
('policy', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='engine.Policy')),
],
),
migrations.CreateModel(
name='Variable',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('environment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='engine.Environment')),
],
),
migrations.CreateModel(
name='Version',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=200)),
('text', models.TextField(blank=True, default='')),
('mooclet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='engine.Mooclet')),
],
),
migrations.AddField(
model_name='value',
name='variable',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='engine.Variable'),
),
migrations.AddField(
model_name='value',
name='version',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='engine.Version'),
),
migrations.AddField(
model_name='mooclet',
name='policy',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='engine.Policy'),
),
]
| 45.104167
| 158
| 0.581524
|
f151d313b7147a367622827db4a6a31b70611001
| 699
|
py
|
Python
|
social_app/middleware/restrict_users_middleware.py
|
sh3b/social_app
|
64aadd05570373ea75834b53b0b4abd2b16c8451
|
[
"MIT"
] | null | null | null |
social_app/middleware/restrict_users_middleware.py
|
sh3b/social_app
|
64aadd05570373ea75834b53b0b4abd2b16c8451
|
[
"MIT"
] | 4
|
2021-06-04T23:11:02.000Z
|
2022-02-10T11:23:57.000Z
|
social_app/middleware/restrict_users_middleware.py
|
shuayb/social_app
|
64aadd05570373ea75834b53b0b4abd2b16c8451
|
[
"MIT"
] | null | null | null |
from django.shortcuts import redirect
class RestrictAdminToAdminArea:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
# Code to be executed for each request before
# the view (and later middleware) are called.
is_authenticated = request.user.is_authenticated
# For Admins
if is_authenticated and request.user.is_superuser:
if not request.path.startswith('/admin/'):
return redirect('admin:index')
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
return response
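# --- Usage sketch (assumption: the dotted path below is derived from this module's
# location and may differ in the actual project). The middleware only takes effect when
# listed in settings.MIDDLEWARE, after AuthenticationMiddleware so that request.user is
# populated before __call__ runs:
#
# MIDDLEWARE = [
#     ...,
#     "django.contrib.auth.middleware.AuthenticationMiddleware",
#     "social_app.middleware.restrict_users_middleware.RestrictAdminToAdminArea",
# ]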
| 27.96
| 61
| 0.662375
|
f75561a740ca41a1fc5f1f92084b941d610610c1
| 2,143
|
py
|
Python
|
pennylane/drawer/utils.py
|
therooler/pennylane
|
88a8a5960a2ffd218a12f85ace632021eef2abf5
|
[
"Apache-2.0"
] | 539
|
2018-11-13T08:45:42.000Z
|
2020-07-27T18:17:16.000Z
|
pennylane/drawer/utils.py
|
therooler/pennylane
|
88a8a5960a2ffd218a12f85ace632021eef2abf5
|
[
"Apache-2.0"
] | 588
|
2018-11-14T10:21:47.000Z
|
2020-07-28T06:27:14.000Z
|
pennylane/drawer/utils.py
|
therooler/pennylane
|
88a8a5960a2ffd218a12f85ace632021eef2abf5
|
[
"Apache-2.0"
] | 165
|
2018-11-13T18:58:56.000Z
|
2020-07-27T17:18:17.000Z
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains some useful utility functions for circuit drawing.
"""
def default_wire_map(tape):
"""Create a dictionary mapping used wire labels to non-negative integers
Args:
        tape (~.tape.QuantumTape): the QuantumTape containing operations and measurements
    Returns:
        dict: map from wire labels to sequential non-negative integers
"""
# Use dictionary to preserve ordering, sets break order
used_wires = {wire: None for op in tape for wire in op.wires}
return {wire: ind for ind, wire in enumerate(used_wires)}
def convert_wire_order(tape, wire_order=None, show_all_wires=False):
"""Creates the mapping between wire labels and place in order.
Args:
tape (~.tape.QuantumTape): the Quantum Tape containing operations and measurements
        wire_order (Sequence[Any]): the order (from top to bottom) to print the wires
Keyword Args:
show_all_wires=False (bool): whether to display all wires in ``wire_order``
or only include ones used by operations in ``ops``
Returns:
        dict: map from wire labels to sequential non-negative integers
"""
default = default_wire_map(tape)
if wire_order is None:
return default
wire_order = list(wire_order) + [wire for wire in default if wire not in wire_order]
if not show_all_wires:
used_wires = {wire for op in tape for wire in op.wires}
wire_order = [wire for wire in wire_order if wire in used_wires]
return {wire: ind for ind, wire in enumerate(wire_order)}
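# --- Illustrative usage sketch (assumption: a stand-in "tape" that is just an iterable
# of objects exposing .wires, which is all the helpers above rely on):
if __name__ == "__main__":
    from collections import namedtuple
    _FakeOp = namedtuple("_FakeOp", ["wires"])
    _fake_tape = [_FakeOp(wires=["a", "q2"]), _FakeOp(wires=["q2", 0])]
    # the default map follows first-use order of the wire labels
    assert default_wire_map(_fake_tape) == {"a": 0, "q2": 1, 0: 2}
    # an explicit wire_order moves wire 0 to the top; remaining wires keep default order
    assert convert_wire_order(_fake_tape, wire_order=[0, "a"]) == {0: 0, "a": 1, "q2": 2}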
| 35.716667
| 90
| 0.719085
|
4295e3bb3675f8ba1c61332882a50bc28d24ceba
| 1,868
|
py
|
Python
|
homeassistant/components/lutron_caseta/binary_sensor.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 6
|
2020-07-18T16:33:25.000Z
|
2021-09-26T09:52:04.000Z
|
homeassistant/components/lutron_caseta/binary_sensor.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 47
|
2020-07-23T07:14:33.000Z
|
2022-03-31T06:01:46.000Z
|
homeassistant/components/lutron_caseta/binary_sensor.py
|
klauern/home-assistant-core
|
c18ba6aec0627e6afb6442c678edb5ff2bb17db6
|
[
"Apache-2.0"
] | 5
|
2020-03-29T00:29:13.000Z
|
2021-09-06T20:58:40.000Z
|
"""Support for Lutron Caseta Occupancy/Vacancy Sensors."""
from pylutron_caseta import OCCUPANCY_GROUP_OCCUPIED
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_OCCUPANCY,
BinarySensorEntity,
)
from . import DOMAIN as CASETA_DOMAIN, LutronCasetaDevice
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Lutron Caseta binary_sensor platform.
Adds occupancy groups from the Caseta bridge associated with the
config_entry as binary_sensor entities.
"""
entities = []
bridge = hass.data[CASETA_DOMAIN][config_entry.entry_id]
occupancy_groups = bridge.occupancy_groups
for occupancy_group in occupancy_groups.values():
entity = LutronOccupancySensor(occupancy_group, bridge)
entities.append(entity)
async_add_entities(entities, True)
class LutronOccupancySensor(LutronCasetaDevice, BinarySensorEntity):
"""Representation of a Lutron occupancy group."""
@property
def device_class(self):
"""Flag supported features."""
return DEVICE_CLASS_OCCUPANCY
@property
def is_on(self):
"""Return the brightness of the light."""
return self._device["status"] == OCCUPANCY_GROUP_OCCUPIED
async def async_added_to_hass(self):
"""Register callbacks."""
self._smartbridge.add_occupancy_subscriber(
self.device_id, self.async_write_ha_state
)
@property
def device_id(self):
"""Return the device ID used for calling pylutron_caseta."""
return self._device["occupancy_group_id"]
@property
def unique_id(self):
"""Return a unique identifier."""
return f"occupancygroup_{self.device_id}"
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {"device_id": self.device_id}
| 29.650794
| 68
| 0.709315
|
269a9a41aabb54e076d9738fa4616234e1afb19c
| 75
|
py
|
Python
|
sssdevops/listtools.py
|
jvaldiviezom/sssdevops
|
b8843a8a9161e9d76069e3c6d188874b63948559
|
[
"BSD-3-Clause"
] | null | null | null |
sssdevops/listtools.py
|
jvaldiviezom/sssdevops
|
b8843a8a9161e9d76069e3c6d188874b63948559
|
[
"BSD-3-Clause"
] | 1
|
2018-07-09T19:22:15.000Z
|
2018-07-09T19:23:59.000Z
|
sssdevops/listtools.py
|
jvaldiviezom/sssdevops
|
b8843a8a9161e9d76069e3c6d188874b63948559
|
[
"BSD-3-Clause"
] | 1
|
2018-07-09T19:36:13.000Z
|
2018-07-09T19:36:13.000Z
|
"""
Miscellaneous list manipulation
"""
def split(num_lst, index):
pass
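# --- Hypothetical sketch only: the intended behaviour of split() is not specified
# above. One plausible implementation (splitting a list around an index) would be:
#
#     def split(num_lst, index):
#         return num_lst[:index], num_lst[index:]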
| 9.375
| 31
| 0.706667
|
6c9943e96679919a26a5059259ea3ddf53a684df
| 10,864
|
py
|
Python
|
tests/unit/frontend/tvm/test_relay.py
|
anilmartha/pyxir
|
0972aed63748afd82ef414b67a6cceaedd738b38
|
[
"Apache-2.0"
] | 25
|
2020-06-17T22:41:13.000Z
|
2022-03-22T16:28:22.000Z
|
tests/unit/frontend/tvm/test_relay.py
|
anilmartha/pyxir
|
0972aed63748afd82ef414b67a6cceaedd738b38
|
[
"Apache-2.0"
] | 25
|
2021-03-16T06:26:44.000Z
|
2022-03-18T11:28:33.000Z
|
tests/unit/frontend/tvm/test_relay.py
|
anilmartha/pyxir
|
0972aed63748afd82ef414b67a6cceaedd738b38
|
[
"Apache-2.0"
] | 19
|
2020-07-30T10:03:02.000Z
|
2021-06-29T01:18:16.000Z
|
# Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the relay pyxir frontend"""
import unittest
import numpy as np
# ! To import tvm
import pyxir
try:
import tvm
from tvm import relay
from tvm.relay import testing
skip = False
except Exception as e:
# Skip TVM tests
skip = True
if not skip:
from pyxir.frontend.tvm import relay as xf_relay
class TestRelayFrontend(unittest.TestCase):
@unittest.skipIf(skip, "Could not import TVM and/or TVM frontend")
def test_simple_network(self):
data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
weight = relay.var("weight")
bn_gamma = relay.var("bn_gamma")
bn_beta = relay.var("bn_beta")
bn_mmean = relay.var("bn_mean")
bn_mvar = relay.var("bn_var")
simple_net = relay.nn.pad(data, ((0, 0), (0, 0), (1, 1), (1, 1)))
simple_net = relay.nn.conv2d(
data=simple_net,
weight=weight,
kernel_size=(3, 3),
channels=16,
padding=(0, 0),
)
simple_net = relay.nn.batch_norm(
simple_net, bn_gamma, bn_beta, bn_mmean, bn_mvar
)[0]
simple_net = relay.nn.relu(simple_net)
simple_net = relay.op.reduce.mean(simple_net, axis=(2, 3))
simple_net = relay.op.transform.reshape(simple_net, newshape=(1, 16))
dense_weight = relay.var("dense_weight")
dense_bias = relay.var("dense_bias")
simple_net = relay.nn.dense(simple_net, weight=dense_weight, units=10)
simple_net = relay.nn.bias_add(simple_net, dense_bias, axis=1)
simple_net = relay.nn.softmax(simple_net, axis=1)
simple_net = relay.op.transform.reshape(simple_net, newshape=(1, 10))
func = relay.Function(relay.analysis.free_vars(simple_net), simple_net)
mod, params = testing.create_workload(simple_net)
xgraph = xf_relay.from_relay(mod, params)
layers = xgraph.get_layers()
assert layers[0].type[0] == "Input"
assert layers[1].type[0] == "Pad"
assert layers[2].type[0] == "Convolution"
assert layers[3].type[0] == "BatchNorm"
assert layers[4].type[0] == "ReLU"
assert layers[5].type[0] == "Mean"
assert layers[6].type[0] == "Reshape"
assert layers[7].type[0] == "Dense"
assert layers[8].type[0] == "BiasAdd"
assert layers[9].type[0] == "Softmax"
assert layers[10].type[0] == "Reshape"
@unittest.skipIf(skip, "Could not import TVM and/or TVM frontend")
def test_simple_network_cvx(self):
data = relay.var("data", relay.TensorType((1, 3, 224, 224), "float32"))
weight = relay.var("weight")
bn_gamma = relay.var("bn_gamma")
bn_beta = relay.var("bn_beta")
bn_mmean = relay.var("bn_mean")
bn_mvar = relay.var("bn_var")
simple_net = relay.nn.pad(data, ((0, 0), (0, 0), (1, 1), (1, 1)))
simple_net = relay.nn.conv2d(
data=simple_net,
weight=weight,
kernel_size=(3, 3),
channels=16,
padding=(0, 0),
)
simple_net = relay.nn.relu(simple_net)
simple_net = relay.Function(relay.analysis.free_vars(simple_net), simple_net)
mod, params = testing.create_workload(simple_net)
xgraph = xf_relay.from_relay(
mod, params, cvx_preprocessing={"data": "scale-0.5__transpose-2,0,1"}
)
assert len(xgraph.get_input_names()) == 1
layers = xgraph.get_layers()
# assert layers[0].type[0] == "Constant"
assert layers[0].type[0] == "StrInput"
assert layers[0].shapes == [-1]
assert layers[1].type[0] == "Cvx"
assert layers[1].shapes == [-1, 3, 224, 224]
assert layers[2].type[0] == "Pad"
assert layers[3].type[0] == "Convolution"
assert layers[4].type[0] == "ReLU"
assert layers[0].tops == ["data_cvx"]
assert layers[1].bottoms == ["data"]
assert layers[1].tops[0][:7] == "nn.pad-"
@unittest.skipIf(skip, "Could not import TVM and/or TVM frontend")
def test_conv2d_transpose(self):
data = relay.var("data", relay.TensorType((-1, 1, 3, 3), "float32"))
weight = relay.var("weight")
simple_net = relay.nn.conv2d_transpose(
data=data,
weight=weight,
kernel_size=(2, 2),
channels=1,
padding=(0, 0),
strides=(2, 2),
data_layout="NCHW",
kernel_layout="IOHW",
)
simple_net = relay.Function(relay.analysis.free_vars(simple_net), simple_net)
mod, params = testing.create_workload(simple_net)
xgraph = xf_relay.from_relay(mod, params)
layers = xgraph.get_layers()
assert layers[0].type[0] == "Input"
assert layers[0].shapes == [-1, 1, 3, 3]
assert layers[1].type[0] == "Conv2DTranspose"
assert layers[1].shapes == [-1, 1, 6, 6]
assert layers[1].sizes == [36]
assert layers[1].attrs["padding"] == [[0, 0], [0, 0], [0, 0], [0, 0]]
assert layers[1].attrs["strides"] == [2, 2]
assert layers[1].attrs["dilation"] == [1, 1]
@unittest.skipIf(skip, "Could not import TVM and/or TVM frontend")
def test_resnet_block(self):
data = relay.var("data", relay.TensorType((-1, 3, 224, 224), "float32"))
weight = relay.var("weight")
bn_gamma = relay.var("bn_gamma")
bn_beta = relay.var("bn_beta")
bn_mmean = relay.var("bn_mean")
bn_mvar = relay.var("bn_var")
conv2d0_expr = relay.nn.conv2d(
data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1)
)
bn0_expr = relay.nn.batch_norm(
conv2d0_expr, bn_gamma, bn_beta, bn_mmean, bn_mvar
)[0]
relu0_expr = relay.nn.relu(bn0_expr)
max_pool0_expr = relay.nn.max_pool2d(
relu0_expr, pool_size=(2, 2), strides=(2, 2)
)
conv2d1_weight = relay.var("conv2d1_weight")
conv2d1_bias = relay.var("conv2d1_bias")
conv2d1_expr = relay.nn.conv2d(
data=max_pool0_expr,
weight=conv2d1_weight,
kernel_size=(3, 3),
channels=16,
padding=(1, 1),
)
bias_add0_expr = relay.nn.bias_add(conv2d1_expr, conv2d1_bias, axis=1)
relu1_expr = relay.nn.relu(bias_add0_expr)
add0_expr = relay.op.tensor.add(max_pool0_expr, relu1_expr)
avg_pool0_expr = relay.nn.avg_pool2d(
add0_expr, pool_size=(2, 2), strides=(2, 2)
)
global_avg_pool0_expr = relay.op.nn.global_avg_pool2d(avg_pool0_expr)
bf_expr = relay.nn.batch_flatten(global_avg_pool0_expr)
net = avg_pool0_expr
net = relay.Function(relay.analysis.free_vars(net), net)
mod, params = testing.create_workload(net)
xgraph = xf_relay.from_relay(mod, params)
layers = xgraph.get_layers()
assert layers[0].type[0] == "Input"
assert layers[1].type[0] == "Convolution"
assert layers[2].type[0] == "BatchNorm"
assert layers[3].type[0] == "ReLU"
assert layers[4].type[0] == "Pooling"
assert layers[5].type[0] == "Convolution"
assert layers[6].type[0] == "BiasAdd"
assert layers[7].type[0] == "ReLU"
assert layers[8].type[0] == "Eltwise"
assert layers[9].type[0] == "Pooling"
assert layers[9].shapes == [-1, 16, 56, 56]
@unittest.skipIf(skip, "Could not import TVM and/or TVM frontend")
def test_inception_block(self):
data = relay.var("data", relay.TensorType((-1, 3, 224, 224), "float32"))
weight = relay.var("weight")
bn_gamma = relay.var("bn_gamma")
bn_beta = relay.var("bn_beta")
bn_mmean = relay.var("bn_mean")
bn_mvar = relay.var("bn_var")
conv2d0_expr = relay.nn.conv2d(
data=data, weight=weight, kernel_size=(3, 3), channels=16, padding=(1, 1)
)
bn0_expr = relay.nn.batch_norm(
conv2d0_expr, bn_gamma, bn_beta, bn_mmean, bn_mvar
)[0]
relu0_expr = relay.nn.relu(bn0_expr)
max_pool0_expr = relay.nn.max_pool2d(
relu0_expr, pool_size=(2, 2), strides=(2, 2)
)
conv2d1_weight = relay.var("conv2d1_weight")
conv2d1_bias = relay.var("conv2d1_bias")
conv2d1_expr = relay.nn.conv2d(
data=max_pool0_expr,
weight=conv2d1_weight,
kernel_size=(3, 3),
channels=16,
padding=(1, 1),
strides=(2, 2),
)
bias_add1_expr = relay.nn.bias_add(conv2d1_expr, conv2d1_bias, axis=1)
relu1_expr = relay.nn.relu(bias_add1_expr)
conv2d2_weight = relay.var("conv2d2_weight")
conv2d2_bias = relay.var("conv2d2_bias")
conv2d2_expr = relay.nn.conv2d(
data=max_pool0_expr,
weight=conv2d2_weight,
kernel_size=(3, 3),
channels=16,
padding=(1, 1),
strides=(2, 2),
)
bias_add2_expr = relay.nn.bias_add(conv2d2_expr, conv2d2_bias, axis=1)
relu2_expr = relay.nn.relu(bias_add2_expr)
concat0_expr = relay.op.tensor.concatenate([relu1_expr, relu2_expr], axis=1)
global_max_pool0_expr = relay.op.nn.global_max_pool2d(concat0_expr)
net = global_max_pool0_expr
net = relay.Function(relay.analysis.free_vars(net), net)
mod, params = testing.create_workload(net)
xgraph = xf_relay.from_relay(mod, params)
layers = xgraph.get_layers()
assert layers[0].type[0] == "Input"
assert layers[1].type[0] == "Convolution"
assert layers[2].type[0] == "BatchNorm"
assert layers[3].type[0] == "ReLU"
assert layers[4].type[0] == "Pooling"
assert layers[5].type[0] == "Convolution"
assert layers[6].type[0] == "BiasAdd"
assert layers[7].type[0] == "ReLU"
assert layers[8].type[0] == "Convolution"
assert layers[9].type[0] == "BiasAdd"
assert layers[10].type[0] == "ReLU"
assert layers[11].type[0] == "Concat"
assert layers[12].type[0] == "Pooling"
assert layers[12].shapes == [-1, 32, 1, 1]
| 35.736842
| 85
| 0.594348
|
7af6b5480bc1b71e5979ff265a38e2cde8a8cf1d
| 2,631
|
py
|
Python
|
tests/restart/walls.py
|
Uornca/mirheo
|
162c722ffa27c02e1f5b0d1866816e44c2393f0f
|
[
"MIT"
] | 22
|
2019-07-17T13:06:41.000Z
|
2021-12-15T14:45:24.000Z
|
tests/restart/walls.py
|
Uornca/mirheo
|
162c722ffa27c02e1f5b0d1866816e44c2393f0f
|
[
"MIT"
] | 63
|
2019-06-26T13:30:47.000Z
|
2021-02-23T10:13:10.000Z
|
tests/restart/walls.py
|
dimaleks/uDeviceX
|
2ad5e9dd9f118e3998b291cbfc35ee91205bbef8
|
[
"MIT"
] | 9
|
2019-10-11T07:32:19.000Z
|
2021-05-17T11:25:35.000Z
|
#!/usr/bin/env python
import argparse
import mirheo as mir
parser = argparse.ArgumentParser()
parser.add_argument("--restart", action='store_true', default=False)
parser.add_argument("--ranks", type=int, nargs=3, default=(1,1,1))
args = parser.parse_args()
dt = 0.001
domain = (8, 16, 8)
force = (1.0, 0, 0)
density = 8
rc = 1.0
gdot = 0.5 # shear rate
T = 3.0 # period for oscillating plate case
tend = 5.0
nsteps = int (tend / dt)
u = mir.Mirheo(args.ranks, domain, debug_level=3, log_filename='log', no_splash=True,
checkpoint_every = (0 if args.restart else nsteps))
pv = mir.ParticleVectors.ParticleVector('pv', mass = 1)
ic = mir.InitialConditions.Uniform(number_density=density)
u.registerParticleVector(pv=pv, ic=ic)
dpd = mir.Interactions.Pairwise('dpd', rc=rc, kind="DPD", a=10.0, gamma=20.0, kBT=1.0, power=0.125)
u.registerInteraction(dpd)
vx = gdot*(domain[2] - 2*rc)
plate_lo = mir.Walls.Plane ("plate_lo", normal=(0, 0, -1), pointThrough=(0, 0, rc))
plate_hi = mir.Walls.MovingPlane("plate_hi", normal=(0, 0, 1), pointThrough=(0, 0, domain[2] - rc), velocity=(vx, 0, 0))
nsteps_eq = (1 if args.restart else 1000)
u.registerWall(plate_lo, nsteps_eq)
u.registerWall(plate_hi, nsteps_eq)
vv = mir.Integrators.VelocityVerlet("vv")
frozen_lo = u.makeFrozenWallParticles(pvName="plate_lo", walls=[plate_lo], interactions=[dpd], integrator=vv, number_density=density, dt=dt)
frozen_hi = u.makeFrozenWallParticles(pvName="plate_hi", walls=[plate_hi], interactions=[dpd], integrator=vv, number_density=density, dt=dt)
u.setWall(plate_lo, pv)
u.setWall(plate_hi, pv)
for p in [pv, frozen_lo, frozen_hi]:
u.setInteraction(dpd, p, pv)
u.registerIntegrator(vv)
u.setIntegrator(vv, pv)
move = mir.Integrators.Translate('move', velocity=(vx, 0, 0))
u.registerIntegrator(move)
u.setIntegrator(move, frozen_hi)
sample_every = 2
dump_every = 1000
bin_size = (8., 8., 1.0)
u.registerPlugins(mir.Plugins.createDumpAverage('field', [pv], sample_every, dump_every, bin_size, ["velocities"], 'h5/solvent-'))
if args.restart:
u.restart("restart")
u.run(nsteps + 1, dt=dt)
# nTEST: restart.walls
# cd restart
# rm -rf h5
# mir.run --runargs "-n 2" ./walls.py
# mir.run --runargs "-n 2" ./walls.py --restart
# mir.avgh5 xy velocities h5/solvent-0000[7-9].h5 | awk '{print $1}' > profile.out.txt
# nTEST: restart.walls.mpi
# cd restart
# rm -rf h5
# mir.run --runargs "-n 4" ./walls.py --ranks 1 2 1
# mir.run --runargs "-n 4" ./walls.py --ranks 1 2 1 --restart
# mir.avgh5 xy velocities h5/solvent-0000[7-9].h5 | awk '{print $1}' > profile.out.txt
| 31.698795
| 140
| 0.687571
|
a6c542ab4db206e7a4c52c0cb6ba4fb428cc883a
| 1,151
|
py
|
Python
|
app/core/tests/test_models.py
|
Rokiaa/recipe-app-API
|
991fc93824cd3e4137fda255c39afbe1f120b1aa
|
[
"MIT"
] | null | null | null |
app/core/tests/test_models.py
|
Rokiaa/recipe-app-API
|
991fc93824cd3e4137fda255c39afbe1f120b1aa
|
[
"MIT"
] | null | null | null |
app/core/tests/test_models.py
|
Rokiaa/recipe-app-API
|
991fc93824cd3e4137fda255c39afbe1f120b1aa
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
"""Test creating a new user with an email is successful"""
email = 'test@londonappdev.com'
password = 'Testpass123'
user = get_user_model().objects.create_user(
            email=email,
            password=password,
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
"""Test the email for a new user is normlized"""
email = 'test@LONDONAPPDEV.COM'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
"""Test creating user with no email raises error"""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
def test_create_new_superuser(self):
"""Test creating a new superuser"""
user = get_user_model().objects.create_superuser(
'test@londonappdev.com',
'test123'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
| 29.512821
| 63
| 0.743701
|
79172c37be5acbd28e438c9ad9a3a402b7f462b7
| 145,314
|
py
|
Python
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
TUDelftHao/models
|
faf0c2dc442ceaa8425aff73abd00f92f3137b7b
|
[
"Apache-2.0"
] | 1
|
2020-09-25T14:30:08.000Z
|
2020-09-25T14:30:08.000Z
|
research/object_detection/meta_architectures/center_net_meta_arch.py
|
TUDelftHao/models
|
faf0c2dc442ceaa8425aff73abd00f92f3137b7b
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/meta_architectures/center_net_meta_arch.py
|
TUDelftHao/models
|
faf0c2dc442ceaa8425aff73abd00f92f3137b7b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The CenterNet meta architecture as described in the "Objects as Points" paper [1].
[1]: https://arxiv.org/abs/1904.07850
"""
import abc
import collections
import functools
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow.compat.v2 as tf2
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import keypoint_ops
from object_detection.core import model
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner as cn_assigner
from object_detection.utils import shape_utils
# Number of channels needed to predict size and offsets.
NUM_OFFSET_CHANNELS = 2
NUM_SIZE_CHANNELS = 2
# Error range for detecting peaks.
PEAK_EPSILON = 1e-6
# Constants shared between all keypoint tasks.
UNMATCHED_KEYPOINT_SCORE = 0.1
KEYPOINT_CANDIDATE_SEARCH_SCALE = 0.3
class CenterNetFeatureExtractor(tf.keras.Model):
"""Base class for feature extractors for the CenterNet meta architecture.
Child classes are expected to override the _output_model property which will
return 1 or more tensors predicted by the feature extractor.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, name=None, channel_means=(0., 0., 0.),
channel_stds=(1., 1., 1.), bgr_ordering=False):
"""Initializes a CenterNet feature extractor.
Args:
name: str, the name used for the underlying keras model.
channel_means: A tuple of floats, denoting the mean of each channel
which will be subtracted from it. If None or empty, we use 0s.
channel_stds: A tuple of floats, denoting the standard deviation of each
channel. Each channel will be divided by its standard deviation value.
If None or empty, we use 1s.
bgr_ordering: bool, if set will change the channel ordering to be in the
[blue, red, green] order.
"""
super(CenterNetFeatureExtractor, self).__init__(name=name)
if channel_means is None or len(channel_means) == 0: # pylint:disable=g-explicit-length-test
channel_means = [0., 0., 0.]
if channel_stds is None or len(channel_stds) == 0: # pylint:disable=g-explicit-length-test
channel_stds = [1., 1., 1.]
self._channel_means = channel_means
self._channel_stds = channel_stds
self._bgr_ordering = bgr_ordering
def preprocess(self, inputs):
"""Converts a batch of unscaled images to a scale suitable for the model.
This method normalizes the image using the given `channel_means` and
    `channel_stds` values at initialization time while optionally flipping
the channel order if `bgr_ordering` is set.
Args:
inputs: a [batch, height, width, channels] float32 tensor
Returns:
outputs: a [batch, height, width, channels] float32 tensor
"""
if self._bgr_ordering:
red, green, blue = tf.unstack(inputs, axis=3)
inputs = tf.stack([blue, green, red], axis=3)
channel_means = tf.reshape(tf.constant(self._channel_means),
[1, 1, 1, -1])
channel_stds = tf.reshape(tf.constant(self._channel_stds),
[1, 1, 1, -1])
return (inputs - channel_means)/channel_stds
@property
@abc.abstractmethod
def out_stride(self):
"""The stride in the output image of the network."""
pass
@property
@abc.abstractmethod
def num_feature_outputs(self):
"""Ther number of feature outputs returned by the feature extractor."""
pass
@property
@abc.abstractmethod
def supported_sub_model_types(self):
"""Valid sub model types supported by the get_sub_model function."""
pass
@abc.abstractmethod
def get_sub_model(self, sub_model_type):
"""Returns the underlying keras model for the given sub_model_type.
This function is useful when we only want to get a subset of weights to
be restored from a checkpoint.
Args:
sub_model_type: string, the type of sub model. Currently, CenterNet
feature extractors support 'detection' and 'classification'.
"""
pass
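# Editor's illustrative sketch (not part of the original module): replicates the
# normalization arithmetic described in CenterNetFeatureExtractor.preprocess with
# plain TF ops on a dummy batch. The mean/std constants below are assumed example
# values, not values mandated by this architecture.
def _example_preprocess_normalization():
  inputs = 255.0 * tf.ones([1, 4, 4, 3], dtype=tf.float32)
  channel_means = tf.reshape(tf.constant([123.68, 116.78, 103.94]), [1, 1, 1, -1])
  channel_stds = tf.reshape(tf.constant([58.4, 57.1, 57.4]), [1, 1, 1, -1])
  return (inputs - channel_means) / channel_stds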
def make_prediction_net(num_out_channels, kernel_size=3, num_filters=256,
bias_fill=None):
"""Creates a network to predict the given number of output channels.
This function is intended to make the prediction heads for the CenterNet
meta architecture.
Args:
num_out_channels: Number of output channels.
kernel_size: The size of the conv kernel in the intermediate layer
num_filters: The number of filters in the intermediate conv layer.
bias_fill: If not None, is used to initialize the bias in the final conv
layer.
Returns:
net: A keras module which when called on an input tensor of size
[batch_size, height, width, num_in_channels] returns an output
of size [batch_size, height, width, num_out_channels]
"""
out_conv = tf.keras.layers.Conv2D(num_out_channels, kernel_size=1)
if bias_fill is not None:
out_conv.bias_initializer = tf.keras.initializers.constant(bias_fill)
net = tf.keras.Sequential(
[tf.keras.layers.Conv2D(num_filters, kernel_size=kernel_size,
padding='same'),
tf.keras.layers.ReLU(),
out_conv]
)
return net
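# Editor's illustrative sketch (not part of the original module): a hypothetical
# example of building a 2-channel offset head with make_prediction_net and
# applying it to dummy backbone features.
def _example_offset_head():
  features = tf.zeros([2, 32, 32, 64], dtype=tf.float32)
  offset_head = make_prediction_net(num_out_channels=2)
  # Output has shape [2, 32, 32, 2]: one (dy, dx) pair per feature map location.
  return offset_head(features)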
def _to_float32(x):
return tf.cast(x, tf.float32)
def _get_shape(tensor, num_dims):
tf.Assert(tensor.get_shape().ndims == num_dims, [tensor])
return shape_utils.combined_static_and_dynamic_shape(tensor)
def _flatten_spatial_dimensions(batch_images):
batch_size, height, width, channels = _get_shape(batch_images, 4)
return tf.reshape(batch_images, [batch_size, height * width,
channels])
def top_k_feature_map_locations(feature_map, max_pool_kernel_size=3, k=100,
per_channel=False):
"""Returns the top k scores and their locations in a feature map.
Given a feature map, the top k values (based on activation) are returned. If
`per_channel` is True, the top k values **per channel** are returned.
The `max_pool_kernel_size` argument allows for selecting local peaks in a
region. This filtering is done per channel, so nothing prevents two values at
the same location to be returned.
Args:
feature_map: [batch, height, width, channels] float32 feature map.
max_pool_kernel_size: integer, the max pool kernel size to use to pull off
peak score locations in a neighborhood (independently for each channel).
For example, to make sure no two neighboring values (in the same channel)
are returned, set max_pool_kernel_size=3. If None or 1, will not apply max
pooling.
k: The number of highest scoring locations to return.
per_channel: If True, will return the top k scores and locations per
feature map channel. If False, the top k across the entire feature map
(height x width x channels) are returned.
Returns:
Tuple of
scores: A [batch, N] float32 tensor with scores from the feature map in
descending order. If per_channel is False, N = k. Otherwise,
N = k * channels, and the first k elements correspond to channel 0, the
second k correspond to channel 1, etc.
y_indices: A [batch, N] int tensor with y indices of the top k feature map
locations. If per_channel is False, N = k. Otherwise,
N = k * channels.
x_indices: A [batch, N] int tensor with x indices of the top k feature map
locations. If per_channel is False, N = k. Otherwise,
N = k * channels.
channel_indices: A [batch, N] int tensor with channel indices of the top k
feature map locations. If per_channel is False, N = k. Otherwise,
N = k * channels.
"""
if not max_pool_kernel_size or max_pool_kernel_size == 1:
feature_map_peaks = feature_map
else:
feature_map_max_pool = tf.nn.max_pool(
feature_map, ksize=max_pool_kernel_size, strides=1, padding='SAME')
feature_map_peak_mask = tf.math.abs(
feature_map - feature_map_max_pool) < PEAK_EPSILON
# Zero out everything that is not a peak.
feature_map_peaks = (
feature_map * _to_float32(feature_map_peak_mask))
batch_size, _, width, num_channels = _get_shape(feature_map, 4)
if per_channel:
# Perform top k over batch and channels.
feature_map_peaks_transposed = tf.transpose(feature_map_peaks,
perm=[0, 3, 1, 2])
feature_map_peaks_transposed = tf.reshape(
feature_map_peaks_transposed, [batch_size, num_channels, -1])
scores, peak_flat_indices = tf.math.top_k(feature_map_peaks_transposed, k=k)
# Convert the indices such that they represent the location in the full
# (flattened) feature map of size [batch, height * width * channels].
channel_idx = tf.range(num_channels)[tf.newaxis, :, tf.newaxis]
peak_flat_indices = num_channels * peak_flat_indices + channel_idx
scores = tf.reshape(scores, [batch_size, -1])
peak_flat_indices = tf.reshape(peak_flat_indices, [batch_size, -1])
else:
feature_map_peaks_flat = tf.reshape(feature_map_peaks, [batch_size, -1])
scores, peak_flat_indices = tf.math.top_k(feature_map_peaks_flat, k=k)
# Get x, y and channel indices corresponding to the top indices in the flat
# array.
y_indices, x_indices, channel_indices = (
row_col_channel_indices_from_flattened_indices(
peak_flat_indices, width, num_channels))
return scores, y_indices, x_indices, channel_indices
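# Editor's illustrative sketch (not part of the original module): selects the two
# highest local peaks from a tiny single-channel heatmap. With
# max_pool_kernel_size=3 only local maxima survive, so the values at (y, x)
# locations (1, 1) and (3, 3) are returned.
def _example_top_k_locations():
  heatmap = tf.reshape(tf.constant(
      [[0.1, 0.2, 0.1, 0.0],
       [0.2, 0.9, 0.2, 0.0],
       [0.1, 0.2, 0.1, 0.0],
       [0.0, 0.0, 0.0, 0.7]], dtype=tf.float32), [1, 4, 4, 1])
  return top_k_feature_map_locations(heatmap, max_pool_kernel_size=3, k=2)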
def prediction_tensors_to_boxes(detection_scores, y_indices, x_indices,
channel_indices, height_width_predictions,
offset_predictions):
"""Converts CenterNet class-center, offset and size predictions to boxes.
Args:
detection_scores: A [batch, num_boxes] float32 tensor with detection
scores in range [0, 1].
y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to
object center locations (expressed in output coordinate frame).
x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to
object center locations (expressed in output coordinate frame).
channel_indices: A [batch, num_boxes] int32 tensor with channel indices
corresponding to object classes.
height_width_predictions: A float tensor of shape [batch_size, height,
width, 2] representing the height and width of a box centered at each
pixel.
offset_predictions: A float tensor of shape [batch_size, height, width, 2]
representing the y and x offsets of a box centered at each pixel. This
helps reduce the error from downsampling.
Returns:
detection_boxes: A tensor of shape [batch_size, num_boxes, 4] holding the
      raw bounding box coordinates of boxes.
detection_classes: An integer tensor of shape [batch_size, num_boxes]
indicating the predicted class for each box.
detection_scores: A float tensor of shape [batch_size, num_boxes] indicating
the score for each box.
num_detections: An integer tensor of shape [batch_size,] indicating the
number of boxes detected for each sample in the batch.
"""
_, _, width, _ = _get_shape(height_width_predictions, 4)
peak_spatial_indices = flattened_indices_from_row_col_indices(
y_indices, x_indices, width)
y_indices = _to_float32(y_indices)
x_indices = _to_float32(x_indices)
height_width_flat = _flatten_spatial_dimensions(height_width_predictions)
offsets_flat = _flatten_spatial_dimensions(offset_predictions)
height_width = tf.gather(height_width_flat, peak_spatial_indices,
batch_dims=1)
height_width = tf.maximum(height_width, 0)
offsets = tf.gather(offsets_flat, peak_spatial_indices, batch_dims=1)
heights, widths = tf.unstack(height_width, axis=2)
y_offsets, x_offsets = tf.unstack(offsets, axis=2)
detection_classes = channel_indices
num_detections = tf.reduce_sum(tf.to_int32(detection_scores > 0), axis=1)
boxes = tf.stack([y_indices + y_offsets - heights / 2.0,
x_indices + x_offsets - widths / 2.0,
y_indices + y_offsets + heights / 2.0,
x_indices + x_offsets + widths / 2.0], axis=2)
return boxes, detection_classes, detection_scores, num_detections
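# Editor's illustrative sketch (not part of the original module): a single
# detection on an 8x8 output map centered at (y, x) = (3, 4) with predicted
# height/width of 4.0 and zero offsets, which yields the box [1.0, 2.0, 5.0, 6.0]
# via center - size / 2 and center + size / 2.
def _example_boxes_from_center_predictions():
  scores = tf.constant([[0.9]], dtype=tf.float32)
  y_indices = tf.constant([[3]], dtype=tf.int32)
  x_indices = tf.constant([[4]], dtype=tf.int32)
  channel_indices = tf.constant([[0]], dtype=tf.int32)
  height_width = 4.0 * tf.ones([1, 8, 8, 2], dtype=tf.float32)
  offsets = tf.zeros([1, 8, 8, 2], dtype=tf.float32)
  return prediction_tensors_to_boxes(scores, y_indices, x_indices,
                                     channel_indices, height_width, offsets)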
def prediction_tensors_to_temporal_offsets(
y_indices, x_indices, offset_predictions):
"""Converts CenterNet temporal offset map predictions to batched format.
  This function is similar to the box offset conversion function, as both
temporal offsets and box offsets are size-2 vectors.
Args:
y_indices: A [batch, num_boxes] int32 tensor with y indices corresponding to
object center locations (expressed in output coordinate frame).
x_indices: A [batch, num_boxes] int32 tensor with x indices corresponding to
object center locations (expressed in output coordinate frame).
offset_predictions: A float tensor of shape [batch_size, height, width, 2]
representing the y and x offsets of a box's center across adjacent frames.
Returns:
offsets: A tensor of shape [batch_size, num_boxes, 2] holding the
      object temporal offsets in (y, x) dimensions.
"""
_, _, width, _ = _get_shape(offset_predictions, 4)
peak_spatial_indices = flattened_indices_from_row_col_indices(
y_indices, x_indices, width)
y_indices = _to_float32(y_indices)
x_indices = _to_float32(x_indices)
offsets_flat = _flatten_spatial_dimensions(offset_predictions)
offsets = tf.gather(offsets_flat, peak_spatial_indices, batch_dims=1)
return offsets
def prediction_tensors_to_keypoint_candidates(
keypoint_heatmap_predictions,
keypoint_heatmap_offsets,
keypoint_score_threshold=0.1,
max_pool_kernel_size=1,
max_candidates=20):
"""Convert keypoint heatmap predictions and offsets to keypoint candidates.
Args:
keypoint_heatmap_predictions: A float tensor of shape [batch_size, height,
width, num_keypoints] representing the per-keypoint heatmaps.
keypoint_heatmap_offsets: A float tensor of shape [batch_size, height,
width, 2] (or [batch_size, height, width, 2 * num_keypoints] if
'per_keypoint_offset' is set True) representing the per-keypoint offsets.
keypoint_score_threshold: float, the threshold for considering a keypoint
a candidate.
max_pool_kernel_size: integer, the max pool kernel size to use to pull off
peak score locations in a neighborhood. For example, to make sure no two
neighboring values for the same keypoint are returned, set
max_pool_kernel_size=3. If None or 1, will not apply any local filtering.
max_candidates: integer, maximum number of keypoint candidates per
keypoint type.
Returns:
keypoint_candidates: A tensor of shape
[batch_size, max_candidates, num_keypoints, 2] holding the
location of keypoint candidates in [y, x] format (expressed in absolute
coordinates in the output coordinate frame).
keypoint_scores: A float tensor of shape
[batch_size, max_candidates, num_keypoints] with the scores for each
keypoint candidate. The scores come directly from the heatmap predictions.
num_keypoint_candidates: An integer tensor of shape
[batch_size, num_keypoints] with the number of candidates for each
keypoint type, as it's possible to filter some candidates due to the score
threshold.
"""
batch_size, _, width, num_keypoints = _get_shape(
keypoint_heatmap_predictions, 4)
# Get x, y and channel indices corresponding to the top indices in the
# keypoint heatmap predictions.
# Note that the top k candidates are produced for **each keypoint type**.
# Might be worth eventually trying top k in the feature map, independent of
# the keypoint type.
keypoint_scores, y_indices, x_indices, channel_indices = (
top_k_feature_map_locations(keypoint_heatmap_predictions,
max_pool_kernel_size=max_pool_kernel_size,
k=max_candidates,
per_channel=True))
peak_spatial_indices = flattened_indices_from_row_col_indices(
y_indices, x_indices, width)
y_indices = _to_float32(y_indices)
x_indices = _to_float32(x_indices)
offsets_flat = _flatten_spatial_dimensions(keypoint_heatmap_offsets)
selected_offsets = tf.gather(offsets_flat, peak_spatial_indices, batch_dims=1)
_, num_indices, num_channels = _get_shape(selected_offsets, 3)
if num_channels > 2:
reshaped_offsets = tf.reshape(selected_offsets,
[batch_size, num_indices, -1, 2])
offsets = tf.gather(reshaped_offsets, channel_indices, batch_dims=2)
else:
offsets = selected_offsets
y_offsets, x_offsets = tf.unstack(offsets, axis=2)
keypoint_candidates = tf.stack([y_indices + y_offsets,
x_indices + x_offsets], axis=2)
keypoint_candidates = tf.reshape(
keypoint_candidates,
[batch_size, num_keypoints, max_candidates, 2])
keypoint_candidates = tf.transpose(keypoint_candidates, [0, 2, 1, 3])
keypoint_scores = tf.reshape(
keypoint_scores,
[batch_size, num_keypoints, max_candidates])
keypoint_scores = tf.transpose(keypoint_scores, [0, 2, 1])
num_candidates = tf.reduce_sum(
tf.to_int32(keypoint_scores >= keypoint_score_threshold), axis=1)
return keypoint_candidates, keypoint_scores, num_candidates
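# Editor's note (not part of the original module): when keypoint_heatmap_offsets
# carries per-keypoint offsets (2 * num_keypoints channels), the gather in
# prediction_tensors_to_keypoint_candidates above collects every keypoint's offset
# pair at each peak, and the reshape/gather block then picks the pair matching the
# candidate's keypoint type; otherwise the shared 2-channel offsets are used directly.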
def regressed_keypoints_at_object_centers(regressed_keypoint_predictions,
y_indices, x_indices):
"""Returns the regressed keypoints at specified object centers.
The original keypoint predictions are regressed relative to each feature map
location. The returned keypoints are expressed in absolute coordinates in the
output frame (i.e. the center offsets are added to each individual regressed
set of keypoints).
Args:
regressed_keypoint_predictions: A float tensor of shape
[batch_size, height, width, 2 * num_keypoints] holding regressed
keypoints. The last dimension has keypoint coordinates ordered as follows:
[y0, x0, y1, x1, ..., y{J-1}, x{J-1}] where J is the number of keypoints.
y_indices: A [batch, num_instances] int tensor holding y indices for object
centers. These indices correspond to locations in the output feature map.
x_indices: A [batch, num_instances] int tensor holding x indices for object
centers. These indices correspond to locations in the output feature map.
Returns:
A float tensor of shape [batch_size, num_objects, 2 * num_keypoints] where
regressed keypoints are gathered at the provided locations, and converted
to absolute coordinates in the output coordinate frame.
"""
batch_size, _, width, _ = _get_shape(regressed_keypoint_predictions, 4)
flattened_indices = flattened_indices_from_row_col_indices(
y_indices, x_indices, width)
_, num_instances = _get_shape(flattened_indices, 2)
regressed_keypoints_flat = _flatten_spatial_dimensions(
regressed_keypoint_predictions)
relative_regressed_keypoints = tf.gather(
regressed_keypoints_flat, flattened_indices, batch_dims=1)
relative_regressed_keypoints = tf.reshape(
relative_regressed_keypoints,
[batch_size, num_instances, -1, 2])
relative_regressed_keypoints_y, relative_regressed_keypoints_x = tf.unstack(
relative_regressed_keypoints, axis=3)
y_indices = _to_float32(tf.expand_dims(y_indices, axis=-1))
x_indices = _to_float32(tf.expand_dims(x_indices, axis=-1))
absolute_regressed_keypoints = tf.stack(
[y_indices + relative_regressed_keypoints_y,
x_indices + relative_regressed_keypoints_x],
axis=3)
return tf.reshape(absolute_regressed_keypoints,
[batch_size, num_instances, -1])
def refine_keypoints(regressed_keypoints, keypoint_candidates, keypoint_scores,
num_keypoint_candidates, bboxes=None,
unmatched_keypoint_score=0.1, box_scale=1.2,
candidate_search_scale=0.3,
candidate_ranking_mode='min_distance'):
"""Refines regressed keypoints by snapping to the nearest candidate keypoints.
The initial regressed keypoints represent a full set of keypoints regressed
from the centers of the objects. The keypoint candidates are estimated
independently from heatmaps, and are not associated with any object instances.
This function refines the regressed keypoints by "snapping" to the
nearest/highest score/highest score-distance ratio (depending on the
candidate_ranking_mode) candidate of the same keypoint type (e.g. "nose").
If no candidates are nearby, the regressed keypoint remains unchanged.
In order to snap a regressed keypoint to a candidate keypoint, the following
must be satisfied:
- the candidate keypoint must be of the same type as the regressed keypoint
- the candidate keypoint must not lie outside the predicted boxes (or the
boxes which encloses the regressed keypoints for the instance if `bboxes` is
not provided). Note that the box is scaled by
    `box_scale` in height and width, to provide some margin around the
keypoints
- the distance to the closest candidate keypoint cannot exceed
candidate_search_scale * max(height, width), where height and width refer to
the bounding box for the instance.
Note that the same candidate keypoint is allowed to snap to regressed
  keypoints in different instances.
Args:
regressed_keypoints: A float tensor of shape
[batch_size, num_instances, num_keypoints, 2] with the initial regressed
keypoints.
keypoint_candidates: A tensor of shape
[batch_size, max_candidates, num_keypoints, 2] holding the location of
keypoint candidates in [y, x] format (expressed in absolute coordinates in
the output coordinate frame).
keypoint_scores: A float tensor of shape
[batch_size, max_candidates, num_keypoints] indicating the scores for
keypoint candidates.
num_keypoint_candidates: An integer tensor of shape
[batch_size, num_keypoints] indicating the number of valid candidates for
each keypoint type, as there may be padding (dim 1) of
`keypoint_candidates` and `keypoint_scores`.
bboxes: A tensor of shape [batch_size, num_instances, 4] with predicted
bounding boxes for each instance, expressed in the output coordinate
frame. If not provided, boxes will be computed from regressed keypoints.
unmatched_keypoint_score: float, the default score to use for regressed
keypoints that are not successfully snapped to a nearby candidate.
box_scale: float, the multiplier to expand the bounding boxes (either the
provided boxes or those which tightly cover the regressed keypoints) for
an instance. This scale is typically larger than 1.0 when not providing
`bboxes`.
candidate_search_scale: float, the scale parameter that multiplies the
largest dimension of a bounding box. The resulting distance becomes a
search radius for candidates in the vicinity of each regressed keypoint.
candidate_ranking_mode: A string as one of ['min_distance',
      'score_distance_ratio'] indicating how to select the candidate. If an
      invalid value is provided, a ValueError will be raised.
Returns:
A tuple with:
refined_keypoints: A float tensor of shape
[batch_size, num_instances, num_keypoints, 2] with the final, refined
keypoints.
refined_scores: A float tensor of shape
[batch_size, num_instances, num_keypoints] with scores associated with all
instances and keypoints in `refined_keypoints`.
Raises:
ValueError: if provided candidate_ranking_mode is not one of
['min_distance', 'score_distance_ratio']
"""
batch_size, num_instances, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(regressed_keypoints))
max_candidates = keypoint_candidates.shape[1]
# Replace all invalid (i.e. padded) keypoint candidates with NaN.
# This will prevent them from being considered.
range_tiled = tf.tile(
tf.reshape(tf.range(max_candidates), [1, max_candidates, 1]),
[batch_size, 1, num_keypoints])
num_candidates_tiled = tf.tile(tf.expand_dims(num_keypoint_candidates, 1),
[1, max_candidates, 1])
invalid_candidates = range_tiled >= num_candidates_tiled
nan_mask = tf.where(
invalid_candidates,
np.nan * tf.ones_like(invalid_candidates, dtype=tf.float32),
tf.ones_like(invalid_candidates, dtype=tf.float32))
keypoint_candidates_with_nans = tf.math.multiply(
keypoint_candidates, tf.expand_dims(nan_mask, -1))
# Pairwise squared distances between regressed keypoints and candidate
# keypoints (for a single keypoint type).
# Shape [batch_size, num_instances, max_candidates, num_keypoints].
regressed_keypoint_expanded = tf.expand_dims(regressed_keypoints,
axis=2)
keypoint_candidates_expanded = tf.expand_dims(
keypoint_candidates_with_nans, axis=1)
sqrd_distances = tf.math.reduce_sum(
tf.math.squared_difference(regressed_keypoint_expanded,
keypoint_candidates_expanded),
axis=-1)
distances = tf.math.sqrt(sqrd_distances)
# Determine the candidates that have the minimum distance to the regressed
# keypoints. Shape [batch_size, num_instances, num_keypoints].
min_distances = tf.math.reduce_min(distances, axis=2)
if candidate_ranking_mode == 'min_distance':
nearby_candidate_inds = tf.math.argmin(distances, axis=2)
elif candidate_ranking_mode == 'score_distance_ratio':
# tiled_keypoint_scores:
# Shape [batch_size, num_instances, max_candidates, num_keypoints].
tiled_keypoint_scores = tf.tile(
tf.expand_dims(keypoint_scores, axis=1),
multiples=[1, num_instances, 1, 1])
ranking_scores = tiled_keypoint_scores / (distances + 1e-6)
nearby_candidate_inds = tf.math.argmax(ranking_scores, axis=2)
else:
raise ValueError('Not recognized candidate_ranking_mode: %s' %
candidate_ranking_mode)
# Gather the coordinates and scores corresponding to the closest candidates.
# Shape of tensors are [batch_size, num_instances, num_keypoints, 2] and
# [batch_size, num_instances, num_keypoints], respectively.
nearby_candidate_coords, nearby_candidate_scores = (
_gather_candidates_at_indices(keypoint_candidates, keypoint_scores,
nearby_candidate_inds))
if bboxes is None:
# Create bboxes from regressed keypoints.
# Shape [batch_size * num_instances, 4].
regressed_keypoints_flattened = tf.reshape(
regressed_keypoints, [-1, num_keypoints, 2])
bboxes_flattened = keypoint_ops.keypoints_to_enclosing_bounding_boxes(
regressed_keypoints_flattened)
else:
bboxes_flattened = tf.reshape(bboxes, [-1, 4])
# Scale the bounding boxes.
# Shape [batch_size, num_instances, 4].
boxlist = box_list.BoxList(bboxes_flattened)
boxlist_scaled = box_list_ops.scale_height_width(
boxlist, box_scale, box_scale)
bboxes_scaled = boxlist_scaled.get()
bboxes = tf.reshape(bboxes_scaled, [batch_size, num_instances, 4])
# Get ymin, xmin, ymax, xmax bounding box coordinates, tiled per keypoint.
# Shape [batch_size, num_instances, num_keypoints].
bboxes_tiled = tf.tile(tf.expand_dims(bboxes, 2), [1, 1, num_keypoints, 1])
ymin, xmin, ymax, xmax = tf.unstack(bboxes_tiled, axis=3)
# Produce a mask that indicates whether the original regressed keypoint
# should be used instead of a candidate keypoint.
# Shape [batch_size, num_instances, num_keypoints].
search_radius = (
tf.math.maximum(ymax - ymin, xmax - xmin) * candidate_search_scale)
mask = (tf.cast(nearby_candidate_coords[:, :, :, 0] < ymin, tf.int32) +
tf.cast(nearby_candidate_coords[:, :, :, 0] > ymax, tf.int32) +
tf.cast(nearby_candidate_coords[:, :, :, 1] < xmin, tf.int32) +
tf.cast(nearby_candidate_coords[:, :, :, 1] > xmax, tf.int32) +
# Filter out the chosen candidate with score lower than unmatched
# keypoint score.
tf.cast(nearby_candidate_scores <
unmatched_keypoint_score, tf.int32) +
tf.cast(min_distances > search_radius, tf.int32))
mask = mask > 0
# Create refined keypoints where candidate keypoints replace original
# regressed keypoints if they are in the vicinity of the regressed keypoints.
# Shape [batch_size, num_instances, num_keypoints, 2].
refined_keypoints = tf.where(
tf.tile(tf.expand_dims(mask, -1), [1, 1, 1, 2]),
regressed_keypoints,
nearby_candidate_coords)
# Update keypoints scores. In the case where we use the original regressed
# keypoints, we use a default score of `unmatched_keypoint_score`.
# Shape [batch_size, num_instances, num_keypoints].
refined_scores = tf.where(
mask,
unmatched_keypoint_score * tf.ones_like(nearby_candidate_scores),
nearby_candidate_scores)
return refined_keypoints, refined_scores
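# Editor's note (not part of the original module): with
# candidate_ranking_mode='score_distance_ratio', a candidate scoring 0.8 at
# distance 2.0 from the regressed keypoint ranks as 0.8 / (2.0 + 1e-6) ~= 0.4, so
# a closer but slightly lower-scoring candidate can still win the argmax in
# refine_keypoints above.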
def _pad_to_full_keypoint_dim(keypoint_coords, keypoint_scores, keypoint_inds,
num_total_keypoints):
"""Scatter keypoint elements into tensors with full keypoints dimension.
Args:
keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32
tensor.
keypoint_scores: a [batch_size, num_instances, num_keypoints] float32
tensor.
keypoint_inds: a list of integers that indicate the keypoint indices for
this specific keypoint class. These indices are used to scatter into
tensors that have a `num_total_keypoints` dimension.
num_total_keypoints: The total number of keypoints that this model predicts.
Returns:
A tuple with
keypoint_coords_padded: a
[batch_size, num_instances, num_total_keypoints,2] float32 tensor.
keypoint_scores_padded: a [batch_size, num_instances, num_total_keypoints]
float32 tensor.
"""
batch_size, num_instances, _, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoint_coords))
kpt_coords_transposed = tf.transpose(keypoint_coords, [2, 0, 1, 3])
kpt_scores_transposed = tf.transpose(keypoint_scores, [2, 0, 1])
kpt_inds_tensor = tf.expand_dims(keypoint_inds, axis=-1)
kpt_coords_scattered = tf.scatter_nd(
indices=kpt_inds_tensor,
updates=kpt_coords_transposed,
shape=[num_total_keypoints, batch_size, num_instances, 2])
kpt_scores_scattered = tf.scatter_nd(
indices=kpt_inds_tensor,
updates=kpt_scores_transposed,
shape=[num_total_keypoints, batch_size, num_instances])
keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 2, 0, 3])
keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 2, 0])
return keypoint_coords_padded, keypoint_scores_padded
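# Editor's note (not part of the original module): for example, keypoint_inds of
# [1, 3] with num_total_keypoints=5 scatters the two predicted keypoint slices
# into slots 1 and 3 of the tensors padded by _pad_to_full_keypoint_dim, leaving
# the remaining slots at zero.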
def _pad_to_full_instance_dim(keypoint_coords, keypoint_scores, instance_inds,
max_instances):
"""Scatter keypoint elements into tensors with full instance dimension.
Args:
keypoint_coords: a [batch_size, num_instances, num_keypoints, 2] float32
tensor.
keypoint_scores: a [batch_size, num_instances, num_keypoints] float32
tensor.
instance_inds: a list of integers that indicate the instance indices for
these keypoints. These indices are used to scatter into tensors
that have a `max_instances` dimension.
max_instances: The maximum number of instances detected by the model.
Returns:
A tuple with
keypoint_coords_padded: a [batch_size, max_instances, num_keypoints, 2]
float32 tensor.
keypoint_scores_padded: a [batch_size, max_instances, num_keypoints]
float32 tensor.
"""
batch_size, _, num_keypoints, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoint_coords))
kpt_coords_transposed = tf.transpose(keypoint_coords, [1, 0, 2, 3])
kpt_scores_transposed = tf.transpose(keypoint_scores, [1, 0, 2])
instance_inds = tf.expand_dims(instance_inds, axis=-1)
kpt_coords_scattered = tf.scatter_nd(
indices=instance_inds,
updates=kpt_coords_transposed,
shape=[max_instances, batch_size, num_keypoints, 2])
kpt_scores_scattered = tf.scatter_nd(
indices=instance_inds,
updates=kpt_scores_transposed,
shape=[max_instances, batch_size, num_keypoints])
keypoint_coords_padded = tf.transpose(kpt_coords_scattered, [1, 0, 2, 3])
keypoint_scores_padded = tf.transpose(kpt_scores_scattered, [1, 0, 2])
return keypoint_coords_padded, keypoint_scores_padded
def _gather_candidates_at_indices(keypoint_candidates, keypoint_scores,
indices):
"""Gathers keypoint candidate coordinates and scores at indices.
Args:
keypoint_candidates: a float tensor of shape [batch_size, max_candidates,
num_keypoints, 2] with candidate coordinates.
keypoint_scores: a float tensor of shape [batch_size, max_candidates,
num_keypoints] with keypoint scores.
indices: an integer tensor of shape [batch_size, num_indices, num_keypoints]
with indices.
Returns:
A tuple with
gathered_keypoint_candidates: a float tensor of shape [batch_size,
num_indices, num_keypoints, 2] with gathered coordinates.
gathered_keypoint_scores: a float tensor of shape [batch_size,
      num_indices, num_keypoints] with the gathered scores.
"""
# Transpose tensors so that all batch dimensions are up front.
keypoint_candidates_transposed = tf.transpose(keypoint_candidates,
[0, 2, 1, 3])
keypoint_scores_transposed = tf.transpose(keypoint_scores, [0, 2, 1])
nearby_candidate_inds_transposed = tf.transpose(indices,
[0, 2, 1])
nearby_candidate_coords_tranposed = tf.gather(
keypoint_candidates_transposed, nearby_candidate_inds_transposed,
batch_dims=2)
nearby_candidate_scores_transposed = tf.gather(
keypoint_scores_transposed, nearby_candidate_inds_transposed,
batch_dims=2)
gathered_keypoint_candidates = tf.transpose(nearby_candidate_coords_tranposed,
[0, 2, 1, 3])
gathered_keypoint_scores = tf.transpose(nearby_candidate_scores_transposed,
[0, 2, 1])
return gathered_keypoint_candidates, gathered_keypoint_scores
def flattened_indices_from_row_col_indices(row_indices, col_indices, num_cols):
"""Get the index in a flattened array given row and column indices."""
return (row_indices * num_cols) + col_indices
def row_col_channel_indices_from_flattened_indices(indices, num_cols,
num_channels):
"""Computes row, column and channel indices from flattened indices.
Args:
indices: An integer tensor of any shape holding the indices in the flattened
space.
num_cols: Number of columns in the image (width).
num_channels: Number of channels in the image.
Returns:
row_indices: The row indices corresponding to each of the input indices.
Same shape as indices.
col_indices: The column indices corresponding to each of the input indices.
Same shape as indices.
    channel_indices: The channel indices corresponding to each of the input
indices.
"""
row_indices = (indices // num_channels) // num_cols
col_indices = (indices // num_channels) % num_cols
channel_indices = indices % num_channels
return row_indices, col_indices, channel_indices
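# Editor's note (not part of the original module): for num_cols=5 and
# num_channels=3, flattened index 22 decodes to row = (22 // 3) // 5 = 1,
# col = (22 // 3) % 5 = 2 and channel = 22 % 3 = 1, which inverts
# (row * num_cols + col) * num_channels + channel = (1 * 5 + 2) * 3 + 1 = 22.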
def get_valid_anchor_weights_in_flattened_image(true_image_shapes, height,
width):
"""Computes valid anchor weights for an image assuming pixels will be flattened.
This function is useful when we only want to penalize valid areas in the
image in the case when padding is used. The function assumes that the loss
function will be applied after flattening the spatial dimensions and returns
anchor weights accordingly.
Args:
true_image_shapes: An integer tensor of shape [batch_size, 3] representing
the true image shape (without padding) for each sample in the batch.
height: height of the prediction from the network.
width: width of the prediction from the network.
Returns:
valid_anchor_weights: a float tensor of shape [batch_size, height * width]
with 1s in locations where the spatial coordinates fall within the height
and width in true_image_shapes.
"""
indices = tf.reshape(tf.range(height * width), [1, -1])
batch_size = tf.shape(true_image_shapes)[0]
batch_indices = tf.ones((batch_size, 1), dtype=tf.int32) * indices
y_coords, x_coords, _ = row_col_channel_indices_from_flattened_indices(
batch_indices, width, 1)
max_y, max_x = true_image_shapes[:, 0], true_image_shapes[:, 1]
max_x = _to_float32(tf.expand_dims(max_x, 1))
max_y = _to_float32(tf.expand_dims(max_y, 1))
x_coords = _to_float32(x_coords)
y_coords = _to_float32(y_coords)
valid_mask = tf.math.logical_and(x_coords < max_x, y_coords < max_y)
return _to_float32(valid_mask)
def convert_strided_predictions_to_normalized_boxes(boxes, stride,
true_image_shapes):
"""Converts predictions in the output space to normalized boxes.
Boxes falling outside the valid image boundary are clipped to be on the
boundary.
Args:
boxes: A tensor of shape [batch_size, num_boxes, 4] holding the raw
coordinates of boxes in the model's output space.
stride: The stride in the output space.
true_image_shapes: A tensor of shape [batch_size, 3] representing the true
shape of the input not considering padding.
Returns:
boxes: A tensor of shape [batch_size, num_boxes, 4] representing the
coordinates of the normalized boxes.
"""
def _normalize_boxlist(args):
boxes, height, width = args
boxes = box_list_ops.scale(boxes, stride, stride)
boxes = box_list_ops.to_normalized_coordinates(boxes, height, width)
boxes = box_list_ops.clip_to_window(boxes, [0., 0., 1., 1.],
filter_nonoverlapping=False)
return boxes
box_lists = [box_list.BoxList(boxes) for boxes in tf.unstack(boxes, axis=0)]
true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1)
true_heights_list = tf.unstack(true_heights, axis=0)
true_widths_list = tf.unstack(true_widths, axis=0)
box_lists = list(map(_normalize_boxlist,
zip(box_lists, true_heights_list, true_widths_list)))
boxes = tf.stack([box_list_instance.get() for
box_list_instance in box_lists], axis=0)
return boxes
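# Editor's note (not part of the original module): in the conversion above with
# stride=4, an output-space box coordinate of 25 is first scaled to input pixel
# 100 by box_list_ops.scale, then divided by the true (unpadded) image size,
# e.g. 100 / 400 = 0.25 for a true height of 400, and finally clipped to the
# [0, 1] window.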
def convert_strided_predictions_to_normalized_keypoints(
keypoint_coords, keypoint_scores, stride, true_image_shapes,
clip_out_of_frame_keypoints=False):
"""Converts predictions in the output space to normalized keypoints.
If clip_out_of_frame_keypoints=False, keypoint coordinates falling outside
the valid image boundary are normalized but not clipped; If
clip_out_of_frame_keypoints=True, keypoint coordinates falling outside the
valid image boundary are clipped to the closest image boundary and the scores
will be set to 0.0.
Args:
keypoint_coords: A tensor of shape
[batch_size, num_instances, num_keypoints, 2] holding the raw coordinates
of keypoints in the model's output space.
keypoint_scores: A tensor of shape
[batch_size, num_instances, num_keypoints] holding the keypoint scores.
stride: The stride in the output space.
true_image_shapes: A tensor of shape [batch_size, 3] representing the true
shape of the input not considering padding.
clip_out_of_frame_keypoints: A boolean indicating whether keypoints outside
the image boundary should be clipped. If True, keypoint coords will be
clipped to image boundary. If False, keypoints are normalized but not
filtered based on their location.
Returns:
keypoint_coords_normalized: A tensor of shape
[batch_size, num_instances, num_keypoints, 2] representing the coordinates
of the normalized keypoints.
keypoint_scores: A tensor of shape
[batch_size, num_instances, num_keypoints] representing the updated
keypoint scores.
"""
# Flatten keypoints and scores.
batch_size, _, _, _ = (
shape_utils.combined_static_and_dynamic_shape(keypoint_coords))
# Scale and normalize keypoints.
true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1)
yscale = float(stride) / tf.cast(true_heights, tf.float32)
xscale = float(stride) / tf.cast(true_widths, tf.float32)
yx_scale = tf.stack([yscale, xscale], axis=1)
keypoint_coords_normalized = keypoint_coords * tf.reshape(
yx_scale, [batch_size, 1, 1, 2])
if clip_out_of_frame_keypoints:
# Determine the keypoints that are in the true image regions.
valid_indices = tf.logical_and(
tf.logical_and(keypoint_coords_normalized[:, :, :, 0] >= 0.0,
keypoint_coords_normalized[:, :, :, 0] <= 1.0),
tf.logical_and(keypoint_coords_normalized[:, :, :, 1] >= 0.0,
keypoint_coords_normalized[:, :, :, 1] <= 1.0))
batch_window = tf.tile(
tf.constant([[0.0, 0.0, 1.0, 1.0]], dtype=tf.float32),
multiples=[batch_size, 1])
def clip_to_window(inputs):
keypoints, window = inputs
return keypoint_ops.clip_to_window(keypoints, window)
keypoint_coords_normalized = tf.map_fn(
clip_to_window, (keypoint_coords_normalized, batch_window),
dtype=tf.float32, back_prop=False)
keypoint_scores = tf.where(valid_indices, keypoint_scores,
tf.zeros_like(keypoint_scores))
return keypoint_coords_normalized, keypoint_scores
def convert_strided_predictions_to_instance_masks(
boxes, classes, masks, true_image_shapes,
densepose_part_heatmap=None, densepose_surface_coords=None, stride=4,
mask_height=256, mask_width=256, score_threshold=0.5,
densepose_class_index=-1):
"""Converts predicted full-image masks into instance masks.
For each predicted detection box:
* Crop and resize the predicted mask (and optionally DensePose coordinates)
based on the detected bounding box coordinates and class prediction. Uses
bilinear resampling.
* Binarize the mask using the provided score threshold.
Args:
boxes: A tensor of shape [batch, max_detections, 4] holding the predicted
boxes, in normalized coordinates (relative to the true image dimensions).
classes: An integer tensor of shape [batch, max_detections] containing the
detected class for each box (0-indexed).
masks: A [batch, output_height, output_width, num_classes] float32
tensor with class probabilities.
true_image_shapes: A tensor of shape [batch, 3] representing the true
shape of the inputs not considering padding.
densepose_part_heatmap: (Optional) A [batch, output_height, output_width,
num_parts] float32 tensor with part scores (i.e. logits).
densepose_surface_coords: (Optional) A [batch, output_height, output_width,
2 * num_parts] float32 tensor with predicted part coordinates (in
vu-format).
stride: The stride in the output space.
mask_height: The desired resized height for instance masks.
mask_width: The desired resized width for instance masks.
score_threshold: The threshold at which to convert predicted mask
into foreground pixels.
densepose_class_index: The class index (0-indexed) corresponding to the
class which has DensePose labels (e.g. person class).
Returns:
A tuple of masks and surface_coords.
instance_masks: A [batch_size, max_detections, mask_height, mask_width]
uint8 tensor with predicted foreground mask for each
instance. If DensePose tensors are provided, then each pixel value in the
mask encodes the 1-indexed part.
surface_coords: A [batch_size, max_detections, mask_height, mask_width, 2]
float32 tensor with (v, u) coordinates. Note that v, u coordinates are
only defined on instance masks, and the coordinates at each location of
the foreground mask correspond to coordinates on a local part coordinate
system (the specific part can be inferred from the `instance_masks`
      output). If DensePose feature maps are not passed to this function, this
output will be None.
Raises:
ValueError: If one but not both of `densepose_part_heatmap` and
`densepose_surface_coords` is provided.
"""
batch_size, output_height, output_width, _ = (
shape_utils.combined_static_and_dynamic_shape(masks))
input_height = stride * output_height
input_width = stride * output_width
true_heights, true_widths, _ = tf.unstack(true_image_shapes, axis=1)
# If necessary, create dummy DensePose tensors to simplify the map function.
densepose_present = True
if ((densepose_part_heatmap is not None) ^
(densepose_surface_coords is not None)):
raise ValueError('To use DensePose, both `densepose_part_heatmap` and '
'`densepose_surface_coords` must be provided')
if densepose_part_heatmap is None and densepose_surface_coords is None:
densepose_present = False
densepose_part_heatmap = tf.zeros(
(batch_size, output_height, output_width, 1), dtype=tf.float32)
densepose_surface_coords = tf.zeros(
(batch_size, output_height, output_width, 2), dtype=tf.float32)
crop_and_threshold_fn = functools.partial(
crop_and_threshold_masks, input_height=input_height,
input_width=input_width, mask_height=mask_height, mask_width=mask_width,
score_threshold=score_threshold,
densepose_class_index=densepose_class_index)
instance_masks, surface_coords = shape_utils.static_or_dynamic_map_fn(
crop_and_threshold_fn,
elems=[boxes, classes, masks, densepose_part_heatmap,
densepose_surface_coords, true_heights, true_widths],
dtype=[tf.uint8, tf.float32],
back_prop=False)
surface_coords = surface_coords if densepose_present else None
return instance_masks, surface_coords
def crop_and_threshold_masks(elems, input_height, input_width, mask_height=256,
mask_width=256, score_threshold=0.5,
densepose_class_index=-1):
"""Crops and thresholds masks based on detection boxes.
Args:
elems: A tuple of
boxes - float32 tensor of shape [max_detections, 4]
classes - int32 tensor of shape [max_detections] (0-indexed)
masks - float32 tensor of shape [output_height, output_width, num_classes]
part_heatmap - float32 tensor of shape [output_height, output_width,
num_parts]
surf_coords - float32 tensor of shape [output_height, output_width,
2 * num_parts]
true_height - scalar int tensor
true_width - scalar int tensor
input_height: Input height to network.
input_width: Input width to network.
mask_height: Height for resizing mask crops.
mask_width: Width for resizing mask crops.
score_threshold: The threshold at which to convert predicted mask
into foreground pixels.
densepose_class_index: scalar int tensor with the class index (0-indexed)
for DensePose.
Returns:
A tuple of
all_instances: A [max_detections, mask_height, mask_width] uint8 tensor
with a predicted foreground mask for each instance. Background is encoded
as 0, and foreground is encoded as a positive integer. Specific part
indices are encoded as 1-indexed parts (for classes that have part
information).
surface_coords: A [max_detections, mask_height, mask_width, 2]
      float32 tensor with (v, u) coordinates for each part.
"""
(boxes, classes, masks, part_heatmap, surf_coords, true_height,
true_width) = elems
# Boxes are in normalized coordinates relative to true image shapes. Convert
# coordinates to be normalized relative to input image shapes (since masks
# may still have padding).
boxlist = box_list.BoxList(boxes)
y_scale = true_height / input_height
x_scale = true_width / input_width
boxlist = box_list_ops.scale(boxlist, y_scale, x_scale)
boxes = boxlist.get()
# Convert masks from [output_height, output_width, num_classes] to
# [num_classes, output_height, output_width, 1].
num_classes = tf.shape(masks)[-1]
masks_4d = tf.transpose(masks, perm=[2, 0, 1])[:, :, :, tf.newaxis]
# Tile part and surface coordinate masks for all classes.
part_heatmap_4d = tf.tile(part_heatmap[tf.newaxis, :, :, :],
multiples=[num_classes, 1, 1, 1])
surf_coords_4d = tf.tile(surf_coords[tf.newaxis, :, :, :],
multiples=[num_classes, 1, 1, 1])
feature_maps_concat = tf.concat([masks_4d, part_heatmap_4d, surf_coords_4d],
axis=-1)
# The following tensor has shape
# [max_detections, mask_height, mask_width, 1 + 3 * num_parts].
cropped_masks = tf2.image.crop_and_resize(
feature_maps_concat,
boxes=boxes,
box_indices=classes,
crop_size=[mask_height, mask_width],
method='bilinear')
# Split the cropped masks back into instance masks, part masks, and surface
# coordinates.
num_parts = tf.shape(part_heatmap)[-1]
instance_masks, part_heatmap_cropped, surface_coords_cropped = tf.split(
cropped_masks, [1, num_parts, 2 * num_parts], axis=-1)
# Threshold the instance masks. Resulting tensor has shape
# [max_detections, mask_height, mask_width, 1].
instance_masks_int = tf.cast(
tf.math.greater_equal(instance_masks, score_threshold), dtype=tf.int32)
# Produce a binary mask that is 1.0 only:
# - in the foreground region for an instance
# - in detections corresponding to the DensePose class
det_with_parts = tf.equal(classes, densepose_class_index)
det_with_parts = tf.cast(
tf.reshape(det_with_parts, [-1, 1, 1, 1]), dtype=tf.int32)
instance_masks_with_parts = tf.math.multiply(instance_masks_int,
det_with_parts)
# Similarly, produce a binary mask that holds the foreground masks only for
# instances without parts (i.e. non-DensePose classes).
det_without_parts = 1 - det_with_parts
instance_masks_without_parts = tf.math.multiply(instance_masks_int,
det_without_parts)
# Assemble a tensor that has standard instance segmentation masks for
# non-DensePose classes (with values in [0, 1]), and part segmentation masks
  # for DensePose classes (with values in [0, 1, ..., num_parts]).
part_mask_int_zero_indexed = tf.math.argmax(
part_heatmap_cropped, axis=-1, output_type=tf.int32)[:, :, :, tf.newaxis]
part_mask_int_one_indexed = part_mask_int_zero_indexed + 1
all_instances = (instance_masks_without_parts +
instance_masks_with_parts * part_mask_int_one_indexed)
# Gather the surface coordinates for the parts.
surface_coords_cropped = tf.reshape(
surface_coords_cropped, [-1, mask_height, mask_width, num_parts, 2])
surface_coords = gather_surface_coords_for_parts(surface_coords_cropped,
part_mask_int_zero_indexed)
surface_coords = (
surface_coords * tf.cast(instance_masks_with_parts, tf.float32))
return [tf.squeeze(all_instances, axis=3), surface_coords]
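# Note on the `all_instances` encoding produced above (illustrative example,
# derived from the code rather than the original docs): for a non-DensePose
# detection, foreground pixels are 1 and background pixels are 0; for a
# DensePose detection with num_parts=24, foreground pixels carry the 1-indexed
# part id in [1, 24] and background pixels remain 0. A pixel predicted as part
# 5 of a person instance is therefore encoded as 5, while the same pixel in a
# "car" instance mask would simply be 1.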
def gather_surface_coords_for_parts(surface_coords_cropped,
highest_scoring_part):
"""Gathers the (v, u) coordinates for the highest scoring DensePose parts.
Args:
surface_coords_cropped: A [max_detections, height, width, num_parts, 2]
float32 tensor with (v, u) surface coordinates.
highest_scoring_part: A [max_detections, height, width] integer tensor with
the highest scoring part (0-indexed) indices for each location.
Returns:
A [max_detections, height, width, 2] float32 tensor with the (v, u)
coordinates selected from the highest scoring parts.
"""
max_detections, height, width, num_parts, _ = (
shape_utils.combined_static_and_dynamic_shape(surface_coords_cropped))
flattened_surface_coords = tf.reshape(surface_coords_cropped, [-1, 2])
flattened_part_ids = tf.reshape(highest_scoring_part, [-1])
# Produce lookup indices that represent the locations of the highest scoring
# parts in the `flattened_surface_coords` tensor.
flattened_lookup_indices = (
num_parts * tf.range(max_detections * height * width) +
flattened_part_ids)
vu_coords_flattened = tf.gather(flattened_surface_coords,
flattened_lookup_indices, axis=0)
return tf.reshape(vu_coords_flattened, [max_detections, height, width, 2])
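# Worked example of the lookup-index construction above (illustrative): with
# num_parts=24, the flattened (v, u) pairs are laid out part-major within each
# pixel, so the coordinates for flat pixel index p and part k live at row
# 24 * p + k of `flattened_surface_coords`; e.g. pixel p=3 with part k=7 maps
# to row 79.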
def predicted_embeddings_at_object_centers(embedding_predictions,
y_indices, x_indices):
"""Returns the predicted embeddings at specified object centers.
Args:
embedding_predictions: A float tensor of shape [batch_size, height, width,
reid_embed_size] holding predicted embeddings.
y_indices: A [batch, num_instances] int tensor holding y indices for object
centers. These indices correspond to locations in the output feature map.
x_indices: A [batch, num_instances] int tensor holding x indices for object
centers. These indices correspond to locations in the output feature map.
Returns:
A float tensor of shape [batch_size, num_objects, reid_embed_size] where
predicted embeddings are gathered at the provided locations.
"""
batch_size, _, width, _ = _get_shape(embedding_predictions, 4)
flattened_indices = flattened_indices_from_row_col_indices(
y_indices, x_indices, width)
_, num_instances = _get_shape(flattened_indices, 2)
embeddings_flat = _flatten_spatial_dimensions(embedding_predictions)
embeddings = tf.gather(embeddings_flat, flattened_indices, batch_dims=1)
embeddings = tf.reshape(embeddings, [batch_size, num_instances, -1])
return embeddings
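# Shape sketch for predicted_embeddings_at_object_centers (illustrative): with
# embedding_predictions of shape [2, 128, 128, 64] and y_indices/x_indices of
# shape [2, 10], the returned tensor has shape [2, 10, 64], i.e. one
# reid_embed_size-dimensional embedding per object center per image.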
class ObjectDetectionParams(
collections.namedtuple('ObjectDetectionParams', [
'localization_loss', 'scale_loss_weight', 'offset_loss_weight',
'task_loss_weight'
])):
"""Namedtuple to host object detection related parameters.
This is a wrapper class over the fields that are either the hyper-parameters
or the loss functions needed for the object detection task. The class is
  immutable once constructed. Please see the __new__ function for detailed
  information on each field.
"""
__slots__ = ()
def __new__(cls,
localization_loss,
scale_loss_weight,
offset_loss_weight,
task_loss_weight=1.0):
"""Constructor with default values for ObjectDetectionParams.
Args:
localization_loss: a object_detection.core.losses.Loss object to compute
the loss for the center offset and height/width predictions in
CenterNet.
scale_loss_weight: float, The weight for localizing box size. Note that
the scale loss is dependent on the input image size, since we penalize
the raw height and width. This constant may need to be adjusted
depending on the input size.
offset_loss_weight: float, The weight for localizing center offsets.
task_loss_weight: float, the weight of the object detection loss.
Returns:
An initialized ObjectDetectionParams namedtuple.
"""
return super(ObjectDetectionParams,
cls).__new__(cls, localization_loss, scale_loss_weight,
offset_loss_weight, task_loss_weight)
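# Example construction (illustrative sketch only; assumes the L1 localization
# loss class in object_detection.core.losses is available as
# losses.L1LocalizationLoss):
#
#   od_params = ObjectDetectionParams(
#       localization_loss=losses.L1LocalizationLoss(),
#       scale_loss_weight=0.1,
#       offset_loss_weight=1.0)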
class KeypointEstimationParams(
collections.namedtuple('KeypointEstimationParams', [
'task_name', 'class_id', 'keypoint_indices', 'classification_loss',
'localization_loss', 'keypoint_labels', 'keypoint_std_dev',
'keypoint_heatmap_loss_weight', 'keypoint_offset_loss_weight',
'keypoint_regression_loss_weight', 'keypoint_candidate_score_threshold',
'heatmap_bias_init', 'num_candidates_per_keypoint', 'task_loss_weight',
'peak_max_pool_kernel_size', 'unmatched_keypoint_score', 'box_scale',
'candidate_search_scale', 'candidate_ranking_mode',
'offset_peak_radius', 'per_keypoint_offset'
])):
"""Namedtuple to host object detection related parameters.
This is a wrapper class over the fields that are either the hyper-parameters
or the loss functions needed for the keypoint estimation task. The class is
immutable after constructed. Please see the __new__ function for detailed
information for each fields.
"""
__slots__ = ()
def __new__(cls,
task_name,
class_id,
keypoint_indices,
classification_loss,
localization_loss,
keypoint_labels=None,
keypoint_std_dev=None,
keypoint_heatmap_loss_weight=1.0,
keypoint_offset_loss_weight=1.0,
keypoint_regression_loss_weight=1.0,
keypoint_candidate_score_threshold=0.1,
heatmap_bias_init=-2.19,
num_candidates_per_keypoint=100,
task_loss_weight=1.0,
peak_max_pool_kernel_size=3,
unmatched_keypoint_score=0.1,
box_scale=1.2,
candidate_search_scale=0.3,
candidate_ranking_mode='min_distance',
offset_peak_radius=0,
per_keypoint_offset=False):
"""Constructor with default values for KeypointEstimationParams.
Args:
task_name: string, the name of the task this namedtuple corresponds to.
        Note that it should be a unique identifier of the task.
      class_id: int, the ID of the class that contains the target keypoints to
        be considered in this task. For example, if the task is human pose
estimation, the class id should correspond to the "human" class. Note
that the ID is 0-based, meaning that class 0 corresponds to the first
non-background object class.
      keypoint_indices: A list of integers representing the indices of the
keypoints to be considered in this task. This is used to retrieve the
subset of the keypoints from gt_keypoints that should be considered in
this task.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
localization_loss: an object_detection.core.losses.Loss object to compute
the loss for the center offset and height/width predictions in
CenterNet.
      keypoint_labels: A list of strings representing the label text of each
        keypoint, e.g. "nose", "left_shoulder". Note that the length of this
        list should be equal to the length of keypoint_indices.
      keypoint_std_dev: A list of floats representing the standard deviations
        of the Gaussian kernels used to generate the keypoint heatmaps. This
        provides the flexibility of using a different Gaussian kernel size for
        each keypoint class.
keypoint_heatmap_loss_weight: float, The weight for the keypoint heatmap.
keypoint_offset_loss_weight: float, The weight for the keypoint offsets
loss.
keypoint_regression_loss_weight: float, The weight for keypoint regression
loss. Note that the loss is dependent on the input image size, since we
penalize the raw height and width. This constant may need to be adjusted
depending on the input size.
keypoint_candidate_score_threshold: float, The heatmap score threshold for
a keypoint to become a valid candidate.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the class prediction head. If set to None, the bias is
initialized with zeros.
num_candidates_per_keypoint: The maximum number of candidates to retrieve
for each keypoint.
task_loss_weight: float, the weight of the keypoint estimation loss.
peak_max_pool_kernel_size: Max pool kernel size to use to pull off peak
score locations in a neighborhood (independently for each keypoint
types).
unmatched_keypoint_score: The default score to use for regressed keypoints
that are not successfully snapped to a nearby candidate.
box_scale: The multiplier to expand the bounding boxes (either the
provided boxes or those which tightly cover the regressed keypoints).
candidate_search_scale: The scale parameter that multiplies the largest
dimension of a bounding box. The resulting distance becomes a search
radius for candidates in the vicinity of each regressed keypoint.
candidate_ranking_mode: One of ['min_distance', 'score_distance_ratio']
indicating how to select the keypoint candidate.
offset_peak_radius: The radius (in the unit of output pixel) around
        groundtruth heatmap peak to assign the offset targets. If set to 0, then
the offset target will only be assigned to the heatmap peak (same
behavior as the original paper).
      per_keypoint_offset: A bool indicating whether to assign offsets for each
        keypoint channel separately. If set to False, the output offset target
        has the shape [batch_size, out_height, out_width, 2] (same behavior as
        the original paper). If set to True, the output offset target has the
        shape [batch_size, out_height, out_width, 2 * num_keypoints]
        (recommended when the offset_peak_radius is not zero).
Returns:
An initialized KeypointEstimationParams namedtuple.
"""
return super(KeypointEstimationParams, cls).__new__(
cls, task_name, class_id, keypoint_indices, classification_loss,
localization_loss, keypoint_labels, keypoint_std_dev,
keypoint_heatmap_loss_weight, keypoint_offset_loss_weight,
keypoint_regression_loss_weight, keypoint_candidate_score_threshold,
heatmap_bias_init, num_candidates_per_keypoint, task_loss_weight,
peak_max_pool_kernel_size, unmatched_keypoint_score, box_scale,
candidate_search_scale, candidate_ranking_mode, offset_peak_radius,
per_keypoint_offset)
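# Example construction for a 17-keypoint human pose task (illustrative sketch
# only; the loss classes are assumed to come from object_detection.core.losses
# and the class/keypoint layout is hypothetical):
#
#   kp_params = KeypointEstimationParams(
#       task_name='human_pose',
#       class_id=0,  # Assumes "person" is class 0 in a 0-indexed label map.
#       keypoint_indices=list(range(17)),
#       classification_loss=losses.PenaltyReducedLogisticFocalLoss(
#           alpha=2.0, beta=4.0),
#       localization_loss=losses.L1LocalizationLoss(),
#       keypoint_std_dev=[1.0] * 17)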
class ObjectCenterParams(
collections.namedtuple('ObjectCenterParams', [
'classification_loss', 'object_center_loss_weight', 'heatmap_bias_init',
'min_box_overlap_iou', 'max_box_predictions', 'use_only_known_classes'
])):
"""Namedtuple to store object center prediction related parameters."""
__slots__ = ()
def __new__(cls,
classification_loss,
object_center_loss_weight,
heatmap_bias_init=-2.19,
min_box_overlap_iou=0.7,
max_box_predictions=100,
use_labeled_classes=False):
"""Constructor with default values for ObjectCenterParams.
Args:
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
object_center_loss_weight: float, The weight for the object center loss.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the object center prediction head. If set to None, the bias is
initialized with zeros.
min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes
        need to have with groundtruth boxes to not be penalized. This is used for
computing the class specific center heatmaps.
max_box_predictions: int, the maximum number of boxes to predict.
      use_labeled_classes: boolean, whether to compute the loss only for
        labeled classes.
Returns:
An initialized ObjectCenterParams namedtuple.
"""
return super(ObjectCenterParams,
cls).__new__(cls, classification_loss,
object_center_loss_weight, heatmap_bias_init,
min_box_overlap_iou, max_box_predictions,
use_labeled_classes)
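# Example construction (illustrative sketch only; the focal loss class is
# assumed to be available in object_detection.core.losses):
#
#   center_params = ObjectCenterParams(
#       classification_loss=losses.PenaltyReducedLogisticFocalLoss(
#           alpha=2.0, beta=4.0),
#       object_center_loss_weight=1.0,
#       min_box_overlap_iou=0.7,
#       max_box_predictions=100)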
class MaskParams(
collections.namedtuple('MaskParams', [
'classification_loss', 'task_loss_weight', 'mask_height', 'mask_width',
'score_threshold', 'heatmap_bias_init'
])):
"""Namedtuple to store mask prediction related parameters."""
__slots__ = ()
def __new__(cls,
classification_loss,
task_loss_weight=1.0,
mask_height=256,
mask_width=256,
score_threshold=0.5,
heatmap_bias_init=-2.19):
"""Constructor with default values for MaskParams.
Args:
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the semantic segmentation predictions in CenterNet.
task_loss_weight: float, The loss weight for the segmentation task.
mask_height: The height of the resized instance segmentation mask.
mask_width: The width of the resized instance segmentation mask.
score_threshold: The threshold at which to convert predicted mask
probabilities (after passing through sigmoid) into foreground pixels.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the semantic segmentation prediction head. If set to None, the
bias is initialized with zeros.
Returns:
An initialized MaskParams namedtuple.
"""
return super(MaskParams,
cls).__new__(cls, classification_loss,
task_loss_weight, mask_height, mask_width,
score_threshold, heatmap_bias_init)
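# Example construction (illustrative sketch only; assumes a per-pixel sigmoid
# loss such as losses.WeightedSigmoidClassificationLoss from
# object_detection.core.losses):
#
#   mask_params = MaskParams(
#       classification_loss=losses.WeightedSigmoidClassificationLoss(),
#       mask_height=256,
#       mask_width=256,
#       score_threshold=0.5)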
class DensePoseParams(
collections.namedtuple('DensePoseParams', [
'class_id', 'classification_loss', 'localization_loss',
'part_loss_weight', 'coordinate_loss_weight', 'num_parts',
'task_loss_weight', 'upsample_to_input_res', 'upsample_method',
'heatmap_bias_init'
])):
"""Namedtuple to store DensePose prediction related parameters."""
__slots__ = ()
def __new__(cls,
class_id,
classification_loss,
localization_loss,
part_loss_weight=1.0,
coordinate_loss_weight=1.0,
num_parts=24,
task_loss_weight=1.0,
upsample_to_input_res=True,
upsample_method='bilinear',
heatmap_bias_init=-2.19):
"""Constructor with default values for DensePoseParams.
Args:
class_id: the ID of the class that contains the DensePose groundtruth.
This should typically correspond to the "person" class. Note that the ID
is 0-based, meaning that class 0 corresponds to the first non-background
object class.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the body part predictions in CenterNet.
localization_loss: an object_detection.core.losses.Loss object to compute
the loss for the surface coordinate regression in CenterNet.
part_loss_weight: The loss weight to apply to part prediction.
coordinate_loss_weight: The loss weight to apply to surface coordinate
prediction.
num_parts: The number of DensePose parts to predict.
task_loss_weight: float, the loss weight for the DensePose task.
upsample_to_input_res: Whether to upsample the DensePose feature maps to
the input resolution before applying loss. Note that the prediction
outputs are still at the standard CenterNet output stride.
upsample_method: Method for upsampling DensePose feature maps. Options are
        either 'bilinear' or 'nearest'. This has no effect when
`upsample_to_input_res` is False.
heatmap_bias_init: float, the initial value of bias in the convolutional
kernel of the part prediction head. If set to None, the
bias is initialized with zeros.
Returns:
An initialized DensePoseParams namedtuple.
"""
return super(DensePoseParams,
cls).__new__(cls, class_id, classification_loss,
localization_loss, part_loss_weight,
coordinate_loss_weight, num_parts,
task_loss_weight, upsample_to_input_res,
upsample_method, heatmap_bias_init)
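# Example construction (illustrative sketch only; the loss classes are assumed
# to come from object_detection.core.losses):
#
#   densepose_params = DensePoseParams(
#       class_id=0,  # Assumes "person" is class 0 in a 0-indexed label map.
#       classification_loss=losses.WeightedSoftmaxClassificationLoss(),
#       localization_loss=losses.L1LocalizationLoss(),
#       num_parts=24,
#       upsample_to_input_res=True)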
class TrackParams(
collections.namedtuple('TrackParams', [
'num_track_ids', 'reid_embed_size', 'num_fc_layers',
'classification_loss', 'task_loss_weight'
])):
"""Namedtuple to store tracking prediction related parameters."""
__slots__ = ()
def __new__(cls,
num_track_ids,
reid_embed_size,
num_fc_layers,
classification_loss,
task_loss_weight=1.0):
"""Constructor with default values for TrackParams.
Args:
num_track_ids: int. The maximum track ID in the dataset. Used for ReID
embedding classification task.
reid_embed_size: int. The embedding size for ReID task.
num_fc_layers: int. The number of (fully-connected, batch-norm, relu)
layers for track ID classification head.
classification_loss: an object_detection.core.losses.Loss object to
compute the loss for the ReID embedding in CenterNet.
task_loss_weight: float, the loss weight for the tracking task.
Returns:
An initialized TrackParams namedtuple.
"""
return super(TrackParams,
cls).__new__(cls, num_track_ids, reid_embed_size,
num_fc_layers, classification_loss,
task_loss_weight)
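# Example construction (illustrative sketch only; the classification loss is
# assumed to come from object_detection.core.losses and the track count is
# hypothetical):
#
#   track_params = TrackParams(
#       num_track_ids=10000,
#       reid_embed_size=128,
#       num_fc_layers=1,
#       classification_loss=losses.WeightedSoftmaxClassificationLoss())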
class TemporalOffsetParams(
collections.namedtuple('TemporalOffsetParams', [
'localization_loss', 'task_loss_weight'
])):
"""Namedtuple to store temporal offset related parameters."""
__slots__ = ()
def __new__(cls,
localization_loss,
task_loss_weight=1.0):
"""Constructor with default values for TrackParams.
Args:
localization_loss: an object_detection.core.losses.Loss object to
compute the loss for the temporal offset in CenterNet.
task_loss_weight: float, the loss weight for the temporal offset
task.
Returns:
An initialized TemporalOffsetParams namedtuple.
"""
return super(TemporalOffsetParams,
cls).__new__(cls, localization_loss, task_loss_weight)
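# Example construction (illustrative sketch only; assumes
# losses.L1LocalizationLoss from object_detection.core.losses):
#
#   temporal_offset_params = TemporalOffsetParams(
#       localization_loss=losses.L1LocalizationLoss(),
#       task_loss_weight=1.0)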
# The following constants are used to generate the keys of the
# (prediction, loss, target assigner,...) dictionaries used in CenterNetMetaArch
# class.
DETECTION_TASK = 'detection_task'
OBJECT_CENTER = 'object_center'
BOX_SCALE = 'box/scale'
BOX_OFFSET = 'box/offset'
KEYPOINT_REGRESSION = 'keypoint/regression'
KEYPOINT_HEATMAP = 'keypoint/heatmap'
KEYPOINT_OFFSET = 'keypoint/offset'
SEGMENTATION_TASK = 'segmentation_task'
SEGMENTATION_HEATMAP = 'segmentation/heatmap'
DENSEPOSE_TASK = 'densepose_task'
DENSEPOSE_HEATMAP = 'densepose/heatmap'
DENSEPOSE_REGRESSION = 'densepose/regression'
LOSS_KEY_PREFIX = 'Loss'
TRACK_TASK = 'track_task'
TRACK_REID = 'track/reid'
TEMPORALOFFSET_TASK = 'temporal_offset_task'
TEMPORAL_OFFSET = 'track/offset'
def get_keypoint_name(task_name, head_name):
return '%s/%s' % (task_name, head_name)
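# For example (illustrative), get_keypoint_name('human_pose', KEYPOINT_HEATMAP)
# returns 'human_pose/keypoint/heatmap', the key under which that task's
# heatmap head appears in the prediction and loss dictionaries.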
def get_num_instances_from_weights(groundtruth_weights_list):
"""Computes the number of instances/boxes from the weights in a batch.
Args:
groundtruth_weights_list: A list of float tensors with shape
[max_num_instances] representing whether there is an actual instance in
the image (with non-zero value) or is padded to match the
max_num_instances (with value 0.0). The list represents the batch
dimension.
Returns:
    A scalar integer tensor indicating how many instances/boxes are in the
    images in the batch. Note that this function is usually used to normalize
    the loss, so the minimum return value is clamped to 1 to avoid division by
    zero.
"""
num_instances = tf.reduce_sum(
[tf.math.count_nonzero(w) for w in groundtruth_weights_list])
num_instances = tf.maximum(num_instances, 1)
return num_instances
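# Worked example (illustrative): for a batch of two images padded to
# max_num_instances=3 with weights [[1., 1., 0.], [1., 0., 0.]], the per-image
# nonzero counts are 2 and 1, so the function returns 3. With all-zero weights
# it returns 1 because of the tf.maximum clamp.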
class CenterNetMetaArch(model.DetectionModel):
"""The CenterNet meta architecture [1].
[1]: https://arxiv.org/abs/1904.07850
"""
def __init__(self,
is_training,
add_summaries,
num_classes,
feature_extractor,
image_resizer_fn,
object_center_params,
object_detection_params=None,
keypoint_params_dict=None,
mask_params=None,
densepose_params=None,
track_params=None,
temporal_offset_params=None):
"""Initializes a CenterNet model.
Args:
is_training: Set to True if this model is being built for training.
add_summaries: Whether to add tf summaries in the model.
num_classes: int, The number of classes that the model should predict.
feature_extractor: A CenterNetFeatureExtractor to use to extract features
from an image.
image_resizer_fn: a callable for image resizing. This callable always
takes a rank-3 image tensor (corresponding to a single image) and
returns a rank-3 image tensor, possibly with new spatial dimensions and
a 1-D tensor of shape [3] indicating shape of true image within the
resized image tensor as the resized image tensor could be padded. See
builders/image_resizer_builder.py.
object_center_params: An ObjectCenterParams namedtuple. This object holds
the hyper-parameters for object center prediction. This is required by
either object detection or keypoint estimation tasks.
object_detection_params: An ObjectDetectionParams namedtuple. This object
holds the hyper-parameters necessary for object detection. Please see
the class definition for more details.
keypoint_params_dict: A dictionary that maps from task name to the
corresponding KeypointEstimationParams namedtuple. This object holds the
hyper-parameters necessary for multiple keypoint estimations. Please
see the class definition for more details.
mask_params: A MaskParams namedtuple. This object
holds the hyper-parameters for segmentation. Please see the class
definition for more details.
densepose_params: A DensePoseParams namedtuple. This object holds the
hyper-parameters for DensePose prediction. Please see the class
definition for more details. Note that if this is provided, it is
expected that `mask_params` is also provided.
track_params: A TrackParams namedtuple. This object
holds the hyper-parameters for tracking. Please see the class
definition for more details.
temporal_offset_params: A TemporalOffsetParams namedtuple. This object
holds the hyper-parameters for offset prediction based tracking.
"""
assert object_detection_params or keypoint_params_dict
# Shorten the name for convenience and better formatting.
self._is_training = is_training
# The Objects as Points paper attaches loss functions to multiple
    # (`num_feature_outputs`) feature maps in the backbone. E.g.
# for the hourglass backbone, `num_feature_outputs` is 2.
self._feature_extractor = feature_extractor
self._num_feature_outputs = feature_extractor.num_feature_outputs
self._stride = self._feature_extractor.out_stride
self._image_resizer_fn = image_resizer_fn
self._center_params = object_center_params
self._od_params = object_detection_params
self._kp_params_dict = keypoint_params_dict
self._mask_params = mask_params
if densepose_params is not None and mask_params is None:
raise ValueError('To run DensePose prediction, `mask_params` must also '
'be supplied.')
self._densepose_params = densepose_params
self._track_params = track_params
self._temporal_offset_params = temporal_offset_params
# Construct the prediction head nets.
self._prediction_head_dict = self._construct_prediction_heads(
num_classes,
self._num_feature_outputs,
class_prediction_bias_init=self._center_params.heatmap_bias_init)
# Initialize the target assigners.
self._target_assigner_dict = self._initialize_target_assigners(
stride=self._stride,
min_box_overlap_iou=self._center_params.min_box_overlap_iou)
# Will be used in VOD single_frame_meta_arch for tensor reshape.
self._batched_prediction_tensor_names = []
super(CenterNetMetaArch, self).__init__(num_classes)
@property
def batched_prediction_tensor_names(self):
if not self._batched_prediction_tensor_names:
raise RuntimeError('Must call predict() method to get batched prediction '
'tensor names.')
return self._batched_prediction_tensor_names
def _construct_prediction_heads(self, num_classes, num_feature_outputs,
class_prediction_bias_init):
"""Constructs the prediction heads based on the specific parameters.
Args:
num_classes: An integer indicating how many classes in total to predict.
num_feature_outputs: An integer indicating how many feature outputs to use
for calculating the loss. The Objects as Points paper attaches loss
        functions to multiple (`num_feature_outputs`) feature maps in the
backbone. E.g. for the hourglass backbone, `num_feature_outputs` is 2.
class_prediction_bias_init: float, the initial value of bias in the
convolutional kernel of the class prediction head. If set to None, the
bias is initialized with zeros.
Returns:
A dictionary of keras modules generated by calling make_prediction_net
function. It will also create and set a private member of the class when
learning the tracking task.
"""
prediction_heads = {}
prediction_heads[OBJECT_CENTER] = [
make_prediction_net(num_classes, bias_fill=class_prediction_bias_init)
for _ in range(num_feature_outputs)
]
if self._od_params is not None:
prediction_heads[BOX_SCALE] = [
make_prediction_net(NUM_SIZE_CHANNELS)
for _ in range(num_feature_outputs)
]
prediction_heads[BOX_OFFSET] = [
make_prediction_net(NUM_OFFSET_CHANNELS)
for _ in range(num_feature_outputs)
]
if self._kp_params_dict is not None:
for task_name, kp_params in self._kp_params_dict.items():
num_keypoints = len(kp_params.keypoint_indices)
prediction_heads[get_keypoint_name(task_name, KEYPOINT_HEATMAP)] = [
make_prediction_net(
num_keypoints, bias_fill=kp_params.heatmap_bias_init)
for _ in range(num_feature_outputs)
]
prediction_heads[get_keypoint_name(task_name, KEYPOINT_REGRESSION)] = [
make_prediction_net(NUM_OFFSET_CHANNELS * num_keypoints)
for _ in range(num_feature_outputs)
]
if kp_params.per_keypoint_offset:
prediction_heads[get_keypoint_name(task_name, KEYPOINT_OFFSET)] = [
make_prediction_net(NUM_OFFSET_CHANNELS * num_keypoints)
for _ in range(num_feature_outputs)
]
else:
prediction_heads[get_keypoint_name(task_name, KEYPOINT_OFFSET)] = [
make_prediction_net(NUM_OFFSET_CHANNELS)
for _ in range(num_feature_outputs)
]
if self._mask_params is not None:
prediction_heads[SEGMENTATION_HEATMAP] = [
make_prediction_net(num_classes,
bias_fill=self._mask_params.heatmap_bias_init)
for _ in range(num_feature_outputs)]
if self._densepose_params is not None:
prediction_heads[DENSEPOSE_HEATMAP] = [
make_prediction_net( # pylint: disable=g-complex-comprehension
self._densepose_params.num_parts,
bias_fill=self._densepose_params.heatmap_bias_init)
for _ in range(num_feature_outputs)]
prediction_heads[DENSEPOSE_REGRESSION] = [
make_prediction_net(2 * self._densepose_params.num_parts)
for _ in range(num_feature_outputs)
]
if self._track_params is not None:
prediction_heads[TRACK_REID] = [
make_prediction_net(self._track_params.reid_embed_size)
for _ in range(num_feature_outputs)]
# Creates a classification network to train object embeddings by learning
# a projection from embedding space to object track ID space.
self.track_reid_classification_net = tf.keras.Sequential()
for _ in range(self._track_params.num_fc_layers - 1):
self.track_reid_classification_net.add(
tf.keras.layers.Dense(self._track_params.reid_embed_size,
input_shape=(
self._track_params.reid_embed_size,)))
self.track_reid_classification_net.add(
tf.keras.layers.BatchNormalization())
self.track_reid_classification_net.add(tf.keras.layers.ReLU())
self.track_reid_classification_net.add(
tf.keras.layers.Dense(self._track_params.num_track_ids,
input_shape=(
self._track_params.reid_embed_size,)))
if self._temporal_offset_params is not None:
prediction_heads[TEMPORAL_OFFSET] = [
make_prediction_net(NUM_OFFSET_CHANNELS)
for _ in range(num_feature_outputs)
]
return prediction_heads
def _initialize_target_assigners(self, stride, min_box_overlap_iou):
"""Initializes the target assigners and puts them in a dictionary.
Args:
stride: An integer indicating the stride of the image.
min_box_overlap_iou: float, the minimum IOU overlap that predicted boxes
        need to have with groundtruth boxes to not be penalized. This is used for
computing the class specific center heatmaps.
Returns:
A dictionary of initialized target assigners for each task.
"""
target_assigners = {}
target_assigners[OBJECT_CENTER] = (
cn_assigner.CenterNetCenterHeatmapTargetAssigner(
stride, min_box_overlap_iou))
if self._od_params is not None:
target_assigners[DETECTION_TASK] = (
cn_assigner.CenterNetBoxTargetAssigner(stride))
if self._kp_params_dict is not None:
for task_name, kp_params in self._kp_params_dict.items():
target_assigners[task_name] = (
cn_assigner.CenterNetKeypointTargetAssigner(
stride=stride,
class_id=kp_params.class_id,
keypoint_indices=kp_params.keypoint_indices,
keypoint_std_dev=kp_params.keypoint_std_dev,
peak_radius=kp_params.offset_peak_radius,
per_keypoint_offset=kp_params.per_keypoint_offset))
if self._mask_params is not None:
target_assigners[SEGMENTATION_TASK] = (
cn_assigner.CenterNetMaskTargetAssigner(stride))
if self._densepose_params is not None:
dp_stride = 1 if self._densepose_params.upsample_to_input_res else stride
target_assigners[DENSEPOSE_TASK] = (
cn_assigner.CenterNetDensePoseTargetAssigner(dp_stride))
if self._track_params is not None:
target_assigners[TRACK_TASK] = (
cn_assigner.CenterNetTrackTargetAssigner(
stride, self._track_params.num_track_ids))
if self._temporal_offset_params is not None:
target_assigners[TEMPORALOFFSET_TASK] = (
cn_assigner.CenterNetTemporalOffsetTargetAssigner(stride))
return target_assigners
def _compute_object_center_loss(self, input_height, input_width,
object_center_predictions, per_pixel_weights):
"""Computes the object center loss.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
object_center_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_classes] representing the object center
feature maps.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A float scalar tensor representing the object center loss per instance.
"""
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
if self._center_params.use_only_known_classes:
gt_labeled_classes_list = self.groundtruth_lists(
fields.InputDataFields.groundtruth_labeled_classes)
batch_labeled_classes = tf.stack(gt_labeled_classes_list, axis=0)
batch_labeled_classes_shape = tf.shape(batch_labeled_classes)
batch_labeled_classes = tf.reshape(
batch_labeled_classes,
[batch_labeled_classes_shape[0], 1, batch_labeled_classes_shape[-1]])
per_pixel_weights = per_pixel_weights * batch_labeled_classes
# Convert the groundtruth to targets.
assigner = self._target_assigner_dict[OBJECT_CENTER]
heatmap_targets = assigner.assign_center_targets_from_boxes(
height=input_height,
width=input_width,
gt_boxes_list=gt_boxes_list,
gt_classes_list=gt_classes_list,
gt_weights_list=gt_weights_list)
flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
loss = 0.0
object_center_loss = self._center_params.classification_loss
# Loop through each feature output head.
for pred in object_center_predictions:
pred = _flatten_spatial_dimensions(pred)
loss += object_center_loss(
pred, flattened_heatmap_targets, weights=per_pixel_weights)
loss_per_instance = tf.reduce_sum(loss) / (
float(len(object_center_predictions)) * num_boxes)
return loss_per_instance
def _compute_object_detection_losses(self, input_height, input_width,
prediction_dict, per_pixel_weights):
"""Computes the weighted object detection losses.
This wrapper function calls the function which computes the losses for
object detection task and applies corresponding weights to the losses.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: A dictionary holding predicted tensors output by
"predict" function. See "predict" function for more detailed
description.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A dictionary of scalar float tensors representing the weighted losses for
object detection task:
BOX_SCALE: the weighted scale (height/width) loss.
BOX_OFFSET: the weighted object offset loss.
"""
od_scale_loss, od_offset_loss = self._compute_box_scale_and_offset_loss(
scale_predictions=prediction_dict[BOX_SCALE],
offset_predictions=prediction_dict[BOX_OFFSET],
input_height=input_height,
input_width=input_width)
loss_dict = {}
loss_dict[BOX_SCALE] = (
self._od_params.scale_loss_weight * od_scale_loss)
loss_dict[BOX_OFFSET] = (
self._od_params.offset_loss_weight * od_offset_loss)
return loss_dict
def _compute_box_scale_and_offset_loss(self, input_height, input_width,
scale_predictions, offset_predictions):
"""Computes the scale loss of the object detection task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
scale_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2] representing the prediction heads of the model
for object scale (i.e height and width).
offset_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2] representing the prediction heads of the model
for object offset.
Returns:
A tuple of two losses:
scale_loss: A float scalar tensor representing the object height/width
loss normalized by total number of boxes.
offset_loss: A float scalar tensor representing the object offset loss
          normalized by total number of boxes.
"""
# TODO(vighneshb) Explore a size invariant version of scale loss.
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
num_predictions = float(len(scale_predictions))
assigner = self._target_assigner_dict[DETECTION_TASK]
(batch_indices, batch_height_width_targets, batch_offset_targets,
batch_weights) = assigner.assign_size_and_offset_targets(
height=input_height,
width=input_width,
gt_boxes_list=gt_boxes_list,
gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, -1)
scale_loss = 0
offset_loss = 0
localization_loss_fn = self._od_params.localization_loss
for scale_pred, offset_pred in zip(scale_predictions, offset_predictions):
# Compute the scale loss.
scale_pred = cn_assigner.get_batch_predictions_from_indices(
scale_pred, batch_indices)
scale_loss += localization_loss_fn(
scale_pred, batch_height_width_targets, weights=batch_weights)
# Compute the offset loss.
offset_pred = cn_assigner.get_batch_predictions_from_indices(
offset_pred, batch_indices)
offset_loss += localization_loss_fn(
offset_pred, batch_offset_targets, weights=batch_weights)
scale_loss = tf.reduce_sum(scale_loss) / (
num_predictions * num_boxes)
offset_loss = tf.reduce_sum(offset_loss) / (
num_predictions * num_boxes)
return scale_loss, offset_loss
def _compute_keypoint_estimation_losses(self, task_name, input_height,
input_width, prediction_dict,
per_pixel_weights):
"""Computes the weighted keypoint losses."""
kp_params = self._kp_params_dict[task_name]
heatmap_key = get_keypoint_name(task_name, KEYPOINT_HEATMAP)
offset_key = get_keypoint_name(task_name, KEYPOINT_OFFSET)
regression_key = get_keypoint_name(task_name, KEYPOINT_REGRESSION)
heatmap_loss = self._compute_kp_heatmap_loss(
input_height=input_height,
input_width=input_width,
task_name=task_name,
heatmap_predictions=prediction_dict[heatmap_key],
classification_loss_fn=kp_params.classification_loss,
per_pixel_weights=per_pixel_weights)
offset_loss = self._compute_kp_offset_loss(
input_height=input_height,
input_width=input_width,
task_name=task_name,
offset_predictions=prediction_dict[offset_key],
localization_loss_fn=kp_params.localization_loss)
reg_loss = self._compute_kp_regression_loss(
input_height=input_height,
input_width=input_width,
task_name=task_name,
regression_predictions=prediction_dict[regression_key],
localization_loss_fn=kp_params.localization_loss)
loss_dict = {}
loss_dict[heatmap_key] = (
kp_params.keypoint_heatmap_loss_weight * heatmap_loss)
loss_dict[offset_key] = (
kp_params.keypoint_offset_loss_weight * offset_loss)
loss_dict[regression_key] = (
kp_params.keypoint_regression_loss_weight * reg_loss)
return loss_dict
def _compute_kp_heatmap_loss(self, input_height, input_width, task_name,
heatmap_predictions, classification_loss_fn,
per_pixel_weights):
"""Computes the heatmap loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
heatmap_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_keypoints] representing the prediction heads
of the model for keypoint heatmap.
classification_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the class predictions in CenterNet.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
loss: A float scalar tensor representing the object keypoint heatmap loss
normalized by number of instances.
"""
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
assigner = self._target_assigner_dict[task_name]
(keypoint_heatmap, num_instances_per_kp_type,
valid_mask_batch) = assigner.assign_keypoint_heatmap_targets(
height=input_height,
width=input_width,
gt_keypoints_list=gt_keypoints_list,
gt_weights_list=gt_weights_list,
gt_classes_list=gt_classes_list,
gt_boxes_list=gt_boxes_list)
flattened_valid_mask = _flatten_spatial_dimensions(
tf.expand_dims(valid_mask_batch, axis=-1))
    flattened_heatmap_targets = _flatten_spatial_dimensions(keypoint_heatmap)
    # Sum over the number of instances per keypoint type to get the total
    # number of keypoints. Note that this is used to normalize the loss and we
    # clamp the minimum value to 1 to avoid a degenerate loss value when no
    # keypoints are present in the image batch.
num_instances = tf.maximum(
tf.cast(tf.reduce_sum(num_instances_per_kp_type), dtype=tf.float32),
1.0)
loss = 0.0
# Loop through each feature output head.
for pred in heatmap_predictions:
pred = _flatten_spatial_dimensions(pred)
unweighted_loss = classification_loss_fn(
pred,
          flattened_heatmap_targets,
weights=tf.ones_like(per_pixel_weights))
# Apply the weights after the loss function to have full control over it.
loss += unweighted_loss * per_pixel_weights * flattened_valid_mask
loss = tf.reduce_sum(loss) / (
float(len(heatmap_predictions)) * num_instances)
return loss
def _compute_kp_offset_loss(self, input_height, input_width, task_name,
offset_predictions, localization_loss_fn):
"""Computes the offset loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
offset_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2] representing the prediction heads of the model
for keypoint offset.
localization_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the keypoint offset predictions in CenterNet.
Returns:
loss: A float scalar tensor representing the keypoint offset loss
normalized by number of total keypoints.
"""
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
assigner = self._target_assigner_dict[task_name]
(batch_indices, batch_offsets,
batch_weights) = assigner.assign_keypoints_offset_targets(
height=input_height,
width=input_width,
gt_keypoints_list=gt_keypoints_list,
gt_weights_list=gt_weights_list,
gt_classes_list=gt_classes_list)
# Keypoint offset loss.
loss = 0.0
for prediction in offset_predictions:
batch_size, out_height, out_width, channels = _get_shape(prediction, 4)
if channels > 2:
prediction = tf.reshape(
prediction, shape=[batch_size, out_height, out_width, -1, 2])
prediction = cn_assigner.get_batch_predictions_from_indices(
prediction, batch_indices)
      # The shapes passed here do not match the loss function's docstring, but
      # the loss still computes the correct value.
unweighted_loss = localization_loss_fn(
prediction,
batch_offsets,
weights=tf.expand_dims(tf.ones_like(batch_weights), -1))
# Apply the weights after the loss function to have full control over it.
loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1)
loss = tf.reduce_sum(loss) / (
float(len(offset_predictions)) *
tf.maximum(tf.reduce_sum(batch_weights), 1.0))
return loss
def _compute_kp_regression_loss(self, input_height, input_width, task_name,
regression_predictions, localization_loss_fn):
"""Computes the keypoint regression loss of the keypoint estimation task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
task_name: A string representing the name of the keypoint task.
regression_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2 * num_keypoints] representing the prediction
heads of the model for keypoint regression offset.
localization_loss_fn: An object_detection.core.losses.Loss object to
compute the loss for the keypoint regression offset predictions in
CenterNet.
Returns:
loss: A float scalar tensor representing the keypoint regression offset
loss normalized by number of total keypoints.
"""
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_keypoints_list = self.groundtruth_lists(fields.BoxListFields.keypoints)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
# keypoint regression offset loss.
assigner = self._target_assigner_dict[task_name]
(batch_indices, batch_regression_offsets,
batch_weights) = assigner.assign_joint_regression_targets(
height=input_height,
width=input_width,
gt_keypoints_list=gt_keypoints_list,
gt_classes_list=gt_classes_list,
gt_weights_list=gt_weights_list,
gt_boxes_list=gt_boxes_list)
loss = 0.0
for prediction in regression_predictions:
batch_size, out_height, out_width, _ = _get_shape(prediction, 4)
reshaped_prediction = tf.reshape(
prediction, shape=[batch_size, out_height, out_width, -1, 2])
reg_prediction = cn_assigner.get_batch_predictions_from_indices(
reshaped_prediction, batch_indices)
unweighted_loss = localization_loss_fn(
reg_prediction,
batch_regression_offsets,
weights=tf.expand_dims(tf.ones_like(batch_weights), -1))
# Apply the weights after the loss function to have full control over it.
loss += batch_weights * tf.reduce_sum(unweighted_loss, axis=1)
loss = tf.reduce_sum(loss) / (
float(len(regression_predictions)) *
tf.maximum(tf.reduce_sum(batch_weights), 1.0))
return loss
def _compute_segmentation_losses(self, prediction_dict, per_pixel_weights):
"""Computes all the losses associated with segmentation.
Args:
prediction_dict: The dictionary returned from the predict() method.
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A dictionary with segmentation losses.
"""
segmentation_heatmap = prediction_dict[SEGMENTATION_HEATMAP]
mask_loss = self._compute_mask_loss(
segmentation_heatmap, per_pixel_weights)
losses = {
SEGMENTATION_HEATMAP: mask_loss
}
return losses
def _compute_mask_loss(self, segmentation_predictions,
per_pixel_weights):
"""Computes the mask loss.
Args:
segmentation_predictions: A list of float32 tensors of shape [batch_size,
out_height, out_width, num_classes].
per_pixel_weights: A float tensor of shape [batch_size,
out_height * out_width, 1] with 1s in locations where the spatial
coordinates fall within the height and width in true_image_shapes.
Returns:
A float scalar tensor representing the mask loss.
"""
gt_masks_list = self.groundtruth_lists(fields.BoxListFields.masks)
gt_classes_list = self.groundtruth_lists(fields.BoxListFields.classes)
# Convert the groundtruth to targets.
assigner = self._target_assigner_dict[SEGMENTATION_TASK]
heatmap_targets = assigner.assign_segmentation_targets(
gt_masks_list=gt_masks_list,
gt_classes_list=gt_classes_list)
flattened_heatmap_targets = _flatten_spatial_dimensions(heatmap_targets)
loss = 0.0
mask_loss_fn = self._mask_params.classification_loss
total_pixels_in_loss = tf.reduce_sum(per_pixel_weights)
# Loop through each feature output head.
for pred in segmentation_predictions:
pred = _flatten_spatial_dimensions(pred)
loss += mask_loss_fn(
pred, flattened_heatmap_targets, weights=per_pixel_weights)
# TODO(ronnyvotel): Consider other ways to normalize loss.
total_loss = tf.reduce_sum(loss) / (
float(len(segmentation_predictions)) * total_pixels_in_loss)
return total_loss
def _compute_densepose_losses(self, input_height, input_width,
prediction_dict):
"""Computes the weighted DensePose losses.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: A dictionary holding predicted tensors output by the
"predict" function. See the "predict" function for more detailed
description.
Returns:
A dictionary of scalar float tensors representing the weighted losses for
the DensePose task:
DENSEPOSE_HEATMAP: the weighted part segmentation loss.
DENSEPOSE_REGRESSION: the weighted part surface coordinate loss.
"""
dp_heatmap_loss, dp_regression_loss = (
self._compute_densepose_part_and_coordinate_losses(
input_height=input_height,
input_width=input_width,
part_predictions=prediction_dict[DENSEPOSE_HEATMAP],
surface_coord_predictions=prediction_dict[DENSEPOSE_REGRESSION]))
loss_dict = {}
loss_dict[DENSEPOSE_HEATMAP] = (
self._densepose_params.part_loss_weight * dp_heatmap_loss)
loss_dict[DENSEPOSE_REGRESSION] = (
self._densepose_params.coordinate_loss_weight * dp_regression_loss)
return loss_dict
def _compute_densepose_part_and_coordinate_losses(
self, input_height, input_width, part_predictions,
surface_coord_predictions):
"""Computes the individual losses for the DensePose task.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
part_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, num_parts].
surface_coord_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, 2 * num_parts].
Returns:
A tuple with two scalar loss tensors: part_prediction_loss and
surface_coord_loss.
"""
gt_dp_num_points_list = self.groundtruth_lists(
fields.BoxListFields.densepose_num_points)
gt_dp_part_ids_list = self.groundtruth_lists(
fields.BoxListFields.densepose_part_ids)
gt_dp_surface_coords_list = self.groundtruth_lists(
fields.BoxListFields.densepose_surface_coords)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
assigner = self._target_assigner_dict[DENSEPOSE_TASK]
batch_indices, batch_part_ids, batch_surface_coords, batch_weights = (
assigner.assign_part_and_coordinate_targets(
height=input_height,
width=input_width,
gt_dp_num_points_list=gt_dp_num_points_list,
gt_dp_part_ids_list=gt_dp_part_ids_list,
gt_dp_surface_coords_list=gt_dp_surface_coords_list,
gt_weights_list=gt_weights_list))
part_prediction_loss = 0
surface_coord_loss = 0
classification_loss_fn = self._densepose_params.classification_loss
localization_loss_fn = self._densepose_params.localization_loss
num_predictions = float(len(part_predictions))
num_valid_points = tf.math.count_nonzero(batch_weights)
num_valid_points = tf.cast(tf.math.maximum(num_valid_points, 1), tf.float32)
for part_pred, surface_coord_pred in zip(part_predictions,
surface_coord_predictions):
# Potentially upsample the feature maps, so that better quality (i.e.
# higher res) groundtruth can be applied.
if self._densepose_params.upsample_to_input_res:
part_pred = tf.keras.layers.UpSampling2D(
self._stride, interpolation=self._densepose_params.upsample_method)(
part_pred)
surface_coord_pred = tf.keras.layers.UpSampling2D(
self._stride, interpolation=self._densepose_params.upsample_method)(
surface_coord_pred)
# Compute the part prediction loss.
part_pred = cn_assigner.get_batch_predictions_from_indices(
part_pred, batch_indices[:, 0:3])
part_prediction_loss += classification_loss_fn(
part_pred[:, tf.newaxis, :],
batch_part_ids[:, tf.newaxis, :],
weights=batch_weights[:, tf.newaxis, tf.newaxis])
# Compute the surface coordinate loss.
batch_size, out_height, out_width, _ = _get_shape(
surface_coord_pred, 4)
surface_coord_pred = tf.reshape(
surface_coord_pred, [batch_size, out_height, out_width, -1, 2])
surface_coord_pred = cn_assigner.get_batch_predictions_from_indices(
surface_coord_pred, batch_indices)
surface_coord_loss += localization_loss_fn(
surface_coord_pred,
batch_surface_coords,
weights=batch_weights[:, tf.newaxis])
part_prediction_loss = tf.reduce_sum(part_prediction_loss) / (
num_predictions * num_valid_points)
surface_coord_loss = tf.reduce_sum(surface_coord_loss) / (
num_predictions * num_valid_points)
return part_prediction_loss, surface_coord_loss
def _compute_track_losses(self, input_height, input_width, prediction_dict):
"""Computes all the losses associated with tracking.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: The dictionary returned from the predict() method.
Returns:
A dictionary with tracking losses.
"""
object_reid_predictions = prediction_dict[TRACK_REID]
embedding_loss = self._compute_track_embedding_loss(
input_height=input_height,
input_width=input_width,
object_reid_predictions=object_reid_predictions)
losses = {
TRACK_REID: embedding_loss
}
return losses
def _compute_track_embedding_loss(self, input_height, input_width,
object_reid_predictions):
"""Computes the object ReID loss.
The embedding is trained as a classification task where the target is the
ID of each track among all tracks in the whole dataset.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
object_reid_predictions: A list of float tensors of shape [batch_size,
out_height, out_width, reid_embed_size] representing the object
embedding feature maps.
Returns:
A float scalar tensor representing the object ReID loss per instance.
"""
gt_track_ids_list = self.groundtruth_lists(fields.BoxListFields.track_ids)
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = _to_float32(get_num_instances_from_weights(gt_weights_list))
# Convert the groundtruth to targets.
assigner = self._target_assigner_dict[TRACK_TASK]
batch_indices, batch_weights, track_targets = assigner.assign_track_targets(
height=input_height,
width=input_width,
gt_track_ids_list=gt_track_ids_list,
gt_boxes_list=gt_boxes_list,
gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, -1)
loss = 0.0
object_reid_loss = self._track_params.classification_loss
# Loop through each feature output head.
for pred in object_reid_predictions:
embedding_pred = cn_assigner.get_batch_predictions_from_indices(
pred, batch_indices)
reid_classification = self.track_reid_classification_net(embedding_pred)
loss += object_reid_loss(
reid_classification, track_targets, weights=batch_weights)
loss_per_instance = tf.reduce_sum(loss) / (
float(len(object_reid_predictions)) * num_boxes)
return loss_per_instance
def _compute_temporal_offset_loss(self, input_height,
input_width, prediction_dict):
"""Computes the temporal offset loss for tracking.
Args:
input_height: An integer scalar tensor representing input image height.
input_width: An integer scalar tensor representing input image width.
prediction_dict: The dictionary returned from the predict() method.
Returns:
A dictionary with track/temporal_offset losses.
"""
gt_boxes_list = self.groundtruth_lists(fields.BoxListFields.boxes)
gt_offsets_list = self.groundtruth_lists(
fields.BoxListFields.temporal_offsets)
gt_match_list = self.groundtruth_lists(
fields.BoxListFields.track_match_flags)
gt_weights_list = self.groundtruth_lists(fields.BoxListFields.weights)
num_boxes = tf.cast(
get_num_instances_from_weights(gt_weights_list), tf.float32)
offset_predictions = prediction_dict[TEMPORAL_OFFSET]
num_predictions = float(len(offset_predictions))
assigner = self._target_assigner_dict[TEMPORALOFFSET_TASK]
(batch_indices, batch_offset_targets,
batch_weights) = assigner.assign_temporal_offset_targets(
height=input_height,
width=input_width,
gt_boxes_list=gt_boxes_list,
gt_offsets_list=gt_offsets_list,
gt_match_list=gt_match_list,
gt_weights_list=gt_weights_list)
batch_weights = tf.expand_dims(batch_weights, -1)
offset_loss_fn = self._temporal_offset_params.localization_loss
loss_dict = {}
offset_loss = 0
for offset_pred in offset_predictions:
offset_pred = cn_assigner.get_batch_predictions_from_indices(
offset_pred, batch_indices)
offset_loss += offset_loss_fn(offset_pred[:, None],
batch_offset_targets[:, None],
weights=batch_weights)
offset_loss = tf.reduce_sum(offset_loss) / (num_predictions * num_boxes)
loss_dict[TEMPORAL_OFFSET] = offset_loss
return loss_dict
def preprocess(self, inputs):
outputs = shape_utils.resize_images_and_return_shapes(
inputs, self._image_resizer_fn)
resized_inputs, true_image_shapes = outputs
return (self._feature_extractor.preprocess(resized_inputs),
true_image_shapes)
def predict(self, preprocessed_inputs, _):
"""Predicts CenterNet prediction tensors given an input batch.
Feature extractors are free to produce predictions from multiple feature
maps and therefore we return a dictionary mapping strings to lists.
E.g. the hourglass backbone produces two feature maps.
Args:
preprocessed_inputs: a [batch, height, width, channels] float32 tensor
representing a batch of images.
Returns:
prediction_dict: a dictionary holding predicted tensors with
'preprocessed_inputs' - The input image after being resized and
preprocessed by the feature extractor.
'object_center' - A list of size num_feature_outputs containing
float tensors of size [batch_size, output_height, output_width,
num_classes] representing the predicted object center heatmap logits.
'box/scale' - [optional] A list of size num_feature_outputs holding
float tensors of size [batch_size, output_height, output_width, 2]
representing the predicted box height and width at each output
        location. This field exists only when the object detection task is
specified.
'box/offset' - [optional] A list of size num_feature_outputs holding
float tensors of size [batch_size, output_height, output_width, 2]
representing the predicted y and x offsets at each output location.
'$TASK_NAME/keypoint_heatmap' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, num_keypoints] representing the predicted
keypoint heatmap logits.
'$TASK_NAME/keypoint_offset' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, 2] representing the predicted keypoint
offsets at each output location.
'$TASK_NAME/keypoint_regression' - [optional] A list of size
num_feature_outputs holding float tensors of size [batch_size,
output_height, output_width, 2 * num_keypoints] representing the
predicted keypoint regression at each output location.
'segmentation/heatmap' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, num_classes] representing the mask logits.
'densepose/heatmap' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, num_parts] representing the mask logits for each part.
'densepose/regression' - [optional] A list of size num_feature_outputs
holding float tensors of size [batch_size, output_height,
output_width, 2 * num_parts] representing the DensePose surface
coordinate predictions.
Note the $TASK_NAME is provided by the KeypointEstimation namedtuple
used to differentiate between different keypoint tasks.
"""
features_list = self._feature_extractor(preprocessed_inputs)
predictions = {}
for head_name, heads in self._prediction_head_dict.items():
predictions[head_name] = [
head(feature) for (feature, head) in zip(features_list, heads)
]
predictions['preprocessed_inputs'] = preprocessed_inputs
self._batched_prediction_tensor_names = predictions.keys()
return predictions
def loss(self, prediction_dict, true_image_shapes, scope=None):
"""Computes scalar loss tensors with respect to provided groundtruth.
This function implements the various CenterNet losses.
Args:
prediction_dict: a dictionary holding predicted tensors returned by
"predict" function.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
the form [height, width, channels] indicating the shapes of true images
in the resized images, as resized images can be padded with zeros.
scope: Optional scope name.
Returns:
A dictionary mapping the keys [
'Loss/object_center',
'Loss/box/scale', (optional)
'Loss/box/offset', (optional)
'Loss/$TASK_NAME/keypoint/heatmap', (optional)
'Loss/$TASK_NAME/keypoint/offset', (optional)
'Loss/$TASK_NAME/keypoint/regression', (optional)
'Loss/segmentation/heatmap', (optional)
'Loss/densepose/heatmap', (optional)
'Loss/densepose/regression', (optional)
'Loss/track/reid'] (optional)
'Loss/track/offset'] (optional)
scalar tensors corresponding to the losses for different tasks. Note the
$TASK_NAME is provided by the KeypointEstimation namedtuple used to
differentiate between different keypoint tasks.
"""
_, input_height, input_width, _ = _get_shape(
prediction_dict['preprocessed_inputs'], 4)
output_height, output_width = (input_height // self._stride,
input_width // self._stride)
# TODO(vighneshb) Explore whether using floor here is safe.
output_true_image_shapes = tf.ceil(
tf.to_float(true_image_shapes) / self._stride)
valid_anchor_weights = get_valid_anchor_weights_in_flattened_image(
output_true_image_shapes, output_height, output_width)
valid_anchor_weights = tf.expand_dims(valid_anchor_weights, 2)
object_center_loss = self._compute_object_center_loss(
object_center_predictions=prediction_dict[OBJECT_CENTER],
input_height=input_height,
input_width=input_width,
per_pixel_weights=valid_anchor_weights)
losses = {
OBJECT_CENTER:
self._center_params.object_center_loss_weight * object_center_loss
}
if self._od_params is not None:
od_losses = self._compute_object_detection_losses(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict,
per_pixel_weights=valid_anchor_weights)
for key in od_losses:
od_losses[key] = od_losses[key] * self._od_params.task_loss_weight
losses.update(od_losses)
if self._kp_params_dict is not None:
for task_name, params in self._kp_params_dict.items():
kp_losses = self._compute_keypoint_estimation_losses(
task_name=task_name,
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict,
per_pixel_weights=valid_anchor_weights)
for key in kp_losses:
kp_losses[key] = kp_losses[key] * params.task_loss_weight
losses.update(kp_losses)
if self._mask_params is not None:
seg_losses = self._compute_segmentation_losses(
prediction_dict=prediction_dict,
per_pixel_weights=valid_anchor_weights)
for key in seg_losses:
seg_losses[key] = seg_losses[key] * self._mask_params.task_loss_weight
losses.update(seg_losses)
if self._densepose_params is not None:
densepose_losses = self._compute_densepose_losses(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict)
for key in densepose_losses:
densepose_losses[key] = (
densepose_losses[key] * self._densepose_params.task_loss_weight)
losses.update(densepose_losses)
if self._track_params is not None:
track_losses = self._compute_track_losses(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict)
for key in track_losses:
track_losses[key] = (
track_losses[key] * self._track_params.task_loss_weight)
losses.update(track_losses)
if self._temporal_offset_params is not None:
offset_losses = self._compute_temporal_offset_loss(
input_height=input_height,
input_width=input_width,
prediction_dict=prediction_dict)
for key in offset_losses:
offset_losses[key] = (
offset_losses[key] * self._temporal_offset_params.task_loss_weight)
losses.update(offset_losses)
# Prepend the LOSS_KEY_PREFIX to the keys in the dictionary such that the
# losses will be grouped together in Tensorboard.
return dict([('%s/%s' % (LOSS_KEY_PREFIX, key), val)
for key, val in losses.items()])
def postprocess(self, prediction_dict, true_image_shapes, **params):
"""Produces boxes given a prediction dict returned by predict().
Although predict returns a list of tensors, only the last tensor in
each list is used for making box predictions.
Args:
prediction_dict: a dictionary holding predicted tensors from "predict"
function.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is of
the form [height, width, channels] indicating the shapes of true images
in the resized images, as resized images can be padded with zeros.
**params: Currently ignored.
Returns:
detections: a dictionary containing the following fields
detection_boxes - A tensor of shape [batch, max_detections, 4]
holding the predicted boxes.
detection_boxes_strided: A tensor of shape [batch_size, num_detections,
4] holding the predicted boxes in absolute coordinates of the
feature extractor's final layer output.
detection_scores: A tensor of shape [batch, max_detections] holding
the predicted score for each box.
detection_classes: An integer tensor of shape [batch, max_detections]
containing the detected class for each box.
num_detections: An integer tensor of shape [batch] containing the
number of detected boxes for each sample in the batch.
detection_keypoints: (Optional) A float tensor of shape [batch,
max_detections, num_keypoints, 2] with normalized keypoints. Any
invalid keypoints have their coordinates and scores set to 0.0.
detection_keypoint_scores: (Optional) A float tensor of shape [batch,
max_detection, num_keypoints] with scores for each keypoint.
detection_masks: (Optional) A uint8 tensor of shape [batch,
max_detections, mask_height, mask_width] with masks for each
detection. Background is specified with 0, and foreground is specified
with positive integers (1 for standard instance segmentation mask, and
1-indexed parts for DensePose task).
detection_surface_coords: (Optional) A float32 tensor of shape [batch,
max_detection, mask_height, mask_width, 2] with DensePose surface
coordinates, in (v, u) format.
detection_embeddings: (Optional) A float tensor of shape [batch,
max_detections, reid_embed_size] containing object embeddings.
"""
object_center_prob = tf.nn.sigmoid(prediction_dict[OBJECT_CENTER][-1])
# Get x, y and channel indices corresponding to the top indices in the class
# center predictions.
detection_scores, y_indices, x_indices, channel_indices = (
top_k_feature_map_locations(
object_center_prob, max_pool_kernel_size=3,
k=self._center_params.max_box_predictions))
boxes_strided, classes, scores, num_detections = (
prediction_tensors_to_boxes(
detection_scores, y_indices, x_indices, channel_indices,
prediction_dict[BOX_SCALE][-1], prediction_dict[BOX_OFFSET][-1]))
boxes = convert_strided_predictions_to_normalized_boxes(
boxes_strided, self._stride, true_image_shapes)
postprocess_dict = {
fields.DetectionResultFields.detection_boxes: boxes,
fields.DetectionResultFields.detection_scores: scores,
fields.DetectionResultFields.detection_classes: classes,
fields.DetectionResultFields.num_detections: num_detections,
'detection_boxes_strided': boxes_strided
}
if self._kp_params_dict:
keypoints, keypoint_scores = self._postprocess_keypoints(
prediction_dict, classes, y_indices, x_indices,
boxes_strided, num_detections)
keypoints, keypoint_scores = (
convert_strided_predictions_to_normalized_keypoints(
keypoints, keypoint_scores, self._stride, true_image_shapes,
clip_out_of_frame_keypoints=True))
postprocess_dict.update({
fields.DetectionResultFields.detection_keypoints: keypoints,
fields.DetectionResultFields.detection_keypoint_scores:
keypoint_scores
})
if self._mask_params:
masks = tf.nn.sigmoid(prediction_dict[SEGMENTATION_HEATMAP][-1])
densepose_part_heatmap, densepose_surface_coords = None, None
densepose_class_index = 0
if self._densepose_params:
densepose_part_heatmap = prediction_dict[DENSEPOSE_HEATMAP][-1]
densepose_surface_coords = prediction_dict[DENSEPOSE_REGRESSION][-1]
densepose_class_index = self._densepose_params.class_id
instance_masks, surface_coords = (
convert_strided_predictions_to_instance_masks(
boxes, classes, masks, true_image_shapes,
densepose_part_heatmap, densepose_surface_coords,
stride=self._stride, mask_height=self._mask_params.mask_height,
mask_width=self._mask_params.mask_width,
score_threshold=self._mask_params.score_threshold,
densepose_class_index=densepose_class_index))
postprocess_dict[
fields.DetectionResultFields.detection_masks] = instance_masks
if self._densepose_params:
postprocess_dict[
fields.DetectionResultFields.detection_surface_coords] = (
surface_coords)
if self._track_params:
embeddings = self._postprocess_embeddings(prediction_dict,
y_indices, x_indices)
postprocess_dict.update({
fields.DetectionResultFields.detection_embeddings: embeddings
})
if self._temporal_offset_params:
offsets = prediction_tensors_to_temporal_offsets(
y_indices, x_indices,
prediction_dict[TEMPORAL_OFFSET][-1])
postprocess_dict[fields.DetectionResultFields.detection_offsets] = offsets
return postprocess_dict
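  # --- Added usage sketch (illustrative only): `model` and `images` below are
  # hypothetical names, not defined in this module. The inference flow that the
  # docstrings above describe chains preprocess -> predict -> postprocess:
  #
  #   images = tf.zeros([1, 512, 512, 3], dtype=tf.float32)  # dummy RGB batch
  #   preprocessed, true_shapes = model.preprocess(images)
  #   prediction_dict = model.predict(preprocessed, None)
  #   detections = model.postprocess(prediction_dict, true_shapes)
  #   boxes = detections[fields.DetectionResultFields.detection_boxes]
  #   scores = detections[fields.DetectionResultFields.detection_scores]
  #
  # `boxes` has shape [batch, max_detections, 4] in normalized coordinates and
  # `scores` has shape [batch, max_detections], as documented above.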
def _postprocess_embeddings(self, prediction_dict, y_indices, x_indices):
"""Performs postprocessing on embedding predictions.
Args:
prediction_dict: a dictionary holding predicted tensors, returned from the
predict() method. This dictionary should contain embedding prediction
feature maps for tracking task.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
Returns:
embeddings: A [batch_size, max_detection, reid_embed_size] float32
tensor with L2 normalized embeddings extracted from detection box
centers.
"""
embedding_predictions = prediction_dict[TRACK_REID][-1]
embeddings = predicted_embeddings_at_object_centers(
embedding_predictions, y_indices, x_indices)
embeddings, _ = tf.linalg.normalize(embeddings, axis=-1)
return embeddings
def _postprocess_keypoints(self, prediction_dict, classes, y_indices,
x_indices, boxes, num_detections):
"""Performs postprocessing on keypoint predictions.
Args:
prediction_dict: a dictionary holding predicted tensors, returned from the
predict() method. This dictionary should contain keypoint prediction
feature maps for each keypoint task.
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
boxes: A [batch_size, max_detections, 4] float32 tensor with bounding
boxes in (un-normalized) output space.
num_detections: A [batch_size] int tensor with the number of valid
detections for each image.
Returns:
A tuple of
keypoints: a [batch_size, max_detection, num_total_keypoints, 2] float32
tensor with keypoints in the output (strided) coordinate frame.
keypoint_scores: a [batch_size, max_detections, num_total_keypoints]
float32 tensor with keypoint scores.
"""
total_num_keypoints = sum(len(kp_dict.keypoint_indices) for kp_dict
in self._kp_params_dict.values())
batch_size, max_detections, _ = _get_shape(boxes, 3)
kpt_coords_for_example_list = []
kpt_scores_for_example_list = []
for ex_ind in range(batch_size):
kpt_coords_for_class_list = []
kpt_scores_for_class_list = []
instance_inds_for_class_list = []
for task_name, kp_params in self._kp_params_dict.items():
keypoint_heatmap = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_HEATMAP)][-1]
keypoint_offsets = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_OFFSET)][-1]
keypoint_regression = prediction_dict[
get_keypoint_name(task_name, KEYPOINT_REGRESSION)][-1]
instance_inds = self._get_instance_indices(
classes, num_detections, ex_ind, kp_params.class_id)
def true_fn(
keypoint_heatmap, keypoint_offsets, keypoint_regression,
classes, y_indices, x_indices, boxes, instance_inds,
ex_ind, kp_params):
"""Logics to execute when instance_inds is not an empty set."""
# Postprocess keypoints and scores for class and single image. Shapes
# are [1, num_instances_i, num_keypoints_i, 2] and
# [1, num_instances_i, num_keypoints_i], respectively. Note that
          # num_instances_i and num_keypoints_i refer to the number of
# instances and keypoints for class i, respectively.
kpt_coords_for_class, kpt_scores_for_class = (
self._postprocess_keypoints_for_class_and_image(
keypoint_heatmap, keypoint_offsets, keypoint_regression,
classes, y_indices, x_indices, boxes, instance_inds,
ex_ind, kp_params))
# Expand keypoint dimension (with padding) so that coordinates and
# scores have shape [1, num_instances_i, num_total_keypoints, 2] and
# [1, num_instances_i, num_total_keypoints], respectively.
kpts_coords_for_class_padded, kpt_scores_for_class_padded = (
_pad_to_full_keypoint_dim(
kpt_coords_for_class, kpt_scores_for_class,
kp_params.keypoint_indices, total_num_keypoints))
return kpts_coords_for_class_padded, kpt_scores_for_class_padded
def false_fn():
"""Logics to execute when the instance_inds is an empty set."""
return (tf.zeros([1, 0, total_num_keypoints, 2], dtype=tf.float32),
tf.zeros([1, 0, total_num_keypoints], dtype=tf.float32))
true_fn = functools.partial(
true_fn, keypoint_heatmap, keypoint_offsets, keypoint_regression,
classes, y_indices, x_indices, boxes, instance_inds, ex_ind,
kp_params)
results = tf.cond(tf.size(instance_inds) > 0, true_fn, false_fn)
kpt_coords_for_class_list.append(results[0])
kpt_scores_for_class_list.append(results[1])
instance_inds_for_class_list.append(instance_inds)
# Concatenate all keypoints across all classes (single example).
kpt_coords_for_example = tf.concat(kpt_coords_for_class_list, axis=1)
kpt_scores_for_example = tf.concat(kpt_scores_for_class_list, axis=1)
instance_inds_for_example = tf.concat(instance_inds_for_class_list,
axis=0)
if tf.size(instance_inds_for_example) > 0:
# Scatter into tensor where instances align with original detection
# instances. New shape of keypoint coordinates and scores are
# [1, max_detections, num_total_keypoints, 2] and
# [1, max_detections, num_total_keypoints], respectively.
kpt_coords_for_example_all_det, kpt_scores_for_example_all_det = (
_pad_to_full_instance_dim(
kpt_coords_for_example, kpt_scores_for_example,
instance_inds_for_example,
self._center_params.max_box_predictions))
else:
kpt_coords_for_example_all_det = tf.zeros(
[1, max_detections, total_num_keypoints, 2], dtype=tf.float32)
kpt_scores_for_example_all_det = tf.zeros(
[1, max_detections, total_num_keypoints], dtype=tf.float32)
kpt_coords_for_example_list.append(kpt_coords_for_example_all_det)
kpt_scores_for_example_list.append(kpt_scores_for_example_all_det)
# Concatenate all keypoints and scores from all examples in the batch.
# Shapes are [batch_size, max_detections, num_total_keypoints, 2] and
# [batch_size, max_detections, num_total_keypoints], respectively.
keypoints = tf.concat(kpt_coords_for_example_list, axis=0)
keypoint_scores = tf.concat(kpt_scores_for_example_list, axis=0)
return keypoints, keypoint_scores
def _get_instance_indices(self, classes, num_detections, batch_index,
class_id):
"""Gets the instance indices that match the target class ID.
Args:
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
num_detections: A [batch_size] int tensor with the number of valid
detections for each image.
batch_index: An integer specifying the index for an example in the batch.
      class_id: Integer class id; only detections of this class are selected.
Returns:
instance_inds: A [num_instances] int tensor where each element indicates
the instance location within the `classes` tensor. This is useful to
associate the refined keypoints with the original detections (i.e.
boxes)
"""
classes = classes[batch_index:batch_index+1, ...]
_, max_detections = shape_utils.combined_static_and_dynamic_shape(
classes)
# Get the detection indices corresponding to the target class.
valid_detections_with_kpt_class = tf.math.logical_and(
tf.range(max_detections) < num_detections[batch_index],
classes[0] == class_id)
instance_inds = tf.where(valid_detections_with_kpt_class)[:, 0]
return instance_inds
def _postprocess_keypoints_for_class_and_image(
self, keypoint_heatmap, keypoint_offsets, keypoint_regression, classes,
y_indices, x_indices, boxes, indices_with_kpt_class, batch_index,
kp_params):
"""Postprocess keypoints for a single image and class.
This function performs the following postprocessing operations on a single
image and single keypoint class:
- Converts keypoints scores to range [0, 1] with sigmoid.
- Determines the detections that correspond to the specified keypoint class.
- Gathers the regressed keypoints at the detection (i.e. box) centers.
- Gathers keypoint candidates from the keypoint heatmaps.
- Snaps regressed keypoints to nearby keypoint candidates.
Args:
keypoint_heatmap: A [batch_size, height, width, num_keypoints] float32
tensor with keypoint heatmaps.
keypoint_offsets: A [batch_size, height, width, 2] float32 tensor with
local offsets to keypoint centers.
keypoint_regression: A [batch_size, height, width, 2 * num_keypoints]
float32 tensor with regressed offsets to all keypoints.
classes: A [batch_size, max_detections] int tensor with class indices for
all detected objects.
y_indices: A [batch_size, max_detections] int tensor with y indices for
all object centers.
x_indices: A [batch_size, max_detections] int tensor with x indices for
all object centers.
boxes: A [batch_size, max_detections, 4] float32 tensor with detected
boxes in the output (strided) frame.
indices_with_kpt_class: A [num_instances] int tensor where each element
indicates the instance location within the `classes` tensor. This is
useful to associate the refined keypoints with the original detections
(i.e. boxes)
batch_index: An integer specifying the index for an example in the batch.
kp_params: A `KeypointEstimationParams` object with parameters for a
single keypoint class.
Returns:
A tuple of
refined_keypoints: A [1, num_instances, num_keypoints, 2] float32 tensor
with refined keypoints for a single class in a single image, expressed
in the output (strided) coordinate frame. Note that `num_instances` is a
dynamic dimension, and corresponds to the number of valid detections
for the specific class.
refined_scores: A [1, num_instances, num_keypoints] float32 tensor with
keypoint scores.
"""
keypoint_indices = kp_params.keypoint_indices
num_keypoints = len(keypoint_indices)
keypoint_heatmap = tf.nn.sigmoid(
keypoint_heatmap[batch_index:batch_index+1, ...])
keypoint_offsets = keypoint_offsets[batch_index:batch_index+1, ...]
keypoint_regression = keypoint_regression[batch_index:batch_index+1, ...]
y_indices = y_indices[batch_index:batch_index+1, ...]
x_indices = x_indices[batch_index:batch_index+1, ...]
# Gather the feature map locations corresponding to the object class.
y_indices_for_kpt_class = tf.gather(y_indices, indices_with_kpt_class,
axis=1)
x_indices_for_kpt_class = tf.gather(x_indices, indices_with_kpt_class,
axis=1)
boxes_for_kpt_class = tf.gather(boxes, indices_with_kpt_class, axis=1)
# Gather the regressed keypoints. Final tensor has shape
# [1, num_instances, num_keypoints, 2].
regressed_keypoints_for_objects = regressed_keypoints_at_object_centers(
keypoint_regression, y_indices_for_kpt_class, x_indices_for_kpt_class)
regressed_keypoints_for_objects = tf.reshape(
regressed_keypoints_for_objects, [1, -1, num_keypoints, 2])
# Get the candidate keypoints and scores.
# The shape of keypoint_candidates and keypoint_scores is:
# [1, num_candidates_per_keypoint, num_keypoints, 2] and
# [1, num_candidates_per_keypoint, num_keypoints], respectively.
keypoint_candidates, keypoint_scores, num_keypoint_candidates = (
prediction_tensors_to_keypoint_candidates(
keypoint_heatmap, keypoint_offsets,
keypoint_score_threshold=(
kp_params.keypoint_candidate_score_threshold),
max_pool_kernel_size=kp_params.peak_max_pool_kernel_size,
max_candidates=kp_params.num_candidates_per_keypoint))
# Get the refined keypoints and scores, of shape
# [1, num_instances, num_keypoints, 2] and
# [1, num_instances, num_keypoints], respectively.
refined_keypoints, refined_scores = refine_keypoints(
regressed_keypoints_for_objects, keypoint_candidates, keypoint_scores,
num_keypoint_candidates, bboxes=boxes_for_kpt_class,
unmatched_keypoint_score=kp_params.unmatched_keypoint_score,
box_scale=kp_params.box_scale,
candidate_search_scale=kp_params.candidate_search_scale,
candidate_ranking_mode=kp_params.candidate_ranking_mode)
return refined_keypoints, refined_scores
def regularization_losses(self):
return []
def restore_map(self,
fine_tune_checkpoint_type='detection',
load_all_detection_checkpoint_vars=False):
raise RuntimeError('CenterNetMetaArch not supported under TF1.x.')
def restore_from_objects(self, fine_tune_checkpoint_type='detection'):
"""Returns a map of Trackable objects to load from a foreign checkpoint.
Returns a dictionary of Tensorflow 2 Trackable objects (e.g. tf.Module
or Checkpoint). This enables the model to initialize based on weights from
another task. For example, the feature extractor variables from a
classification model can be used to bootstrap training of an object
detector. When loading from an object detection model, the checkpoint model
should have the same parameters as this detection model with exception of
the num_classes parameter.
Note that this function is intended to be used to restore Keras-based
models when running Tensorflow 2, whereas restore_map (not implemented
in CenterNet) is intended to be used to restore Slim-based models when
running Tensorflow 1.x.
TODO(jonathanhuang): Make this function consistent with other
meta-architectures.
Args:
fine_tune_checkpoint_type: whether to restore from a full detection
checkpoint (with compatible variable names) or to restore from a
classification checkpoint for initialization prior to training.
Valid values: `detection`, `classification`, `fine_tune`.
Default 'detection'.
'detection': used when loading models pre-trained on other detection
tasks. With this checkpoint type the weights of the feature extractor
are expected under the attribute 'feature_extractor'.
'classification': used when loading models pre-trained on an image
classification task. Note that only the encoder section of the network
is loaded and not the upsampling layers. With this checkpoint type,
the weights of only the encoder section are expected under the
attribute 'feature_extractor'.
'fine_tune': used when loading the entire CenterNet feature extractor
pre-trained on other tasks. The checkpoints saved during CenterNet
model training can be directly loaded using this type. With this
checkpoint type, the weights of the feature extractor are expected
under the attribute 'model._feature_extractor'.
For more details, see the tensorflow section on Loading mechanics.
https://www.tensorflow.org/guide/checkpoint#loading_mechanics
Returns:
A dict mapping keys to Trackable objects (tf.Module or Checkpoint).
"""
supported_types = self._feature_extractor.supported_sub_model_types
supported_types += ['fine_tune']
if fine_tune_checkpoint_type not in supported_types:
message = ('Checkpoint type "{}" not supported for {}. '
'Supported types are {}')
raise ValueError(
message.format(fine_tune_checkpoint_type,
self._feature_extractor.__class__.__name__,
supported_types))
elif fine_tune_checkpoint_type == 'fine_tune':
feature_extractor_model = tf.train.Checkpoint(
_feature_extractor=self._feature_extractor)
return {'model': feature_extractor_model}
else:
return {'feature_extractor': self._feature_extractor.get_sub_model(
fine_tune_checkpoint_type)}
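  # --- Added restore sketch (assumption: the standard tf.train.Checkpoint flow;
  # `model` and the checkpoint path are hypothetical):
  #
  #   restore_map = model.restore_from_objects(
  #       fine_tune_checkpoint_type='fine_tune')
  #   ckpt = tf.train.Checkpoint(**restore_map)
  #   ckpt.restore('/path/to/centernet/ckpt-0').expect_partial()
  #
  # With 'fine_tune', the checkpoint is expected to hold the feature extractor
  # under 'model._feature_extractor', as described in the docstring above.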
def updates(self):
raise RuntimeError('This model is intended to be used with model_lib_v2 '
'which does not support updates()')
| 45.883802
| 97
| 0.716559
|
67e13de7fefe80eb811d631330e4002f3f518687
| 1,775
|
py
|
Python
|
share/qt/extract_strings_qt.py
|
sk21new/sk21
|
1a98560d355653887c5ce0bcb0f3b7cbab837fe9
|
[
"MIT"
] | 1
|
2017-12-08T21:45:38.000Z
|
2017-12-08T21:45:38.000Z
|
share/qt/extract_strings_qt.py
|
sk21new/sk21
|
1a98560d355653887c5ce0bcb0f3b7cbab837fe9
|
[
"MIT"
] | null | null | null |
share/qt/extract_strings_qt.py
|
sk21new/sk21
|
1a98560d355653887c5ce0bcb0f3b7cbab837fe9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
OUT_CPP="src/qt/sk21strings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
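# Illustrative example (made-up strings) of the 'po' fragment parse_po expects:
#
#   msgid "Loading addresses..."
#   msgstr ""
#
# parse_po would return [(['"Loading addresses..."'], ['""'])]. Note that the
# surrounding quotes are kept, which is why EMPTY is defined as ['""'] above.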
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *sk21_strings[] = {')
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("sk21-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| 25
| 80
| 0.577465
|
bd2176b466118820f4db66a420409c099c50570b
| 5,735
|
py
|
Python
|
src/bin/shipyard_airflow/shipyard_airflow/plugins/ucp_base_operator.py
|
att-comdev/shipyard
|
14d66afb012025a5289818d8e8d2092ccce19ffa
|
[
"Apache-2.0"
] | 14
|
2017-07-05T14:12:51.000Z
|
2021-05-20T23:31:55.000Z
|
src/bin/shipyard_airflow/shipyard_airflow/plugins/ucp_base_operator.py
|
att-comdev/shipyard
|
14d66afb012025a5289818d8e8d2092ccce19ffa
|
[
"Apache-2.0"
] | 14
|
2017-06-09T04:33:44.000Z
|
2018-05-07T13:07:40.000Z
|
src/bin/shipyard_airflow/shipyard_airflow/plugins/ucp_base_operator.py
|
att-comdev/shipyard
|
14d66afb012025a5289818d8e8d2092ccce19ffa
|
[
"Apache-2.0"
] | 16
|
2017-05-18T15:22:22.000Z
|
2019-06-14T16:43:44.000Z
|
# Copyright 2018 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configparser
import logging
import math
from datetime import datetime
from airflow.models import BaseOperator
from airflow.plugins_manager import AirflowPlugin
from airflow.utils.decorators import apply_defaults
try:
from get_k8s_logs import get_pod_logs
except ImportError:
from shipyard_airflow.plugins.get_k8s_logs import get_pod_logs
try:
from get_k8s_logs import K8sLoggingException
except ImportError:
from shipyard_airflow.plugins.get_k8s_logs import K8sLoggingException
try:
from xcom_puller import XcomPuller
except ImportError:
from shipyard_airflow.plugins.xcom_puller import XcomPuller
LOG = logging.getLogger(__name__)
class UcpBaseOperator(BaseOperator):
"""UCP Base Operator
All UCP related workflow operators will use the UCP base
operator as the parent and inherit attributes and methods
from this class
"""
@apply_defaults
def __init__(self,
main_dag_name=None,
pod_selector_pattern=None,
shipyard_conf=None,
start_time=None,
sub_dag_name=None,
xcom_push=True,
*args, **kwargs):
"""Initialization of UcpBaseOperator object.
:param continue_processing: A boolean value on whether to continue
with the workflow. Defaults to True.
:param main_dag_name: Parent Dag
:param pod_selector_pattern: A list containing the information on
the patterns of the Pod name and name
of the associated container for log
queries. This will allow us to query
multiple components, e.g. MAAS and
Drydock at the same time. It also allows
us to query the logs of specific container
in Pods with multiple containers. For
instance the Airflow worker pod contains
both the airflow-worker container and the
log-rotate container.
:param shipyard_conf: Location of shipyard.conf
:param start_time: Time when Operator gets executed
:param sub_dag_name: Child Dag
:param xcom_push: xcom usage
"""
super(UcpBaseOperator, self).__init__(*args, **kwargs)
self.continue_processing = True
self.main_dag_name = main_dag_name
self.pod_selector_pattern = pod_selector_pattern or []
self.shipyard_conf = shipyard_conf
self.start_time = datetime.now()
self.sub_dag_name = sub_dag_name
self.xcom_push_flag = xcom_push
def execute(self, context):
# Execute UCP base function
self.ucp_base(context)
# Execute base function
self.run_base(context)
if self.continue_processing:
            # Execute child function
self.do_execute()
def ucp_base(self, context):
LOG.info("Running UCP Base Operator...")
        # Read and parse shipyard.conf
config = configparser.ConfigParser()
config.read(self.shipyard_conf)
# Initialize variable
self.ucp_namespace = config.get('k8s_logs', 'ucp_namespace')
# Define task_instance
self.task_instance = context['task_instance']
# Set up and retrieve values from xcom
self.xcom_puller = XcomPuller(self.main_dag_name, self.task_instance)
self.action_info = self.xcom_puller.get_action_info()
self.dc = self.xcom_puller.get_deployment_configuration()
self.revision_id = self.action_info['committed_rev_id']
def get_k8s_logs(self):
"""Retrieve Kubernetes pod/container logs specified by an opererator
This method is "best effort" and should not prevent the progress of
the workflow processing
"""
if self.pod_selector_pattern:
for selector in self.pod_selector_pattern:
# Get difference in current time and time when the
# operator was first executed (in seconds)
t_diff = (datetime.now() - self.start_time).total_seconds()
# Note that we will end up with a floating number for
# 't_diff' and will need to round it up to the nearest
# integer
t_diff_int = int(math.ceil(t_diff))
try:
get_pod_logs(selector['pod_pattern'],
self.ucp_namespace,
selector['container'],
t_diff_int)
except K8sLoggingException as e:
LOG.error(e)
else:
LOG.debug("There are no pod logs specified to retrieve")
class UcpBaseOperatorPlugin(AirflowPlugin):
"""Creates UcpBaseOperator in Airflow."""
name = 'ucp_base_operator_plugin'
operators = [UcpBaseOperator]
| 35.84375
| 79
| 0.626678
|
37ffde90b437c0c57bc7b43d701f24860d1ac8cb
| 4,298
|
py
|
Python
|
learning/rnn_mlp_training.py
|
flclain/master
|
a40a2a5bf4e1017da3a5cf3456eb0b28de6f86e8
|
[
"MIT"
] | null | null | null |
learning/rnn_mlp_training.py
|
flclain/master
|
a40a2a5bf4e1017da3a5cf3456eb0b28de6f86e8
|
[
"MIT"
] | null | null | null |
learning/rnn_mlp_training.py
|
flclain/master
|
a40a2a5bf4e1017da3a5cf3456eb0b28de6f86e8
|
[
"MIT"
] | 1
|
2020-12-15T07:34:39.000Z
|
2020-12-15T07:34:39.000Z
|
import torch
import torch.nn as nn
# import torch.nn.functional as f
import torch.optim as optim
# import torchvision
# import torchvision.transforms as transforms
# import matplotlib.pyplot as plt
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
from sklearn.model_selection import train_test_split
import numpy as np
import time
from classes.datasets import CustomDataset
from classes.rnn_mlp import RNN_MLP,custom_mse
import helpers.helpers_training as training
import sys
import json
import matplotlib.pyplot as plt
"""
This script trains an RNN_MLP model.
The data is loaded using a custom dataset
and a PyTorch DataLoader.
The model is trained on 80% of the dataset and evaluated
on the rest.
The objective function is the mean squared error between
the target sequence and the predicted sequence.
The training also evaluates the Final Displacement Error (FDE).
At the end of the training, the model is saved in master/learning/data/models.
If the training is interrupted, the model is saved.
The interrupted training can be resumed by assigning a file path to the
load_path variable.
Three curves are plotted:
train ADE
eval ADE
eval FDE
"""
#python learning/rnn_mlp_training.py parameters/data.json parameters/rnn_mlp_training.json parameters/torch_extractors.json parameters/prepare_training.json
def main():
# set pytorch
# torch.manual_seed(10)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
print(torch.cuda.is_available())
args = sys.argv
# data = json.load(open(args[1]))
# torch_param = json.load(open(args[3]))
# training_param = json.load(open(args[2]))
# prepare_param = json.load(open(args[4]))
training_param = json.load(open("parameters/rnn_mlp_training.json"))
data = json.load(open("parameters/data.json"))
torch_param = json.load(open("parameters/torch_extractors.json"))
prepare_param = json.load(open("parameters/prepare_training.json"))
# ids = np.array(json.load(open(torch_param["ids_path"]))["ids"])
ids = json.load(open(torch_param["ids_path"]))["ids"]
train_ids,eval_ids,test_ids = training.split_train_eval_test(ids,prepare_param["train_scenes"],prepare_param["test_scenes"], eval_prop = prepare_param["eval_prop"])
train_indices = train_ids
eval_indices = test_ids
# load datasets
train_dataset = CustomDataset(train_indices,data["torch_data"])
eval_dataset = CustomDataset(eval_indices,data["torch_data"])
# create dataloaders
train_loader = torch.utils.data.DataLoader( train_dataset, batch_size= training_param["batch_size"], shuffle=True,num_workers= training_param["num_workers"],drop_last = True)
eval_loader = torch.utils.data.DataLoader( eval_dataset, batch_size= training_param["batch_size"], shuffle=False,num_workers= training_param["num_workers"],drop_last = True)
net = RNN_MLP(
device = device,
batch_size = training_param["batch_size"],
input_dim = training_param["input_dim"],
hidden_size = training_param["hidden_size"],
recurrent_layer = training_param["recurrent_layer"],
mlp_layers = training_param["mlp_layers"],
output_size = training_param["output_size"]
)
print(net)
net = net.to(device)
optimizer = optim.Adam(net.parameters(),lr = training_param["lr"])
criterion = custom_mse
training.training_loop(training_param["n_epochs"],training_param["batch_size"],
net,device,train_loader,eval_loader,criterion,criterion,optimizer, data["scalers"],
data["multiple_scalers"],training_param["model_type"],
plot = training_param["plot"],early_stopping = True,load_path = training_param["load_path"])
load_path = "./learning/data/models/model_1552260631.156045.tar"
checkpoint = torch.load(load_path)
net.load_state_dict(checkpoint['state_dict'])
net = net.to(device)
# net.eval()
# batch_test_losses = []
# for data in eval_loader:
# samples,targets = data
# samples = samples.to(device)
# targets = targets.to(device)
# outputs = net(samples)
if __name__ == "__main__":
main()
| 33.317829
| 178
| 0.715449
|
caf410dc4f803c362bb81514b33d542391fd4081
| 2,764
|
py
|
Python
|
reinforcement_learning/sequential_agent.py
|
TeamSerpentine/flatlands-2020
|
003ea89e13b148b3757bec1808d9d2cf66e29ed5
|
[
"BSD-3-Clause"
] | null | null | null |
reinforcement_learning/sequential_agent.py
|
TeamSerpentine/flatlands-2020
|
003ea89e13b148b3757bec1808d9d2cf66e29ed5
|
[
"BSD-3-Clause"
] | null | null | null |
reinforcement_learning/sequential_agent.py
|
TeamSerpentine/flatlands-2020
|
003ea89e13b148b3757bec1808d9d2cf66e29ed5
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
from pathlib import Path
import numpy as np
from flatland.envs.observations import TreeObsForRailEnv
from flatland.envs.predictions import ShortestPathPredictorForRailEnv
from flatland.envs.rail_env import RailEnv
from flatland.envs.rail_generators import complex_rail_generator
from flatland.envs.schedule_generators import complex_schedule_generator
from flatland.utils.rendertools import RenderTool
base_dir = Path(__file__).resolve().parent.parent
sys.path.append(str(base_dir))
from reinforcement_learning.ordered_policy import OrderedPolicy
"""
This file shows how to move agents in a sequential way: it moves the trains one by one, following a shortest path strategy.
This is obviously very slow, but it's a good way to get familiar with the different Flatland components: RailEnv, TreeObsForRailEnv, etc...
multi_agent_training.py is a better starting point to train your own solution!
"""
np.random.seed(2)
x_dim = np.random.randint(8, 20)
y_dim = np.random.randint(8, 20)
n_agents = np.random.randint(3, 8)
n_goals = n_agents + np.random.randint(0, 3)
min_dist = int(0.75 * min(x_dim, y_dim))
env = RailEnv(
width=x_dim,
height=y_dim,
rail_generator=complex_rail_generator(
nr_start_goal=n_goals, nr_extra=5, min_dist=min_dist,
max_dist=99999,
seed=0
),
schedule_generator=complex_schedule_generator(),
obs_builder_object=TreeObsForRailEnv(max_depth=1, predictor=ShortestPathPredictorForRailEnv()),
number_of_agents=n_agents)
env.reset(True, True)
tree_depth = 1
observation_helper = TreeObsForRailEnv(max_depth=tree_depth, predictor=ShortestPathPredictorForRailEnv())
env_renderer = RenderTool(env, gl="PGL", )
handle = env.get_agent_handles()
n_episodes = 10
max_steps = 100 * (env.height + env.width)
record_images = False
policy = OrderedPolicy()
action_dict = dict()
for trials in range(1, n_episodes + 1):
# Reset environment
obs, info = env.reset(True, True)
done = env.dones
env_renderer.reset()
frame_step = 0
# Run episode
for step in range(max_steps):
env_renderer.render_env(show=True, show_observations=False, show_predictions=True)
if record_images:
env_renderer.gl.save_image("./Images/flatland_frame_{:04d}.bmp".format(frame_step))
frame_step += 1
# Action
acting_agent = 0
for a in range(env.get_num_agents()):
if done[a]:
acting_agent += 1
if a == acting_agent:
action = policy.act(obs[a])
else:
action = 4
action_dict.update({a: action})
# Environment step
obs, all_rewards, done, _ = env.step(action_dict)
if done['__all__']:
break
| 32.139535
| 139
| 0.713097
|
9b792e8a6a9822fb55a12c36c49ca0c2f9b81a71
| 5,835
|
py
|
Python
|
hummingbot/connector/derivative/binance_perpetual/binance_perpetual_user_stream_data_source.py
|
csdenboer/hummingbot
|
8a799675a325ebdbb74d76b2a44472cdbf74d691
|
[
"Apache-2.0"
] | 4
|
2021-09-15T02:56:57.000Z
|
2022-03-01T19:59:19.000Z
|
hummingbot/connector/derivative/binance_perpetual/binance_perpetual_user_stream_data_source.py
|
csdenboer/hummingbot
|
8a799675a325ebdbb74d76b2a44472cdbf74d691
|
[
"Apache-2.0"
] | 8
|
2020-10-01T05:17:54.000Z
|
2020-11-09T13:38:19.000Z
|
hummingbot/connector/derivative/binance_perpetual/binance_perpetual_user_stream_data_source.py
|
csdenboer/hummingbot
|
8a799675a325ebdbb74d76b2a44472cdbf74d691
|
[
"Apache-2.0"
] | 3
|
2021-01-25T00:23:20.000Z
|
2022-03-21T18:57:33.000Z
|
import asyncio
import logging
from typing import Optional, Dict, AsyncIterable
import aiohttp
import ujson
import websockets
from websockets import ConnectionClosed
from hummingbot.core.data_type.user_stream_tracker_data_source import UserStreamTrackerDataSource
from hummingbot.core.utils.async_utils import safe_ensure_future
from hummingbot.logger import HummingbotLogger
BINANCE_USER_STREAM_ENDPOINT = "/fapi/v1/listenKey"
class BinancePerpetualUserStreamDataSource(UserStreamTrackerDataSource):
_bpusds_logger: Optional[HummingbotLogger] = None
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._bpusds_logger is None:
cls._bpusds_logger = logging.getLogger(__name__)
return cls._bpusds_logger
@property
def last_recv_time(self) -> float:
return self._last_recv_time
def __init__(self, base_url: str, stream_url: str, api_key: str):
super().__init__()
self._api_key: str = api_key
self._current_listen_key = None
self._listen_for_user_stream_task = None
self._last_recv_time: float = 0
self._http_stream_url = base_url + BINANCE_USER_STREAM_ENDPOINT
self._wss_stream_url = stream_url + "/ws/"
async def get_listen_key(self):
async with aiohttp.ClientSession() as client:
async with client.post(self._http_stream_url,
headers={"X-MBX-APIKEY": self._api_key}) as response:
response: aiohttp.ClientResponse = response
if response.status != 200:
raise IOError(f"Error fetching Binance Perpetual user stream listen key. "
f"HTTP status is {response.status}.")
data: Dict[str, str] = await response.json()
return data["listenKey"]
async def ping_listen_key(self, listen_key: str) -> bool:
async with aiohttp.ClientSession() as client:
async with client.put(self._http_stream_url,
headers={"X-MBX-APIKEY": self._api_key},
params={"listenKey": listen_key}) as response:
                data: Dict[str, any] = await response.json()
if "code" in data:
self.logger().warning(f"Failed to refresh the listen key {listen_key}: {data}")
return False
return True
async def ws_messages(self, client: websockets.WebSocketClientProtocol) -> AsyncIterable[str]:
try:
while True:
try:
raw_msg: str = await asyncio.wait_for(client.recv(), timeout=60.0)
yield raw_msg
except asyncio.TimeoutError:
try:
pong_waiter = await client.ping()
await asyncio.wait_for(pong_waiter, timeout=60.0)
except asyncio.TimeoutError:
raise
except asyncio.TimeoutError:
self.logger().warning("Websocket ping timed out. Going to reconnect... ")
return
except ConnectionClosed:
return
finally:
await client.close()
async def log_user_stream(self, output: asyncio.Queue):
while True:
try:
stream_url: str = f"{self._wss_stream_url}{self._current_listen_key}"
async with websockets.connect(stream_url) as ws:
ws: websockets.WebSocketClientProtocol = ws
async for raw_msg in self.ws_messages(ws):
msg_json: Dict[str, any] = ujson.loads(raw_msg)
output.put_nowait(msg_json)
except asyncio.CancelledError:
raise
except Exception:
self.logger().error("Unexpected error. Retrying after 5 seconds... ", exc_info=True)
await asyncio.sleep(5)
async def listen_for_user_stream(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
try:
while True:
try:
if self._current_listen_key is None:
self._current_listen_key = await self.get_listen_key()
self.logger().debug(f"Obtained listen key {self._current_listen_key}.")
if self._listen_for_user_stream_task is not None:
self._listen_for_user_stream_task.cancel()
self._listen_for_user_stream_task = safe_ensure_future(self.log_user_stream(output))
await self.wait_til_next_tick(seconds=3600)
success: bool = await self.ping_listen_key(self._current_listen_key)
if not success:
self._current_listen_key = None
if self._listen_for_user_stream_task is not None:
self._listen_for_user_stream_task.cancel()
self._listen_for_user_stream_task = None
continue
self.logger().debug(f"Refreshed listen key {self._current_listen_key}.")
await self.wait_til_next_tick(seconds=60)
except asyncio.CancelledError:
raise
except Exception:
self.logger().error("Unexpected error while maintaning the user event listen key. Retrying after "
"5 seconds...", exc_info=True)
await asyncio.sleep(5)
finally:
if self._listen_for_user_stream_task is not None:
self._listen_for_user_stream_task.cancel()
self._listen_for_user_stream_task = None
self._current_listen_key = None
| 45.944882
| 118
| 0.590917
|
fbaf02fb23637f10f0fccdbead266af6a6fdc1a7
| 39
|
py
|
Python
|
pybind_torch/example.py
|
nachovizzo/pybind_pytorch
|
8b99fa084b19bea6572122211761f241c0e43728
|
[
"MIT"
] | 2
|
2021-07-06T17:09:18.000Z
|
2021-07-22T03:54:33.000Z
|
pybind_torch/example.py
|
nachovizzo/pybind_pytorch
|
8b99fa084b19bea6572122211761f241c0e43728
|
[
"MIT"
] | null | null | null |
pybind_torch/example.py
|
nachovizzo/pybind_pytorch
|
8b99fa084b19bea6572122211761f241c0e43728
|
[
"MIT"
] | null | null | null |
from pybind_torch import greet
greet()
| 13
| 30
| 0.820513
|
d98c8cd8658d84b45a1e59ee41fa12b27889c85d
| 1,334
|
py
|
Python
|
src/ggrc_workflows/migrations/versions/20140806232046_520b31514bd2_add_relative_dates.py
|
Smotko/ggrc-core
|
b3abb58b24e7559960d71a94ba79c75539e7fe29
|
[
"Apache-2.0"
] | null | null | null |
src/ggrc_workflows/migrations/versions/20140806232046_520b31514bd2_add_relative_dates.py
|
Smotko/ggrc-core
|
b3abb58b24e7559960d71a94ba79c75539e7fe29
|
[
"Apache-2.0"
] | 12
|
2015-01-08T14:50:19.000Z
|
2017-11-29T19:37:53.000Z
|
src/ggrc_workflows/migrations/versions/20140806232046_520b31514bd2_add_relative_dates.py
|
mikecb/ggrc-core
|
1cda560cb0920021416e07740c6cca1acba56268
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2015-01-08T13:25:09.000Z
|
2015-01-08T13:25:09.000Z
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Add relative dates
Revision ID: 520b31514bd2
Revises: 32221e9f330c
Create Date: 2014-08-06 23:20:46.059979
"""
# revision identifiers, used by Alembic.
revision = '520b31514bd2'
down_revision = '32221e9f330c'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('task_group_tasks', sa.Column('relative_end_day', sa.Integer(), nullable=True))
op.add_column('task_group_tasks', sa.Column('relative_end_month', sa.Integer(), nullable=True))
op.add_column('task_group_tasks', sa.Column('relative_start_day', sa.Integer(), nullable=True))
op.add_column('task_group_tasks', sa.Column('relative_start_month', sa.Integer(), nullable=True))
op.add_column('workflows', sa.Column('next_cycle_start_date', sa.Date(), nullable=True))
def downgrade():
op.drop_column('workflows', 'next_cycle_start_date')
op.drop_column('task_group_tasks', 'relative_start_month')
op.drop_column('task_group_tasks', 'relative_start_day')
op.drop_column('task_group_tasks', 'relative_end_month')
op.drop_column('task_group_tasks', 'relative_end_day')
| 36.054054
| 101
| 0.755622
|
1f8b033a87f45127f41d3cf13eeb12197205500d
| 630
|
py
|
Python
|
web_manage/manage.py
|
chicken-noodle/mysite
|
6973edd1357a2490f7f47e5ce427af5b144fb50a
|
[
"bzip2-1.0.6"
] | 1
|
2019-09-02T02:14:51.000Z
|
2019-09-02T02:14:51.000Z
|
web_manage/manage.py
|
chicken-noodle/mysite
|
6973edd1357a2490f7f47e5ce427af5b144fb50a
|
[
"bzip2-1.0.6"
] | null | null | null |
web_manage/manage.py
|
chicken-noodle/mysite
|
6973edd1357a2490f7f47e5ce427af5b144fb50a
|
[
"bzip2-1.0.6"
] | 1
|
2019-09-02T03:33:06.000Z
|
2019-09-02T03:33:06.000Z
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web_manage.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.636364
| 74
| 0.684127
|
fbb5e5cca05c0f0db531fdacd35a3080f810441e
| 13
|
py
|
Python
|
constants.py
|
AntonMelentiev/aqa_sandbox
|
1ca71c71aa3be572b36eb8b9f95a57afb2ada87f
|
[
"Apache-2.0"
] | 3
|
2021-09-02T13:52:01.000Z
|
2022-01-12T13:07:00.000Z
|
constants.py
|
AntonMelentiev/aqa_sandbox
|
1ca71c71aa3be572b36eb8b9f95a57afb2ada87f
|
[
"Apache-2.0"
] | null | null | null |
constants.py
|
AntonMelentiev/aqa_sandbox
|
1ca71c71aa3be572b36eb8b9f95a57afb2ada87f
|
[
"Apache-2.0"
] | null | null | null |
TIMEOUT = 10
| 6.5
| 12
| 0.692308
|
775f09a5879b55ec31c0cc774613efc4f38632f6
| 73
|
py
|
Python
|
celery_once/backends/__init__.py
|
alkymi-io/celery-once
|
adcb91d393569703b560c863305746b2e0e12d28
|
[
"BSD-2-Clause"
] | 388
|
2017-06-02T00:10:24.000Z
|
2022-03-31T12:49:09.000Z
|
celery_once/backends/__init__.py
|
alkymi-io/celery-once
|
adcb91d393569703b560c863305746b2e0e12d28
|
[
"BSD-2-Clause"
] | 82
|
2017-06-05T11:16:15.000Z
|
2021-11-25T12:59:03.000Z
|
celery_once/backends/__init__.py
|
alkymi-io/celery-once
|
adcb91d393569703b560c863305746b2e0e12d28
|
[
"BSD-2-Clause"
] | 63
|
2017-05-12T17:39:09.000Z
|
2021-11-23T19:37:53.000Z
|
# -*- coding: utf-8 -*-
from .file import File
from .redis import Redis
| 14.6
| 24
| 0.657534
|
a89f2c2bb9be0f6c15c85e5ecc0c5974752d4cd6
| 4,878
|
py
|
Python
|
envoy/examples/puma-proxy/internal_lib/backend/loader.py
|
kevinjesse/puma
|
163a363275bf5d84d1957aaf23783be84053e40f
|
[
"MIT"
] | null | null | null |
envoy/examples/puma-proxy/internal_lib/backend/loader.py
|
kevinjesse/puma
|
163a363275bf5d84d1957aaf23783be84053e40f
|
[
"MIT"
] | null | null | null |
envoy/examples/puma-proxy/internal_lib/backend/loader.py
|
kevinjesse/puma
|
163a363275bf5d84d1957aaf23783be84053e40f
|
[
"MIT"
] | null | null | null |
#
# @author Kevin Jesse
# @email kevin.r.jesse@gmail.com
#
"""
Loads the part-of-speech weighting configuration and the question/answer data used by qLib.
"""
import nltk
from collections import defaultdict
import json
import os.path as path
#old default weights nounw=.8, adjw=1, numw=.8, verbbw=.6, verbw=.2, pverbw=.2,whw=.1
def LoadLanguageResource(nounw=.8, adjw=1, numw=.8, verbbw=.6, verbw=.2, pverbw=.2,whw=.1):
WeightRules = defaultdict(int)
# nounlist = ['NN', 'NNS', 'NNP', 'NNPS','PRP']
# verblist = ['VBP','VB','VBG','VBZ']
# whlist = ['WDT','WP','WP$','WRB']
nounlist = ['NN', 'NNS', 'NNP', 'NNPS']
adjlist = ['JJ', 'JJR', 'JJS', 'RP']
verbbaselist = ['VB', 'VBN']
verblist = ['VBP','VBG', 'VBZ']
pastverblist = ['VBN', 'VBD']
whlist = ['WDT', 'WP', 'WP$', 'WRB']
numlist = ['CD']
for noun in nounlist:
WeightRules[noun] = nounw
for adj in adjlist:
WeightRules[adj] = adjw
for num in numlist:
WeightRules[num] = numw
for verb in verbbaselist:
WeightRules[verb] = verbbw
for verb in verblist:
WeightRules[verb] = verbw
for verb in pastverblist:
WeightRules[verb] = pverbw
for wh in whlist:
WeightRules[wh] = whw
# WeightRules['VBP','VB','VBG','VBZ','VBN','WDT','WP','WP$','WRB'] = 1
stop_dict = defaultdict(bool)
for word in nltk.corpus.stopwords.words('english'):
stop_dict[word] = True
resource = {}
resource['rules'] = WeightRules
resource['stop_words'] = stop_dict
return resource
def LoadData(datalist):
database = {}
for datafile in datalist:
f = open(datafile)
line = f.readline()
f.close()
raw_data = json.loads(str(line.strip()))
database = PushData(raw_data, database)
return database
def PushData(data, database):
last = len(database.keys())
for pair in data:
database[last] = nltk.word_tokenize(pair['question'])
last += 1
database[last] = nltk.word_tokenize(pair['answer'])
last += 1
return database
def LoadDataPair(datalist):
database = {}
database['Q'] = {}
database['A'] = {}
# print datalist
for datafile in datalist:
f = open(datafile)
# line = f.readline()
line = f.read()
f.close()
# print (line)
# print (str(line.strip()))
# print line
raw_data = json.loads(str(line.strip()))
database = PushDataPair(raw_data, database)
return database
def PushDataPair(data, database):
last = len(database['Q'].keys())
for pair in data:
database['Q'][last] = nltk.word_tokenize(pair['question'])
database['A'][last] = nltk.word_tokenize(pair['answer'])
last += 1
return database
def LoadTemplate(filelist):
Library = {}
for filepath in filelist:
name = path.splitext(path.basename(filepath))[0]
if name in ['template_init', 'template_joke']:
Library[name] = {}
for line in open(filepath):
# print line
theme, line_real = line.strip().split(';') # i think you want to split then strip...
# print theme
try:
Library[name][theme].append(line_real)
except KeyError:
Library[name][theme] = []
Library[name][theme].append(line_real)
else:
Library[name] = [line.strip() for line in open(filepath)]
return Library
def LoadTopic(topicfile):
return [line.strip() for line in open(topicfile)]
def LoadQuestions(question_file, is_choice_file=False):
qLib = []
if is_choice_file:
for line in open(question_file):
state_type, argc, question = line.split(';')
state_type = state_type.strip()
argc = argc.strip()
question = question.strip()
qLib.append(tuple((question, state_type, argc)))
return qLib
for line in open(question_file):
a, q = line.split(';')
a = a.strip()
q = q.strip()
qLib.append(tuple((q, a, 0)))
# try:
# qLib.append(tuple((q, a)))
# except KeyError:
# qLib[q] = []
# qLib[type].append(q)
return qLib
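# Illustrative line formats for the files parsed by LoadQuestions (examples only;
# fields are separated by ';' to match the split(';') calls above):
#
#   default file    ->  <a>;<question text>
#   is_choice_file  ->  <state_type>;<argc>;<question text>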
def LoadContinue(question_file):
cLib = []
for line in open(question_file):
c, a, q = line.split(';')
c = c.strip()
a = a.strip()
q = q.strip()
cLib.append(tuple((q, a, c)))
# try:
# qLib.append(tuple((q, a)))
# except KeyError:
# qLib[q] = []
# qLib[type].append(q)
return cLib
| 29.92638
| 102
| 0.541
|
b6329102dc5d80ed9ba2001d701e9e1f67adb3f0
| 1,049
|
py
|
Python
|
Hill Climbing/HCRastrigin.py
|
aislan-leite/Metaheuristic
|
e3553a78a2df76188c764801968a702929758406
|
[
"MIT"
] | null | null | null |
Hill Climbing/HCRastrigin.py
|
aislan-leite/Metaheuristic
|
e3553a78a2df76188c764801968a702929758406
|
[
"MIT"
] | 1
|
2017-07-16T20:24:48.000Z
|
2017-07-16T20:24:48.000Z
|
Hill Climbing/HCRastrigin.py
|
aislan-leite/Metaheuristic
|
e3553a78a2df76188c764801968a702929758406
|
[
"MIT"
] | null | null | null |
import math
import random
# Rastrigin benchmark function
def fo(x,y):
z = 20 + (x * x - 10 * (math.cos(2 * math.pi * x))) + (y * y - 10 * (math.cos(2 * math.pi * y)))
return z
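# For reference, fo() is the 2-D Rastrigin benchmark:
#   f(x, y) = 20 + x^2 - 10*cos(2*pi*x) + y^2 - 10*cos(2*pi*y)
# Its global minimum is f(0, 0) = 0 on the usual search domain [-5.12, 5.12]^2.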
# initialization of the decision variables
s0 = random.uniform(-5.12,5.12)
s1 = random.uniform(-5.12,5.12)
sx = s0  # current value of s0
sy = s1  # current value of s1
atual_sol = fo(sx,sy)
print("Solução inicial \nS0: ", sx, ", s1: ", sy, ", f(x): ", fo(sx,sy))
i = 0
while i < 100000:
    ajuste = sx * 0.1  # 10% perturbation
if(atual_sol > fo(sx+ajuste,sy)):
sx = sx + ajuste
atual_sol = fo(sx,sy)
elif(atual_sol > fo(sx-ajuste,sy)):
sx = sx - ajuste
atual_sol = fo(sx,sy)
    ajuste = sy * 0.1  # 10% perturbation
if(atual_sol > fo(sx,sy+ajuste)):
sy = sy + ajuste
atual_sol = fo(sx,sy)
elif(atual_sol > fo(sx, sy-ajuste)):
sy = sy - ajuste
atual_sol = fo(sx, sy)
i = i + 1
print("X: ", sx, ", Y: ", sy, ", f(x): ", fo(sx,sy))
| 29.138889
| 100
| 0.519542
|
f793dd5ddab841029b32ef726bb057c266ba5baa
| 406
|
py
|
Python
|
app/blueprints/events.py
|
vabs/quizzy
|
1ffe60c0f018c253e977c920b537a42cc7825e0e
|
[
"Apache-2.0"
] | null | null | null |
app/blueprints/events.py
|
vabs/quizzy
|
1ffe60c0f018c253e977c920b537a42cc7825e0e
|
[
"Apache-2.0"
] | 1
|
2021-02-08T20:22:55.000Z
|
2021-02-08T20:22:55.000Z
|
app/blueprints/events.py
|
vabs/quizzy
|
1ffe60c0f018c253e977c920b537a42cc7825e0e
|
[
"Apache-2.0"
] | null | null | null |
from flask import session
from flask_socketio import emit, join_room, leave_room
from .. import socketio, connected_players
@socketio.on('joined', namespace='/play')
def joined():
join_room('quizzy')
emit('status', {'msg': session.get('name') + ' has joined the game.'}, room='quizzy')
@socketio.on('/status', namespace='/play')
def players():
emit('players', { 'players': connected_players })
| 29
| 89
| 0.699507
|
eb893890ad5ca8b5af2efcfb2b2fa78dd5440406
| 1,576
|
py
|
Python
|
IDC/network.py
|
Khanhnn00/blind_sr_denoise
|
3153f90d20fd884ab69b47c30c685e0175276055
|
[
"Apache-2.0"
] | null | null | null |
IDC/network.py
|
Khanhnn00/blind_sr_denoise
|
3153f90d20fd884ab69b47c30c685e0175276055
|
[
"Apache-2.0"
] | null | null | null |
IDC/network.py
|
Khanhnn00/blind_sr_denoise
|
3153f90d20fd884ab69b47c30c685e0175276055
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
from util import swap_axis
class Generator(nn.Module):
def __init__(self, conf):
super(Generator, self).__init__()
struct = conf.G_structure
# First layer - Converting RGB image to latent space
self.first_layer = nn.Conv2d(in_channels=1, out_channels=conf.G_chan, kernel_size=struct[0], bias=False)
feature_block = [] # Stacking intermediate layer
for layer in range(1, len(struct) - 1):
feature_block += [nn.Conv2d(in_channels=conf.G_chan, out_channels=conf.G_chan, kernel_size=struct[layer], bias=False)]
self.feature_block = nn.Sequential(*feature_block)
# Final layer - Down-sampling and converting back to image
self.final_layer = nn.Conv2d(in_channels=conf.G_chan, out_channels=1, kernel_size=struct[-1],
stride=int(1 / conf.scale_factor), bias=False)
# Calculate number of pixels shaved in the forward pass
self.output_size = self.forward(torch.FloatTensor(torch.ones([1, 1, conf.input_crop_size, conf.input_crop_size]))).shape[-1]
self.forward_shave = int(conf.input_crop_size * conf.scale_factor) - self.output_size
def forward(self, input_tensor):
# Swap axis of RGB image for the network to get a "batch" of size = 3 rather the 3 channels
input_tensor = swap_axis(input_tensor)
downscaled = self.first_layer(input_tensor)
features = self.feature_block(downscaled)
output = self.final_layer(features)
return swap_axis(output)
| 52.533333
| 132
| 0.685914
|
62ed500dfd499cd55a6e4bb728f7f0a8f4a88798
| 2,184
|
py
|
Python
|
comicolorization/models/ltbc/low_level.py
|
DwangoMediaVillage/Comicolorization
|
98f323e78baceae0b1086f01ac51b5e8a7515abb
|
[
"MIT"
] | 122
|
2017-08-21T10:01:07.000Z
|
2022-03-21T13:52:19.000Z
|
comicolorization/models/ltbc/low_level.py
|
DwangoMediaVillage/Comicolorization
|
98f323e78baceae0b1086f01ac51b5e8a7515abb
|
[
"MIT"
] | 7
|
2017-10-20T15:12:13.000Z
|
2022-01-30T23:04:37.000Z
|
comicolorization/models/ltbc/low_level.py
|
DwangoMediaVillage/Comicolorization
|
98f323e78baceae0b1086f01ac51b5e8a7515abb
|
[
"MIT"
] | 26
|
2017-08-22T08:11:20.000Z
|
2022-03-09T14:59:18.000Z
|
import chainer
class LowLevelNetwork(chainer.Chain):
def __init__(self):
super(LowLevelNetwork, self).__init__(
conv1_1=chainer.links.Convolution2D(
in_channels=None,
out_channels=64,
ksize=3,
stride=2,
pad=1),
bn1_1=chainer.links.BatchNormalization(64),
conv1_2=chainer.links.Convolution2D(
in_channels=None,
out_channels=128,
ksize=3,
stride=1,
pad=1),
bn1_2=chainer.links.BatchNormalization(128),
conv2_1=chainer.links.Convolution2D(
in_channels=None,
out_channels=128,
ksize=3,
stride=2,
pad=1),
bn2_1=chainer.links.BatchNormalization(128),
conv2_2=chainer.links.Convolution2D(
in_channels=None,
out_channels=256,
ksize=3,
stride=1,
pad=1),
bn2_2=chainer.links.BatchNormalization(256),
conv3_1=chainer.links.Convolution2D(
in_channels=None,
out_channels=256,
ksize=3,
stride=2,
pad=1),
bn3_1=chainer.links.BatchNormalization(256),
conv3_2=chainer.links.Convolution2D(
in_channels=None,
out_channels=512,
ksize=3,
stride=1,
pad=1),
bn3_2=chainer.links.BatchNormalization(512),
)
def __call__(self, x, test=False):
# type: (any, bool) -> any
h = x
h = chainer.functions.relu(self.bn1_1(self.conv1_1(h), test=test))
h = chainer.functions.relu(self.bn1_2(self.conv1_2(h), test=test))
h = chainer.functions.relu(self.bn2_1(self.conv2_1(h), test=test))
h = chainer.functions.relu(self.bn2_2(self.conv2_2(h), test=test))
h = chainer.functions.relu(self.bn3_1(self.conv3_1(h), test=test))
h = chainer.functions.relu(self.bn3_2(self.conv3_2(h), test=test))
return h
| 35.803279
| 74
| 0.521978
|
402b333f5405da00b984f2ec73e6a52965d3fd8e
| 21,385
|
py
|
Python
|
tegridy-tools/minGPT/minGPT.py
|
aikovsky/tegridy-tools
|
4a87e1a27e2ad9ee6e16049bde7835836caa4fdd
|
[
"Apache-2.0"
] | 18
|
2020-12-27T09:29:34.000Z
|
2022-03-28T12:15:57.000Z
|
tegridy-tools/minGPT/minGPT.py
|
aikovsky/tegridy-tools
|
4a87e1a27e2ad9ee6e16049bde7835836caa4fdd
|
[
"Apache-2.0"
] | null | null | null |
tegridy-tools/minGPT/minGPT.py
|
aikovsky/tegridy-tools
|
4a87e1a27e2ad9ee6e16049bde7835836caa4fdd
|
[
"Apache-2.0"
] | 7
|
2021-02-16T14:11:03.000Z
|
2022-01-24T09:04:01.000Z
|
# -*- coding: utf-8 -*-
# This is a stand-alone version of minGPT by Andrej Karpathy as a single Python module
# Based on the minGPT source code as it appears on Aug 25, 2020
# https://github.com/karpathy/minGPT
#######################################################
# Critical requirements
# !pip install numpy
# !pip install torch
# !pip install matplotlib
# !pip install tqdm
#######################################################
print('Loading minGPT modules...')
import copy
import math
import random
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data.dataloader import DataLoader
from torch.utils.data import Dataset
dtype = torch.float
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Assume that we are on a CUDA machine, then this should print a CUDA device:
print('Available Processing Device is:', device)
import matplotlib.pyplot as plt
# %matplotlib inline
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
import tqdm
from tqdm import auto
print('Done!')
######################################################################
"""
GPT model:
- the initial stem consists of a combination of token encoding and a positional encoding
- the meat of it is a uniform sequence of Transformer blocks
- each Transformer is a sequential combination of a 1-hidden-layer MLP block and a self-attention block
- all blocks feed into a central residual pathway similar to resnets
- the final decoder is a linear projection into a vanilla Softmax classifier
"""
class GPTConfig:
""" base GPT config, params common to all GPT versions """
embd_pdrop = 0.1
resid_pdrop = 0.1
attn_pdrop = 0.1
def __init__(self, vocab_size, block_size, **kwargs):
self.vocab_size = vocab_size
self.block_size = block_size
for k,v in kwargs.items():
setattr(self, k, v)
class GPT1Config(GPTConfig):
""" GPT-1 like network roughly 125M params """
n_layer = 12
n_head = 12
n_embd = 768
class CausalSelfAttention(nn.Module):
"""
A vanilla multi-head masked self-attention layer with a projection at the end.
It is possible to use torch.nn.MultiheadAttention here but I am including an
explicit implementation here to show that there is nothing too scary here.
"""
def __init__(self, config):
super().__init__()
assert config.n_embd % config.n_head == 0
# key, query, value projections for all heads
self.key = nn.Linear(config.n_embd, config.n_embd)
self.query = nn.Linear(config.n_embd, config.n_embd)
self.value = nn.Linear(config.n_embd, config.n_embd)
# regularization
self.attn_drop = nn.Dropout(config.attn_pdrop)
self.resid_drop = nn.Dropout(config.resid_pdrop)
# output projection
self.proj = nn.Linear(config.n_embd, config.n_embd)
# causal mask to ensure that attention is only applied to the left in the input sequence
self.register_buffer("mask", torch.tril(torch.ones(config.block_size, config.block_size))
.view(1, 1, config.block_size, config.block_size))
self.n_head = config.n_head
def forward(self, x, layer_past=None):
B, T, C = x.size()
# calculate query, key, values for all heads in batch and move head forward to be the batch dim
k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2) # (B, nh, T, hs)
# causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
# https://github.com/karpathy/minGPT/pull/55/commits/a492a389014cc4262d8011edeffdedebcec0dc57
# perform sqrt(d) scaling on (B, nh, T, hs) instead of (B, nh, T, T).
# q = q * (1.0 / math.sqrt(k.size(-1)))
# att = (q @ k.transpose(-2, -1))
att = att.masked_fill(self.mask[:,:,:T,:T] == 0, float('-inf'))
att = F.softmax(att, dim=-1)
att = self.attn_drop(att)
y = att @ v # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
y = y.transpose(1, 2).contiguous().view(B, T, C) # re-assemble all head outputs side by side
# output projection
y = self.resid_drop(self.proj(y))
return y
class Block(nn.Module):
""" an unassuming Transformer block """
def __init__(self, config):
super().__init__()
self.ln1 = nn.LayerNorm(config.n_embd)
self.ln2 = nn.LayerNorm(config.n_embd)
self.attn = CausalSelfAttention(config)
self.mlp = nn.Sequential(
nn.Linear(config.n_embd, 4 * config.n_embd),
nn.GELU(),
nn.Linear(4 * config.n_embd, config.n_embd),
nn.Dropout(config.resid_pdrop),
)
def forward(self, x):
x = x + self.attn(self.ln1(x))
x = x + self.mlp(self.ln2(x))
return x
class GPT(nn.Module):
""" the full GPT language model, with a context size of block_size """
def __init__(self, config):
super().__init__()
# input embedding stem
self.tok_emb = nn.Embedding(config.vocab_size, config.n_embd)
self.pos_emb = nn.Parameter(torch.zeros(1, config.block_size, config.n_embd))
self.drop = nn.Dropout(config.embd_pdrop)
# transformer
self.blocks = nn.Sequential(*[Block(config) for _ in range(config.n_layer)])
# decoder head
self.ln_f = nn.LayerNorm(config.n_embd)
self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.block_size = config.block_size
self.apply(self._init_weights)
logger.info("number of parameters: %e", sum(p.numel() for p in self.parameters()))
def get_block_size(self):
return self.block_size
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def configure_optimizers(self, train_config):
"""
This long function is unfortunately doing something very simple and is being very defensive:
We are separating out all parameters of the model into two buckets: those that will experience
weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
We are then returning the PyTorch optimizer object.
"""
# separate out all parameters to those that will and won't experience regularizing weight decay
decay = set()
no_decay = set()
whitelist_weight_modules = (torch.nn.Linear, )
blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
for mn, m in self.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
if pn.endswith('bias'):
# all biases will not be decayed
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
# special case the position embedding parameter in the root GPT module as not decayed
no_decay.add('pos_emb')
# validate that we considered every parameter
param_dict = {pn: p for pn, p in self.named_parameters()}
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
        assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
                                                           % (str(param_dict.keys() - union_params), )
# create the pytorch optimizer object
optim_groups = [
{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": train_config.weight_decay},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
]
optimizer = torch.optim.AdamW(optim_groups, lr=train_config.learning_rate, betas=train_config.betas)
return optimizer
def forward(self, idx, targets=None):
b, t = idx.size()
assert t <= self.block_size, "Cannot forward, model block size is exhausted."
# forward the GPT model
token_embeddings = self.tok_emb(idx) # each index maps to a (learnable) vector
position_embeddings = self.pos_emb[:, :t, :] # each position maps to a (learnable) vector
x = self.drop(token_embeddings + position_embeddings)
x = self.blocks(x)
x = self.ln_f(x)
logits = self.head(x)
# if we are given some desired targets also calculate the loss
loss = None
if targets is not None:
loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1))
return logits, loss
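# Illustrative usage sketch (added; not part of the original module). The tiny
# sizes below are assumptions, chosen only to show how GPTConfig and GPT fit together:
#
#   mconf = GPTConfig(vocab_size=10, block_size=8, n_layer=1, n_head=2, n_embd=16)
#   gpt = GPT(mconf)
#   idx = torch.randint(0, 10, (4, 8))
#   logits, loss = gpt(idx, targets=idx)
#   assert logits.shape == (4, 8, 10)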
##########################################################################
class TrainerConfig:
# optimization parameters
max_epochs = 10
batch_size = 64
learning_rate = 3e-4
betas = (0.9, 0.95)
grad_norm_clip = 1.0
weight_decay = 0.1 # only applied on matmul weights
# learning rate decay params: linear warmup followed by cosine decay to 10% of original
lr_decay = False
warmup_tokens = 375e6 # these two numbers come from the GPT-3 paper, but may not be good defaults elsewhere
final_tokens = 260e9 # (at what point we reach 10% of original LR)
# checkpoint settings
ckpt_path = None
num_workers = 0 # for DataLoader
def __init__(self, **kwargs):
for k,v in kwargs.items():
setattr(self, k, v)
class Trainer:
def __init__(self, model, train_dataset, test_dataset, config, ckpt_path):
self.model = model
self.train_dataset = train_dataset
self.test_dataset = test_dataset
self.config = config
self.config.ckpt_path = ckpt_path
self.ckpt_path = ckpt_path
# take over whatever gpus are on the system
self.device = 'cpu'
if torch.cuda.is_available():
self.device = torch.cuda.current_device()
self.model = torch.nn.DataParallel(self.model).to(self.device)
def load_checkpoint(self):
if self.config.ckpt_path is not None:
ckpt_model = self.model.module if hasattr(self.model, "module") else self.model
logger.info("loading %s", self.config.ckpt_path)
print('Loading checkpoint...')
ck = torch.load(self.config.ckpt_path, device)
ckpt_model.load_state_dict(copy.deepcopy(ck))
print('Checkpoint loaded!')
# Print model's state_dict
print("Model's state_dict:")
for param_tensor in ckpt_model.state_dict():
print(param_tensor, "\t", ckpt_model.state_dict()[param_tensor].size())
def save_checkpoint(self):
# DataParallel wrappers keep raw model object in .module attribute
raw_model = self.model.module if hasattr(self.model, "module") else self.model
logger.info("saving %s", self.config.ckpt_path)
torch.save(raw_model.state_dict(), self.config.ckpt_path)
def train(self):
model, config = self.model, self.config
raw_model = model.module if hasattr(self.model, "module") else model
optimizer = raw_model.configure_optimizers(config)
def run_epoch(split):
is_train = split == 'train'
model.train(is_train)
data = self.train_dataset if is_train else self.test_dataset
loader = DataLoader(data, shuffle=True, pin_memory=True,
batch_size=config.batch_size,
num_workers=config.num_workers)
losses = []
            pbar = tqdm.auto.tqdm(enumerate(loader), total=len(loader)) if is_train else enumerate(loader)
for it, (x, y) in pbar:
# Iterations save function
if int(it) % 1000 == 0 and self.config.ckpt_path != None:
self.save_checkpoint()
# place data on the correct device
x = x.to(self.device)
y = y.to(self.device)
# forward the model
with torch.set_grad_enabled(is_train):
logits, loss = model(x, y)
loss = loss.mean() # collapse all losses if they are scattered on multiple gpus
losses.append(loss.item())
if is_train:
# backprop and update the parameters
model.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), config.grad_norm_clip)
optimizer.step()
# decay the learning rate based on our progress
if config.lr_decay:
self.tokens += (y >= 0).sum() # number of tokens processed this step (i.e. label is not -100)
if self.tokens < config.warmup_tokens:
# linear warmup
lr_mult = float(self.tokens) / float(max(1, config.warmup_tokens))
else:
# cosine learning rate decay
progress = float(self.tokens - config.warmup_tokens) / float(max(1, config.final_tokens - config.warmup_tokens))
lr_mult = max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))
lr = config.learning_rate * lr_mult
for param_group in optimizer.param_groups:
param_group['lr'] = lr
else:
lr = config.learning_rate
# report progress
pbar.set_description(f"epoch {epoch+1} iter {it}: train loss {loss.item():.5f}. lr {lr:e}")
if not is_train:
test_loss = float(np.mean(losses))
logger.info("test loss: %f", test_loss)
return test_loss
best_loss = float('inf')
self.tokens = 0 # counter used for learning rate decay
self.load_checkpoint()
for epoch in range(config.max_epochs):
run_epoch('train')
if self.test_dataset is not None:
test_loss = run_epoch('test')
# supports early stopping based on the test loss, or just save always if no test set is provided
good_model = self.test_dataset is None or test_loss < best_loss
if self.config.ckpt_path is not None and good_model:
#best_loss = test_loss
self.save_checkpoint()
##########################################################################
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def top_k_logits(logits, k):
v, ix = torch.topk(logits, k)
out = logits.clone()
out[out < v[:, [-1]]] = -float('Inf')
return out
@torch.no_grad()
def sample(model, x, steps, temperature=1.0, sample=False, top_k=None):
"""
take a conditioning sequence of indices in x (of shape (b,t)) and predict the next token in
the sequence, feeding the predictions back into the model each time. Clearly the sampling
has quadratic complexity unlike an RNN that is only linear, and has a finite context window
of block_size, unlike an RNN that has an infinite context window.
"""
block_size = model.get_block_size()
model.eval()
for k in tqdm.auto.tqdm(range(steps)):
x_cond = x if x.size(1) <= block_size else x[:, -block_size:] # crop context if needed
logits, _ = model(x_cond)
# pluck the logits at the final step and scale by temperature
logits = logits[:, -1, :] / temperature
# optionally crop probabilities to only the top k options
if top_k is not None:
logits = top_k_logits(logits, top_k)
# apply softmax to convert to probabilities
probs = F.softmax(logits, dim=-1)
# sample from the distribution or take the most likely
if sample:
ix = torch.multinomial(probs, num_samples=1)
else:
_, ix = torch.topk(probs, k=1, dim=-1)
# append to the sequence and continue
x = torch.cat((x, ix), dim=1)
return x
# make deterministic if needed
# set_seed(42)
############################################################################
class CharDataset(Dataset):
def __init__(self, data, block_size):
chars = sorted(list(set(data)))
data_size, vocab_size = len(data), len(chars)
print('Dataset has %d characters, %d unique.' % (data_size, vocab_size))
self.stoi = { ch:i for i,ch in enumerate(chars) }
self.itos = { i:ch for i,ch in enumerate(chars) }
self.block_size = block_size
self.vocab_size = vocab_size
self.data = data
def __len__(self):
return len(self.data) - self.block_size
def __getitem__(self, idx):
# grab a chunk of (block_size + 1) characters from the data
chunk = self.data[idx:idx + self.block_size + 1]
# encode every character to an integer
dix = [self.stoi[s] for s in chunk]
x = torch.tensor(dix[:-1], dtype=torch.long)
y = torch.tensor(dix[1:], dtype=torch.long)
return x, y
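# Illustrative check (added, not part of the original module): for the text "abcde"
# and block_size 2, item 0 yields x = encode("ab") and y = encode("bc"), i.e. y is x
# shifted one character ahead:
#
#   ds = CharDataset("abcde", block_size=2)
#   x, y = ds[0]
#   assert [ds.itos[int(t)] for t in x] == ['a', 'b']
#   assert [ds.itos[int(t)] for t in y] == ['b', 'c']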
#############################################################################
def MainLoader(full_path_to_training_text_file,
full_path_to_validation_text_file = None,
number_of_dataloader_workers = 4,
model_attention_span_in_tokens = 512,
model_embed_size = 256,
number_of_heads = 16,
number_of_layers = 4,
number_of_training_epochs = 2,
training_batch_size = 48,
model_learning_rate = 6e-4,
ckpt_path = None):
text = open(full_path_to_training_text_file, 'r').read() # don't worry we won't run out of file handles
train_dataset = CharDataset(text, model_attention_span_in_tokens) # one line of poem is roughly 50 characters
if full_path_to_validation_text_file is not None:
text_val = open(full_path_to_validation_text_file, 'r').read()
val_dataset = CharDataset(text_val, model_attention_span_in_tokens)
mconf = GPTConfig(train_dataset.vocab_size,
train_dataset.block_size,
n_layer=number_of_layers,
n_head=number_of_heads,
n_embd=model_embed_size)
model = GPT(mconf)
tconf = TrainerConfig(max_epochs=number_of_training_epochs,
batch_size=training_batch_size,
learning_rate=model_learning_rate,
num_workers=number_of_dataloader_workers)
trainer = Trainer(model, train_dataset, full_path_to_validation_text_file, tconf, ckpt_path)
print('Checkpoint =', ckpt_path)
return trainer, model, train_dataset
#############################################################################
def Generate(model,
train_dataset,
trainer,
number_of_tokens_to_generate = 2048,
creativity_temperature = 0.8,
top_k_prob = 4,
input_prompt = "Some text"):
model.to(device)
context = input_prompt
x = torch.tensor([train_dataset.stoi[s] for s in context], dtype=torch.long)[None,...].to(trainer.device)
y = sample(model, x, number_of_tokens_to_generate, temperature=creativity_temperature, sample=True, top_k=top_k_prob)[0]
completion = ''.join([train_dataset.itos[int(i)] for i in y])
return completion
############################################################################
def PlotPositionalEmbeddings(model, attention_span):
# visualize some of the learned positional embeddings, maybe they contain structure
plt.figure(figsize=(18, 1))
ci = model.pos_emb.data[0, :, 0].cpu()
zci = torch.cat((torch.tensor([0.0]), ci)) # pre-cat a zero
plt.imshow(zci.view(1, attention_span+1).numpy())
plt.axis('off')
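# End-to-end usage sketch (added for illustration; the file name, checkpoint path
# and sizes below are assumptions, not part of the original module):
#
#   trainer, model, train_dataset = MainLoader('input.txt',
#                                              model_attention_span_in_tokens=128,
#                                              number_of_training_epochs=1,
#                                              ckpt_path='model.pt')
#   trainer.train()
#   print(Generate(model, train_dataset, trainer, input_prompt='Some text'))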
| 40.27307
| 140
| 0.599065
|
ebff2eaf768395dcee0f3048be569ef2108251f8
| 1,449
|
py
|
Python
|
scripts/sources/s_min_rel_ent_point_view.py
|
dpopadic/arpmRes
|
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
|
[
"MIT"
] | 6
|
2021-04-10T13:24:30.000Z
|
2022-03-26T08:20:42.000Z
|
scripts/sources/s_min_rel_ent_point_view.py
|
dpopadic/arpmRes
|
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
|
[
"MIT"
] | null | null | null |
scripts/sources/s_min_rel_ent_point_view.py
|
dpopadic/arpmRes
|
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
|
[
"MIT"
] | 6
|
2019-08-13T22:02:17.000Z
|
2022-02-09T17:49:12.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_min_rel_ent_point_view [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_min_rel_ent_point_view&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ExViewTheoryPoint).
# +
import numpy as np
from arpym.views import min_rel_entropy_normal
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_min_rel_ent_point_view-parameters)
mu_base = np.array([0.26, 0.29, 0.33]) # base expectation
sig2_base = np.array([[0.18, 0.11, 0.13],
[0.11, 0.23, 0.16],
[0.13, 0.16, 0.23]]) # base covariance
v = np.array([[1, -1, 0], [0, 1, -1]]) # view matrix
z_view = np.array([1.02, -0.5]) # point view
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_min_rel_ent_point_view-implementation-step01): Compute point view updated parameters
# +
k_, n_ = v.shape # market and view dimension
mu_upd, sig2_upd = min_rel_entropy_normal(mu_base, sig2_base, v, z_view, v,
np.zeros((k_)))
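# Added illustrative check (not part of the original script): under a point view the
# minimum relative entropy update is expected to reproduce the view exactly, v @ mu_upd = z_view.
print('point view matched:', np.allclose(v @ mu_upd, z_view))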
| 34.5
| 219
| 0.640442
|
123ba3a5bd385551f74597fccda733f67347c2b2
| 1,241
|
py
|
Python
|
project/get_db.py
|
c24-microws-mar/catalog-service
|
cc5489129e23181a68a29b39c38fc50a542e8095
|
[
"MIT"
] | null | null | null |
project/get_db.py
|
c24-microws-mar/catalog-service
|
cc5489129e23181a68a29b39c38fc50a542e8095
|
[
"MIT"
] | null | null | null |
project/get_db.py
|
c24-microws-mar/catalog-service
|
cc5489129e23181a68a29b39c38fc50a542e8095
|
[
"MIT"
] | null | null | null |
url = "http://musicbrainz.org/ws/2/release?query={}&limit=50&fmt=json"
cover_url_temp = 'http://coverartarchive.org/release/{}'
import requests
import json
import string
import random
albums = []
for word in ['offspring', 'britney', 'baby', 'metal', 'house', 'dance','hot']:#string.ascii_lowercase[0:3]:
    print(word)
out = requests.get(url.format(word))
data = json.loads(out.text)
if 'releases' in data:
        print('n matches {}'.format(len(data['releases'])))
for album in data['releases']:
alb = { 'album': album['title'],
'albumId': album['id'],
'artist': album['artist-credit'][0]['artist']['name'],
'artistId': album['artist-credit'][0]['artist']['id'],
'price': random.randint(10,15),
}
cover = requests.get(cover_url_temp.format(alb['albumId']))
cover_url = 'NOT FOUND'
has_cover = False
print('.')
if cover.ok:
cdata = json.loads(cover.text)
try:
cover_url = cdata['images'][0]['thumbnails']['large']
has_cover = True
except:
cover_url = 'FAILED'
pass
alb['coverUrl'] = cover_url
alb['hasCover'] = has_cover
albums.append(alb)
print('generated {} elems'.format(len(albums)))
with open('database.json', 'w') as f:
json.dump(albums, f, indent=4)
| 28.860465
| 107
| 0.639001
|
27cc0fba65cadf85bf95f6d9e03cfca21768229f
| 2,590
|
py
|
Python
|
tests/test_monitor.py
|
insidus341/DeskLEDs
|
e0bb3f0258dc47198b88373ada83f545aa5ebf14
|
[
"MIT"
] | null | null | null |
tests/test_monitor.py
|
insidus341/DeskLEDs
|
e0bb3f0258dc47198b88373ada83f545aa5ebf14
|
[
"MIT"
] | 3
|
2021-06-08T22:01:10.000Z
|
2022-03-12T00:40:10.000Z
|
tests/test_monitor.py
|
insidus341/DeskLEDs
|
e0bb3f0258dc47198b88373ada83f545aa5ebf14
|
[
"MIT"
] | 1
|
2020-07-19T13:33:14.000Z
|
2020-07-19T13:33:14.000Z
|
from run.monitor import Monitor
MONITOR_ID = 1
def test_monitor_get_monitor_id():
monitor = Monitor(MONITOR_ID)
assert monitor.get_monitor_id() == MONITOR_ID
def test_monitor_get_monitor_middle():
monitor = Monitor(MONITOR_ID)
assert isinstance(monitor.get_monitor_bounding_box(), tuple)
def test_monitor_get_monitor_bounding_box_top_left_tuple():
monitor = Monitor(MONITOR_ID)
assert isinstance(monitor.get_monitor_bounding_box_top_left(), tuple)
def test_monitor_get_monitor_bounding_box_top_left_len():
monitor = Monitor(MONITOR_ID)
assert len(monitor.get_monitor_bounding_box_top_left()) == 2
def test_monitor_get_monitor_bounding_box_bottom_right_tuple():
monitor = Monitor(MONITOR_ID)
assert isinstance(monitor.get_monitor_bounding_box_bottom_right(), tuple)
def test_monitor_get_monitor_bounding_box_bottom_right_len():
monitor = Monitor(MONITOR_ID)
assert len(monitor.get_monitor_bounding_box_bottom_right()) == 2
def test_monitor_get_pixel_list_list():
monitor = Monitor(MONITOR_ID)
assert isinstance(monitor.get_pixel_list(), list)
def test_monitor_get_pixel_list_len():
monitor = Monitor(MONITOR_ID)
assert len(monitor.get_pixel_list()) > 0
def test_monitor_get_monitor_average_color_tuple():
monitor = Monitor(MONITOR_ID)
monitor_rgb = monitor.get_monitor_average_color()
assert isinstance(monitor_rgb, tuple)
def test_monitor_get_monitor_average_color_red_greater_than_or_equal_zero():
monitor = Monitor(MONITOR_ID)
monitor_rgb = monitor.get_monitor_average_color()
assert monitor_rgb[0] >= 0
def test_monitor_get_monitor_average_color_red_less_than_or_equal_255():
monitor = Monitor(MONITOR_ID)
monitor_rgb = monitor.get_monitor_average_color()
assert monitor_rgb[0] <= 255
def test_monitor_get_monitor_average_color_green_greater_than_or_equal_zero():
monitor = Monitor(MONITOR_ID)
monitor_rgb = monitor.get_monitor_average_color()
assert monitor_rgb[1] >= 0
def test_monitor_get_monitor_average_color_green_less_than_or_equal_255():
monitor = Monitor(MONITOR_ID)
monitor_rgb = monitor.get_monitor_average_color()
assert monitor_rgb[1] <= 255
def test_monitor_get_monitor_average_color_blue_greater_than_or_equal_zero():
monitor = Monitor(MONITOR_ID)
monitor_rgb = monitor.get_monitor_average_color()
assert monitor_rgb[2] >= 0
def test_monitor_get_monitor_average_color_blue_less_than_or_equal_255():
monitor = Monitor(MONITOR_ID)
monitor_rgb = monitor.get_monitor_average_color()
assert monitor_rgb[2] <= 255
| 36.478873
| 78
| 0.8
|
2714ab1dc920f4d69780fef5e1c153299b3abb2d
| 3,832
|
py
|
Python
|
stages/third/process_results_repack.py
|
ClaudioGuevara/rosetta
|
beb6a390433e7a98adb125b26a035ecd44e7b15a
|
[
"AFL-1.1"
] | null | null | null |
stages/third/process_results_repack.py
|
ClaudioGuevara/rosetta
|
beb6a390433e7a98adb125b26a035ecd44e7b15a
|
[
"AFL-1.1"
] | null | null | null |
stages/third/process_results_repack.py
|
ClaudioGuevara/rosetta
|
beb6a390433e7a98adb125b26a035ecd44e7b15a
|
[
"AFL-1.1"
] | null | null | null |
import os
import re
from utils.create_file import create_file
def process_results_repack(complex_folder, antibody, antigen, rosetta_path):
structure_selected=""
file_results=open(os.path.join(complex_folder, "repack.fasc"), "r").read().splitlines()[2:]
score=0
rep_force=20
for line in file_results:
results= re.sub(' +', ' ',line).split(" ")
if float(results[1]) < score and float(results[6]) < rep_force:
structure_selected=results[-1]
score=float(results[1])
rep_force=float(results[6])
if not structure_selected:
return False
else:
#rosetta_database_direct = f"/home/claudio/tesis/rosetta_src_2021.16.61629_bundle/main/database/"
rosetta_database_direct = f"{rosetta_path}/database/"
content = f"-database {rosetta_database_direct}\n-docking # the docking option group\n -partners HL_A # set rigid body docking partners\n -dock_pert 3 8 # set coarse perturbation parameters (degrees and angstroms)\n -dock_mcm_trans_magnitude 0.1 # refinement translational perturbation\n -dock_mcm_rot_magnitude 5.0 # refinement rotational perturbation\n-s {structure_selected}.pdb\n-run:max_retry_job 50 # if the mover fails, retry 50 times\n-use_input_sc # add the side chains from the input pdb to the rotamer library\n-ex1 # increase rotamer bins to include mean +- 1 standard deviation\n-ex2 # increase rotamer bins to include mean +- 2 standard deviations\n-out:path:all ../results/{antibody}-{antigen}/\n-out # out option group\n -file # out:file option group\n -scorefile docking2.fasc # the name of the model score file\n-score:weights talaris2014.wts # Set talaris2014 as default score function"
create_file("docking_new.options", content, complex_folder)
content = f'<ROSETTASCRIPTS>\n <SCOREFXNS>\n </SCOREFXNS>\n <TASKOPERATIONS>\n <InitializeFromCommandline name="ifcl" />\n <RestrictToRepacking name="rtr" />\n Restrict to residues within a distance and vector cutoff of the protein-protein interface\n <RestrictToInterfaceVector CB_dist_cutoff="10.0" chain1_num="1,2" chain2_num="3" name="rtiv" nearby_atom_cutoff="5.5" vector_angle_cutoff="75" vector_dist_cutoff="9.0" />\n Fix residues known experimentally to be critical in interaction\n <PreventResiduesFromRepacking name="prfrp" residues="187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,302,303,304,305,306,307,308,309,310,311,312,313,314" />\n </TASKOPERATIONS>\n <FILTERS>\n </FILTERS>\n <MOVERS>\n MINIMIZATION MOVERS\n Single cycle of FastRelax to minimize backbone of docking partners\n <FastRelax name="minimize_interface" repeats="1" scorefxn="talaris2014" task_operations="ifcl,rtr,rtiv,prfrp" />\n\n DOCKING MOVERS\n <Docking conserve_foldtree="0" design="0" fullatom="0" ignore_default_docking_task="0" jumps="1" local_refine="0" name="dock_low" optimize_fold_tree="1" score_high="talaris2014" score_low="score_docking_low" task_operations="ifcl,prfrp" />\n <Docking conserve_foldtree="0" design="0" fullatom="1" jumps="1" local_refine="1" name="dock_high" optimize_fold_tree="1" score_high="talaris2014" score_low="score_docking_low" task_operations="ifcl,prfrp" />\n\n <SaveAndRetrieveSidechains allsc="0" name="srsc" /> Speeds the move from centroid to full atom mode\n\n </MOVERS>\n <APPLY_TO_POSE>\n </APPLY_TO_POSE>\n <PROTOCOLS>\n Run docking protocol\n <Add mover="dock_low" />\n <Add mover="srsc" />\n <Add mover="dock_high" />\n\n Minimize interface\n <Add mover="minimize_interface" />\n </PROTOCOLS>\n <OUTPUT scorefxn="talaris2014" />\n</ROSETTASCRIPTS>'
create_file("docking_full3.xml", content, complex_folder)
return os.path.join(complex_folder, structure_selected)
| 127.733333
| 1,825
| 0.725992
|
727cbca9729a478b178a5d810021879554351863
| 516
|
py
|
Python
|
blender/arm/logicnode/native/LN_write_storage.py
|
onelsonic/armory
|
55cfead0844923d419d75bf4bd677ebed714b4b5
|
[
"Zlib"
] | 2,583
|
2016-07-27T08:25:47.000Z
|
2022-03-31T10:42:17.000Z
|
blender/arm/logicnode/native/LN_write_storage.py
|
N8n5h/armory
|
5b4d24f067a2354bafd3ab417bb8e30ee0c5aff8
|
[
"Zlib"
] | 2,122
|
2016-07-31T14:20:04.000Z
|
2022-03-31T20:44:14.000Z
|
blender/arm/logicnode/native/LN_write_storage.py
|
N8n5h/armory
|
5b4d24f067a2354bafd3ab417bb8e30ee0c5aff8
|
[
"Zlib"
] | 451
|
2016-08-12T05:52:58.000Z
|
2022-03-31T01:33:07.000Z
|
from arm.logicnode.arm_nodes import *
class WriteStorageNode(ArmLogicTreeNode):
"""Writes the given content in the given key.
@seeNode Read Storage"""
bl_idname = 'LNWriteStorageNode'
bl_label = 'Write Storage'
arm_section = 'file'
arm_version = 1
def arm_init(self, context):
self.add_input('ArmNodeSocketAction', 'In')
self.add_input('ArmStringSocket', 'Key')
self.add_input('ArmDynamicSocket', 'Value')
self.add_output('ArmNodeSocketAction', 'Out')
| 28.666667
| 53
| 0.680233
|
5e82b0e549d258f31943ed40500785dd7a49dd3f
| 2,674
|
py
|
Python
|
setup.py
|
showmen15/testEEE
|
44619da6d0972903fc93691e30688b3c0fd4e6e7
|
[
"MIT"
] | null | null | null |
setup.py
|
showmen15/testEEE
|
44619da6d0972903fc93691e30688b3c0fd4e6e7
|
[
"MIT"
] | null | null | null |
setup.py
|
showmen15/testEEE
|
44619da6d0972903fc93691e30688b3c0fd4e6e7
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# !/usr/bin/env python
import sys
try:
from setuptools import setup
except ImportError:
sys.stderr.write('using distutils\n')
from distutils.core import setup
with open('requirements.txt') as f:
required = f.read().splitlines()
setup(
name='amber-python-drivers',
packages=[
'amberdriver',
'amberdriver.common',
'amberdriver.dummy',
'amberdriver.hokuyo',
'amberdriver.drive_to_point',
'amberdriver.collision_avoidance',
'amberdriver.null',
'amberdriver.tools',
'amberdriver.tests'
],
package_dir={
'amberdriver': 'src/amberdriver',
'amberdriver.common': 'src/amberdriver/common',
'amberdriver.dummy': 'src/amberdriver/dummy',
'amberdriver.hokuyo': 'src/amberdriver/hokuyo',
'amberdriver.drive_to_point': 'src/amberdriver/drive_to_point',
'amberdriver.collision_avoidance': 'src/amberdriver/collision_avoidance',
'amberdriver.null': 'src/amberdriver/null',
'amberdriver.tools': 'src/amberdriver/tools',
'amberdriver.tests': 'src/amberdriver/tests'
},
package_data={'': [
'src/amberdriver/common/amber.ini',
'src/amberdriver/dummy/dummy.ini',
'src/amberdriver/hokuyo/hokuyo.ini',
'src/amberdriver/drive_to_point/drive_to_point.ini',
'src/amberdriver/collision_avoidance/collision_avoidance.ini',
'src/amberdriver/tools/main.ini'
]},
data_files=[
('', [
'src/amberdriver/common/amber.ini',
'src/amberdriver/dummy/dummy.ini',
'src/amberdriver/hokuyo/hokuyo.ini',
'src/amberdriver/drive_to_point/drive_to_point.ini',
'src/amberdriver/collision_avoidance/collision_avoidance.ini',
'src/amberdriver/tools/main.ini'
]),
],
test_suite="amberdriver.tests",
include_package_data=True,
install_requires=required,
version='1.17',
description='Amber drivers in python',
author=u'Paweł Suder',
author_email='pawel@suder.info',
url='http://project-capo.github.io/',
download_url='http://github.com/project-capo/amber-python-drivers/',
keywords=[
'amber',
'dummy',
'hokuyo',
'drive to point',
'collision avoidance',
'panda'
],
classifiers=[
'Programming Language :: Python',
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'License :: Other/Proprietary License',
'Operating System :: OS Independent',
],
long_description='''\
'''
)
| 31.833333
| 81
| 0.624907
|
735c0f9b94a074e017423b24fdca158adb2505e9
| 2,719
|
py
|
Python
|
src/joemetry/utils.py
|
RESPULTE/Joemetry
|
7922fff9d28cfdbc60373dfba1e86e5206ffe7a4
|
[
"MIT"
] | null | null | null |
src/joemetry/utils.py
|
RESPULTE/Joemetry
|
7922fff9d28cfdbc60373dfba1e86e5206ffe7a4
|
[
"MIT"
] | null | null | null |
src/joemetry/utils.py
|
RESPULTE/Joemetry
|
7922fff9d28cfdbc60373dfba1e86e5206ffe7a4
|
[
"MIT"
] | null | null | null |
from math import sqrt, acos, sin, cos, radians
from joemetry._type_hints import *
from joemetry import Point, Segment, Polygon
# broke
def get_circumcircle_of_triangle(triangle: List[Point], radius: bool = True) -> Union[Tuple[Point, float], Point]:
point_a, point_b, point_c = triangle
angle_a = sin(2 * point_b.angle_to(point_c, point_a))
angle_b = sin(2 * point_c.angle_to(point_a, point_b))
angle_c = sin(2 * point_a.angle_to(point_b, point_c))
circumcenter_x = (point_a[0] * angle_a + point_b[0] * angle_b + point_c[0] * angle_c) / (angle_a + angle_b + angle_c)
circumcenter_y = (point_a[1] * angle_a + point_b[1] * angle_b + point_c[1] * angle_c) / (angle_a + angle_b + angle_c)
circumcenter = (circumcenter_x, circumcenter_y)
if radius:
circumradius = get_length(point_a - point_b) / angle_c
return circumcenter.astuple(), circumradius
return circumcenter
# broke
def get_intersect(line1: List[Point], line2: List[Point]) -> Union[None, Point]:
line1, line2 = Segment(*line1), Segment(*line2)
start = line2[0] - line1[0]
end_1, end_2 = line1[1] - line1[0], line2[1] - line2[0]
determinant = end_1.cross(end_2)
if determinant == 0: return None
check_1 = (start).cross(end_2) / determinant
check_2 = (start).cross(end_1) / determinant
if (0 <= check_1 <= 1) and (0 <= check_2 <= 1):
return round(line1[0] + (line1[1] - line1[0]) * check_1, 2).astuple()
return None
def get_support(shape1: List[Point], shape2: List[Point], direction: Point) -> Point:
s1_furthestpoint = max(shape1, key=lambda point: point.dot(direction))
s2_furthestpoint = max(shape2, key=lambda point: point.dot(-direction))
support_point = s1_furthestpoint - s2_furthestpoint
return support_point
def is_collinear(*points: List['Point']) -> bool:
    for p in points:
        if not isinstance(p, Point):
            raise TypeError(f"'{type(p).__name__}' is invalid")
if len(points) < 2: return True
return all([point.cross(points[(ind + 1) % len(points)]) == 0 for ind, point in enumerate(points)])
def get_bounding_box(polygon) -> Tuple[Point, Point]:
x_coords = [point.x for point in polygon]
y_coords = [point.y for point in polygon]
topright = max(x_coords), max(y_coords)
bottomleft = min(x_coords), min(y_coords)
return bottomleft, topright
def get_center(polygon) -> Point:
'''returns the center of the polygon using the bounding box of it as reference'''
    bottomleft, topright = get_bounding_box(polygon)
    center_x = bottomleft[0] + ((topright[0] - bottomleft[0]) / 2)
    center_y = bottomleft[1] + ((topright[1] - bottomleft[1]) / 2)
return center_x, center_y
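# Illustrative usage (added; assumes joemetry.Point instances can be built as Point(x, y)):
#
#   square = [Point(0, 0), Point(2, 0), Point(2, 2), Point(0, 2)]
#   assert get_bounding_box(square) == ((0, 0), (2, 2))
#   assert get_center(square) == (1.0, 1.0)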
| 39.985294
| 121
| 0.677823
|
f5629fbd70ea52108e2042a7665256167b0decd5
| 3,531
|
py
|
Python
|
oneflow/python/test/ops/test_inplace.py
|
xxg1413/oneflow
|
f2e3c85a25b8aecfb6c0c0af1737833b1a77e135
|
[
"Apache-2.0"
] | 1
|
2020-10-13T03:03:40.000Z
|
2020-10-13T03:03:40.000Z
|
oneflow/python/test/ops/test_inplace.py
|
xxg1413/oneflow
|
f2e3c85a25b8aecfb6c0c0af1737833b1a77e135
|
[
"Apache-2.0"
] | null | null | null |
oneflow/python/test/ops/test_inplace.py
|
xxg1413/oneflow
|
f2e3c85a25b8aecfb6c0c0af1737833b1a77e135
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
import oneflow.typing as oft
def MakeFuncConfig(enable_inplace):
func_config = flow.FunctionConfig()
func_config.enable_inplace(enable_inplace)
return func_config
def test_loss_inplace(test_case):
def IdentityLoss(name):
w = flow.get_variable(name, (10,), initializer=flow.constant_initializer(100))
y = flow.math.reduce_sum(w)
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [5]), momentum=0
).minimize(y)
return y
TrainCompare(test_case, IdentityLoss)
def test_inplace_variable(test_case):
@flow.global_function(function_config=MakeFuncConfig(True))
def InplaceVariable():
w = flow.get_variable("w", (10,), initializer=flow.constant_initializer(1))
y = flow.math.relu(w)
return y
flow.train.CheckPoint().init()
test_case.assertTrue(
np.allclose(InplaceVariable().get().numpy(), np.ones((10,), np.float32))
)
def test_deadlock(test_case):
@flow.global_function(function_config=MakeFuncConfig(True))
def Foo(x: oft.Numpy.Placeholder((10,))):
y = flow.math.relu(x)
y = flow.math.relu(y)
Foo(np.ones((10,), dtype=np.float32))
def test_nodeadlock_with_return(test_case):
@flow.global_function(function_config=MakeFuncConfig(True))
def Foo(x: oft.Numpy.Placeholder((10,))):
y = flow.math.relu(x)
y = flow.math.relu(y)
return y
Foo(np.ones((10,), dtype=np.float32)).get()
def test_reentrant_lock_check_failed(test_case):
@flow.global_function(function_config=MakeFuncConfig(True))
def Foo(x: oft.Numpy.Placeholder((10,))):
y = flow.math.relu(x)
y = flow.math.relu(y)
Foo(np.ones((10,), dtype=np.float32))
def test_const_inplace_variable(test_case):
@flow.global_function(function_config=MakeFuncConfig(True))
def InplaceVariable():
w = flow.get_variable("w", (2, 5), initializer=flow.constant_initializer(1))
y = flow.reshape(w, (10,))
return y
flow.train.CheckPoint().init()
of_ret = InplaceVariable().get().numpy()
test_case.assertTrue(np.allclose(of_ret, np.ones((10,), np.float32)))
def TrainCompare(test_case, func):
func_config = MakeFuncConfig(True)
@flow.global_function(type="train", function_config=func_config)
def EnableInplace():
return func("w0")
func_config.enable_inplace(False)
@flow.global_function(type="train", function_config=func_config)
def DisableInplace():
return func("w1")
flow.train.CheckPoint().init()
num_iter = 10
enable_inplace_losses = np.array(
[EnableInplace().get().tolist() for _ in range(num_iter)]
)
disable_inplace_losses = np.array(
[DisableInplace().get().tolist() for _ in range(num_iter)]
)
test_case.assertTrue(np.allclose(enable_inplace_losses, disable_inplace_losses))
| 30.973684
| 86
| 0.695837
|
aca1c0057270ef3c7589edc276ff4d60d077080d
| 5,319
|
py
|
Python
|
doclink/clients/requests_.py
|
Luoyufu/doclink
|
41e57f7e9891f47579cf9131000d87bb8cc09d05
|
[
"MIT"
] | 4
|
2018-01-23T04:49:52.000Z
|
2018-09-09T01:56:13.000Z
|
doclink/clients/requests_.py
|
Luoyufu/doclink
|
41e57f7e9891f47579cf9131000d87bb8cc09d05
|
[
"MIT"
] | 2
|
2019-10-18T15:48:06.000Z
|
2019-10-18T15:49:47.000Z
|
doclink/clients/requests_.py
|
Luoyufu/doclink
|
41e57f7e9891f47579cf9131000d87bb8cc09d05
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import atexit
import os
import requests
from requests_toolbelt import MultipartEncoder
from six import string_types
class RequestsClient(object):
optional_args = (
'params', 'data', 'headers', 'cookies', 'files',
'auth', 'timeout', 'allow_redirects', 'proxies',
'hooks', 'stream', 'verify', 'cert', 'json',
'multipart')
def __init__(self, session=None):
if session is None:
session = requests.Session()
atexit.register(session.close)
self._session = session
@classmethod
def _prepare_optional_args(cls, sending_kwargs, request_meta):
for arg in cls.optional_args:
value = request_meta.get(arg)
if value is not None:
if arg == 'auth':
sending_kwargs['auth'] = cls._create_auth_arg(value)
elif arg == 'files':
files_arg = cls._create_files_arg(value)
if files_arg:
sending_kwargs['files'] = files_arg
elif arg == 'multipart':
multipart_arg = cls._create_multipart_arg(value)
if multipart_arg:
encoder = MultipartEncoder(multipart_arg)
sending_kwargs['data'] = encoder
headers = sending_kwargs.setdefault('headers', {})
headers['Content-Type'] = encoder.content_type
else:
sending_kwargs[arg] = value
@classmethod
def _get_sending_kwargs(cls, request_meta):
sending_kwargs = {}
sending_kwargs.update(
method=request_meta['method'],
url=request_meta.get_url(),
)
cls._prepare_optional_args(sending_kwargs, request_meta)
return sending_kwargs
@classmethod
def _create_files_arg(cls, files_meta):
"""Create files arg for requests.
Args:
            files_meta (dict): Contains field name to file_info mapping.
Returns:
A dict mapping field name to file_item for multipart/form-data
"""
def create_file_item(file_info):
"""Nested function to create a file item for files arg.
Args:
                file_info: If it's a file_path str, open it as a file object.
Else, pass it to requests files arg.
Returns:
File instance or file_info tuple. For example:
open('report.xls', 'rb')
('report.xls', open('report.xls', 'rb'))
"""
if isinstance(file_info, string_types):
try:
return open(file_info, 'rb') # param is file_path
except (IOError, TypeError):
pass
return file_info
files_arg = {}
for field, file_infos in files_meta.items():
if isinstance(file_infos, list):
files_arg[field] = [create_file_item(file_info) for file_info in file_infos]
else:
files_arg[field] = create_file_item(file_infos)
return files_arg
@classmethod
def _create_auth_arg(cls, auth_meta):
if auth_meta['type'] == 'basic':
return requests.auth.HTTPBasicAuth(auth_meta['username'], auth_meta['password'])
else:
return requests.auth.HTTPDigestAuth(auth_meta['username'], auth_meta['password'])
@classmethod
def _create_multipart_arg(cls, multipart_meta):
"""Create a MultipartEncoder instance for multipart/form-data.
Requests_toolbelt will not try to guess file_name. To encode a file we need
to give file_name explicitly.
Args:
multipart_meta (dict): Map field name to multipart form-data value.
"""
def create_multipart_item(item_info):
"""Nested function to create a multipart item for files arg.
Args:
                item_info: If it's a file_path str, open it as a file object and set file_name.
Else, pass it to requests_toolbelt MultipartEncoder.
Returns:
File instance or file_info tuple. For example:
('report.xls', open('report.xls', 'rb'))
"""
if isinstance(item_info, string_types):
try:
return (os.path.basename(item_info), open(item_info, 'rb')) # file_path
except (IOError, TypeError):
pass
try:
return (os.path.basename(item_info.name), item_info) # file_object
except AttributeError:
pass
return item_info
multipart_arg = {}
for field, item_infos in multipart_meta.items():
if isinstance(item_infos, list):
multipart_arg[field] = [create_multipart_item(item_info)
for item_info in item_infos]
else:
multipart_arg[field] = create_multipart_item(item_infos)
return multipart_arg
def request(self, request_meta):
sending_kwargs = self._get_sending_kwargs(request_meta)
return self._session.request(**sending_kwargs)
| 34.096154
| 93
| 0.567964
|
384b97d644ed3a7235c2356d38f0bfdcdb6b972c
| 1,994
|
py
|
Python
|
Python/add-and-search-word-data-structure-design.py
|
bssrdf/LeetCode-5
|
746df5cff523361145a74d9d429dc541a7b99910
|
[
"MIT"
] | 68
|
2018-01-13T07:15:37.000Z
|
2022-02-20T12:58:24.000Z
|
Python/add-and-search-word-data-structure-design.py
|
ambershen/LeetCode
|
0c53580697b05fadb3981d97bd25f1d9da65fd2f
|
[
"MIT"
] | 2
|
2021-12-10T01:43:37.000Z
|
2021-12-14T21:48:53.000Z
|
Python/add-and-search-word-data-structure-design.py
|
ambershen/LeetCode
|
0c53580697b05fadb3981d97bd25f1d9da65fd2f
|
[
"MIT"
] | 63
|
2017-04-10T03:38:25.000Z
|
2022-03-17T23:24:51.000Z
|
# Time: O(min(n, h)), per operation
# Space: O(min(n, h))
#
# Design a data structure that supports the following two operations:
#
# void addWord(word)
# bool search(word)
# search(word) can search a literal word or a regular expression string containing only letters a-z or ..
# A . means it can represent any one letter.
#
# For example:
#
# addWord("bad")
# addWord("dad")
# addWord("mad")
# search("pad") -> false
# search("bad") -> true
# search(".ad") -> true
# search("b..") -> true
# Note:
# You may assume that all words consist of lowercase letters a-z.
#
class TrieNode:
# Initialize your data structure here.
def __init__(self):
self.is_string = False
self.leaves = {}
class WordDictionary:
def __init__(self):
self.root = TrieNode()
# @param {string} word
# @return {void}
# Adds a word into the data structure.
def addWord(self, word):
curr = self.root
for c in word:
if not c in curr.leaves:
curr.leaves[c] = TrieNode()
curr = curr.leaves[c]
curr.is_string = True
# @param {string} word
# @return {boolean}
# Returns if the word is in the data structure. A word could
# contain the dot character '.' to represent any one letter.
def search(self, word):
return self.searchHelper(word, 0, self.root)
def searchHelper(self, word, start, curr):
if start == len(word):
return curr.is_string
if word[start] in curr.leaves:
return self.searchHelper(word, start+1, curr.leaves[word[start]])
elif word[start] == '.':
for c in curr.leaves:
if self.searchHelper(word, start+1, curr.leaves[c]):
return True
return False
# Your WordDictionary object will be instantiated and called as such:
# wordDictionary = WordDictionary()
# wordDictionary.addWord("word")
# wordDictionary.search("pattern")
| 29.323529
| 106
| 0.610331
|
e60f382ed573628bde9c01dbd7e51363f98ac3f0
| 685
|
py
|
Python
|
python/robot-name/robot_name.py
|
tamireinhorn/exercism
|
3ca78b262ad590b67c75c5d1cd83db02bc2d1e6e
|
[
"MIT"
] | null | null | null |
python/robot-name/robot_name.py
|
tamireinhorn/exercism
|
3ca78b262ad590b67c75c5d1cd83db02bc2d1e6e
|
[
"MIT"
] | 2
|
2021-12-18T16:31:51.000Z
|
2021-12-18T16:33:33.000Z
|
python/robot-name/robot_name.py
|
tamireinhorn/Exercism
|
3a3d5744e88ab4457df4e6ac20d772d8c50c43da
|
[
"MIT"
] | null | null | null |
import string, random
ALPHABET = string.ascii_uppercase
robot_names = set()
class Robot:
def __init__(self):
self.name = self.generate_name()
def generate_name(self):
letters = ''.join(random.sample(ALPHABET, 2))
numbers = ''.join(list(map(str,(random.sample(range(10), 3)))))
name = f'{letters}{numbers}'
while name in robot_names:
letters = ''.join(random.sample(ALPHABET, 2))
numbers = ''.join(list(map(str,(random.sample(range(10), 3)))))
name = f'{letters}{numbers}'
robot_names.add(name)
return name
def reset(self):
self.name = self.generate_name()
| 32.619048
| 75
| 0.586861
|
967a015e313688d5a31b45df961b8f245d62ad08
| 9,485
|
py
|
Python
|
gds3xtrude/gds3xtrude/gds3xtrude.py
|
diadatp/gds3xtrude_openjscad
|
49b82c0a905a59c82a97e151077c234c4cbc8eeb
|
[
"Apache-2.0"
] | null | null | null |
gds3xtrude/gds3xtrude/gds3xtrude.py
|
diadatp/gds3xtrude_openjscad
|
49b82c0a905a59c82a97e151077c234c4cbc8eeb
|
[
"Apache-2.0"
] | null | null | null |
gds3xtrude/gds3xtrude/gds3xtrude.py
|
diadatp/gds3xtrude_openjscad
|
49b82c0a905a59c82a97e151077c234c4cbc8eeb
|
[
"Apache-2.0"
] | null | null | null |
##
## Copyright (c) 2018-2019 Thomas Kramer.
##
## This file is part of gds3xtrude
## (see https://codeberg.org/tok/gds3xtrude).
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as
## published by the Free Software Foundation, either version 3 of the
## License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
from .polygon_approx import approximate_polygon
from .layer_operations import LayerOp, AbstractLayer
from functools import reduce
from itertools import chain
import importlib.util
import importlib.machinery
import types
import logging
from typing import Dict, List, Tuple, Optional
from .builder import LayerStackBuilder, ApproximateLayerStackBuilder
from .types import Material
logger = logging.getLogger(__name__)
# klayout.db should not be imported if script is run from KLayout GUI.
try:
import pya
except:
import klayout.db as pya
DEFAULT_COLOR = [0.5, 0.5, 0.5]
DEFAULT_MATERIAL = Material('DEFAULT_MATERIAL', color=DEFAULT_COLOR)
class Mask:
""" Wrapper around pya.Region.
"""
def __init__(self, region: pya.Region):
self.region = region
self.material = Material('<unnamed>', color=DEFAULT_COLOR)
def __add__(self, other):
m = Mask(self.region + other.region)
m.material = self.material
return m
def __or__(self, other):
return self + other
def __and__(self, other):
m = Mask(self.region & other.region)
m.material = self.material
return m
def __sub__(self, other):
m = Mask(self.region - other.region)
m.material = self.material
return m
def __xor__(self, other):
return Mask(self.region ^ other.region)
def __hash__(self):
return hash(self.region)
    def __eq__(self, other):
return self.region == other.region
def _build_masks(layout: pya.Layout,
cell: pya.Cell,
abstract_layerstack: List[Tuple[int, List[AbstractLayer]]],
selection_box: Optional[pya.Box] = None,
material_map: Optional[Dict[Tuple[int, int], Material]] = None,
default_material: Material = DEFAULT_MATERIAL) -> List[Tuple[int, List[pya.Region]]]:
""" Create the masks of `cell` based on the layer stack information.
:param layout:
:param cell:
:param abstract_layerstack: Layer stack structure as defined in the technology file.
:param selection_box: An optional pya.Box to select which part of the layout should be converted to masks.
:param material_map: Mapping from layer number to color. Dict[(int, int), (r, g, b)]
:param default_material: Color to be used if it is not defined in `color_map`. (r, g, b), 0 <= r, g, b <= 1
:return: List[(thickness: int, mask: pya.Region)]
"""
# Cache for evaluated sub trees.
cache = dict()
def eval_op_tree(op_node: LayerOp) -> Mask:
""" Recursively evaluate the layer operation tree.
:param op_node: Operand node or leaf.
:return: Returns a `Mask` object containing a `pya.Region` of the layer.
"""
if op_node in cache:
return cache[op_node]
if isinstance(op_node, AbstractLayer):
(idx, purpose, material) = op_node.eval()
layer_index = layout.layer(idx, purpose)
region = _flatten_cell(cell, layer_index, selection_box=selection_box, layout=layout)
if selection_box is not None:
region = region & selection_box
result = Mask(region)
if material_map is not None:
result.material = material_map.get((idx, purpose), default_material)
else:
result.material = material if material is not None else default_material
else:
assert isinstance(op_node, LayerOp)
op = op_node.op
lhs = eval_op_tree(op_node.lhs)
rhs = eval_op_tree(op_node.rhs)
result = op(lhs, rhs)
result.region.merge()
cache[op_node] = result
return result
_layerstack = []
for thickness, layers in abstract_layerstack:
if not isinstance(layers, list):
layers = [layers]
_layerstack.append(
(thickness, [eval_op_tree(l) for l in layers])
)
return _layerstack
def _flatten_cell(cell: pya.Cell, layer_index: int,
selection_box: Optional[pya.Box] = None,
cell_cache: Optional[Dict[int, pya.Region]] = None, layout: pya.Layout = None) -> pya.Region:
""" Recursively convert a single layer of a cell into a pya.Region.
:param cell: The pya.Cell to be converted.
:param layer_index: KLayout layer index.
:param cell_cache: Dict mapping cell indices to flattened `pya.Region`s. This allows to reuse flattened cells.
:return: Merged pya.Region containing all polygons of the flattened cell hierarchy.
"""
if cell_cache is None:
# Cache rendered cells
# Dict[cell_index, volume]
cell_cache = dict()
region = pya.Region()
if selection_box is None:
instances = cell.each_inst()
else:
instances = cell.each_touching_inst(selection_box)
for inst in instances:
cell_id = inst.cell_index
trans = inst.cplx_trans
# Transform selection such that it can be used in recursive call.
if selection_box:
trans_selection = selection_box.transformed(trans.inverted())
else:
trans_selection = None
if cell_id in cell_cache:
child_region = cell_cache[cell_id]
if trans_selection is not None:
child_region = child_region & trans_selection
else:
# Recursively flatten child instance.
child_region = _flatten_cell(layout.cell(cell_id), layer_index,
selection_box=trans_selection, cell_cache=cell_cache, layout=layout)
# Cache the region only if the cell has been fully rendered and was not cropped to selection.
if trans_selection is None or inst.cell.bbox().inside(trans_selection):
cell_cache[cell_id] = child_region
transformed = child_region.transformed(trans)
region.insert(transformed)
# Add shapes of this cell.
if selection_box is None:
own_shapes = cell.each_shape(layer_index)
else:
own_shapes = cell.each_touching_shape(layer_index, selection_box)
# region.insert(own_shapes)
for shape in own_shapes:
polygon = shape.polygon
# Texts may have no polygon representation.
if polygon is not None:
region.insert(shape.polygon)
region.merge()
return region
def build_layerstack(builder: LayerStackBuilder,
layout: pya.Layout,
top_cell: pya.Cell,
tech_file,
approx_error: float = 0,
material_map: Optional[Dict[Tuple[int, int], Material]] = None,
centered: bool = False,
scale_factor: float = 1,
selection_box: Optional[pya.Box] = None):
""" Transform the first pya.Cell in `layout` into a solidpython volume.
:param layout: pya.Layout
:param top_cell: pya.Cell
:param tech_file: Path to description of the layer stack.
:param approx_error: Approximate polygons before conversion.
:param material_map: Mapping from layer number to color. Dict[(int, int), (r, g, b)]
:param centered: Move the center of the layout to (0, 0).
:param scale_factor: Scale the layout before conversion.
:param selection_box: An optional pya.Box to select which part of the layout should be converted to masks.
:return: solidpython volume
"""
logger.info('Loading tech file: %s', tech_file)
loader = importlib.machinery.SourceFileLoader('tech', tech_file)
tech = types.ModuleType(loader.name)
loader.exec_module(tech)
logger.info("Convert polygons into volumes.")
layer_masks = _build_masks(layout, top_cell, tech.layerstack, material_map=material_map, selection_box=selection_box)
approx_builder = ApproximateLayerStackBuilder(builder, approx_error=approx_error)
offset = 0
for thickness, masks in layer_masks:
for mask in masks:
approx_builder.add_region(mask.region, thickness=thickness, z_offset=offset, material=mask.material)
offset += thickness
# Find bounding box of masks and center the volume.
if centered and len(layer_masks) > 0:
bboxes = chain(*((mask.region.bbox() for mask in masks) for (thickness, masks) in layer_masks))
# Find union bounding box.
bbox = reduce(lambda a, b: a + b, bboxes)
# Shift center to (0, 0).
center = bbox.center()
builder.translate((-center.x, -center.y, 0))
if scale_factor != 1:
builder.scale(scale_factor)
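# Typical call sequence (illustrative sketch only, not part of the original module;
# `SomeLayerStackBuilder` stands in for any concrete LayerStackBuilder implementation,
# and the GDS/tech-file paths are placeholders):
#   layout = pya.Layout()
#   layout.read('chip.gds')
#   builder = SomeLayerStackBuilder()
#   build_layerstack(builder, layout, layout.top_cell(), 'tech_layerstack.py',
#                    approx_error=10, centered=True)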
| 35.792453
| 121
| 0.650817
|
b6326b5b3ce356788eb3f399409e47099e102f50
| 29
|
py
|
Python
|
optimizer/__init__.py
|
lorenz0890/pytorch-admm-pruning
|
85f15d86e6d9037fe4016ebcd435065ecba823b5
|
[
"BSD-3-Clause"
] | null | null | null |
optimizer/__init__.py
|
lorenz0890/pytorch-admm-pruning
|
85f15d86e6d9037fe4016ebcd435065ecba823b5
|
[
"BSD-3-Clause"
] | null | null | null |
optimizer/__init__.py
|
lorenz0890/pytorch-admm-pruning
|
85f15d86e6d9037fe4016ebcd435065ecba823b5
|
[
"BSD-3-Clause"
] | null | null | null |
from .admm_optimizer import *
| 29
| 29
| 0.827586
|
3cb0c37f30178379ae72671077a8b4ea3cec370b
| 3,391
|
py
|
Python
|
invenio_records_resources/services/files/results.py
|
tzubaki/invenio-records-resources
|
82e77d5f6e89abf38c871e912cc97b4bdf289c20
|
[
"MIT"
] | null | null | null |
invenio_records_resources/services/files/results.py
|
tzubaki/invenio-records-resources
|
82e77d5f6e89abf38c871e912cc97b4bdf289c20
|
[
"MIT"
] | null | null | null |
invenio_records_resources/services/files/results.py
|
tzubaki/invenio-records-resources
|
82e77d5f6e89abf38c871e912cc97b4bdf289c20
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
# Copyright (C) 2020 Northwestern University.
#
# Invenio-Records-Resources is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""File service results."""
from ..base import ServiceListResult
from ..records.results import RecordItem
class FileItem(RecordItem):
"""List of file items result."""
def __init__(self, service, identity, file_, record, errors=None,
links_tpl=None):
"""Constructor."""
super(FileItem, self).__init__(
service,
identity,
record,
errors=errors,
links_tpl=links_tpl,
schema=service.file_schema,
)
self._file = file_
@property
def file_id(self):
"""Get the record id."""
return self._file.key
@property
def _obj(self):
"""Return the object to dump."""
return self._file
@property
def links(self):
"""Get links for this result item."""
return self._links_tpl.expand(self._file)
def send_file(self, restricted=True, as_attachment=False):
"""Return file stream."""
return self._file.object_version.send_file(
restricted=restricted, as_attachment=as_attachment)
def open_stream(self, mode):
"""Return a file stream context manager."""
return self._file.open_stream(mode)
def get_stream(self, mode):
"""Return a file stream.
        It is up to the caller to close the stream.
"""
return self._file.get_stream(mode)
class FileList(ServiceListResult):
"""List of file items result."""
def __init__(self, service, identity, results, record, links_tpl=None,
links_item_tpl=None):
"""Constructor.
        :param service: a service instance
        :param identity: an identity that performed the service request
        :param results: the search results
        :param record: the record the files belong to
        :param links_tpl: a links template for the file list
        :param links_item_tpl: a links template for each file item
"""
self._identity = identity
self._record = record
self._results = results
self._service = service
self._links_tpl = links_tpl
self._links_item_tpl = links_item_tpl
@property
def entries(self):
"""Iterator over the hits."""
for entry in self._results:
# Project the record
projection = self._service.file_schema.dump(
entry,
context=dict(
identity=self._identity,
)
)
if self._links_item_tpl:
projection['links'] = self._links_item_tpl.expand(entry)
yield projection
def to_dict(self):
"""Return result as a dictionary."""
# TODO: Use a FilesSchema or something to dump the top-level object
record_files = self._record.files
result = {
"enabled": record_files.enabled,
}
if self._links_tpl:
result['links'] = self._links_tpl.expand(self._record)
if result['enabled']:
result.update({
'entries': list(self.entries),
'default_preview': record_files.default_preview,
'order': record_files.order,
})
return result
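# For reference, FileList.to_dict() produces a payload shaped roughly like this
# (values are illustrative only):
#   {
#       "enabled": True,
#       "links": {...},                 # present only when a links template is set
#       "entries": [...],               # included only when files are enabled
#       "default_preview": "data.csv",  # included only when files are enabled
#       "order": [],                    # included only when files are enabled
#   }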
| 29.232759
| 76
| 0.592451
|
4c0c8056fb70152074788f94ec40e214d1e8a208
| 1,591
|
py
|
Python
|
logic/clustering.py
|
moff4/gladius
|
773a4e81fcfca7376292c8f9ba601b62324a65f0
|
[
"MIT"
] | null | null | null |
logic/clustering.py
|
moff4/gladius
|
773a4e81fcfca7376292c8f9ba601b62324a65f0
|
[
"MIT"
] | null | null | null |
logic/clustering.py
|
moff4/gladius
|
773a4e81fcfca7376292c8f9ba601b62324a65f0
|
[
"MIT"
] | null | null | null |
from collections import defaultdict
from conf import conf
SQL_LOAD_NEAREST = '''
SELECT distinct pt1.tag, pt2.tag
FROM (
select *
from hashtag.post_tag
where tag in (%s)
) pt1 inner join hashtag.post_tag pt2 on pt1.post = pt2.post
and pt1.tag != pt2.tag
'''
LOAD_LIMIT = 50
class Clustering:
def __init__(self):
        # tag -> set of neighbours
self.__near = defaultdict(set)
self._loaded = set()
self._queue = set()
def nears(self, tag: str) -> set:
if tag in self._loaded:
return self.__near[tag]
tags = [tag]
for i, t in enumerate(self._queue - self._loaded):
tags.append(t)
if i > LOAD_LIMIT:
break
conn = conf.sql.get_connection()
cur = conn.cursor()
cur.execute(SQL_LOAD_NEAREST % ','.join(['%s'] * len(tags)), tags)
row = cur.fetchone()
while row is not None:
self.__near[(t := row[0])].add(row[1])
if t in self._queue:
self._queue.remove(row[0])
row = cur.fetchone()
self._loaded.update(tags)
return self.__near[tag]
def clustering_coeff(self, tag: str) -> float:
near = self.nears(tag)
self._queue.update(near)
k = len(near)
if k <= 1:
c = 1.0
else:
fr_eges = {
'@'.join([i, j] if i > j else [j, i])
for i in near
for j in (near & self.nears(i))
}
c = 2 * len(fr_eges) / (k * (k - 1))
return c
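# Worked example of the local clustering coefficient computed above (illustrative
# only and independent of the SQL-backed Clustering class): with k = 3 neighbours
# and 2 edges between them, C = 2 * 2 / (3 * 2) = 2 / 3.
if __name__ == '__main__':
    k, edges_between_neighbours = 3, 2
    print(2 * edges_between_neighbours / (k * (k - 1)))  # 0.666...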
| 25.66129
| 74
| 0.516028
|
338e547573b8b81493ce3c8f0420f9097830b239
| 6,053
|
py
|
Python
|
my_string_utils.py
|
jeayu/MyStringUtils
|
5ed84b5b6b77b2d33e69ea9a0fe05d9619ddc0b9
|
[
"MIT"
] | 2
|
2020-05-26T07:44:30.000Z
|
2020-05-26T07:44:33.000Z
|
my_string_utils.py
|
jeayu/MyStringUtils
|
5ed84b5b6b77b2d33e69ea9a0fe05d9619ddc0b9
|
[
"MIT"
] | null | null | null |
my_string_utils.py
|
jeayu/MyStringUtils
|
5ed84b5b6b77b2d33e69ea9a0fe05d9619ddc0b9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sublime
import sublime_plugin
import re
import json
def camel2underline(camel_str):
result = re.sub(r'([a-z]|\d)([A-Z])', r'\1_\2', camel_str)
    # compare by value: `is not` would rely on re.sub returning the identical object
    return result.lower() if result != camel_str else camel_str
def underline2camel(underline_str):
return re.sub(r'(_\w)', lambda x: x.group(1)[1].upper(
), underline_str.lower()) if '_' in underline_str else underline_str
def underline2words(text):
return ' '.join(text.split('_'))
def words2underline(text):
return '_'.join(re.split(r'\s+', text)) if ' ' in text.strip() else text
def camel2words(text):
return re.sub(r'([a-z]|\d)([A-Z])', r'\1 \2', text)
def words2camel(text):
return underline2camel(words2underline(text)) if ' ' in text.strip() else text
def split_line(text):
return re.split(r'\n', text)
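# Illustrative conversions for the helpers above (not part of the original plugin;
# the __main__ guard keeps them from running when Sublime Text imports this module):
if __name__ == '__main__':
    print(camel2underline('myVarName'))    # my_var_name
    print(underline2camel('my_var_name'))  # myVarName
    print(camel2words('myVarName'))        # my Var Name
    print(words2camel('my var name'))      # myVarName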
class CamelUnderlineCommand(sublime_plugin.TextCommand):
def run(self, edit):
for region in self.view.sel():
if not region.empty():
text = self.view.substr(region)
self.view.replace(edit, region, camel2underline(text))
class UnderlineCamelCommand(sublime_plugin.TextCommand):
def run(self, edit):
for region in self.view.sel():
if not region.empty():
text = self.view.substr(region)
self.view.replace(edit, region, underline2camel(text))
class UnderlineWordsCommand(sublime_plugin.TextCommand):
def run(self, edit):
for region in self.view.sel():
if not region.empty():
text = self.view.substr(region)
self.view.replace(edit, region, underline2words(text))
class WordsUnderlineCommand(sublime_plugin.TextCommand):
def run(self, edit):
for region in self.view.sel():
if not region.empty():
text = self.view.substr(region)
self.view.replace(edit, region, words2underline(text))
class CamelWordsCommand(sublime_plugin.TextCommand):
def run(self, edit):
for region in self.view.sel():
if not region.empty():
text = self.view.substr(region).strip()
self.view.replace(edit, region, camel2words(text))
class WordsCamelCommand(sublime_plugin.TextCommand):
def run(self, edit):
for region in self.view.sel():
if not region.empty():
text = self.view.substr(region).strip()
self.view.replace(edit, region, words2camel(text))
class JsonListCommand(sublime_plugin.TextCommand):
def run(self, edit):
for region in self.view.sel():
if not region.empty():
text = self.view.substr(region)
self.view.replace(edit, region, json.dumps(
text.split(), ensure_ascii=False))
class CsvJsonCommand(sublime_plugin.TextCommand):
def run(self, edit):
settings = sublime.load_settings("MyStringUtils.sublime-settings")
separator_regex = settings.get("csv_separator_regex")
indent = settings.get("csv_to_json_indent")
for region in self.view.sel():
if not region.empty():
text = self.view.substr(region)
lines = split_line(text)
keys = re.split(separator_regex, lines[0])
result = [dict(zip(keys, re.split(separator_regex, value)))
for value in lines[1:]]
self.view.replace(edit, region, json.dumps(
result, ensure_ascii=False, indent=indent if indent else None))
class JsonCsvCommand(sublime_plugin.TextCommand):
def run(self, edit):
settings = sublime.load_settings("MyStringUtils.sublime-settings")
separator = settings.get("csv_separator")
for region in self.view.sel():
if not region.empty():
text = self.view.substr(region)
text_json = json.loads(text)
result = [separator.join(text_json[0].keys())] + \
[separator.join(i.values()) for i in text_json]
self.view.replace(edit, region, '\n'.join(result))
class FilterDuplicatedLinesCommand(sublime_plugin.TextCommand):
def run(self, edit):
for region in self.view.sel():
if not region.empty():
text = self.view.substr(region)
lines = split_line(text)
self.view.replace(edit, region, '\n'.join(
sorted(set(lines), key=lines.index)))
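# The CsvTableSqlCommand below expects the selected text to contain four lines split
# by the configured csv separator (the example values are illustrative):
#   line 1: table name      e.g.  user_account
#   line 2: column comments e.g.  id;user name;created time
#   line 3: column names    e.g.  id;name;created_at
#   line 4: column types    e.g.  bigint(20);varchar(64);datetime
# The first column becomes the primary key of the generated CREATE TABLE statement.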
class CsvTableSqlCommand(sublime_plugin.TextCommand):
def run(self, edit):
settings = sublime.load_settings("MyStringUtils.sublime-settings")
separator_regex = settings.get("csv_separator_regex")
for region in self.view.sel():
if not region.empty():
text = self.view.substr(region)
lines = split_line(text)
table_name = lines[0].strip()
comments = re.split(separator_regex, lines[1])
fields = re.split(separator_regex, lines[2])
field_types = re.split(
separator_regex, lines[3])
self.view.replace(edit, region, self.create_table_sql(
table_name, fields, field_types, comments))
def create_table_sql(self, table_name, fields, field_types, comments):
details = ',\n '.join(["`{0}` {1} NOT NULL COMMENT '{2}'".format(
field, field_type, comment) for field, field_type, comment in zip(fields, field_types, comments)])
return """
CREATE TABLE `{table_name}` (
{details},
PRIMARY KEY (`{primary_key}`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COMMENT='{table_name}';
""".format(table_name=table_name, details=details, primary_key=fields[0])
class EvalCommand(sublime_plugin.TextCommand):
def run(self, edit):
for region in self.view.sel():
if not region.empty():
text = self.view.substr(region)
self.view.replace(edit, region, str(eval(text)))
| 33.815642
| 110
| 0.609945
|
b68e6bd9e8ede3e31c39d27b5cec1f18c3e722e0
| 2,805
|
py
|
Python
|
toontown/models/clash/news.py
|
jaczerob/toontown.py
|
fe23ffaddb2a663cbca49ce2358ef266a9c72196
|
[
"MIT"
] | null | null | null |
toontown/models/clash/news.py
|
jaczerob/toontown.py
|
fe23ffaddb2a663cbca49ce2358ef266a9c72196
|
[
"MIT"
] | null | null | null |
toontown/models/clash/news.py
|
jaczerob/toontown.py
|
fe23ffaddb2a663cbca49ce2358ef266a9c72196
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2022-present jaczerob
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from dataclasses import dataclass
from datetime import datetime
from typing import Optional
from ..base import BaseAPIModel
__all__ = ['News', 'NewsList']
DATE_FMT = '%Y-%m-%dT%H:%M:%S.%fZ'
@dataclass
class News:
"""Wrapper class for the news data
Attributes
----------
    id : int
        the ID of the post
    author : str
        who wrote the post
    title : str
        the title of the post
    summary : str
        a short summary of the post
    category : str
        the category the post was filed under
    posted : Optional[datetime]
        when the post was written, if available
    article_url : str
        the link to the full article
"""
__slots__ = ['id', 'author', 'posted', 'title', 'summary', 'category']
def __init__(self, **props) -> None:
self.id: int = props.get('id')
self.author: str = props.get('author')
self.title: str = props.get('title')
self.summary: str = props.get('summary')
self.category: str = props.get('category')
posted = props.get('posted')
self.posted: Optional[datetime] = datetime.strptime(posted, DATE_FMT) if posted else None
@property
def article_url(self):
return f'https://corporateclash.net/news/article/{self.id}'
class NewsList(BaseAPIModel[News]):
"""Wrapper class for the /news response
A tuple-like class containing `News` objects
"""
def __init__(self, **payload) -> None:
iterable = tuple((
News(**props)
for props in payload['iterable']
))
super().__init__(iterable)
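# Illustrative construction from a raw payload (the field names mirror the attributes
# handled above; the values are made up):
#   article = News(id=123, author='Team', title='Patch notes',
#                  summary='Bug fixes and tuning', category='announcements',
#                  posted='2022-01-01T12:00:00.000Z')
#   article.article_url  ->  'https://corporateclash.net/news/article/123'
#   article.posted       ->  datetime(2022, 1, 1, 12, 0)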
| 28.333333
| 97
| 0.678788
|
4842bc0945519ad824f0ed739624add23d6778ac
| 10,322
|
py
|
Python
|
lldb/packages/Python/lldbsuite/test/functionalities/breakpoint/scripted_bkpt/TestScriptedResolver.py
|
dan-zheng/llvm-project
|
6b792850da0345274758c9260fda5df5e57ab486
|
[
"Apache-2.0"
] | 456
|
2015-01-15T08:44:39.000Z
|
2022-03-31T22:34:57.000Z
|
lldb/packages/Python/lldbsuite/test/functionalities/breakpoint/scripted_bkpt/TestScriptedResolver.py
|
dan-zheng/llvm-project
|
6b792850da0345274758c9260fda5df5e57ab486
|
[
"Apache-2.0"
] | 2
|
2018-01-23T09:02:40.000Z
|
2018-03-09T20:34:27.000Z
|
lldb/packages/Python/lldbsuite/test/functionalities/breakpoint/scripted_bkpt/TestScriptedResolver.py
|
dan-zheng/llvm-project
|
6b792850da0345274758c9260fda5df5e57ab486
|
[
"Apache-2.0"
] | 254
|
2015-01-19T21:53:39.000Z
|
2022-02-23T19:38:56.000Z
|
"""
Test setting breakpoints using a scripted resolver
"""
from __future__ import print_function
import os
import lldb
import lldbsuite.test.lldbutil as lldbutil
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
class TestScriptedResolver(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24528")
def test_scripted_resolver(self):
"""Use a scripted resolver to set a by symbol name breakpoint"""
self.build()
self.do_test()
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24528")
def test_search_depths(self):
""" Make sure we are called at the right depths depending on what we return
from __get_depth__"""
self.build()
self.do_test_depths()
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24528")
def test_command_line(self):
""" Make sure we are called at the right depths depending on what we return
from __get_depth__"""
self.build()
self.do_test_cli()
def test_bad_command_lines(self):
"""Make sure we get appropriate errors when we give invalid key/value
options"""
self.build()
self.do_test_bad_options()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
def make_target_and_import(self):
target = lldbutil.run_to_breakpoint_make_target(self)
interp = self.dbg.GetCommandInterpreter()
error = lldb.SBError()
script_name = os.path.join(self.getSourceDir(), "resolver.py")
source_name = os.path.join(self.getSourceDir(), "main.c")
command = "command script import " + script_name
result = lldb.SBCommandReturnObject()
interp.HandleCommand(command, result)
self.assertTrue(result.Succeeded(), "com scr imp failed: %s"%(result.GetError()))
return target
def make_extra_args(self):
json_string = '{"symbol":"break_on_me", "test1": "value1"}'
json_stream = lldb.SBStream()
json_stream.Print(json_string)
extra_args = lldb.SBStructuredData()
error = extra_args.SetFromJSON(json_stream)
self.assertTrue(error.Success(), "Error making SBStructuredData: %s"%(error.GetCString()))
return extra_args
def do_test(self):
"""This reads in a python file and sets a breakpoint using it."""
target = self.make_target_and_import()
extra_args = self.make_extra_args()
file_list = lldb.SBFileSpecList()
module_list = lldb.SBFileSpecList()
# Make breakpoints with this resolver using different filters, first ones that will take:
right = []
# one with no file or module spec - this one should fire:
right.append(target.BreakpointCreateFromScript("resolver.Resolver", extra_args, module_list, file_list))
# one with the right source file and no module - should also fire:
file_list.Append(lldb.SBFileSpec("main.c"))
right.append(target.BreakpointCreateFromScript("resolver.Resolver", extra_args, module_list, file_list))
# Make sure the help text shows up in the "break list" output:
self.expect("break list", substrs=["I am a python breakpoint resolver"], msg="Help is listed in break list")
# one with the right source file and right module - should also fire:
module_list.Append(lldb.SBFileSpec("a.out"))
right.append(target.BreakpointCreateFromScript("resolver.Resolver", extra_args, module_list, file_list))
# And one with no source file but the right module:
file_list.Clear()
right.append(target.BreakpointCreateFromScript("resolver.Resolver", extra_args, module_list, file_list))
# Make sure these all got locations:
for i in range (0, len(right)):
self.assertTrue(right[i].GetNumLocations() >= 1, "Breakpoint %d has no locations."%(i))
# Now some ones that won't take:
module_list.Clear()
file_list.Clear()
wrong = []
# one with the wrong module - should not fire:
module_list.Append(lldb.SBFileSpec("noSuchModule"))
wrong.append(target.BreakpointCreateFromScript("resolver.Resolver", extra_args, module_list, file_list))
# one with the wrong file - also should not fire:
file_list.Clear()
module_list.Clear()
file_list.Append(lldb.SBFileSpec("noFileOfThisName.xxx"))
wrong.append(target.BreakpointCreateFromScript("resolver.Resolver", extra_args, module_list, file_list))
# Now make sure the CU level iteration obeys the file filters:
file_list.Clear()
module_list.Clear()
file_list.Append(lldb.SBFileSpec("no_such_file.xxx"))
wrong.append(target.BreakpointCreateFromScript("resolver.ResolverCUDepth", extra_args, module_list, file_list))
# And the Module filters:
file_list.Clear()
module_list.Clear()
module_list.Append(lldb.SBFileSpec("NoSuchModule.dylib"))
wrong.append(target.BreakpointCreateFromScript("resolver.ResolverCUDepth", extra_args, module_list, file_list))
# Now make sure the Function level iteration obeys the file filters:
file_list.Clear()
module_list.Clear()
file_list.Append(lldb.SBFileSpec("no_such_file.xxx"))
wrong.append(target.BreakpointCreateFromScript("resolver.ResolverFuncDepth", extra_args, module_list, file_list))
# And the Module filters:
file_list.Clear()
module_list.Clear()
module_list.Append(lldb.SBFileSpec("NoSuchModule.dylib"))
wrong.append(target.BreakpointCreateFromScript("resolver.ResolverFuncDepth", extra_args, module_list, file_list))
# Make sure these didn't get locations:
for i in range(0, len(wrong)):
self.assertEqual(wrong[i].GetNumLocations(), 0, "Breakpoint %d has locations."%(i))
# Now run to main and ensure we hit the breakpoints we should have:
lldbutil.run_to_breakpoint_do_run(self, target, right[0])
# Test the hit counts:
for i in range(0, len(right)):
self.assertEqual(right[i].GetHitCount(), 1, "Breakpoint %d has the wrong hit count"%(i))
for i in range(0, len(wrong)):
self.assertEqual(wrong[i].GetHitCount(), 0, "Breakpoint %d has the wrong hit count"%(i))
def do_test_depths(self):
"""This test uses a class variable in resolver.Resolver which gets set to 1 if we saw
compile unit and 2 if we only saw modules. If the search depth is module, you get passed just
the modules with no comp_unit. If the depth is comp_unit you get comp_units. So we can use
this to test that our callback gets called at the right depth."""
target = self.make_target_and_import()
extra_args = self.make_extra_args()
file_list = lldb.SBFileSpecList()
module_list = lldb.SBFileSpecList()
module_list.Append(lldb.SBFileSpec("a.out"))
# Make a breakpoint that has no __get_depth__, check that that is converted to eSearchDepthModule:
bkpt = target.BreakpointCreateFromScript("resolver.Resolver", extra_args, module_list, file_list)
self.assertTrue(bkpt.GetNumLocations() > 0, "Resolver got no locations.")
self.expect("script print(resolver.Resolver.got_files)", substrs=["2"], msg="Was only passed modules")
# Make a breakpoint that asks for modules, check that we didn't get any files:
bkpt = target.BreakpointCreateFromScript("resolver.ResolverModuleDepth", extra_args, module_list, file_list)
self.assertTrue(bkpt.GetNumLocations() > 0, "ResolverModuleDepth got no locations.")
self.expect("script print(resolver.Resolver.got_files)", substrs=["2"], msg="Was only passed modules")
# Make a breakpoint that asks for compile units, check that we didn't get any files:
bkpt = target.BreakpointCreateFromScript("resolver.ResolverCUDepth", extra_args, module_list, file_list)
self.assertTrue(bkpt.GetNumLocations() > 0, "ResolverCUDepth got no locations.")
self.expect("script print(resolver.Resolver.got_files)", substrs=["1"], msg="Was passed compile units")
# Make a breakpoint that returns a bad value - we should convert that to "modules" so check that:
bkpt = target.BreakpointCreateFromScript("resolver.ResolverBadDepth", extra_args, module_list, file_list)
self.assertTrue(bkpt.GetNumLocations() > 0, "ResolverBadDepth got no locations.")
self.expect("script print(resolver.Resolver.got_files)", substrs=["2"], msg="Was only passed modules")
# Make a breakpoint that searches at function depth:
bkpt = target.BreakpointCreateFromScript("resolver.ResolverFuncDepth", extra_args, module_list, file_list)
self.assertTrue(bkpt.GetNumLocations() > 0, "ResolverFuncDepth got no locations.")
self.expect("script print(resolver.Resolver.got_files)", substrs=["3"], msg="Was only passed modules")
self.expect("script print(resolver.Resolver.func_list)", substrs=["break_on_me", "main", "test_func"], msg="Saw all the functions")
def do_test_cli(self):
target = self.make_target_and_import()
lldbutil.run_break_set_by_script(self, "resolver.Resolver", extra_options="-k symbol -v break_on_me")
def do_test_bad_options(self):
target = self.make_target_and_import()
self.expect("break set -P resolver.Resolver -k a_key", error = True, msg="Missing value at end",
substrs=['Key: "a_key" missing value'])
self.expect("break set -P resolver.Resolver -v a_value", error = True, msg="Missing key at end",
substrs=['Value: "a_value" missing matching key'])
self.expect("break set -P resolver.Resolver -v a_value -k a_key -v another_value", error = True, msg="Missing key among args",
substrs=['Value: "a_value" missing matching key'])
self.expect("break set -P resolver.Resolver -k a_key -k a_key -v another_value", error = True, msg="Missing value among args",
substrs=['Key: "a_key" missing value'])
| 47.787037
| 139
| 0.68136
|
68a85e2eba55eb07f89edbfc4a16fc364bff03be
| 8,537
|
py
|
Python
|
negspacy/negation.py
|
arianpasquali/negspacy
|
da9c43f4fc46c2a18076846ac660642167c547c3
|
[
"MIT"
] | null | null | null |
negspacy/negation.py
|
arianpasquali/negspacy
|
da9c43f4fc46c2a18076846ac660642167c547c3
|
[
"MIT"
] | null | null | null |
negspacy/negation.py
|
arianpasquali/negspacy
|
da9c43f4fc46c2a18076846ac660642167c547c3
|
[
"MIT"
] | null | null | null |
from spacy.tokens import Token, Doc, Span
from spacy.matcher import PhraseMatcher
import logging
from negspacy.termsets_pt import LANGUAGES
class Negex:
"""
A spaCy pipeline component which identifies negated tokens in text.
    Based on: NegEx - A Simple Algorithm for Identifying Negated Findings and Diseases in Discharge Summaries
Chapman, Bridewell, Hanbury, Cooper, Buchanan
Parameters
----------
nlp: object
spaCy language object
ent_types: list
list of entity types to negate
language: str
language code, if using default termsets (e.g. "en" for english)
pseudo_negations: list
list of phrases that cancel out a negation, if empty, defaults are used
preceding_negations: list
negations that appear before an entity, if empty, defaults are used
following_negations: list
negations that appear after an entity, if empty, defaults are used
termination: list
phrases that "terminate" a sentence for processing purposes such as "but". If empty, defaults are used
"""
def __init__(
self,
nlp,
language="pt_clinical",
ent_types=list(),
pseudo_negations=list(),
preceding_negations=list(),
following_negations=list(),
termination=list(),
chunk_prefix=list(),
):
if not language in LANGUAGES:
raise KeyError(
f"{language} not found in languages termset. "
"Ensure this is a supported language or specify "
"your own termsets when initializing Negex."
)
termsets = LANGUAGES[language]
if not Span.has_extension("negex"):
Span.set_extension("negex", default=False, force=True)
if not pseudo_negations:
if not "pseudo_negations" in termsets:
raise KeyError("pseudo_negations not specified for this language.")
pseudo_negations = termsets["pseudo_negations"]
if not preceding_negations:
if not "preceding_negations" in termsets:
raise KeyError("preceding_negations not specified for this language.")
preceding_negations = termsets["preceding_negations"]
if not following_negations:
if not "following_negations" in termsets:
raise KeyError("following_negations not specified for this language.")
following_negations = termsets["following_negations"]
if not termination:
if not "termination" in termsets:
raise KeyError("termination not specified for this language.")
termination = termsets["termination"]
# efficiently build spaCy matcher patterns
self.pseudo_patterns = list(nlp.tokenizer.pipe(pseudo_negations))
self.preceding_patterns = list(nlp.tokenizer.pipe(preceding_negations))
self.following_patterns = list(nlp.tokenizer.pipe(following_negations))
self.termination_patterns = list(nlp.tokenizer.pipe(termination))
self.matcher = PhraseMatcher(nlp.vocab, attr="LOWER")
self.matcher.add("pseudo", None, *self.pseudo_patterns)
self.matcher.add("Preceding", None, *self.preceding_patterns)
self.matcher.add("Following", None, *self.following_patterns)
self.matcher.add("Termination", None, *self.termination_patterns)
self.nlp = nlp
self.ent_types = ent_types
self.chunk_prefix = list(nlp.tokenizer.pipe(chunk_prefix))
def get_patterns(self):
"""
returns phrase patterns used for various negation dictionaries
Returns
-------
patterns: dict
pattern_type: [patterns]
"""
patterns = {
"pseudo_patterns": self.pseudo_patterns,
"preceding_patterns": self.preceding_patterns,
"following_patterns": self.following_patterns,
"termination_patterns": self.termination_patterns,
}
for pattern in patterns:
logging.info(pattern)
return patterns
def process_negations(self, doc):
"""
Find negations in doc and clean candidate negations to remove pseudo negations
Parameters
----------
doc: object
spaCy Doc object
Returns
-------
preceding: list
list of tuples for preceding negations
following: list
list of tuples for following negations
terminating: list
list of tuples of terminating phrases
"""
###
# does not work properly in spacy 2.1.8. Will incorporate after 2.2.
# Relying on user to use NER in meantime
# see https://github.com/jenojp/negspacy/issues/7
###
# if not doc.is_nered:
# raise ValueError(
# "Negations are evaluated for Named Entities found in text. "
# "Your SpaCy pipeline does not included Named Entity resolution. "
# "Please ensure it is enabled or choose a different language model that includes it."
# )
preceding = list()
following = list()
terminating = list()
matches = self.matcher(doc)
pseudo = [
(match_id, start, end)
for match_id, start, end in matches
if self.nlp.vocab.strings[match_id] == "pseudo"
]
for match_id, start, end in matches:
if self.nlp.vocab.strings[match_id] == "pseudo":
continue
pseudo_flag = False
for p in pseudo:
if start >= p[1] and start <= p[2]:
pseudo_flag = True
continue
if not pseudo_flag:
if self.nlp.vocab.strings[match_id] == "Preceding":
preceding.append((match_id, start, end))
elif self.nlp.vocab.strings[match_id] == "Following":
following.append((match_id, start, end))
elif self.nlp.vocab.strings[match_id] == "Termination":
terminating.append((match_id, start, end))
else:
                    logging.warning(
f"phrase {doc[start:end].text} not in one of the expected matcher types."
)
return preceding, following, terminating
def termination_boundaries(self, doc, terminating):
"""
Create sub sentences based on terminations found in text.
Parameters
----------
doc: object
spaCy Doc object
terminating: list
list of tuples with (match_id, start, end)
returns
-------
boundaries: list
list of tuples with (start, end) of spans
"""
sent_starts = [sent.start for sent in doc.sents]
terminating_starts = [t[1] for t in terminating]
starts = sent_starts + terminating_starts + [len(doc)]
starts.sort()
boundaries = list()
index = 0
for i, start in enumerate(starts):
if not i == 0:
boundaries.append((index, start))
index = start
return boundaries
def negex(self, doc):
"""
Negates entities of interest
Parameters
----------
doc: object
spaCy Doc object
"""
preceding, following, terminating = self.process_negations(doc)
boundaries = self.termination_boundaries(doc, terminating)
for b in boundaries:
sub_preceding = [i for i in preceding if b[0] <= i[1] < b[1]]
sub_following = [i for i in following if b[0] <= i[1] < b[1]]
for e in doc[b[0] : b[1]].ents:
if self.ent_types:
if e.label_ not in self.ent_types:
continue
if any(pre < e.start for pre in [i[1] for i in sub_preceding]):
e._.negex = True
continue
if any(fol > e.end for fol in [i[2] for i in sub_following]):
e._.negex = True
continue
if self.chunk_prefix:
if any(
c.text.lower() == doc[e.start].text.lower()
for c in self.chunk_prefix
):
e._.negex = True
return doc
def __call__(self, doc):
return self.negex(doc)
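# Minimal usage sketch (assumptions: a spaCy 2.x-style pipeline, since PhraseMatcher.add
# above uses the 2.x call signature, and an installed Portuguese model named
# "pt_core_news_sm"; both are placeholders for whatever your environment provides):
#   import spacy
#   nlp = spacy.load('pt_core_news_sm')
#   negex = Negex(nlp, language='pt_clinical')
#   nlp.add_pipe(negex, last=True)
#   doc = nlp('Paciente nega febre.')
#   for ent in doc.ents:
#       print(ent.text, ent._.negex)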
| 35.869748
| 110
| 0.577721
|
f8c8d25cf9699c94619fffb859093aa25e6a3720
| 718
|
py
|
Python
|
exercicios/ex088.py
|
DeyvisonR/curso_python
|
983b634d39f542369628ae48c7449ea75fd1ebbd
|
[
"MIT"
] | null | null | null |
exercicios/ex088.py
|
DeyvisonR/curso_python
|
983b634d39f542369628ae48c7449ea75fd1ebbd
|
[
"MIT"
] | null | null | null |
exercicios/ex088.py
|
DeyvisonR/curso_python
|
983b634d39f542369628ae48c7449ea75fd1ebbd
|
[
"MIT"
] | null | null | null |
from time import sleep
from random import randint
print('=' * 50)
print(f'{"JOGUE NA MEGA SENA":^50}')
print('=' * 50)
jogos = []
jogosqt = int(input('quantos jogos você quer que eu sorteie: '))
print(f'{f" sorteando {jogosqt} jogos ":=^50}')
cont = 0
cont2 = 0
sorteados = []
while cont2 < jogosqt:
while cont < 6:
num = randint(1, 60)
if num not in sorteados:
sorteados.append(num)
cont = cont + 1
if cont == 6:
jogos.append(sorteados[:])
sorteados.clear()
cont = 0
cont2 = cont2 + 1
for c in range(0, len(jogos)):
jogos[c].sort()
print(f'jogo {c + 1}: {jogos[c]}')
sleep(1)
print('=' * 10, 'FIM DO PROGRAMA', '=' * 10)
| 25.642857
| 64
| 0.557103
|
c94a079f9a4ce4081c478bc373d381070ddfaf96
| 11,423
|
py
|
Python
|
official/vision/detection/modeling/architecture/nn_blocks.py
|
gujralsanyam22/models
|
d96f8f043dbe2b5ca8ea1785f57df8faf68d8875
|
[
"Apache-2.0"
] | 15
|
2018-08-15T19:29:39.000Z
|
2021-11-05T02:14:59.000Z
|
official/vision/detection/modeling/architecture/nn_blocks.py
|
yangxl-2014-fe/models
|
11ea5237818e791a5717716d5413977f4c4db1e3
|
[
"Apache-2.0"
] | 5
|
2020-10-01T09:02:34.000Z
|
2021-02-21T12:50:11.000Z
|
official/vision/detection/modeling/architecture/nn_blocks.py
|
yangxl-2014-fe/models
|
11ea5237818e791a5717716d5413977f4c4db1e3
|
[
"Apache-2.0"
] | 8
|
2020-09-17T13:09:11.000Z
|
2021-12-17T09:58:43.000Z
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains common building blocks for neural networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from official.modeling import tf_utils
@tf.keras.utils.register_keras_serializable(package='Vision')
class ResidualBlock(tf.keras.layers.Layer):
"""A residual block."""
def __init__(self,
filters,
strides,
use_projection=False,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
**kwargs):
"""A residual block with BN after convolutions.
Args:
      filters: `int` number of filters used by both convolutions in this block.
strides: `int` block stride. If greater than 1, this block will ultimately
downsample the input.
use_projection: `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
Default to None.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
Default to None.
activation: `str` name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
      norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
**kwargs: keyword arguments to be passed.
"""
super(ResidualBlock, self).__init__(**kwargs)
self._filters = filters
self._strides = strides
self._use_projection = use_projection
self._use_sync_bn = use_sync_bn
self._activation = activation
self._kernel_initializer = kernel_initializer
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation_fn = tf_utils.get_activation(activation)
def build(self, input_shape):
if self._use_projection:
self._shortcut = tf.keras.layers.Conv2D(
filters=self._filters,
kernel_size=1,
strides=self._strides,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm0 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._conv1 = tf.keras.layers.Conv2D(
filters=self._filters,
kernel_size=3,
strides=self._strides,
padding='same',
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm1 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._conv2 = tf.keras.layers.Conv2D(
filters=self._filters,
kernel_size=3,
strides=1,
padding='same',
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm2 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
super(ResidualBlock, self).build(input_shape)
def get_config(self):
config = {
'filters': self._filters,
'strides': self._strides,
'use_projection': self._use_projection,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon
}
base_config = super(ResidualBlock, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
shortcut = inputs
if self._use_projection:
shortcut = self._shortcut(shortcut)
shortcut = self._norm0(shortcut)
x = self._conv1(inputs)
x = self._norm1(x)
x = self._activation_fn(x)
x = self._conv2(x)
x = self._norm2(x)
return self._activation_fn(x + shortcut)
@tf.keras.utils.register_keras_serializable(package='Vision')
class BottleneckBlock(tf.keras.layers.Layer):
"""A standard bottleneck block."""
def __init__(self,
filters,
strides,
use_projection=False,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
**kwargs):
"""A standard bottleneck block with BN after convolutions.
Args:
filters: `int` number of filters for the first two convolutions. Note that
the third and final convolution will use 4 times as many filters.
strides: `int` block stride. If greater than 1, this block will ultimately
downsample the input.
use_projection: `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
Default to None.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
Default to None.
activation: `str` name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
      norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
**kwargs: keyword arguments to be passed.
"""
super(BottleneckBlock, self).__init__(**kwargs)
self._filters = filters
self._strides = strides
self._use_projection = use_projection
self._use_sync_bn = use_sync_bn
self._activation = activation
self._kernel_initializer = kernel_initializer
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
if use_sync_bn:
self._norm = tf.keras.layers.experimental.SyncBatchNormalization
else:
self._norm = tf.keras.layers.BatchNormalization
if tf.keras.backend.image_data_format() == 'channels_last':
self._bn_axis = -1
else:
self._bn_axis = 1
self._activation_fn = tf_utils.get_activation(activation)
def build(self, input_shape):
if self._use_projection:
self._shortcut = tf.keras.layers.Conv2D(
filters=self._filters * 4,
kernel_size=1,
strides=self._strides,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm0 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._conv1 = tf.keras.layers.Conv2D(
filters=self._filters,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm1 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._conv2 = tf.keras.layers.Conv2D(
filters=self._filters,
kernel_size=3,
strides=self._strides,
padding='same',
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm2 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
self._conv3 = tf.keras.layers.Conv2D(
filters=self._filters * 4,
kernel_size=1,
strides=1,
use_bias=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)
self._norm3 = self._norm(
axis=self._bn_axis,
momentum=self._norm_momentum,
epsilon=self._norm_epsilon)
super(BottleneckBlock, self).build(input_shape)
def get_config(self):
config = {
'filters': self._filters,
'strides': self._strides,
'use_projection': self._use_projection,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon
}
base_config = super(BottleneckBlock, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs):
shortcut = inputs
if self._use_projection:
shortcut = self._shortcut(shortcut)
shortcut = self._norm0(shortcut)
x = self._conv1(inputs)
x = self._norm1(x)
x = self._activation_fn(x)
x = self._conv2(x)
x = self._norm2(x)
x = self._activation_fn(x)
x = self._conv3(x)
x = self._norm3(x)
return self._activation_fn(x + shortcut)
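# Minimal shape-check sketch (not part of the original module; assumes TensorFlow 2.x
# and the `official` package imported above are installed, and the tensor sizes are
# arbitrary). With use_projection=True the 1x1 shortcut matches the expanded channels.
if __name__ == '__main__':
  block = BottleneckBlock(filters=64, strides=2, use_projection=True)
  outputs = block(tf.random.normal([2, 56, 56, 64]))
  print(outputs.shape)  # (2, 28, 28, 256)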
| 35.808777
| 80
| 0.674954
|
b8c028c1c6f4f26dd578eaa9b75c31c072305c33
| 12,114
|
py
|
Python
|
sdk/python/pulumi_azure_native/containerservice/v20201201/maintenance_configuration.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/containerservice/v20201201/maintenance_configuration.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/containerservice/v20201201/maintenance_configuration.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['MaintenanceConfigurationArgs', 'MaintenanceConfiguration']
@pulumi.input_type
class MaintenanceConfigurationArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
resource_name: pulumi.Input[str],
config_name: Optional[pulumi.Input[str]] = None,
not_allowed_time: Optional[pulumi.Input[Sequence[pulumi.Input['TimeSpanArgs']]]] = None,
time_in_week: Optional[pulumi.Input[Sequence[pulumi.Input['TimeInWeekArgs']]]] = None):
"""
The set of arguments for constructing a MaintenanceConfiguration resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] resource_name: The name of the managed cluster resource.
:param pulumi.Input[str] config_name: The name of the maintenance configuration.
:param pulumi.Input[Sequence[pulumi.Input['TimeSpanArgs']]] not_allowed_time: Time slots on which upgrade is not allowed.
:param pulumi.Input[Sequence[pulumi.Input['TimeInWeekArgs']]] time_in_week: Weekday time slots allowed to upgrade.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "resource_name", resource_name)
if config_name is not None:
pulumi.set(__self__, "config_name", config_name)
if not_allowed_time is not None:
pulumi.set(__self__, "not_allowed_time", not_allowed_time)
if time_in_week is not None:
pulumi.set(__self__, "time_in_week", time_in_week)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="resourceName")
def resource_name(self) -> pulumi.Input[str]:
"""
The name of the managed cluster resource.
"""
return pulumi.get(self, "resource_name")
@resource_name.setter
def resource_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_name", value)
@property
@pulumi.getter(name="configName")
def config_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the maintenance configuration.
"""
return pulumi.get(self, "config_name")
@config_name.setter
def config_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "config_name", value)
@property
@pulumi.getter(name="notAllowedTime")
def not_allowed_time(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TimeSpanArgs']]]]:
"""
Time slots on which upgrade is not allowed.
"""
return pulumi.get(self, "not_allowed_time")
@not_allowed_time.setter
def not_allowed_time(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TimeSpanArgs']]]]):
pulumi.set(self, "not_allowed_time", value)
@property
@pulumi.getter(name="timeInWeek")
def time_in_week(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TimeInWeekArgs']]]]:
"""
Weekday time slots allowed to upgrade.
"""
return pulumi.get(self, "time_in_week")
@time_in_week.setter
def time_in_week(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TimeInWeekArgs']]]]):
pulumi.set(self, "time_in_week", value)
class MaintenanceConfiguration(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
config_name: Optional[pulumi.Input[str]] = None,
not_allowed_time: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TimeSpanArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
time_in_week: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TimeInWeekArgs']]]]] = None,
__props__=None):
"""
maintenance configuration.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] config_name: The name of the maintenance configuration.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TimeSpanArgs']]]] not_allowed_time: Time slots on which upgrade is not allowed.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] resource_name_: The name of the managed cluster resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TimeInWeekArgs']]]] time_in_week: Weekday time slots allowed to upgrade.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: MaintenanceConfigurationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
maintenance configuration.
:param str resource_name: The name of the resource.
:param MaintenanceConfigurationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(MaintenanceConfigurationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
config_name: Optional[pulumi.Input[str]] = None,
not_allowed_time: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TimeSpanArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
time_in_week: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TimeInWeekArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = MaintenanceConfigurationArgs.__new__(MaintenanceConfigurationArgs)
__props__.__dict__["config_name"] = config_name
__props__.__dict__["not_allowed_time"] = not_allowed_time
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if resource_name_ is None and not opts.urn:
raise TypeError("Missing required property 'resource_name_'")
__props__.__dict__["resource_name"] = resource_name_
__props__.__dict__["time_in_week"] = time_in_week
__props__.__dict__["name"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:containerservice/v20201201:MaintenanceConfiguration"), pulumi.Alias(type_="azure-native:containerservice:MaintenanceConfiguration"), pulumi.Alias(type_="azure-nextgen:containerservice:MaintenanceConfiguration"), pulumi.Alias(type_="azure-native:containerservice/v20210201:MaintenanceConfiguration"), pulumi.Alias(type_="azure-nextgen:containerservice/v20210201:MaintenanceConfiguration"), pulumi.Alias(type_="azure-native:containerservice/v20210301:MaintenanceConfiguration"), pulumi.Alias(type_="azure-nextgen:containerservice/v20210301:MaintenanceConfiguration"), pulumi.Alias(type_="azure-native:containerservice/v20210501:MaintenanceConfiguration"), pulumi.Alias(type_="azure-nextgen:containerservice/v20210501:MaintenanceConfiguration"), pulumi.Alias(type_="azure-native:containerservice/v20210701:MaintenanceConfiguration"), pulumi.Alias(type_="azure-nextgen:containerservice/v20210701:MaintenanceConfiguration"), pulumi.Alias(type_="azure-native:containerservice/v20210801:MaintenanceConfiguration"), pulumi.Alias(type_="azure-nextgen:containerservice/v20210801:MaintenanceConfiguration")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(MaintenanceConfiguration, __self__).__init__(
'azure-native:containerservice/v20201201:MaintenanceConfiguration',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'MaintenanceConfiguration':
"""
Get an existing MaintenanceConfiguration resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = MaintenanceConfigurationArgs.__new__(MaintenanceConfigurationArgs)
__props__.__dict__["name"] = None
__props__.__dict__["not_allowed_time"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["time_in_week"] = None
__props__.__dict__["type"] = None
return MaintenanceConfiguration(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notAllowedTime")
def not_allowed_time(self) -> pulumi.Output[Optional[Sequence['outputs.TimeSpanResponse']]]:
"""
Time slots on which upgrade is not allowed.
"""
return pulumi.get(self, "not_allowed_time")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
The system meta data relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter(name="timeInWeek")
def time_in_week(self) -> pulumi.Output[Optional[Sequence['outputs.TimeInWeekResponse']]]:
"""
Weekday time slots allowed to upgrade.
"""
return pulumi.get(self, "time_in_week")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
| 48.846774
| 1,184
| 0.679379
|
9cbc1ce37e4c5bf9afa3c0243903ceed0b83109b
| 330
|
py
|
Python
|
fabdeploy/api.py
|
kolia1985/fabdeploy
|
fa746d94fde058ab86aadbc5d4a391dfcbfd983a
|
[
"BSD-3-Clause"
] | null | null | null |
fabdeploy/api.py
|
kolia1985/fabdeploy
|
fa746d94fde058ab86aadbc5d4a391dfcbfd983a
|
[
"BSD-3-Clause"
] | null | null | null |
fabdeploy/api.py
|
kolia1985/fabdeploy
|
fa746d94fde058ab86aadbc5d4a391dfcbfd983a
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from . import fabd, system, git, release, virtualenv, nginx, django, pip, \
postgres, mysql, supervisor, users, ssh, tar, gunicorn, uwsgi, rabbitmq, \
apache, redis, flask, angularjs
from .base import setup_fabdeploy
from .containers import conf, DefaultConf
from .task import Task
| 36.666667
| 78
| 0.760606
|
0673e7d1d0a5c5a54a0632f428aa39364457ad4a
| 5,839
|
py
|
Python
|
SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/lldbplatformutil.py
|
Polidea/SiriusObfuscator
|
b0e590d8130e97856afe578869b83a209e2b19be
|
[
"Apache-2.0"
] | 427
|
2018-05-29T14:21:02.000Z
|
2022-03-16T03:17:54.000Z
|
SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/lldbplatformutil.py
|
PolideaPlayground/SiriusObfuscator
|
b0e590d8130e97856afe578869b83a209e2b19be
|
[
"Apache-2.0"
] | 25
|
2018-07-23T08:34:15.000Z
|
2021-11-05T07:13:36.000Z
|
SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/lldbplatformutil.py
|
PolideaPlayground/SiriusObfuscator
|
b0e590d8130e97856afe578869b83a209e2b19be
|
[
"Apache-2.0"
] | 52
|
2018-07-19T19:57:32.000Z
|
2022-03-11T16:05:38.000Z
|
""" This module contains functions used by the test cases to hide the
architecture and/or the platform dependent nature of the tests. """
from __future__ import absolute_import
# System modules
import itertools
import re
import subprocess
import sys
import os
# Third-party modules
import six
from six.moves.urllib import parse as urlparse
# LLDB modules
from . import configuration
import use_lldb_suite
import lldb
def check_first_register_readable(test_case):
arch = test_case.getArchitecture()
if arch in ['x86_64', 'i386']:
test_case.expect("register read eax", substrs=['eax = 0x'])
elif arch in ['arm']:
test_case.expect("register read r0", substrs=['r0 = 0x'])
elif arch in ['aarch64']:
test_case.expect("register read x0", substrs=['x0 = 0x'])
elif re.match("mips", arch):
test_case.expect("register read zero", substrs=['zero = 0x'])
elif arch in ['s390x']:
test_case.expect("register read r0", substrs=['r0 = 0x'])
else:
# TODO: Add check for other architectures
test_case.fail(
"Unsupported architecture for test case (arch: %s)" %
test_case.getArchitecture())
def _run_adb_command(cmd, device_id):
device_id_args = []
if device_id:
device_id_args = ["-s", device_id]
full_cmd = ["adb"] + device_id_args + cmd
p = subprocess.Popen(
full_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return p.returncode, stdout, stderr
def _target_is_android():
if not hasattr(_target_is_android, 'result'):
triple = lldb.DBG.GetSelectedPlatform().GetTriple()
match = re.match(".*-.*-.*-android", triple)
_target_is_android.result = match is not None
return _target_is_android.result
def android_device_api():
if not hasattr(android_device_api, 'result'):
assert configuration.lldb_platform_url is not None
device_id = None
parsed_url = urlparse.urlparse(configuration.lldb_platform_url)
host_name = parsed_url.netloc.split(":")[0]
if host_name != 'localhost':
device_id = host_name
if device_id.startswith('[') and device_id.endswith(']'):
device_id = device_id[1:-1]
retcode, stdout, stderr = _run_adb_command(
["shell", "getprop", "ro.build.version.sdk"], device_id)
if retcode == 0:
android_device_api.result = int(stdout)
else:
raise LookupError(
">>> Unable to determine the API level of the Android device.\n"
">>> stdout:\n%s\n"
">>> stderr:\n%s\n" %
(stdout, stderr))
return android_device_api.result
def match_android_device(device_arch, valid_archs=None, valid_api_levels=None):
if not _target_is_android():
return False
if valid_archs is not None and device_arch not in valid_archs:
return False
if valid_api_levels is not None and android_device_api() not in valid_api_levels:
return False
return True
def finalize_build_dictionary(dictionary):
if _target_is_android():
if dictionary is None:
dictionary = {}
dictionary["OS"] = "Android"
if android_device_api() >= 16:
dictionary["PIE"] = 1
return dictionary
def getHostPlatform():
"""Returns the host platform running the test suite."""
# Attempts to return a platform name matching a target Triple platform.
if sys.platform.startswith('linux'):
return 'linux'
elif sys.platform.startswith('win32'):
return 'windows'
elif sys.platform.startswith('darwin'):
return 'darwin'
elif sys.platform.startswith('freebsd'):
return 'freebsd'
elif sys.platform.startswith('netbsd'):
return 'netbsd'
else:
return sys.platform
def getDarwinOSTriples():
return ['darwin', 'macosx', 'ios']
def getPlatform():
"""Returns the target platform which the tests are running on."""
platform = lldb.DBG.GetSelectedPlatform().GetTriple().split('-')[2]
if platform.startswith('freebsd'):
platform = 'freebsd'
elif platform.startswith('netbsd'):
platform = 'netbsd'
return platform
def platformIsDarwin():
"""Returns true if the OS triple for the selected platform is any valid apple OS"""
return getPlatform() in getDarwinOSTriples()
def findMainThreadCheckerDylib():
if not platformIsDarwin():
return ""
with os.popen('xcode-select -p') as output:
xcode_developer_path = output.read().strip()
mtc_dylib_path = '%s/usr/lib/libMainThreadChecker.dylib' % xcode_developer_path
if os.path.isfile(mtc_dylib_path):
return mtc_dylib_path
return ""
class _PlatformContext(object):
"""Value object class which contains platform-specific options."""
def __init__(self, shlib_environment_var, shlib_prefix, shlib_extension):
self.shlib_environment_var = shlib_environment_var
self.shlib_prefix = shlib_prefix
self.shlib_extension = shlib_extension
def createPlatformContext():
if platformIsDarwin():
return _PlatformContext('DYLD_LIBRARY_PATH', 'lib', 'dylib')
elif getPlatform() in ("freebsd", "linux", "netbsd"):
return _PlatformContext('LD_LIBRARY_PATH', 'lib', 'so')
else:
return None
def hasChattyStderr(test_case):
"""Some targets produce garbage on the standard error output. This utility function
determines whether the tests can be strict about the expected stderr contents."""
if match_android_device(test_case.getArchitecture(), ['aarch64'], [22]):
return True # The dynamic linker on the device will complain about unknown DT entries
return False
| 32.259669 | 94 | 0.66347 |
30541c8137a89dd3d950e95bc0d3ea2cfb4b6a34 | 5,897 | py | Python | tests/components/datadog/test_init.py | Hypfer/home-assistant | 204ca3f3a6e24ef11ece2e2ee490a8d77553c147 | ["Apache-2.0"] | 1 | 2019-12-06T08:49:19.000Z | 2019-12-06T08:49:19.000Z | tests/components/datadog/test_init.py | FuqiangSong/home-assistant | d5419b77f9c245e5af006143eb55ae4dda3f174e | ["Apache-2.0"] | 2 | 2021-02-08T20:39:43.000Z | 2021-09-08T01:36:57.000Z | tests/components/datadog/test_init.py | FuqiangSong/home-assistant | d5419b77f9c245e5af006143eb55ae4dda3f174e | ["Apache-2.0"] | null | null | null |
"""The tests for the Datadog component."""
from unittest import mock
import unittest
from homeassistant.const import (
EVENT_LOGBOOK_ENTRY,
EVENT_STATE_CHANGED,
STATE_OFF,
STATE_ON,
)
from homeassistant.setup import setup_component
import homeassistant.components.datadog as datadog
import homeassistant.core as ha
from tests.common import assert_setup_component, get_test_home_assistant
class TestDatadog(unittest.TestCase):
"""Test the Datadog component."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_invalid_config(self):
"""Test invalid configuration."""
with assert_setup_component(0):
assert not setup_component(
self.hass, datadog.DOMAIN, {datadog.DOMAIN: {"host1": "host1"}}
)
@mock.patch("homeassistant.components.datadog.statsd")
@mock.patch("homeassistant.components.datadog.initialize")
def test_datadog_setup_full(self, mock_connection, mock_client):
"""Test setup with all data."""
self.hass.bus.listen = mock.MagicMock()
assert setup_component(
self.hass,
datadog.DOMAIN,
{datadog.DOMAIN: {"host": "host", "port": 123, "rate": 1, "prefix": "foo"}},
)
assert mock_connection.call_count == 1
assert mock_connection.call_args == mock.call(
statsd_host="host", statsd_port=123
)
assert self.hass.bus.listen.called
assert EVENT_LOGBOOK_ENTRY == self.hass.bus.listen.call_args_list[0][0][0]
assert EVENT_STATE_CHANGED == self.hass.bus.listen.call_args_list[1][0][0]
@mock.patch("homeassistant.components.datadog.statsd")
@mock.patch("homeassistant.components.datadog.initialize")
def test_datadog_setup_defaults(self, mock_connection, mock_client):
"""Test setup with defaults."""
self.hass.bus.listen = mock.MagicMock()
assert setup_component(
self.hass,
datadog.DOMAIN,
{
datadog.DOMAIN: {
"host": "host",
"port": datadog.DEFAULT_PORT,
"prefix": datadog.DEFAULT_PREFIX,
}
},
)
assert mock_connection.call_count == 1
assert mock_connection.call_args == mock.call(
statsd_host="host", statsd_port=8125
)
assert self.hass.bus.listen.called
@mock.patch("homeassistant.components.datadog.statsd")
@mock.patch("homeassistant.components.datadog.initialize")
def test_logbook_entry(self, mock_connection, mock_client):
"""Test event listener."""
self.hass.bus.listen = mock.MagicMock()
assert setup_component(
self.hass,
datadog.DOMAIN,
{datadog.DOMAIN: {"host": "host", "rate": datadog.DEFAULT_RATE}},
)
assert self.hass.bus.listen.called
handler_method = self.hass.bus.listen.call_args_list[0][0][1]
event = {
"domain": "automation",
"entity_id": "sensor.foo.bar",
"message": "foo bar biz",
"name": "triggered something",
}
handler_method(mock.MagicMock(data=event))
assert mock_client.event.call_count == 1
assert mock_client.event.call_args == mock.call(
title="Home Assistant",
text="%%% \n **{}** {} \n %%%".format(event["name"], event["message"]),
tags=["entity:sensor.foo.bar", "domain:automation"],
)
mock_client.event.reset_mock()
@mock.patch("homeassistant.components.datadog.statsd")
@mock.patch("homeassistant.components.datadog.initialize")
def test_state_changed(self, mock_connection, mock_client):
"""Test event listener."""
self.hass.bus.listen = mock.MagicMock()
assert setup_component(
self.hass,
datadog.DOMAIN,
{
datadog.DOMAIN: {
"host": "host",
"prefix": "ha",
"rate": datadog.DEFAULT_RATE,
}
},
)
assert self.hass.bus.listen.called
handler_method = self.hass.bus.listen.call_args_list[1][0][1]
valid = {"1": 1, "1.0": 1.0, STATE_ON: 1, STATE_OFF: 0}
attributes = {"elevation": 3.2, "temperature": 5.0}
for in_, out in valid.items():
state = mock.MagicMock(
domain="sensor",
entity_id="sensor.foo.bar",
state=in_,
attributes=attributes,
)
handler_method(mock.MagicMock(data={"new_state": state}))
assert mock_client.gauge.call_count == 3
for attribute, value in attributes.items():
mock_client.gauge.assert_has_calls(
[
mock.call(
"ha.sensor.{}".format(attribute),
value,
sample_rate=1,
tags=["entity:{}".format(state.entity_id)],
)
]
)
assert mock_client.gauge.call_args == mock.call(
"ha.sensor",
out,
sample_rate=1,
tags=["entity:{}".format(state.entity_id)],
)
mock_client.gauge.reset_mock()
for invalid in ("foo", "", object):
handler_method(
mock.MagicMock(data={"new_state": ha.State("domain.test", invalid, {})})
)
assert not mock_client.gauge.called
| 33.697143 | 88 | 0.564863 |
62b8063ef72c212839ab87a26d25a4b059fa0ebf | 4,741 | py | Python | Experimentation/pygen/generated.py | svenssonjoel/COPPE | 8a2a0b3ee09ba70fa59b85dfaffe510b1882209b | ["BSD-3-Clause"] | null | null | null | Experimentation/pygen/generated.py | svenssonjoel/COPPE | 8a2a0b3ee09ba70fa59b85dfaffe510b1882209b | ["BSD-3-Clause"] | null | null | null | Experimentation/pygen/generated.py | svenssonjoel/COPPE | 8a2a0b3ee09ba70fa59b85dfaffe510b1882209b | ["BSD-3-Clause"] | null | null | null |
import checkpoint as checkpoint
import tensorflow.compat.v1 as tf
import json as json
import numpy as np
import os as os
with open("checkpoint/code_gen_test.json") as f:
network = json.load(f)
def model(input_data, network):
conv_filters = tf.keras.initializers.glorot_uniform(seed=None)(shape=(3, 3, input_data.get_shape().as_list()[3], 16))
conv_filters = checkpoint.setup_variable(network['layers'][1]['value'], init=tf.Variable, init_value=conv_filters, var_name='conv/filters')
conv = tf.nn.conv2d(input=input_data, filters=conv_filters, padding='SAME', strides=[1, 1], name='conv')
network['layers'][1]['value']['var']['conv/filters'] = conv_filters
network['layers'][1]['value']['tensor'] = conv
another_conv_0_filters = tf.keras.initializers.glorot_uniform(seed=None)(shape=(3, 3, conv.get_shape().as_list()[3], 16))
another_conv_0_filters = checkpoint.setup_variable(network['layers'][2]['value'], init=tf.Variable, init_value=another_conv_0_filters, var_name='another_conv_0/filters')
another_conv_0 = tf.nn.conv2d(input=conv, filters=another_conv_0_filters, padding='SAME', strides=[1, 1], name='another_conv_0')
network['layers'][2]['value']['var']['another_conv_0/filters'] = another_conv_0_filters
network['layers'][2]['value']['tensor'] = another_conv_0
another_conv_1_filters = tf.keras.initializers.glorot_uniform(seed=None)(shape=(3, 3, another_conv_0.get_shape().as_list()[3], 16))
another_conv_1_filters = checkpoint.setup_variable(network['layers'][3]['value'], init=tf.Variable, init_value=another_conv_1_filters, var_name='another_conv_1/filters')
another_conv_1 = tf.nn.conv2d(input=another_conv_0, filters=another_conv_1_filters, padding='SAME', strides=[1, 1], name='another_conv_1')
network['layers'][3]['value']['var']['another_conv_1/filters'] = another_conv_1_filters
network['layers'][3]['value']['tensor'] = another_conv_1
another_conv_2_filters = tf.keras.initializers.glorot_uniform(seed=None)(shape=(3, 3, another_conv_1.get_shape().as_list()[3], 16))
another_conv_2_filters = checkpoint.setup_variable(network['layers'][4]['value'], init=tf.Variable, init_value=another_conv_2_filters, var_name='another_conv_2/filters')
another_conv_2 = tf.nn.conv2d(input=another_conv_1, filters=another_conv_2_filters, padding='SAME', strides=[1, 1], name='another_conv_2')
network['layers'][4]['value']['var']['another_conv_2/filters'] = another_conv_2_filters
network['layers'][4]['value']['tensor'] = another_conv_2
another_conv_filters = tf.keras.initializers.glorot_uniform(seed=None)(shape=(3, 3, another_conv_2.get_shape().as_list()[3], 16))
another_conv_filters = checkpoint.setup_variable(network['layers'][5]['value'], init=tf.Variable, init_value=another_conv_filters, var_name='another_conv/filters')
another_conv = tf.nn.conv2d(input=another_conv_2, filters=another_conv_filters, padding='SAME', strides=[1, 1], name='another_conv')
network['layers'][5]['value']['var']['another_conv/filters'] = another_conv_filters
network['layers'][5]['value']['tensor'] = another_conv
dense_10kc_weights = tf.keras.initializers.glorot_uniform(seed=None)(shape=(np.prod(another_conv.get_shape().as_list()[1:]), 10))
dense_10kc_weights = checkpoint.setup_variable(network['layers'][6]['value'], init=tf.Variable, init_value=dense_10kc_weights, var_name='dense_10kc/weights')
dense_10kc_bias = tf.zeros(shape=(10,))
dense_10kc_bias = checkpoint.setup_variable(network['layers'][6]['value'], init=tf.Variable, init_value=dense_10kc_bias, var_name='dense_10kc/bias')
another_conv = tf.reshape(another_conv, [-1, np.prod(another_conv.get_shape().as_list()[1:])])
dense_10kc = tf.nn.xw_plus_b(x=another_conv, biases=dense_10kc_bias, weights=dense_10kc_weights, name='dense_10kc')
network['layers'][6]['value']['var']['dense_10kc/bias'] = dense_10kc_bias
network['layers'][6]['value']['var']['dense_10kc/weights'] = dense_10kc_weights
network['layers'][6]['value']['tensor'] = dense_10kc
d_1 = tf.nn.dropout(x=dense_10kc, rate=0.4, name='d_1')
network['layers'][7]['value']['tensor'] = d_1
regularizer = 0.002000 * (tf.nn.l2_loss(t=conv_filters, name='regularizer/conv') + tf.nn.l2_loss(t=another_conv_0_filters, name='regularizer/another_conv_0') + tf.nn.l2_loss(t=another_conv_1_filters, name='regularizer/another_conv_1') + tf.nn.l2_loss(t=another_conv_2_filters, name='regularizer/another_conv_2') + tf.nn.l2_loss(t=another_conv_filters, name='regularizer/another_conv'))
return regularizer
data = np.random.rand(32, 32, 3)
input_tensor = tf.placeholder(dtype=tf.float32, shape=(None, 32, 32, 3), name='input_data')
output = model(input_tensor, network)
| 75.253968 | 389 | 0.74035 |
121ecdcea7a2359a904c565fee78b0c9ecf587a3 | 7,661 | py | Python | contrib/devtools/update-translations.py | niocutl/ltucoin | ce1d3edd6d165a551b4467a015fa4ad028f38100 | ["MIT"] | null | null | null | contrib/devtools/update-translations.py | niocutl/ltucoin | ce1d3edd6d165a551b4467a015fa4ad028f38100 | ["MIT"] | null | null | null | contrib/devtools/update-translations.py | niocutl/ltucoin | ce1d3edd6d165a551b4467a015fa4ad028f38100 | ["MIT"] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
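# Illustrative invocation only (not part of the original script): run from the
# repository root with the transifex client `tx` available on PATH, e.g.
#
#   python contrib/devtools/update-translations.py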
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'ltucoin_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
except:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
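# Illustrative example (added for clarity, not part of the original script):
# for a hypothetical source string the helpers above behave as follows.
#
#   find_format_specifiers("Copy %1 of %s")  -> ['1', 's']
#   split_format_specifiers(['1', 's'])      -> ({'1'}, ['s'])
#
# Numbered Qt placeholders are compared as a set (any order is allowed), while
# strprintf-style specifiers must keep their original order.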
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
#assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood only 'utf-8'
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
# fetch_all_translations()
postprocess_translations()
| 37.553922 | 124 | 0.629422 |
b98e1cd7dbd9f6c3c2671c1304f60e1ea6db10e6 | 1,541 | py | Python | src/data/importer/base_loader.py | shivamraval98/T5_AE | 75c2e55c9adced49f670063e649499911b95158e | ["MIT"] | 5 | 2021-11-09T19:02:02.000Z | 2022-03-29T01:32:31.000Z | src/data/importer/base_loader.py | shivamraval98/T5_AE | 75c2e55c9adced49f670063e649499911b95158e | ["MIT"] | null | null | null | src/data/importer/base_loader.py | shivamraval98/T5_AE | 75c2e55c9adced49f670063e649499911b95158e | ["MIT"] | 1 | 2021-12-05T11:38:01.000Z | 2021-12-05T11:38:01.000Z |
import pandas as pd
import os
from sklearn.model_selection import train_test_split
import preprocessor as p
'''
BaseLoader class which is extended by other Data Loaders
'''
class BaseLoader(object):
def __init__(self):
self.split_path = os.getcwd() + "/data/splits/"
self.dataset_path = os.getcwd() + "/data/datasets/"
'''
Function to lowercase all the strings in the list
Parameters
--------------
    samp_list (list): input list on which the lowercase operation needs to be performed
Returns
--------------
lowercase_str (list): return list of the strings after performing lowercase operation
'''
def str_lower_helper(self, samp_list):
lowercase_str = []
for samp in samp_list:
if type(samp) == type("str"):
lowercase_str.append(samp.lower())
else:
lowercase_str.append(samp)
return lowercase_str
'''
Function to preprocess the given tweet sentence to remove URL, EMOJI and SMILEY
Parameters
---------------
raw_str (str): the input string which needs to be pre-processed
Returns
-------------
clean_str (str): returns the processed string
'''
def twitter_preprocess(self, raw_str):
p.set_options(p.OPT.URL, p.OPT.EMOJI, p.OPT.SMILEY)
clean_str = ""
try:
clean_str = p.clean(raw_str)
except:
clean_str = raw_str
return clean_str
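# Illustrative usage sketch (hypothetical loader, not part of the original module):
#
#   class TweetLoader(BaseLoader):
#       def load(self, filename="tweets.csv"):
#           df = pd.read_csv(os.path.join(self.dataset_path, filename))
#           df["text"] = df["text"].apply(self.twitter_preprocess)
#           df["text"] = self.str_lower_helper(df["text"].tolist())
#           return train_test_split(df, test_size=0.2)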
| 28.537037 | 89 | 0.589877 |
e2a573db4410f533350107de0a9779da235b7537 | 13,572 | py | Python | samples/fpga/matrix_multiplication_systolic.py | xiacijie/dace | 2d942440b1d7b139ba112434bfa78f754e10bfe5 | ["BSD-3-Clause"] | 1 | 2021-07-26T07:58:06.000Z | 2021-07-26T07:58:06.000Z | samples/fpga/matrix_multiplication_systolic.py | xiacijie/dace | 2d942440b1d7b139ba112434bfa78f754e10bfe5 | ["BSD-3-Clause"] | null | null | null | samples/fpga/matrix_multiplication_systolic.py | xiacijie/dace | 2d942440b1d7b139ba112434bfa78f754e10bfe5 | ["BSD-3-Clause"] | 1 | 2021-03-04T13:01:48.000Z | 2021-03-04T13:01:48.000Z |
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import argparse
import dace
import numpy as np
import pdb
import select
import sys
N = dace.symbol("N")
K = dace.symbol("K")
M = dace.symbol("M")
P = dace.symbol("P")
def make_copy_to_fpga_state(sdfg):
###########################################################################
# Copy data to FPGA
state = sdfg.add_state("copy_to_device")
sdfg.add_array("A", [N, K], dtype=dace.float32)
sdfg.add_array("B", [K, M], dtype=dace.float32)
sdfg.add_array("C", [N, M], dtype=dace.float32)
A_host = state.add_read("A")
B_host = state.add_read("B")
C_host = state.add_read("C")
sdfg.add_array("A_device", [N, K],
dtype=dace.float32,
transient=True,
storage=dace.dtypes.StorageType.FPGA_Global)
sdfg.add_array("B_device", [K, M],
dtype=dace.float32,
transient=True,
storage=dace.dtypes.StorageType.FPGA_Global)
sdfg.add_array("C_device", [N, M],
dtype=dace.float32,
transient=True,
storage=dace.dtypes.StorageType.FPGA_Global)
A_device = state.add_write("A_device")
B_device = state.add_write("B_device")
C_device = state.add_write("C_device")
state.add_memlet_path(A_host,
A_device,
memlet=dace.Memlet("A_device[0:N, 0:K]"))
state.add_memlet_path(B_host,
B_device,
memlet=dace.Memlet("B_device[0:K, 0:M]"))
state.add_memlet_path(C_host,
C_device,
memlet=dace.Memlet("C_device[0:N, 0:M]"))
return state
def make_copy_to_host_state(sdfg):
###########################################################################
# Copy data to FPGA
state = sdfg.add_state("copy_to_host")
C_device = state.add_read("C_device")
C_host = state.add_write("C")
state.add_memlet_path(C_device, C_host, memlet=dace.Memlet("C[0:N, 0:M]"))
return state
def make_read_A(state):
entry, exit = state.add_map("read_A", {
"n0": "0:N/P",
"k": "0:K",
"n1": "0:P"
},
schedule=dace.ScheduleType.FPGA_Device)
mem = state.add_read("A_device")
pipe = state.add_write("A_pipe")
tasklet = state.add_tasklet("read_A", {"from_memory"}, {"to_kernel"},
"to_kernel = from_memory")
state.add_memlet_path(mem,
entry,
tasklet,
dst_conn="from_memory",
memlet=dace.Memlet("A_device[n0 * P + n1, k]"))
state.add_memlet_path(tasklet,
exit,
pipe,
src_conn="to_kernel",
memlet=dace.Memlet("A_pipe[0]"))
def make_read_B(state):
entry, exit = state.add_map("read_B", {
"n": "0:N/P",
"k": "0:K",
"m": "0:M"
},
schedule=dace.ScheduleType.FPGA_Device)
mem = state.add_read("B_device")
pipe = state.add_write("B_pipe")
tasklet = state.add_tasklet("read_B", {"from_memory"}, {"to_kernel"},
"to_kernel = from_memory")
state.add_memlet_path(mem,
entry,
tasklet,
dst_conn="from_memory",
memlet=dace.Memlet("B_device[k, m]"))
state.add_memlet_path(tasklet,
exit,
pipe,
src_conn="to_kernel",
memlet=dace.Memlet("B_pipe[0]"))
def make_write_C(state):
pipe = state.add_read("C_pipe")
mem = state.add_write("C_device")
state.add_memlet_path(pipe,
mem,
memlet=dace.Memlet("C_device[0:N, 0:M]",
other_subset="P - 1"))
def make_compute(sdfg, state):
A_pipe_in = state.add_read("A_pipe")
A_pipe_out = state.add_write("A_pipe")
B_pipe_in = state.add_read("B_pipe")
B_pipe_out = state.add_write("B_pipe")
C_pipe_in = state.add_read("C_pipe")
C_pipe_out = state.add_write("C_pipe")
entry_n0, exit_n0 = state.add_map("n0", {
"n0": "0:N/P",
},
schedule=dace.ScheduleType.FPGA_Device)
entry_k, exit_k = state.add_map("k", {"k": "0:K"},
schedule=dace.ScheduleType.FPGA_Device)
entry_a, exit_a = state.add_map("buffer_A", {"n1": "0:P"},
schedule=dace.ScheduleType.FPGA_Device)
entry_m, exit_m = state.add_map("m", {"m": "0:M"},
schedule=dace.ScheduleType.FPGA_Device)
entry_c, exit_c = state.add_map("write_C", {
"n1": "0:P",
"m": "0:M"
},
schedule=dace.ScheduleType.FPGA_Device)
# Instantiate buffers
sdfg.add_scalar("A_reg",
dtype=dace.float32,
transient=True,
storage=dace.dtypes.StorageType.FPGA_Registers)
A_reg = state.add_write("A_reg")
sdfg.add_array("C_buffer", [M],
dtype=dace.float32,
transient=True,
storage=dace.dtypes.StorageType.FPGA_Local)
C_buffer_in = state.add_read("C_buffer")
C_buffer_out = state.add_write("C_buffer")
buffer_a_tasklet = state.add_tasklet(
"buffer_a", {"a_in"}, {"a_reg", "a_out"}, """\
if n1 == P - p - 1:
a_reg = a_in
if p < P - 1:
a_out = a_in""")
state.add_memlet_path(A_pipe_in,
entry_n0,
entry_k,
entry_a,
buffer_a_tasklet,
memlet=dace.Memlet("A_pipe[p]", dynamic=False),
dst_conn="a_in")
state.add_memlet_path(buffer_a_tasklet,
exit_a,
A_reg,
memlet=dace.Memlet("A_reg[0]", dynamic=True),
src_conn="a_reg")
state.add_memlet_path(buffer_a_tasklet,
exit_a,
exit_k,
exit_n0,
A_pipe_out,
memlet=dace.Memlet("A_pipe[p + 1]", dynamic=True),
src_conn="a_out")
compute_tasklet = state.add_tasklet(
"multiply_add", {"a_in", "b_in", "c_in"}, {"b_out", "c_out"}, """\
c_prev = 0 if k == 0 else c_in
c_out = c_prev + a_in * b_in
if p < P - 1:
b_out = b_in""")
state.add_memlet_path(A_reg,
entry_m,
compute_tasklet,
dst_conn="a_in",
memlet=dace.Memlet("A_reg[0]"))
state.add_memlet_path(B_pipe_in,
entry_n0,
entry_k,
entry_m,
compute_tasklet,
memlet=dace.Memlet("B_pipe[p]", dynamic=False),
dst_conn="b_in")
state.add_memlet_path(compute_tasklet,
exit_m,
exit_k,
exit_n0,
B_pipe_out,
memlet=dace.Memlet("B_pipe[p + 1]", dynamic=True),
src_conn="b_out")
state.add_memlet_path(C_buffer_in,
entry_k,
entry_m,
compute_tasklet,
dst_conn="c_in",
memlet=dace.Memlet("C_buffer[m]"))
state.add_memlet_path(entry_n0, C_buffer_in, memlet=dace.Memlet())
state.add_memlet_path(compute_tasklet,
exit_m,
exit_k,
C_buffer_out,
memlet=dace.Memlet("C_buffer[m]"),
src_conn="c_out")
state.add_memlet_path(C_buffer_out, exit_n0, memlet=dace.Memlet())
# Write back
write_c_tasklet = state.add_tasklet(
"write_c", {"buffer_in", "forward_in"}, {"c_out"}, """\
if n1 <= p:
c_out = forward_in if p > 0 and n1 > 0 else buffer_in""")
state.add_memlet_path(C_buffer_out,
entry_c,
write_c_tasklet,
memlet=dace.Memlet("C_buffer[m]", dynamic=True),
dst_conn="buffer_in")
state.add_memlet_path(C_pipe_in,
entry_n0,
entry_c,
write_c_tasklet,
memlet=dace.Memlet("C_pipe[p-1]", dynamic=True),
dst_conn="forward_in")
state.add_memlet_path(write_c_tasklet,
exit_c,
exit_n0,
C_pipe_out,
memlet=dace.Memlet("C_pipe[p]", dynamic=True),
src_conn="c_out")
# Unroll processing elements
compute_entry, compute_exit = state.add_map(
"unroll_compute", {"p": "0:P"},
schedule=dace.ScheduleType.FPGA_Device,
unroll=True)
# Bring data nodes into scope
state.add_memlet_path(compute_entry, A_pipe_in, memlet=dace.memlet.Memlet())
state.add_memlet_path(compute_entry, B_pipe_in, memlet=dace.memlet.Memlet())
state.add_memlet_path(compute_entry, C_pipe_in, memlet=dace.memlet.Memlet())
state.add_memlet_path(A_pipe_out, compute_exit, memlet=dace.memlet.Memlet())
state.add_memlet_path(B_pipe_out, compute_exit, memlet=dace.memlet.Memlet())
state.add_memlet_path(C_pipe_out, compute_exit, memlet=dace.memlet.Memlet())
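    # Descriptive note (added for clarity, as read from the tasklets above):
    # each processing element p consumes A_pipe[p] and B_pipe[p]; buffer_a_tasklet
    # keeps the A value destined for this PE (n1 == P - p - 1) and forwards the
    # stream to A_pipe[p + 1] while p < P - 1, compute_tasklet accumulates into
    # C_buffer and forwards B to B_pipe[p + 1], and write_c_tasklet drains results
    # through C_pipe, passing along values received on C_pipe[p - 1] before
    # emitting this PE's own row.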
def make_fpga_state(sdfg):
state = sdfg.add_state("mm")
sdfg.add_stream("A_pipe",
dace.float32,
transient=True,
shape=(P + 1, ),
storage=dace.dtypes.StorageType.FPGA_Local,
buffer_size="P")
sdfg.add_stream("B_pipe",
dace.float32,
transient=True,
shape=(P + 1, ),
storage=dace.dtypes.StorageType.FPGA_Local)
sdfg.add_stream("C_pipe",
dace.float32,
transient=True,
shape=(P, ),
storage=dace.dtypes.StorageType.FPGA_Local)
make_read_A(state)
make_read_B(state)
make_compute(sdfg, state)
make_write_C(state)
return state
def make_sdfg(specialized):
if specialized:
sdfg = dace.SDFG("mm_fpga_systolic_{}_{}x{}x{}".format(
P.get(), N.get(), K.get(), M.get()))
else:
sdfg = dace.SDFG("mm_fpga_systolic_{}_NxKx{}".format(P.get(), M.get()))
pre_state = make_copy_to_fpga_state(sdfg)
compute_state = make_fpga_state(sdfg)
post_state = make_copy_to_host_state(sdfg)
sdfg.add_edge(pre_state, compute_state, dace.sdfg.InterstateEdge())
sdfg.add_edge(compute_state, post_state, dace.sdfg.InterstateEdge())
return sdfg
if __name__ == "__main__":
print("==== Program start ====")
parser = argparse.ArgumentParser()
parser.add_argument("M", type=int)
parser.add_argument("N", type=int)
parser.add_argument("K", type=int)
parser.add_argument("P", type=int)
parser.add_argument("-specialize",
default=False,
action="store_true",
help="Fix all loop bounds at compile time/in hardware")
args = vars(parser.parse_args())
if not args["specialize"]:
P.set(args["P"])
M.set(args["M"])
# M must always be specialized, as it's used for the static buffer size
sdfg = make_sdfg(False)
sdfg.specialize(dict(P=P, M=M))
N.set(args["N"])
K.set(args["K"])
else:
P.set(args["P"])
M.set(args["M"])
N.set(args["N"])
K.set(args["K"])
sdfg = make_sdfg(True)
sdfg.specialize(dict(P=P, M=M, N=N, K=K))
print("Matrix multiplication {}x{}x{} with {} PEs ({}specialized)".format(
M.get(), N.get(), K.get(), P.get(),
"" if args["specialize"] else "not "))
# Initialize arrays: Randomize A and B, zero C
A = np.ndarray([N.get(), K.get()], dtype=dace.float32.type)
B = np.ndarray([K.get(), M.get()], dtype=dace.float32.type)
C = np.ndarray([N.get(), M.get()], dtype=dace.float32.type)
A[:] = np.random.rand(N.get(), K.get()).astype(dace.float32.type)
B[:] = np.random.rand(K.get(), M.get()).astype(dace.float32.type)
C[:] = dace.float32(0)
A_regression = np.ndarray([N.get(), K.get()], dtype=np.float32)
B_regression = np.ndarray([K.get(), M.get()], dtype=np.float32)
C_regression = np.ndarray([N.get(), M.get()], dtype=np.float32)
A_regression[:] = A[:]
B_regression[:] = B[:]
C_regression[:] = C[:]
if args["specialize"]:
sdfg(A=A, B=B, C=C)
else:
sdfg(A=A, B=B, C=C, N=N, K=K)
diff = np.linalg.norm((A @ B) - C) / float(M.get() * K.get())
if diff > 1e-6:
raise ValueError(f"Verification failed, difference: {diff}")
else:
print("Results successfully verified.")
| 35.251948 | 80 | 0.510021 |
e4457699bfaa5fe0156d64a22304952821c0a088 | 984 | py | Python | paddlehub/finetune/task/__init__.py | HawChang/PaddleHub | 7faf09eb423b7c844392e6fa19b9b8db49928835 | ["Apache-2.0"] | 2 | 2020-11-30T08:54:01.000Z | 2021-04-07T10:11:36.000Z | paddlehub/finetune/task/__init__.py | haoyuying/PaddleHub | 9894fbb1dc8575ae1fa74f32a23cc1363467461b | ["Apache-2.0"] | null | null | null | paddlehub/finetune/task/__init__.py | haoyuying/PaddleHub | 9894fbb1dc8575ae1fa74f32a23cc1363467461b | ["Apache-2.0"] | null | null | null |
#coding:utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .base_task import BaseTask, RunEnv, RunState
from .classifier_task import ClassifierTask, ImageClassifierTask, TextClassifierTask, MultiLabelClassifierTask
from .detection_task import DetectionTask
from .reading_comprehension_task import ReadingComprehensionTask
from .regression_task import RegressionTask
from .sequence_task import SequenceLabelTask
| 44.727273 | 110 | 0.805894 |
014533f23b9746874a3b3476a9825bbb2e0f6cbb | 18,000 | py | Python | pytorch/pytorchcv/models/pyramidnet_cifar.py | raijinspecial/imgclsmob | c5d3ab207a6304f1343e4394f0467bdc7403a72a | ["MIT"] | null | null | null | pytorch/pytorchcv/models/pyramidnet_cifar.py | raijinspecial/imgclsmob | c5d3ab207a6304f1343e4394f0467bdc7403a72a | ["MIT"] | null | null | null | pytorch/pytorchcv/models/pyramidnet_cifar.py | raijinspecial/imgclsmob | c5d3ab207a6304f1343e4394f0467bdc7403a72a | ["MIT"] | null | null | null |
"""
PyramidNet for CIFAR, implemented in PyTorch.
Original paper: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
"""
__all__ = ['CIFARPyramidNet', 'pyramidnet110_a48_cifar10', 'pyramidnet110_a48_cifar100', 'pyramidnet110_a84_cifar10',
'pyramidnet110_a84_cifar100', 'pyramidnet110_a270_cifar10', 'pyramidnet110_a270_cifar100',
'pyramidnet164_a270_bn_cifar10', 'pyramidnet164_a270_bn_cifar100', 'pyramidnet200_a240_bn_cifar10',
'pyramidnet200_a240_bn_cifar100', 'pyramidnet236_a220_bn_cifar10', 'pyramidnet236_a220_bn_cifar100',
'pyramidnet272_a200_bn_cifar10', 'pyramidnet272_a200_bn_cifar100']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv3x3_block
from .preresnet import PreResActivation
from .pyramidnet import PyrUnit
class CIFARPyramidNet(nn.Module):
"""
PyramidNet model for CIFAR from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
num_classes : int, default 10
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
in_channels=3,
in_size=(32, 32),
num_classes=10):
super(CIFARPyramidNet, self).__init__()
self.in_size = in_size
self.num_classes = num_classes
self.features = nn.Sequential()
self.features.add_module("init_block", conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
activation=None,
activate=False))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.Sequential()
for j, out_channels in enumerate(channels_per_stage):
stride = 1 if (i == 0) or (j != 0) else 2
stage.add_module("unit{}".format(j + 1), PyrUnit(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
bottleneck=bottleneck))
in_channels = out_channels
self.features.add_module("stage{}".format(i + 1), stage)
self.features.add_module('post_activ', PreResActivation(in_channels=in_channels))
self.features.add_module('final_pool', nn.AvgPool2d(
kernel_size=8,
stride=1))
self.output = nn.Linear(
in_features=in_channels,
out_features=num_classes)
self._init_params()
def _init_params(self):
for name, module in self.named_modules():
if isinstance(module, nn.Conv2d):
init.kaiming_uniform_(module.weight)
if module.bias is not None:
init.constant_(module.bias, 0)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.output(x)
return x
def get_pyramidnet_cifar(num_classes,
blocks,
alpha,
bottleneck,
model_name=None,
pretrained=False,
root=os.path.join('~', '.torch', 'models'),
**kwargs):
"""
Create PyramidNet for CIFAR model with specific parameters.
Parameters:
----------
num_classes : int
Number of classification classes.
blocks : int
Number of blocks.
alpha : int
PyramidNet's alpha value.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
assert (num_classes in [10, 100])
if bottleneck:
assert ((blocks - 2) % 9 == 0)
layers = [(blocks - 2) // 9] * 3
else:
assert ((blocks - 2) % 6 == 0)
layers = [(blocks - 2) // 6] * 3
init_block_channels = 16
growth_add = float(alpha) / float(sum(layers))
from functools import reduce
channels = reduce(
lambda xi, yi: xi + [[(i + 1) * growth_add + xi[-1][-1] for i in list(range(yi))]],
layers,
[[init_block_channels]])[1:]
channels = [[int(round(cij)) for cij in ci] for ci in channels]
if bottleneck:
channels = [[cij * 4 for cij in ci] for ci in channels]
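    # Worked example (added for clarity): for pyramidnet110_a48_cifar10,
    # blocks=110 with bottleneck=False gives layers = [18, 18, 18], so
    # growth_add = 48 / 54 ≈ 0.889 and the per-unit widths grow linearly from
    # round(16 + 0.889) = 17 up to round(16 + 54 * 0.889) = 64; the 4x expansion
    # above only applies to bottleneck configurations.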
net = CIFARPyramidNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
num_classes=num_classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
def pyramidnet110_a48_cifar10(num_classes=10, **kwargs):
"""
PyramidNet-110 (a=48) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
num_classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
num_classes=num_classes,
blocks=110,
alpha=48,
bottleneck=False,
model_name="pyramidnet110_a48_cifar10",
**kwargs)
def pyramidnet110_a48_cifar100(num_classes=100, **kwargs):
"""
PyramidNet-110 (a=48) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
num_classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
num_classes=num_classes,
blocks=110,
alpha=48,
bottleneck=False,
model_name="pyramidnet110_a48_cifar100",
**kwargs)
def pyramidnet110_a84_cifar10(num_classes=10, **kwargs):
"""
PyramidNet-110 (a=84) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
num_classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
num_classes=num_classes,
blocks=110,
alpha=84,
bottleneck=False,
model_name="pyramidnet110_a84_cifar10",
**kwargs)
def pyramidnet110_a84_cifar100(num_classes=100, **kwargs):
"""
PyramidNet-110 (a=84) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
num_classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
num_classes=num_classes,
blocks=110,
alpha=84,
bottleneck=False,
model_name="pyramidnet110_a84_cifar100",
**kwargs)
def pyramidnet110_a270_cifar10(num_classes=10, **kwargs):
"""
PyramidNet-110 (a=270) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
Parameters:
----------
num_classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
num_classes=num_classes,
blocks=110,
alpha=270,
bottleneck=False,
model_name="pyramidnet110_a270_cifar10",
**kwargs)
def pyramidnet110_a270_cifar100(num_classes=100, **kwargs):
"""
PyramidNet-110 (a=270) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
num_classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
num_classes=num_classes,
blocks=110,
alpha=270,
bottleneck=False,
model_name="pyramidnet110_a270_cifar100",
**kwargs)
def pyramidnet164_a270_bn_cifar10(num_classes=10, **kwargs):
"""
PyramidNet-164 (a=270, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
num_classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
num_classes=num_classes,
blocks=164,
alpha=270,
bottleneck=True,
model_name="pyramidnet164_a270_bn_cifar10",
**kwargs)
def pyramidnet164_a270_bn_cifar100(num_classes=100, **kwargs):
"""
PyramidNet-164 (a=270, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
num_classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
num_classes=num_classes,
blocks=164,
alpha=270,
bottleneck=True,
model_name="pyramidnet164_a270_bn_cifar100",
**kwargs)
def pyramidnet200_a240_bn_cifar10(num_classes=10, **kwargs):
"""
PyramidNet-200 (a=240, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
num_classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
num_classes=num_classes,
blocks=200,
alpha=240,
bottleneck=True,
model_name="pyramidnet200_a240_bn_cifar10",
**kwargs)
def pyramidnet200_a240_bn_cifar100(num_classes=100, **kwargs):
"""
PyramidNet-200 (a=240, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
num_classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
num_classes=num_classes,
blocks=200,
alpha=240,
bottleneck=True,
model_name="pyramidnet200_a240_bn_cifar100",
**kwargs)
def pyramidnet236_a220_bn_cifar10(num_classes=10, **kwargs):
"""
PyramidNet-236 (a=220, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
num_classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
num_classes=num_classes,
blocks=236,
alpha=220,
bottleneck=True,
model_name="pyramidnet236_a220_bn_cifar10",
**kwargs)
def pyramidnet236_a220_bn_cifar100(num_classes=100, **kwargs):
"""
PyramidNet-236 (a=220, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
num_classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
num_classes=num_classes,
blocks=236,
alpha=220,
bottleneck=True,
model_name="pyramidnet236_a220_bn_cifar100",
**kwargs)
def pyramidnet272_a200_bn_cifar10(num_classes=10, **kwargs):
"""
PyramidNet-272 (a=200, bn) model for CIFAR-10 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
num_classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
num_classes=num_classes,
blocks=272,
alpha=200,
bottleneck=True,
model_name="pyramidnet272_a200_bn_cifar10",
**kwargs)
def pyramidnet272_a200_bn_cifar100(num_classes=100, **kwargs):
"""
PyramidNet-272 (a=200, bn) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,'
https://arxiv.org/abs/1610.02915.
Parameters:
----------
num_classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_pyramidnet_cifar(
num_classes=num_classes,
blocks=272,
alpha=200,
bottleneck=True,
model_name="pyramidnet272_a200_bn_cifar100",
**kwargs)
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
import torch
from torch.autograd import Variable
pretrained = False
models = [
(pyramidnet110_a48_cifar10, 10),
(pyramidnet110_a48_cifar100, 100),
(pyramidnet110_a84_cifar10, 10),
(pyramidnet110_a84_cifar100, 100),
(pyramidnet110_a270_cifar10, 10),
(pyramidnet110_a270_cifar100, 100),
(pyramidnet164_a270_bn_cifar10, 10),
(pyramidnet164_a270_bn_cifar100, 100),
(pyramidnet200_a240_bn_cifar10, 10),
(pyramidnet200_a240_bn_cifar100, 100),
(pyramidnet236_a220_bn_cifar10, 10),
(pyramidnet236_a220_bn_cifar100, 100),
(pyramidnet272_a200_bn_cifar10, 10),
(pyramidnet272_a200_bn_cifar100, 100),
]
for model, num_classes in models:
net = model(pretrained=pretrained, num_classes=num_classes)
# net.train()
net.eval()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != pyramidnet110_a48_cifar10 or weight_count == 1772706)
assert (model != pyramidnet110_a48_cifar100 or weight_count == 1778556)
assert (model != pyramidnet110_a84_cifar10 or weight_count == 3904446)
assert (model != pyramidnet110_a84_cifar100 or weight_count == 3913536)
assert (model != pyramidnet110_a270_cifar10 or weight_count == 28485477)
assert (model != pyramidnet110_a270_cifar100 or weight_count == 28511307)
assert (model != pyramidnet164_a270_bn_cifar10 or weight_count == 27216021)
assert (model != pyramidnet164_a270_bn_cifar100 or weight_count == 27319071)
assert (model != pyramidnet200_a240_bn_cifar10 or weight_count == 26752702)
assert (model != pyramidnet200_a240_bn_cifar100 or weight_count == 26844952)
assert (model != pyramidnet236_a220_bn_cifar10 or weight_count == 26969046)
assert (model != pyramidnet236_a220_bn_cifar100 or weight_count == 27054096)
assert (model != pyramidnet272_a200_bn_cifar10 or weight_count == 26210842)
assert (model != pyramidnet272_a200_bn_cifar100 or weight_count == 26288692)
x = Variable(torch.randn(1, 3, 32, 32))
y = net(x)
assert (tuple(y.size()) == (1, num_classes))
if __name__ == "__main__":
_test()
| 33.333333 | 120 | 0.641 |
915c69b11ec1626176128d0d9628e6a8b119f1bc | 387 | py | Python | test/sample_test.py | murphyGo/MurphyML | a079d3418464ad9183a3292688b4f4476526db36 | ["MIT"] | null | null | null | test/sample_test.py | murphyGo/MurphyML | a079d3418464ad9183a3292688b4f4476526db36 | ["MIT"] | null | null | null | test/sample_test.py | murphyGo/MurphyML | a079d3418464ad9183a3292688b4f4476526db36 | ["MIT"] | null | null | null |
import unittest
# unittest sample
class SampleTest(unittest.TestCase):
def test_sample(self):
self.assertTrue(True, "always pass")
def test_str(self):
subStr = 'hihi'
fullStr = f"\u0394\u2341{subStr}"
self.assertEqual(type(fullStr), str)
# pytest sample
def test_sample2():
assert 1 < 2
if __name__ == '__main__':
unittest.main()
| 16.125 | 44 | 0.640827 |
bac24e328d9e0f914dfc3ee2b07df6c2cdc59a93 | 3,177 | py | Python | python/docs/source/conf.py | fvaleye/metadata-guardian | ab5d6cada67785c3cfd98112f68e8fdb193d1617 | ["Apache-2.0"] | 9 | 2021-12-31T20:32:35.000Z | 2022-02-18T17:51:49.000Z | python/docs/source/conf.py | fvaleye/metadata-guardian | ab5d6cada67785c3cfd98112f68e8fdb193d1617 | ["Apache-2.0"] | 1 | 2022-02-25T16:35:04.000Z | 2022-02-28T21:08:53.000Z | python/docs/source/conf.py | fvaleye/metadata-guardian | ab5d6cada67785c3cfd98112f68e8fdb193d1617 | ["Apache-2.0"] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import toml
sys.path.insert(0, os.path.abspath("../metadata_guardian/"))
sys.path.insert(0, os.path.abspath("./_ext"))
def get_release_version() -> str:
"""
Get the release version from the Cargo.toml file
:return:
"""
cargo_content = toml.load("../../Cargo.toml")
return cargo_content["package"]["version"]
# -- Project information -----------------------------------------------------
project = "metadata-guardian"
copyright = "2021 Metadata Guardian contributors"
author = "Florian Valeye"
version = get_release_version()
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc", "edit_on_github"]
autodoc_typehints = "description"
nitpicky = True
nitpick_ignore = [
("py:class", "RawDataRules"),
("py:class", "pyarrow._fs.FileSystem"),
("py:class", "pyarrow._dataset.Dataset"),
("py:class", "pyarrow.lib.Schema"),
("py:class", "avro.datafile.DataFileReader"),
("py:class", "mypy_boto3_athena.client.AthenaClient"),
("py:class", "mypy_boto3_glue.client.GlueClient"),
("py:class", "snowflake.connector.connection.SnowflakeConnection"),
("py:class", "deltalake.data_catalog.DataCatalog"),
(
"py:class",
"confluent_kafka.schema_registry.schema_registry_client.SchemaRegistryClient",
),
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "pydata_sphinx_theme"
html_logo = "../../../logo.png"
html_favicon = "../../../logo.png"
html_theme_options = {
"external_links": [],
"github_url": "https://github.com/fvaleye/MetadataGuardian",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
edit_on_github_project = "fvaleye/MetadataGuardian"
edit_on_github_branch = "main"
page_source_prefix = "python/docs/source"
| 33.797872 | 86 | 0.67548 |
bd0eb9086498ec7c3d75e8e3ae937d856aca3b38 | 9,001 | py | Python | pyqtgraph/widgets/ScatterPlotWidget.py | feketeimre/pyqtgraph | b421cc27905338438e99cf19d4962c51e64079bd | ["MIT"] | null | null | null | pyqtgraph/widgets/ScatterPlotWidget.py | feketeimre/pyqtgraph | b421cc27905338438e99cf19d4962c51e64079bd | ["MIT"] | null | null | null | pyqtgraph/widgets/ScatterPlotWidget.py | feketeimre/pyqtgraph | b421cc27905338438e99cf19d4962c51e64079bd | ["MIT"] | 1 | 2019-07-15T19:35:15.000Z | 2019-07-15T19:35:15.000Z |
from ..Qt import QtGui, QtCore
from .PlotWidget import PlotWidget
from .DataFilterWidget import DataFilterParameter
from .ColorMapWidget import ColorMapParameter
from .. import parametertree as ptree
from .. import functions as fn
from .. import getConfigOption
from ..graphicsItems.TextItem import TextItem
import numpy as np
from ..pgcollections import OrderedDict
__all__ = ['ScatterPlotWidget']
class ScatterPlotWidget(QtGui.QSplitter):
"""
This is a high-level widget for exploring relationships in tabular data.
Given a multi-column record array, the widget displays a scatter plot of a
specific subset of the data. Includes controls for selecting the columns to
plot, filtering data, and determining symbol color and shape.
The widget consists of four components:
1) A list of column names from which the user may select 1 or 2 columns
to plot. If one column is selected, the data for that column will be
plotted in a histogram-like manner by using :func:`pseudoScatter()
<pyqtgraph.pseudoScatter>`. If two columns are selected, then the
scatter plot will be generated with x determined by the first column
that was selected and y by the second.
2) A DataFilter that allows the user to select a subset of the data by
specifying multiple selection criteria.
3) A ColorMap that allows the user to determine how points are colored by
specifying multiple criteria.
4) A PlotWidget for displaying the data.
"""
sigScatterPlotClicked = QtCore.Signal(object, object)
def __init__(self, parent=None):
QtGui.QSplitter.__init__(self, QtCore.Qt.Horizontal)
self.ctrlPanel = QtGui.QSplitter(QtCore.Qt.Vertical)
self.addWidget(self.ctrlPanel)
self.fieldList = QtGui.QListWidget()
self.fieldList.setSelectionMode(self.fieldList.ExtendedSelection)
self.ptree = ptree.ParameterTree(showHeader=False)
self.filter = DataFilterParameter()
self.colorMap = ColorMapParameter()
self.params = ptree.Parameter.create(name='params', type='group', children=[self.filter, self.colorMap])
self.ptree.setParameters(self.params, showTop=False)
self.plot = PlotWidget()
self.ctrlPanel.addWidget(self.fieldList)
self.ctrlPanel.addWidget(self.ptree)
self.addWidget(self.plot)
bg = fn.mkColor(getConfigOption('background'))
bg.setAlpha(150)
self.filterText = TextItem(border=getConfigOption('foreground'), color=bg)
self.filterText.setPos(60,20)
self.filterText.setParentItem(self.plot.plotItem)
self.data = None
self.mouseOverField = None
self.scatterPlot = None
self.style = dict(pen=None, symbol='o')
self.fieldList.itemSelectionChanged.connect(self.fieldSelectionChanged)
self.filter.sigFilterChanged.connect(self.filterChanged)
self.colorMap.sigColorMapChanged.connect(self.updatePlot)
def setFields(self, fields, mouseOverField=None):
"""
Set the list of field names/units to be processed.
The format of *fields* is the same as used by
:func:`ColorMapWidget.setFields <pyqtgraph.widgets.ColorMapWidget.ColorMapParameter.setFields>`
"""
self.fields = OrderedDict(fields)
self.mouseOverField = mouseOverField
self.fieldList.clear()
for f,opts in fields:
item = QtGui.QListWidgetItem(f)
item.opts = opts
item = self.fieldList.addItem(item)
self.filter.setFields(fields)
self.colorMap.setFields(fields)
def setSelectedFields(self, *fields):
self.fieldList.itemSelectionChanged.disconnect(self.fieldSelectionChanged)
try:
self.fieldList.clearSelection()
for f in fields:
                i = list(self.fields.keys()).index(f)
item = self.fieldList.item(i)
item.setSelected(True)
finally:
self.fieldList.itemSelectionChanged.connect(self.fieldSelectionChanged)
self.fieldSelectionChanged()
def setData(self, data):
"""
Set the data to be processed and displayed.
Argument must be a numpy record array.
"""
self.data = data
self.filtered = None
self.updatePlot()
def fieldSelectionChanged(self):
sel = self.fieldList.selectedItems()
if len(sel) > 2:
self.fieldList.blockSignals(True)
try:
for item in sel[1:-1]:
item.setSelected(False)
finally:
self.fieldList.blockSignals(False)
self.updatePlot()
def filterChanged(self, f):
self.filtered = None
self.updatePlot()
desc = self.filter.describe()
if len(desc) == 0:
self.filterText.setVisible(False)
else:
self.filterText.setText('\n'.join(desc))
self.filterText.setVisible(True)
def updatePlot(self):
self.plot.clear()
if self.data is None:
return
if self.filtered is None:
self.filtered = self.filter.filterData(self.data)
data = self.filtered
if len(data) == 0:
return
colors = np.array([fn.mkBrush(*x) for x in self.colorMap.map(data)])
style = self.style.copy()
## Look up selected columns and units
sel = list([str(item.text()) for item in self.fieldList.selectedItems()])
units = list([item.opts.get('units', '') for item in self.fieldList.selectedItems()])
if len(sel) == 0:
self.plot.setTitle('')
return
if len(sel) == 1:
self.plot.setLabels(left=('N', ''), bottom=(sel[0], units[0]), title='')
if len(data) == 0:
return
#x = data[sel[0]]
#y = None
xy = [data[sel[0]], None]
elif len(sel) == 2:
self.plot.setLabels(left=(sel[1],units[1]), bottom=(sel[0],units[0]))
if len(data) == 0:
return
xy = [data[sel[0]], data[sel[1]]]
#xydata = []
#for ax in [0,1]:
#d = data[sel[ax]]
                ### scatter categorical values just a bit so they show up better in the scatter plot.
##if sel[ax] in ['MorphologyBSMean', 'MorphologyTDMean', 'FIType']:
##d += np.random.normal(size=len(cells), scale=0.1)
#xydata.append(d)
#x,y = xydata
## convert enum-type fields to float, set axis labels
enum = [False, False]
for i in [0,1]:
axis = self.plot.getAxis(['bottom', 'left'][i])
if xy[i] is not None and (self.fields[sel[i]].get('mode', None) == 'enum' or xy[i].dtype.kind in ('S', 'O')):
vals = self.fields[sel[i]].get('values', list(set(xy[i])))
xy[i] = np.array([vals.index(x) if x in vals else len(vals) for x in xy[i]], dtype=float)
axis.setTicks([list(enumerate(vals))])
enum[i] = True
else:
axis.setTicks(None) # reset to automatic ticking
## mask out any nan values
mask = np.ones(len(xy[0]), dtype=bool)
if xy[0].dtype.kind == 'f':
mask &= np.isfinite(xy[0])
if xy[1] is not None and xy[1].dtype.kind == 'f':
mask &= np.isfinite(xy[1])
xy[0] = xy[0][mask]
style['symbolBrush'] = colors[mask]
## Scatter y-values for a histogram-like appearance
if xy[1] is None:
## column scatter plot
xy[1] = fn.pseudoScatter(xy[0])
else:
## beeswarm plots
xy[1] = xy[1][mask]
for ax in [0,1]:
if not enum[ax]:
continue
imax = int(xy[ax].max()) if len(xy[ax]) > 0 else 0
for i in range(imax+1):
keymask = xy[ax] == i
scatter = fn.pseudoScatter(xy[1-ax][keymask], bidir=True)
if len(scatter) == 0:
continue
smax = np.abs(scatter).max()
if smax != 0:
scatter *= 0.2 / smax
xy[ax][keymask] += scatter
if self.scatterPlot is not None:
try:
self.scatterPlot.sigPointsClicked.disconnect(self.plotClicked)
except:
pass
self.scatterPlot = self.plot.plot(xy[0], xy[1], data=data[mask], **style)
self.scatterPlot.sigPointsClicked.connect(self.plotClicked)
def plotClicked(self, plot, points):
self.sigScatterPlotClicked.emit(self, points)
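# --- Added illustrative sketch (not part of the upstream pyqtgraph file) ---
# A minimal usage example of the widget defined above: declare the fields,
# hand over a numpy record array, and show the splitter. The field names,
# units and data below are made up; a Qt binding must be installed, and the
# module has to run inside the pyqtgraph package because of the relative
# imports at the top of this file.
if __name__ == '__main__':
    app = QtGui.QApplication([])
    spw = ScatterPlotWidget()
    # Record array with two float columns and one categorical column.
    data = np.array(
        [(1.0, 10.0, 'A'), (2.0, 12.0, 'B'), (3.0, 9.0, 'A')],
        dtype=[('x', float), ('y', float), ('label', object)])
    spw.setFields([
        ('x', {'units': 'm'}),
        ('y', {'units': 'V'}),
        ('label', {'mode': 'enum', 'values': ['A', 'B']}),
    ])
    spw.setData(data)
    spw.show()
    app.exec_()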
| 38.965368
| 121
| 0.576936
|
ad6d8b82a642d86349b749e9de501a814a68bf2f
| 18,162
|
py
|
Python
|
spherov2/controls/v2.py
|
Cole1220/spherov2.py
|
8981cee23055bf36bafc75c4355a8930a00eb6bd
|
[
"MIT"
] | 23
|
2020-06-22T16:17:21.000Z
|
2022-03-15T15:53:39.000Z
|
spherov2/controls/v2.py
|
Cole1220/spherov2.py
|
8981cee23055bf36bafc75c4355a8930a00eb6bd
|
[
"MIT"
] | 16
|
2020-06-11T12:52:26.000Z
|
2021-12-11T02:38:23.000Z
|
spherov2/controls/v2.py
|
Cole1220/spherov2.py
|
8981cee23055bf36bafc75c4355a8930a00eb6bd
|
[
"MIT"
] | 5
|
2021-01-14T17:39:27.000Z
|
2021-07-30T19:09:07.000Z
|
import threading
from collections import OrderedDict, defaultdict
from enum import IntEnum, Enum, auto, IntFlag
from typing import Dict, List, Callable, NamedTuple, Tuple
from spherov2.commands.drive import DriveFlags
from spherov2.commands.drive import RawMotorModes as DriveRawMotorModes
from spherov2.commands.io import IO
from spherov2.controls import RawMotorModes, PacketDecodingException, CommandExecuteError
from spherov2.helper import to_bytes, to_int, packet_chk
from spherov2.listeners.sensor import StreamingServiceData
class Packet(NamedTuple):
"""Packet protocol v2, from https://sdk.sphero.com/docs/api_spec/general_api
[SOP, FLAGS, TID (optional), SID (optional), DID, CID, SEQ, ERR (at response), DATA..., CHK, EOP]"""
flags: 'Packet.Flags'
did: int
cid: int
seq: int
tid: int
sid: int
data: bytearray
err: 'Packet.Error' = None
class Flags(IntFlag):
is_response = 0b1
requests_response = 0b10
requests_only_error_response = 0b100
is_activity = 0b1000
has_target_id = 0b10000
has_source_id = 0b100000
unused = 0b1000000
extended_flags = 0b10000000
class Encoding(IntEnum):
escape = 0xAB
start = 0x8D
end = 0xD8
escaped_escape = 0x23
escaped_start = 0x05
escaped_end = 0x50
class Error(IntEnum):
success = 0x00
bad_device_id = 0x01
bad_command_id = 0x02
not_yet_implemented = 0x03
command_is_restricted = 0x04
bad_data_length = 0x05
command_failed = 0x06
bad_parameter_value = 0x07
busy = 0x08
bad_target_id = 0x09
target_unavailable = 0x0a
@staticmethod
def parse_response(data) -> 'Packet':
sop, *data, eop = data
if sop != Packet.Encoding.start:
raise PacketDecodingException('Unexpected start of packet')
if eop != Packet.Encoding.end:
raise PacketDecodingException('Unexpected end of packet')
*data, chk = Packet.__unescape_data(data)
if packet_chk(data) != chk:
raise PacketDecodingException('Bad response checksum')
flags = data.pop(0)
tid = None
if flags & Packet.Flags.has_target_id:
tid = data.pop(0)
sid = None
if flags & Packet.Flags.has_source_id:
sid = data.pop(0)
did, cid, seq, *data = data
err = Packet.Error.success
if flags & Packet.Flags.is_response:
err = Packet.Error(data.pop(0))
return Packet(flags, did, cid, seq, tid, sid, bytearray(data), err)
@staticmethod
def __unescape_data(response_data) -> List[int]:
raw_data = []
iter_response_data = iter(response_data)
for b in iter_response_data:
if b == Packet.Encoding.escape:
b = next(iter_response_data, None)
if b == Packet.Encoding.escaped_escape:
b = Packet.Encoding.escape
elif b == Packet.Encoding.escaped_start:
b = Packet.Encoding.start
elif b == Packet.Encoding.escaped_end:
b = Packet.Encoding.end
else:
raise PacketDecodingException('Unexpected escaping byte')
raw_data.append(b)
return raw_data
@property
def id(self) -> Tuple:
return self.did, self.cid, self.seq
def build(self) -> bytearray:
packet = bytearray([self.flags])
if self.flags & Packet.Flags.has_target_id:
packet.append(self.tid)
if self.flags & Packet.Flags.has_source_id:
packet.append(self.sid)
packet.extend(self.id)
if self.flags & Packet.Flags.is_response:
packet.append(self.err)
packet.extend(self.data)
packet.append(packet_chk(packet))
escaped_packet = bytearray([Packet.Encoding.start])
for c in packet:
if c == Packet.Encoding.escape:
escaped_packet.extend((Packet.Encoding.escape, Packet.Encoding.escaped_escape))
elif c == Packet.Encoding.start:
escaped_packet.extend((Packet.Encoding.escape, Packet.Encoding.escaped_start))
elif c == Packet.Encoding.end:
escaped_packet.extend((Packet.Encoding.escape, Packet.Encoding.escaped_end))
else:
escaped_packet.append(c)
escaped_packet.append(Packet.Encoding.end)
return escaped_packet
def check_error(self):
if self.err != Packet.Error.success:
raise CommandExecuteError(self.err)
class Manager:
def __init__(self):
self.__seq = 0
def new_packet(self, did, cid, tid=None, data=None):
flags = Packet.Flags.requests_response | Packet.Flags.is_activity
sid = None
if tid is not None:
flags |= Packet.Flags.has_source_id | Packet.Flags.has_target_id
sid = 0x1
packet = Packet(flags, did, cid, self.__seq, tid, sid, bytearray(data or []))
self.__seq = (self.__seq + 1) % 0xff
return packet
class Collector:
def __init__(self, callback):
self.__callback = callback
self.__data = []
def add(self, data):
for b in data:
self.__data.append(b)
if b == Packet.Encoding.end:
pkt = self.__data
self.__data = []
if len(pkt) < 6:
raise PacketDecodingException(f'Very small packet {[hex(x) for x in pkt]}')
self.__callback(Packet.parse_response(pkt))
class AnimationControl:
def __init__(self, toy):
self.__toy = toy
class DriveControl:
def __init__(self, toy):
self.__toy = toy
self.__is_boosting = False
def roll_start(self, heading, speed):
flag = DriveFlags.FORWARD
if speed < 0:
flag = DriveFlags.BACKWARD
heading = (heading + 180) % 360
if self.__is_boosting:
flag |= DriveFlags.TURBO
speed = min(255, abs(speed))
self.__toy.drive_with_heading(speed, heading, flag)
def roll_stop(self, heading):
self.roll_start(heading, 0)
set_heading = roll_stop
def set_stabilization(self, stabilize):
self.__toy.set_stabilization(stabilize)
def set_raw_motors(self, left_mode, left_speed, right_mode, right_speed):
if left_mode == RawMotorModes.FORWARD:
left_drive_mode = DriveRawMotorModes.FORWARD
elif left_mode == RawMotorModes.REVERSE:
left_drive_mode = DriveRawMotorModes.REVERSE
else:
left_drive_mode = DriveRawMotorModes.OFF
if right_mode == RawMotorModes.FORWARD:
right_drive_mode = DriveRawMotorModes.FORWARD
elif right_mode == RawMotorModes.REVERSE:
right_drive_mode = DriveRawMotorModes.REVERSE
else:
right_drive_mode = DriveRawMotorModes.OFF
self.__toy.set_raw_motors(left_drive_mode, left_speed, right_drive_mode, right_speed)
def reset_heading(self):
self.__toy.reset_yaw()
class FirmwareUpdateControl:
def __init__(self, toy):
self.__toy = toy
class LedControl:
def __init__(self, toy):
self.__toy = toy
def set_leds(self, mapping: Dict[IntEnum, int]):
mask = 0
led_values = []
for e in self.__toy.LEDs:
if e in mapping:
mask |= 1 << e
led_values.append(mapping[e])
if mask:
if self.__toy.implements(IO.set_all_leds_with_32_bit_mask):
self.__toy.set_all_leds_with_32_bit_mask(mask, led_values)
elif self.__toy.implements(IO.set_all_leds_with_16_bit_mask):
self.__toy.set_all_leds_with_16_bit_mask(mask, led_values)
elif hasattr(self.__toy, 'set_all_leds_with_8_bit_mask'):
self.__toy.set_all_leds_with_8_bit_mask(mask, led_values)
class SensorControl:
def __init__(self, toy):
toy.add_sensor_streaming_data_notify_listener(self.__process_sensor_stream_data)
self.__toy = toy
self.__count = 0
self.__interval = 250
self.__enabled = {}
self.__enabled_extended = {}
self.__listeners = set()
def __process_sensor_stream_data(self, sensor_data: List[float]):
data = {}
def __new_data():
n = {}
for name, component in components.items():
d = sensor_data.pop(0)
if component.modifier:
d = component.modifier(d)
n[name] = d
data[sensor] = n
for sensor, components in self.__toy.sensors.items():
if sensor in self.__enabled:
__new_data()
for sensor, components in self.__toy.extended_sensors.items():
if sensor in self.__enabled_extended:
__new_data()
for f in self.__listeners:
threading.Thread(target=f, args=(data,)).start()
def add_sensor_data_listener(self, listener: Callable[[Dict[str, Dict[str, float]]], None]):
self.__listeners.add(listener)
def remove_sensor_data_listener(self, listener: Callable[[Dict[str, Dict[str, float]]], None]):
self.__listeners.remove(listener)
def set_count(self, count: int):
if count >= 0 and count != self.__count:
self.__count = count
self.__update()
def set_interval(self, interval: int):
if interval >= 0 and interval != self.__interval:
self.__interval = interval
self.__update()
def __update(self):
sensors_mask = extended_sensors_mask = 0
for sensor in self.__enabled.values():
for component in sensor.values():
sensors_mask |= component.bit
for sensor in self.__enabled_extended.values():
for component in sensor.values():
extended_sensors_mask |= component.bit
self.__toy.set_sensor_streaming_mask(0, self.__count, sensors_mask)
self.__toy.set_extended_sensor_streaming_mask(extended_sensors_mask)
self.__toy.set_sensor_streaming_mask(self.__interval, self.__count, sensors_mask)
def enable(self, *sensors):
for sensor in sensors:
if sensor in self.__toy.sensors:
self.__enabled[sensor] = self.__toy.sensors[sensor]
elif sensor in self.__toy.extended_sensors:
self.__enabled_extended[sensor] = self.__toy.extended_sensors[sensor]
self.__update()
def disable(self, *sensors):
for sensor in sensors:
self.__enabled.pop(sensor, None)
self.__enabled_extended.pop(sensor, None)
self.__update()
def disable_all(self):
self.__enabled.clear()
self.__enabled_extended.clear()
self.__update()
class StatsControl:
def __init__(self, toy):
self.__toy = toy
class Processors(IntEnum):
UNKNOWN = 0
PRIMARY = 1
SECONDARY = 2
class StreamingServiceAttribute(NamedTuple):
min_value: int
max_value: int
modifier: Callable[[float], float] = None
class StreamingDataSizes(IntEnum):
EightBit = 0
SixteenBit = 1
ThirtyTwoBit = 2
class StreamingService(NamedTuple):
attributes: OrderedDict
slot: int
processor: Processors = Processors.SECONDARY
data_size: StreamingDataSizes = StreamingDataSizes.ThirtyTwoBit
class StreamingServiceState(Enum):
Unknown = auto()
Stop = auto()
Start = auto()
Restart = auto()
class StreamingControl:
__streaming_services = {
'quaternion': StreamingService(OrderedDict(
w=StreamingServiceAttribute(-1, 1),
x=StreamingServiceAttribute(-1, 1),
y=StreamingServiceAttribute(-1, 1),
z=StreamingServiceAttribute(-1, 1)
), 1),
'imu': StreamingService(OrderedDict(
pitch=StreamingServiceAttribute(-180, 180),
roll=StreamingServiceAttribute(-90, 90),
yaw=StreamingServiceAttribute(-180, 180)
), 1),
'accelerometer': StreamingService(OrderedDict(
x=StreamingServiceAttribute(-16, 16),
y=StreamingServiceAttribute(-16, 16),
z=StreamingServiceAttribute(-16, 16)
), 1),
'color_detection': StreamingService(OrderedDict(
r=StreamingServiceAttribute(0, 255),
g=StreamingServiceAttribute(0, 255),
b=StreamingServiceAttribute(0, 255),
index=StreamingServiceAttribute(0, 255),
confidence=StreamingServiceAttribute(0, 1)
), 1, Processors.PRIMARY, StreamingDataSizes.EightBit),
'gyroscope': StreamingService(OrderedDict(
x=StreamingServiceAttribute(-2000, 2000),
y=StreamingServiceAttribute(-2000, 2000),
z=StreamingServiceAttribute(-2000, 2000)
), 1),
'core_time_lower': StreamingService(OrderedDict(time_lower=StreamingServiceAttribute(0, 1 << 63)), 3),
'locator': StreamingService(OrderedDict(
x=StreamingServiceAttribute(-16000, 16000, lambda x: x * 100.),
y=StreamingServiceAttribute(-16000, 16000, lambda x: x * 100.),
), 2),
'velocity': StreamingService(OrderedDict(
x=StreamingServiceAttribute(-5, 5, lambda x: x * 100.),
y=StreamingServiceAttribute(-5, 5, lambda x: x * 100.),
), 2),
'speed': StreamingService(OrderedDict(speed=StreamingServiceAttribute(0, 5, lambda x: x * 100.)), 2),
'core_time_upper': StreamingService(OrderedDict(time_upper=StreamingServiceAttribute(0, 1 << 63)), 3),
'ambient_light': StreamingService(
OrderedDict(light=StreamingServiceAttribute(0, 120000)), 2, Processors.PRIMARY
),
}
def __init__(self, toy):
toy.add_streaming_service_data_notify_listener(self.__streaming_service_data)
self.__toy = toy
self.__slots = {
Processors.PRIMARY: defaultdict(list),
Processors.SECONDARY: defaultdict(list)
}
self.__enabled = set()
self.__listeners = set()
self.__interval = 500
def add_sensor_data_listener(self, listener: Callable[[Dict[str, Dict[str, float]]], None]):
self.__listeners.add(listener)
def remove_sensor_data_listener(self, listener: Callable[[Dict[str, Dict[str, float]]], None]):
self.__listeners.remove(listener)
def enable(self, *sensors):
changed = False
for sensor in sensors:
if sensor not in self.__enabled and sensor in self.__streaming_services:
self.__enabled.add(sensor)
changed = True
if changed:
self.__configure(StreamingServiceState.Start)
def disable(self, *sensors):
changed = False
for sensor in sensors:
if sensor in self.__enabled:
self.__enabled.remove(sensor)
changed = True
if changed:
self.__configure(StreamingServiceState.Start if self.__enabled else StreamingServiceState.Stop)
def disable_all(self):
if not self.__enabled:
return
self.__enabled.clear()
self.__configure(StreamingServiceState.Stop)
def set_count(self, count: int):
pass
def set_interval(self, interval: int):
if interval < 0:
raise ValueError('Interval attempted to be set with negative value')
self.__interval = interval
if self.__enabled:
self.__configure(StreamingServiceState.Restart)
def __configure(self, state: StreamingServiceState):
for target in [Processors.PRIMARY, Processors.SECONDARY]:
self.__toy.stop_streaming_service(target)
if state == StreamingServiceState.Stop:
self.__toy.clear_streaming_service(target)
elif state == StreamingServiceState.Start:
self.__toy.clear_streaming_service(target)
slots = self.__slots[target]
slots.clear()
for index, (s, sensor) in enumerate(self.__streaming_services.items()):
if s in self.__enabled and sensor.processor == target:
slots[sensor.slot].append((index, s, sensor))
if slots:
for slot, services in slots.items():
data = []
for index, _, sensor in services:
data.extend(to_bytes(index, 2))
data.append(sensor.data_size)
self.__toy.configure_streaming_service(slot, data, target)
self.__toy.start_streaming_service(self.__interval, target)
elif state == StreamingServiceState.Restart:
self.__toy.start_streaming_service(self.__interval, target)
def __streaming_service_data(self, source_id, data: StreamingServiceData):
node = data.token & 0xf
processor = source_id & 0xf
sensor_data = data.sensor_data
services = self.__slots[processor][node]
data = {}
for _, sensor_name, sensor in services:
n = {}
for name, component in sensor.attributes.items():
data_size = 1 << sensor.data_size
value, sensor_data = to_int(sensor_data[:data_size]), sensor_data[data_size:]
value = value / ((1 << data_size * 8) - 1) * (
component.max_value - component.min_value) + component.min_value
if component.modifier is not None:
value = component.modifier(value)
n[name] = value
if sensor_name == 'color_detection' and node != 0:
continue
data[sensor_name] = n
for f in self.__listeners:
threading.Thread(target=f, args=(data,)).start()
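# --- Added illustrative sketch (not part of the upstream spherov2 file) ---
# Shows how Manager, Packet and Collector fit together: build a request
# packet, then feed its escaped bytes through a Collector to parse it back.
# The DID/CID/payload values are arbitrary placeholders, not real commands.
if __name__ == '__main__':
    manager = Manager()
    request = manager.new_packet(did=0x16, cid=0x07, data=[0x01, 0x02])
    raw = request.build()  # escaped bytes framed with start/end markers

    def _on_packet(packet: Packet):
        print('parsed did=%#x cid=%#x data=%s' % (packet.did, packet.cid, list(packet.data)))

    collector = Collector(_on_packet)
    collector.add(raw)  # splits on the end-of-packet marker and parses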
| 35.611765
| 110
| 0.613864
|
8ce6ea873c70fec92b504405bfc134392dc9a19d
| 1,169
|
py
|
Python
|
pythonlearn/Third-Module/Tornado/TEST_DEMOS/Form/server.py
|
yc19890920/Learn
|
3990e75b469225ba7b430539ef9a16abe89eb863
|
[
"Apache-2.0"
] | 1
|
2021-01-11T06:30:44.000Z
|
2021-01-11T06:30:44.000Z
|
pythonlearn/Third-Module/Tornado/TEST_DEMOS/Form/server.py
|
yc19890920/Learn
|
3990e75b469225ba7b430539ef9a16abe89eb863
|
[
"Apache-2.0"
] | 23
|
2020-02-12T02:35:49.000Z
|
2022-02-11T03:45:40.000Z
|
pythonlearn/Third-Module/Tornado/TEST_DEMOS/Form/server.py
|
yc19890920/Learn
|
3990e75b469225ba7b430539ef9a16abe89eb863
|
[
"Apache-2.0"
] | 2
|
2020-04-08T15:39:46.000Z
|
2020-10-10T10:13:09.000Z
|
# -*- coding: utf-8 -*-
#
import os
import tornado.web
import tornado.ioloop
import tornado.options
import tornado.httpserver
from tornado import locale
from tornado.options import define, options
define("port", default=8000, help="run on the given port", type=int)
class IndexHandler(tornado.web.RequestHandler):
def get(self):
self.render('index.html')
class PoemPageHandle(tornado.web.RequestHandler):
def post(self):
noun1 = self.get_argument('noun1')
noun2 = self.get_argument('noun2')
verb = self.get_argument('verb')
noun3 = self.get_argument('noun3')
self.render('poem.html', roads=noun1, wood=noun2, made=verb,
difference=noun3)
if __name__ == "__main__":
tornado.options.parse_command_line()
app = tornado.web.Application(
handlers=[
(r'/', IndexHandler),
(r'/poem', PoemPageHandle)
],
template_path=os.path.join(os.path.dirname(__file__), "templates")
)
server = tornado.httpserver.HTTPServer(app)
server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
| 28.512195
| 75
| 0.640719
|
ba627fcb347a4e424105628be406de1ede07998c
| 3,283
|
py
|
Python
|
pomdp_py/utils/interfaces/simple_rl.py
|
beckymathew/pomdp-py
|
4864514277e49dee4b7c3fb303970de7b6a888ea
|
[
"MIT"
] | 87
|
2020-02-16T03:12:10.000Z
|
2022-03-31T08:38:10.000Z
|
pomdp_py/utils/interfaces/simple_rl.py
|
beckymathew/pomdp-py
|
4864514277e49dee4b7c3fb303970de7b6a888ea
|
[
"MIT"
] | 15
|
2020-08-01T00:25:33.000Z
|
2022-02-19T22:37:11.000Z
|
pomdp_py/utils/interfaces/simple_rl.py
|
beckymathew/pomdp-py
|
4864514277e49dee4b7c3fb303970de7b6a888ea
|
[
"MIT"
] | 26
|
2020-02-20T01:15:33.000Z
|
2022-03-30T16:21:37.000Z
|
"""
Provides utility functions that interfaces with `simple_rl <https://github.com/david-abel/simple_rl>`_.
Essentially, this will convert an agent in pomdp_py into a simple_rl.MDPClass
or POMDPClass. Note that since creating these classes require enumerable
action and observation spaces, this conversion is only feasible for agents
whose ObservationModel and PolicyModel can return a list of all observations /
actions.
Note: simple_rl is a library for Reinforcement Learning developed and
maintained by David Abel. It is also an early-stage library.
Warning:
    simple_rl's POMDP functionality is currently relatively
    lacking. Providing this interface is mostly to potentially leverage the MDP
algorithms in simple_rl.
"""
import simple_rl
def convert_to_MDPClass(pomdp, discount_factor=0.99, step_cost=0):
"""Converts the pomdp to the building block MDPClass of simple_rl. There are a lot of
variants of MDPClass in simple_rl. It is up to the user to then convert this
MDPClass into those variants, if necessary.
Clearly, if this agent is partially observable, this conversion
will change the problem and make it no longer a POMDP."""
agent = pomdp.agent
env = pomdp.env
try:
all_actions = agent.policy_model.get_all_actions()
except NotImplementedError:
raise ValueError("This agent does not have enumerable action space.")
# Since we do not know how env.state is represented, we
# cannot turn it into a simple_rl State with "features",
# since the features must be represented as a list; In
# any case, the user, with knowledge of the state
    # representation, could later convert it into the format
# that simple_rl is supposed to work with.
    state = simple_rl.State(data=env.state)
    # simple_rl.MDP expects an initial state; pass the wrapped environment state.
    return simple_rl.MDP(all_actions,
                         agent.transition_model.sample,
                         agent.reward_model.sample,
                         state,
                         gamma=discount_factor,
                         step_cost=step_cost)
def convert_to_POMDPClass(pomdp,
discount_factor=0.99, step_cost=0,
belief_updater_type="discrete"):
agent = pomdp.agent
env = pomdp.env
try:
all_actions = agent.policy_model.get_all_actions()
except NotImplementedError:
raise ValueError("This agent does not have enumerable action space.")
try:
all_observations = agent.observation_model.get_all_observations()
except NotImplementedError:
raise ValueError("This agent does not have enumerable observation space.")
try:
belief_hist = agent.belief.get_histogram()
except Exception:
raise ValueError("Agent belief cannot be converted into a histogram;\n"
"thus cannot create POMDPClass.")
return simple_rl.POMDP(all_actions,
all_observations,
agent.transition_model.sample,
agent.reward_model.sample,
agent.observation_model.sample,
belief_hist,
belief_updater_type=belief_updater_type,
gamma=discount_factor,
step_cost=step_cost)
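# --- Added illustrative sketch (not part of the upstream pomdp_py file) ---
# Rough usage, assuming `my_pomdp` is a pomdp_py POMDP whose PolicyModel and
# ObservationModel can enumerate their spaces and whose belief is a histogram:
#
#     mdp = convert_to_MDPClass(my_pomdp, discount_factor=0.95)
#     pomdp = convert_to_POMDPClass(my_pomdp, belief_updater_type="discrete")
#
# Both helpers raise ValueError when the agent's action/observation spaces are
# not enumerable or the belief cannot be converted to a histogram.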
| 42.089744
| 103
| 0.673469
|
f149870ce5099dc14c5ea591f5e0b61107cd647f
| 276
|
py
|
Python
|
demo.py
|
re-treat/DialoFlow
|
b7b729ddf5d146ee201c987f3a44894ad24938ce
|
[
"MIT"
] | null | null | null |
demo.py
|
re-treat/DialoFlow
|
b7b729ddf5d146ee201c987f3a44894ad24938ce
|
[
"MIT"
] | null | null | null |
demo.py
|
re-treat/DialoFlow
|
b7b729ddf5d146ee201c987f3a44894ad24938ce
|
[
"MIT"
] | null | null | null |
from generate import *
history = []
while True:
new_sentence = input('>> User: ').replace('\n','')
history.append(new_sentence)
resps = get_responses(history[-5:],mode='sample')
best_resp = resps[0]
print(">> Bot:",best_resp)
history.append(best_resp)
| 27.6
| 54
| 0.644928
|
279e060f2fe087b6cc2ad799fcd36463630e1c95
| 9,021
|
py
|
Python
|
.ycm_extra_conf.py
|
WenDaTesi/pdc-graph
|
1565c01258a05e2949d86f2fa4fc8423d79c0f58
|
[
"MIT"
] | null | null | null |
.ycm_extra_conf.py
|
WenDaTesi/pdc-graph
|
1565c01258a05e2949d86f2fa4fc8423d79c0f58
|
[
"MIT"
] | null | null | null |
.ycm_extra_conf.py
|
WenDaTesi/pdc-graph
|
1565c01258a05e2949d86f2fa4fc8423d79c0f58
|
[
"MIT"
] | null | null | null |
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
from distutils.sysconfig import get_python_inc
import os
import platform
import os.path as p
import subprocess
DIR_OF_THIS_SCRIPT = p.abspath( p.dirname( __file__ ) )
DIR_OF_THIRD_PARTY = p.join( DIR_OF_THIS_SCRIPT, 'third_party' )
DIR_OF_WATCHDOG_DEPS = p.join( DIR_OF_THIRD_PARTY, 'watchdog_deps' )
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
database = None
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-DNDEBUG',
# You 100% do NOT need -DUSE_CLANG_COMPLETER and/or -DYCM_EXPORT in your flags;
# only the YCM source code needs it.
'-DUSE_CLANG_COMPLETER',
'-DYCM_EXPORT=',
# THIS IS IMPORTANT! Without the '-x' flag, Clang won't know which language to
# use when compiling headers. So it will guess. Badly. So C++ headers will be
# compiled as C headers. You don't want that so ALWAYS specify the '-x' flag.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-isystem',
'/usr/include/*',
'-isystem',
'/usr/lib/*',
'-isystem',
'./include',
'-isystem',
'./test/include',
'-isystem',
'/usr/include/gtkmm-3.0',
'-isystem',
'/usr/include/gtk-3.0',
'-isystem',
'/usr/include/cairomm-1.0/',
'-isystem',
'/usr/include/cairo/'
'-I',
'/usr/include/gtkmm-3.0',
'-I',
'/usr/lib/gtkmm-3.0/include',
'-I',
'/usr/include/atkmm-1.6',
'-I',
'/usr/include/atk-1.0',
'-I',
'/usr/include/glib-2.0',
'-I',
'/usr/lib/glib-2.0/include',
'-I',
'/usr/include/glibmm-2.4',
'-I',
'/usr/lib/glibmm-2.4/include',
'-I',
'/usr/include/sigc++-2.0',
'-I',
'/usr/lib/sigc++-2.0/include',
'-I',
'/usr/include/giomm-2.4',
'-I',
'/usr/lib/giomm-2.4/include',
'-I',
'/usr/include/libmount',
'-I',
'/usr/include/blkid',
'-I',
'/usr/include/pangomm-1.4',
'-I',
'/usr/lib/pangomm-1.4/include',
'-I',
'/usr/include/cairomm-1.0',
'-I',
'/usr/lib/cairomm-1.0/include',
'-I',
'/usr/include/cairo',
'-I',
'/usr/include/pixman-1',
'-I',
'/usr/include/freetype2',
'-I',
'/usr/include/libpng16',
'-I',
'/usr/include/harfbuzz',
'-I',
'/usr/include/pango-1.0',
'-I',
'/usr/include/fribidi',
'-I',
'/usr/include/gtk-3.0',
'-I',
'/usr/include/gdk-pixbuf-2.0',
'-I',
'/usr/include/gio-unix-2.0',
'-I',
'/usr/include/at-spi2-atk/2.0',
'-I',
'/usr/include/dbus-1.0',
'-I',
'/usr/lib/dbus-1.0/include',
'-I',
'/usr/include/at-spi-2.0',
'-I',
'/usr/include/gtk-3.0/unix-print',
'-I',
'/usr/include/gdkmm-3.0',
'-I',
'/usr/lib/gdkmm-3.0/include',
'-pthread'
]
# Clang automatically sets the '-std=' flag to 'c++14' for MSVC 2015 or later,
# which is required for compiling the standard library, and to 'c++11' for older
# versions.
if platform.system() != 'Windows':
flags.append( '-std=c++11' )
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
def IsHeaderFile( filename ):
extension = p.splitext( filename )[ 1 ]
return extension in [ '.h', '.hxx', '.hpp', '.hh' ]
def FindCorrespondingSourceFile( filename ):
if IsHeaderFile( filename ):
basename = p.splitext( filename )[ 0 ]
for extension in SOURCE_EXTENSIONS:
replacement_file = basename + extension
if p.exists( replacement_file ):
return replacement_file
return filename
def PathToPythonUsedDuringBuild():
try:
filepath = p.join( DIR_OF_THIS_SCRIPT, 'PYTHON_USED_DURING_BUILDING' )
with open( filepath ) as f:
return f.read().strip()
except OSError:
return None
def Settings( **kwargs ):
# Do NOT import ycm_core at module scope.
import ycm_core
global database
if database is None and p.exists( compilation_database_folder ):
database = ycm_core.CompilationDatabase( compilation_database_folder )
language = kwargs[ 'language' ]
if language == 'cfamily':
# If the file is a header, try to find the corresponding source file and
# retrieve its flags from the compilation database if using one. This is
# necessary since compilation databases don't have entries for header files.
# In addition, use this source file as the translation unit. This makes it
# possible to jump from a declaration in the header file to its definition
# in the corresponding source file.
filename = FindCorrespondingSourceFile( kwargs[ 'filename' ] )
if not database:
return {
'flags': flags,
'include_paths_relative_to_dir': DIR_OF_THIS_SCRIPT,
'override_filename': filename
}
compilation_info = database.GetCompilationInfoForFile( filename )
if not compilation_info.compiler_flags_:
return {}
# Bear in mind that compilation_info.compiler_flags_ does NOT return a
# python list, but a "list-like" StringVec object.
final_flags = list( compilation_info.compiler_flags_ )
# NOTE: This is just for YouCompleteMe; it's highly likely that your project
# does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
# ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
try:
final_flags.remove( '-stdlib=libc++' )
except ValueError:
pass
return {
'flags': final_flags,
'include_paths_relative_to_dir': compilation_info.compiler_working_dir_,
'override_filename': filename
}
if language == 'python':
return {
'interpreter_path': PathToPythonUsedDuringBuild()
}
return {}
def PythonSysPath( **kwargs ):
sys_path = kwargs[ 'sys_path' ]
interpreter_path = kwargs[ 'interpreter_path' ]
major_version = subprocess.check_output( [
interpreter_path, '-c', 'import sys; print( sys.version_info[ 0 ] )' ]
).rstrip().decode( 'utf8' )
sys_path[ 0:0 ] = [ p.join( DIR_OF_THIS_SCRIPT ),
p.join( DIR_OF_THIRD_PARTY, 'bottle' ),
p.join( DIR_OF_THIRD_PARTY, 'cregex',
'regex_{}'.format( major_version ) ),
p.join( DIR_OF_THIRD_PARTY, 'frozendict' ),
p.join( DIR_OF_THIRD_PARTY, 'jedi_deps', 'jedi' ),
p.join( DIR_OF_THIRD_PARTY, 'jedi_deps', 'parso' ),
p.join( DIR_OF_THIRD_PARTY, 'requests_deps', 'requests' ),
p.join( DIR_OF_THIRD_PARTY, 'requests_deps',
'urllib3',
'src' ),
p.join( DIR_OF_THIRD_PARTY, 'requests_deps',
'chardet' ),
p.join( DIR_OF_THIRD_PARTY, 'requests_deps',
'certifi' ),
p.join( DIR_OF_THIRD_PARTY, 'requests_deps',
'idna' ),
p.join( DIR_OF_WATCHDOG_DEPS, 'watchdog', 'build', 'lib3' ),
p.join( DIR_OF_WATCHDOG_DEPS, 'pathtools' ),
p.join( DIR_OF_THIRD_PARTY, 'waitress' ) ]
sys_path.append( p.join( DIR_OF_THIRD_PARTY, 'jedi_deps', 'numpydoc' ) )
return sys_path
| 31.652632
| 82
| 0.654584
|
425184f4b8ad2a123e9ff9653be3018f77cdee1c
| 322
|
py
|
Python
|
demo.py
|
billyblu2000/Chorderator
|
6e5e077da649966e872dbd1494f3606a23937e8b
|
[
"MIT"
] | 7
|
2021-04-12T08:17:10.000Z
|
2022-01-30T05:55:25.000Z
|
demo.py
|
billyblu2000/Chorderator
|
6e5e077da649966e872dbd1494f3606a23937e8b
|
[
"MIT"
] | null | null | null |
demo.py
|
billyblu2000/Chorderator
|
6e5e077da649966e872dbd1494f3606a23937e8b
|
[
"MIT"
] | null | null | null |
import chorderator as cdt
cdt.set_melody('MIDI demos/inputs/10.mid')
cdt.set_phrase([1])
cdt.set_meta(tonic=cdt.Key.C, mode=cdt.Mode.MAJOR, meter=cdt.Meter.FOUR_FOUR, tempo=120)
cdt.set_output_chord_style(cdt.ChordStyle.EMOTIONAL)
cdt.set_output_progression_style(cdt.ProgressionStyle.POP)
cdt.generate_save('generated')
| 35.777778
| 88
| 0.81677
|
49baa866fd39ed4eb27d113495060f760a0ba625
| 1,356
|
py
|
Python
|
users/migrations/0001_initial.py
|
Dopaman/xcurator_backend
|
1d8253c8bcea9d37e48f745df63c63f122f13db0
|
[
"MIT"
] | null | null | null |
users/migrations/0001_initial.py
|
Dopaman/xcurator_backend
|
1d8253c8bcea9d37e48f745df63c63f122f13db0
|
[
"MIT"
] | 1
|
2021-10-06T13:22:45.000Z
|
2021-10-06T13:22:45.000Z
|
users/migrations/0001_initial.py
|
Dopaman/xcurator_backend
|
1d8253c8bcea9d37e48f745df63c63f122f13db0
|
[
"MIT"
] | 2
|
2021-10-01T13:58:34.000Z
|
2021-10-02T14:52:34.000Z
|
# Generated by Django 3.2.5 on 2021-08-27 19:28
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('email', models.EmailField(max_length=100, unique=True, verbose_name='email')),
('username', models.CharField(max_length=30, unique=True)),
('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='date joined')),
('last_login', models.DateTimeField(auto_now=True, verbose_name='last login')),
('is_admin', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('is_superuser', models.BooleanField(default=False)),
('first_name', models.CharField(max_length=64)),
('last_name', models.CharField(max_length=64)),
],
options={
'abstract': False,
},
),
]
| 38.742857
| 117
| 0.581858
|
6c5f15c10afba95b4860c9f85890b0227228fd3e
| 7,761
|
py
|
Python
|
background_file_handler/backgroundProcess.py
|
MarcoMuellner/APOLLO
|
0011702f93bbf12367fcc30970ba9af25be00a8f
|
[
"MIT"
] | null | null | null |
background_file_handler/backgroundProcess.py
|
MarcoMuellner/APOLLO
|
0011702f93bbf12367fcc30970ba9af25be00a8f
|
[
"MIT"
] | null | null | null |
background_file_handler/backgroundProcess.py
|
MarcoMuellner/APOLLO
|
0011702f93bbf12367fcc30970ba9af25be00a8f
|
[
"MIT"
] | null | null | null |
import re
import subprocess
from res.strings import *
from support.directoryManager import cd
import time
from res.conf_file_str import general_background_result_path,general_binary_path,general_kic\
,internal_noise_value,internal_mag_value,internal_multiple_mag,general_use_pcb,internal_path\
,analysis_folder_prefix
from support.printer import print_int
class BackgroundProcess:
"""
Background Process allows for concurrent runs of diamonds for both modes. Use run() to get it started
"""
def __init__(self,kwargs):
#set up naming convention for noise
if internal_noise_value in kwargs.keys():
self.star_id = str(kwargs[general_kic]) + f"_n_{kwargs[internal_noise_value]}"
elif internal_multiple_mag in kwargs.keys() and kwargs[internal_multiple_mag]:
self.star_id = str(kwargs[general_kic]) + f"_m_{kwargs[internal_mag_value]}"
else:
self.star_id = str(kwargs[general_kic])
#point to used Background binary
if general_binary_path in kwargs.keys():
self.binaryPath = kwargs[general_binary_path]
else:
self.binaryPath = kwargs[internal_path] + "/Background/build/"
self.model = strRunIDBoth
self.kwargs = kwargs
#point to used Background results path
if general_background_result_path in kwargs.keys():
self.resultsPath = kwargs[general_background_result_path]
else:
self.resultsPath = kwargs[internal_path] + "/Background/results/"
self.modes = {strDiModeFull: strDiIntModeFull
,strDiModeNoise : strDiIntModeNoise}
self.status = {}
self.priorChanges = {}
self.run_count = {}
def _getFullPath(self,path):
'''
        Return an absolute version of the given path if it is not already absolute.
        :param path: The path to convert to an absolute path
:type path: str
:return: Absolute path
:rtype: str
'''
if path[0] not in ["~", "/", "\\"]:
path = os.getcwd() + "/" + path
return path
def _getCommandStrings(self):
cmdStrings = {}
self.check_paths = {}
#pcb: If setting is set and false, disable
use_pcb = 0 if general_use_pcb in self.kwargs.keys() and not self.kwargs[general_use_pcb] else 1
binary = self._getFullPath(self.binaryPath + strDiBinary)
for run_id, model in [("Oscillation","ThreeHarvey"),
("Noise","ThreeHarveyNoGaussian")]:
self.check_paths[run_id] = f"{self.resultsPath}{self.kwargs[analysis_folder_prefix]}{self.star_id}/{run_id}/"
if not os.path.exists(self.check_paths[run_id]):
os.makedirs(self.check_paths[run_id])
cmdStrings[run_id] = [binary, self.kwargs[analysis_folder_prefix], self.star_id,run_id,model,str(use_pcb)]
return cmdStrings
def run(self):
cmdStrings = self._getCommandStrings()
for key,value in cmdStrings.items():
self._runCmd(key,value)
def _runBinary(self,cmd):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return p
def _runCmd(self,runID, cmd):
for runCounter in range(1,11):
self.run_count[runID] = runCounter
print_int(f"Starting {runID} model: run {runCounter}",self.kwargs)
with cd(self.binaryPath):
self.status[runID] = strDiStatRunning
p = self._runBinary(cmd)
logCounter = 0
r = re.compile(r"(Nit:\s\d+).+(Ratio: [\w\d\.\+]+)")
time.sleep(5)
total=""
while p.poll() is None:
line = p.stderr.readline().decode("utf-8")
total += line
logCounter = self._logRatio(runID,line,logCounter,r,runCounter)
self.status[runID] = self._checkDiamondsStdOut(self.status[runID], line)
logCounter +=1
line = p.stderr.read().decode("utf-8")
total += line
self._logRatio(runID,line,10,r,runCounter)
self.status[runID] = self._checkDiamondsStdOut(self.status[runID], line)
time.sleep(1)
self.status[runID] = self._checkIfAllFilesExist(runID,self.status[runID])
if self.status[runID] == strDiStatRunning:
self.status[runID] = strDiamondsStatusGood
print_int(f"{runID} model: Finished!",self.kwargs)
return
else:
print_int(f"{runID} model: Run {runCounter} --> Repeating run, due to failure due to {self.status[runID]}",self.kwargs)
raise ValueError(f"{runID} model: FAILED! Tried 10 runs, failed to find result",self.kwargs)
def _logRatio(self,runID,line,counter,r,runCounter):
match = r.findall(line)
if len(match) > 0 and counter >= 5:
for it,ratio in match:
print_int(f"{runID} model: run {runCounter} --> {it},{ratio}",self.kwargs)
counter = 0
elif len(match) == 0 and counter >= 5:
print_int(f"{runID} model: run {runCounter} --> {line}",self.kwargs)
counter = 0
return counter
def _checkDiamondsStdOut(self,oldStatus,text):
"""
Checks the stdout of DIAMONDS and returns the status, that DIAMONDS has.
:param text: stderr/stdout of diamonds that is analyzed.
:type text: str
:return: Statusflag
:rtype: str
"""
status = oldStatus
if strDiamondsErrBetterLikelihood in str(text):
print_int("Diamonds cannot find point with better likelihood. Repeat!",self.kwargs)
status = strDiamondsStatusLikelihood
elif strDiamondsErrCovarianceFailed in str(text):
print_int("Diamonds cannot decompose covariance. Repeat!",self.kwargs)
status = strDiamondsStatusCovariance
elif strDiamondsErrAssertionFailed in str(text):
print_int("Diamonds assertion failed. Repeat!",self.kwargs)
status = strDiamondsStatusAssertion
elif strDiamondsErrMatrixCompositionFailed in str(text):
print_int("Diamonds matrix decomposition failed. Repeat!",self.kwargs)
status = strDiamondsStatusMatrixDecomp
        elif strDiamondsErrCoreDumped in str(text):
            print_int("Diamonds core dumped. Repeat!",self.kwargs)
            status = strDiamondsStatusMatrixDecomp
        elif strDiamondsErrAborted in str(text):
            print_int("Diamonds aborted. Repeat!",self.kwargs)
            status = strDiamondsStatusMatrixDecomp
        elif strDiamondsErrQuitting in str(text):
            print_int("Diamonds quit unexpectedly. Repeat!",self.kwargs)
            status = strDiamondsStatusMatrixDecomp
        elif strDiamondsErrSegmentation in str(text):
            print_int("Diamonds reported a segmentation fault. Repeat!",self.kwargs)
            status = strDiamondsStatusMatrixDecomp
return status
def _checkIfAllFilesExist(self,runID,oldStatus):
content = os.listdir(self.check_paths[runID])
if "background_evidenceInformation.txt" not in content:
print_int(f"Cant fine evidence in {self.check_paths[runID]}. Repeat!",self.kwargs)
return strDiamondsStatusAssertion
elif "background_parameterSummary.txt" not in content:
print_int(f"Cant fine summary in {self.check_paths[runID]}. Repeat!",self.kwargs)
return strDiamondsStatusAssertion
return oldStatus
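# --- Added illustrative sketch (not part of the upstream APOLLO file) ---
# Rough idea of how this class is driven; the full set of required keys comes
# from res.conf_file_str and APOLLO's configuration handling, so treat the
# dictionary below as a guess rather than a complete configuration:
#
#     kwargs = {
#         general_kic: 123456789,
#         internal_path: "/path/to/apollo",
#         analysis_folder_prefix: "KIC",
#     }
#     BackgroundProcess(kwargs).run()  # runs the Oscillation and Noise models in turn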
| 40.847368
| 139
| 0.630589
|
7ab0c8acbe3294ba46f21a5c37fd62582bc15b35
| 28,348
|
py
|
Python
|
pytorch_lightning/core/hooks.py
|
jbuckman/pytorch-lightning
|
cc74fb717a7127fecd4dbb9c743ba28b40de7f64
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/core/hooks.py
|
jbuckman/pytorch-lightning
|
cc74fb717a7127fecd4dbb9c743ba28b40de7f64
|
[
"Apache-2.0"
] | 1
|
2021-03-26T02:16:20.000Z
|
2021-03-26T02:16:20.000Z
|
pytorch_lightning/core/hooks.py
|
jbuckman/pytorch-lightning
|
cc74fb717a7127fecd4dbb9c743ba28b40de7f64
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various hooks to be used in the Lightning code."""
from typing import Any, Dict, List, Optional, Union
import torch
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from pytorch_lightning.utilities import move_data_to_device, rank_zero_warn
from pytorch_lightning.utilities.types import STEP_OUTPUT
class ModelHooks:
"""Hooks to be used in LightningModule."""
def on_fit_start(self) -> None:
"""
Called at the very beginning of fit.
If on DDP it is called on every process
"""
def on_fit_end(self) -> None:
"""
Called at the very end of fit.
If on DDP it is called on every process
"""
def on_train_start(self) -> None:
"""
Called at the beginning of training after sanity check.
"""
def on_train_end(self) -> None:
"""
Called at the end of training before logger experiment is closed.
"""
def on_validation_start(self) -> None:
"""
Called at the beginning of validation.
"""
def on_validation_end(self) -> None:
"""
Called at the end of validation.
"""
def on_test_start(self) -> None:
"""
Called at the beginning of testing.
"""
def on_test_end(self) -> None:
"""
Called at the end of testing.
"""
def on_predict_start(self) -> None:
"""
Called at the beginning of predicting.
"""
def on_predict_end(self) -> None:
"""
Called at the end of predicting.
"""
def on_pretrain_routine_start(self) -> None:
"""
Called at the beginning of the pretrain routine (between fit and train start).
- fit
- pretrain_routine start
- pretrain_routine end
- training_start
"""
def on_pretrain_routine_end(self) -> None:
"""
Called at the end of the pretrain routine (between fit and train start).
- fit
- pretrain_routine start
- pretrain_routine end
- training_start
"""
def on_train_batch_start(self, batch: Any, batch_idx: int, dataloader_idx: int) -> None:
"""
Called in the training loop before anything happens for that batch.
If you return -1 here, you will skip training for the rest of the current epoch.
Args:
batch: The batched data as it is returned by the training DataLoader.
batch_idx: the index of the batch
dataloader_idx: the index of the dataloader
"""
def on_train_batch_end(self, outputs: STEP_OUTPUT, batch: Any, batch_idx: int, dataloader_idx: int) -> None:
"""
Called in the training loop after the batch.
Args:
outputs: The outputs of training_step_end(training_step(x))
batch: The batched data as it is returned by the training DataLoader.
batch_idx: the index of the batch
dataloader_idx: the index of the dataloader
"""
def on_validation_batch_start(self, batch: Any, batch_idx: int, dataloader_idx: int) -> None:
"""
Called in the validation loop before anything happens for that batch.
Args:
batch: The batched data as it is returned by the validation DataLoader.
batch_idx: the index of the batch
dataloader_idx: the index of the dataloader
"""
def on_validation_batch_end(
self, outputs: Optional[STEP_OUTPUT], batch: Any, batch_idx: int, dataloader_idx: int
) -> None:
"""
Called in the validation loop after the batch.
Args:
outputs: The outputs of validation_step_end(validation_step(x))
batch: The batched data as it is returned by the validation DataLoader.
batch_idx: the index of the batch
dataloader_idx: the index of the dataloader
"""
def on_test_batch_start(self, batch: Any, batch_idx: int, dataloader_idx: int) -> None:
"""
Called in the test loop before anything happens for that batch.
Args:
batch: The batched data as it is returned by the test DataLoader.
batch_idx: the index of the batch
dataloader_idx: the index of the dataloader
"""
def on_test_batch_end(
self, outputs: Optional[STEP_OUTPUT], batch: Any, batch_idx: int, dataloader_idx: int
) -> None:
"""
Called in the test loop after the batch.
Args:
outputs: The outputs of test_step_end(test_step(x))
batch: The batched data as it is returned by the test DataLoader.
batch_idx: the index of the batch
dataloader_idx: the index of the dataloader
"""
def on_predict_batch_start(self, batch: Any, batch_idx: int, dataloader_idx: int) -> None:
"""
Called in the predict loop before anything happens for that batch.
Args:
batch: The batched data as it is returned by the test DataLoader.
batch_idx: the index of the batch
dataloader_idx: the index of the dataloader
"""
def on_predict_batch_end(self, outputs: Optional[Any], batch: Any, batch_idx: int, dataloader_idx: int) -> None:
"""
Called in the predict loop after the batch.
Args:
outputs: The outputs of predict_step_end(test_step(x))
batch: The batched data as it is returned by the test DataLoader.
batch_idx: the index of the batch
dataloader_idx: the index of the dataloader
"""
def on_validation_model_eval(self) -> None:
"""
Sets the model to eval during the val loop
"""
self.trainer.model.eval()
def on_validation_model_train(self) -> None:
"""
Sets the model to train during the val loop
"""
self.trainer.model.train()
def on_test_model_train(self) -> None:
"""
Sets the model to train during the test loop
"""
self.trainer.model.train()
def on_test_model_eval(self) -> None:
"""
Sets the model to eval during the test loop
"""
self.trainer.model.eval()
def on_predict_model_eval(self) -> None:
"""
Sets the model to eval during the predict loop
"""
self.trainer.model.eval()
def on_epoch_start(self) -> None:
"""
Called when either of train/val/test epoch begins.
"""
def on_epoch_end(self) -> None:
"""
Called when either of train/val/test epoch ends.
"""
def on_train_epoch_start(self) -> None:
"""
Called in the training loop at the very beginning of the epoch.
"""
def on_train_epoch_end(self, unused: Optional = None) -> None:
"""
Called in the training loop at the very end of the epoch.
To access all batch outputs at the end of the epoch, either:
1. Implement `training_epoch_end` in the LightningModule OR
2. Cache data across steps on the attribute(s) of the `LightningModule` and access them in this hook
"""
def on_validation_epoch_start(self) -> None:
"""
Called in the validation loop at the very beginning of the epoch.
"""
def on_validation_epoch_end(self) -> None:
"""
Called in the validation loop at the very end of the epoch.
"""
def on_test_epoch_start(self) -> None:
"""
Called in the test loop at the very beginning of the epoch.
"""
def on_test_epoch_end(self) -> None:
"""
Called in the test loop at the very end of the epoch.
"""
def on_predict_epoch_start(self) -> None:
"""
Called at the beginning of predicting.
"""
def on_predict_epoch_end(self, results: List[Any]) -> None:
"""
Called at the end of predicting.
"""
def on_before_zero_grad(self, optimizer: Optimizer) -> None:
"""
Called after ``training_step()`` and before ``optimizer.zero_grad()``.
Called in the training loop after taking an optimizer step and before zeroing grads.
Good place to inspect weight information with weights updated.
This is where it is called::
for optimizer in optimizers:
out = training_step(...)
model.on_before_zero_grad(optimizer) # < ---- called here
optimizer.zero_grad()
backward()
Args:
optimizer: The optimizer for which grads should be zeroed.
"""
def on_after_backward(self) -> None:
"""
Called in the training loop after loss.backward() and before optimizers do anything.
This is the ideal place to inspect or log gradient information.
Example::
def on_after_backward(self):
# example to inspect gradient information in tensorboard
if self.trainer.global_step % 25 == 0: # don't make the tf file huge
for k, v in self.named_parameters():
self.logger.experiment.add_histogram(
tag=k, values=v.grad, global_step=self.trainer.global_step
)
"""
def on_post_move_to_device(self) -> None:
"""
Called in the ``parameter_validation`` decorator after :meth:`~pytorch_lightning.core.LightningModule.to`
is called. This is a good place to tie weights between modules after moving them to a device. Can be
used when training models with weight sharing properties on TPU.
Addresses the handling of shared weights on TPU:
https://github.com/pytorch/xla/blob/master/TROUBLESHOOTING.md#xla-tensor-quirks
Example::
def on_post_move_to_device(self):
self.decoder.weight = self.encoder.weight
"""
def configure_sharded_model(self) -> None:
"""
Hook to create modules in a distributed aware context. This is useful for when using sharded plugins,
where we'd like to shard the model instantly, which is useful for extremely large models
which can save memory and initialization time.
The accelerator manages whether to call this hook at every given stage.
For sharded plugins where model parallelism is required, the hook is usually on called once
to initialize the sharded parameters, and not called again in the same process.
By default for accelerators/plugins that do not use model sharding techniques,
this hook is called during each fit/val/test/predict stages.
"""
class DataHooks:
"""Hooks to be used for data related stuff."""
def prepare_data(self) -> None:
"""
Use this to download and prepare data.
.. warning:: DO NOT set state to the model (use `setup` instead)
since this is NOT called on every GPU in DDP/TPU
Example::
def prepare_data(self):
# good
download_data()
tokenize()
etc()
# bad
self.split = data_split
self.some_state = some_other_state()
In DDP prepare_data can be called in two ways (using Trainer(prepare_data_per_node)):
1. Once per node. This is the default and is only called on LOCAL_RANK=0.
2. Once in total. Only called on GLOBAL_RANK=0.
Example::
# DEFAULT
# called once per node on LOCAL_RANK=0 of that node
Trainer(prepare_data_per_node=True)
# call on GLOBAL_RANK=0 (great for shared file systems)
Trainer(prepare_data_per_node=False)
This is called before requesting the dataloaders:
.. code-block:: python
model.prepare_data()
if ddp/tpu: init()
model.setup(stage)
model.train_dataloader()
model.val_dataloader()
model.test_dataloader()
"""
def setup(self, stage: Optional[str] = None) -> None:
"""
Called at the beginning of fit (train + validate), validate, test, predict, or tune.
This is a good hook when you need to build models dynamically or adjust something about them.
This hook is called on every process when using DDP.
Args:
stage: either ``'fit'``, ``'validate'``, ``'test'``, or ``'predict'``
Example::
class LitModel(...):
def __init__(self):
self.l1 = None
def prepare_data(self):
download_data()
tokenize()
# don't do this
                    self.something = some_other_state()
                def setup(self, stage):
data = Load_data(...)
self.l1 = nn.Linear(28, data.num_classes)
"""
def teardown(self, stage: Optional[str] = None) -> None:
"""
Called at the end of fit (train + validate), validate, test, predict, or tune.
Args:
stage: either ``'fit'``, ``'validate'``, ``'test'``, or ``'predict'``
"""
def train_dataloader(self) -> Union[DataLoader, List[DataLoader], Dict[str, DataLoader]]:
"""
Implement one or more PyTorch DataLoaders for training.
Return:
Either a single PyTorch :class:`~torch.utils.data.DataLoader` or a collection of these
(list, dict, nested lists and dicts). In the case of multiple dataloaders, please see
this :ref:`page <multiple-training-dataloaders>`
The dataloader you return will not be called every epoch unless you set
:paramref:`~pytorch_lightning.trainer.Trainer.reload_dataloaders_every_epoch` to ``True``.
For data processing use the following pattern:
- download in :meth:`prepare_data`
- process and split in :meth:`setup`
However, the above are only necessary for distributed processing.
.. warning:: do not assign state in prepare_data
- :meth:`~pytorch_lightning.trainer.Trainer.fit`
- ...
- :meth:`prepare_data`
- :meth:`setup`
- :meth:`train_dataloader`
Note:
Lightning adds the correct sampler for distributed and arbitrary hardware.
There is no need to set it yourself.
Example::
# single dataloader
def train_dataloader(self):
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (1.0,))])
dataset = MNIST(root='/path/to/mnist/', train=True, transform=transform,
download=True)
loader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=self.batch_size,
shuffle=True
)
return loader
# multiple dataloaders, return as list
def train_dataloader(self):
mnist = MNIST(...)
cifar = CIFAR(...)
mnist_loader = torch.utils.data.DataLoader(
dataset=mnist, batch_size=self.batch_size, shuffle=True
)
cifar_loader = torch.utils.data.DataLoader(
dataset=cifar, batch_size=self.batch_size, shuffle=True
)
# each batch will be a list of tensors: [batch_mnist, batch_cifar]
return [mnist_loader, cifar_loader]
# multiple dataloader, return as dict
def train_dataloader(self):
mnist = MNIST(...)
cifar = CIFAR(...)
mnist_loader = torch.utils.data.DataLoader(
dataset=mnist, batch_size=self.batch_size, shuffle=True
)
cifar_loader = torch.utils.data.DataLoader(
dataset=cifar, batch_size=self.batch_size, shuffle=True
)
# each batch will be a dict of tensors: {'mnist': batch_mnist, 'cifar': batch_cifar}
return {'mnist': mnist_loader, 'cifar': cifar_loader}
"""
rank_zero_warn("`train_dataloader` must be implemented to be used with the Lightning Trainer")
def test_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
r"""
Implement one or multiple PyTorch DataLoaders for testing.
The dataloader you return will not be called every epoch unless you set
:paramref:`~pytorch_lightning.trainer.Trainer.reload_dataloaders_every_epoch` to ``True``.
For data processing use the following pattern:
- download in :meth:`prepare_data`
- process and split in :meth:`setup`
However, the above are only necessary for distributed processing.
.. warning:: do not assign state in prepare_data
- :meth:`~pytorch_lightning.trainer.Trainer.fit`
- ...
- :meth:`prepare_data`
- :meth:`setup`
- :meth:`train_dataloader`
- :meth:`val_dataloader`
- :meth:`test_dataloader`
Note:
Lightning adds the correct sampler for distributed and arbitrary hardware.
There is no need to set it yourself.
Return:
Single or multiple PyTorch DataLoaders.
Example::
def test_dataloader(self):
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (1.0,))])
dataset = MNIST(root='/path/to/mnist/', train=False, transform=transform,
download=True)
loader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=self.batch_size,
shuffle=False
)
return loader
# can also return multiple dataloaders
def test_dataloader(self):
return [loader_a, loader_b, ..., loader_n]
Note:
If you don't need a test dataset and a :meth:`test_step`, you don't need to implement
this method.
Note:
In the case where you return multiple test dataloaders, the :meth:`test_step`
will have an argument ``dataloader_idx`` which matches the order here.
"""
def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
r"""
Implement one or multiple PyTorch DataLoaders for validation.
The dataloader you return will not be called every epoch unless you set
:paramref:`~pytorch_lightning.trainer.Trainer.reload_dataloaders_every_epoch` to ``True``.
It's recommended that all data downloads and preparation happen in :meth:`prepare_data`.
- :meth:`~pytorch_lightning.trainer.Trainer.fit`
- ...
- :meth:`prepare_data`
- :meth:`train_dataloader`
- :meth:`val_dataloader`
- :meth:`test_dataloader`
Note:
Lightning adds the correct sampler for distributed and arbitrary hardware
There is no need to set it yourself.
Return:
Single or multiple PyTorch DataLoaders.
Examples::
def val_dataloader(self):
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (1.0,))])
dataset = MNIST(root='/path/to/mnist/', train=False,
transform=transform, download=True)
loader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=self.batch_size,
shuffle=False
)
return loader
# can also return multiple dataloaders
def val_dataloader(self):
return [loader_a, loader_b, ..., loader_n]
Note:
If you don't need a validation dataset and a :meth:`validation_step`, you don't need to
implement this method.
Note:
In the case where you return multiple validation dataloaders, the :meth:`validation_step`
will have an argument ``dataloader_idx`` which matches the order here.
"""
def predict_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
r"""
Implement one or multiple PyTorch DataLoaders for prediction.
It's recommended that all data downloads and preparation happen in :meth:`prepare_data`.
- :meth:`~pytorch_lightning.trainer.Trainer.fit`
- ...
- :meth:`prepare_data`
- :meth:`train_dataloader`
- :meth:`val_dataloader`
- :meth:`test_dataloader`
Note:
Lightning adds the correct sampler for distributed and arbitrary hardware
There is no need to set it yourself.
Return:
Single or multiple PyTorch DataLoaders.
Note:
In the case where you return multiple prediction dataloaders, the :meth:`predict`
will have an argument ``dataloader_idx`` which matches the order here.
"""
def on_train_dataloader(self) -> None:
"""Called before requesting the train dataloader."""
def on_val_dataloader(self) -> None:
"""Called before requesting the val dataloader."""
def on_test_dataloader(self) -> None:
"""Called before requesting the test dataloader."""
def on_predict_dataloader(self) -> None:
"""Called before requesting the predict dataloader."""
def transfer_batch_to_device(self, batch: Any, device: Optional[torch.device] = None) -> Any:
"""
Override this hook if your :class:`~torch.utils.data.DataLoader` returns tensors
wrapped in a custom data structure.
The data types listed below (and any arbitrary nesting of them) are supported out of the box:
- :class:`torch.Tensor` or anything that implements `.to(...)`
- :class:`list`
- :class:`dict`
- :class:`tuple`
- :class:`torchtext.data.batch.Batch`
For anything else, you need to define how the data is moved to the target device (CPU, GPU, TPU, ...).
Note:
This hook should only transfer the data and not modify it, nor should it move the data to
any other device than the one passed in as argument (unless you know what you are doing).
Note:
This hook only runs on single GPU training and DDP (no data-parallel).
Data-Parallel support will come in the near future.
Args:
batch: A batch of data that needs to be transferred to a new device.
device: The target device as defined in PyTorch.
Returns:
A reference to the data on the new device.
Example::
def transfer_batch_to_device(self, batch, device):
if isinstance(batch, CustomBatch):
# move all tensors in your custom data structure to the device
batch.samples = batch.samples.to(device)
batch.targets = batch.targets.to(device)
else:
batch = super().transfer_batch_to_device(batch, device)
return batch
Raises:
MisconfigurationException:
If using data-parallel, ``Trainer(accelerator='dp')``.
See Also:
- :meth:`move_data_to_device`
- :meth:`apply_to_collection`
"""
device = device or self.device
return move_data_to_device(batch, device)
def on_before_batch_transfer(self, batch: Any, dataloader_idx: int) -> Any:
"""
Override to alter or apply batch augmentations to your batch before it is transferred to the device.
.. warning:: ``dataloader_idx`` always returns 0, and will be updated to support the true index in the future.
Note:
This hook only runs on single GPU training and DDP (no data-parallel).
Data-Parallel support will come in the near future.
Args:
batch: A batch of data that needs to be altered or augmented.
dataloader_idx: DataLoader idx for batch
Returns:
A batch of data
Example::
def on_before_batch_transfer(self, batch, dataloader_idx):
batch['x'] = transforms(batch['x'])
return batch
Raises:
MisconfigurationException:
If using data-parallel, ``Trainer(accelerator='dp')``.
See Also:
- :meth:`on_after_batch_transfer`
- :meth:`transfer_batch_to_device`
"""
return batch
def on_after_batch_transfer(self, batch: Any, dataloader_idx: int) -> Any:
"""
Override to alter or apply batch augmentations to your batch after it is transferred to the device.
.. warning:: ``dataloader_idx`` always returns 0, and will be updated to support the true ``idx`` in the future.
Note:
This hook only runs on single GPU training and DDP (no data-parallel).
Data-Parallel support will come in the near future.
Args:
batch: A batch of data that needs to be altered or augmented.
dataloader_idx: DataLoader idx for batch (Default: 0)
Returns:
A batch of data
Example::
def on_after_batch_transfer(self, batch, dataloader_idx):
batch['x'] = gpu_transforms(batch['x'])
return batch
Raises:
MisconfigurationException:
If using data-parallel, ``Trainer(accelerator='dp')``.
See Also:
- :meth:`on_before_batch_transfer`
- :meth:`transfer_batch_to_device`
"""
return batch
class CheckpointHooks:
"""Hooks to be used with Checkpointing."""
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
r"""
Called by Lightning to restore your model.
If you saved something with :meth:`on_save_checkpoint` this is your chance to restore this.
Args:
checkpoint: Loaded checkpoint
Example::
def on_load_checkpoint(self, checkpoint):
# 99% of the time you don't need to implement this method
self.something_cool_i_want_to_save = checkpoint['something_cool_i_want_to_save']
Note:
Lightning auto-restores global step, epoch, and train state including amp scaling.
There is no need for you to restore anything regarding training.
"""
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
r"""
Called by Lightning when saving a checkpoint to give you a chance to store anything
else you might want to save.
Args:
checkpoint: Checkpoint to be saved
Example::
def on_save_checkpoint(self, checkpoint):
# 99% of use cases you don't need to implement this method
checkpoint['something_cool_i_want_to_save'] = my_cool_pickable_object
Note:
Lightning saves all aspects of training (epoch, global step, etc...)
including amp scaling.
There is no need for you to store anything about training.
"""
| 34.782822
| 120
| 0.595774
|
d5163c84bf22b6c35391f74a0eb64536a55c3d09
| 3,496
|
py
|
Python
|
testing/test_rnn_layer.py
|
Elisa-Raspanti/NumPyNet
|
4e8d0d415275088f485457cdcf251d28a6826b0f
|
[
"MIT"
] | null | null | null |
testing/test_rnn_layer.py
|
Elisa-Raspanti/NumPyNet
|
4e8d0d415275088f485457cdcf251d28a6826b0f
|
[
"MIT"
] | null | null | null |
testing/test_rnn_layer.py
|
Elisa-Raspanti/NumPyNet
|
4e8d0d415275088f485457cdcf251d28a6826b0f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from random import choice
import numpy as np
import pytest
from hypothesis import given
from hypothesis import settings
from hypothesis import strategies as st
from NumPyNet.layers import RNN_layer
__author__ = ['Mattia Ceccarelli', 'Nico Curti']
__email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']
class TestRNNLayer:
'''
Tests:
- constructor of RNN_layer object
- print function
to be implemented:
forward function against tf.keras
update function
backward function against tf.keras
'''
@given(outputs=st.integers(min_value=-3, max_value=10),
steps=st.integers(min_value=1, max_value=4),
b=st.integers(min_value=5, max_value=15),
w=st.integers(min_value=15, max_value=100),
h=st.integers(min_value=15, max_value=100),
c=st.integers(min_value=1, max_value=10))
@settings(max_examples=10,
deadline=None)
def test_constructor(self, outputs, steps, b, w, h, c):
numpynet_activ = ['relu', 'logistic', 'tanh', 'linear']
if outputs > 0:
weights_choice = [np.random.uniform(low=-1, high=1., size=(w * h * c, outputs)), None]
bias_choice = [np.random.uniform(low=-1, high=1., size=(outputs,)), None]
else:
with pytest.raises(ValueError):
RNN_layer(outputs=outputs, steps=steps)
outputs += 10
weights_choice = [[np.random.uniform(low=-1, high=1., size=(w * h * c, outputs))] * 3, None]
bias_choice = [[np.random.uniform(low=-1, high=1., size=(outputs,))] * 3, None]
weights = choice(weights_choice)
bias = choice(bias_choice)
for numpynet_act in numpynet_activ:
layer = RNN_layer(outputs=outputs, steps=steps, activation=numpynet_act,
input_shape=(b, w, h, c),
weights=weights, bias=bias)
if weights is not None:
np.testing.assert_allclose(layer.input_layer.weights, weights[0], rtol=1e-5, atol=1e-8)
np.testing.assert_allclose(layer.self_layer.weights, weights[1], rtol=1e-5, atol=1e-8)
np.testing.assert_allclose(layer.output_layer.weights, weights[2], rtol=1e-5, atol=1e-8)
if bias is not None:
np.testing.assert_allclose(layer.input_layer.bias, bias[0], rtol=1e-5, atol=1e-8)
np.testing.assert_allclose(layer.self_layer.bias, bias[1], rtol=1e-5, atol=1e-8)
np.testing.assert_allclose(layer.output_layer.bias, bias[2], rtol=1e-5, atol=1e-8)
assert layer.output is None
@given(outputs=st.integers(min_value=3, max_value=10),
steps=st.integers(min_value=1, max_value=4),
b=st.integers(min_value=5, max_value=15),
w=st.integers(min_value=15, max_value=100),
h=st.integers(min_value=15, max_value=100),
c=st.integers(min_value=1, max_value=10))
@settings(max_examples=10,
deadline=None)
def test_printer(self, outputs, steps, b, w, h, c):
layer = RNN_layer(outputs=outputs, steps=steps, activation='linear')
with pytest.raises(TypeError):
print(layer)
layer = RNN_layer(outputs=outputs, steps=steps, activation='linear', input_shape=(b, w, h, c))
print(layer)
| 36.8
| 104
| 0.622712
|
55a5cfbf8b41c716275973acf1add2b793097f4e
| 19,552
|
py
|
Python
|
keras/applications/mobilenet.py
|
websterkovacek/keras
|
631102a7c9efb57a351355090796d5850285663c
|
[
"Apache-2.0"
] | 1
|
2021-01-01T00:16:04.000Z
|
2021-01-01T00:16:04.000Z
|
keras/applications/mobilenet.py
|
websterkovacek/keras
|
631102a7c9efb57a351355090796d5850285663c
|
[
"Apache-2.0"
] | 1
|
2021-01-10T15:10:05.000Z
|
2021-01-25T09:19:15.000Z
|
keras/applications/mobilenet.py
|
websterkovacek/keras
|
631102a7c9efb57a351355090796d5850285663c
|
[
"Apache-2.0"
] | 1
|
2021-01-10T09:06:39.000Z
|
2021-01-10T09:06:39.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""MobileNet v1 models for Keras.
MobileNet is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and
different width factors. This allows different width models to reduce
the number of multiply-adds and thereby
reduce inference cost on mobile devices.
MobileNets support any input size greater than 32 x 32, with larger image sizes
offering better performance.
The number of parameters and number of multiply-adds
can be modified by using the `alpha` parameter,
which increases/decreases the number of filters in each layer.
By altering the image size and `alpha` parameter,
all 16 models from the paper can be built, with ImageNet weights provided.
The paper demonstrates the performance of MobileNets using `alpha` values of
1.0 (also called 100 % MobileNet), 0.75, 0.5 and 0.25.
For each of these `alpha` values, weights for 4 different input image sizes
are provided (224, 192, 160, 128).
The following table describes the size and accuracy of the 100% MobileNet
on size 224 x 224:
----------------------------------------------------------------------------
Width Multiplier (alpha) | ImageNet Acc | Multiply-Adds (M) | Params (M)
----------------------------------------------------------------------------
| 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 |
| 0.75 MobileNet-224 | 68.4 % | 325 | 2.6 |
| 0.50 MobileNet-224 | 63.7 % | 149 | 1.3 |
| 0.25 MobileNet-224 | 50.6 % | 41 | 0.5 |
----------------------------------------------------------------------------
The following table describes the performance of
the 100 % MobileNet on various input sizes:
------------------------------------------------------------------------
Resolution | ImageNet Acc | Multiply-Adds (M) | Params (M)
------------------------------------------------------------------------
| 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 |
| 1.0 MobileNet-192 | 69.1 % | 529 | 4.2 |
| 1.0 MobileNet-160 | 67.2 % | 529 | 4.2 |
| 1.0 MobileNet-128 | 64.4 % | 529 | 4.2 |
------------------------------------------------------------------------
Reference:
- [MobileNets: Efficient Convolutional Neural Networks
for Mobile Vision Applications](
https://arxiv.org/abs/1704.04861)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from keras import backend
from keras.applications import imagenet_utils
from keras.engine import training
from keras.layers import VersionAwareLayers
from keras.utils import data_utils
from keras.utils import layer_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHT_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/mobilenet/')
layers = None
@keras_export('keras.applications.mobilenet.MobileNet',
'keras.applications.MobileNet')
def MobileNet(input_shape=None,
alpha=1.0,
depth_multiplier=1,
dropout=1e-3,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
"""Instantiates the MobileNet architecture.
Reference:
- [MobileNets: Efficient Convolutional Neural Networks
for Mobile Vision Applications](
https://arxiv.org/abs/1704.04861)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in the `tf.keras.backend.image_data_format()`.
Note: each Keras Application expects a specific kind of input preprocessing.
For MobileNet, call `tf.keras.applications.mobilenet.preprocess_input`
on your inputs before passing them to the model.
Args:
input_shape: Optional shape tuple, only to be specified if `include_top`
is False (otherwise the input shape has to be `(224, 224, 3)` (with
`channels_last` data format) or (3, 224, 224) (with `channels_first`
data format). It should have exactly 3 inputs channels, and width and
height should be no smaller than 32. E.g. `(200, 200, 3)` would be one
valid value. Default to `None`.
`input_shape` will be ignored if the `input_tensor` is provided.
alpha: Controls the width of the network. This is known as the width
multiplier in the MobileNet paper. - If `alpha` < 1.0, proportionally
decreases the number of filters in each layer. - If `alpha` > 1.0,
proportionally increases the number of filters in each layer. - If
`alpha` = 1, default number of filters from the paper are used at each
layer. Default to 1.0.
depth_multiplier: Depth multiplier for depthwise convolution. This is
called the resolution multiplier in the MobileNet paper. Default to 1.0.
dropout: Dropout rate. Default to 0.001.
include_top: Boolean, whether to include the fully-connected layer at the
top of the network. Default to `True`.
weights: One of `None` (random initialization), 'imagenet' (pre-training
on ImageNet), or the path to the weights file to be loaded. Default to
`imagenet`.
input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`) to
use as image input for the model. `input_tensor` is useful for sharing
inputs between multiple different networks. Default to None.
pooling: Optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` (default) means that the output of the model will be
the 4D tensor output of the last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
classes: Optional number of classes to classify images into, only to be
specified if `include_top` is True, and if no `weights` argument is
specified. Defaults to 1000.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
**kwargs: For backwards compatibility only.
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
global layers
if 'layers' in kwargs:
layers = kwargs.pop('layers')
else:
layers = VersionAwareLayers()
if kwargs:
raise ValueError('Unknown argument(s): %s' % (kwargs,))
if not (weights in {'imagenet', None} or tf.io.gfile.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
'as true, `classes` should be 1000')
# Determine proper input shape and default size.
if input_shape is None:
default_size = 224
else:
if backend.image_data_format() == 'channels_first':
rows = input_shape[1]
cols = input_shape[2]
else:
rows = input_shape[0]
cols = input_shape[1]
if rows == cols and rows in [128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if backend.image_data_format() == 'channels_last':
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if weights == 'imagenet':
if depth_multiplier != 1:
raise ValueError('If imagenet weights are being loaded, '
'depth multiplier must be 1')
if alpha not in [0.25, 0.50, 0.75, 1.0]:
raise ValueError('If imagenet weights are being loaded, '
'alpha can be one of'
'`0.25`, `0.50`, `0.75` or `1.0` only.')
if rows != cols or rows not in [128, 160, 192, 224]:
rows = 224
logging.warning('`input_shape` is undefined or non-square, '
'or `rows` is not in [128, 160, 192, 224]. '
'Weights for input shape (224, 224) will be'
' loaded as the default.')
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = _conv_block(img_input, 32, alpha, strides=(2, 2))
x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)
x = _depthwise_conv_block(
x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2)
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)
x = _depthwise_conv_block(
x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4)
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)
x = _depthwise_conv_block(
x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)
x = _depthwise_conv_block(
x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12)
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)
if include_top:
if backend.image_data_format() == 'channels_first':
shape = (int(1024 * alpha), 1, 1)
else:
shape = (1, 1, int(1024 * alpha))
x = layers.GlobalAveragePooling2D()(x)
x = layers.Reshape(shape, name='reshape_1')(x)
x = layers.Dropout(dropout, name='dropout')(x)
x = layers.Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x)
x = layers.Reshape((classes,), name='reshape_2')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Activation(activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name='mobilenet_%0.2f_%s' % (alpha, rows))
# Load weights.
if weights == 'imagenet':
if alpha == 1.0:
alpha_text = '1_0'
elif alpha == 0.75:
alpha_text = '7_5'
elif alpha == 0.50:
alpha_text = '5_0'
else:
alpha_text = '2_5'
if include_top:
model_name = 'mobilenet_%s_%d_tf.h5' % (alpha_text, rows)
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = data_utils.get_file(
model_name, weight_path, cache_subdir='models')
else:
model_name = 'mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows)
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = data_utils.get_file(
model_name, weight_path, cache_subdir='models')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
"""Adds an initial convolution layer (with batch normalization and relu6).
Args:
inputs: Input tensor of shape `(rows, cols, 3)` (with `channels_last`
data format) or (3, rows, cols) (with `channels_first` data format).
It should have exactly 3 inputs channels, and width and height should
be no smaller than 32. E.g. `(224, 224, 3)` would be one valid value.
filters: Integer, the dimensionality of the output space (i.e. the
number of output filters in the convolution).
alpha: controls the width of the network. - If `alpha` < 1.0,
proportionally decreases the number of filters in each layer. - If
`alpha` > 1.0, proportionally increases the number of filters in each
layer. - If `alpha` = 1, default number of filters from the paper are
used at each layer.
kernel: An integer or tuple/list of 2 integers, specifying the width and
height of the 2D convolution window. Can be a single integer to
specify the same value for all spatial dimensions.
strides: An integer or tuple/list of 2 integers, specifying the strides
of the convolution along the width and height. Can be a single integer
to specify the same value for all spatial dimensions. Specifying any
stride value != 1 is incompatible with specifying any `dilation_rate`
value != 1. # Input shape
4D tensor with shape: `(samples, channels, rows, cols)` if
data_format='channels_first'
or 4D tensor with shape: `(samples, rows, cols, channels)` if
data_format='channels_last'. # Output shape
4D tensor with shape: `(samples, filters, new_rows, new_cols)` if
data_format='channels_first'
or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if
data_format='channels_last'. `rows` and `cols` values might have
changed due to stride.
Returns:
Output tensor of block.
"""
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
filters = int(filters * alpha)
x = layers.Conv2D(
filters,
kernel,
padding='same',
use_bias=False,
strides=strides,
name='conv1')(inputs)
x = layers.BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
return layers.ReLU(6., name='conv1_relu')(x)
def _depthwise_conv_block(inputs,
pointwise_conv_filters,
alpha,
depth_multiplier=1,
strides=(1, 1),
block_id=1):
"""Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
Args:
inputs: Input tensor of shape `(rows, cols, channels)` (with
`channels_last` data format) or (channels, rows, cols) (with
`channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the pointwise convolution).
alpha: controls the width of the network. - If `alpha` < 1.0,
proportionally decreases the number of filters in each layer. - If
`alpha` > 1.0, proportionally increases the number of filters in each
layer. - If `alpha` = 1, default number of filters from the paper are
used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel. The total number of depthwise convolution
output channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers, specifying the strides
of the convolution along the width and height. Can be a single integer
to specify the same value for all spatial dimensions. Specifying any
stride value != 1 is incompatible with specifying any `dilation_rate`
value != 1.
block_id: Integer, a unique identification designating the block number.
# Input shape
4D tensor with shape: `(batch, channels, rows, cols)` if
data_format='channels_first'
or 4D tensor with shape: `(batch, rows, cols, channels)` if
data_format='channels_last'. # Output shape
4D tensor with shape: `(batch, filters, new_rows, new_cols)` if
data_format='channels_first'
or 4D tensor with shape: `(batch, new_rows, new_cols, filters)` if
data_format='channels_last'. `rows` and `cols` values might have
changed due to stride.
Returns:
Output tensor of block.
"""
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
pointwise_conv_filters = int(pointwise_conv_filters * alpha)
if strides == (1, 1):
x = inputs
else:
x = layers.ZeroPadding2D(((0, 1), (0, 1)), name='conv_pad_%d' % block_id)(
inputs)
x = layers.DepthwiseConv2D((3, 3),
padding='same' if strides == (1, 1) else 'valid',
depth_multiplier=depth_multiplier,
strides=strides,
use_bias=False,
name='conv_dw_%d' % block_id)(
x)
x = layers.BatchNormalization(
axis=channel_axis, name='conv_dw_%d_bn' % block_id)(
x)
x = layers.ReLU(6., name='conv_dw_%d_relu' % block_id)(x)
x = layers.Conv2D(
pointwise_conv_filters, (1, 1),
padding='same',
use_bias=False,
strides=(1, 1),
name='conv_pw_%d' % block_id)(
x)
x = layers.BatchNormalization(
axis=channel_axis, name='conv_pw_%d_bn' % block_id)(
x)
return layers.ReLU(6., name='conv_pw_%d_relu' % block_id)(x)
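# Illustrative sketch (not part of the original file): how the two helpers above
# compose into the start of the MobileNet body, mirroring the calls made inside
# ``MobileNet()``; ``img_input`` stands for an assumed ``layers.Input(shape=(224, 224, 3))``.
#
#     x = _conv_block(img_input, 32, alpha=1.0, strides=(2, 2))
#     x = _depthwise_conv_block(x, 64, alpha=1.0, depth_multiplier=1, block_id=1)
#     x = _depthwise_conv_block(x, 128, alpha=1.0, depth_multiplier=1,
#                               strides=(2, 2), block_id=2)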
@keras_export('keras.applications.mobilenet.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf')
@keras_export('keras.applications.mobilenet.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
mode='',
ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| 42.59695
| 80
| 0.652721
|
09904dd97d03cda66177c6197ef0d588d318b6ed
| 6,983
|
py
|
Python
|
tests/core/test_jose/test_jwe.py
|
johnjdailey/authlib
|
de4e4ec735e0a9a57906129f16df2addc6a4c908
|
[
"BSD-3-Clause"
] | 1
|
2020-08-04T08:29:39.000Z
|
2020-08-04T08:29:39.000Z
|
tests/core/test_jose/test_jwe.py
|
johnjdailey/authlib
|
de4e4ec735e0a9a57906129f16df2addc6a4c908
|
[
"BSD-3-Clause"
] | null | null | null |
tests/core/test_jose/test_jwe.py
|
johnjdailey/authlib
|
de4e4ec735e0a9a57906129f16df2addc6a4c908
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import unittest
from authlib.jose import errors
from authlib.jose import JsonWebEncryption, JWE_ALGORITHMS, JWS_ALGORITHMS
from tests.util import read_file_path
class JWETest(unittest.TestCase):
def test_register_invalid_algorithms(self):
self.assertRaises(
ValueError,
JsonWebEncryption,
['INVALID']
)
jwe = JsonWebEncryption(algorithms=[])
self.assertRaises(
ValueError,
jwe.register_algorithm,
JWS_ALGORITHMS[0]
)
def test_not_enough_segments(self):
s = 'a.b.c'
jwe = JsonWebEncryption(algorithms=JWE_ALGORITHMS)
self.assertRaises(
errors.DecodeError,
jwe.deserialize_compact,
s, None
)
def test_invalid_header(self):
jwe = JsonWebEncryption(algorithms=JWE_ALGORITHMS)
public_key = read_file_path('rsa_public.pem')
self.assertRaises(
errors.MissingAlgorithmError,
jwe.serialize_compact, {}, 'a', public_key
)
self.assertRaises(
errors.UnsupportedAlgorithmError,
jwe.serialize_compact, {'alg': 'invalid'}, 'a', public_key
)
self.assertRaises(
errors.MissingEncryptionAlgorithmError,
jwe.serialize_compact, {'alg': 'RSA-OAEP'}, 'a', public_key
)
self.assertRaises(
errors.UnsupportedEncryptionAlgorithmError,
jwe.serialize_compact, {'alg': 'RSA-OAEP', 'enc': 'invalid'},
'a', public_key
)
self.assertRaises(
errors.UnsupportedCompressionAlgorithmError,
jwe.serialize_compact,
{'alg': 'RSA-OAEP', 'enc': 'A256GCM', 'zip': 'invalid'},
'a', public_key
)
def test_not_supported_alg(self):
public_key = read_file_path('rsa_public.pem')
private_key = read_file_path('rsa_private.pem')
jwe = JsonWebEncryption(algorithms=JWE_ALGORITHMS)
s = jwe.serialize_compact(
{'alg': 'RSA-OAEP', 'enc': 'A256GCM'},
'hello', public_key
)
jwe = JsonWebEncryption(algorithms=['RSA1_5', 'A256GCM'])
self.assertRaises(
errors.UnsupportedAlgorithmError,
jwe.serialize_compact,
{'alg': 'RSA-OAEP', 'enc': 'A256GCM'},
'hello', public_key
)
self.assertRaises(
errors.UnsupportedCompressionAlgorithmError,
jwe.serialize_compact,
{'alg': 'RSA1_5', 'enc': 'A256GCM', 'zip': 'DEF'},
'hello', public_key
)
self.assertRaises(
errors.UnsupportedAlgorithmError,
jwe.deserialize_compact,
s, private_key,
)
jwe = JsonWebEncryption(algorithms=['RSA-OAEP', 'A192GCM'])
self.assertRaises(
errors.UnsupportedEncryptionAlgorithmError,
jwe.serialize_compact,
{'alg': 'RSA-OAEP', 'enc': 'A256GCM'},
'hello', public_key
)
self.assertRaises(
errors.UnsupportedCompressionAlgorithmError,
jwe.serialize_compact,
{'alg': 'RSA-OAEP', 'enc': 'A192GCM', 'zip': 'DEF'},
'hello', public_key
)
self.assertRaises(
errors.UnsupportedEncryptionAlgorithmError,
jwe.deserialize_compact,
s, private_key,
)
def test_compact_rsa(self):
jwe = JsonWebEncryption(algorithms=JWE_ALGORITHMS)
s = jwe.serialize_compact(
{'alg': 'RSA-OAEP', 'enc': 'A256GCM'},
'hello',
read_file_path('rsa_public.pem')
)
data = jwe.deserialize_compact(s, read_file_path('rsa_private.pem'))
header, payload = data['header'], data['payload']
self.assertEqual(payload, b'hello')
self.assertEqual(header['alg'], 'RSA-OAEP')
def test_with_zip_header(self):
jwe = JsonWebEncryption(algorithms=JWE_ALGORITHMS)
s = jwe.serialize_compact(
{'alg': 'RSA-OAEP', 'enc': 'A128CBC-HS256', 'zip': 'DEF'},
'hello',
read_file_path('rsa_public.pem')
)
data = jwe.deserialize_compact(s, read_file_path('rsa_private.pem'))
header, payload = data['header'], data['payload']
self.assertEqual(payload, b'hello')
self.assertEqual(header['alg'], 'RSA-OAEP')
def test_aes_JsonWebEncryption(self):
jwe = JsonWebEncryption(algorithms=JWE_ALGORITHMS)
sizes = [128, 192, 256]
_enc_choices = [
'A128CBC-HS256', 'A192CBC-HS384', 'A256CBC-HS512',
'A128GCM', 'A192GCM', 'A256GCM'
]
for s in sizes:
alg = 'A{}KW'.format(s)
key = os.urandom(s // 8)
for enc in _enc_choices:
protected = {'alg': alg, 'enc': enc}
data = jwe.serialize_compact(protected, b'hello', key)
rv = jwe.deserialize_compact(data, key)
self.assertEqual(rv['payload'], b'hello')
def test_aes_jwe_invalid_key(self):
jwe = JsonWebEncryption(algorithms=JWE_ALGORITHMS)
protected = {'alg': 'A128KW', 'enc': 'A128GCM'}
self.assertRaises(
ValueError,
jwe.serialize_compact,
protected, b'hello', b'invalid-key'
)
def test_rsa_alg(self):
alg = JsonWebEncryption.JWE_AVAILABLE_ALGORITHMS['RSA-OAEP']
pub_key = alg.prepare_key(
read_file_path('rsa_public.pem'))
private_key = alg.prepare_key(
read_file_path('rsa_private.pem'))
cek = (
b'\xb1\xa1\xf4\x80T\x8f\xe1s?\xb4\x03\xffk\x9a\xd4\xf6\x8a\x07'
b'n[p."i/\x82\xcb.z\xea@\xfc'
)
ek = alg.wrap(cek, {}, pub_key)
self.assertEqual(alg.unwrap(ek, {}, private_key), cek)
invalid_ek = b'a' + ek[1:]
self.assertRaises(ValueError, alg.unwrap, invalid_ek, {}, private_key)
def test_aes_gcm_JsonWebEncryption(self):
jwe = JsonWebEncryption(algorithms=JWE_ALGORITHMS)
sizes = [128, 192, 256]
_enc_choices = [
'A128CBC-HS256', 'A192CBC-HS384', 'A256CBC-HS512',
'A128GCM', 'A192GCM', 'A256GCM'
]
for s in sizes:
alg = 'A{}GCMKW'.format(s)
key = os.urandom(s // 8)
for enc in _enc_choices:
protected = {'alg': alg, 'enc': enc}
data = jwe.serialize_compact(protected, b'hello', key)
rv = jwe.deserialize_compact(data, key)
self.assertEqual(rv['payload'], b'hello')
def test_aes_gcm_jwe_invalid_key(self):
jwe = JsonWebEncryption(algorithms=JWE_ALGORITHMS)
protected = {'alg': 'A128GCMKW', 'enc': 'A128GCM'}
self.assertRaises(
ValueError,
jwe.serialize_compact,
protected, b'hello', b'invalid-key'
)
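# Illustrative sketch (not part of the original test module): the round trip the
# tests above exercise, written linearly; the 16-byte random key matches the
# A128KW key-wrap / A128GCM content-encryption pair.
#
#     import os
#     from authlib.jose import JsonWebEncryption, JWE_ALGORITHMS
#     jwe = JsonWebEncryption(algorithms=JWE_ALGORITHMS)
#     key = os.urandom(16)
#     token = jwe.serialize_compact({'alg': 'A128KW', 'enc': 'A128GCM'}, b'hello', key)
#     assert jwe.deserialize_compact(token, key)['payload'] == b'hello'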
| 35.627551
| 78
| 0.573822
|
dd649a4eb9a77d5dac8882593ded91f58fbc0e67
| 1,856
|
py
|
Python
|
test/programytest/dialog/storage/test_file.py
|
whackur/chatbot
|
bb4b4dace89f1f8aae2b6377bf7d2601e66af7a7
|
[
"MIT"
] | 2
|
2018-06-16T09:32:22.000Z
|
2019-07-21T13:16:00.000Z
|
test/programytest/dialog/storage/test_file.py
|
whackur/chatbot
|
bb4b4dace89f1f8aae2b6377bf7d2601e66af7a7
|
[
"MIT"
] | 3
|
2020-07-16T04:00:42.000Z
|
2021-03-31T18:52:22.000Z
|
test/programytest/dialog/storage/test_file.py
|
whackur/chatbot
|
bb4b4dace89f1f8aae2b6377bf7d2601e66af7a7
|
[
"MIT"
] | 4
|
2018-06-29T23:50:44.000Z
|
2020-11-05T08:13:47.000Z
|
import unittest
import os
from programy.dialog.dialog import Sentence, Question, Conversation
from programy.bot import Bot
from programy.brain import Brain
from programy.config.brain.brain import BrainConfiguration
from programy.config.bot.bot import BotConfiguration
from programy.config.bot.filestorage import BotConversationsFileStorageConfiguration
from programy.context import ClientContext
from programytest.aiml_tests.client import TestClient
class ConversationFileStorageTests(unittest.TestCase):
def test_persistence(self):
client = TestClient()
client_context = client.create_client_context("testid")
bot_config = BotConfiguration()
bot_config.conversations._type = "file"
bot_config.conversations._storage = BotConversationsFileStorageConfiguration("test")
bot_config.conversations._storage._dir = os.path.dirname(__file__)
bot_config.conversations._max_histories = 3
client_context.bot = Bot(bot_config)
filename = bot_config.conversations._storage._dir + os.sep + client_context.userid + ".convo"
if os.path.exists(filename):
os.remove(filename)
self.assertFalse(os.path.exists(filename))
conversation = client_context.bot.get_conversation(client_context)
conversation.properties['name'] = "fred"
client_context.bot.save_conversation(client_context.userid)
self.assertTrue(os.path.exists(filename))
test_bot2 = Bot(bot_config)
conversation2 = test_bot2.get_conversation(client_context)
self.assertIsNotNone(conversation2.property('name'))
self.assertEqual('fred', conversation2.property('name'))
self.assertTrue(os.path.exists(filename))
if os.path.exists(filename):
os.remove(filename)
self.assertFalse(os.path.exists(filename))
| 40.347826
| 101
| 0.742457
|
2ab5b656be7a7728dab97a14cf9706c1dcfa22f8
| 2,491
|
py
|
Python
|
dell_projector/main.py
|
Abhiseshan/dell-projector
|
ef869afc9c33a6907160948c6548cad2d96e7341
|
[
"MIT"
] | null | null | null |
dell_projector/main.py
|
Abhiseshan/dell-projector
|
ef869afc9c33a6907160948c6548cad2d96e7341
|
[
"MIT"
] | null | null | null |
dell_projector/main.py
|
Abhiseshan/dell-projector
|
ef869afc9c33a6907160948c6548cad2d96e7341
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
import requests
import socket
import logging
logger = logging.getLogger(__name__)
HOME_PAGE = "/home.htm"
PROTOCOL = "http://"
TIMEOUT = 5
CONTROL_PORT = 41794
ATTRIBUTES = {
"Status:": "STATUS",
"Lamp Hours:": "LAMP_HOURS"
}
COMANDS = {
"on": [0x05, 0x00, 0x06, 0x00, 0x00, 0x03, 0x00, 0x04, 0x00],
"off": [0x05, 0x00, 0x06, 0x00, 0x00, 0x03, 0x00, 0x05, 0x00]
}
class Projector:
def __init__(self, host):
self._host = host
def __callProjectorRest(self):
base_url = PROTOCOL + self._host
request_url = base_url + HOME_PAGE
# use requests.session since we have to get the ATOP cookie to authenticate the calls
session = requests.Session()
try:
# Gets the session cookie
session.get(base_url, timeout=TIMEOUT)
# call the api
logger.debug("Calling home page")
response = session.get(request_url, timeout=TIMEOUT)
return response
except Exception:
logger.error("Failed to connect to projector '%s'", self._host)
return None
def __callProjectorSocket(self, data):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((self._host, CONTROL_PORT))
s.send(bytes(data))
finally:
s.close()
def __parseResponse(self, response):
parsedAttributes = {}
soup = BeautifulSoup(response.text, 'html.parser')
table = soup.find_all('table')[0]
for row in table.find_all('tr'):
columns = row.find_all('td')
attr = columns[0].text
if attr in ATTRIBUTES:
value = columns[1].text
parsedAttributes[ATTRIBUTES[attr]] = value.strip()
return parsedAttributes
def getStatus(self):
statusResponse = self.__callProjectorRest()
if statusResponse is None:
return None
attributes = self.__parseResponse(statusResponse)
return {
"state": attributes["STATUS"] != "Standby",
"attributes": attributes
}
def turnOn(self):
logger.debug("Turning on projector")
self.__callProjectorSocket(COMANDS["on"])
def turnOff(self):
logger.debug("Turning off projector")
self.__callProjectorSocket(COMANDS["off"])
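# Usage sketch (illustrative, not part of the original module); the host below is
# a made-up example address on the local network.
#
#     projector = Projector("192.168.1.42")
#     status = projector.getStatus()      # scrapes status and lamp hours from /home.htm
#     if status is not None and not status["state"]:
#         projector.turnOn()              # raw power-on command on TCP port 41794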
| 29.654762
| 94
| 0.578884
|
fe25062db91ad6bf05dbb4b4a7fc5f1b08ac7f8d
| 11,998
|
py
|
Python
|
kombu/tests/test_entities.py
|
jheld/kombu
|
5ade56f2d52deee2f9ecea0180c2ea56400345be
|
[
"BSD-3-Clause"
] | 1
|
2019-06-22T23:30:30.000Z
|
2019-06-22T23:30:30.000Z
|
kombu/tests/test_entities.py
|
rectalogic/kombu
|
82dc37b4c151a2d4e7eec1f96ada5babc8749e8e
|
[
"BSD-3-Clause"
] | null | null | null |
kombu/tests/test_entities.py
|
rectalogic/kombu
|
82dc37b4c151a2d4e7eec1f96ada5babc8749e8e
|
[
"BSD-3-Clause"
] | 1
|
2019-01-29T00:47:41.000Z
|
2019-01-29T00:47:41.000Z
|
from __future__ import absolute_import
import pickle
from kombu import Connection, Exchange, Producer, Queue, binding
from kombu.exceptions import NotBoundError
from .case import Case, Mock, call
from .mocks import Transport
def get_conn():
return Connection(transport=Transport)
class test_binding(Case):
def test_constructor(self):
x = binding(
Exchange('foo'), 'rkey',
arguments={'barg': 'bval'},
unbind_arguments={'uarg': 'uval'},
)
self.assertEqual(x.exchange, Exchange('foo'))
self.assertEqual(x.routing_key, 'rkey')
self.assertDictEqual(x.arguments, {'barg': 'bval'})
self.assertDictEqual(x.unbind_arguments, {'uarg': 'uval'})
def test_declare(self):
chan = get_conn().channel()
x = binding(Exchange('foo'), 'rkey')
x.declare(chan)
self.assertIn('exchange_declare', chan)
def test_declare_no_exchange(self):
chan = get_conn().channel()
x = binding()
x.declare(chan)
self.assertNotIn('exchange_declare', chan)
def test_bind(self):
chan = get_conn().channel()
x = binding(Exchange('foo'))
x.bind(Exchange('bar')(chan))
self.assertIn('exchange_bind', chan)
def test_unbind(self):
chan = get_conn().channel()
x = binding(Exchange('foo'))
x.unbind(Exchange('bar')(chan))
self.assertIn('exchange_unbind', chan)
def test_repr(self):
b = binding(Exchange('foo'), 'rkey')
self.assertIn('foo', repr(b))
self.assertIn('rkey', repr(b))
class test_Exchange(Case):
def test_bound(self):
exchange = Exchange('foo', 'direct')
self.assertFalse(exchange.is_bound)
self.assertIn('<unbound', repr(exchange))
chan = get_conn().channel()
bound = exchange.bind(chan)
self.assertTrue(bound.is_bound)
self.assertIs(bound.channel, chan)
self.assertIn('bound to chan:%r' % (chan.channel_id, ),
repr(bound))
def test_hash(self):
self.assertEqual(hash(Exchange('a')), hash(Exchange('a')))
self.assertNotEqual(hash(Exchange('a')), hash(Exchange('b')))
def test_can_cache_declaration(self):
self.assertTrue(Exchange('a', durable=True).can_cache_declaration)
self.assertTrue(Exchange('a', durable=False).can_cache_declaration)
self.assertFalse(Exchange('a', auto_delete=True).can_cache_declaration)
self.assertFalse(
Exchange('a',
durable=True,
auto_delete=True
).can_cache_declaration,
)
def test_pickle(self):
e1 = Exchange('foo', 'direct')
e2 = pickle.loads(pickle.dumps(e1))
self.assertEqual(e1, e2)
def test_eq(self):
e1 = Exchange('foo', 'direct')
e2 = Exchange('foo', 'direct')
self.assertEqual(e1, e2)
e3 = Exchange('foo', 'topic')
self.assertNotEqual(e1, e3)
self.assertEqual(e1.__eq__(True), NotImplemented)
def test_revive(self):
exchange = Exchange('foo', 'direct')
conn = get_conn()
chan = conn.channel()
# reviving unbound channel is a noop.
exchange.revive(chan)
self.assertFalse(exchange.is_bound)
self.assertIsNone(exchange._channel)
bound = exchange.bind(chan)
self.assertTrue(bound.is_bound)
self.assertIs(bound.channel, chan)
chan2 = conn.channel()
bound.revive(chan2)
self.assertTrue(bound.is_bound)
self.assertIs(bound._channel, chan2)
def test_assert_is_bound(self):
exchange = Exchange('foo', 'direct')
with self.assertRaises(NotBoundError):
exchange.declare()
conn = get_conn()
chan = conn.channel()
exchange.bind(chan).declare()
self.assertIn('exchange_declare', chan)
def test_set_transient_delivery_mode(self):
exc = Exchange('foo', 'direct', delivery_mode='transient')
self.assertEqual(exc.delivery_mode, Exchange.TRANSIENT_DELIVERY_MODE)
def test_set_passive_mode(self):
exc = Exchange('foo', 'direct', passive=True)
self.assertTrue(exc.passive)
def test_set_persistent_delivery_mode(self):
exc = Exchange('foo', 'direct', delivery_mode='persistent')
self.assertEqual(exc.delivery_mode, Exchange.PERSISTENT_DELIVERY_MODE)
def test_bind_at_instantiation(self):
self.assertTrue(Exchange('foo', channel=get_conn().channel()).is_bound)
def test_create_message(self):
chan = get_conn().channel()
Exchange('foo', channel=chan).Message({'foo': 'bar'})
self.assertIn('prepare_message', chan)
def test_publish(self):
chan = get_conn().channel()
Exchange('foo', channel=chan).publish('the quick brown fox')
self.assertIn('basic_publish', chan)
def test_delete(self):
chan = get_conn().channel()
Exchange('foo', channel=chan).delete()
self.assertIn('exchange_delete', chan)
def test__repr__(self):
b = Exchange('foo', 'topic')
self.assertIn('foo(topic)', repr(b))
self.assertIn('Exchange', repr(b))
def test_bind_to(self):
chan = get_conn().channel()
foo = Exchange('foo', 'topic')
bar = Exchange('bar', 'topic')
foo(chan).bind_to(bar)
self.assertIn('exchange_bind', chan)
def test_bind_to_by_name(self):
chan = get_conn().channel()
foo = Exchange('foo', 'topic')
foo(chan).bind_to('bar')
self.assertIn('exchange_bind', chan)
def test_unbind_from(self):
chan = get_conn().channel()
foo = Exchange('foo', 'topic')
bar = Exchange('bar', 'topic')
foo(chan).unbind_from(bar)
self.assertIn('exchange_unbind', chan)
def test_unbind_from_by_name(self):
chan = get_conn().channel()
foo = Exchange('foo', 'topic')
foo(chan).unbind_from('bar')
self.assertIn('exchange_unbind', chan)
class test_Queue(Case):
def setup(self):
self.exchange = Exchange('foo', 'direct')
def test_hash(self):
self.assertEqual(hash(Queue('a')), hash(Queue('a')))
self.assertNotEqual(hash(Queue('a')), hash(Queue('b')))
def test_repr_with_bindings(self):
ex = Exchange('foo')
x = Queue('foo', bindings=[ex.binding('A'), ex.binding('B')])
self.assertTrue(repr(x))
def test_anonymous(self):
chan = Mock()
x = Queue(bindings=[binding(Exchange('foo'), 'rkey')])
chan.queue_declare.return_value = 'generated', 0, 0
xx = x(chan)
xx.declare()
self.assertEqual(xx.name, 'generated')
def test_basic_get__accept_disallowed(self):
conn = Connection('memory://')
q = Queue('foo', exchange=self.exchange)
p = Producer(conn)
p.publish(
{'complex': object()},
declare=[q], exchange=self.exchange, serializer='pickle',
)
message = q(conn).get(no_ack=True)
self.assertIsNotNone(message)
with self.assertRaises(q.ContentDisallowed):
message.decode()
def test_basic_get__accept_allowed(self):
conn = Connection('memory://')
q = Queue('foo', exchange=self.exchange)
p = Producer(conn)
p.publish(
{'complex': object()},
declare=[q], exchange=self.exchange, serializer='pickle',
)
message = q(conn).get(accept=['pickle'], no_ack=True)
self.assertIsNotNone(message)
payload = message.decode()
self.assertTrue(payload['complex'])
def test_when_bound_but_no_exchange(self):
q = Queue('a')
q.exchange = None
self.assertIsNone(q.when_bound())
def test_declare_but_no_exchange(self):
q = Queue('a')
q.queue_declare = Mock()
q.queue_bind = Mock()
q.exchange = None
q.declare()
q.queue_declare.assert_called_with(False, passive=False)
def test_bind_to_when_name(self):
chan = Mock()
q = Queue('a')
q(chan).bind_to('ex')
self.assertTrue(chan.queue_bind.called)
def test_get_when_no_m2p(self):
chan = Mock()
q = Queue('a')(chan)
chan.message_to_python = None
self.assertTrue(q.get())
def test_multiple_bindings(self):
chan = Mock()
q = Queue('mul', [
binding(Exchange('mul1'), 'rkey1'),
binding(Exchange('mul2'), 'rkey2'),
binding(Exchange('mul3'), 'rkey3'),
])
q(chan).declare()
self.assertIn(
call(
nowait=False,
exchange='mul1',
auto_delete=False,
passive=False,
arguments=None,
type='direct',
durable=True,
),
chan.exchange_declare.call_args_list,
)
def test_can_cache_declaration(self):
self.assertTrue(Queue('a', durable=True).can_cache_declaration)
self.assertTrue(Queue('a', durable=False).can_cache_declaration)
def test_eq(self):
q1 = Queue('xxx', Exchange('xxx', 'direct'), 'xxx')
q2 = Queue('xxx', Exchange('xxx', 'direct'), 'xxx')
self.assertEqual(q1, q2)
self.assertEqual(q1.__eq__(True), NotImplemented)
q3 = Queue('yyy', Exchange('xxx', 'direct'), 'xxx')
self.assertNotEqual(q1, q3)
def test_exclusive_implies_auto_delete(self):
self.assertTrue(
Queue('foo', self.exchange, exclusive=True).auto_delete,
)
def test_binds_at_instantiation(self):
self.assertTrue(Queue('foo', self.exchange,
channel=get_conn().channel()).is_bound)
def test_also_binds_exchange(self):
chan = get_conn().channel()
b = Queue('foo', self.exchange)
self.assertFalse(b.is_bound)
self.assertFalse(b.exchange.is_bound)
b = b.bind(chan)
self.assertTrue(b.is_bound)
self.assertTrue(b.exchange.is_bound)
self.assertIs(b.channel, b.exchange.channel)
self.assertIsNot(b.exchange, self.exchange)
def test_declare(self):
chan = get_conn().channel()
b = Queue('foo', self.exchange, 'foo', channel=chan)
self.assertTrue(b.is_bound)
b.declare()
self.assertIn('exchange_declare', chan)
self.assertIn('queue_declare', chan)
self.assertIn('queue_bind', chan)
def test_get(self):
b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
b.get()
self.assertIn('basic_get', b.channel)
def test_purge(self):
b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
b.purge()
self.assertIn('queue_purge', b.channel)
def test_consume(self):
b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
b.consume('fifafo', None)
self.assertIn('basic_consume', b.channel)
def test_cancel(self):
b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
b.cancel('fifafo')
self.assertIn('basic_cancel', b.channel)
def test_delete(self):
b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
b.delete()
self.assertIn('queue_delete', b.channel)
def test_queue_unbind(self):
b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
b.queue_unbind()
self.assertIn('queue_unbind', b.channel)
def test_as_dict(self):
q = Queue('foo', self.exchange, 'rk')
d = q.as_dict(recurse=True)
self.assertEqual(d['exchange']['name'], self.exchange.name)
def test__repr__(self):
b = Queue('foo', self.exchange, 'foo')
self.assertIn('foo', repr(b))
self.assertIn('Queue', repr(b))
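# Illustrative sketch (not part of the original test module): the declaration
# pattern these tests exercise, run against kombu's in-memory transport instead
# of the mocked one.
#
#     conn = Connection('memory://')
#     media = Exchange('media', 'direct', durable=True)
#     video_queue = Queue('video', media, 'video', channel=conn.channel())
#     video_queue.declare()   # declares the exchange, the queue and the binding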
| 32.080214
| 79
| 0.597433
|
3fbaff2c4928d0324eaa61312102075823f7dd96
| 627
|
py
|
Python
|
ros/src/twist_controller/lowpass.py
|
edufford/CarND-Capstone
|
dd466a3e9e1eeb3c3f4b37175b9694600d8cc8c8
|
[
"MIT"
] | 9
|
2018-03-07T01:38:31.000Z
|
2020-05-14T14:22:52.000Z
|
ros/src/twist_controller/lowpass.py
|
edufford/CarND-Capstone
|
dd466a3e9e1eeb3c3f4b37175b9694600d8cc8c8
|
[
"MIT"
] | 95
|
2018-02-27T11:37:30.000Z
|
2019-02-09T20:37:03.000Z
|
ros/src/twist_controller/lowpass.py
|
edufford/CarND-Capstone
|
dd466a3e9e1eeb3c3f4b37175b9694600d8cc8c8
|
[
"MIT"
] | 4
|
2018-02-26T21:20:17.000Z
|
2019-08-09T15:50:53.000Z
|
class LowPassFilter(object):
def __init__(self, tau, ts):
self.a = 1. / (tau / ts + 1.)
self.b = tau / ts / (tau / ts + 1.)
self.last_val = 0.
# Initialize from last_val = 0 instead of allowing an initial step
# change on activation when dbw_enabled turns ON
self.ready = True
def get(self):
return self.last_val
def filt(self, val):
if self.ready:
val = self.a * val + self.b * self.last_val
else:
self.ready = True
self.last_val = val
return val
def reset(self):
self.ready = False
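# Worked example (illustrative, not part of the original file): with tau = 0.5 s
# and ts = 0.02 s, a = 1 / (0.5/0.02 + 1) = 1/26 ~= 0.038 and b = 25/26 ~= 0.962,
# so each filt() call keeps ~96% of the previous output and blends in ~4% of the
# new sample; `raw_velocities` below is an assumed list of noisy measurements.
#
#     lpf = LowPassFilter(tau=0.5, ts=0.02)
#     smoothed = [lpf.filt(v) for v in raw_velocities]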
| 23.222222
| 74
| 0.54067
|
10c634c1a0d842d616a780fae1e7ea6edafe0b34
| 27,598
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20210201/get_application_gateway.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20210201/get_application_gateway.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20210201/get_application_gateway.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetApplicationGatewayResult',
'AwaitableGetApplicationGatewayResult',
'get_application_gateway',
]
@pulumi.output_type
class GetApplicationGatewayResult:
"""
Application gateway resource.
"""
def __init__(__self__, authentication_certificates=None, autoscale_configuration=None, backend_address_pools=None, backend_http_settings_collection=None, custom_error_configurations=None, enable_fips=None, enable_http2=None, etag=None, firewall_policy=None, force_firewall_policy_association=None, frontend_ip_configurations=None, frontend_ports=None, gateway_ip_configurations=None, http_listeners=None, id=None, identity=None, location=None, name=None, operational_state=None, private_endpoint_connections=None, private_link_configurations=None, probes=None, provisioning_state=None, redirect_configurations=None, request_routing_rules=None, resource_guid=None, rewrite_rule_sets=None, sku=None, ssl_certificates=None, ssl_policy=None, ssl_profiles=None, tags=None, trusted_client_certificates=None, trusted_root_certificates=None, type=None, url_path_maps=None, web_application_firewall_configuration=None, zones=None):
if authentication_certificates and not isinstance(authentication_certificates, list):
raise TypeError("Expected argument 'authentication_certificates' to be a list")
pulumi.set(__self__, "authentication_certificates", authentication_certificates)
if autoscale_configuration and not isinstance(autoscale_configuration, dict):
raise TypeError("Expected argument 'autoscale_configuration' to be a dict")
pulumi.set(__self__, "autoscale_configuration", autoscale_configuration)
if backend_address_pools and not isinstance(backend_address_pools, list):
raise TypeError("Expected argument 'backend_address_pools' to be a list")
pulumi.set(__self__, "backend_address_pools", backend_address_pools)
if backend_http_settings_collection and not isinstance(backend_http_settings_collection, list):
raise TypeError("Expected argument 'backend_http_settings_collection' to be a list")
pulumi.set(__self__, "backend_http_settings_collection", backend_http_settings_collection)
if custom_error_configurations and not isinstance(custom_error_configurations, list):
raise TypeError("Expected argument 'custom_error_configurations' to be a list")
pulumi.set(__self__, "custom_error_configurations", custom_error_configurations)
if enable_fips and not isinstance(enable_fips, bool):
raise TypeError("Expected argument 'enable_fips' to be a bool")
pulumi.set(__self__, "enable_fips", enable_fips)
if enable_http2 and not isinstance(enable_http2, bool):
raise TypeError("Expected argument 'enable_http2' to be a bool")
pulumi.set(__self__, "enable_http2", enable_http2)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if firewall_policy and not isinstance(firewall_policy, dict):
raise TypeError("Expected argument 'firewall_policy' to be a dict")
pulumi.set(__self__, "firewall_policy", firewall_policy)
if force_firewall_policy_association and not isinstance(force_firewall_policy_association, bool):
raise TypeError("Expected argument 'force_firewall_policy_association' to be a bool")
pulumi.set(__self__, "force_firewall_policy_association", force_firewall_policy_association)
if frontend_ip_configurations and not isinstance(frontend_ip_configurations, list):
raise TypeError("Expected argument 'frontend_ip_configurations' to be a list")
pulumi.set(__self__, "frontend_ip_configurations", frontend_ip_configurations)
if frontend_ports and not isinstance(frontend_ports, list):
raise TypeError("Expected argument 'frontend_ports' to be a list")
pulumi.set(__self__, "frontend_ports", frontend_ports)
if gateway_ip_configurations and not isinstance(gateway_ip_configurations, list):
raise TypeError("Expected argument 'gateway_ip_configurations' to be a list")
pulumi.set(__self__, "gateway_ip_configurations", gateway_ip_configurations)
if http_listeners and not isinstance(http_listeners, list):
raise TypeError("Expected argument 'http_listeners' to be a list")
pulumi.set(__self__, "http_listeners", http_listeners)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if operational_state and not isinstance(operational_state, str):
raise TypeError("Expected argument 'operational_state' to be a str")
pulumi.set(__self__, "operational_state", operational_state)
if private_endpoint_connections and not isinstance(private_endpoint_connections, list):
raise TypeError("Expected argument 'private_endpoint_connections' to be a list")
pulumi.set(__self__, "private_endpoint_connections", private_endpoint_connections)
if private_link_configurations and not isinstance(private_link_configurations, list):
raise TypeError("Expected argument 'private_link_configurations' to be a list")
pulumi.set(__self__, "private_link_configurations", private_link_configurations)
if probes and not isinstance(probes, list):
raise TypeError("Expected argument 'probes' to be a list")
pulumi.set(__self__, "probes", probes)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if redirect_configurations and not isinstance(redirect_configurations, list):
raise TypeError("Expected argument 'redirect_configurations' to be a list")
pulumi.set(__self__, "redirect_configurations", redirect_configurations)
if request_routing_rules and not isinstance(request_routing_rules, list):
raise TypeError("Expected argument 'request_routing_rules' to be a list")
pulumi.set(__self__, "request_routing_rules", request_routing_rules)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if rewrite_rule_sets and not isinstance(rewrite_rule_sets, list):
raise TypeError("Expected argument 'rewrite_rule_sets' to be a list")
pulumi.set(__self__, "rewrite_rule_sets", rewrite_rule_sets)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if ssl_certificates and not isinstance(ssl_certificates, list):
raise TypeError("Expected argument 'ssl_certificates' to be a list")
pulumi.set(__self__, "ssl_certificates", ssl_certificates)
if ssl_policy and not isinstance(ssl_policy, dict):
raise TypeError("Expected argument 'ssl_policy' to be a dict")
pulumi.set(__self__, "ssl_policy", ssl_policy)
if ssl_profiles and not isinstance(ssl_profiles, list):
raise TypeError("Expected argument 'ssl_profiles' to be a list")
pulumi.set(__self__, "ssl_profiles", ssl_profiles)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if trusted_client_certificates and not isinstance(trusted_client_certificates, list):
raise TypeError("Expected argument 'trusted_client_certificates' to be a list")
pulumi.set(__self__, "trusted_client_certificates", trusted_client_certificates)
if trusted_root_certificates and not isinstance(trusted_root_certificates, list):
raise TypeError("Expected argument 'trusted_root_certificates' to be a list")
pulumi.set(__self__, "trusted_root_certificates", trusted_root_certificates)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if url_path_maps and not isinstance(url_path_maps, list):
raise TypeError("Expected argument 'url_path_maps' to be a list")
pulumi.set(__self__, "url_path_maps", url_path_maps)
if web_application_firewall_configuration and not isinstance(web_application_firewall_configuration, dict):
raise TypeError("Expected argument 'web_application_firewall_configuration' to be a dict")
pulumi.set(__self__, "web_application_firewall_configuration", web_application_firewall_configuration)
if zones and not isinstance(zones, list):
raise TypeError("Expected argument 'zones' to be a list")
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter(name="authenticationCertificates")
def authentication_certificates(self) -> Optional[Sequence['outputs.ApplicationGatewayAuthenticationCertificateResponse']]:
"""
Authentication certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "authentication_certificates")
@property
@pulumi.getter(name="autoscaleConfiguration")
def autoscale_configuration(self) -> Optional['outputs.ApplicationGatewayAutoscaleConfigurationResponse']:
"""
Autoscale Configuration.
"""
return pulumi.get(self, "autoscale_configuration")
@property
@pulumi.getter(name="backendAddressPools")
def backend_address_pools(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendAddressPoolResponse']]:
"""
Backend address pool of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "backend_address_pools")
@property
@pulumi.getter(name="backendHttpSettingsCollection")
def backend_http_settings_collection(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendHttpSettingsResponse']]:
"""
Backend http settings of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "backend_http_settings_collection")
@property
@pulumi.getter(name="customErrorConfigurations")
def custom_error_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayCustomErrorResponse']]:
"""
Custom error configurations of the application gateway resource.
"""
return pulumi.get(self, "custom_error_configurations")
@property
@pulumi.getter(name="enableFips")
def enable_fips(self) -> Optional[bool]:
"""
Whether FIPS is enabled on the application gateway resource.
"""
return pulumi.get(self, "enable_fips")
@property
@pulumi.getter(name="enableHttp2")
def enable_http2(self) -> Optional[bool]:
"""
Whether HTTP2 is enabled on the application gateway resource.
"""
return pulumi.get(self, "enable_http2")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="firewallPolicy")
def firewall_policy(self) -> Optional['outputs.SubResourceResponse']:
"""
Reference to the FirewallPolicy resource.
"""
return pulumi.get(self, "firewall_policy")
@property
@pulumi.getter(name="forceFirewallPolicyAssociation")
def force_firewall_policy_association(self) -> Optional[bool]:
"""
        If true, associates a firewall policy with an application gateway regardless of whether the policy differs from the WAF Config.
"""
return pulumi.get(self, "force_firewall_policy_association")
@property
@pulumi.getter(name="frontendIPConfigurations")
def frontend_ip_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayFrontendIPConfigurationResponse']]:
"""
Frontend IP addresses of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "frontend_ip_configurations")
@property
@pulumi.getter(name="frontendPorts")
def frontend_ports(self) -> Optional[Sequence['outputs.ApplicationGatewayFrontendPortResponse']]:
"""
Frontend ports of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "frontend_ports")
@property
@pulumi.getter(name="gatewayIPConfigurations")
def gateway_ip_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayIPConfigurationResponse']]:
"""
Subnets of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "gateway_ip_configurations")
@property
@pulumi.getter(name="httpListeners")
def http_listeners(self) -> Optional[Sequence['outputs.ApplicationGatewayHttpListenerResponse']]:
"""
Http listeners of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "http_listeners")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedServiceIdentityResponse']:
"""
The identity of the application gateway, if configured.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="operationalState")
def operational_state(self) -> str:
"""
Operational state of the application gateway resource.
"""
return pulumi.get(self, "operational_state")
@property
@pulumi.getter(name="privateEndpointConnections")
def private_endpoint_connections(self) -> Sequence['outputs.ApplicationGatewayPrivateEndpointConnectionResponse']:
"""
Private Endpoint connections on application gateway.
"""
return pulumi.get(self, "private_endpoint_connections")
@property
@pulumi.getter(name="privateLinkConfigurations")
def private_link_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayPrivateLinkConfigurationResponse']]:
"""
PrivateLink configurations on application gateway.
"""
return pulumi.get(self, "private_link_configurations")
@property
@pulumi.getter
def probes(self) -> Optional[Sequence['outputs.ApplicationGatewayProbeResponse']]:
"""
Probes of the application gateway resource.
"""
return pulumi.get(self, "probes")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the application gateway resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="redirectConfigurations")
def redirect_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayRedirectConfigurationResponse']]:
"""
Redirect configurations of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "redirect_configurations")
@property
@pulumi.getter(name="requestRoutingRules")
def request_routing_rules(self) -> Optional[Sequence['outputs.ApplicationGatewayRequestRoutingRuleResponse']]:
"""
Request routing rules of the application gateway resource.
"""
return pulumi.get(self, "request_routing_rules")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> str:
"""
The resource GUID property of the application gateway resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter(name="rewriteRuleSets")
def rewrite_rule_sets(self) -> Optional[Sequence['outputs.ApplicationGatewayRewriteRuleSetResponse']]:
"""
Rewrite rules for the application gateway resource.
"""
return pulumi.get(self, "rewrite_rule_sets")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.ApplicationGatewaySkuResponse']:
"""
SKU of the application gateway resource.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="sslCertificates")
def ssl_certificates(self) -> Optional[Sequence['outputs.ApplicationGatewaySslCertificateResponse']]:
"""
SSL certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "ssl_certificates")
@property
@pulumi.getter(name="sslPolicy")
def ssl_policy(self) -> Optional['outputs.ApplicationGatewaySslPolicyResponse']:
"""
SSL policy of the application gateway resource.
"""
return pulumi.get(self, "ssl_policy")
@property
@pulumi.getter(name="sslProfiles")
def ssl_profiles(self) -> Optional[Sequence['outputs.ApplicationGatewaySslProfileResponse']]:
"""
SSL profiles of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "ssl_profiles")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="trustedClientCertificates")
def trusted_client_certificates(self) -> Optional[Sequence['outputs.ApplicationGatewayTrustedClientCertificateResponse']]:
"""
Trusted client certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "trusted_client_certificates")
@property
@pulumi.getter(name="trustedRootCertificates")
def trusted_root_certificates(self) -> Optional[Sequence['outputs.ApplicationGatewayTrustedRootCertificateResponse']]:
"""
Trusted Root certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "trusted_root_certificates")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="urlPathMaps")
def url_path_maps(self) -> Optional[Sequence['outputs.ApplicationGatewayUrlPathMapResponse']]:
"""
URL path map of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "url_path_maps")
@property
@pulumi.getter(name="webApplicationFirewallConfiguration")
def web_application_firewall_configuration(self) -> Optional['outputs.ApplicationGatewayWebApplicationFirewallConfigurationResponse']:
"""
Web application firewall configuration.
"""
return pulumi.get(self, "web_application_firewall_configuration")
@property
@pulumi.getter
def zones(self) -> Optional[Sequence[str]]:
"""
A list of availability zones denoting where the resource needs to come from.
"""
return pulumi.get(self, "zones")
class AwaitableGetApplicationGatewayResult(GetApplicationGatewayResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetApplicationGatewayResult(
authentication_certificates=self.authentication_certificates,
autoscale_configuration=self.autoscale_configuration,
backend_address_pools=self.backend_address_pools,
backend_http_settings_collection=self.backend_http_settings_collection,
custom_error_configurations=self.custom_error_configurations,
enable_fips=self.enable_fips,
enable_http2=self.enable_http2,
etag=self.etag,
firewall_policy=self.firewall_policy,
force_firewall_policy_association=self.force_firewall_policy_association,
frontend_ip_configurations=self.frontend_ip_configurations,
frontend_ports=self.frontend_ports,
gateway_ip_configurations=self.gateway_ip_configurations,
http_listeners=self.http_listeners,
id=self.id,
identity=self.identity,
location=self.location,
name=self.name,
operational_state=self.operational_state,
private_endpoint_connections=self.private_endpoint_connections,
private_link_configurations=self.private_link_configurations,
probes=self.probes,
provisioning_state=self.provisioning_state,
redirect_configurations=self.redirect_configurations,
request_routing_rules=self.request_routing_rules,
resource_guid=self.resource_guid,
rewrite_rule_sets=self.rewrite_rule_sets,
sku=self.sku,
ssl_certificates=self.ssl_certificates,
ssl_policy=self.ssl_policy,
ssl_profiles=self.ssl_profiles,
tags=self.tags,
trusted_client_certificates=self.trusted_client_certificates,
trusted_root_certificates=self.trusted_root_certificates,
type=self.type,
url_path_maps=self.url_path_maps,
web_application_firewall_configuration=self.web_application_firewall_configuration,
zones=self.zones)
def get_application_gateway(application_gateway_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApplicationGatewayResult:
"""
Application gateway resource.
:param str application_gateway_name: The name of the application gateway.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['applicationGatewayName'] = application_gateway_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20210201:getApplicationGateway', __args__, opts=opts, typ=GetApplicationGatewayResult).value
return AwaitableGetApplicationGatewayResult(
authentication_certificates=__ret__.authentication_certificates,
autoscale_configuration=__ret__.autoscale_configuration,
backend_address_pools=__ret__.backend_address_pools,
backend_http_settings_collection=__ret__.backend_http_settings_collection,
custom_error_configurations=__ret__.custom_error_configurations,
enable_fips=__ret__.enable_fips,
enable_http2=__ret__.enable_http2,
etag=__ret__.etag,
firewall_policy=__ret__.firewall_policy,
force_firewall_policy_association=__ret__.force_firewall_policy_association,
frontend_ip_configurations=__ret__.frontend_ip_configurations,
frontend_ports=__ret__.frontend_ports,
gateway_ip_configurations=__ret__.gateway_ip_configurations,
http_listeners=__ret__.http_listeners,
id=__ret__.id,
identity=__ret__.identity,
location=__ret__.location,
name=__ret__.name,
operational_state=__ret__.operational_state,
private_endpoint_connections=__ret__.private_endpoint_connections,
private_link_configurations=__ret__.private_link_configurations,
probes=__ret__.probes,
provisioning_state=__ret__.provisioning_state,
redirect_configurations=__ret__.redirect_configurations,
request_routing_rules=__ret__.request_routing_rules,
resource_guid=__ret__.resource_guid,
rewrite_rule_sets=__ret__.rewrite_rule_sets,
sku=__ret__.sku,
ssl_certificates=__ret__.ssl_certificates,
ssl_policy=__ret__.ssl_policy,
ssl_profiles=__ret__.ssl_profiles,
tags=__ret__.tags,
trusted_client_certificates=__ret__.trusted_client_certificates,
trusted_root_certificates=__ret__.trusted_root_certificates,
type=__ret__.type,
url_path_maps=__ret__.url_path_maps,
web_application_firewall_configuration=__ret__.web_application_firewall_configuration,
zones=__ret__.zones)
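# --- Editor's illustrative usage sketch (not part of the generated SDK module) ---
# Shows how the data source defined above could be called from a Pulumi program.
# The resource group and gateway names are placeholders, and a configured
# azure-native provider is assumed.
def _example_lookup() -> AwaitableGetApplicationGatewayResult:
    """Look up an existing gateway and return the typed result (sketch only)."""
    return get_application_gateway(
        application_gateway_name="example-appgw",  # placeholder name
        resource_group_name="example-rg",          # placeholder name
    )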
| 50.361314
| 926
| 0.714545
|
fe1cdcd1f163c4b090e05f89313e48818b2fd276
| 4,963
|
py
|
Python
|
tests/parse_shebang_test.py
|
mmabey/pre-commit
|
7728162d7a47f387187e379b8011b224b168555d
|
[
"MIT"
] | 1
|
2022-03-17T22:18:16.000Z
|
2022-03-17T22:18:16.000Z
|
tests/parse_shebang_test.py
|
mmabey/pre-commit
|
7728162d7a47f387187e379b8011b224b168555d
|
[
"MIT"
] | null | null | null |
tests/parse_shebang_test.py
|
mmabey/pre-commit
|
7728162d7a47f387187e379b8011b224b168555d
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import unicode_literals
import contextlib
import distutils.spawn
import io
import os
import sys
import pytest
from pre_commit import parse_shebang
from pre_commit.envcontext import envcontext
from pre_commit.envcontext import Var
from pre_commit.util import make_executable
def test_file_doesnt_exist():
assert parse_shebang.parse_filename('herp derp derp') == ()
def test_simple_case(tmpdir):
x = tmpdir.join('f')
x.write_text('#!/usr/bin/env python', encoding='UTF-8')
make_executable(x.strpath)
assert parse_shebang.parse_filename(x.strpath) == ('python',)
def test_find_executable_full_path():
assert parse_shebang.find_executable(sys.executable) == sys.executable
def test_find_executable_on_path():
expected = distutils.spawn.find_executable('echo')
assert parse_shebang.find_executable('echo') == expected
def test_find_executable_not_found_none():
assert parse_shebang.find_executable('not-a-real-executable') is None
def write_executable(shebang, filename='run'):
os.mkdir('bin')
path = os.path.join('bin', filename)
with io.open(path, 'w') as f:
f.write('#!{}'.format(shebang))
make_executable(path)
return path
@contextlib.contextmanager
def bin_on_path():
bindir = os.path.join(os.getcwd(), 'bin')
with envcontext((('PATH', (bindir, os.pathsep, Var('PATH'))),)):
yield
def test_find_executable_path_added(in_tmpdir):
path = os.path.abspath(write_executable('/usr/bin/env sh'))
assert parse_shebang.find_executable('run') is None
with bin_on_path():
assert parse_shebang.find_executable('run') == path
def test_find_executable_path_ext(in_tmpdir):
"""Windows exports PATHEXT as a list of extensions to automatically add
to executables when doing PATH searching.
"""
exe_path = os.path.abspath(
write_executable('/usr/bin/env sh', filename='run.myext'),
)
env_path = {'PATH': os.path.dirname(exe_path)}
env_path_ext = dict(env_path, PATHEXT=os.pathsep.join(('.exe', '.myext')))
assert parse_shebang.find_executable('run') is None
assert parse_shebang.find_executable('run', _environ=env_path) is None
ret = parse_shebang.find_executable('run.myext', _environ=env_path)
assert ret == exe_path
ret = parse_shebang.find_executable('run', _environ=env_path_ext)
assert ret == exe_path
def test_normexe_does_not_exist():
with pytest.raises(OSError) as excinfo:
parse_shebang.normexe('i-dont-exist-lol')
assert excinfo.value.args == ('Executable `i-dont-exist-lol` not found',)
def test_normexe_does_not_exist_sep():
with pytest.raises(OSError) as excinfo:
parse_shebang.normexe('./i-dont-exist-lol')
assert excinfo.value.args == ('Executable `./i-dont-exist-lol` not found',)
@pytest.mark.xfail(os.name == 'nt', reason='posix only')
def test_normexe_not_executable(tmpdir): # pragma: windows no cover
tmpdir.join('exe').ensure()
with tmpdir.as_cwd(), pytest.raises(OSError) as excinfo:
parse_shebang.normexe('./exe')
assert excinfo.value.args == ('Executable `./exe` is not executable',)
def test_normexe_is_a_directory(tmpdir):
with tmpdir.as_cwd():
tmpdir.join('exe').ensure_dir()
exe = os.path.join('.', 'exe')
with pytest.raises(OSError) as excinfo:
parse_shebang.normexe(exe)
msg, = excinfo.value.args
assert msg == 'Executable `{}` is a directory'.format(exe)
def test_normexe_already_full_path():
assert parse_shebang.normexe(sys.executable) == sys.executable
def test_normexe_gives_full_path():
expected = distutils.spawn.find_executable('echo')
assert parse_shebang.normexe('echo') == expected
assert os.sep in expected
def test_normalize_cmd_trivial():
cmd = (distutils.spawn.find_executable('echo'), 'hi')
assert parse_shebang.normalize_cmd(cmd) == cmd
def test_normalize_cmd_PATH():
cmd = ('python', '--version')
expected = (distutils.spawn.find_executable('python'), '--version')
assert parse_shebang.normalize_cmd(cmd) == expected
def test_normalize_cmd_shebang(in_tmpdir):
python = distutils.spawn.find_executable('python').replace(os.sep, '/')
path = write_executable(python)
assert parse_shebang.normalize_cmd((path,)) == (python, path)
def test_normalize_cmd_PATH_shebang_full_path(in_tmpdir):
python = distutils.spawn.find_executable('python').replace(os.sep, '/')
path = write_executable(python)
with bin_on_path():
ret = parse_shebang.normalize_cmd(('run',))
assert ret == (python, os.path.abspath(path))
def test_normalize_cmd_PATH_shebang_PATH(in_tmpdir):
python = distutils.spawn.find_executable('python')
path = write_executable('/usr/bin/env python')
with bin_on_path():
ret = parse_shebang.normalize_cmd(('run',))
assert ret == (python, os.path.abspath(path))
| 32.437908
| 79
| 0.71348
|
9874adf2d9213b130e6dd06336a13134fbad3466
| 859
|
py
|
Python
|
lib/dataset/preprocess_utils/lane_utils.py
|
decisionforce/mmTransformer
|
be25d26118d2dfdac72b1d1e0cf6cbf14f7f4a0b
|
[
"Apache-2.0"
] | 199
|
2021-03-23T06:10:50.000Z
|
2022-03-31T08:23:00.000Z
|
lib/dataset/preprocess_utils/lane_utils.py
|
yuzhouxianzhi/mmTransformer
|
be25d26118d2dfdac72b1d1e0cf6cbf14f7f4a0b
|
[
"Apache-2.0"
] | 16
|
2021-04-12T12:48:46.000Z
|
2022-03-10T14:11:26.000Z
|
lib/dataset/preprocess_utils/lane_utils.py
|
yuzhouxianzhi/mmTransformer
|
be25d26118d2dfdac72b1d1e0cf6cbf14f7f4a0b
|
[
"Apache-2.0"
] | 23
|
2021-03-29T01:37:56.000Z
|
2022-03-30T01:48:41.000Z
|
import os
import numpy as np
# Only the 'nearby' mode is supported in our implementation
def get_nearby_lane_feature_ls(am, agent_df, obs_len, city_name, lane_radius, norm_center, has_attr=False, mode='nearby', query_bbox=None):
'''
compute lane features
args:
norm_center: np.ndarray
        mode: 'nearby' returns nearby lanes within the radius; 'rect' returns lanes within the query bbox
**kwargs: query_bbox= List[int, int, int, int]
returns:
        list of lane segment features, each formatted as [centerline, is_intersection, turn_direction, is_traffic_control, lane_id,
        predecessor_lanes, successor_lanes, adjacent_lanes]
'''
query_x, query_y = agent_df[['X', 'Y']].values[obs_len-1]
nearby_lane_ids = am.get_lane_ids_in_xy_bbox(
query_x, query_y, city_name, lane_radius)
return nearby_lane_ids
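# --- Editor's illustrative usage sketch (not part of the original module) ---
# Demonstrates one way the helper above might be called. The ArgoverseMap import
# and the agent_df layout (an Argoverse trajectory dataframe with 'X'/'Y' columns)
# are assumptions inferred from the function body, not taken from this repository.
def _example_query(agent_df, obs_len=20, lane_radius=30.0):
    from argoverse.map_representation.map_api import ArgoverseMap  # assumed dependency
    am = ArgoverseMap()
    return get_nearby_lane_feature_ls(
        am, agent_df, obs_len, city_name='PIT',  # 'PIT' is one of the Argoverse city codes
        lane_radius=lane_radius, norm_center=np.zeros(2), mode='nearby')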
| 37.347826
| 139
| 0.715949
|
eb7ec3447ebf724a3f7d1580af6d37a9a8ebaf62
| 17,522
|
py
|
Python
|
planar_mpc/main.py
|
enhatem/quadrotor_mpc_acados
|
9ca50ecc0a852ba5f9464df0ccd5d40e3ebfc295
|
[
"Apache-2.0"
] | null | null | null |
planar_mpc/main.py
|
enhatem/quadrotor_mpc_acados
|
9ca50ecc0a852ba5f9464df0ccd5d40e3ebfc295
|
[
"Apache-2.0"
] | null | null | null |
planar_mpc/main.py
|
enhatem/quadrotor_mpc_acados
|
9ca50ecc0a852ba5f9464df0ccd5d40e3ebfc295
|
[
"Apache-2.0"
] | 1
|
2021-12-29T03:37:01.000Z
|
2021-12-29T03:37:01.000Z
|
from extended_kalman_filter.setup_ekf import setup_ekf
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
from acados_settings import acados_settings
from extended_kalman_filter.ekf import *
from extended_kalman_filter.jacobians import *
from plotFnc import *
from utils import *
from trajectory import *
# mpc and simulation parameters
Tf = 1 # prediction horizon
N = 100 # number of discretization steps
Ts = Tf / N # sampling time[s]
T_hover = 2 # hovering time[s]
T_traj = 20.00 # trajectory time[s]
T = T_hover + T_traj # total simulation time
# constants
g = 9.81 # m/s^2
# bounds on phi
bound_on_phi = False
# bounds on y and z
bound_on_y_z = False
# measurement noise bool (If True, make sure to set the extended_kalman_filter variable to True as well)
noisy_measurement = False
# input noise bool
noisy_input = False
# extended kalman filter bool
extended_kalman_filter = False
# generate circular trajectory with velocities
traj_with_vel = False
# single reference point with phi = 2 * pi
ref_point = False
# import trajectory with positions and velocities and inputs
import_trajectory = True
# use acados integrator (if False, numerical integration is used instead):
use_acados_integrator = True
# bool to save measurements and inputs as .csv files
save_data = True
# load model and acados_solver
model, acados_solver, acados_integrator = acados_settings(
Ts, Tf, N, bound_on_phi, bound_on_y_z)
# quadrotor parameters
m = model.params.m
Ixx = model.params.J[0]
# dimensions
nx = model.x.size()[0]
nu = model.u.size()[0]
ny = nx + nu
N_hover = int (T_hover * N / Tf)
N_traj = int (T_traj * N / Tf)
Nsim = int(T * N / Tf)
# initialize data structs
predX = np.ndarray((Nsim+1, nx))
simX = np.ndarray((Nsim+1, nx))
simU = np.ndarray((Nsim, nu))
tot_comp_sum = 0
tcomp_max = 0
# set initial condition for acados integrator
xcurrent = model.x0.reshape((nx,))
simX[0, :] = xcurrent
# creating or extracting trajectory
if ref_point == False and import_trajectory == False:
# creating a reference trajectory
show_ref_traj = False
radius = 1 # m
freq = 6 * np.pi/10 # frequency
if traj_with_vel == False:
y, z = trajectory_generator2D(xcurrent, N_hover, N_traj, N, radius, show_ref_traj)
ref_traj = np.stack((y, z), 1)
else:
y, z, vy, vz = trajectory_generotaor2D_with_vel(
xcurrent, N_hover, model, radius, freq, T_traj, Tf, Ts)
ref_traj = np.stack((y, z, vy, vz), 1)
elif ref_point == False and import_trajectory == True:
T, ref_traj, ref_U = readTrajectory(T_hover, N)
Nsim = int((T-Tf) * N / Tf)
predX = np.ndarray((Nsim+1, nx))
simX = np.ndarray((Nsim+1, nx))
simU = np.ndarray((Nsim, nu))
simX[0, :] = xcurrent
# setup the extended kalman filter
if extended_kalman_filter == True:
ekf, C, D, Q_alpha, Q_beta, Q_gamma = setup_ekf(
Ts, m, Ixx, xcurrent, nx, nu)
MEAS_EVERY_STEPS = 1 # number of steps until a new measurement is taken
states = []
pred = []
covs = []
meas_xs = []
# elif extended_kalman_filter == True and noisy_measurement == False:
# sys.exit("The extended Kalman filter must run with a noisy input")
# set the seed for the random variables ( only relevant if noisy measurement and noisy input are applied)
np.random.seed(20)
# closed loop
for i in range(Nsim):
# updating references
if ref_point == False and import_trajectory == False:
if traj_with_vel == False:
for j in range(N):
yref = np.array([y[i+j], z[i+j], 0.0, 0.0, 0.0,
0.0, model.params.m * g, 0.0])
acados_solver.set(j, "yref", yref)
yref_N = np.array([y[i+N], z[i+N], 0.0, 0.0, 0.0, 0.0])
acados_solver.set(N, "yref", yref_N)
else:
for j in range(N):
yref = np.array([y[i+j], z[i+j], 0.0, vy[i+j],
vz[i+j], 0.0, model.params.m * g, 0.0])
acados_solver.set(j, "yref", yref)
            yref_N = np.array([y[i+N], z[i+N], 0.0, vy[i+N], vz[i+N], 0.0])
acados_solver.set(N, "yref", yref_N)
elif ref_point == True and import_trajectory == False:
for j in range(N):
yref = np.array([2.0, 2.0, 0.0 * np.pi, 0.0, 0.0,
0.0, model.params.m * g, 0.0])
acados_solver.set(j, "yref", yref)
yref_N = np.array([2.0, 2.0, 0.0 * np.pi, 0.0, 0.0, 0.0])
acados_solver.set(N, "yref", yref_N)
elif ref_point == False and import_trajectory == True:
# if i == Nsim-5:
# print(f'i={i}')
for j in range(N):
y = ref_traj[i+j, 0]
z = ref_traj[i+j, 1]
phi = ref_traj[i+j, 2]
vy = ref_traj[i+j, 3]
vz = ref_traj[i+j, 4]
phiDot = ref_traj[i+j, 5]
u1 = ref_U[i+j, 0] # Thrust
u2 = ref_U[i+j, 1] # Torque
yref = np.array([y, z, phi, vy, vz, phiDot, u1, u2])
acados_solver.set(j, "yref", yref)
y_e = ref_traj[i+N, 0]
z_e = ref_traj[i+N, 1]
phi_e = ref_traj[i+N, 2]
vy_e = ref_traj[i+N, 3]
vz_e = ref_traj[i+N, 4]
phiDot_e = ref_traj[i+N, 5]
yref_N = np.array([y_e, z_e, phi_e, vy_e, vz_e, phiDot_e])
acados_solver.set(N, "yref", yref_N)
# solve ocp for a fixed reference
acados_solver.set(0, "lbx", xcurrent)
acados_solver.set(0, "ubx", xcurrent)
comp_time = time.time()
status = acados_solver.solve()
if status != 0:
print("acados returned status {} in closed loop iteration {}.".format(status, i))
# manage timings
elapsed = time.time() - comp_time
tot_comp_sum += elapsed
if elapsed > tcomp_max:
tcomp_max = elapsed
# get solution from acados_solver
u0 = acados_solver.get(0, "u")
if noisy_input == True:
magnitude_u1 = np.sqrt(Q_beta[0][0]) # magnitude of the input noise on thrust
magnitude_u2 = np.sqrt(Q_beta[1][1]) # magnitude of the input noise on torque
# adding noise to the inputs
T_noisy = u0[0] + np.array(np.random.normal(0, magnitude_u1))
Tau_noisy = u0[1] + np.array(np.random.normal(0, magnitude_u2))
# making sure that inputs are within bounds
T_noisy = max(min(T_noisy, model.thrust_max), model.thrust_min)
Tau_noisy = max(min(Tau_noisy, model.torque_max), model.torque_min)
u0 = np.array([T_noisy, Tau_noisy])
# storing results from acados solver
simU[i, :] = u0
# add measurement noise
if noisy_measurement == True and extended_kalman_filter==False:
xcurrent_sim = add_measurement_noise(xcurrent)
elif noisy_measurement == True and extended_kalman_filter==True:
xcurrent_sim = add_measurement_noise_with_kalman(xcurrent, Q_gamma)
# xcurrent_sim = add_measurement_noise(xcurrent)
else:
xcurrent_sim = xcurrent
if extended_kalman_filter == True:
phi_k = float (ekf.state[2]) # roll at time k
# stacking the covariance matrix and the state estimation at each time instant
covs.append(ekf.cov)
states.append(ekf.state)
# The measurement vector at each time instant
meas_x = xcurrent_sim
# calculating the Jacobians of the evolution model with respect to the state vector and the input vector
T_k = u0[0] # thrust at time step k
Tau_k = u0[1] # torque at time step k
A_k = getA_k(phi_k, T_k, ekf._m, ekf._dt)
B_k = getB_k(phi_k, Ixx, ekf._m, ekf._dt)
# prediction phase
ekf.predict(A = A_k, B = B_k, u = u0, Q_alpha = Q_alpha, Q_beta = Q_beta)
pred.append(ekf.state)
# correction phase
if i != 0 and i % MEAS_EVERY_STEPS == 0:
ekf.update(C = C, meas = meas_x, meas_variance = Q_gamma)
meas_xs.append(meas_x)
if use_acados_integrator == True:
# simulate the system
acados_integrator.set("x", xcurrent_sim)
acados_integrator.set("u", u0)
status = acados_integrator.solve()
if status != 0:
raise Exception(
'acados integrator returned status {}. Exiting.'.format(status))
# get state
xcurrent = acados_integrator.get("x")
else:
# integrate the accelerations
delta_vel = integrateArray(getDynamics(m, Ixx, xcurrent_sim, u0), Ts)
# integrate the velocities
delta_X = integrateArray(delta_vel, Ts)
# update the state
xcurrent = updateState(xcurrent_sim, delta_X, delta_vel)
# store state
simX[i+1, :] = xcurrent
# save measurements and inputs as .csv files
if save_data == True and extended_kalman_filter == False:
saveData(simX, simU)
if extended_kalman_filter == True:
# converting lists to np arrays for plotting
meas_xs = np.array(meas_xs)
states = np.array(states)
covs = np.array(covs)
pred = np.array(pred)
# print the computation times
print("Total computation time: {}".format(tot_comp_sum))
print("Average computation time: {}".format(tot_comp_sum / Nsim))
print("Maximum computation time: {}".format(tcomp_max))
if import_trajectory == False:
t = np.arange(0, T, Ts)
else:
t = np.arange(0, T-Tf ,Ts)
# plotting and RMSE
if extended_kalman_filter == True:
if ref_point == False and import_trajectory == False:
# root mean squared error on each axis
rmse_y, rmse_z = rmseX(simX, ref_traj[0:Nsim, :])
rmse_y_kalman, rmse_z_kalman = rmseX_kalman(states, ref_traj[0:Nsim, :])
# print the RMSE on each axis
print("RMSE on y: {}".format(rmse_y))
print("RMSE on z: {}".format(rmse_z))
print("RMSE on y with EKF measurements: {}".format(rmse_y_kalman))
print("RMSE on z with EKF measurements: {}".format(rmse_z_kalman))
# check if circular trajectory is with velocity or not
if traj_with_vel == False: # circular trajectory is generated without velocities
plotSim_kalman(simX, states, pred, ref_traj, Nsim, save=True)
plotPos_kalman(t, simX, states, pred, covs, ref_traj, Nsim, save=True)
plotVel_without_vy_vz_references_kalman(t, simX, states, pred, covs, ref_traj, Nsim, save=True)
plotSimU(t, simU, Nsim, save=True)
plotErrors_without_vel_kalman(t, simX, states, ref_traj, Nsim, save=True)
else: # circular trajectory is generated with velocities
plotSim_kalman(simX, states, pred, ref_traj, Nsim, save=True)
plotPos_kalman(t, simX, states, pred, covs, ref_traj, Nsim, save=True)
plotVel_with_vy_vz_references_kalman(t, simX, states, pred, covs, ref_traj, Nsim, save=True)
plotSimU(t, simU, Nsim, save=True)
plotErrors_with_vel_kalman(t, simX, states, ref_traj, Nsim, save=True)
if ref_point == True and import_trajectory == False: # For single reference points
plotSim_ref_point_kalman(simX, states, save=True)
plotPos_ref_point_kalman(t, simX, states, save=True)
plotVel_with_ref_pose_kalman(t,simX, states, covs, Nsim, save=True)
plotSimU(t, simU, Nsim, save=True)
if ref_point == False and import_trajectory == True: # For imported trajectories with velocities and inputs
plotSim_kalman(simX, states, pred, ref_traj, Nsim, save=True)
plotPos_with_imported_traj_kalman(t, simX, states, covs, ref_traj, Nsim, save=True)
plotVel_with_imported_traj_kalman(t, simX, states, covs, ref_traj, Nsim, save=True)
plotSimU_with_ref(t, simU, ref_U, Nsim, save=True)
plotErrors_with_ref_kalman(t, simX, states, ref_traj, Nsim, save=True)
else:
if ref_point == False and import_trajectory == False:
# root mean squared error on each axis
rmse_y, rmse_z = rmseX(simX, ref_traj[0:Nsim, :])
# print the RMSE on each axis
print("RMSE on y: {}".format(rmse_y))
print("RMSE on z: {}".format(rmse_z))
# check if circular trajectory is with velocity or not
if traj_with_vel == False: # circular trajectory is generated without velocities
plotSim(simX, ref_traj, Nsim, save=True)
plotPos(t, simX, ref_traj, Nsim, save=True)
plotVel(t, simX, Nsim, save=True)
plotSimU(t, simU, Nsim, save=True)
plotErrors_no_vel(t, simX, ref_traj, Nsim)
else: # circular trajectory is generated with velocities
plotSim(simX, ref_traj, Nsim, save=True)
plotPos(t, simX, ref_traj, Nsim, save=True)
plotVel_with_vy_vz_references(t,simX, ref_traj, Nsim, save=True)
plotSimU(t, simU, Nsim, save=True)
plotErrors_with_vel(t, simX, ref_traj, Nsim, save=True)
if ref_point == True and import_trajectory == False: # For single reference points
plotSim_ref_point(simX, save=True)
plotPos_ref_point(t, simX, save=True)
plotVel_with_ref_pose(t,simX, Nsim, save=True)
plotSimU(t, simU, Nsim, save=True)
if ref_point == False and import_trajectory == True: # For imported trajectories with velocities and inputs
plotSim(simX, ref_traj, Nsim, save=True)
plotPos_with_imported_traj(t, simX, ref_traj, Nsim, save=True)
plotVel_with_imported_traj(t, simX, ref_traj, Nsim, save=True)
plotSimU_with_ref(t, simU, ref_U, Nsim, save=True)
plotErrors_with_ref(t, simX, ref_traj, Nsim, save=True)
'''
# plotting and RMSE
if not ref_point and not import_trajectory:
# check if circular trajectory is with velocity or not
# root mean squared error on each axis
rmse_y, rmse_z = rmseX(simX, ref_traj[0:Nsim, :])
# print the RMSE on each axis
print("RMSE on y: {}".format(rmse_y))
print("RMSE on z: {}".format(rmse_z))
if extended_kalman_filter == True:
# root mean squared error on each axis
rmse_y_kalman, rmse_z_kalman = rmseX_kalman(states, ref_traj[0:Nsim, :])
# print the RMSE on each axis
print("RMSE on y with EKF measurements: {}".format(rmse_y_kalman))
print("RMSE on z with EKF measurements: {}".format(rmse_z_kalman))
if traj_with_vel == False: # circular trajectory is generated without velocities
plotSim(simX, ref_traj, Nsim, save=True)
plotPos(t, simX, ref_traj, Nsim, save=True)
plotVel(t, simX, Nsim, save=True)
plotSimU(t, simU, Nsim, save=True)
plotErrors_no_vel(t, simX, ref_traj, Nsim)
    elif traj_with_vel == True and extended_kalman_filter == False: # circular trajectory is generated with velocities
plotSim(simX, ref_traj, Nsim, save=True)
plotPos(t, simX, ref_traj, Nsim, save=True)
plotVel_with_vy_vz_references(t, simX, ref_traj, Nsim, save=True)
plotSimU(t, simU, Nsim, save=True)
plotErrors_with_vel(t, simX, ref_traj, Nsim)
    elif traj_with_vel == True and extended_kalman_filter == True: # circular trajectory is generated with velocities
plotSim_kalman(simX, states, pred, ref_traj, Nsim, save=True)
plotPos_kalman(t, simX, states, pred, covs, ref_traj, Nsim, save=True)
plotVel_with_vy_vz_references_kalman(t, simX, states, pred, covs, ref_traj, Nsim, save=True)
plotSimU(t, simU, Nsim, save=True)
plotErrors_with_vel_kalman(t, simX, states, ref_traj, Nsim)
if ref_point and not import_trajectory: # For single reference points
plotSim_ref_point(simX, save=True)
plotPos_ref_point(t, simX, save=True)
plotVel(t, simX, Nsim, save=True)
plotSimU(t, simU, Nsim, save=True)
if extended_kalman_filter == False:
if not ref_point and import_trajectory: # For imported trajectories with velocities and inputs
# root mean squared error on each axis
rmse_y, rmse_z = rmseX(simX, ref_traj[0:Nsim, :])
# print the RMSE on each axis
print("RMSE on y: {}".format(rmse_y))
print("RMSE on z: {}".format(rmse_z))
plotSim(simX, ref_traj, Nsim, save=True)
plotPos_with_ref(t, simX, ref_traj, Nsim, save=True)
plotVel_with_ref(t, simX, ref_traj, Nsim, save=True)
plotSimU_with_ref(t, simU, ref_U, Nsim, save=True)
plotErrors_with_ref(t, simX, ref_traj, Nsim)
else:
rmse_y_kalman, rmse_z_kalman = rmseX_kalman(states, ref_traj[0:Nsim, :])
# print the RMSE on each axis
print("RMSE on y with EKF measurements: {}".format(rmse_y_kalman))
print("RMSE on z with EKF measurements: {}".format(rmse_z_kalman))
if not ref_point and import_trajectory: # For imported trajectories with velocities and inputs
# root mean squared error on each axis
rmse_y, rmse_z = rmseX(simX, ref_traj[0:Nsim, :])
# print the RMSE on each axis
print("RMSE on y: {}".format(rmse_y))
print("RMSE on z: {}".format(rmse_z))
plotSim(simX, ref_traj, Nsim, save=True)
plotPos_with_ref(t, simX, ref_traj, Nsim, save=True)
plotVel_with_ref(t, simX, ref_traj, Nsim, save=True)
plotSimU_with_ref(t, simU, ref_U, Nsim, save=True)
plotErrors_with_ref_kalman(t, simX, states, ref_traj, Nsim)
'''
plt.show()
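# --- Editor's illustrative sketch (not part of the original script) ---
# The non-acados branch in the closed loop above integrates the planar-quadrotor
# dynamics with a simple forward-Euler scheme via utils helpers whose bodies are
# not shown here. The self-contained function below illustrates the same idea;
# its name and exact update rule are the editor's, not the project's.
def _euler_step_sketch(x, u, m_, Ixx_, dt, g_=9.81):
    """One Euler step for state x = [y, z, phi, vy, vz, phiDot] and input u = [T, tau]."""
    y_, z_, phi, vy_, vz_, phiDot = x
    T_, tau = u
    ay = -T_ * np.sin(phi) / m_        # lateral acceleration
    az = T_ * np.cos(phi) / m_ - g_    # vertical acceleration
    phiDDot = tau / Ixx_               # angular acceleration
    return np.array([
        y_ + vy_ * dt, z_ + vz_ * dt, phi + phiDot * dt,        # positions / attitude
        vy_ + ay * dt, vz_ + az * dt, phiDot + phiDDot * dt])   # velocities / rate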
| 38.008677
| 119
| 0.637256
|
12bc8473668334ca22f49eb9f8686eab416a468a
| 1,141
|
py
|
Python
|
Lib/idlelib/idle_test/test_editor.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 854
|
2017-09-11T16:42:28.000Z
|
2022-03-27T14:17:09.000Z
|
Lib/idlelib/idle_test/test_editor.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 242
|
2019-01-29T15:48:27.000Z
|
2022-03-31T22:09:21.000Z
|
Lib/idlelib/idle_test/test_editor.py
|
livioso/cpython
|
077061a7b24917aaf31057885c69919c5a553c88
|
[
"PSF-2.0"
] | 73
|
2017-09-13T18:07:48.000Z
|
2022-03-17T13:02:29.000Z
|
"Test editor, coverage 35%."
from idlelib import editor
import unittest
from test.support import requires
from tkinter import Tk
Editor = editor.EditorWindow
class EditorWindowTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
requires('gui')
cls.root = Tk()
cls.root.withdraw()
@classmethod
def tearDownClass(cls):
cls.root.update_idletasks()
for id in cls.root.tk.call('after', 'info'):
cls.root.after_cancel(id)
cls.root.destroy()
del cls.root
def test_init(self):
e = Editor(root=self.root)
self.assertEqual(e.root, self.root)
e._close()
class EditorFunctionTest(unittest.TestCase):
def test_filename_to_unicode(self):
func = Editor._filename_to_unicode
class dummy():
filesystemencoding = 'utf-8'
pairs = (('abc', 'abc'), ('a\U00011111c', 'a\ufffdc'),
(b'abc', 'abc'), (b'a\xf0\x91\x84\x91c', 'a\ufffdc'))
for inp, out in pairs:
self.assertEqual(func(dummy, inp), out)
if __name__ == '__main__':
unittest.main(verbosity=2)
| 24.276596
| 70
| 0.614373
|
f6282198e7ac6786f3abffd7982090e0423a6088
| 1,260
|
py
|
Python
|
701_insert_binary_tree.py
|
claytonjwong/leetcode-py
|
16bbf8ac0ba5c80fe3ef67ade0d61a12991270a7
|
[
"MIT"
] | 1
|
2020-07-15T14:16:23.000Z
|
2020-07-15T14:16:23.000Z
|
701_insert_binary_tree.py
|
claytonjwong/leetcode-py
|
16bbf8ac0ba5c80fe3ef67ade0d61a12991270a7
|
[
"MIT"
] | null | null | null |
701_insert_binary_tree.py
|
claytonjwong/leetcode-py
|
16bbf8ac0ba5c80fe3ef67ade0d61a12991270a7
|
[
"MIT"
] | null | null | null |
#
# 701. Insert into a Binary Search Tree
#
# Q: https://leetcode.com/problems/insert-into-a-binary-search-tree/
# A: https://leetcode.com/problems/insert-into-a-binary-search-tree/discuss/881995/Javascript-Python3-C%2B%2B-Recursive
#
class TreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
# concise
class Solution:
def insertIntoBST(self, root: TreeNode, x: int) -> TreeNode:
def go(root, x):
if x < root.val:
root.left = go(root.left, x) if root.left else TreeNode(x)
else:
root.right = go(root.right, x) if root.right else TreeNode(x)
return root
return go(root, x) if root else TreeNode(x)
# verbose
class Solution:
def insertIntoBST(self, root: TreeNode, x: int) -> TreeNode:
def go(root, x):
if x < root.val:
if root.left:
go(root.left, x)
else:
root.left = TreeNode(x)
else:
if root.right:
go(root.right, x)
else:
root.right = TreeNode(x)
return root
return go(root, x) if root else TreeNode(x)
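# --- Editor's illustrative usage (not part of the original solution) ---
# Build a small BST, insert a value, and check the result with an in-order walk.
def _demo():
    root = TreeNode(4)
    for v in (2, 7, 1, 3):
        root = Solution().insertIntoBST(root, v)
    root = Solution().insertIntoBST(root, 5)   # 5 ends up as the left child of 7
    def inorder(node):
        return inorder(node.left) + [node.val] + inorder(node.right) if node else []
    assert inorder(root) == [1, 2, 3, 4, 5, 7]
if __name__ == '__main__':
    _demo()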
| 30.731707
| 119
| 0.538095
|
c4fefdc214492cea1babb4602c66150baa73caf6
| 1,783
|
py
|
Python
|
two.py
|
hanee211/paper14
|
edb0739e879284aaa2b03b41252b1d4a724bb01d
|
[
"MIT"
] | null | null | null |
two.py
|
hanee211/paper14
|
edb0739e879284aaa2b03b41252b1d4a724bb01d
|
[
"MIT"
] | null | null | null |
two.py
|
hanee211/paper14
|
edb0739e879284aaa2b03b41252b1d4a724bb01d
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
import helpers
from tensorflow.contrib.rnn import LSTMCell, LSTMStateTuple
tf.reset_default_graph()
sess = tf.InteractiveSession()
PAD = 0
EOS = 1
vocab_size = 10
input_embedding_size = 20
encoder_hidden_units = 20
decoder_hidden_units = encoder_hidden_units * 2
encoder_inputs = tf.placeholder(shape=(None, None), dtype=tf.int32, name='encoder_inputs')
encoder_inputs_length = tf.placeholder(shape=(None,), dtype=tf.int32, name='encoder_inputs_length')
decoder_targets = tf.placeholder(shape=(None, None), dtype=tf.int32, name='decoder_targets')
embeddings = tf.Variable(tf.random_uniform([vocab_size, input_embedding_size], -1.0, 1.0), dtype=tf.float32)
encoder_inputs_embedded = tf.nn.embedding_lookup(embeddings, encoder_inputs)
encoder_cell = LSTMCell(encoder_hidden_units)
((encoder_fw_outputs,
encoder_bw_outputs),
(encoder_fw_final_state,
encoder_bw_final_state)) = (
tf.nn.bidirectional_dynamic_rnn(cell_fw=encoder_cell,
cell_bw=encoder_cell,
inputs=encoder_inputs_embedded,
sequence_length=encoder_inputs_length,
dtype=tf.float32, time_major=True)
)
encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2)
encoder_final_state_c = tf.concat((encoder_fw_final_state.c, encoder_bw_final_state.c), 1)
encoder_final_state_h = tf.concat((encoder_fw_final_state.h, encoder_bw_final_state.h), 1)
encoder_final_state = LSTMStateTuple(c=encoder_final_state_c, h=encoder_final_state_h)
decoder_cell = LSTMCell(decoder_hidden_units)
encoder_max_time, batch_size = tf.unstack(tf.shape(encoder_inputs))
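# --- Editor's illustrative continuation (not part of the original file) ---
# One common way to finish this graph: feed the concatenated bidirectional state
# into a plain dynamic_rnn decoder and project to vocabulary logits. All names
# below are illustrative; the original file stops at the line above.
decoder_inputs = tf.placeholder(shape=(None, None), dtype=tf.int32, name='decoder_inputs')
decoder_inputs_embedded = tf.nn.embedding_lookup(embeddings, decoder_inputs)
decoder_outputs, decoder_final_state = tf.nn.dynamic_rnn(
    decoder_cell, decoder_inputs_embedded,
    initial_state=encoder_final_state,
    dtype=tf.float32, time_major=True, scope="plain_decoder")
decoder_logits = tf.layers.dense(decoder_outputs, vocab_size, name="decoder_logits")
decoder_prediction = tf.argmax(decoder_logits, 2)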
| 36.387755
| 109
| 0.722378
|
1cbbbc74318e856e912b82f86d91c2557a03f4f4
| 1,902
|
py
|
Python
|
examples/ucass_test.py
|
JGirdwood/AeroSAM_logger
|
0ccd2f99be2546b7728a87fc10d4b462f04a44ed
|
[
"MIT"
] | 1
|
2020-11-11T13:04:25.000Z
|
2020-11-11T13:04:25.000Z
|
examples/ucass_test.py
|
JGirdwood/AeroSAM_logger
|
0ccd2f99be2546b7728a87fc10d4b462f04a44ed
|
[
"MIT"
] | null | null | null |
examples/ucass_test.py
|
JGirdwood/AeroSAM_logger
|
0ccd2f99be2546b7728a87fc10d4b462f04a44ed
|
[
"MIT"
] | null | null | null |
import met
import ucass
import log
import pix
from time import sleep
from gpiozero import LED
if __name__ == '__main__':
act_led = LED(19)
resolution_ms = 500
run_forever = False
run_time = 10
ss = ucass.UCASS(25)
print("UCASS Connected")
#mavcon = pix.MavlinkConnection('/dev/ttyS0', 57600)
#print("Mavlink Connected")
#mavcon.get_date_time()
#name_string = "_".join(["AeroSAM-log", mavcon.start_date, mavcon.start_time])
#sd_log = log.LogFile(name_string, "/home/pi")
#print("".join(["Log File: ", name_string]))
#met_module = met.MetSensorModule(18, 24, 4.39)
#print("MET Sensors Connected")
ss.read_info_string()
print "I Hate This Data Logger"
print ss.info_string
#print ss.bbs
#ss.read_config_vars()
#sd_log.make_headers(mavcon.start_date, mavcon.start_time, str(int(mavcon.epoch_time/1000)), ss.info_string,
#ss.bbs, ss.gsc, ss.id)
#t0 = mavcon.boot_time
counter = 1
while True:
act_led.off()
#mavcon.get_date_time()
#t1 = mavcon.boot_time
#mavcon.fill_info_buffer()
#ss.read_histogram_data()
#met_module.read_hum()
#met_module.read_temp()
#sd_log.write_data_log(mavcon.boot_time, mavcon.press_hPa, mavcon.lat, mavcon.lon, mavcon.alt_m, mavcon.vz_ms,
#met_module.t_deg_c, met_module.rh_true, ss.hist, ss.mtof, ss.period, ss.checksum,
#ss.reject_glitch, ss.reject_ltof, ss.reject_ratio)
#mavcon.get_date_time()
#t2 = mavcon.boot_time
#delay_ms = resolution_ms-(t2-t1)
#print ss.hist
act_led.on()
#if delay_ms > 0:
# sleep(delay_ms/1000)
#else:
# print("Loop Time Exceeds Resolution")
if counter > run_time:
print("Loop Finished")
break
counter += 1
| 33.368421
| 118
| 0.618297
|
fc12ec065a9bd810daaff19fb3bca4a15bb2e4ba
| 19,199
|
py
|
Python
|
homeassistant/bootstrap.py
|
edofullin/core
|
106dc4d28ad59cb192c60fc7a354cafa86899ea4
|
[
"Apache-2.0"
] | 1
|
2021-04-28T09:51:08.000Z
|
2021-04-28T09:51:08.000Z
|
homeassistant/bootstrap.py
|
edofullin/core
|
106dc4d28ad59cb192c60fc7a354cafa86899ea4
|
[
"Apache-2.0"
] | 60
|
2020-08-03T07:32:56.000Z
|
2022-03-31T06:02:07.000Z
|
homeassistant/bootstrap.py
|
edofullin/core
|
106dc4d28ad59cb192c60fc7a354cafa86899ea4
|
[
"Apache-2.0"
] | 4
|
2017-01-10T04:17:33.000Z
|
2021-09-02T16:37:24.000Z
|
"""Provide methods to bootstrap a Home Assistant instance."""
from __future__ import annotations
import asyncio
import contextlib
from datetime import datetime
import logging
import logging.handlers
import os
import sys
import threading
from time import monotonic
from typing import TYPE_CHECKING, Any
import voluptuous as vol
import yarl
from homeassistant import config as conf_util, config_entries, core, loader
from homeassistant.components import http
from homeassistant.const import REQUIRED_NEXT_PYTHON_DATE, REQUIRED_NEXT_PYTHON_VER
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import area_registry, device_registry, entity_registry
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.typing import ConfigType
from homeassistant.setup import (
DATA_SETUP,
DATA_SETUP_STARTED,
DATA_SETUP_TIME,
async_set_domains_to_be_loaded,
async_setup_component,
)
from homeassistant.util.async_ import gather_with_concurrency
import homeassistant.util.dt as dt_util
from homeassistant.util.logging import async_activate_log_queue_handler
from homeassistant.util.package import async_get_user_site, is_virtual_env
if TYPE_CHECKING:
from .runner import RuntimeConfig
_LOGGER = logging.getLogger(__name__)
ERROR_LOG_FILENAME = "home-assistant.log"
# hass.data key for logging information.
DATA_LOGGING = "logging"
LOG_SLOW_STARTUP_INTERVAL = 60
SLOW_STARTUP_CHECK_INTERVAL = 1
SIGNAL_BOOTSTRAP_INTEGRATONS = "bootstrap_integrations"
STAGE_1_TIMEOUT = 120
STAGE_2_TIMEOUT = 300
WRAP_UP_TIMEOUT = 300
COOLDOWN_TIME = 60
MAX_LOAD_CONCURRENTLY = 6
DEBUGGER_INTEGRATIONS = {"debugpy"}
CORE_INTEGRATIONS = ("homeassistant", "persistent_notification")
LOGGING_INTEGRATIONS = {
# Set log levels
"logger",
# Error logging
"system_log",
"sentry",
# To record data
"recorder",
}
STAGE_1_INTEGRATIONS = {
# To make sure we forward data to other instances
"mqtt_eventstream",
# To provide account link implementations
"cloud",
# Ensure supervisor is available
"hassio",
# Get the frontend up and running as soon
# as possible so problem integrations can
# be removed
"frontend",
}
async def async_setup_hass(
runtime_config: RuntimeConfig,
) -> core.HomeAssistant | None:
"""Set up Home Assistant."""
hass = core.HomeAssistant()
hass.config.config_dir = runtime_config.config_dir
async_enable_logging(
hass,
runtime_config.verbose,
runtime_config.log_rotate_days,
runtime_config.log_file,
runtime_config.log_no_color,
)
hass.config.skip_pip = runtime_config.skip_pip
if runtime_config.skip_pip:
_LOGGER.warning(
"Skipping pip installation of required modules. This may cause issues"
)
if not await conf_util.async_ensure_config_exists(hass):
_LOGGER.error("Error getting configuration path")
return None
_LOGGER.info("Config directory: %s", runtime_config.config_dir)
config_dict = None
basic_setup_success = False
safe_mode = runtime_config.safe_mode
if not safe_mode:
await hass.async_add_executor_job(conf_util.process_ha_config_upgrade, hass)
try:
config_dict = await conf_util.async_hass_config_yaml(hass)
except HomeAssistantError as err:
_LOGGER.error(
"Failed to parse configuration.yaml: %s. Activating safe mode",
err,
)
else:
if not is_virtual_env():
await async_mount_local_lib_path(runtime_config.config_dir)
basic_setup_success = (
await async_from_config_dict(config_dict, hass) is not None
)
if config_dict is None:
safe_mode = True
elif not basic_setup_success:
_LOGGER.warning("Unable to set up core integrations. Activating safe mode")
safe_mode = True
elif (
"frontend" in hass.data.get(DATA_SETUP, {})
and "frontend" not in hass.config.components
):
_LOGGER.warning("Detected that frontend did not load. Activating safe mode")
# Ask integrations to shut down. It's messy but we can't
# do a clean stop without knowing what is broken
with contextlib.suppress(asyncio.TimeoutError):
async with hass.timeout.async_timeout(10):
await hass.async_stop()
safe_mode = True
old_config = hass.config
hass = core.HomeAssistant()
hass.config.skip_pip = old_config.skip_pip
hass.config.internal_url = old_config.internal_url
hass.config.external_url = old_config.external_url
hass.config.config_dir = old_config.config_dir
if safe_mode:
_LOGGER.info("Starting in safe mode")
hass.config.safe_mode = True
http_conf = (await http.async_get_last_config(hass)) or {}
await async_from_config_dict(
{"safe_mode": {}, "http": http_conf},
hass,
)
if runtime_config.open_ui:
hass.add_job(open_hass_ui, hass)
return hass
def open_hass_ui(hass: core.HomeAssistant) -> None:
"""Open the UI."""
import webbrowser # pylint: disable=import-outside-toplevel
if hass.config.api is None or "frontend" not in hass.config.components:
_LOGGER.warning("Cannot launch the UI because frontend not loaded")
return
scheme = "https" if hass.config.api.use_ssl else "http"
url = str(
yarl.URL.build(scheme=scheme, host="127.0.0.1", port=hass.config.api.port)
)
if not webbrowser.open(url):
_LOGGER.warning(
"Unable to open the Home Assistant UI in a browser. Open it yourself at %s",
url,
)
async def async_from_config_dict(
config: ConfigType, hass: core.HomeAssistant
) -> core.HomeAssistant | None:
"""Try to configure Home Assistant from a configuration dictionary.
Dynamically loads required components and its dependencies.
This method is a coroutine.
"""
start = monotonic()
hass.config_entries = config_entries.ConfigEntries(hass, config)
await hass.config_entries.async_initialize()
# Set up core.
_LOGGER.debug("Setting up %s", CORE_INTEGRATIONS)
if not all(
await asyncio.gather(
*(
async_setup_component(hass, domain, config)
for domain in CORE_INTEGRATIONS
)
)
):
_LOGGER.error("Home Assistant core failed to initialize. ")
return None
_LOGGER.debug("Home Assistant core initialized")
core_config = config.get(core.DOMAIN, {})
try:
await conf_util.async_process_ha_core_config(hass, core_config)
except vol.Invalid as config_err:
conf_util.async_log_exception(config_err, "homeassistant", core_config, hass)
return None
except HomeAssistantError:
_LOGGER.error(
"Home Assistant core failed to initialize. "
"Further initialization aborted"
)
return None
await _async_set_up_integrations(hass, config)
stop = monotonic()
_LOGGER.info("Home Assistant initialized in %.2fs", stop - start)
if REQUIRED_NEXT_PYTHON_DATE and sys.version_info[:3] < REQUIRED_NEXT_PYTHON_VER:
msg = (
"Support for the running Python version "
f"{'.'.join(str(x) for x in sys.version_info[:3])} is deprecated and will "
f"be removed in the first release after {REQUIRED_NEXT_PYTHON_DATE}. "
"Please upgrade Python to "
f"{'.'.join(str(x) for x in REQUIRED_NEXT_PYTHON_VER)} or "
"higher."
)
_LOGGER.warning(msg)
hass.components.persistent_notification.async_create(
msg, "Python version", "python_version"
)
return hass
@core.callback
def async_enable_logging(
hass: core.HomeAssistant,
verbose: bool = False,
log_rotate_days: int | None = None,
log_file: str | None = None,
log_no_color: bool = False,
) -> None:
"""Set up the logging.
This method must be run in the event loop.
"""
fmt = "%(asctime)s %(levelname)s (%(threadName)s) [%(name)s] %(message)s"
datefmt = "%Y-%m-%d %H:%M:%S"
if not log_no_color:
try:
# pylint: disable=import-outside-toplevel
from colorlog import ColoredFormatter
# basicConfig must be called after importing colorlog in order to
# ensure that the handlers it sets up wraps the correct streams.
logging.basicConfig(level=logging.INFO)
colorfmt = f"%(log_color)s{fmt}%(reset)s"
logging.getLogger().handlers[0].setFormatter(
ColoredFormatter(
colorfmt,
datefmt=datefmt,
reset=True,
log_colors={
"DEBUG": "cyan",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red",
},
)
)
except ImportError:
pass
# If the above initialization failed for any reason, setup the default
# formatting. If the above succeeds, this will result in a no-op.
logging.basicConfig(format=fmt, datefmt=datefmt, level=logging.INFO)
# Suppress overly verbose logs from libraries that aren't helpful
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("aiohttp.access").setLevel(logging.WARNING)
sys.excepthook = lambda *args: logging.getLogger(None).exception(
"Uncaught exception", exc_info=args # type: ignore
)
threading.excepthook = lambda args: logging.getLogger(None).exception(
"Uncaught thread exception",
exc_info=(args.exc_type, args.exc_value, args.exc_traceback), # type: ignore[arg-type]
)
# Log errors to a file if we have write access to file or config dir
if log_file is None:
err_log_path = hass.config.path(ERROR_LOG_FILENAME)
else:
err_log_path = os.path.abspath(log_file)
err_path_exists = os.path.isfile(err_log_path)
err_dir = os.path.dirname(err_log_path)
# Check if we can write to the error log if it exists or that
# we can create files in the containing directory if not.
if (err_path_exists and os.access(err_log_path, os.W_OK)) or (
not err_path_exists and os.access(err_dir, os.W_OK)
):
if log_rotate_days:
err_handler: logging.FileHandler = (
logging.handlers.TimedRotatingFileHandler(
err_log_path, when="midnight", backupCount=log_rotate_days
)
)
else:
err_handler = logging.FileHandler(err_log_path, mode="w", delay=True)
err_handler.setLevel(logging.INFO if verbose else logging.WARNING)
err_handler.setFormatter(logging.Formatter(fmt, datefmt=datefmt))
logger = logging.getLogger("")
logger.addHandler(err_handler)
logger.setLevel(logging.INFO if verbose else logging.WARNING)
# Save the log file location for access by other components.
hass.data[DATA_LOGGING] = err_log_path
else:
_LOGGER.error("Unable to set up error log %s (access denied)", err_log_path)
async_activate_log_queue_handler(hass)
async def async_mount_local_lib_path(config_dir: str) -> str:
"""Add local library to Python Path.
This function is a coroutine.
"""
deps_dir = os.path.join(config_dir, "deps")
lib_dir = await async_get_user_site(deps_dir)
if lib_dir not in sys.path:
sys.path.insert(0, lib_dir)
return deps_dir
@core.callback
def _get_domains(hass: core.HomeAssistant, config: dict[str, Any]) -> set[str]:
"""Get domains of components to set up."""
# Filter out the repeating and common config section [homeassistant]
domains = {key.split(" ")[0] for key in config if key != core.DOMAIN}
# Add config entry domains
if not hass.config.safe_mode:
domains.update(hass.config_entries.async_domains())
# Make sure the Hass.io component is loaded
if "HASSIO" in os.environ:
domains.add("hassio")
return domains
async def _async_watch_pending_setups(hass: core.HomeAssistant) -> None:
"""Periodic log of setups that are pending for longer than LOG_SLOW_STARTUP_INTERVAL."""
loop_count = 0
setup_started: dict[str, datetime] = hass.data[DATA_SETUP_STARTED]
while True:
now = dt_util.utcnow()
remaining_with_setup_started = {
domain: (now - setup_started[domain]).total_seconds()
for domain in setup_started
}
_LOGGER.debug("Integration remaining: %s", remaining_with_setup_started)
async_dispatcher_send(
hass, SIGNAL_BOOTSTRAP_INTEGRATONS, remaining_with_setup_started
)
await asyncio.sleep(SLOW_STARTUP_CHECK_INTERVAL)
loop_count += SLOW_STARTUP_CHECK_INTERVAL
if loop_count >= LOG_SLOW_STARTUP_INTERVAL and setup_started:
_LOGGER.warning(
"Waiting on integrations to complete setup: %s",
", ".join(setup_started),
)
loop_count = 0
_LOGGER.debug("Running timeout Zones: %s", hass.timeout.zones)
async def async_setup_multi_components(
hass: core.HomeAssistant,
domains: set[str],
config: dict[str, Any],
) -> None:
"""Set up multiple domains. Log on failure."""
futures = {
domain: hass.async_create_task(async_setup_component(hass, domain, config))
for domain in domains
}
await asyncio.wait(futures.values())
errors = [domain for domain in domains if futures[domain].exception()]
for domain in errors:
exception = futures[domain].exception()
assert exception is not None
_LOGGER.error(
"Error setting up integration %s - received exception",
domain,
exc_info=(type(exception), exception, exception.__traceback__),
)
async def _async_set_up_integrations(
hass: core.HomeAssistant, config: dict[str, Any]
) -> None:
"""Set up all the integrations."""
hass.data[DATA_SETUP_STARTED] = {}
setup_time = hass.data[DATA_SETUP_TIME] = {}
watch_task = asyncio.create_task(_async_watch_pending_setups(hass))
domains_to_setup = _get_domains(hass, config)
# Resolve all dependencies so we know all integrations
    # that will have to be loaded and start right away
integration_cache: dict[str, loader.Integration] = {}
to_resolve = domains_to_setup
while to_resolve:
old_to_resolve = to_resolve
to_resolve = set()
integrations_to_process = [
int_or_exc
for int_or_exc in await gather_with_concurrency(
loader.MAX_LOAD_CONCURRENTLY,
*(
loader.async_get_integration(hass, domain)
for domain in old_to_resolve
),
return_exceptions=True,
)
if isinstance(int_or_exc, loader.Integration)
]
resolve_dependencies_tasks = [
itg.resolve_dependencies()
for itg in integrations_to_process
if not itg.all_dependencies_resolved
]
if resolve_dependencies_tasks:
await asyncio.gather(*resolve_dependencies_tasks)
for itg in integrations_to_process:
integration_cache[itg.domain] = itg
for dep in itg.all_dependencies:
if dep in domains_to_setup:
continue
domains_to_setup.add(dep)
to_resolve.add(dep)
_LOGGER.info("Domains to be set up: %s", domains_to_setup)
logging_domains = domains_to_setup & LOGGING_INTEGRATIONS
# Load logging as soon as possible
if logging_domains:
_LOGGER.info("Setting up logging: %s", logging_domains)
await async_setup_multi_components(hass, logging_domains, config)
# Start up debuggers. Start these first in case they want to wait.
debuggers = domains_to_setup & DEBUGGER_INTEGRATIONS
if debuggers:
_LOGGER.debug("Setting up debuggers: %s", debuggers)
await async_setup_multi_components(hass, debuggers, config)
# calculate what components to setup in what stage
stage_1_domains = set()
# Find all dependencies of any dependency of any stage 1 integration that
# we plan on loading and promote them to stage 1
deps_promotion = STAGE_1_INTEGRATIONS
while deps_promotion:
old_deps_promotion = deps_promotion
deps_promotion = set()
for domain in old_deps_promotion:
if domain not in domains_to_setup or domain in stage_1_domains:
continue
stage_1_domains.add(domain)
dep_itg = integration_cache.get(domain)
if dep_itg is None:
continue
deps_promotion.update(dep_itg.all_dependencies)
stage_2_domains = domains_to_setup - logging_domains - debuggers - stage_1_domains
# Load the registries
await asyncio.gather(
device_registry.async_load(hass),
entity_registry.async_load(hass),
area_registry.async_load(hass),
)
# Start setup
if stage_1_domains:
_LOGGER.info("Setting up stage 1: %s", stage_1_domains)
try:
async with hass.timeout.async_timeout(
STAGE_1_TIMEOUT, cool_down=COOLDOWN_TIME
):
await async_setup_multi_components(hass, stage_1_domains, config)
except asyncio.TimeoutError:
_LOGGER.warning("Setup timed out for stage 1 - moving forward")
# Enables after dependencies
async_set_domains_to_be_loaded(hass, stage_2_domains)
if stage_2_domains:
_LOGGER.info("Setting up stage 2: %s", stage_2_domains)
try:
async with hass.timeout.async_timeout(
STAGE_2_TIMEOUT, cool_down=COOLDOWN_TIME
):
await async_setup_multi_components(hass, stage_2_domains, config)
except asyncio.TimeoutError:
_LOGGER.warning("Setup timed out for stage 2 - moving forward")
watch_task.cancel()
async_dispatcher_send(hass, SIGNAL_BOOTSTRAP_INTEGRATONS, {})
_LOGGER.debug(
"Integration setup times: %s",
{
integration: timedelta.total_seconds()
for integration, timedelta in sorted(
setup_time.items(), key=lambda item: item[1].total_seconds() # type: ignore
)
},
)
# Wrap up startup
_LOGGER.debug("Waiting for startup to wrap up")
try:
async with hass.timeout.async_timeout(WRAP_UP_TIMEOUT, cool_down=COOLDOWN_TIME):
await hass.async_block_till_done()
except asyncio.TimeoutError:
_LOGGER.warning("Setup timed out for bootstrap - moving forward")
| avg_line_length: 33.216263 | max_line_length: 95 | alphanum_fraction: 0.660972 |
hexsha: 5c4e9d61d96d4d3239d3288d5bc5c3d450f820ca
size: 4512
ext: py
lang: Python
max_stars_repo_path: OCR_For_Car_License/chinese_recog.py
max_stars_repo_name: qw85639229/Car_License_SVM
max_stars_repo_head_hexsha: c5b0062e84e5000c7940b1d90cc7c63e52afed21
max_stars_repo_licenses: ["MIT"]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-04-20T07:56:07.000Z
max_stars_repo_stars_event_max_datetime: 2020-04-20T07:56:07.000Z
max_issues_repo_path: OCR_For_Car_License/chinese_recog.py
max_issues_repo_name: qw85639229/Car_License_SVM
max_issues_repo_head_hexsha: c5b0062e84e5000c7940b1d90cc7c63e52afed21
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: OCR_For_Car_License/chinese_recog.py
max_forks_repo_name: qw85639229/Car_License_SVM
max_forks_repo_head_hexsha: c5b0062e84e5000c7940b1d90cc7c63e52afed21
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
import cv2
import os
import numpy as np
test_root = './data/test_chinese/'
test_img_path = os.listdir(test_root)
template_size = (100, 100)
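# calculate_similarity: cosine similarity between the flattened template and
# candidate images, with pixel values scaled to [0, 1]; the candidate is
# resized to template_size when the shapes differ. For identical binary images
# the score is 1.0, while an all-zero input makes the denominator 0.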
def calculate_similarity(template, image):
shape1 = template.shape
shape2 = image.shape
if shape1 != shape2:
image = cv2.resize(image, template_size)
template = np.reshape(template, (-1)) / 255.
image = np.reshape(image, (-1)) / 255.
sim = np.sum(template * image) / (np.sqrt(np.sum(template * template)) * np.sqrt(np.sum(image * image)))
return sim
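# calculate_similarity2: an overlap-based score on the binarized images.
# V counts foreground pixels shared by template and candidate, X the candidate
# foreground outside that overlap, and W the template foreground outside it;
# the square-root term penalizes spread between T, U and V. A higher score
# appears to indicate a closer match, and the denominator collapses to 0 when
# W or X is 0 (i.e. near-perfect matches), so the result degenerates there.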
def calculate_similarity2(template, image):
shape1 = template.shape
shape2 = image.shape
if shape1 != shape2:
image = cv2.resize(image, template_size)
template = np.reshape(template, (-1)) / 255.
image = np.reshape(image, (-1)) / 255.
    # Use the builtin bool: the np.bool alias is deprecated and has been
    # removed in recent NumPy releases.
    bool_template = template.astype(bool)
    bool_image = image.astype(bool)
imageV = np.bitwise_and(bool_template, bool_image)
imageX = np.bitwise_xor(imageV, bool_image)
imageW = np.bitwise_xor(imageV, bool_template)
T = np.sum(template)
U = np.sum(image)
V = np.sum(imageV)
X = np.sum(imageX)
W = np.sum(imageW)
TUV = (T + U + V) / 3
sim = V / (W * X * np.sqrt(((T-TUV)**2 + (U-TUV)**2 + (V-TUV)**2)/2) / T / U)
return sim
template_imgs = []
template_root = './data/chinese_template_process'
templates = os.listdir(template_root)
chinese_chars = [x.split('.')[0] for x in templates]
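# For each test plate image: Otsu-binarize, make sure the characters are the
# white foreground, locate the text band with a horizontal projection, split
# the band into characters with a vertical projection, and match each
# character against the templates in template_root.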
for path in test_img_path:
raw_img = cv2.imread(os.path.join(test_root, path))
grey_img = cv2.cvtColor(raw_img, cv2.COLOR_BGR2GRAY)
otsu_thres, otsu_img = cv2.threshold(grey_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # Count black and white pixels (np.where here would sum pixel indices
    # rather than counts); if the characters are dark on a light background,
    # invert the image so the foreground is white for the projection steps.
    black = np.sum(otsu_img == 0)
    white = np.sum(otsu_img == 255)
    if black < white:
        otsu_img = 255 - otsu_img
otsu_img_array = np.array(otsu_img)
h_histogram = np.sum(otsu_img_array, axis=1)
his_h_thres = np.max(h_histogram) / 50
start, end = None, None
for i in range(0, len(h_histogram)):
if h_histogram[i] >= his_h_thres and start is None:
start = i
if h_histogram[i] < his_h_thres and start is not None:
if i - start > len(h_histogram) / 2:
end = i
break
else:
start = None
if i == len(h_histogram) - 1 and h_histogram[i] >= his_h_thres and start is not None:
end = i
cropped_otsu_img = otsu_img[start:end, :]
v_histogram = np.sum(cropped_otsu_img, axis=0)
his_v_thres = np.max(v_histogram) / 50
# index = [i for i in range(int(v_histogram.shape[0]))]
# plt.bar(index, v_histogram)
# plt.show()
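    # Binarize the vertical projection and collect runs wider than 3/5 of the
    # band height; each such run is treated as one character.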
chars = list()
bin_v_histogram = [1 if val > his_v_thres else 0 for val in v_histogram]
tmp = 0
flag = 0
for i in range(len(bin_v_histogram) - 1):
if bin_v_histogram[i + 1] > bin_v_histogram[i] and flag == 0:
tmp = i
flag = 1
if bin_v_histogram[i + 1] < bin_v_histogram[i] and i - tmp > (end - start) * 3 / 5:
chars.append((tmp, i))
flag = 0
    print('Total %d characters' % len(chars))
char_imgs = []
for char in chars:
char_imgs.append(cropped_otsu_img[:, char[0]:char[1]])
res_chars = []
for img in char_imgs:
char_h_histogram = np.sum(img, axis=1)
char_his_h_thres = 1
start_ = None
for i in range(0, len(char_h_histogram)):
if char_h_histogram[i] >= char_his_h_thres and start_ is None:
start_ = i
break
end_ = None
for i in range(len(char_h_histogram) - 1, -1, -1):
if char_h_histogram[i] >= char_his_h_thres and end_ is None:
end_ = i
break
cropped_img = img[start_:end_, :]
# cv2.imshow('img', cropped_img)
# cv2.waitKey(0)
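        # Score the cropped character against every template and keep the
        # best-scoring template's character as the prediction.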
similarities = []
for temp in templates:
temp_img = cv2.imread(os.path.join(template_root, temp))
temp_img = cv2.cvtColor(temp_img, cv2.COLOR_BGR2GRAY)
otsu_temp_thres, otsu_temp_img = cv2.threshold(temp_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
similarities.append(calculate_similarity2(otsu_temp_img, cropped_img))
similarities = np.array(similarities)
index = int(np.argmax(similarities))
res_chars.append(chinese_chars[index])
print(''.join(res_chars))
cv2.imshow('raw_image', raw_img)
cv2.waitKey(500)
| avg_line_length: 30.693878 | max_line_length: 113 | alphanum_fraction: 0.609043 |