Dataset schema (column: type, value or length range; ⌀ = nulls present):
- hexsha: string, length 40
- size: int64, 7 – 1.04M
- ext: string, 10 classes
- lang: string, 1 class
- max_stars_repo_path: string, length 4 – 247
- max_stars_repo_name: string, length 4 – 125
- max_stars_repo_head_hexsha: string, length 40 – 78
- max_stars_repo_licenses: list, length 1 – 10
- max_stars_count: int64, 1 – 368k ⌀
- max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
- max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
- max_issues_repo_path: string, length 4 – 247
- max_issues_repo_name: string, length 4 – 125
- max_issues_repo_head_hexsha: string, length 40 – 78
- max_issues_repo_licenses: list, length 1 – 10
- max_issues_count: int64, 1 – 116k ⌀
- max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
- max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
- max_forks_repo_path: string, length 4 – 247
- max_forks_repo_name: string, length 4 – 125
- max_forks_repo_head_hexsha: string, length 40 – 78
- max_forks_repo_licenses: list, length 1 – 10
- max_forks_count: int64, 1 – 105k ⌀
- max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
- max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
- content: string, length 1 – 1.04M
- avg_line_length: float64, 1.77 – 618k
- max_line_length: int64, 1 – 1.02M
- alphanum_fraction: float64, 0 – 1
- original_content: string, length 7 – 1.04M
- filtered:remove_function_no_docstring: int64, -102 – 942k
- filtered:remove_class_no_docstring: int64, -354 – 977k
- filtered:remove_delete_markers: int64, 0 – 60.1k
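The rows below follow this schema. Assuming this preview comes from a Hugging Face `datasets`-style dataset (the actual dataset identifier is not given here), a minimal sketch of streaming rows and filtering on the schema columns could look like the following; the dataset name is a placeholder, not the real identifier.

# Hypothetical loading sketch; "org/python-code-filtered" is a placeholder name.
from datasets import load_dataset

ds = load_dataset("org/python-code-filtered", split="train", streaming=True)
for row in ds:
    # Keep only Python files from repositories with at least 10 stars
    # (max_stars_count can be null, hence the "or 0").
    if row["ext"] == "py" and (row["max_stars_count"] or 0) >= 10:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
        break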
hexsha: 047b922e0222aa34f6c78018cdd19f790e7e76f5 | size: 3,263 | ext: py | lang: Python
max_stars: Optimizer.py @ FGDBTKD/Linguistically-Regularized-LSTM-for-Sentiment-Classification (head 29c1ebd4b59574bb5e66590893c5da55c47ca6b5, licenses ["MIT"]), stars 14, events 2018-08-16T02:40:42.000Z – 2021-08-03T11:52:28.000Z
max_issues: Optimizer.py @ FGDBTKD/Linguistically-Regularized-LSTM-for-Sentiment-Classification (head 29c1ebd4b59574bb5e66590893c5da55c47ca6b5, licenses ["MIT"]), issues 3, events 2018-09-01T08:55:18.000Z – 2020-04-17T02:03:00.000Z
max_forks: Optimizer.py @ FGDBTKD/Linguistically-Regularized-LSTM-for-Sentiment-Classification (head 29c1ebd4b59574bb5e66590893c5da55c47ca6b5, licenses ["MIT"]), forks 8, events 2018-09-01T03:33:05.000Z – 2021-01-27T06:04:27.000Z
content:
import numpy as np
import logging
OptimizerList = {'SGD': SGD, 'ADAGRAD': ADAGRAD, 'ADADELTA': ADADELTA}
avg_line_length: 42.934211 | max_line_length: 97 | alphanum_fraction: 0.583206
original_content:
import numpy as np
import logging
class ADADELTA(object):
def __init__(self, params, lr=1, lr_word_vector=0.1, lr_decay=0.95, epsilon=1e-6):
logging.info('Optimizer ADADELTA lr %f lr_decay %f epsilon %f' % (lr, lr_decay, epsilon))
self.lr = lr
self.lr_word_vector = lr_word_vector
self.lr_decay = lr_decay
self.epsilon = epsilon
self.acc_grad = {}
self.acc_update = {}
for param in params:
self.acc_grad[param] = np.zeros_like(param.get_value())
self.acc_update[param] = np.zeros_like(param.get_value())
def iterate(self, grads):
lr = self.lr
lr_decay = self.lr_decay
epsilon = self.epsilon
for param, grad in grads.items():
if param.name[0] == 'V':
param.set_value(param.get_value() - grad.get_value() * self.lr_word_vector)
else:
self.acc_grad[param] = lr_decay * self.acc_grad[param] + \
(1 - lr_decay) * (grad.get_value())**2
param_update = np.sqrt(self.acc_update[param] + epsilon) \
/ np.sqrt(self.acc_grad[param] + epsilon) * grad.get_value() * lr
self.acc_update[param] = lr_decay * self.acc_update[param] \
+ (1 - lr_decay) * param_update**2
param.set_value(param.get_value() - param_update)
class ADAGRAD(object):
def __init__(self, params, lr, lr_word_vector=0.1, epsilon=1e-10):
logging.info('Optimizer ADAGRAD lr %f' % (lr, ))
self.lr = lr
self.lr_word_vector = lr_word_vector
self.epsilon = epsilon
self.acc_grad = {}
for param in params:
self.acc_grad[param] = np.zeros_like(param.get_value())
def iterate(self, grads):
lr = self.lr
epsilon = self.epsilon
for param, grad in grads.items():
if param.name[0] == 'V':
param.set_value(param.get_value() - grad.get_value() * self.lr_word_vector)
else:
self.acc_grad[param] = self.acc_grad[param] + grad.get_value()**2
param_update = lr * grad.get_value() / (np.sqrt(self.acc_grad[param]) + epsilon)
param.set_value(param.get_value() - param_update)
class SGD(object):
def __init__(self, params, lr, lr_word_vector=0.1, momentum=0.9):
logging.info('Optimizer SGD lr %s momentum %s' % (lr, momentum))
self.lr = lr
self.lr_word_vector = lr_word_vector
self.momentum = momentum
self.sum_grad = {}
for param in params:
self.sum_grad[param] = np.zeros_like(param.get_value())
def iterate(self, grads):
lr = self.lr
momentum = self.momentum
for param, grad in grads.items():
if param.name[0] == 'V':
param.set_value(param.get_value() - grad.get_value() * self.lr_word_vector)
else:
self.sum_grad[param] = self.sum_grad[param] * momentum + lr * grad.get_value()
param.set_value(param.get_value() - self.sum_grad[param])
grad.set_value(np.zeros_like(param.get_value()))
OptimizerList = {'SGD': SGD, 'ADAGRAD': ADAGRAD, 'ADADELTA': ADADELTA}
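The three optimizer classes above operate on Theano-style shared variables: objects exposing .name, .get_value() and .set_value(), with gradients passed as a {param: grad} mapping and word-vector parameters identified by a name starting with 'V'. A minimal usage sketch follows; FakeShared is a hypothetical stand-in (not part of the original file) so the update rule can be exercised without Theano.

class FakeShared(object):
    """Hypothetical stand-in for a Theano shared variable."""
    def __init__(self, name, value):
        self.name = name
        self._value = np.asarray(value, dtype='float64')
    def get_value(self):
        return self._value
    def set_value(self, value):
        self._value = np.asarray(value, dtype='float64')

W = FakeShared('W', [1.0, -2.0])     # ordinary parameter
gW = FakeShared('gW', [0.1, -0.1])   # its accumulated gradient
opt = OptimizerList['SGD'](params=[W], lr=0.01)
opt.iterate({W: gW})                 # one momentum-SGD step; the gradient is zeroed afterwards
print(W.get_value())                 # [ 0.999 -1.999]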
filtered: remove_function_no_docstring 2,929 | remove_class_no_docstring 0 | remove_delete_markers 228
hexsha: 22f548c42a9ed87746e00171ecdba0d38c74fe62 | size: 702 | ext: py | lang: Python
max_stars: huemans/utils/__init__.py @ mikkogozalo/huemans (head 7ce3807ac9abbdefd618001a768872f9509fe3d5, licenses ["MIT"]), stars 1, events 2017-12-19T15:03:58.000Z – 2017-12-19T15:03:58.000Z
max_issues: huemans/utils/__init__.py @ mikkogozalo/huemans (head 7ce3807ac9abbdefd618001a768872f9509fe3d5, licenses ["MIT"]), issues null, events null – null
max_forks: huemans/utils/__init__.py @ mikkogozalo/huemans (head 7ce3807ac9abbdefd618001a768872f9509fe3d5, licenses ["MIT"]), forks null, events null – null
content:
from collections import MutableMapping
avg_line_length: 21.9375 | max_line_length: 53 | alphanum_fraction: 0.638177
original_content:
from collections import MutableMapping
class State(MutableMapping, dict):
_dirty_keys = []
def __getitem__(self, item):
return dict.__getitem__(self, item)
def __setitem__(self, key, value):
self._dirty_keys.append(key)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
def __iter__(self):
return dict.__iter__(self)
def __len__(self):
return dict.__len__(self)
def __contains__(self, x):
return dict.__contains__(self, x)
def set_clean(self):
self._dirty_keys = []
@property
def dirty(self):
return {k: self[k] for k in self._dirty_keys}
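A short usage sketch of the State mapping above: it behaves like a dict while recording which keys were written since the last set_clean() call. Note that _dirty_keys is a class-level list, so all State instances share one dirty-key list until set_clean() rebinds it per instance; the sketch uses a single instance. (On Python 3.10+ the import above would need to come from collections.abc instead of collections.)

s = State()
s['brightness'] = 128
s['hue'] = 42
print(s.dirty)      # {'brightness': 128, 'hue': 42}
s.set_clean()
s['hue'] = 50
print(s.dirty)      # {'hue': 50}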
filtered: remove_function_no_docstring 375 | remove_class_no_docstring 265 | remove_delete_markers 23
hexsha: 88acba4e7609f52d44bc071b4f9a53e54c8c5aae | size: 1,225 | ext: py | lang: Python
max_stars / max_issues / max_forks: watson-conversation-python-slackbot/bot/driver.py @ relyt0925/kube-samples (head 48254c80826808bbb4193a6956c7de0e1a707008, licenses ["Apache-2.0"]); all counts and event datetimes null
content:
import ConfigParser
import datetime
import threading
import time
from slackclient import SlackClient
from responses import bot
Config = ConfigParser.ConfigParser()
Config.read("credentials.ini")
token = Config.get('Slack_Creds', 'token')
print "token: " + token
sc = SlackClient(token)
robot = bot(sc, token)
print "Starting up!"
connected = False
start = time.time()
# Spin and try to connect every 5 seconds upon initial boot.
while connected is False:
if sc.rtm_connect():
print "Successfully connected to Slack!"
connected = True
while connected:
# Read from the Slack Channels that the bot is currently a part of.
try:
Incoming_Message = sc.rtm_read()
except Exception as e:
print e
Incoming_Message = []
connected = False
# Process each incoming message from Slack.
for msg in Incoming_Message:
thread = threading.Thread(
target=robot.Process, args=(msg,))
thread.start()
time.sleep(0.1)
else:
print "Connection Failed, invalid token?"
connected = False
time.sleep(5)
avg_line_length: 26.630435 | max_line_length: 79 | alphanum_fraction: 0.613061
original_content:
import ConfigParser
import datetime
import threading
import time
from slackclient import SlackClient
from responses import bot
Config = ConfigParser.ConfigParser()
Config.read("credentials.ini")
token = Config.get('Slack_Creds', 'token')
print "token: " + token
sc = SlackClient(token)
robot = bot(sc, token)
print "Starting up!"
connected = False
start = time.time()
# Spin and try to connect every 5 seconds upon initial boot.
while connected is False:
if sc.rtm_connect():
print "Successfully connected to Slack!"
connected = True
while connected:
# Read from the Slack Channels that the bot is currently a part of.
try:
Incoming_Message = sc.rtm_read()
except Exception as e:
print e
Incoming_Message = []
connected = False
# Process each incoming message from Slack.
for msg in Incoming_Message:
thread = threading.Thread(
target=robot.Process, args=(msg,))
thread.start()
time.sleep(0.1)
else:
print "Connection Failed, invalid token?"
connected = False
time.sleep(5)
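The driver above is Python 2 code (print statements, the ConfigParser module) and reads the bot token from a credentials.ini file with a [Slack_Creds] section. A rough Python 3 equivalent of just the credential-loading step, for reference:

import configparser   # Python 3 spelling of the ConfigParser module used above

config = configparser.ConfigParser()
config.read("credentials.ini")
token = config.get("Slack_Creds", "token")   # same section and key as in the driver
print("token: " + token)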
filtered: remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_delete_markers 0
hexsha: d9e27346691061707fec955b28eeb371d35a4d2d | size: 15,523 | ext: py | lang: Python
max_stars / max_issues / max_forks: pynetbox/core/response.py @ louis-oui/pynetbox (head 4d2c7496ec0560e193f648fca67aae1b6c85bce2, licenses ["Apache-2.0"]); all counts and event datetimes null
content:
"""
(c) 2017 DigitalOcean
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pynetbox.core.query import Request
from pynetbox.core.util import Hashabledict
import pynetbox.core.endpoint
# List of fields that are lists but should be treated as sets.
LIST_AS_SET = ("tags", "tagged_vlans")
def get_return(lookup, return_fields=None):
"""Returns simple representations for items passed to lookup.
Used to return a "simple" representation of objects and collections
sent to it via lookup. If lookup is an IPNetwork object immediately
return the string representation. Otherwise, we look to see if
lookup is a "choices" field (dict with only 'id' and 'value')
or a nested_return. Finally, we check if it's a Record, if
so simply return a string. Order is important due to nested_return
being self-referential.
:arg list,optional return_fields: A list of fields to reference when
calling values on lookup.
"""
for i in return_fields or ["id", "value", "nested_return"]:
if isinstance(lookup, dict) and lookup.get(i):
return lookup[i]
else:
if hasattr(lookup, i):
return getattr(lookup, i)
if isinstance(lookup, Record):
return str(lookup)
else:
return lookup
class JsonField(object):
"""Explicit field type for values that are not to be converted
to a Record object"""
_json_field = True
class Record(object):
"""Create python objects from netbox API responses.
Creates an object from a NetBox response passed as `values`.
Nested dicts that represent other endpoints are also turned
into Record objects. All fields are then assigned to the
object's attributes. If a missing attr is requested
(e.g. requesting a field that's only present on a full response on
a Record made from a nested response), pynetbox will make a
request for the full object and return the requested value.
:examples:
Default representation of the object is usually its name
>>> x = nb.dcim.devices.get(1)
>>> x
test1-switch1
>>>
Querying a string field.
>>> x = nb.dcim.devices.get(1)
>>> x.serial
'ABC123'
>>>
Querying a field on a nested object.
>>> x = nb.dcim.devices.get(1)
>>> x.device_type.model
'QFX5100-24Q'
>>>
Casting the object as a dictionary.
>>> from pprint import pprint
>>> pprint(dict(x))
{'asset_tag': None,
'cluster': None,
'comments': '',
'config_context': {},
'created': '2018-04-01',
'custom_fields': {},
'device_role': {'id': 1,
'name': 'Test Switch',
'slug': 'test-switch',
'url': 'http://localhost:8000/api/dcim/device-roles/1/'},
'device_type': {...},
'display_name': 'test1-switch1',
'face': {'label': 'Rear', 'value': 1},
'id': 1,
'name': 'test1-switch1',
'parent_device': None,
'platform': {...},
'position': 1,
'primary_ip': {'address': '192.0.2.1/24',
'family': 4,
'id': 1,
'url': 'http://localhost:8000/api/ipam/ip-addresses/1/'},
'primary_ip4': {...},
'primary_ip6': None,
'rack': {'display_name': 'Test Rack',
'id': 1,
'name': 'Test Rack',
'url': 'http://localhost:8000/api/dcim/racks/1/'},
'serial': 'ABC123',
'site': {'id': 1,
'name': 'TEST',
'slug': 'TEST',
'url': 'http://localhost:8000/api/dcim/sites/1/'},
'status': {'label': 'Active', 'value': 1},
'tags': [],
'tenant': None,
'vc_position': None,
'vc_priority': None,
'virtual_chassis': None}
>>>
Iterating over a Record object.
>>> for i in x:
... print(i)
...
('id', 1)
('name', 'test1-switch1')
('display_name', 'test1-switch1')
>>>
"""
url = None
def __getattr__(self, k):
"""Default behavior for missing attrs.
We'll call `full_details()` if we're asked for an attribute
we don't have.
In order to prevent non-explicit behavior,`k='keys'` is
excluded because casting to dict() calls this attr.
"""
if self.url:
if self.has_details is False and k != "keys":
if self.full_details():
ret = getattr(self, k, None)
if ret or hasattr(self, k):
return ret
raise AttributeError('object has no attribute "{}"'.format(k))
def _parse_values(self, values):
""" Parses values init arg.
Parses values dict at init and sets object attributes with the
values within.
"""
for k, v in values.items():
if isinstance(v, dict):
lookup = getattr(self.__class__, k, None)
if k == "custom_fields" or hasattr(lookup, "_json_field"):
self._add_cache((k, v.copy()))
setattr(self, k, v)
continue
k_endpoint = None
if "url" in v:
k_path_list = v["url"].split("/")
if "api" in k_path_list:
offset = k_path_list.index("api") + 1
if len(k_path_list[offset:]) > 0 \
and k_path_list[offset:][0] == "api":
# domain name is "api"
offset = offset + 1
if len(k_path_list[offset:]) > 1:
k_app = k_path_list[offset:][0]
k_name = k_path_list[offset:][1]
if hasattr(self.api, k_app):
k_endpoint = pynetbox.core.endpoint.\
Endpoint(self.api, getattr(self.api,
k_app),
k_name, model=None)
if lookup:
v = lookup(v, self.api, k_endpoint)
else:
v = self.default_ret(v, self.api, k_endpoint)
self._add_cache((k, v))
elif isinstance(v, list):
v = [list_parser(i) for i in v]
to_cache = list(v)
self._add_cache((k, to_cache))
else:
self._add_cache((k, v))
setattr(self, k, v)
def _compare(self):
"""Compares current attributes to values at instantiation.
In order to be idempotent we run this method in `save()`.
Returns:
Boolean value, True indicates current instance has the same
attributes as the ones passed to `values`.
"""
if self.serialize(init=True) == self.serialize():
return True
return False
def full_details(self):
"""Queries the hyperlinked endpoint if 'url' is defined.
This method will populate the attributes from the detail
endpoint when it's called. Sets the class-level `has_details`
attribute when it's called to prevent being called more
than once.
:returns: True
"""
if self.url:
req = Request(
base=self.url,
token=self.api.token,
session_key=self.api.session_key,
ssl_verify=self.api.ssl_verify,
http_session=self.api.http_session,
)
self._parse_values(req.get())
self.has_details = True
return True
return False
def serialize(self, nested=False, init=False):
"""Serializes an object
Pulls all the attributes in an object and creates a dict that
can be turned into the json that netbox is expecting.
If an attribute's value is a ``Record`` type it's replaced with
the ``id`` field of that object.
.. note::
Using this to get a dictionary representation of the record
is discouraged. It's probably better to cast to dict()
instead. See Record docstring for example.
:returns: dict.
"""
if nested:
return get_return(self)
if init:
init_vals = dict(self._init_cache)
ret = {}
for i in dict(self):
current_val = getattr(self, i) if not init else init_vals.get(i)
if i == "custom_fields":
ret[i] = flatten_custom(current_val)
else:
if isinstance(current_val, Record):
current_val = getattr(current_val, "serialize")(
nested=True
)
if isinstance(current_val, list):
current_val = [
v.id if isinstance(v, Record) else v
for v in current_val
]
if i in LIST_AS_SET:
current_val = list(set(current_val))
ret[i] = current_val
return ret
def save(self):
"""Saves changes to an existing object.
Takes a diff between the objects current state and its state at init
and sends them as a dictionary to Request.patch().
:returns: True if PATCH request was successful.
:example:
>>> x = nb.dcim.devices.get(name='test1-a3-tor1b')
>>> x.serial
u''
>>> x.serial = '1234'
>>> x.save()
True
>>>
"""
if self.id:
diff = self._diff()
if diff:
serialized = self.serialize()
req = Request(
key=self.id,
base=self.endpoint.url,
token=self.api.token,
session_key=self.api.session_key,
ssl_verify=self.api.ssl_verify,
http_session=self.api.http_session,
)
if req.patch({i: serialized[i] for i in diff}):
return True
return False
def update(self, data):
"""Update an object with a dictionary.
Accepts a dict and uses it to update the record and call save().
For nested and choice fields you'd pass an int the same as
if you were modifying the attribute and calling save().
:arg dict data: Dictionary containing the k/v to update the
record object with.
:returns: True if PATCH request was successful.
:example:
>>> x = nb.dcim.devices.get(1)
>>> x.update({
... "name": "test-switch2",
... "serial": "ABC321",
... })
True
"""
for k, v in data.items():
setattr(self, k, v)
return self.save()
def delete(self):
"""Deletes an existing object.
:returns: True if DELETE operation was successful.
:example:
>>> x = nb.dcim.devices.get(name='test1-a3-tor1b')
>>> x.delete()
True
>>>
"""
req = Request(
key=self.id,
base=self.endpoint.url,
token=self.api.token,
session_key=self.api.session_key,
ssl_verify=self.api.ssl_verify,
http_session=self.api.http_session,
)
return True if req.delete() else False
avg_line_length: 32.339583 | max_line_length: 78 | alphanum_fraction: 0.52419
original_content:
"""
(c) 2017 DigitalOcean
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pynetbox.core.query import Request
from pynetbox.core.util import Hashabledict
import pynetbox.core.endpoint
# List of fields that are lists but should be treated as sets.
LIST_AS_SET = ("tags", "tagged_vlans")
def get_return(lookup, return_fields=None):
"""Returns simple representations for items passed to lookup.
Used to return a "simple" representation of objects and collections
sent to it via lookup. If lookup is an IPNetwork object immediately
return the string representation. Otherwise, we look to see if
lookup is a "choices" field (dict with only 'id' and 'value')
or a nested_return. Finally, we check if it's a Record, if
so simply return a string. Order is important due to nested_return
being self-referential.
:arg list,optional return_fields: A list of fields to reference when
calling values on lookup.
"""
for i in return_fields or ["id", "value", "nested_return"]:
if isinstance(lookup, dict) and lookup.get(i):
return lookup[i]
else:
if hasattr(lookup, i):
return getattr(lookup, i)
if isinstance(lookup, Record):
return str(lookup)
else:
return lookup
def flatten_custom(custom_dict):
return {
k: v if not isinstance(v, dict) else v["value"]
for k, v in custom_dict.items()
}
class JsonField(object):
"""Explicit field type for values that are not to be converted
to a Record object"""
_json_field = True
class Record(object):
"""Create python objects from netbox API responses.
Creates an object from a NetBox response passed as `values`.
Nested dicts that represent other endpoints are also turned
into Record objects. All fields are then assigned to the
object's attributes. If a missing attr is requested
(e.g. requesting a field that's only present on a full response on
a Record made from a nested response), pynetbox will make a
request for the full object and return the requested value.
:examples:
Default representation of the object is usually its name
>>> x = nb.dcim.devices.get(1)
>>> x
test1-switch1
>>>
Querying a string field.
>>> x = nb.dcim.devices.get(1)
>>> x.serial
'ABC123'
>>>
Querying a field on a nested object.
>>> x = nb.dcim.devices.get(1)
>>> x.device_type.model
'QFX5100-24Q'
>>>
Casting the object as a dictionary.
>>> from pprint import pprint
>>> pprint(dict(x))
{'asset_tag': None,
'cluster': None,
'comments': '',
'config_context': {},
'created': '2018-04-01',
'custom_fields': {},
'device_role': {'id': 1,
'name': 'Test Switch',
'slug': 'test-switch',
'url': 'http://localhost:8000/api/dcim/device-roles/1/'},
'device_type': {...},
'display_name': 'test1-switch1',
'face': {'label': 'Rear', 'value': 1},
'id': 1,
'name': 'test1-switch1',
'parent_device': None,
'platform': {...},
'position': 1,
'primary_ip': {'address': '192.0.2.1/24',
'family': 4,
'id': 1,
'url': 'http://localhost:8000/api/ipam/ip-addresses/1/'},
'primary_ip4': {...},
'primary_ip6': None,
'rack': {'display_name': 'Test Rack',
'id': 1,
'name': 'Test Rack',
'url': 'http://localhost:8000/api/dcim/racks/1/'},
'serial': 'ABC123',
'site': {'id': 1,
'name': 'TEST',
'slug': 'TEST',
'url': 'http://localhost:8000/api/dcim/sites/1/'},
'status': {'label': 'Active', 'value': 1},
'tags': [],
'tenant': None,
'vc_position': None,
'vc_priority': None,
'virtual_chassis': None}
>>>
Iterating over a Record object.
>>> for i in x:
... print(i)
...
('id', 1)
('name', 'test1-switch1')
('display_name', 'test1-switch1')
>>>
"""
url = None
def __init__(self, values, api, endpoint):
self.has_details = False
self._full_cache = []
self._init_cache = []
self.api = api
self.endpoint = endpoint
self.default_ret = Record
if values:
self._parse_values(values)
def __getattr__(self, k):
"""Default behavior for missing attrs.
We'll call `full_details()` if we're asked for an attribute
we don't have.
In order to prevent non-explicit behavior,`k='keys'` is
excluded because casting to dict() calls this attr.
"""
if self.url:
if self.has_details is False and k != "keys":
if self.full_details():
ret = getattr(self, k, None)
if ret or hasattr(self, k):
return ret
raise AttributeError('object has no attribute "{}"'.format(k))
def __iter__(self):
for i in dict(self._init_cache):
cur_attr = getattr(self, i)
if isinstance(cur_attr, Record):
yield i, dict(cur_attr)
elif isinstance(cur_attr, list) and all(
isinstance(i, Record) for i in cur_attr
):
yield i, [dict(x) for x in cur_attr]
else:
yield i, cur_attr
def __getitem__(self, item):
return item
def __str__(self):
return (
getattr(self, "name", None) or getattr(self, "label", None) or ""
)
def __repr__(self):
return str(self)
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__.update(d)
def __key__(self):
if hasattr(self, "id"):
return (self.endpoint.name, self.id)
else:
return (self.endpoint.name)
def __hash__(self):
return hash(self.__key__())
def __eq__(self, other):
if isinstance(other, Record):
return self.__key__() == other.__key__()
return NotImplemented
def _add_cache(self, item):
key, value = item
self._init_cache.append((key, get_return(value)))
def _parse_values(self, values):
""" Parses values init arg.
Parses values dict at init and sets object attributes with the
values within.
"""
def list_parser(list_item):
if isinstance(list_item, dict):
k_endpoint = None
if "url" in list_item:
if "url" in list_item:
k_path_list = list_item["url"].split("/")
if "api" in k_path_list:
offset = k_path_list.index("api") + 1
if len(k_path_list[offset:]) > 0 \
and k_path_list[offset:][0] == "api":
# domain name is "api"
offset = offset + 1
if len(k_path_list[offset:]) > 1:
k_app = k_path_list[offset:][0]
k_name = k_path_list[offset:][1]
if hasattr(self.api, k_app):
k_endpoint = pynetbox.core.endpoint.\
Endpoint(self.api, getattr(self.api,
k_app),
k_name, model=None)
return self.default_ret(list_item, self.api, k_endpoint)
return list_item
for k, v in values.items():
if isinstance(v, dict):
lookup = getattr(self.__class__, k, None)
if k == "custom_fields" or hasattr(lookup, "_json_field"):
self._add_cache((k, v.copy()))
setattr(self, k, v)
continue
k_endpoint = None
if "url" in v:
k_path_list = v["url"].split("/")
if "api" in k_path_list:
offset = k_path_list.index("api") + 1
if len(k_path_list[offset:]) > 0 \
and k_path_list[offset:][0] == "api":
# domain name is "api"
offset = offset + 1
if len(k_path_list[offset:]) > 1:
k_app = k_path_list[offset:][0]
k_name = k_path_list[offset:][1]
if hasattr(self.api, k_app):
k_endpoint = pynetbox.core.endpoint.\
Endpoint(self.api, getattr(self.api,
k_app),
k_name, model=None)
if lookup:
v = lookup(v, self.api, k_endpoint)
else:
v = self.default_ret(v, self.api, k_endpoint)
self._add_cache((k, v))
elif isinstance(v, list):
v = [list_parser(i) for i in v]
to_cache = list(v)
self._add_cache((k, to_cache))
else:
self._add_cache((k, v))
setattr(self, k, v)
def _compare(self):
"""Compares current attributes to values at instantiation.
In order to be idempotent we run this method in `save()`.
Returns:
Boolean value, True indicates current instance has the same
attributes as the ones passed to `values`.
"""
if self.serialize(init=True) == self.serialize():
return True
return False
def full_details(self):
"""Queries the hyperlinked endpoint if 'url' is defined.
This method will populate the attributes from the detail
endpoint when it's called. Sets the class-level `has_details`
attribute when it's called to prevent being called more
than once.
:returns: True
"""
if self.url:
req = Request(
base=self.url,
token=self.api.token,
session_key=self.api.session_key,
ssl_verify=self.api.ssl_verify,
http_session=self.api.http_session,
)
self._parse_values(req.get())
self.has_details = True
return True
return False
def serialize(self, nested=False, init=False):
"""Serializes an object
Pulls all the attributes in an object and creates a dict that
can be turned into the json that netbox is expecting.
If an attribute's value is a ``Record`` type it's replaced with
the ``id`` field of that object.
.. note::
Using this to get a dictionary representation of the record
is discouraged. It's probably better to cast to dict()
instead. See Record docstring for example.
:returns: dict.
"""
if nested:
return get_return(self)
if init:
init_vals = dict(self._init_cache)
ret = {}
for i in dict(self):
current_val = getattr(self, i) if not init else init_vals.get(i)
if i == "custom_fields":
ret[i] = flatten_custom(current_val)
else:
if isinstance(current_val, Record):
current_val = getattr(current_val, "serialize")(
nested=True
)
if isinstance(current_val, list):
current_val = [
v.id if isinstance(v, Record) else v
for v in current_val
]
if i in LIST_AS_SET:
current_val = list(set(current_val))
ret[i] = current_val
return ret
def _diff(self):
def fmt_dict(k, v):
if isinstance(v, dict):
return k, Hashabledict(v)
if isinstance(v, list):
return k, ",".join(map(str, v))
return k, v
current = Hashabledict(
{fmt_dict(k, v) for k, v in self.serialize().items()}
)
init = Hashabledict(
{fmt_dict(k, v) for k, v in self.serialize(init=True).items()}
)
return set([i[0] for i in set(current.items()) ^ set(init.items())])
def save(self):
"""Saves changes to an existing object.
Takes a diff between the objects current state and its state at init
and sends them as a dictionary to Request.patch().
:returns: True if PATCH request was successful.
:example:
>>> x = nb.dcim.devices.get(name='test1-a3-tor1b')
>>> x.serial
u''
>>> x.serial = '1234'
>>> x.save()
True
>>>
"""
if self.id:
diff = self._diff()
if diff:
serialized = self.serialize()
req = Request(
key=self.id,
base=self.endpoint.url,
token=self.api.token,
session_key=self.api.session_key,
ssl_verify=self.api.ssl_verify,
http_session=self.api.http_session,
)
if req.patch({i: serialized[i] for i in diff}):
return True
return False
def update(self, data):
"""Update an object with a dictionary.
Accepts a dict and uses it to update the record and call save().
For nested and choice fields you'd pass an int the same as
if you were modifying the attribute and calling save().
:arg dict data: Dictionary containing the k/v to update the
record object with.
:returns: True if PATCH request was successful.
:example:
>>> x = nb.dcim.devices.get(1)
>>> x.update({
... "name": "test-switch2",
... "serial": "ABC321",
... })
True
"""
for k, v in data.items():
setattr(self, k, v)
return self.save()
def delete(self):
"""Deletes an existing object.
:returns: True if DELETE operation was successful.
:example:
>>> x = nb.dcim.devices.get(name='test1-a3-tor1b')
>>> x.delete()
True
>>>
"""
req = Request(
key=self.id,
base=self.endpoint.url,
token=self.api.token,
session_key=self.api.session_key,
ssl_verify=self.api.ssl_verify,
http_session=self.api.http_session,
)
return True if req.delete() else False
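One detail worth calling out from the full module above: flatten_custom() collapses choice-style sub-dicts in custom_fields down to their "value" entry while passing scalars through, which is what serialize() relies on. A quick illustration (the field names and values here are made up):

print(flatten_custom({"environment": {"value": 1, "label": "Prod"}, "owner": "neteng"}))
# -> {'environment': 1, 'owner': 'neteng'}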
filtered: remove_function_no_docstring 3,126 | remove_class_no_docstring 0 | remove_delete_markers 378
hexsha: d71a55c60a56a2572e4916c5e4250c6b4970c260 | size: 1,056 | ext: py | lang: Python
max_stars: user.py @ k-koech/password_locker (head 4755fe10f04e4453e95d328d2752c269bbd448e6, licenses ["MIT"]), stars null, events null – null
max_issues: user.py @ k-koech/password_locker (head 4755fe10f04e4453e95d328d2752c269bbd448e6, licenses ["MIT"]), issues null, events null – null
max_forks: user.py @ k-koech/password_locker (head 4755fe10f04e4453e95d328d2752c269bbd448e6, licenses ["MIT"]), forks 1, events 2021-09-04T08:30:08.000Z – 2021-09-04T08:30:08.000Z
content:
class User:
"""
Class that generates new instances of Users.
"""
user_list = [] # Empty user list
def save_user(self):
'''
This method saves User objects into user_list
'''
User.user_list.append(self)
@classmethod
def user_exist(cls,username,password):
'''
Method that checks if a user exists from the users list.
Boolean: True or false depending if the user exists
'''
for user in cls.user_list:
if user.username == username and user.password==password:
return True
return False
@classmethod
def login(cls,username,password):
'''
Method that logs in a user if it exists from the user list.
'''
for user in cls.user_list:
if user.username == username and user.password==password:
return user
avg_line_length: 21.55102 | max_line_length: 69 | alphanum_fraction: 0.5625
original_content:
class User:
"""
Class that generates new instances of Users.
"""
def __init__(self,username,password):
self.username = username
self.password = password
user_list = [] # Empty user list
def save_user(self):
'''
This method saves User objects into user_list
'''
User.user_list.append(self)
@classmethod
def user_exist(cls,username,password):
'''
Method that checks if a user exists from the users list.
Boolean: True or false depending if the user exists
'''
for user in cls.user_list:
if user.username == username and user.password==password:
return True
return False
@classmethod
def login(cls,username,password):
'''
Method that logs in a user if it exists from the user list.
'''
for user in cls.user_list:
if user.username == username and user.password==password:
return user
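A minimal usage sketch of the User class above (the credentials are made up): instances are kept in the class-level user_list, and user_exist()/login() scan that list.

new_user = User("alice", "s3cret")
new_user.save_user()
print(User.user_exist("alice", "s3cret"))          # True
print(User.login("alice", "s3cret").username)      # alice
print(User.user_exist("alice", "wrong-password"))  # False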
filtered: remove_function_no_docstring 83 | remove_class_no_docstring 0 | remove_delete_markers 30
hexsha: aa45a19c41356a15e780119763c8e777f62bcc6a | size: 1,161 | ext: py | lang: Python
max_stars: Information search/IS - 2. Indexing/docreader.py @ Shemplo/Study-courses (head d719cfbddf9358b0f3505e747586d0cc575dd832, licenses ["Apache-2.0"]), stars 1, events 2019-03-27T18:42:12.000Z – 2019-03-27T18:42:12.000Z
max_issues: Information search/IS - 2. Indexing/docreader.py @ Shemplo/Study-courses (head d719cfbddf9358b0f3505e747586d0cc575dd832, licenses ["Apache-2.0"]), issues 3, events 2018-10-19T07:04:03.000Z – 2021-12-14T21:15:10.000Z
max_forks: Information search/IS - 2. Indexing/docreader.py @ Shemplo/Study-courses (head d719cfbddf9358b0f3505e747586d0cc575dd832, licenses ["Apache-2.0"]), forks 3, events 2019-03-21T05:16:21.000Z – 2021-12-21T11:54:30.000Z
content:
#!/usr/bin/env python
import argparse
import document_pb2
import struct
import gzip
import sys
if __name__ == '__main__':
reader = DocumentStreamReader(parse_command_line().files)
for doc in reader:
print "%s\t%d bytes" % (doc.url, len(doc.text))
avg_line_length: 28.317073 | max_line_length: 89 | alphanum_fraction: 0.577089
original_content:
#!/usr/bin/env python
import argparse
import document_pb2
import struct
import gzip
import sys
class DocumentStreamReader:
def __init__(self, paths):
self.paths = paths
def open_single(self, path):
return gzip.open(path, 'rb') if path.endswith('.gz') else open(path, 'rb')
def __iter__(self):
for path in self.paths:
with self.open_single(path) as stream:
while True:
sb = stream.read(4)
if sb == '':
break
size = struct.unpack('i', sb)[0]
msg = stream.read(size)
doc = document_pb2.document()
doc.ParseFromString(msg)
yield doc
def parse_command_line():
parser = argparse.ArgumentParser(description='compressed documents reader')
parser.add_argument('files', nargs='+', help='Input files (.gz or plain) to process')
return parser.parse_args()
if __name__ == '__main__':
reader = DocumentStreamReader(parse_command_line().files)
for doc in reader:
print "%s\t%d bytes" % (doc.url, len(doc.text))
filtered: remove_function_no_docstring 762 | remove_class_no_docstring 6 | remove_delete_markers 126
hexsha: b81bcbbebeafe80edcb9b654a8607f21f50b6e6c | size: 13,581 | ext: py | lang: Python
max_stars / max_issues / max_forks: frameworks/helloworld/tests/test_secrets.py @ akshitjain/dcos-commons_edited (head 371675b07971afc1604800b0fa6b6ce11ae3a705, licenses ["Apache-2.0"]); all counts and event datetimes null
content:
import pytest
from shakedown import *
import sdk_cmd as cmd
import sdk_install as install
import sdk_plan as plan
import sdk_tasks as tasks
import sdk_marathon as marathon
import time
import json
from tests.config import (
PACKAGE_NAME
)
NUM_HELLO = 2
NUM_WORLD = 3
secret_content_default = "hello-world-secret-data"
secret_content_alternative = "hello-world-secret-data-alternative"
secret_options = {
"service": {
"spec_file": "examples/secrets.yml"
},
"hello": {
"count": NUM_HELLO,
"secret1": "hello-world/secret1",
"secret2": "hello-world/secret2"
},
"world": {
"count": NUM_WORLD,
"secret1": "hello-world/secret1",
"secret2": "hello-world/secret2",
"secret3": "hello-world/secret3"
}
}
options_dcos_space_test = {
"service": {
"spec_file": "examples/secrets.yml"
},
"hello": {
"count": NUM_HELLO,
"secret1": "hello-world/somePath/secret1",
"secret2": "hello-world/somePath/secret2"
},
"world": {
"count": NUM_WORLD,
"secret1": "hello-world/somePath/secret1",
"secret2": "hello-world/somePath/secret2",
"secret3": "hello-world/somePath/secret3"
}
}
@pytest.mark.sanity
@pytest.mark.smoke
@pytest.mark.secrets
@dcos_1_10
@pytest.mark.sanity
@pytest.mark.secrets
@dcos_1_10
@pytest.mark.sanity
@pytest.mark.secrets
@dcos_1_10
@pytest.mark.sanity
@pytest.mark.secrets
@dcos_1_10
@pytest.mark.sanity
@pytest.mark.secrets
@pytest.mark.skip(reason="DCOS_SPACE authorization is not working in testing/master. Enable this test later.")
@dcos_1_10
avg_line_length: 35.928571 | max_line_length: 113 | alphanum_fraction: 0.71519
original_content:
import pytest
from shakedown import *
import sdk_cmd as cmd
import sdk_install as install
import sdk_plan as plan
import sdk_tasks as tasks
import sdk_marathon as marathon
import time
import json
from tests.config import (
PACKAGE_NAME
)
NUM_HELLO = 2
NUM_WORLD = 3
secret_content_default = "hello-world-secret-data"
secret_content_alternative = "hello-world-secret-data-alternative"
secret_options = {
"service": {
"spec_file": "examples/secrets.yml"
},
"hello": {
"count": NUM_HELLO,
"secret1": "hello-world/secret1",
"secret2": "hello-world/secret2"
},
"world": {
"count": NUM_WORLD,
"secret1": "hello-world/secret1",
"secret2": "hello-world/secret2",
"secret3": "hello-world/secret3"
}
}
options_dcos_space_test = {
"service": {
"spec_file": "examples/secrets.yml"
},
"hello": {
"count": NUM_HELLO,
"secret1": "hello-world/somePath/secret1",
"secret2": "hello-world/somePath/secret2"
},
"world": {
"count": NUM_WORLD,
"secret1": "hello-world/somePath/secret1",
"secret2": "hello-world/somePath/secret2",
"secret3": "hello-world/somePath/secret3"
}
}
def setup_module(module):
install.uninstall(PACKAGE_NAME)
cmd.run_cli("package install --cli dcos-enterprise-cli")
delete_secrets_all("{}/".format(PACKAGE_NAME))
delete_secrets_all("{}/somePath/".format(PACKAGE_NAME))
delete_secrets_all()
def teardown_module(module):
install.uninstall(PACKAGE_NAME)
delete_secrets_all("{}/".format(PACKAGE_NAME))
delete_secrets_all("{}/somePath/".format(PACKAGE_NAME))
delete_secrets_all()
@pytest.mark.sanity
@pytest.mark.smoke
@pytest.mark.secrets
@dcos_1_10
def test_secrets_basic():
# 1) create Secrets
# 2) install examples/secrets.yml
# 3) if secret file is not created, tasks will fail
# 4) wait till deployment finishes
# 5) do replace operation
# 6) ensure all tasks are running
# 7) delete Secrets
install.uninstall(PACKAGE_NAME)
create_secrets("{}/".format(PACKAGE_NAME))
install.install(PACKAGE_NAME, NUM_HELLO + NUM_WORLD, additional_options=secret_options)
# default is serial strategy, hello deploys first
# launch will fail if secrets are not available or not accessible
plan.wait_for_completed_deployment(PACKAGE_NAME)
hello_tasks_0 = tasks.get_task_ids(PACKAGE_NAME, "hello-0")
world_tasks_0 = tasks.get_task_ids(PACKAGE_NAME, "word-0")
# ensure that secrets work after replace
cmd.run_cli('hello-world pods replace hello-0')
cmd.run_cli('hello-world pods replace world-0')
tasks.check_tasks_updated(PACKAGE_NAME, "hello-0", hello_tasks_0)
tasks.check_tasks_updated(PACKAGE_NAME, 'world-0', world_tasks_0)
# tasks will fail if secret files are not created by mesos module
tasks.check_running(PACKAGE_NAME, NUM_HELLO + NUM_WORLD)
# clean up and delete secrets
delete_secrets("{}/".format(PACKAGE_NAME))
@pytest.mark.sanity
@pytest.mark.secrets
@dcos_1_10
def test_secrets_verify():
# 1) create Secrets
# 2) install examples/secrets.yml
# 3) verify Secrets content
# 4) delete Secrets
install.uninstall(PACKAGE_NAME)
create_secrets("{}/".format(PACKAGE_NAME))
install.install(PACKAGE_NAME, NUM_HELLO + NUM_WORLD, additional_options=secret_options)
# launch will fail if secrets are not available or not accessible
plan.wait_for_completed_deployment(PACKAGE_NAME)
# tasks will fail if secret file is not created
tasks.check_running(PACKAGE_NAME, NUM_HELLO + NUM_WORLD)
# Verify secret content, one from each pod type
# get task id - only first pod
hello_tasks = tasks.get_task_ids(PACKAGE_NAME, "hello-0")
world_tasks = tasks.get_task_ids(PACKAGE_NAME, "world-0")
# first secret: environment variable name is given in yaml
assert secret_content_default == task_exec(world_tasks[0], "bash -c 'echo $WORLD_SECRET1_ENV'")
# second secret: file path is given in yaml
assert secret_content_default == task_exec(world_tasks[0], "cat WORLD_SECRET2_FILE")
# third secret : no file path is given in yaml
# default file path is equal to secret path
assert secret_content_default == task_exec(world_tasks[0], "cat hello-world/secret3")
# hello tasks has container image, world tasks do not
# first secret : environment variable name is given in yaml
assert secret_content_default == task_exec(hello_tasks[0], "bash -c 'echo $HELLO_SECRET1_ENV'")
# first secret : both environment variable name and file path are given in yaml
assert secret_content_default == task_exec(hello_tasks[0], "cat HELLO_SECRET1_FILE")
# second secret : file path is given in yaml
assert secret_content_default == task_exec(hello_tasks[0], "cat HELLO_SECRET2_FILE")
# clean up and delete secrets
delete_secrets("{}/".format(PACKAGE_NAME))
@pytest.mark.sanity
@pytest.mark.secrets
@dcos_1_10
def test_secrets_update():
# 1) create Secrets
# 2) install examples/secrets.yml
# 3) update Secrets
# 4) restart task
# 5) verify Secrets content (updated after restart)
# 6) delete Secrets
install.uninstall(PACKAGE_NAME)
create_secrets("{}/".format(PACKAGE_NAME))
install.install(PACKAGE_NAME, NUM_HELLO + NUM_WORLD, additional_options=secret_options)
# launch will fail if secrets are not available or not accessible
plan.wait_for_completed_deployment(PACKAGE_NAME)
# tasks will fail if secret file is not created
tasks.check_running(PACKAGE_NAME, NUM_HELLO + NUM_WORLD)
cmd.run_cli("security secrets update --value={} {}/secret1".format(secret_content_alternative, PACKAGE_NAME))
cmd.run_cli("security secrets update --value={} {}/secret2".format(secret_content_alternative, PACKAGE_NAME))
cmd.run_cli("security secrets update --value={} {}/secret3".format(secret_content_alternative, PACKAGE_NAME))
# Verify with hello-0 and world-0, just check with one of the pods
hello_tasks_old = tasks.get_task_ids(PACKAGE_NAME, "hello-0")
world_tasks_old = tasks.get_task_ids(PACKAGE_NAME, "world-0")
# restart pods to retrieve new secret's content
cmd.run_cli('hello-world pods restart hello-0')
cmd.run_cli('hello-world pods restart world-0')
# wait pod restart to complete
tasks.check_tasks_updated(PACKAGE_NAME, "hello-0", hello_tasks_old)
tasks.check_tasks_updated(PACKAGE_NAME, 'world-0', world_tasks_old)
# wait till it is running
tasks.check_running(PACKAGE_NAME, NUM_HELLO + NUM_WORLD)
# get new task ids - only first pod
hello_tasks = tasks.get_task_ids(PACKAGE_NAME, "hello-0")
world_tasks = tasks.get_task_ids(PACKAGE_NAME, "world-0")
# make sure content is changed
assert secret_content_alternative == task_exec(world_tasks[0], "bash -c 'echo $WORLD_SECRET1_ENV'")
assert secret_content_alternative == task_exec(world_tasks[0], "cat WORLD_SECRET2_FILE")
assert secret_content_alternative == task_exec(world_tasks[0], "cat {}/secret3".format(PACKAGE_NAME))
# make sure content is changed
assert secret_content_alternative == task_exec(hello_tasks[0], "bash -c 'echo $HELLO_SECRET1_ENV'")
assert secret_content_alternative == task_exec(hello_tasks[0], "cat HELLO_SECRET1_FILE")
assert secret_content_alternative == task_exec(hello_tasks[0], "cat HELLO_SECRET2_FILE")
# clean up and delete secrets
delete_secrets("{}/".format(PACKAGE_NAME))
@pytest.mark.sanity
@pytest.mark.secrets
@dcos_1_10
def test_secrets_config_update():
# 1) install examples/secrets.yml
# 2) create new Secrets, delete old Secrets
# 2) update configuration with new Secrets
# 4) verify secret content (using new Secrets after config update)
install.uninstall(PACKAGE_NAME)
create_secrets("{}/".format(PACKAGE_NAME))
install.install(PACKAGE_NAME, NUM_HELLO + NUM_WORLD, additional_options=secret_options)
# launch will fail if secrets are not available or not accessible
plan.wait_for_completed_deployment(PACKAGE_NAME)
# tasks will fail if secret file is not created
tasks.check_running(PACKAGE_NAME, NUM_HELLO + NUM_WORLD)
# Verify secret content, one from each pod type
# get tasks ids - only first pods
hello_tasks = tasks.get_task_ids(PACKAGE_NAME, "hello-0")
world_tasks = tasks.get_task_ids(PACKAGE_NAME, "world-0")
# make sure it has the default value
assert secret_content_default == task_exec(world_tasks[0], "bash -c 'echo $WORLD_SECRET1_ENV'")
assert secret_content_default == task_exec(world_tasks[0], "cat WORLD_SECRET2_FILE")
assert secret_content_default == task_exec(world_tasks[0], "cat {}/secret3".format(PACKAGE_NAME))
# hello tasks has container image
assert secret_content_default == task_exec(hello_tasks[0], "bash -c 'echo $HELLO_SECRET1_ENV'")
assert secret_content_default == task_exec(hello_tasks[0], "cat HELLO_SECRET1_FILE")
assert secret_content_default == task_exec(hello_tasks[0], "cat HELLO_SECRET2_FILE")
# clean up and delete secrets (defaults)
delete_secrets("{}/".format(PACKAGE_NAME))
# create new secrets with new content -- New Value
create_secrets(secret_content_arg=secret_content_alternative)
config = marathon.get_config(PACKAGE_NAME)
config['env']['HELLO_SECRET1'] = 'secret1'
config['env']['HELLO_SECRET2'] = 'secret2'
config['env']['WORLD_SECRET1'] = 'secret1'
config['env']['WORLD_SECRET2'] = 'secret2'
config['env']['WORLD_SECRET3'] = 'secret3'
# config update
marathon.update_app(PACKAGE_NAME, config)
# wait till plan is complete - pods are supposed to restart
plan.wait_for_completed_deployment(PACKAGE_NAME)
# all tasks are running
tasks.check_running(PACKAGE_NAME, NUM_HELLO + NUM_WORLD)
# Verify secret content is changed
# get task ids - only first pod
hello_tasks = tasks.get_task_ids(PACKAGE_NAME, "hello-0")
world_tasks = tasks.get_task_ids(PACKAGE_NAME, "world-0")
assert secret_content_alternative == task_exec(world_tasks[0], "bash -c 'echo $WORLD_SECRET1_ENV'")
assert secret_content_alternative == task_exec(world_tasks[0], "cat WORLD_SECRET2_FILE")
assert secret_content_alternative == task_exec(world_tasks[0], "cat secret3")
assert secret_content_alternative == task_exec(hello_tasks[0], "bash -c 'echo $HELLO_SECRET1_ENV'")
assert secret_content_alternative == task_exec(hello_tasks[0], "cat HELLO_SECRET1_FILE")
assert secret_content_alternative == task_exec(hello_tasks[0], "cat HELLO_SECRET2_FILE")
# clean up and delete secrets
delete_secrets()
@pytest.mark.sanity
@pytest.mark.secrets
@pytest.mark.skip(reason="DCOS_SPACE authorization is not working in testing/master. Enable this test later.")
@dcos_1_10
def test_secrets_dcos_space():
# 1) create secrets in hello-world/somePath, i.e. hello-world/somePath/secret1 ...
# 2) Tasks with DCOS_SPACE hello-world/somePath
# or some DCOS_SPACE path under hello-world/somePath
# (for example hello-world/somePath/anotherPath/)
# can access these Secrets
install.uninstall(PACKAGE_NAME)
# cannot access these secrets because of DCOS_SPACE authorization
create_secrets("{}/somePath/".format(PACKAGE_NAME))
try:
install.install(PACKAGE_NAME, NUM_HELLO + NUM_WORLD, additional_options=options_dcos_space_test)
plan.wait_for_completed_deployment(PACKAGE_NAME)
assert False, "Should have failed to install"
except AssertionError as arg:
raise arg
except:
pass # expected to fail
# clean up and delete secrets
delete_secrets("{}/somePath/".format(PACKAGE_NAME))
def create_secrets(path_prefix="", secret_content_arg=secret_content_default):
cmd.run_cli("security secrets create --value={} {}secret1".format(secret_content_arg, path_prefix))
cmd.run_cli("security secrets create --value={} {}secret2".format(secret_content_arg, path_prefix))
cmd.run_cli("security secrets create --value={} {}secret3".format(secret_content_arg, path_prefix))
def delete_secrets(path_prefix=""):
cmd.run_cli("security secrets delete {}secret1".format(path_prefix))
cmd.run_cli("security secrets delete {}secret2".format(path_prefix))
cmd.run_cli("security secrets delete {}secret3".format(path_prefix))
def delete_secrets_all(path_prefix=""):
# if there is any secret left, delete
# use in teardown_module
try:
cmd.run_cli("security secrets get {}secret1".format(path_prefix))
cmd.run_cli("security secrets delete {}secret1".format(path_prefix))
except:
pass
try:
cmd.run_cli("security secrets get {}secret2".format(path_prefix))
cmd.run_cli("security secrets delete {}secret2".format(path_prefix))
except:
pass
try:
cmd.run_cli("security secrets get {}secret3".format(path_prefix))
cmd.run_cli("security secrets delete {}secret3".format(path_prefix))
except:
pass
def task_exec(task_name, command):
lines = cmd.run_cli("task exec {} {}".format(task_name, command)).split('\n')
print(lines)
for i in lines:
# ignore text starting with:
# Overwriting Environment Variable ....
# Overwriting PATH ......
if not i.isspace() and not i.startswith("Overwriting"):
return i
return ""
filtered: remove_function_no_docstring 11,627 | remove_class_no_docstring 0 | remove_delete_markers 248
hexsha: 78fae72f384b91868f4f205ab3c67b076ca00abb | size: 1,481 | ext: py | lang: Python
max_stars / max_issues / max_forks: pre_commit_hooks/convert_beginning_helper.py @ toddnguyen47/pre-commit-hooks (head ce500b759db3627ac88598dd183bcac473b4f1bb, licenses ["MIT"]); all counts and event datetimes null
content:
"""Helper functions for converting beginning whitesspace characters"""
import argparse
from collections import deque
from typing import Callable, List
def add_tab_size_option(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
"""Add the `--tab-size` option"""
parser.add_argument(
"--tab-size",
type=int,
required=False,
help="number of whitespaces to substitute tabs with. defaults to 4 spaces",
default=4,
dest="tab_size",
)
return parser
def read_file_convert(
full_path: str, num_spaces: int, handle_per_line: Callable[[List[str], int], str]
):
"""Read file and convert its beginning whitespace per line"""
lines = _read_lines_rb(full_path)
new_lines = []
while lines:
encoded_str = handle_per_line(lines, num_spaces)
new_lines.append(encoded_str)
with open(full_path, mode="wb") as output_file:
output_file.writelines(new_lines)
def _read_lines_rb(full_path: str) -> deque:
"""
We need to open using binary and encode/decode appropriate to enforce that files need to be saved
with Linux line endings
"""
with open(full_path, mode="rb") as input_file:
lines = input_file.readlines()
lines = deque(lines)
return lines
def print_check_changes_message():
"""Print check changes message"""
print(
'You can check the changes made. Then simply "git add --update ." and re-commit'
)
return 1
avg_line_length: 28.480769 | max_line_length: 101 | alphanum_fraction: 0.678596
original_content:
"""Helper functions for converting beginning whitesspace characters"""
import argparse
from collections import deque
from typing import Callable, List
def add_tab_size_option(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
"""Add the `--tab-size` option"""
parser.add_argument(
"--tab-size",
type=int,
required=False,
help="number of whitespaces to substitute tabs with. defaults to 4 spaces",
default=4,
dest="tab_size",
)
return parser
def read_file_convert(
full_path: str, num_spaces: int, handle_per_line: Callable[[List[str], int], str]
):
"""Read file and convert its beginning whitespace per line"""
lines = _read_lines_rb(full_path)
new_lines = []
while lines:
encoded_str = handle_per_line(lines, num_spaces)
new_lines.append(encoded_str)
with open(full_path, mode="wb") as output_file:
output_file.writelines(new_lines)
def _read_lines_rb(full_path: str) -> deque:
"""
We need to open using binary and encode/decode appropriate to enforce that files need to be saved
with Linux line endings
"""
with open(full_path, mode="rb") as input_file:
lines = input_file.readlines()
lines = deque(lines)
return lines
def print_check_changes_message():
"""Print check changes message"""
print(
'You can check the changes made. Then simply "git add --update ." and re-commit'
)
return 1
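read_file_convert() above drives a handle_per_line callback that consumes raw lines from the deque returned by _read_lines_rb() and returns the converted line as bytes (the output file is opened in "wb" mode). A hypothetical callback that expands leading tabs into spaces might look like this:

def tabs_to_spaces(lines, num_spaces):
    # Hypothetical handle_per_line callback: pop one raw line (bytes) and
    # replace each leading tab with num_spaces spaces.
    raw = lines.popleft().decode("utf-8")
    stripped = raw.lstrip("\t")
    leading_tabs = len(raw) - len(stripped)
    return (" " * num_spaces * leading_tabs + stripped).encode("utf-8")

# read_file_convert("example.py", 4, tabs_to_spaces)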
filtered: remove_function_no_docstring 0 | remove_class_no_docstring 0 | remove_delete_markers 0
hexsha: 203e3d9089902e24a0a0a006d3ce997493ff8bc6 | size: 16,783 | ext: py | lang: Python
max_stars / max_issues / max_forks: src/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2_grpc.py @ doraskayo/buildstream (head 1c72d4342ae7df360808de22c5e49f55dbb6bec6, licenses ["Apache-2.0"]); all counts and event datetimes null
content:
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from buildstream._protos.build.bazel.remote.asset.v1 import remote_asset_pb2 as build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2
class FetchStub(object):
"""The Fetch service resolves or fetches assets referenced by URI and
Qualifiers, returning a Digest for the content in
[ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.FetchBlob = channel.unary_unary(
'/build.bazel.remote.asset.v1.Fetch/FetchBlob',
request_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobRequest.SerializeToString,
response_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobResponse.FromString,
)
self.FetchDirectory = channel.unary_unary(
'/build.bazel.remote.asset.v1.Fetch/FetchDirectory',
request_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryRequest.SerializeToString,
response_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryResponse.FromString,
)
class FetchServicer(object):
"""The Fetch service resolves or fetches assets referenced by URI and
Qualifiers, returning a Digest for the content in
[ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
def FetchBlob(self, request, context):
"""Resolve or fetch referenced assets, making them available to the caller and
other consumers in the [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
Servers *MAY* fetch content that they do not already have cached, for any
URLs they support.
Servers *SHOULD* ensure that referenced files are present in the CAS at the
time of the response, and (if supported) that they will remain available
for a reasonable period of time. The TTLs of the referenced blobs *SHOULD*
be increased if necessary and applicable.
In the event that a client receives a reference to content that is no
longer present, it *MAY* re-issue the request with
`oldest_content_accepted` set to a more recent timestamp than the original
attempt, to induce a re-fetch from origin.
Servers *MAY* cache fetched content and reuse it for subsequent requests,
subject to `oldest_content_accepted`.
Servers *MAY* support the complementary [Push][build.bazel.remote.asset.v1.Push]
API and allow content to be directly inserted for use in future fetch
responses.
Servers *MUST* ensure Fetch'd content matches all the specified
qualifiers except in the case of previously Push'd resources, for which
the server *MAY* trust the pushing client to have set the qualifiers
correctly, without validation.
Servers not implementing the complementary [Push][build.bazel.remote.asset.v1.Push]
API *MUST* reject requests containing qualifiers it does not support.
Servers *MAY* transform assets as part of the fetch. For example a
tarball fetched by [FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory]
might be unpacked, or a Git repository
fetched by [FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob]
might be passed through `git-archive`.
Errors handling the requested assets will be returned as gRPC Status errors
here; errors outside the server's control will be returned inline in the
`status` field of the response (see comment there for details).
The possible RPC errors include:
* `INVALID_ARGUMENT`: One or more arguments were invalid, such as a
qualifier that is not supported by the server.
* `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to
perform the requested operation. The client may retry after a delay.
* `UNAVAILABLE`: Due to a transient condition the operation could not be
completed. The client should retry.
* `INTERNAL`: An internal error occurred while performing the operation.
The client should retry.
* `DEADLINE_EXCEEDED`: The fetch could not be completed within the given
RPC deadline. The client should retry for at least as long as the value
provided in `timeout` field of the request.
In the case of unsupported qualifiers, the server *SHOULD* additionally
send a [BadRequest][google.rpc.BadRequest] error detail where, for each
unsupported qualifier, there is a `FieldViolation` with a `field` of
`qualifiers.name` and a `description` of `"{qualifier}" not supported`
indicating the name of the unsupported qualifier.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def FetchDirectory(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
# This class is part of an EXPERIMENTAL API.
class Fetch(object):
"""The Fetch service resolves or fetches assets referenced by URI and
Qualifiers, returning a Digest for the content in
[ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
@staticmethod
@staticmethod
class PushStub(object):
"""The Push service is complementary to the Fetch, and allows for
associating contents of URLs to be returned in future Fetch API calls.
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.PushBlob = channel.unary_unary(
'/build.bazel.remote.asset.v1.Push/PushBlob',
request_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobRequest.SerializeToString,
response_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobResponse.FromString,
)
self.PushDirectory = channel.unary_unary(
'/build.bazel.remote.asset.v1.Push/PushDirectory',
request_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryRequest.SerializeToString,
response_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryResponse.FromString,
)
class PushServicer(object):
"""The Push service is complementary to the Fetch, and allows for
associating contents of URLs to be returned in future Fetch API calls.
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
def PushBlob(self, request, context):
"""These APIs associate the identifying information of a resource, as
indicated by URI and optionally Qualifiers, with content available in the
CAS. For example, associating a repository url and a commit id with a
Directory Digest.
Servers *SHOULD* only allow trusted clients to associate content, and *MAY*
only allow certain URIs to be pushed.
Clients *MUST* ensure associated content is available in CAS prior to
pushing.
Clients *MUST* ensure the Qualifiers listed correctly match the contents,
and Servers *MAY* trust these values without validation.
Fetch servers *MAY* require exact match of all qualifiers when returning
content previously pushed, or allow fetching content with only a subset of
the qualifiers specified on Push.
Clients can specify expiration information that the server *SHOULD*
respect. Subsequent requests can be used to alter the expiration time.
A minimal compliant Fetch implementation may support only Push'd content
and return `NOT_FOUND` for any resource that was not pushed first.
Alternatively, a compliant implementation may choose to not support Push
and only return resources that can be Fetch'd from origin.
Errors will be returned as gRPC Status errors.
The possible RPC errors include:
* `INVALID_ARGUMENT`: One or more arguments to the RPC were invalid.
* `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to
perform the requested operation. The client may retry after a delay.
* `UNAVAILABLE`: Due to a transient condition the operation could not be
completed. The client should retry.
* `INTERNAL`: An internal error occurred while performing the operation.
The client should retry.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PushDirectory(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
# This class is part of an EXPERIMENTAL API.
class Push(object):
"""The Push service is complementary to the Fetch, and allows for
associating contents of URLs to be returned in future Fetch API calls.
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
@staticmethod
@staticmethod
| 50.857576
| 148
| 0.718227
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from buildstream._protos.build.bazel.remote.asset.v1 import remote_asset_pb2 as build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2
class FetchStub(object):
"""The Fetch service resolves or fetches assets referenced by URI and
Qualifiers, returning a Digest for the content in
[ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.FetchBlob = channel.unary_unary(
'/build.bazel.remote.asset.v1.Fetch/FetchBlob',
request_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobRequest.SerializeToString,
response_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobResponse.FromString,
)
self.FetchDirectory = channel.unary_unary(
'/build.bazel.remote.asset.v1.Fetch/FetchDirectory',
request_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryRequest.SerializeToString,
response_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryResponse.FromString,
)
class FetchServicer(object):
"""The Fetch service resolves or fetches assets referenced by URI and
Qualifiers, returning a Digest for the content in
[ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
def FetchBlob(self, request, context):
"""Resolve or fetch referenced assets, making them available to the caller and
other consumers in the [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
Servers *MAY* fetch content that they do not already have cached, for any
URLs they support.
Servers *SHOULD* ensure that referenced files are present in the CAS at the
time of the response, and (if supported) that they will remain available
for a reasonable period of time. The TTLs of the referenced blobs *SHOULD*
be increased if necessary and applicable.
In the event that a client receives a reference to content that is no
longer present, it *MAY* re-issue the request with
`oldest_content_accepted` set to a more recent timestamp than the original
attempt, to induce a re-fetch from origin.
Servers *MAY* cache fetched content and reuse it for subsequent requests,
subject to `oldest_content_accepted`.
Servers *MAY* support the complementary [Push][build.bazel.remote.asset.v1.Push]
API and allow content to be directly inserted for use in future fetch
responses.
Servers *MUST* ensure Fetch'd content matches all the specified
qualifiers except in the case of previously Push'd resources, for which
the server *MAY* trust the pushing client to have set the qualifiers
correctly, without validation.
Servers not implementing the complementary [Push][build.bazel.remote.asset.v1.Push]
API *MUST* reject requests containing qualifiers they do not support.
Servers *MAY* transform assets as part of the fetch. For example a
tarball fetched by [FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory]
might be unpacked, or a Git repository
fetched by [FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob]
might be passed through `git-archive`.
Errors handling the requested assets will be returned as gRPC Status errors
here; errors outside the server's control will be returned inline in the
`status` field of the response (see comment there for details).
The possible RPC errors include:
* `INVALID_ARGUMENT`: One or more arguments were invalid, such as a
qualifier that is not supported by the server.
* `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to
perform the requested operation. The client may retry after a delay.
* `UNAVAILABLE`: Due to a transient condition the operation could not be
completed. The client should retry.
* `INTERNAL`: An internal error occurred while performing the operation.
The client should retry.
* `DEADLINE_EXCEEDED`: The fetch could not be completed within the given
RPC deadline. The client should retry for at least as long as the value
provided in `timeout` field of the request.
In the case of unsupported qualifiers, the server *SHOULD* additionally
send a [BadRequest][google.rpc.BadRequest] error detail where, for each
unsupported qualifier, there is a `FieldViolation` with a `field` of
`qualifiers.name` and a `description` of `"{qualifier}" not supported`
indicating the name of the unsupported qualifier.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def FetchDirectory(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_FetchServicer_to_server(servicer, server):
rpc_method_handlers = {
'FetchBlob': grpc.unary_unary_rpc_method_handler(
servicer.FetchBlob,
request_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobRequest.FromString,
response_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobResponse.SerializeToString,
),
'FetchDirectory': grpc.unary_unary_rpc_method_handler(
servicer.FetchDirectory,
request_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryRequest.FromString,
response_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'build.bazel.remote.asset.v1.Fetch', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
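# --- Illustrative sketch (not part of the generated code) ---------------------
# The docstrings above spell out the Fetch protocol: unsupported qualifiers MUST
# be rejected with INVALID_ARGUMENT. The minimal servicer below sketches how a
# concrete implementation might enforce that rule and how it would be wired into
# a grpc.server() via add_FetchServicer_to_server. It assumes the request message
# exposes a `qualifiers` collection whose entries have a `name` attribute, as the
# docstrings describe; everything else uses only the stable grpc API.
class _ExampleFetchServicer(FetchServicer):
    """Hypothetical servicer that only validates qualifiers before delegating."""

    _SUPPORTED_QUALIFIERS = frozenset({"checksum.sri"})

    def FetchBlob(self, request, context):
        for qualifier in request.qualifiers:  # assumed field, see docstrings above
            if qualifier.name not in self._SUPPORTED_QUALIFIERS:
                # Reject unsupported qualifiers as the protocol requires.
                context.abort(
                    grpc.StatusCode.INVALID_ARGUMENT,
                    '"%s" not supported' % qualifier.name,
                )
        # A real implementation would resolve the URIs and return a populated
        # response here; an empty message keeps the sketch minimal.
        return build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobResponse()


def _serve_fetch_example(port=50051):
    """Hypothetical wiring of the servicer into a gRPC server."""
    from concurrent import futures
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_FetchServicer_to_server(_ExampleFetchServicer(), server)
    server.add_insecure_port("[::]:%d" % port)
    server.start()
    return server
# ------------------------------------------------------------------------------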
# This class is part of an EXPERIMENTAL API.
class Fetch(object):
"""The Fetch service resolves or fetches assets referenced by URI and
Qualifiers, returning a Digest for the content in
[ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
@staticmethod
def FetchBlob(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.asset.v1.Fetch/FetchBlob',
build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobRequest.SerializeToString,
build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def FetchDirectory(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.asset.v1.Fetch/FetchDirectory',
build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryRequest.SerializeToString,
build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
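# --- Illustrative sketch (not part of the generated code) ---------------------
# Complementing the server-side sketch above, this shows how a client might call
# the Fetch service through FetchStub. The `uris` request field is an assumption
# taken from the docstrings (assets are identified by URI and optional
# Qualifiers); only grpc.insecure_channel and the generated stub are relied on
# beyond that.
def _fetch_blob_example(target="localhost:50051", uri="https://example.com/archive.tar.gz"):
    """Hypothetical client call that resolves a single URI to a CAS digest."""
    channel = grpc.insecure_channel(target)
    stub = FetchStub(channel)
    request = build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobRequest(
        uris=[uri],  # assumed repeated field, per the URI-based protocol above
    )
    # The response carries the digest of the fetched content in the CAS; see the
    # FetchBlob docstring for the error codes a caller should be prepared to retry.
    return stub.FetchBlob(request, timeout=30)
# ------------------------------------------------------------------------------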
class PushStub(object):
"""The Push service is complementary to the Fetch, and allows for
associating contents of URLs to be returned in future Fetch API calls.
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.PushBlob = channel.unary_unary(
'/build.bazel.remote.asset.v1.Push/PushBlob',
request_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobRequest.SerializeToString,
response_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobResponse.FromString,
)
self.PushDirectory = channel.unary_unary(
'/build.bazel.remote.asset.v1.Push/PushDirectory',
request_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryRequest.SerializeToString,
response_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryResponse.FromString,
)
class PushServicer(object):
"""The Push service is complementary to the Fetch, and allows for
associating contents of URLs to be returned in future Fetch API calls.
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
def PushBlob(self, request, context):
"""These APIs associate the identifying information of a resource, as
indicated by URI and optionally Qualifiers, with content available in the
CAS. For example, associating a repository url and a commit id with a
Directory Digest.
Servers *SHOULD* only allow trusted clients to associate content, and *MAY*
only allow certain URIs to be pushed.
Clients *MUST* ensure associated content is available in CAS prior to
pushing.
Clients *MUST* ensure the Qualifiers listed correctly match the contents,
and Servers *MAY* trust these values without validation.
Fetch servers *MAY* require exact match of all qualifiers when returning
content previously pushed, or allow fetching content with only a subset of
the qualifiers specified on Push.
Clients can specify expiration information that the server *SHOULD*
respect. Subsequent requests can be used to alter the expiration time.
A minimal compliant Fetch implementation may support only Push'd content
and return `NOT_FOUND` for any resource that was not pushed first.
Alternatively, a compliant implementation may choose to not support Push
and only return resources that can be Fetch'd from origin.
Errors will be returned as gRPC Status errors.
The possible RPC errors include:
* `INVALID_ARGUMENT`: One or more arguments to the RPC were invalid.
* `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to
perform the requested operation. The client may retry after a delay.
* `UNAVAILABLE`: Due to a transient condition the operation could not be
completed. The client should retry.
* `INTERNAL`: An internal error occurred while performing the operation.
The client should retry.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PushDirectory(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_PushServicer_to_server(servicer, server):
rpc_method_handlers = {
'PushBlob': grpc.unary_unary_rpc_method_handler(
servicer.PushBlob,
request_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobRequest.FromString,
response_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobResponse.SerializeToString,
),
'PushDirectory': grpc.unary_unary_rpc_method_handler(
servicer.PushDirectory,
request_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryRequest.FromString,
response_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'build.bazel.remote.asset.v1.Push', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
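# --- Illustrative sketch (not part of the generated code) ---------------------
# The PushBlob docstring says servers SHOULD only allow trusted clients to
# associate content. The hypothetical servicer below sketches one way to apply
# such a policy using only the request context (context.peer() / context.abort()
# are part of the public grpc API); the trust check itself is a placeholder.
class _ExamplePushServicer(PushServicer):
    """Hypothetical servicer that gates PushBlob behind a simple peer allow-list."""

    _TRUSTED_PEER_PREFIXES = ("ipv4:127.0.0.1", "ipv6:[::1]")

    def PushBlob(self, request, context):
        if not context.peer().startswith(self._TRUSTED_PEER_PREFIXES):
            context.abort(grpc.StatusCode.PERMISSION_DENIED,
                          "only trusted clients may push content")
        # A real implementation would record the URI/qualifier -> digest mapping
        # here so later Fetch calls can return it; the sketch returns an empty
        # response to stay minimal.
        return build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobResponse()
# ------------------------------------------------------------------------------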
# This class is part of an EXPERIMENTAL API.
class Push(object):
"""The Push service is complementary to the Fetch, and allows for
associating contents of URLs to be returned in future Fetch API calls.
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
@staticmethod
def PushBlob(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.asset.v1.Push/PushBlob',
build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobRequest.SerializeToString,
build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def PushDirectory(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.asset.v1.Push/PushDirectory',
build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryRequest.SerializeToString,
build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 5,058
| 0
| 150
|
be531dde26d46e2c0939b8834da01888321fbd6d
| 2,847
|
py
|
Python
|
chia/instrumentation/message.py
|
cabrust/chia
|
3eaf815b261dc8a85d64fd698e0079515ec0dde9
|
[
"BSD-3-Clause"
] | null | null | null |
chia/instrumentation/message.py
|
cabrust/chia
|
3eaf815b261dc8a85d64fd698e0079515ec0dde9
|
[
"BSD-3-Clause"
] | 2
|
2021-10-06T13:19:09.000Z
|
2021-10-20T17:32:36.000Z
|
chia/instrumentation/message.py
|
cabrust/chia
|
3eaf815b261dc8a85d64fd698e0079515ec0dde9
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import time
import typing
class ShutdownMessage(Message):
"""This message tells the observers that they should save their data."""
| 29.968421
| 99
| 0.582016
|
import logging
import time
import typing
class Message:
def __init__(self, sender: str):
self.sender = sender
self.timestamp: float = time.time()
def _format_timestamp(self) -> str:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(self.timestamp))
def __str__(self):
return f"[{self._format_timestamp()}] [MESSAGE] [{self.sender}]: {self.__class__.__name__}"
class LogMessage(Message):
def __init__(self, sender: str, level: int, message: str):
super().__init__(sender=sender)
self.sender = sender
self.level = level
self.message = message
def __str__(self):
return (
f"[{self._format_timestamp()}] [{logging.getLevelName(self.level)}] "
f"[{self.sender}]: {self.message}"
)
class ConfigMessage(Message):
def __init__(self, sender: str, field: str, value: typing.Any, source: str):
super().__init__(sender=sender)
self.field = field
self.value = value
self.source = source
def __str__(self):
return (
f"[{self._format_timestamp()}] [CONFIGURATION] [{self.sender}]: "
f"config field {self.field} set to {self.value} from {self.source}"
)
class MetricMessage(Message):
def __init__(self, sender: str, metric: str, value: float, step: int):
super().__init__(sender=sender)
self.metric = metric
self.value = value
self.step = step
def __str__(self):
return (
f"[{self._format_timestamp()}] [METRIC] [{self.sender}]: "
f"metric {self.metric} @{self.step} = {self.value}"
)
class ResultMessage(Message):
def __init__(self, sender: str, result_dict: dict, step: int):
super().__init__(sender=sender)
self.result_dict = result_dict
self.step = step
def __str__(self):
result_strings = []
for key, value in self.result_dict.items():
if isinstance(value, int):
result_strings += [f"{key}={value}"]
elif isinstance(value, float):
result_strings += [f"{key}={value:.3f}"]
else:
result_strings += [f"{key}=[...]"]
result_string = ",".join(result_strings)
return (
f"[{self._format_timestamp()}] [RESULT] [{self.sender}]: "
f"keys are {result_string} @{self.step}"
)
class ShutdownMessage(Message):
"""This message tells the observers that they should save their data."""
def __init__(self, sender: str, successful: bool):
super().__init__(sender)
self.successful = successful
def __str__(self):
return (
f"[{self._format_timestamp()}] [SHUTDOWN] [{self.sender}] "
f"Successful: {self.successful}"
)
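# --- Illustrative usage (sketch, not part of the original module) -------------
# The classes above only define how messages render; this small helper shows the
# intended usage pattern: observers receive Message instances and can rely on
# str() for uniform, timestamped log lines. The sender names are made up.
def _demo_messages():
    """Build one message of each flavour and return their string forms."""
    messages = [
        LogMessage("trainer", logging.INFO, "starting epoch 1"),
        ConfigMessage("config", "batch_size", 32, source="cli"),
        MetricMessage("evaluator", "accuracy", 0.91, step=100),
        ShutdownMessage("runner", successful=True),
    ]
    return [str(m) for m in messages]
# ------------------------------------------------------------------------------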
| 2,207
| 22
| 461
|
54c92cb6f666cfd61232245deafe4d692362e0d7
| 4,278
|
py
|
Python
|
src/fully-connected-network/gradient-descent/model.py
|
aligholamee/UD730
|
936ef8ae4a0a285de6b19919f351a956c72e4cc8
|
[
"MIT"
] | 2
|
2018-02-07T21:31:09.000Z
|
2018-02-08T06:34:38.000Z
|
src/fully-connected-network/gradient-descent/model.py
|
aligholamee/UD730
|
936ef8ae4a0a285de6b19919f351a956c72e4cc8
|
[
"MIT"
] | null | null | null |
src/fully-connected-network/gradient-descent/model.py
|
aligholamee/UD730
|
936ef8ae4a0a285de6b19919f351a956c72e4cc8
|
[
"MIT"
] | 2
|
2020-04-07T06:39:26.000Z
|
2021-02-03T09:59:38.000Z
|
# ========================================
# [] File Name : model.py
#
# [] Creation Date : January 2018
#
# [] Created By : Ali Gholami (aligholami7596@gmail.com)
# ========================================
"""
Training and Validation on notMNIST Dataset
Fully connected network implementation with tensorflow
"""
import pickle as pickle
import numpy as np
import tensorflow as tf
# Data destination path
PICKLE_FILE = "../../../data/notMNIST.pickle"
# Load the data to the RAM
with open(PICKLE_FILE, "rb") as f:
SAVE_FILE = pickle.load(f)
TRAIN_DATASET = SAVE_FILE['train_dataset']
TRAIN_LABELS = SAVE_FILE['train_labels']
VALID_DATASET = SAVE_FILE['valid_dataset']
VALID_LABELS = SAVE_FILE['valid_labels']
TEST_DATASET = SAVE_FILE['test_dataset']
TEST_LABELS = SAVE_FILE['test_labels']
# Free some memory
del SAVE_FILE
# Reformat to the one-hot encoding mode
# def reformatData(dataset, labels):
IMAGE_SIZE = 28
NUM_LABELS = 10
def accuracy(predictions, labels):
"""
Divides the number of correct predictions by the total number of predictions
"""
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0])
def reformat(dataset, labels):
"""
Reformat data to the one-hot and flattened mode
"""
n_dataset = dataset.reshape((-1, IMAGE_SIZE * IMAGE_SIZE)).astype(np.float32)
# Convert to the one hot format
n_labels = (np.arange(NUM_LABELS) == labels[:, None]).astype(np.float32)
return n_dataset, n_labels
TRAIN_DATASET, TRAIN_LABELS = reformat(TRAIN_DATASET, TRAIN_LABELS)
VALID_DATASET, VALID_LABELS = reformat(VALID_DATASET, VALID_LABELS)
TEST_DATASET, TEST_LABELS = reformat(TEST_DATASET, TEST_LABELS)
# Display the opened files
print("Training Set ", TRAIN_DATASET.shape, TRAIN_LABELS.shape)
print("Validation Set", VALID_DATASET.shape, VALID_LABELS.shape)
print("Test Set", TEST_DATASET.shape, TEST_LABELS.shape)
# Implements a gradient descent using tensorflow computational graph
TRAIN_SUBSET = 10000
GRAPH = tf.Graph()
with GRAPH.as_default():
"""
Load the training, validation and test data into the constants attached to the graph
"""
TF_TRAIN_DATASET = tf.constant(TRAIN_DATASET[:TRAIN_SUBSET, :])
TF_TRAIN_LABELS = tf.constant(TRAIN_LABELS[:TRAIN_SUBSET])
TF_VALID_DATASET = tf.constant(VALID_DATASET[:TRAIN_SUBSET])
TF_TEST_DATASET = tf.constant(TEST_DATASET[:TRAIN_SUBSET])
"""
Initialize the weights matrix with normal distribution and the biases with zero values
"""
WEIGHTS = tf.Variable(tf.truncated_normal([IMAGE_SIZE * IMAGE_SIZE, NUM_LABELS]))
BIASES = tf.Variable(tf.zeros([NUM_LABELS]))
"""
Compute the logits WX + b and then apply D(S(WX + b), L) on them
"""
LOGITS = tf.matmul(TF_TRAIN_DATASET, WEIGHTS) + BIASES
LOSS = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = TF_TRAIN_LABELS, logits = LOGITS))
"""
Find the minimum of the loss using the gradient descent optimizer;
this only defines the training op - it is run later in the session, and the trained weights are then evaluated on the test data
"""
OPTIMIZER = tf.train.GradientDescentOptimizer(0.1).minimize(LOSS)
"""
Predictions for the training, validation, and test data.
"""
TRAIN_PREDICTION = tf.nn.softmax(LOGITS)
VALID_PREDICTION = tf.nn.softmax(tf.matmul(TF_VALID_DATASET, WEIGHTS) + BIASES)
TEST_PREDICTION = tf.nn.softmax(tf.matmul(TF_TEST_DATASET, WEIGHTS) + BIASES)
NUM_ITERATIONS = 3000
with tf.Session(graph=GRAPH) as session:
"""
Start the above variable initialization
"""
tf.global_variables_initializer().run()  # initialize_all_variables is deprecated/removed in later TF 1.x
print("Variables initialized")
for step in range(NUM_ITERATIONS):
_, l, predictions = session.run([OPTIMIZER, LOSS, TRAIN_PREDICTION])
if(step % 100 == 0):
print("Loss at step ", step, ": ", l)
print("Training accuracy: ", accuracy(predictions, TRAIN_LABELS[:TRAIN_SUBSET, :]))
"""
Displays the test prediction results
"""
print("Validation accuracy: ", accuracy(VALID_PREDICTION.eval(), VALID_LABELS))
print("Test accuracy: ", accuracy(TEST_PREDICTION.eval(), TEST_LABELS))
| 31.455882
| 109
| 0.683263
|
# ========================================
# [] File Name : model.py
#
# [] Creation Date : January 2018
#
# [] Created By : Ali Gholami (aligholami7596@gmail.com)
# ========================================
"""
Training and Validation on notMNIST Dataset
Fully connected network implementation with tensorflow
"""
import pickle as pickle
import numpy as np
import tensorflow as tf
# Data destination path
PICKLE_FILE = "../../../data/notMNIST.pickle"
# Load the data to the RAM
with open(PICKLE_FILE, "rb") as f:
SAVE_FILE = pickle.load(f)
TRAIN_DATASET = SAVE_FILE['train_dataset']
TRAIN_LABELS = SAVE_FILE['train_labels']
VALID_DATASET = SAVE_FILE['valid_dataset']
VALID_LABELS = SAVE_FILE['valid_labels']
TEST_DATASET = SAVE_FILE['test_dataset']
TEST_LABELS = SAVE_FILE['test_labels']
# Free some memory
del SAVE_FILE
# Reformat to the one-hot encoding mode
# def reformatData(dataset, labels):
IMAGE_SIZE = 28
NUM_LABELS = 10
def accuracy(predictions, labels):
"""
Divides the number of correct predictions by the total number of predictions
"""
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0])
def reformat(dataset, labels):
"""
Reformat data to the one-hot and flattened mode
"""
n_dataset = dataset.reshape((-1, IMAGE_SIZE * IMAGE_SIZE)).astype(np.float32)
# Convert to the one hot format
n_labels = (np.arange(NUM_LABELS) == labels[:, None]).astype(np.float32)
return n_dataset, n_labels
TRAIN_DATASET, TRAIN_LABELS = reformat(TRAIN_DATASET, TRAIN_LABELS)
VALID_DATASET, VALID_LABELS = reformat(VALID_DATASET, VALID_LABELS)
TEST_DATASET, TEST_LABELS = reformat(TEST_DATASET, TEST_LABELS)
# Display the opened files
print("Training Set ", TRAIN_DATASET.shape, TRAIN_LABELS.shape)
print("Validation Set", VALID_DATASET.shape, VALID_LABELS.shape)
print("Test Set", TEST_DATASET.shape, TEST_LABELS.shape)
# Implements a gradient descent using tensorflow computational graph
TRAIN_SUBSET = 10000
GRAPH = tf.Graph()
with GRAPH.as_default():
"""
Load the training, validation and test data into the constants attached to the graph
"""
TF_TRAIN_DATASET = tf.constant(TRAIN_DATASET[:TRAIN_SUBSET, :])
TF_TRAIN_LABELS = tf.constant(TRAIN_LABELS[:TRAIN_SUBSET])
TF_VALID_DATASET = tf.constant(VALID_DATASET[:TRAIN_SUBSET])
TF_TEST_DATASET = tf.constant(TEST_DATASET[:TRAIN_SUBSET])
"""
Initialize the weights matrix with normal distribution and the biases with zero values
"""
WEIGHTS = tf.Variable(tf.truncated_normal([IMAGE_SIZE * IMAGE_SIZE, NUM_LABELS]))
BIASES = tf.Variable(tf.zeros([NUM_LABELS]))
"""
Compute the logits WX + b and then apply D(S(WX + b), L) on them
"""
LOGITS = tf.matmul(TF_TRAIN_DATASET, WEIGHTS) + BIASES
LOSS = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels = TF_TRAIN_LABELS, logits = LOGITS))
"""
Find the minimum of the loss using the gradient descent optimizer;
this only defines the training op - it is run later in the session, and the trained weights are then evaluated on the test data
"""
OPTIMIZER = tf.train.GradientDescentOptimizer(0.1).minimize(LOSS)
"""
Predictions for the training, validation, and test data.
"""
TRAIN_PREDICTION = tf.nn.softmax(LOGITS)
VALID_PREDICTION = tf.nn.softmax(tf.matmul(TF_VALID_DATASET, WEIGHTS) + BIASES)
TEST_PREDICTION = tf.nn.softmax(tf.matmul(TF_TEST_DATASET, WEIGHTS) + BIASES)
NUM_ITERATIONS = 3000
with tf.Session(graph=GRAPH) as session:
"""
Start the above variable initialization
"""
tf.global_variables_initializer().run()  # initialize_all_variables is deprecated/removed in later TF 1.x
print("Variables initialized")
for step in range(NUM_ITERATIONS):
_, l, predictions = session.run([OPTIMIZER, LOSS, TRAIN_PREDICTION])
if(step % 100 == 0):
print("Loss at step ", step, ": ", l)
print("Training accuracy: ", accuracy(predictions, TRAIN_LABELS[:TRAIN_SUBSET, :]))
"""
Displays the test prediction results
"""
print("Validation accuracy: ", accuracy(VALID_PREDICTION.eval(), VALID_LABELS))
print("Test accuracy: ", accuracy(TEST_PREDICTION.eval(), TEST_LABELS))
| 0
| 0
| 0
|
cfc0994fa0ab433301f0722a11cc7d4dc07ffd9f
| 876
|
py
|
Python
|
utils/validators.py
|
rilder-almeida/soulchef_bot
|
0f2f20961c91334cbfcf80429fcac782d39da5db
|
[
"MIT"
] | null | null | null |
utils/validators.py
|
rilder-almeida/soulchef_bot
|
0f2f20961c91334cbfcf80429fcac782d39da5db
|
[
"MIT"
] | null | null | null |
utils/validators.py
|
rilder-almeida/soulchef_bot
|
0f2f20961c91334cbfcf80429fcac782d39da5db
|
[
"MIT"
] | null | null | null |
import re
| 23.675676
| 150
| 0.429224
|
import re
def cpf_valid(cpf: str) -> bool:
cpf = cpf.replace('.', '').replace('-', '').replace(' ', '')
if len(cpf) != 11 or len(set(cpf)) == 1:
return False
for NUM in range(9, 11):
c = [int(n) for n in cpf[:NUM]]
n = list(range(NUM + 1, 1, -1))
s = sum(map(lambda i: c[i] * n[i], range(NUM)))
dv = 0
if (s % 11) >= 2:
dv = 11 - (s % 11)
if not int(cpf[NUM]) == dv:
return False
return True
def phone_valid(phone: str) -> bool:
return bool(
re.match(
"^(?:(?:\+|00)?(55)\s?)?\(?[0]?(?:[14689][1-9]|2[12478]|3[1234578]|5[1345]|7[134579])\)? ?(?:[2-8]|9[1-9])[0-9]{3} ?\-?[0-9]{4}$", # noqa
phone,
)
)
def email_valid(email: str) -> bool:
return bool(re.match("^[^\s@]+@([^\s@.,]+\.)+[^\s@.,]{2,}$", email)) # noqa
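# --- Illustrative usage (sketch, not part of the original module) -------------
# cpf_valid() implements the standard CPF check-digit algorithm (two modulo-11
# verification digits); phone_valid() and email_valid() are plain regex checks.
# The inputs below are example values only: "529.982.247-25" is a commonly used
# sample CPF whose check digits satisfy the algorithm above.
def _validators_demo():
    return {
        "cpf_ok": cpf_valid("529.982.247-25"),        # True: both check digits match
        "cpf_repeated": cpf_valid("111.111.111-11"),  # False: rejected as all-same digits
        "phone_ok": phone_valid("+55 11 91234-5678"), # True: matches the BR phone pattern
        "email_ok": email_valid("user@example.com"),  # True
    }
# ------------------------------------------------------------------------------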
| 794
| 0
| 69
|
7ff7531e0abdb0ef22687c2a2b673f88b060ca02
| 2,388
|
py
|
Python
|
grove/tests/circuit_primitives/test_swap.py
|
mkeshita/grove
|
dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3
|
[
"Apache-2.0"
] | 229
|
2017-01-10T03:11:54.000Z
|
2018-11-26T10:57:49.000Z
|
grove/tests/circuit_primitives/test_swap.py
|
mkeshita/grove
|
dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3
|
[
"Apache-2.0"
] | 123
|
2017-01-10T21:06:51.000Z
|
2018-11-27T19:38:22.000Z
|
grove/tests/circuit_primitives/test_swap.py
|
mkeshita/grove
|
dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3
|
[
"Apache-2.0"
] | 95
|
2017-01-10T03:03:45.000Z
|
2018-11-28T00:42:28.000Z
|
"""
Tests on the swap test in the circuit_primitives module
"""
import numpy as np
import pytest
from unittest.mock import patch
from pyquil import Program
from pyquil.gates import CSWAP, H
from grove.circuit_primitives.swap import (swap_circuit_generator,
run_swap_test,
RegisterSizeMismatch)
def test_swap_circuit_gen_type():
"""
Test the type checking
"""
with pytest.raises(TypeError):
swap_circuit_generator(5, [1, 2], 0)
with pytest.raises(TypeError):
swap_circuit_generator([1, 2], 5, 0)
with pytest.raises(RegisterSizeMismatch):
swap_circuit_generator([1, 2], [3], 0)
def test_default_ancilla_assignment():
"""
Make sure ancilla is assigned to max(regA + regB) + 1 by default
:return:
"""
test_prog_for_ancilla = swap_circuit_generator([1, 2], [5, 6], None)
instruction = test_prog_for_ancilla.pop()
assert instruction.qubits[0].index == 7
def test_cswap_program():
"""
Test if the correct program is returned. Half way to system test
"""
test_prog = swap_circuit_generator([1, 2], [5, 6], None)
true_prog = Program()
true_prog += H(7)
true_prog += CSWAP(7, 1, 5)
true_prog += CSWAP(7, 2, 6)
true_prog += H(7)
assert test_prog.out() == true_prog.out()
def test_run_swap():
"""
Test the qvm return piece
"""
expected_bitstring = [1, 1, 1, 0, 0, 0, 0, 0, 0]
prog_a = Program().inst(H(0))
prog_b = Program().inst(H(1))
with patch("pyquil.api.QuantumComputer") as qc:
qc.run.return_value = expected_bitstring
test_overlap = run_swap_test(prog_a, prog_b,
number_of_measurements=5,
quantum_resource=qc)
assert np.isclose(np.sqrt(1 - 2 * np.mean(expected_bitstring)),
test_overlap)
expected_bitstring = [1, 1, 1, 0, 1]
prog_a = Program().inst(H(0))
prog_b = Program().inst(H(1))
with patch("pyquil.api.QuantumComputer") as qc:
qc.run.return_value = expected_bitstring
with pytest.raises(ValueError):
test_overlap = run_swap_test(prog_a, prog_b,
number_of_measurements=5,
quantum_resource=qc)
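# --- Explanatory note (sketch appended for clarity; not in the original) -------
# The assertion above relies on the swap-test identity: measuring the ancilla
# gives P(1) = (1 - |<a|b>|^2) / 2, so the overlap recovered from a bitstring of
# ancilla outcomes is |<a|b>| = sqrt(1 - 2 * mean(bitstring)). The helper below
# restates that arithmetic with plain numpy, mirroring run_swap_test's
# post-processing.
def _overlap_from_ancilla_bits(bitstring):
    """Estimate |<a|b>| from measured ancilla bits (1 = ancilla measured in |1>)."""
    p_one = np.mean(bitstring)
    return np.sqrt(1.0 - 2.0 * p_one)
# ------------------------------------------------------------------------------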
| 30.615385
| 72
| 0.595059
|
"""
Tests on the swap test in the circuit_primitives module
"""
import numpy as np
import pytest
from unittest.mock import patch
from pyquil import Program
from pyquil.gates import CSWAP, H
from grove.circuit_primitives.swap import (swap_circuit_generator,
run_swap_test,
RegisterSizeMismatch)
def test_swap_circuit_gen_type():
"""
Test the type checking
"""
with pytest.raises(TypeError):
swap_circuit_generator(5, [1, 2], 0)
with pytest.raises(TypeError):
swap_circuit_generator([1, 2], 5, 0)
with pytest.raises(RegisterSizeMismatch):
swap_circuit_generator([1, 2], [3], 0)
def test_default_ancilla_assignment():
"""
Make sure ancilla is assigned to max(regA + regB) + 1 by default
:return:
"""
test_prog_for_ancilla = swap_circuit_generator([1, 2], [5, 6], None)
instruction = test_prog_for_ancilla.pop()
assert instruction.qubits[0].index == 7
def test_cswap_program():
"""
Test if the correct program is returned. Half way to system test
"""
test_prog = swap_circuit_generator([1, 2], [5, 6], None)
true_prog = Program()
true_prog += H(7)
true_prog += CSWAP(7, 1, 5)
true_prog += CSWAP(7, 2, 6)
true_prog += H(7)
assert test_prog.out() == true_prog.out()
def test_run_swap():
"""
Test the qvm return piece
"""
expected_bitstring = [1, 1, 1, 0, 0, 0, 0, 0, 0]
prog_a = Program().inst(H(0))
prog_b = Program().inst(H(1))
with patch("pyquil.api.QuantumComputer") as qc:
qc.run.return_value = expected_bitstring
test_overlap = run_swap_test(prog_a, prog_b,
number_of_measurements=5,
quantum_resource=qc)
assert np.isclose(np.sqrt(1 - 2 * np.mean(expected_bitstring)),
test_overlap)
expected_bitstring = [1, 1, 1, 0, 1]
prog_a = Program().inst(H(0))
prog_b = Program().inst(H(1))
with patch("pyquil.api.QuantumComputer") as qc:
qc.run.return_value = expected_bitstring
with pytest.raises(ValueError):
test_overlap = run_swap_test(prog_a, prog_b,
number_of_measurements=5,
quantum_resource=qc)
| 0
| 0
| 0
|
5382b7f814bf0d8ce5cb66f429170d006417a64d
| 7,291
|
py
|
Python
|
neural_style_transfer.py
|
CarlFredriksson/neural_style_transfer
|
1dd640c78a9fc5f25a4c4f5fd474cc54992ee3b5
|
[
"MIT"
] | null | null | null |
neural_style_transfer.py
|
CarlFredriksson/neural_style_transfer
|
1dd640c78a9fc5f25a4c4f5fd474cc54992ee3b5
|
[
"MIT"
] | null | null | null |
neural_style_transfer.py
|
CarlFredriksson/neural_style_transfer
|
1dd640c78a9fc5f25a4c4f5fd474cc54992ee3b5
|
[
"MIT"
] | null | null | null |
import os
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.applications import VGG19
from tensorflow.keras.layers import MaxPooling2D
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Settings
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
CONTENT_IMG_PATH = "./input/cat.jpg"
STYLE_IMG_PATH = "./input/starry_night.jpg"
GENERATED_IMG_PATH = "./output/generated_img.jpg"
IMG_SIZE = (400, 300)
NUM_COLOR_CHANNELS = 3
ALPHA = 10
BETA = 40
NOISE_RATIO = 0.6
CONTENT_LAYER_INDEX = 13
STYLE_LAYER_INDICES = [1, 4, 7, 12, 17]
STYLE_LAYER_COEFFICIENTS = [0.2, 0.2, 0.2, 0.2, 0.2]
NUM_ITERATIONS = 500
LEARNING_RATE = 2
VGG_IMAGENET_MEANS = np.array([103.939, 116.779, 123.68]).reshape((1, 1, 3)) # In blue-green-red order
LOG_GRAPH = False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Functions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
def create_output_dir():
"""Create output dir if it does not exist."""
cwd = os.getcwd()
output_dir_path = os.path.join(cwd, "output")
if not os.path.exists(output_dir_path):
os.makedirs(output_dir_path)
def load_img(path, size, color_means):
"""Load image from path, preprocess it, and return the image."""
img = cv2.imread(path)
img = cv2.resize(img, dsize=size, interpolation=cv2.INTER_CUBIC)
img = img.astype("float32")
img -= color_means
img = np.expand_dims(img, axis=0)
return img
def save_img(img, path, color_means):
"""Save image to path after postprocessing."""
img += color_means
img = np.clip(img, 0, 255)
img = img.astype("uint8")
cv2.imwrite(path, img)
def create_noisy_img(img, noise_ratio):
"""Add noise to img and return it."""
noise = np.random.uniform(-20, 20, (img.shape[0], img.shape[1], img.shape[2], img.shape[3])).astype("float32")
noisy_img = noise_ratio * noise + (1 - noise_ratio) * img
return noisy_img
def create_output_tensors(input_variable, content_layer_index, style_layer_indices):
"""
Create output tensors, using a pretrained Keras VGG19-model.
Return tensors for content and style layers.
"""
vgg_model = VGG19(weights="imagenet", include_top=False)
layers = [l for l in vgg_model.layers]
x = layers[1](input_variable)
x_content_tensor = x
x_style_tensors = []
if 1 in style_layer_indices:
x_style_tensors.append(x)
for i in range(2, len(layers)):
# Use layers from vgg model, but swap max pooling layers for average pooling
if type(layers[i]) == MaxPooling2D:
x = tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
else:
x = layers[i](x)
# Store appropriate layer outputs
if i == content_layer_index:
x_content_tensor = x
if i in style_layer_indices:
x_style_tensors.append(x)
return x_content_tensor, x_style_tensors
def content_cost(a_c, a_g):
"""Return a tensor representing the content cost."""
_, n_h, n_w, n_c = a_c.shape
return (1/(4 * n_h * n_w * n_c)) * tf.reduce_sum(tf.square(tf.subtract(a_c, a_g)))
def style_cost(a_s_layers, a_g_layers, style_layer_coefficients):
"""Return a tensor representing the style cost."""
style_cost = 0
for i in range(len(a_s_layers)):
# Compute gram matrix for the activations of the style image
a_s = a_s_layers[i]
_, n_h, n_w, n_c = a_s.shape
a_s_unrolled = tf.reshape(tf.transpose(a_s), [n_c, n_h*n_w])
a_s_gram = tf.matmul(a_s_unrolled, tf.transpose(a_s_unrolled))
# Compute gram matrix for the activations of the generated image
a_g = a_g_layers[i]
a_g_unrolled = tf.reshape(tf.transpose(a_g), [n_c, n_h*n_w])
a_g_gram = tf.matmul(a_g_unrolled, tf.transpose(a_g_unrolled))
# Compute style cost for the current layer
style_cost_layer = (1/(4 * n_c**2 * (n_w* n_h)**2)) * tf.reduce_sum(tf.square(tf.subtract(a_s_gram, a_g_gram)))
style_cost += style_cost_layer * style_layer_coefficients[i]
return style_cost
def total_cost(content_cost, style_cost, alpha, beta):
"""Return a tensor representing the total cost."""
return alpha * content_cost + beta * style_cost
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Model
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
create_output_dir()
# Load, resize, and preprocess content and style images
content_img = load_img(CONTENT_IMG_PATH, IMG_SIZE, VGG_IMAGENET_MEANS)
style_img = load_img(STYLE_IMG_PATH, IMG_SIZE, VGG_IMAGENET_MEANS)
# Create the initial generated image; this is the starting point for the optimization process
generated_img_init = create_noisy_img(content_img, NOISE_RATIO)
# Create a TensorFlow variable that will be used as the input to the network.
# This variable will later be assigned generated_img_init and trained.
input_var = tf.Variable(content_img, dtype=tf.float32, expected_shape=(None, None, None, NUM_COLOR_CHANNELS), name="input_var")
# Create output tensors for the activations of the content and style layers,
# using a Keras VGG19-model pretrained on the ImageNet dataset.
x_content, x_styles = create_output_tensors(input_var, CONTENT_LAYER_INDEX, STYLE_LAYER_INDICES)
optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
# Use the Keras session instead of creating a new one
with K.get_session() as sess:
sess.run(tf.variables_initializer([input_var]))
# Extract the layer activations for content and style images
a_content = sess.run(x_content, feed_dict={K.learning_phase(): 0})
sess.run(input_var.assign(style_img))
a_styles = sess.run(x_styles, feed_dict={K.learning_phase(): 0})
# Define the cost function
J_content = content_cost(a_content, x_content)
J_style = style_cost(a_styles, x_styles, STYLE_LAYER_COEFFICIENTS)
J_total = total_cost(J_content, J_style, ALPHA, BETA)
# Log the graph. To display use "tensorboard --logdir=log".
if LOG_GRAPH:
writer = tf.summary.FileWriter("log", sess.graph)
writer.close()
# Assign the generated random initial image as input
sess.run(input_var.assign(generated_img_init))
# Create the training operation
train_op = optimizer.minimize(J_total, var_list=[input_var])
sess.run(tf.variables_initializer(optimizer.variables()))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Train the generated image
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
for i in range(NUM_ITERATIONS):
sess.run(train_op)
if (i%20) == 0:
print(
"Iteration: " + str(i) +
", Content cost: " + "{:.2e}".format(sess.run(J_content)) +
", Style cost: " + "{:.2e}".format(sess.run(J_style)) +
", Total cost: " + "{:.2e}".format(sess.run(J_total))
)
# Save the generated image
generated_img = sess.run(input_var)[0]
save_img(generated_img, GENERATED_IMG_PATH, VGG_IMAGENET_MEANS)
# Save the generated image
generated_img = sess.run(input_var)[0]
save_img(generated_img, GENERATED_IMG_PATH, VGG_IMAGENET_MEANS)
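#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Illustrative sketch (appended for clarity; not in the original)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# style_cost() above compares Gram matrices: activations of shape (n_h, n_w, n_c)
# are unrolled to (n_c, n_h*n_w) and G = A A^T captures channel-to-channel
# correlations. The per-layer cost is then
#   (1 / (4 * n_c^2 * (n_h*n_w)^2)) * sum((G_style - G_generated)^2).
# The tiny numpy version below restates that computation on toy activations
# (the unrolling order differs slightly from the TF code, but the Gram-matrix
# idea is the same).
def _gram_style_cost_demo():
    rng = np.random.RandomState(0)
    a_style = rng.rand(4, 4, 3)       # toy style activations (n_h, n_w, n_c)
    a_generated = rng.rand(4, 4, 3)   # toy generated activations
    def gram(a):
        n_h, n_w, n_c = a.shape
        unrolled = a.reshape(n_h * n_w, n_c).T    # (n_c, n_h*n_w)
        return unrolled @ unrolled.T
    n_h, n_w, n_c = a_style.shape
    diff = gram(a_style) - gram(a_generated)
    return np.sum(diff ** 2) / (4 * n_c ** 2 * (n_h * n_w) ** 2)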
| 37.973958
| 127
| 0.642436
|
import os
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.applications import VGG19
from tensorflow.keras.layers import MaxPooling2D
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Settings
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
CONTENT_IMG_PATH = "./input/cat.jpg"
STYLE_IMG_PATH = "./input/starry_night.jpg"
GENERATED_IMG_PATH = "./output/generated_img.jpg"
IMG_SIZE = (400, 300)
NUM_COLOR_CHANNELS = 3
ALPHA = 10
BETA = 40
NOISE_RATIO = 0.6
CONTENT_LAYER_INDEX = 13
STYLE_LAYER_INDICES = [1, 4, 7, 12, 17]
STYLE_LAYER_COEFFICIENTS = [0.2, 0.2, 0.2, 0.2, 0.2]
NUM_ITERATIONS = 500
LEARNING_RATE = 2
VGG_IMAGENET_MEANS = np.array([103.939, 116.779, 123.68]).reshape((1, 1, 3)) # In blue-green-red order
LOG_GRAPH = False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Functions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
def create_output_dir():
"""Create output dir if it does not exist."""
cwd = os.getcwd()
output_dir_path = os.path.join(cwd, "output")
if not os.path.exists(output_dir_path):
os.makedirs(output_dir_path)
def load_img(path, size, color_means):
"""Load image from path, preprocess it, and return the image."""
img = cv2.imread(path)
img = cv2.resize(img, dsize=size, interpolation=cv2.INTER_CUBIC)
img = img.astype("float32")
img -= color_means
img = np.expand_dims(img, axis=0)
return img
def save_img(img, path, color_means):
"""Save image to path after postprocessing."""
img += color_means
img = np.clip(img, 0, 255)
img = img.astype("uint8")
cv2.imwrite(path, img)
def create_noisy_img(img, noise_ratio):
"""Add noise to img and return it."""
noise = np.random.uniform(-20, 20, (img.shape[0], img.shape[1], img.shape[2], img.shape[3])).astype("float32")
noisy_img = noise_ratio * noise + (1 - noise_ratio) * img
return noisy_img
def create_output_tensors(input_variable, content_layer_index, style_layer_indices):
"""
Create output tensors, using a pretrained Keras VGG19-model.
Return tensors for content and style layers.
"""
vgg_model = VGG19(weights="imagenet", include_top=False)
layers = [l for l in vgg_model.layers]
x = layers[1](input_variable)
x_content_tensor = x
x_style_tensors = []
if 1 in style_layer_indices:
x_style_tensors.append(x)
for i in range(2, len(layers)):
# Use layers from vgg model, but swap max pooling layers for average pooling
if type(layers[i]) == MaxPooling2D:
x = tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
else:
x = layers[i](x)
# Store appropriate layer outputs
if i == content_layer_index:
x_content_tensor = x
if i in style_layer_indices:
x_style_tensors.append(x)
return x_content_tensor, x_style_tensors
def content_cost(a_c, a_g):
"""Return a tensor representing the content cost."""
_, n_h, n_w, n_c = a_c.shape
return (1/(4 * n_h * n_w * n_c)) * tf.reduce_sum(tf.square(tf.subtract(a_c, a_g)))
def style_cost(a_s_layers, a_g_layers, style_layer_coefficients):
"""Return a tensor representing the style cost."""
style_cost = 0
for i in range(len(a_s_layers)):
# Compute gram matrix for the activations of the style image
a_s = a_s_layers[i]
_, n_h, n_w, n_c = a_s.shape
a_s_unrolled = tf.reshape(tf.transpose(a_s), [n_c, n_h*n_w])
a_s_gram = tf.matmul(a_s_unrolled, tf.transpose(a_s_unrolled))
# Compute gram matrix for the activations of the generated image
a_g = a_g_layers[i]
a_g_unrolled = tf.reshape(tf.transpose(a_g), [n_c, n_h*n_w])
a_g_gram = tf.matmul(a_g_unrolled, tf.transpose(a_g_unrolled))
# Compute style cost for the current layer
style_cost_layer = (1/(4 * n_c**2 * (n_w* n_h)**2)) * tf.reduce_sum(tf.square(tf.subtract(a_s_gram, a_g_gram)))
style_cost += style_cost_layer * style_layer_coefficients[i]
return style_cost
def total_cost(content_cost, style_cost, alpha, beta):
"""Return a tensor representing the total cost."""
return alpha * content_cost + beta * style_cost
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Model
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
create_output_dir()
# Load, resize, and preprocess content and style images
content_img = load_img(CONTENT_IMG_PATH, IMG_SIZE, VGG_IMAGENET_MEANS)
style_img = load_img(STYLE_IMG_PATH, IMG_SIZE, VGG_IMAGENET_MEANS)
# Create the initial generated image; this is the starting point for the optimization process
generated_img_init = create_noisy_img(content_img, NOISE_RATIO)
# Create a TensorFlow variable that will be used as the input to the network.
# This variable will later be assigned generated_img_init and trained.
input_var = tf.Variable(content_img, dtype=tf.float32, expected_shape=(None, None, None, NUM_COLOR_CHANNELS), name="input_var")
# Create output tensors for the activations of the content and style layers,
# using a Keras VGG19-model pretrained on the ImageNet dataset.
x_content, x_styles = create_output_tensors(input_var, CONTENT_LAYER_INDEX, STYLE_LAYER_INDICES)
optimizer = tf.train.AdamOptimizer(LEARNING_RATE)
# Use the Keras session instead of creating a new one
with K.get_session() as sess:
sess.run(tf.variables_initializer([input_var]))
# Extract the layer activations for content and style images
a_content = sess.run(x_content, feed_dict={K.learning_phase(): 0})
sess.run(input_var.assign(style_img))
a_styles = sess.run(x_styles, feed_dict={K.learning_phase(): 0})
# Define the cost function
J_content = content_cost(a_content, x_content)
J_style = style_cost(a_styles, x_styles, STYLE_LAYER_COEFFICIENTS)
J_total = total_cost(J_content, J_style, ALPHA, BETA)
# Log the graph. To display use "tensorboard --logdir=log".
if LOG_GRAPH:
writer = tf.summary.FileWriter("log", sess.graph)
writer.close()
# Assign the generated random initial image as input
sess.run(input_var.assign(generated_img_init))
# Create the training operation
train_op = optimizer.minimize(J_total, var_list=[input_var])
sess.run(tf.variables_initializer(optimizer.variables()))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Train the generated image
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
for i in range(NUM_ITERATIONS):
sess.run(train_op)
if (i%20) == 0:
print(
"Iteration: " + str(i) +
", Content cost: " + "{:.2e}".format(sess.run(J_content)) +
", Style cost: " + "{:.2e}".format(sess.run(J_style)) +
", Total cost: " + "{:.2e}".format(sess.run(J_total))
)
# Save the generated image
generated_img = sess.run(input_var)[0]
save_img(generated_img, GENERATED_IMG_PATH, VGG_IMAGENET_MEANS)
# Save the generated image
generated_img = sess.run(input_var)[0]
save_img(generated_img, GENERATED_IMG_PATH, VGG_IMAGENET_MEANS)
| 0
| 0
| 0
|
004b00f5d71ab4c3568ca60038dc9509d5e81656
| 9,891
|
py
|
Python
|
train_nerf.py
|
AnimatedRNG/nerf-jax
|
c940bcfbb986623691aff7a4e28bf8273ea70147
|
[
"Apache-2.0"
] | 5
|
2020-10-22T07:27:15.000Z
|
2022-02-25T02:54:39.000Z
|
train_nerf.py
|
AnimatedRNG/nerf-jax
|
c940bcfbb986623691aff7a4e28bf8273ea70147
|
[
"Apache-2.0"
] | 11
|
2021-01-27T01:52:38.000Z
|
2021-02-03T06:35:34.000Z
|
train_nerf.py
|
AnimatedRNG/nerf-jax
|
c940bcfbb986623691aff7a4e28bf8273ea70147
|
[
"Apache-2.0"
] | 2
|
2020-12-15T14:44:07.000Z
|
2021-01-27T03:39:01.000Z
|
#!/usr/bin/env python3
import argparse
import functools
from pathlib import Path
from datetime import datetime
from collections import namedtuple
import numpy as np
import yaml
from box import Box
import jax
from jax import jit, vmap, pmap, grad, value_and_grad
import jax.numpy as jnp
from jax.tree_util import register_pytree_node
from jax.experimental.optimizers import adam
import haiku as hk
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from nerf import loader, sampler
from nerf import run_one_iter_of_nerf, run_network
from nerf import FlexibleNeRFModel, compute_embedding_size
from reference import torch_to_jax
from util import get_ray_bundle
Losses = namedtuple("Losses", ["coarse_loss", "fine_loss"])
register_pytree_node(Losses, lambda xs: (tuple(xs), None), lambda _, xs: Losses(*xs))
if __name__ == "__main__":
import cv2
import torch
from reference import *
import time
main()
| 32.323529
| 88
| 0.632696
|
#!/usr/bin/env python3
import argparse
import functools
from pathlib import Path
from datetime import datetime
from collections import namedtuple
import numpy as np
import yaml
from box import Box
import jax
from jax import jit, vmap, pmap, grad, value_and_grad
import jax.numpy as jnp
from jax.tree_util import register_pytree_node
from jax.experimental.optimizers import adam
import haiku as hk
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from nerf import loader, sampler
from nerf import run_one_iter_of_nerf, run_network
from nerf import FlexibleNeRFModel, compute_embedding_size
from reference import torch_to_jax
from util import get_ray_bundle
Losses = namedtuple("Losses", ["coarse_loss", "fine_loss"])
register_pytree_node(Losses, lambda xs: (tuple(xs), None), lambda _, xs: Losses(*xs))
def create_networks(config):
coarse_embedding = compute_embedding_size(
include_input_xyz=True,
include_input_dir=True,
num_encoding_fn_xyz=config.nerf.model.coarse.num_encoding_fn_xyz,
num_encoding_fn_dir=config.nerf.model.coarse.num_encoding_fn_dir,
)
fine_embedding = compute_embedding_size(
include_input_xyz=True,
include_input_dir=True,
num_encoding_fn_xyz=config.nerf.model.fine.num_encoding_fn_xyz,
num_encoding_fn_dir=config.nerf.model.fine.num_encoding_fn_dir,
)
model_coarse = hk.transform(
lambda x: FlexibleNeRFModel(
num_encoding_fn_xyz=config.nerf.model.coarse.num_encoding_fn_xyz,
num_encoding_fn_dir=config.nerf.model.coarse.num_encoding_fn_dir,
include_input_xyz=True,
include_input_dir=True,
use_viewdirs=config.nerf.model.coarse.use_viewdirs,
)(x)
)
model_fine = hk.transform(
lambda x: FlexibleNeRFModel(
num_encoding_fn_xyz=config.nerf.model.fine.num_encoding_fn_xyz,
num_encoding_fn_dir=config.nerf.model.fine.num_encoding_fn_dir,
include_input_xyz=True,
include_input_dir=True,
use_viewdirs=config.nerf.model.fine.use_viewdirs,
)(x)
)
return (
model_coarse,
model_fine,
coarse_embedding,
fine_embedding,
)
def init_networks(
rng, model_coarse, model_fine, coarse_embedding, fine_embedding, config
):
dummy_input_coarse = jnp.zeros((config.nerf.train.chunksize, sum(coarse_embedding)))
dummy_input_fine = jnp.zeros((config.nerf.train.chunksize, sum(fine_embedding)))
coarse_params = model_coarse.init(rng[0], dummy_input_coarse)
fine_params = model_fine.init(rng[1], dummy_input_fine)
return (coarse_params, fine_params)
def load_networks_from_torch(checkpoint_file="./checkpoint/checkpoint199999.ckpt"):
checkpoint = torch.load(checkpoint_file)
model_coarse_params = torch_to_jax(
checkpoint["model_coarse_state_dict"], "flexible_ne_rf_model"
)
model_fine_params = torch_to_jax(
checkpoint["model_fine_state_dict"], "flexible_ne_rf_model"
)
return (model_coarse_params, model_fine_params)
def train_nerf(config):
# Create random number generator
rng = jax.random.PRNGKey(config.experiment.seed)
# create models
model_coarse, model_fine, coarse_embedding, fine_embedding = create_networks(config)
# model_coarse_params, model_fine_params = load_networks_from_torch(
# "checkpoint/checkpoint00000.ckpt"
# )
rng, *subrng = jax.random.split(rng, 3)
model_coarse_params, model_fine_params = init_networks(
subrng, model_coarse, model_fine, coarse_embedding, fine_embedding, config
)
model_coarse, model_fine = (
hk.without_apply_rng(model_coarse),
hk.without_apply_rng(model_fine),
)
# Create loader
basedir = config.dataset.basedir
print(f"Loading images/poses from {basedir}...")
images, poses, intrinsics = loader(
Path(".") / basedir, config.dataset.filter_chain, jax.devices()[0],
)
print("...done!")
# TODO: figure out optimizer
num_decay_steps = config.nerf.model.optimizer.lr_decay * 1000
init_adam, update, get_params = adam(
lambda iteration: config.nerf.model.optimizer.initial_lr
* (config.nerf.model.optimizer.lr_decay_factor ** (iteration / num_decay_steps))
)
optimizer_state = init_adam((model_coarse_params, model_fine_params))
# Logging
logdir = Path("logs") / "lego" / datetime.now().strftime("%d_%m_%Y_%H_%M_%S")
logdir.mkdir(exist_ok=True)
writer = SummaryWriter(logdir.absolute())
(logdir / "config.yml").open("w").write(config.to_yaml())
rng, subrng_img = jax.random.split(rng, 2)
train_image_seq = jax.random.randint(
subrng_img,
shape=(config.experiment.train_iters,),
minval=0,
maxval=images["train"].shape[0],
dtype=jnp.uint32,
)
def loss_fn(f_rng, cp, fp, image_id):
H, W, focal = (
intrinsics["train"].height,
intrinsics["train"].width,
intrinsics["train"].focal_length,
)
ray_origins, ray_directions, target_s = sampler(
images["train"][image_id],
poses["train"][image_id],
intrinsics["train"],
f_rng[0],
config.dataset.sampler,
)
_, rendered_images = run_one_iter_of_nerf(
H,
W,
focal,
functools.partial(model_coarse.apply, cp),
functools.partial(model_fine.apply, fp),
ray_origins,
ray_directions,
config.nerf.train,
config.nerf.model,
config.dataset.projection,
f_rng[1],
False,
)
rgb_coarse, _, _, rgb_fine, _, _ = (
rendered_images[..., :3],
rendered_images[..., 3:4],
rendered_images[..., 4:5],
rendered_images[..., 5:8],
rendered_images[..., 8:9],
rendered_images[..., 9:10],
)
coarse_loss = jnp.mean(((target_s[..., :3] - rgb_coarse) ** 2.0).flatten())
loss = coarse_loss
if config.nerf.train.num_fine > 0:
fine_loss = jnp.mean(((target_s[..., :3] - rgb_fine) ** 2.0).flatten())
loss = loss + fine_loss
return loss, Losses(coarse_loss=coarse_loss, fine_loss=fine_loss)
@jit
def validation(f_rng, cp, fp, image_id):
H, W, focal = (
intrinsics["val"].height,
intrinsics["val"].width,
intrinsics["val"].focal_length,
)
ray_origins, ray_directions = get_ray_bundle(
H, W, focal, poses["val"][0][:3, :4].astype(np.float32),
)
rng, rendered_images = run_one_iter_of_nerf(
H,
W,
focal,
functools.partial(model_coarse.apply, cp),
functools.partial(model_fine.apply, fp),
ray_origins,
ray_directions,
config.nerf.validation,
config.nerf.model,
config.dataset.projection,
f_rng,
True,
)
rgb_coarse, _, _, rgb_fine, _, _ = (
rendered_images[..., :3],
rendered_images[..., 3:4],
rendered_images[..., 4:5],
rendered_images[..., 5:8],
rendered_images[..., 8:9],
rendered_images[..., 9:10],
)
return rgb_coarse, rgb_fine
@jit
def update_loop(rng, optimizer_state, start, num_iterations):
def inner(i, rng_optimizer_state):
rng, optimizer_state, _ = rng_optimizer_state
rng, *subrng = jax.random.split(rng, 3)
(model_coarse_params, model_fine_params) = get_params(optimizer_state)
(_, losses), (cp_grad, fp_grad) = value_and_grad(
loss_fn, argnums=(1, 2), has_aux=True
)(subrng, model_coarse_params, model_fine_params, train_image_seq[i])
optimizer_state = update(i, (cp_grad, fp_grad), optimizer_state)
return rng, optimizer_state, losses
return jax.lax.fori_loop(
start,
start + num_iterations,
inner,
(rng, optimizer_state, Losses(coarse_loss=0.0, fine_loss=0.0)),
)
for i in trange(0, config.experiment.train_iters, config.experiment.jit_loop):
rng, optimizer_state, losses = update_loop(
rng, optimizer_state, i, config.experiment.jit_loop
)
loss = losses.coarse_loss + losses.fine_loss
# Validation
if (
i % config.experiment.print_every == 0
or i == config.experiment.train_iters - 1
):
tqdm.write(f"Iter {i}: Loss {loss}")
writer.add_scalar("train/loss", loss, i)
writer.add_scalar("train/coarse_loss", losses.coarse_loss, i)
writer.add_scalar("train/fine_loss", losses.fine_loss, i)
if i % config.experiment.validate_every == 0:
start = time.time()
rgb_coarse, rgb_fine = validation(rng, *get_params(optimizer_state), 0)
end = time.time()
to_img = lambda x: np.array(
np.clip(jnp.transpose(x, axes=(2, 1, 0)), 0.0, 1.0) * 255
).astype(np.uint8)
writer.add_image("validation/rgb_coarse", to_img(rgb_coarse), i)
writer.add_image("validation/rgb_fine", to_img(rgb_fine), i)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, required=True)
config_args = parser.parse_args()
with open(config_args.config, "r") as f:
config_dictionary = yaml.load(f, Loader=yaml.FullLoader)
config = Box(config_dictionary)
train_nerf(config)
if __name__ == "__main__":
import cv2
import torch
from reference import *
import time
main()
| 8,826
| 0
| 115
|
2dcc1dfe862513682983900b78ea51d0e14b216f
| 477
|
py
|
Python
|
insta/forms.py
|
Ruweydha/Insta-clone
|
dc0c08b8daa941bffeee751f8e6f77e1ae41f7c0
|
[
"Unlicense"
] | null | null | null |
insta/forms.py
|
Ruweydha/Insta-clone
|
dc0c08b8daa941bffeee751f8e6f77e1ae41f7c0
|
[
"Unlicense"
] | null | null | null |
insta/forms.py
|
Ruweydha/Insta-clone
|
dc0c08b8daa941bffeee751f8e6f77e1ae41f7c0
|
[
"Unlicense"
] | null | null | null |
from pyexpat import model
from django import forms
from .models import Profile, Images, Comments
| 26.5
| 55
| 0.660377
|
from pyexpat import model
from django import forms
from .models import Profile, Images, Comments
class UpdateProfileForm(forms.ModelForm):
class Meta:
model = Profile
exclude = ['user', 'pub_date']
class ImagesForm(forms.ModelForm):
class Meta:
model = Images
exclude = ['profile', 'date_posted']
class CommentsForm(forms.ModelForm):
class Meta:
model = Comments
exclude =['commentor', 'date_posted', 'image' ]
| 0
| 308
| 73
|
0fbd0c29ffc03075b101be02d82f3c616c84a5ed
| 152
|
py
|
Python
|
Udemy/GeekUniversity/secao_4/ex16_converso_polegada_p_centimetro.py
|
SandboxGTASA/Python-1
|
bbb5f8bdf7d5110528e457b2a9ebdb2d67e40805
|
[
"MIT"
] | null | null | null |
Udemy/GeekUniversity/secao_4/ex16_converso_polegada_p_centimetro.py
|
SandboxGTASA/Python-1
|
bbb5f8bdf7d5110528e457b2a9ebdb2d67e40805
|
[
"MIT"
] | null | null | null |
Udemy/GeekUniversity/secao_4/ex16_converso_polegada_p_centimetro.py
|
SandboxGTASA/Python-1
|
bbb5f8bdf7d5110528e457b2a9ebdb2d67e40805
|
[
"MIT"
] | null | null | null |
# Converting inches to centimeters
polegadas = float(input('Entre com o tamanho em polegadas: '))
centimetros = polegadas * 2.54
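# Worked example (illustrative): 10 inches * 2.54 = 25.4 centimeters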
print(centimetros)
| 30.4
| 62
| 0.776316
|
# Converting inches to centimeters
polegadas = float(input('Entre com o tamanho em polegadas: '))
centimetros = polegadas * 2.54
print(centimetros)
| 0
| 0
| 0
|
248d3ffb18d87c23c825d18f3cf219a8e26cd866
| 179
|
py
|
Python
|
payjp/util.py
|
payjp/payjp-python
|
994b16addd8327781eb936eca60d1abe7f7c7819
|
[
"MIT"
] | 15
|
2015-09-13T14:40:48.000Z
|
2021-04-29T15:21:13.000Z
|
payjp/util.py
|
payjp/payjp-python
|
994b16addd8327781eb936eca60d1abe7f7c7819
|
[
"MIT"
] | 9
|
2015-09-07T07:57:20.000Z
|
2020-12-14T07:11:59.000Z
|
payjp/util.py
|
payjp/payjp-python
|
994b16addd8327781eb936eca60d1abe7f7c7819
|
[
"MIT"
] | 10
|
2015-09-07T07:56:09.000Z
|
2020-05-22T12:43:21.000Z
|
# coding: utf-8
import sys
| 17.9
| 64
| 0.620112
|
# coding: utf-8
import sys
def utf8(value):
if sys.version_info < (3, 0) and isinstance(value, unicode):
return value.encode('utf-8')
else:
return value
| 128
| 0
| 23
|
6bb791e549adf13891b287887a7403545ed07d97
| 17,713
|
py
|
Python
|
examples/pretraining/swav/main_swav.py
|
caglasozen/wilds
|
db2ff095304891244962509459ee48e2fc5fd5e6
|
[
"MIT"
] | null | null | null |
examples/pretraining/swav/main_swav.py
|
caglasozen/wilds
|
db2ff095304891244962509459ee48e2fc5fd5e6
|
[
"MIT"
] | null | null | null |
examples/pretraining/swav/main_swav.py
|
caglasozen/wilds
|
db2ff095304891244962509459ee48e2fc5fd5e6
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# This file has been modified from the original repository's version in the following ways:
# 1. The model loading logic uses a SwAVModel class that acts as a wrapper around WILDS-Unlabeled
# models.
# 2. The data loading logic uses a CustomSplitMultiCropDataset class that is compatible with all
# WILDS-Unlabeled datasets.
# More information about both of these classes can be found in the src/ directory.
#
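# Illustrative sketch (added for clarity, not part of the original file): the wrapper
# described in point 1 above is built around a WILDS featurizer roughly as follows,
# mirroring how the training entry point constructs the model; argument values are
# placeholders taken from the parsed args.
#
#   base_model, _ = initialize_model(args, d_out, is_featurizer=True)  # classifier head discarded
#   model = SwAVModel(base_model, normalize=True, output_dim=args.feat_dim,
#                     hidden_mlp=args.hidden_mlp, nmb_prototypes=args.nmb_prototypes)
#   embedding, output = model(inputs)  # projection-head embeddings and prototype scores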
import argparse
import math
import os
import pdb
import shutil
import sys
import time
from logging import getLogger
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
try:
import apex
from apex.parallel.LARC import LARC
except ImportError as e:
print("Apex not found. Proceeding without it...")
try:
import wandb
except Exception as e:
print("wandb not found. Proceeding without it...")
import wilds
from src.utils import (
bool_flag,
initialize_exp,
restart_from_checkpoint,
fix_random_seeds,
AverageMeter,
init_distributed_mode,
ParseKwargs,
plot_experiment,
populate_defaults_for_swav
)
from src.multicropdataset import CustomSplitMultiCropDataset
from src.model import SwAVModel
from examples.models.initializer import initialize_model
from examples.utils import initialize_wandb
logger = getLogger()
parser = argparse.ArgumentParser(description="Implementation of SwAV")
#########################
##### dataset params ####
#########################
parser.add_argument('-d', '--dataset', required=True, choices=wilds.unlabeled_datasets)
parser.add_argument('--root_dir', required=True,
help='The directory where [dataset]/data can be found (or should be downloaded to, if it does not exist).')
parser.add_argument('--dataset_kwargs', nargs='*', action=ParseKwargs, default={})
parser.add_argument('--loader_kwargs', nargs='*', action=ParseKwargs, default={})
parser.add_argument('--splits', nargs='+')
#########################
#### data aug params ####
#########################
parser.add_argument("--nmb_crops", type=int, nargs="+", help="list of number of crops")
parser.add_argument("--size_crops", type=int, nargs="+", help="crops resolutions")
parser.add_argument("--min_scale_crops", type=float, nargs="+", help="argument in RandomResizedCrop")
parser.add_argument("--max_scale_crops", type=float, nargs="+", help="argument in RandomResizedCrop")
#########################
## swav specific params #
#########################
parser.add_argument("--crops_for_assign", type=int, nargs="+", default=[0, 1],
help="list of crops id used for computing assignments (default: [0, 1])")
parser.add_argument("--temperature", default=0.1, type=float,
help="temperature parameter in training loss (default: 0.1)")
parser.add_argument("--epsilon", default=0.03, type=float,
help="regularization parameter for Sinkhorn-Knopp algorithm (default: 0.03)")
parser.add_argument("--sinkhorn_iterations", default=3, type=int,
help="number of iterations in Sinkhorn-Knopp algorithm")
parser.add_argument("--feat_dim", default=128, type=int,
help="feature dimension")
parser.add_argument("--nmb_prototypes", type=int, help="number of prototypes")
parser.add_argument("--queue_length", type=int, default=0,
help="length of the queue (0 for no queue)")
parser.add_argument("--epoch_queue_starts", type=int, default=500,
help="from this epoch, we start using a queue")
#########################
#### optim parameters ###
#########################
parser.add_argument('--optimizer_kwargs', nargs='*', action=ParseKwargs, default={})
parser.add_argument("--n_epochs", default=400, type=int,
help="number of total epochs to run")
parser.add_argument("--warmup_epochs", default=0, type=int, help="number of warmup epochs (default: 0)")
parser.add_argument("--batch_size", type=int,
help="batch size per gpu, i.e. how many unique instances per gpu")
parser.add_argument("--lr", type=float, help="base learning rate")
parser.add_argument("--final_lr", type=float, help="final learning rate")
parser.add_argument("--freeze_prototypes_niters", default=5005, type=int,
help="freeze the prototypes during this many iterations from the start (default: 5005).")
parser.add_argument("--weight_decay", default=1e-6, type=float, help="weight decay")
parser.add_argument("--start_warmup", default=0, type=float,
help="initial warmup learning rate")
#########################
#### dist parameters ###
#########################
parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up distributed
training; see https://pytorch.org/docs/stable/distributed.html""")
parser.add_argument("--world_size", default=-1, type=int, help="""
number of processes: it is set automatically and
should not be passed as argument""")
parser.add_argument("--rank", default=0, type=int, help="""rank of this process:
it is set automatically and should not be passed as argument""")
parser.add_argument("--local_rank", default=0, type=int,
help="this argument is not used and should be ignored")
#########################
#### other parameters ###
#########################
parser.add_argument("--model", type=str, help="convnet architecture. If not set, uses default model specified in WILDS.")
parser.add_argument('--model_kwargs', nargs='*', action=ParseKwargs, default={},
help='keyword arguments for model initialization passed as key1=value1 key2=value2')
parser.add_argument("--hidden_mlp", default=2048, type=int,
help="hidden layer dimension in projection head")
parser.add_argument("--checkpoint_freq", type=int, default=50,
help="Save the model periodically")
parser.add_argument("--use_fp16", type=bool_flag, default=True,
help="whether to train with mixed precision or not")
parser.add_argument("--sync_bn", type=str, default="pytorch", help="synchronize bn")
parser.add_argument("--syncbn_process_group_size", type=int, default=8, help=""" see
https://github.com/NVIDIA/apex/blob/master/apex/parallel/__init__.py#L58-L67""")
parser.add_argument("--log_dir", type=str, default=".",
help="experiment dump path for checkpoints and log")
parser.add_argument("--seed", type=int, default=0, help="seed")
parser.add_argument("--is_not_slurm_job", type=bool_flag, default=True, help="Set to true if not running in Slurm.")
parser.add_argument("--cpu_only", type=bool_flag, default=False,
help="Set to true to run experiment on CPUs instead of GPUs (for debugging).")
parser.add_argument('--pretrained_model_path', default=None, type=str)
# Weights & Biases
parser.add_argument('--use_wandb', type=bool_flag, nargs='?', default=False)
parser.add_argument('--wandb_api_key_path', type=str,
help="Path to Weights & Biases API Key. If use_wandb is set to True and this argument is not specified, user will be prompted to authenticate.")
parser.add_argument('--wandb_kwargs', nargs='*', action=ParseKwargs, default={},
help="Will be passed directly into wandb.init().")
if __name__ == "__main__":
main()
| 40.533181
| 164
| 0.614351
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# This file has been modified from the original repository's version in the following ways:
# 1. The model loading logic uses a SwAVModel class that acts as a wrapper around WILDS-Unlabeled
# models.
# 2. The data loading logic uses a CustomSplitMultiCropDataset class that is compatible with all
# WILDS-Unlabeled datasets.
# More information about both of these classes can be found in the src/ directory.
#
import argparse
import math
import os
import pdb
import shutil
import sys
import time
from logging import getLogger
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
try:
import apex
from apex.parallel.LARC import LARC
except ImportError as e:
print("Apex not found. Proceeding without it...")
try:
import wandb
except Exception as e:
print("wandb not found. Proceeding without it...")
import wilds
from src.utils import (
bool_flag,
initialize_exp,
restart_from_checkpoint,
fix_random_seeds,
AverageMeter,
init_distributed_mode,
ParseKwargs,
plot_experiment,
populate_defaults_for_swav
)
from src.multicropdataset import CustomSplitMultiCropDataset
from src.model import SwAVModel
from examples.models.initializer import initialize_model
from examples.utils import initialize_wandb
logger = getLogger()
parser = argparse.ArgumentParser(description="Implementation of SwAV")
#########################
##### dataset params ####
#########################
parser.add_argument('-d', '--dataset', required=True, choices=wilds.unlabeled_datasets)
parser.add_argument('--root_dir', required=True,
help='The directory where [dataset]/data can be found (or should be downloaded to, if it does not exist).')
parser.add_argument('--dataset_kwargs', nargs='*', action=ParseKwargs, default={})
parser.add_argument('--loader_kwargs', nargs='*', action=ParseKwargs, default={})
parser.add_argument('--splits', nargs='+')
#########################
#### data aug params ####
#########################
parser.add_argument("--nmb_crops", type=int, nargs="+", help="list of number of crops")
parser.add_argument("--size_crops", type=int, nargs="+", help="crops resolutions")
parser.add_argument("--min_scale_crops", type=float, nargs="+", help="argument in RandomResizedCrop")
parser.add_argument("--max_scale_crops", type=float, nargs="+", help="argument in RandomResizedCrop")
#########################
## swav specific params #
#########################
parser.add_argument("--crops_for_assign", type=int, nargs="+", default=[0, 1],
help="list of crops id used for computing assignments (default: [0, 1])")
parser.add_argument("--temperature", default=0.1, type=float,
help="temperature parameter in training loss (default: 0.1)")
parser.add_argument("--epsilon", default=0.03, type=float,
help="regularization parameter for Sinkhorn-Knopp algorithm (default: 0.03)")
parser.add_argument("--sinkhorn_iterations", default=3, type=int,
help="number of iterations in Sinkhorn-Knopp algorithm")
parser.add_argument("--feat_dim", default=128, type=int,
help="feature dimension")
parser.add_argument("--nmb_prototypes", type=int, help="number of prototypes")
parser.add_argument("--queue_length", type=int, default=0,
help="length of the queue (0 for no queue)")
parser.add_argument("--epoch_queue_starts", type=int, default=500,
help="from this epoch, we start using a queue")
#########################
#### optim parameters ###
#########################
parser.add_argument('--optimizer_kwargs', nargs='*', action=ParseKwargs, default={})
parser.add_argument("--n_epochs", default=400, type=int,
help="number of total epochs to run")
parser.add_argument("--warmup_epochs", default=0, type=int, help="number of warmup epochs (default: 0)")
parser.add_argument("--batch_size", type=int,
help="batch size per gpu, i.e. how many unique instances per gpu")
parser.add_argument("--lr", type=float, help="base learning rate")
parser.add_argument("--final_lr", type=float, help="final learning rate")
parser.add_argument("--freeze_prototypes_niters", default=5005, type=int,
help="freeze the prototypes during this many iterations from the start (default: 5005).")
parser.add_argument("--weight_decay", default=1e-6, type=float, help="weight decay")
parser.add_argument("--start_warmup", default=0, type=float,
help="initial warmup learning rate")
#########################
#### dist parameters ###
#########################
parser.add_argument("--dist_url", default="env://", type=str, help="""url used to set up distributed
training; see https://pytorch.org/docs/stable/distributed.html""")
parser.add_argument("--world_size", default=-1, type=int, help="""
number of processes: it is set automatically and
should not be passed as argument""")
parser.add_argument("--rank", default=0, type=int, help="""rank of this process:
it is set automatically and should not be passed as argument""")
parser.add_argument("--local_rank", default=0, type=int,
help="this argument is not used and should be ignored")
#########################
#### other parameters ###
#########################
parser.add_argument("--model", type=str, help="convnet architecture. If not set, uses default model specified in WILDS.")
parser.add_argument('--model_kwargs', nargs='*', action=ParseKwargs, default={},
help='keyword arguments for model initialization passed as key1=value1 key2=value2')
parser.add_argument("--hidden_mlp", default=2048, type=int,
help="hidden layer dimension in projection head")
parser.add_argument("--checkpoint_freq", type=int, default=50,
help="Save the model periodically")
parser.add_argument("--use_fp16", type=bool_flag, default=True,
help="whether to train with mixed precision or not")
parser.add_argument("--sync_bn", type=str, default="pytorch", help="synchronize bn")
parser.add_argument("--syncbn_process_group_size", type=int, default=8, help=""" see
https://github.com/NVIDIA/apex/blob/master/apex/parallel/__init__.py#L58-L67""")
parser.add_argument("--log_dir", type=str, default=".",
help="experiment dump path for checkpoints and log")
parser.add_argument("--seed", type=int, default=0, help="seed")
parser.add_argument("--is_not_slurm_job", type=bool_flag, default=True, help="Set to true if not running in Slurm.")
parser.add_argument("--cpu_only", type=bool_flag, default=False,
help="Set to true to run experiment on CPUs instead of GPUs (for debugging).")
parser.add_argument('--pretrained_model_path', default=None, type=str)
# Weights & Biases
parser.add_argument('--use_wandb', type=bool_flag, nargs='?', default=False)
parser.add_argument('--wandb_api_key_path', type=str,
help="Path to Weights & Biases API Key. If use_wandb is set to True and this argument is not specified, user will be prompted to authenticate.")
parser.add_argument('--wandb_kwargs', nargs='*', action=ParseKwargs, default={},
help="Will be passed directly into wandb.init().")
def main():
global args
args = parser.parse_args()
args = populate_defaults_for_swav(args)
init_distributed_mode(args)
fix_random_seeds(args.seed)
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
logger, training_stats = initialize_exp(args, "epoch", "loss")
logger.info(f"Initialized distributed mode and applied WILDS default...\n{args}")
if args.use_wandb:
initialize_wandb(args)
train_dataset = CustomSplitMultiCropDataset(
args.dataset,
args.root_dir,
args.size_crops,
args.nmb_crops,
args.min_scale_crops,
args.max_scale_crops,
args,
)
sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
train_loader = torch.utils.data.DataLoader(
train_dataset,
sampler=sampler,
batch_size=args.batch_size,
**args.loader_kwargs,
)
logger.info("Building data done with {} images loaded.".format(len(train_dataset)))
d_out = 1 # this can be arbitrary; final layer is discarded for SwAVModel
base_model, _ = initialize_model(args, d_out, is_featurizer=True) # discard classifier
model = SwAVModel(
base_model, normalize=True, output_dim=args.feat_dim,
hidden_mlp=args.hidden_mlp, nmb_prototypes=args.nmb_prototypes
)
# synchronize batch norm layers
if args.sync_bn == "pytorch":
model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
elif args.sync_bn == "apex":
# with apex syncbn we sync bn per group because it speeds up computation
# compared to global syncbn
process_group = apex.parallel.create_syncbn_process_group(args.syncbn_process_group_size)
model = apex.parallel.convert_syncbn_model(model, process_group=process_group)
# copy model to GPU
model = model.cuda()
if args.rank == 0:
logger.info(model)
logger.info("Building model done.")
# build optimizer
optimizer = torch.optim.SGD(
model.parameters(),
lr=args.lr,
momentum=0.9,
weight_decay=args.weight_decay,
)
optimizer = LARC(optimizer=optimizer, trust_coefficient=0.001, clip=False)
warmup_lr_schedule = np.linspace(args.start_warmup, args.lr, len(train_loader) * args.warmup_epochs)
iters = np.arange(len(train_loader) * (args.n_epochs - args.warmup_epochs))
cosine_lr_schedule = np.array([args.final_lr + 0.5 * (args.lr - args.final_lr) * (1 + \
math.cos(math.pi * t / (len(train_loader) * (args.n_epochs - args.warmup_epochs)))) for t in iters])
lr_schedule = np.concatenate((warmup_lr_schedule, cosine_lr_schedule))
logger.info("Building optimizer done.")
# init mixed precision
if args.use_fp16:
model, optimizer = apex.amp.initialize(model, optimizer, opt_level="O1")
logger.info("Initializing mixed precision done.")
# wrap model
model = nn.parallel.DistributedDataParallel(
model,
device_ids=[args.gpu_to_work_on]
)
# optionally resume from a checkpoint
to_restore = {"epoch": 0}
restart_from_checkpoint(
os.path.join(args.log_dir, "checkpoint.pth.tar"),
run_variables=to_restore,
state_dict=model,
optimizer=optimizer,
amp=apex.amp,
)
start_epoch = to_restore["epoch"]
# build the queue
queue = None
queue_path = os.path.join(args.log_dir, "queue" + str(args.rank) + ".pth")
if os.path.isfile(queue_path):
queue = torch.load(queue_path)["queue"]
# the queue needs to be divisible by the batch size
args.queue_length -= args.queue_length % (args.batch_size * args.world_size)
cudnn.benchmark = True
for epoch in range(start_epoch, args.n_epochs):
# train the network for one epoch
logger.info("============ Starting epoch %i ... ============" % epoch)
# set sampler
train_loader.sampler.set_epoch(epoch)
# optionally starts a queue
if args.queue_length > 0 and epoch >= args.epoch_queue_starts and queue is None:
queue = torch.zeros(
len(args.crops_for_assign),
args.queue_length // args.world_size,
args.feat_dim,
).cuda()
# train the network
scores, queue = train(train_loader, model, optimizer, epoch, lr_schedule, queue)
training_stats.update(scores)
# save checkpoints
if args.rank == 0:
save_dict = {
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"optimizer": optimizer.state_dict(),
}
if args.use_fp16:
save_dict["amp"] = apex.amp.state_dict()
torch.save(
save_dict,
os.path.join(args.log_dir, "checkpoint.pth.tar"),
)
if epoch % args.checkpoint_freq == 0 or epoch == args.n_epochs - 1:
shutil.copyfile(
os.path.join(args.log_dir, "checkpoint.pth.tar"),
os.path.join(args.dump_checkpoints, "ckp-" + str(epoch) + ".pth"),
)
if queue is not None:
torch.save({"queue": queue}, queue_path)
if args.rank == 0:
plot_experiment(args.log_dir)
def train(train_loader, model, optimizer, epoch, lr_schedule, queue):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
model.train()
use_the_queue = False
end = time.time()
for it, inputs in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# update learning rate
iteration = epoch * len(train_loader) + it
for param_group in optimizer.param_groups:
param_group["lr"] = lr_schedule[iteration]
# normalize the prototypes
with torch.no_grad():
w = model.module.prototypes.weight.data.clone()
w = nn.functional.normalize(w, dim=1, p=2)
model.module.prototypes.weight.copy_(w)
# ============ multi-res forward passes ... ============
embedding, output = model(inputs)
embedding = embedding.detach()
bs = inputs[0].size(0)
# ============ swav loss ... ============
loss = 0
for i, crop_id in enumerate(args.crops_for_assign):
with torch.no_grad():
out = output[bs * crop_id: bs * (crop_id + 1)].detach()
# time to use the queue
if queue is not None:
if use_the_queue or not torch.all(queue[i, -1, :] == 0):
use_the_queue = True
out = torch.cat((torch.mm(
queue[i],
model.module.prototypes.weight.t()
), out))
# fill the queue
queue[i, bs:] = queue[i, :-bs].clone()
queue[i, :bs] = embedding[crop_id * bs: (crop_id + 1) * bs]
# get assignments
q = distributed_sinkhorn(out)[-bs:]
# cluster assignment prediction
subloss = 0
for v in np.delete(np.arange(np.sum(args.nmb_crops)), crop_id):
x = output[bs * v: bs * (v + 1)] / args.temperature
subloss -= torch.mean(torch.sum(q * F.log_softmax(x, dim=1), dim=1))
loss += subloss / (np.sum(args.nmb_crops) - 1)
loss /= len(args.crops_for_assign)
# ============ backward and optim step ... ============
optimizer.zero_grad()
if args.use_fp16:
with apex.amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# cancel gradients for the prototypes
if iteration < args.freeze_prototypes_niters:
for name, p in model.named_parameters():
if "prototypes" in name:
p.grad = None
optimizer.step()
# ============ misc ... ============
losses.update(loss.item(), inputs[0].size(0))
batch_time.update(time.time() - end)
end = time.time()
if args.rank ==0 and it % 50 == 0:
logger.info(
"Epoch: [{0}][{1}]\t"
"Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t"
"Data {data_time.val:.3f} ({data_time.avg:.3f})\t"
"Loss {loss.val:.4f} ({loss.avg:.4f})\t"
"Lr: {lr:.4f}".format(
epoch,
it,
batch_time=batch_time,
data_time=data_time,
loss=losses,
lr=optimizer.optim.param_groups[0]["lr"],
)
)
if args.use_wandb:
wandb.log(
{
"epoch": epoch,
"loss": losses.val,
"loss_avg": losses.avg,
}
)
return (epoch, losses.avg), queue
@torch.no_grad()
def distributed_sinkhorn(out):
Q = torch.exp(out / args.epsilon).t() # Q is K-by-B for consistency with notations from our paper
B = Q.shape[1] * args.world_size # number of samples to assign
K = Q.shape[0] # how many prototypes
# make the matrix sums to 1
sum_Q = torch.sum(Q)
dist.all_reduce(sum_Q)
Q /= sum_Q
for it in range(args.sinkhorn_iterations):
# normalize each row: total weight per prototype must be 1/K
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
dist.all_reduce(sum_of_rows)
Q /= sum_of_rows
Q /= K
# normalize each column: total weight per sample must be 1/B
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
    Q *= B # the columns must sum to 1 so that Q is an assignment
return Q.t()
if __name__ == "__main__":
main()
| 9,880
| 0
| 68
|
0ed6edb1cfba4fa32c570eb33da74bbda6c23148
| 10,300
|
py
|
Python
|
bokeh-app/mainold.py
|
nyc-public-schools/bokeh
|
bad0334b43ad9df295efa980d7c2a2f704888732
|
[
"BSD-3-Clause"
] | null | null | null |
bokeh-app/mainold.py
|
nyc-public-schools/bokeh
|
bad0334b43ad9df295efa980d7c2a2f704888732
|
[
"BSD-3-Clause"
] | null | null | null |
bokeh-app/mainold.py
|
nyc-public-schools/bokeh
|
bad0334b43ad9df295efa980d7c2a2f704888732
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[7]:
import pandas as pd
import numpy as np
from os.path import dirname, join
from bokeh.io import show, output_notebook,output_file, show, save, curdoc, output_notebook, export_png
from bokeh.plotting import figure, output_file, show,save
from bokeh.models.widgets import Panel, Tabs
from bokeh.layouts import column, row, widgetbox
from bokeh.models import HoverTool, LinearColorMapper,TextInput,Label,LabelSet,Title,CustomJS,DataTable, Slider, Div,RangeSlider, Button,RadioGroup,LinearAxis, Range1d, ColumnDataSource, Paragraph,Select, TableColumn
from bokeh.tile_providers import CARTODBPOSITRON, get_provider,OSM, STAMEN_TERRAIN
#colors for each borough
colors=['#7bccc4','#4eb3d3','#2b8cbe','#0868ac','#084081']
#Load data
mid_dyn = pd.read_csv(join(dirname(__file__), "data/MiddleSchools_2006-2018_clean.csv"))
mid_stat = pd.read_csv(join(dirname(__file__), "data/MiddleSchools_2018_clean.csv"))
nums =['female_rate', 'male_rate', 'asian_rate', 'black_rate', 'hispanic_rate',
'other_rate', 'white_rate', 'disabilities_rate', 'ell_rate',
'poverty_rate', 'total_schooldays', 'presence_rate', 'absense_rate',
'release_rate', 'mean_score_math', 'mean_score_ela', 'diversity_index','crime_rate','avg_rent_per_sqft']
for num in nums:
mid_stat[num] = round(mid_stat[num],1)
if num not in ['crime_rate','avg_rent_per_sqft']:
mid_dyn[num] = round(mid_dyn[num],1)
# In[8]:
# In[9]:
#Get data from csv to lists
# In[10]:
# In[14]:
# In[31]:
text_input = TextInput(value='01M034')
text_input.on_change('value',update1,update2)
div1 = Div(text="<b> Write School DBN </b>")
variables = ['Ethnicities','Gender','Mean Score']
div2 = Div(text="<b> Choose variable </b>")
radio_group = RadioGroup(labels=variables, active=1)
radio_group.on_change('active',update1,update2)
div3 = Div(text="<b> Location of School </b>")
div4 = Div(text="<b> Overview </b>")
plot,para,m,table = create_plot()
layout = create_slider(plot, 2006, 2018)
div5 = Div(text="<b> </b>")
div6 = Div(text="<b> </b>")
#Combine all controls to get in column
col1= column(div1,text_input,div2,radio_group,div3,m, width=260)
col2 = column(div6, layout, width=510)
col3 = column(div5,table, width=230)
col4 = column(div4, para, width=230)
#Layout
layout = row(col1,col2,col3,col4)
curdoc().add_root(layout)
curdoc().title = "NYC_map"
#output_file("details.html")
#save(layout)
show(layout)
| 31.987578
| 224
| 0.618155
|
#!/usr/bin/env python
# coding: utf-8
# In[7]:
import pandas as pd
import numpy as np
from os.path import dirname, join
from bokeh.io import show, output_notebook,output_file, show, save, curdoc, output_notebook, export_png
from bokeh.plotting import figure, output_file, show,save
from bokeh.models.widgets import Panel, Tabs
from bokeh.layouts import column, row, widgetbox
from bokeh.models import HoverTool, LinearColorMapper,TextInput,Label,LabelSet,Title,CustomJS,DataTable, Slider, Div,RangeSlider, Button,RadioGroup,LinearAxis, Range1d, ColumnDataSource, Paragraph,Select, TableColumn
from bokeh.tile_providers import CARTODBPOSITRON, get_provider,OSM, STAMEN_TERRAIN
#colors for each borough
colors=['#7bccc4','#4eb3d3','#2b8cbe','#0868ac','#084081']
#Load data
mid_dyn = pd.read_csv(join(dirname(__file__), "data/MiddleSchools_2006-2018_clean.csv"))
mid_stat = pd.read_csv(join(dirname(__file__), "data/MiddleSchools_2018_clean.csv"))
nums =['female_rate', 'male_rate', 'asian_rate', 'black_rate', 'hispanic_rate',
'other_rate', 'white_rate', 'disabilities_rate', 'ell_rate',
'poverty_rate', 'total_schooldays', 'presence_rate', 'absense_rate',
'release_rate', 'mean_score_math', 'mean_score_ela', 'diversity_index','crime_rate','avg_rent_per_sqft']
for num in nums:
mid_stat[num] = round(mid_stat[num],1)
if num not in ['crime_rate','avg_rent_per_sqft']:
mid_dyn[num] = round(mid_dyn[num],1)
# In[8]:
def geographic_to_web_mercator(x_lon, y_lat):
if abs(x_lon) <= 180 and abs(y_lat) < 90:
num = x_lon * 0.017453292519943295
x = 6378137.0 * num
a = y_lat * 0.017453292519943295
x_mercator = x
y_mercator = 3189068.5 * np.log((1.0 + np.sin(a)) / (1.0 - np.sin(a)))
return x_mercator, y_mercator
# In[9]:
#Get data from csv to lists
def get_data(school):
school_data = mid_dyn[mid_dyn['dbn']==school]
source = ColumnDataSource(school_data)
return source, school_data
# In[10]:
def create_slider(plot, startYear, endYear):
callback = CustomJS(args=dict(plot=plot), code="""
var a = cb_obj.value;
plot.x_range.start = a[0];
plot.x_range.end = a[1];
""")
range_slider = RangeSlider(start=startYear, end=endYear,value=(startYear, endYear), step=1, width= 500, title="Year Range")
range_slider.js_on_change('value', callback)
layout = column(plot,column(range_slider))
return layout
# In[14]:
def create_plot():
colors=['#7bccc4','#4eb3d3','#2b8cbe','#0868ac','#084081']
radio_idx = radio_group.active
school = text_input.value
    variables = ['Ethnicities','Gender','Mean Score']
text = mid_stat[mid_stat['dbn']==school]['overview'].iloc[0]
data = mid_stat[mid_stat['dbn']==school]
src, school_data = get_data(school)
if radio_idx == 0:
plot = figure(plot_width = 500, plot_height = 400,
toolbar_location=None,
                      x_axis_label = 'Year', y_axis_label = '% Ethnicity')
races = ['asian_rate', 'black_rate', 'hispanic_rate', 'white_rate']
race_title =['Asian', 'Black', 'Hispanic', 'White']
colors1 = colors[1:]
for (race,tit,color) in zip(races,race_title,colors1):
line=plot.line('year', race, line_width=2, line_color=color, source=src,legend_label=tit)
plot.circle('year', race, fill_color=color, line_color=color, size=8, source=src)
hover = HoverTool(renderers=[line])
hover.tooltips=[
('Year', '@year'),
(tit, '@'+race+'{1.1} %')
]
plot.add_tools(hover)
plot.legend.location ='top_left'
#plot.add_layout(Title(text= '{} School \n'.format(level), text_font_style="italic",text_font_size="14pt", align='center'), 'above')
plot.add_layout(Title(text=school_data['school_name'].unique()[0], text_font_size="16pt",align='center'), 'above',)
#plot.title.align ='center'
#plot.title.text_font_size = "18px"
elif radio_idx == 1:
plot = figure(plot_width = 500, plot_height = 400,
toolbar_location=None,
x_axis_label = 'Year', y_axis_label = '% Gender')
genders = ['female_rate','male_rate']
gender_title =['% Female','% Male']
colors2 = [colors[2]]+[colors[4]]
for (gender,tit,color) in zip(genders,gender_title,colors2):
line=plot.line('year', gender, line_width=2, line_color=color, source=src,legend_label=tit)
plot.circle('year', gender, fill_color=color, line_color=color, size=8, source=src)
hover = HoverTool(renderers=[line])
hover.tooltips=[
('Year', '@year'),
(tit, '@'+gender+'{1.1} %')
]
plot.add_tools(hover)
plot.legend.location ='top_left'
# plot.add_layout(Title(text= '{} School \n'.format(level), text_font_style="italic",text_font_size="14pt", align='center'), 'above')
plot.add_layout(Title(text=school_data['school_name'].unique()[0], text_font_size="16pt",align='center'), 'above',)
elif radio_idx == 2:
plot = figure(plot_width = 500, plot_height = 400,
toolbar_location=None,
x_axis_label = 'Year', y_axis_label = 'Mean Score')
cols = ['mean_score_math', 'mean_score_ela']
cols_tit = ['Mean Math Score', 'Mean ELA Score']
colors3 = [colors[2]]+[colors[4]]
for (col,tit,color) in zip(cols,cols_tit,colors3):
line=plot.line('year', col, line_width=2, line_color=color, source=src,legend_label=tit)
plot.circle('year', col, fill_color=color, line_color=color, size=8, source=src)
hover = HoverTool(renderers=[line])
hover.tooltips=[
('Year', '@year'),
(tit, '@'+col+'{1.1}')
]
plot.add_tools(hover)
plot.legend.location ='top_left'
#plot.add_layout(Title(text= '{} School \n'.format(level), text_font_style="italic",text_font_size="14pt", align='center'), 'above')
plot.add_layout(Title(text=school_data['school_name'].unique()[0], text_font_size="16pt",align='center'), 'above',)
#Add overview paragraph
para = Div(text=text,
width=400, height=400)
cols=[ 'school_name',
'category',
'open_year',
'borough',
'neighborhood',
'district',
'address',
'website',
'total_enrollment',
'female_rate',
'male_rate',
'diversity_index',
'asian_rate',
'black_rate',
'hispanic_rate',
'white_rate',
'ell_rate',
'poverty_rate',
'total_schooldays',
'presence_rate',
'absense_rate',
'mean_score_math',
'mean_score_ela',
'schoolday_duration',
'uniform',
'extendedday',
'summersession',
'weekendprogram',
'electives',
'activities',
'sports',
'pupil_teacher_ratio',
'student_trust_score',
'crime_rate',
'avg_rent_per_sqft']
col_name=[ 'Name',
               'Category',
'Open year',
'Borough',
'Neighborhood',
'District',
'Address',
'Website',
'Enrollment',
'% Female',
'% Male',
'Diversity index',
'% Asian',
'% Black',
'% Hispanic',
'% White',
'% ELL',
'% Supported',
'Schooldays',
'% Presence',
               '% Absence',
'Mean math score',
'Mean ELA score',
'Schoolday',
'Uniform',
'Extended day',
'Summer session',
'Weekend program',
'Electives',
'Activities',
'Sports',
'Class size',
'Satisfaction',
'Crime rate',
'Rent per sqft $']
data_dict ={'columns': col_name, 'data': list(data[cols].iloc[0].values)}
source = ColumnDataSource(data_dict)
columns = [
TableColumn(field="columns", title='DBN: '+data['dbn'].iloc[0],width=100),
TableColumn(field="data", title="",width=1000),
]
table = DataTable(source=source, columns=columns, width=220, height=450, fit_columns=False,index_position=None)
#Get map
x,y = geographic_to_web_mercator(data['lon'].iloc[0],data['lat'].iloc[0])
tile_provider = get_provider(CARTODBPOSITRON)
# range bounds supplied in web mercator coordinates
m = figure(x_range=(x-500, x+500), y_range=(y-500, y+500),height=300,width=260,
x_axis_location=None, y_axis_location=None,toolbar_location='below',tools="pan,wheel_zoom,reset",active_scroll='auto')
m.add_tile(tile_provider)
square=m.circle(x=x,y=y,size=12, fill_color=colors[4], fill_alpha=1)
tooltips = [('Name', data['school_name'].iloc[0]),('Address', data['address'].iloc[0])]
m.add_tools(HoverTool(renderers=[square],tooltips=tooltips))
return plot, para, m, table
# In[31]:
def update1(attr, old, new):
plot,para,m,table = create_plot()
layout.children[1].children[1]= create_slider(plot, 2006, 2018)
def update2(attr, old, new):
plot,para,m,table = create_plot()
layout.children[2].children[1] = table
layout.children[3].children[1] = para
layout.children[0].children[5] = m
text_input = TextInput(value='01M034')
text_input.on_change('value',update1,update2)
div1 = Div(text="<b> Write School DBN </b>")
variables = ['Ethnicities','Gender','Mean Score']
div2 = Div(text="<b> Choose variable </b>")
radio_group = RadioGroup(labels=variables, active=1)
radio_group.on_change('active',update1,update2)
div3 = Div(text="<b> Location of School </b>")
div4 = Div(text="<b> Overview </b>")
plot,para,m,table = create_plot()
layout = create_slider(plot, 2006, 2018)
div5 = Div(text="<b> </b>")
div6 = Div(text="<b> </b>")
#Combine all controls to get in column
col1= column(div1,text_input,div2,radio_group,div3,m, width=260)
col2 = column(div6, layout, width=510)
col3 = column(div5,table, width=230)
col4 = column(div4, para, width=230)
#Layout
layout = row(col1,col2,col3,col4)
curdoc().add_root(layout)
curdoc().title = "NYC_map"
#output_file("details.html")
#save(layout)
show(layout)
| 7,652
| 0
| 144
|
35b186f995d1aac5ce56c24edf771e8630572c33
| 2,183
|
py
|
Python
|
tools/deploy/ops/BBoxTransform.py
|
qingswu/Detectron2-CenterNet
|
40b80d1a6aa85c7352d5088f44225d64c242cf83
|
[
"Apache-2.0"
] | 5
|
2020-07-01T07:55:45.000Z
|
2022-03-05T04:03:09.000Z
|
tools/deploy/ops/BBoxTransform.py
|
qingswu/Detectron2-CenterNet
|
40b80d1a6aa85c7352d5088f44225d64c242cf83
|
[
"Apache-2.0"
] | 2
|
2020-12-08T12:40:43.000Z
|
2021-09-08T02:15:21.000Z
|
tools/deploy/ops/BBoxTransform.py
|
qingswu/Detectron2-CenterNet
|
40b80d1a6aa85c7352d5088f44225d64c242cf83
|
[
"Apache-2.0"
] | 2
|
2021-01-17T14:51:09.000Z
|
2021-03-23T02:07:35.000Z
|
import torch
import torch.nn
from detectron2.export.tensorrt import TensorRTModel
from detectron2.utils.logger import setup_logger
from ops import export_onnx, get_inputs, SimpleTracer
if __name__ == '__main__':
logger = setup_logger()
logger.info("example: BBoxTransform")
m = BBoxTransform()
data = get_inputs("rois", "box_regression", "im_info",
root="/autox-sz/users/dongqixu/share/trt_plugins/BBoxTransform", map_location="cpu")
export_onnx(m, data, "model.onnx")
targets = m(data)
TensorRTModel.build_engine("model.onnx", "model.trt", 4, device="CUDA")
e = TensorRTModel("model.trt")
outputs = e.inference(data)
# compare torch output and tensorrt output
assert len(targets) == len(outputs), "Number of outputs does not match!"
targets = [(k, v.cuda()) for k, v in targets.items()]
for i, (name, tensor) in enumerate(targets):
logger.info(name)
diff = outputs[name] - tensor
unique = torch.unique(diff)
logger.info("unique\n{}".format(unique))
logger.info("max\n{}".format(torch.abs(unique).max()))
assert torch.abs(unique).max() < 1e-3
| 31.185714
| 106
| 0.619789
|
import torch
import torch.nn
from detectron2.export.tensorrt import TensorRTModel
from detectron2.utils.logger import setup_logger
from ops import export_onnx, get_inputs, SimpleTracer
class BBoxTransform(SimpleTracer):
def __init__(self):
super(BBoxTransform, self).__init__()
self.cuda()
self.eval()
def inference(self, inputs):
rois = inputs["rois"]
box_regression = inputs["box_regression"]
im_info = inputs["im_info"]
roi_pred_bbox, roi_batch_splits = torch.ops._caffe2.BBoxTransform(
rois,
box_regression,
im_info,
weights=(10.0, 10.0, 5.0, 5.0),
apply_scale=True,
rotated=False,
angle_bound_on=True,
angle_bound_lo=-180,
angle_bound_hi=180,
clip_angle_thresh=1.0,
legacy_plus_one=False,
)
return {
"roi_pred_bbox": roi_pred_bbox,
"roi_batch_splits": roi_batch_splits,
}
def get_input_names(self):
return ["rois", "box_regression", "im_info"]
def get_output_names(self):
return ["roi_pred_bbox", "roi_batch_splits"]
if __name__ == '__main__':
logger = setup_logger()
logger.info("example: BBoxTransform")
m = BBoxTransform()
data = get_inputs("rois", "box_regression", "im_info",
root="/autox-sz/users/dongqixu/share/trt_plugins/BBoxTransform", map_location="cpu")
export_onnx(m, data, "model.onnx")
targets = m(data)
TensorRTModel.build_engine("model.onnx", "model.trt", 4, device="CUDA")
e = TensorRTModel("model.trt")
outputs = e.inference(data)
# compare torch output and tensorrt output
assert len(targets) == len(outputs), "Number of outputs does not match!"
targets = [(k, v.cuda()) for k, v in targets.items()]
for i, (name, tensor) in enumerate(targets):
logger.info(name)
diff = outputs[name] - tensor
unique = torch.unique(diff)
logger.info("unique\n{}".format(unique))
logger.info("max\n{}".format(torch.abs(unique).max()))
assert torch.abs(unique).max() < 1e-3
| 866
| 13
| 131
|
5b755f777f1ac2efb82d36a71ca1cacf64bd869b
| 446
|
py
|
Python
|
setup.py
|
dsvensson/subsevenzip-python
|
078c3876e5fd1ffd84e57ae992a142d247a5832f
|
[
"0BSD"
] | 3
|
2015-01-05T14:09:25.000Z
|
2018-03-21T19:33:15.000Z
|
setup.py
|
dsvensson/subsevenzip-python
|
078c3876e5fd1ffd84e57ae992a142d247a5832f
|
[
"0BSD"
] | null | null | null |
setup.py
|
dsvensson/subsevenzip-python
|
078c3876e5fd1ffd84e57ae992a142d247a5832f
|
[
"0BSD"
] | null | null | null |
from setuptools import setup, find_packages
setup(
name="subsevenzip",
version="0.1",
description="7-Zip decompressor",
author="Daniel Svensson",
author_email="dsvensson@gmail.com",
license="ISC",
packages=find_packages(),
test_suite="nose.collector",
setup_requires=[
"coverage",
"flake8",
"nose"
],
classifiers=[
"Programming Language :: Python :: 3 :: Only"
]
)
| 21.238095
| 53
| 0.605381
|
from setuptools import setup, find_packages
setup(
name="subsevenzip",
version="0.1",
description="7-Zip decompressor",
author="Daniel Svensson",
author_email="dsvensson@gmail.com",
license="ISC",
packages=find_packages(),
test_suite="nose.collector",
setup_requires=[
"coverage",
"flake8",
"nose"
],
classifiers=[
"Programming Language :: Python :: 3 :: Only"
]
)
| 0
| 0
| 0
|
d4c5e81f71bbec9487f5339b12bc91e1a50a3d76
| 11,459
|
py
|
Python
|
grr/endtoend_tests/base.py
|
mikecb/grr
|
52fdd977729af2a09a147301c55b8b7f1eccfa67
|
[
"Apache-2.0"
] | null | null | null |
grr/endtoend_tests/base.py
|
mikecb/grr
|
52fdd977729af2a09a147301c55b8b7f1eccfa67
|
[
"Apache-2.0"
] | null | null | null |
grr/endtoend_tests/base.py
|
mikecb/grr
|
52fdd977729af2a09a147301c55b8b7f1eccfa67
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Base module for end to end tests that run flows on clients."""
import re
import time
import traceback
import unittest
from grr.lib import aff4
from grr.lib import client_index
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import flow_utils
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib.aff4_objects import aff4_grr
from grr.lib.flows.console import debugging
from grr.lib.rdfvalues import client as rdf_client
class Error(Exception):
"""Test base error."""
class ErrorEmptyCollection(Error):
"""Raise when we expect values in a collection, but it is empty."""
class TestStateUncleanError(Error):
"""Raised when tests encounter bad state that indicates a cleanup failure."""
class ClientTestBase(unittest.TestCase):
"""This is the base class for all client tests.
Tests should only inherit from this class if they are not safe to be run in
prod with the EndToEndTests cronjob.
"""
platforms = []
flow = None
args = {}
network_bytes_limit = None
timeout = flow_utils.DEFAULT_TIMEOUT
test_output_path = None
# How long after flow is marked complete we should expect results to be
# available in the collection. This is essentially how quickly we expect
# results to be available to users in the UI.
RESULTS_SLA_SECONDS = 10
# Only run on clients after this version
client_min_version = None
__metaclass__ = registry.MetaclassRegistry
def VerifyEmpty(self, urns):
"""Verify urns have been deleted."""
try:
for urn in urns:
# TODO(user): aff4.FACTORY.Stat() is the right thing to use here.
# We open each urn to generate InstantiationError on failures, multiopen
# ignores these errors. This isn't too slow since it's almost always
# just one path anyway.
aff4.FACTORY.Open(urn, aff4_type=aff4.AFF4Volume, token=self.token)
except aff4.InstantiationError:
raise TestStateUncleanError("Path wasn't deleted: %s" %
traceback.format_exc())
def DeleteUrn(self, urn):
"""Deletes an object from the db and the index, and flushes the caches."""
data_store.DB.DeleteSubject(urn, token=self.token)
aff4.FACTORY._DeleteChildFromIndex(urn, token=self.token)
aff4.FACTORY.Flush()
def CheckCollectionNotEmptyWithRetry(self, collection_urn, token):
"""Check collection for results, return list if they exist.
Args:
collection_urn: URN of the collection
token: User token
Returns:
The collection contents as a list
Raises:
ErrorEmptyCollection: if the collection has no results after
self.RESULTS_SLA_SECONDS
"""
coll = aff4.FACTORY.Open(collection_urn, mode="r", token=token)
coll_list = list(coll)
if not coll_list:
for _ in range(self.RESULTS_SLA_SECONDS):
time.sleep(1)
coll_list = list(coll)
if coll_list:
return coll_list
raise ErrorEmptyCollection("No values in %s after SLA: %s seconds" %
(collection_urn, self.RESULTS_SLA_SECONDS))
return coll_list
def OpenFDWithRetry(self, file_urn, token):
"""Try to open a aff4 path, retry if it is AFF4Volume."""
fd = aff4.FACTORY.Open(file_urn, mode="r", token=token)
# All types are instances of AFF4Volume so we can't use isinstance.
if fd.__class__ is aff4.AFF4Volume:
for _ in range(self.RESULTS_SLA_SECONDS):
time.sleep(1)
fd = aff4.FACTORY.Open(file_urn, mode="r", token=token)
if fd.__class__ is not aff4.AFF4Volume:
return fd
self.fail(("No results were written to the data store. Maybe the GRR "
"client is not running with root privileges?"))
return fd
class AutomatedTest(ClientTestBase):
"""All tests that are safe to run in prod should inherit from this class."""
__metaclass__ = registry.MetaclassRegistry
# Prevents this from automatically registering.
__abstract = True # pylint: disable=g-bad-name
class TestVFSPathExists(AutomatedTest):
"""Test that checks expected VFS files were created."""
result_type = aff4_grr.VFSFile
def CheckFlow(self):
"""Verify VFS paths were created."""
urn = self.GetURNFromGlobPathWithRetry(self.test_output_path)
fd = self.OpenFDWithRetry(urn, token=self.token)
self.assertEqual(type(fd), self.result_type)
def GetClientTestTargets(client_ids=None,
hostnames=None,
token=None,
checkin_duration_threshold="20m"):
"""Get client urns for end-to-end tests.
Args:
client_ids: list of client id URN strings or rdf_client.ClientURNs
hostnames: list of hostnames to search for
token: access token
checkin_duration_threshold: clients that haven't checked in for this long
will be excluded
Returns:
client_id_set: set of rdf_client.ClientURNs available for end-to-end tests.
"""
if client_ids:
client_ids = set(client_ids)
else:
client_ids = set(config_lib.CONFIG.Get("Test.end_to_end_client_ids"))
if hostnames:
hosts = set(hostnames)
else:
hosts = set(config_lib.CONFIG.Get("Test.end_to_end_client_hostnames"))
if hosts:
client_id_dict = client_index.GetClientURNsForHostnames(hosts, token=token)
for client_list in client_id_dict.values():
client_ids.update(client_list)
client_id_set = set([rdf_client.ClientURN(x) for x in client_ids])
duration_threshold = rdfvalue.Duration(checkin_duration_threshold)
for client in aff4.FACTORY.MultiOpen(client_id_set, token=token):
# Only test against client IDs that have checked in recently. Test machines
# tend to have lots of old client IDs hanging around that will cause lots of
# waiting for timeouts in the tests.
if (rdfvalue.RDFDatetime.Now() - client.Get(client.Schema.LAST) >
duration_threshold):
client_id_set.remove(client.urn)
return client_id_set
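# Illustrative usage only (hypothetical hostname and token variable), following the
# docstring of GetClientTestTargets above:
#   targets = GetClientTestTargets(hostnames=["test-host-1"], token=access_token)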
| 31.480769
| 80
| 0.68444
|
#!/usr/bin/env python
"""Base module for end to end tests that run flows on clients."""
import re
import time
import traceback
import unittest
from grr.lib import aff4
from grr.lib import client_index
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import flow_utils
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib.aff4_objects import aff4_grr
from grr.lib.flows.console import debugging
from grr.lib.rdfvalues import client as rdf_client
class Error(Exception):
"""Test base error."""
class ErrorEmptyCollection(Error):
"""Raise when we expect values in a collection, but it is empty."""
class TestStateUncleanError(Error):
"""Raised when tests encounter bad state that indicates a cleanup failure."""
def RecursiveListChildren(prefix=None, token=None):
all_urns = set()
act_urns = set([prefix])
while act_urns:
next_urns = set()
for _, children in aff4.FACTORY.MultiListChildren(act_urns, token=token):
for urn in children:
next_urns.add(urn)
all_urns |= next_urns
act_urns = next_urns
return all_urns
class ClientTestBase(unittest.TestCase):
"""This is the base class for all client tests.
Tests should only inherit from this class if they are not safe to be run in
prod with the EndToEndTests cronjob.
"""
platforms = []
flow = None
args = {}
network_bytes_limit = None
timeout = flow_utils.DEFAULT_TIMEOUT
test_output_path = None
# How long after flow is marked complete we should expect results to be
# available in the collection. This is essentially how quickly we expect
# results to be available to users in the UI.
RESULTS_SLA_SECONDS = 10
# Only run on clients after this version
client_min_version = None
__metaclass__ = registry.MetaclassRegistry
def __str__(self):
return self.__class__.__name__
def __init__(self,
client_id=None,
platform=None,
local_worker=False,
token=None,
local_client=True):
# If we get passed a string, turn it into a urn.
self.client_id = rdf_client.ClientURN(client_id)
self.platform = platform
self.token = token
self.local_worker = local_worker
self.local_client = local_client
self.delete_urns = set()
super(ClientTestBase, self).__init__(methodName="runTest")
def _CleanState(self):
if self.test_output_path:
self.delete_urns.add(self.client_id.Add(self.test_output_path))
for urn in self.delete_urns:
self.DeleteUrn(urn)
if self.delete_urns:
self.VerifyEmpty(self.delete_urns)
def setUp(self):
self._CleanState()
def tearDown(self):
self._CleanState()
def runTest(self):
if self.client_min_version:
target_client = aff4.FACTORY.Open(self.client_id, token=self.token)
client_info = target_client.Get(target_client.Schema.CLIENT_INFO)
if client_info.client_version < self.client_min_version:
message = "Skipping version %s less than client_min_version: %s" % (
client_info.client_version, self.client_min_version)
return self.skipTest(message)
if self.local_worker:
self.session_id = debugging.StartFlowAndWorker(self.client_id, self.flow,
**self.args)
else:
self.session_id = flow_utils.StartFlowAndWait(
self.client_id,
flow_name=self.flow,
timeout=self.timeout,
token=self.token,
**self.args)
self.CheckFlow()
def CheckFlow(self):
pass
def VerifyEmpty(self, urns):
"""Verify urns have been deleted."""
try:
for urn in urns:
# TODO(user): aff4.FACTORY.Stat() is the right thing to use here.
# We open each urn to generate InstantiationError on failures, multiopen
# ignores these errors. This isn't too slow since it's almost always
# just one path anyway.
aff4.FACTORY.Open(urn, aff4_type=aff4.AFF4Volume, token=self.token)
except aff4.InstantiationError:
raise TestStateUncleanError("Path wasn't deleted: %s" %
traceback.format_exc())
def DeleteUrn(self, urn):
"""Deletes an object from the db and the index, and flushes the caches."""
data_store.DB.DeleteSubject(urn, token=self.token)
aff4.FACTORY._DeleteChildFromIndex(urn, token=self.token)
aff4.FACTORY.Flush()
def GetGRRBinaryName(self, run_interrogate=True):
client = aff4.FACTORY.Open(self.client_id, mode="r", token=self.token)
self.assertIsInstance(client, aff4_grr.VFSGRRClient)
config = client.Get(aff4_grr.VFSGRRClient.SchemaCls.GRR_CONFIGURATION)
if config is None:
# Try running Interrogate once.
if run_interrogate:
flow_utils.StartFlowAndWait(
self.client_id, flow_name="Interrogate", token=self.token)
return self.GetGRRBinaryName(run_interrogate=False)
else:
self.fail("No valid configuration found, interrogate the client before "
"running this test.")
else:
try:
self.binary_name = config["Client.binary_name"]
except KeyError:
self.binary_name = config["Client.name"]
return self.binary_name
def CheckMacMagic(self, fd):
data = fd.Read(10)
magic_values = ["cafebabe", "cefaedfe", "cffaedfe"]
magic_values = [x.decode("hex") for x in magic_values]
self.assertIn(
data[:4],
magic_values,
msg="Data %s not one of %s" % (data[:4], magic_values))
def CheckELFMagic(self, fd):
data = fd.Read(10)
self.assertEqual(data[1:4], "ELF")
def CheckPEMagic(self, fd):
data = fd.Read(10)
self.assertEqual(data[:2], "MZ")
def CheckCollectionNotEmptyWithRetry(self, collection_urn, token):
"""Check collection for results, return list if they exist.
Args:
collection_urn: URN of the collection
token: User token
Returns:
The collection contents as a list
Raises:
ErrorEmptyCollection: if the collection has no results after
self.RESULTS_SLA_SECONDS
"""
coll = aff4.FACTORY.Open(collection_urn, mode="r", token=token)
coll_list = list(coll)
if not coll_list:
for _ in range(self.RESULTS_SLA_SECONDS):
time.sleep(1)
coll_list = list(coll)
if coll_list:
return coll_list
raise ErrorEmptyCollection("No values in %s after SLA: %s seconds" %
(collection_urn, self.RESULTS_SLA_SECONDS))
return coll_list
def OpenFDWithRetry(self, file_urn, token):
"""Try to open a aff4 path, retry if it is AFF4Volume."""
fd = aff4.FACTORY.Open(file_urn, mode="r", token=token)
# All types are instances of AFF4Volume so we can't use isinstance.
if fd.__class__ is aff4.AFF4Volume:
for _ in range(self.RESULTS_SLA_SECONDS):
time.sleep(1)
fd = aff4.FACTORY.Open(file_urn, mode="r", token=token)
if fd.__class__ is not aff4.AFF4Volume:
return fd
self.fail(("No results were written to the data store. Maybe the GRR "
"client is not running with root privileges?"))
return fd
def GetURNFromGlobPathWithRetry(self, path):
pos = path.find("*")
if pos > 0:
base_urn = self.client_id.Add(path[:pos])
for _ in range(self.RESULTS_SLA_SECONDS):
for file_urn in RecursiveListChildren(
prefix=base_urn, token=self.token):
if re.search(path + "$", str(file_urn)):
self.delete_urns.add(file_urn)
return file_urn
time.sleep(1)
self.fail(("Output file %s not found. Maybe the GRR client "
"is not running with root privileges?" % path))
else:
self.delete_urns.add(self.client_id.Add(path))
return self.client_id.Add(path)
class AutomatedTest(ClientTestBase):
"""All tests that are safe to run in prod should inherit from this class."""
__metaclass__ = registry.MetaclassRegistry
# Prevents this from automatically registering.
__abstract = True # pylint: disable=g-bad-name
class TestVFSPathExists(AutomatedTest):
"""Test that checks expected VFS files were created."""
result_type = aff4_grr.VFSFile
def CheckFlow(self):
"""Verify VFS paths were created."""
urn = self.GetURNFromGlobPathWithRetry(self.test_output_path)
fd = self.OpenFDWithRetry(urn, token=self.token)
self.assertEqual(type(fd), self.result_type)
class VFSPathContentExists(AutomatedTest):
test_output_path = None
def CheckFlow(self):
urn = self.GetURNFromGlobPathWithRetry(self.test_output_path)
fd = self.OpenFDWithRetry(urn, token=self.token)
return self.CheckFile(fd)
def CheckFile(self, fd):
data = fd.Read(10)
# Some value was read from the sysctl.
self.assertTrue(data)
class VFSPathContentIsELF(VFSPathContentExists):
def CheckFile(self, fd):
self.CheckELFMagic(fd)
class VFSPathContentIsMachO(VFSPathContentExists):
def CheckFile(self, fd):
self.CheckMacMagic(fd)
class VFSPathContentIsPE(VFSPathContentExists):
def CheckFile(self, fd):
self.CheckPEMagic(fd)
class LocalWorkerTest(ClientTestBase):
SKIP_MESSAGE = ("This test uses a flow that is debug only. Use a "
"local worker to run this test.")
def runTest(self):
if not self.local_worker:
print self.SKIP_MESSAGE
return self.skipTest(self.SKIP_MESSAGE)
super(LocalWorkerTest, self).runTest()
class LocalClientTest(ClientTestBase):
SKIP_MESSAGE = ("This test needs to run with a local client and be invoked"
" with local_client=True.")
def runTest(self):
if not self.local_client:
print self.SKIP_MESSAGE
return self.skipTest(self.SKIP_MESSAGE)
super(LocalClientTest, self).runTest()
def GetClientTestTargets(client_ids=None,
hostnames=None,
token=None,
checkin_duration_threshold="20m"):
"""Get client urns for end-to-end tests.
Args:
client_ids: list of client id URN strings or rdf_client.ClientURNs
hostnames: list of hostnames to search for
token: access token
checkin_duration_threshold: clients that haven't checked in for this long
will be excluded
Returns:
client_id_set: set of rdf_client.ClientURNs available for end-to-end tests.
"""
if client_ids:
client_ids = set(client_ids)
else:
client_ids = set(config_lib.CONFIG.Get("Test.end_to_end_client_ids"))
if hostnames:
hosts = set(hostnames)
else:
hosts = set(config_lib.CONFIG.Get("Test.end_to_end_client_hostnames"))
if hosts:
client_id_dict = client_index.GetClientURNsForHostnames(hosts, token=token)
for client_list in client_id_dict.values():
client_ids.update(client_list)
client_id_set = set([rdf_client.ClientURN(x) for x in client_ids])
duration_threshold = rdfvalue.Duration(checkin_duration_threshold)
for client in aff4.FACTORY.MultiOpen(client_id_set, token=token):
# Only test against client IDs that have checked in recently. Test machines
# tend to have lots of old client IDs hanging around that will cause lots of
# waiting for timeouts in the tests.
if (rdfvalue.RDFDatetime.Now() - client.Get(client.Schema.LAST) >
duration_threshold):
client_id_set.remove(client.urn)
return client_id_set
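# Illustrative sketch only, not part of the original module: one way
# GetClientTestTargets above might be called to pick clients for an
# end-to-end run. The hostname value and the token object are assumptions
# made for this sketch.
def _ExampleSelectTargets(token):
  """Print the client URNs that would be exercised by the e2e tests."""
  targets = GetClientTestTargets(
      hostnames=["e2e-test-host.example.com"],
      token=token,
      checkin_duration_threshold="20m")
  for client_urn in targets:
    print "Will run end-to-end tests against %s" % client_urn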
| 4,382
| 510
| 536
|
5d38c82f8b5b5aab3e6e5cd285d57442d2cff181
| 13,803
|
py
|
Python
|
src/gluonts/model/predictor.py
|
Xiaoxiong-Liu/gluon-ts
|
097c492769258dd70b7f223f826b17b0051ceee9
|
[
"Apache-2.0"
] | 2,648
|
2019-06-03T17:18:27.000Z
|
2022-03-31T08:29:22.000Z
|
src/gluonts/model/predictor.py
|
Xiaoxiong-Liu/gluon-ts
|
097c492769258dd70b7f223f826b17b0051ceee9
|
[
"Apache-2.0"
] | 1,220
|
2019-06-04T09:00:14.000Z
|
2022-03-31T10:45:43.000Z
|
src/gluonts/model/predictor.py
|
Xiaoxiong-Liu/gluon-ts
|
097c492769258dd70b7f223f826b17b0051ceee9
|
[
"Apache-2.0"
] | 595
|
2019-06-04T01:04:31.000Z
|
2022-03-30T10:40:26.000Z
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import functools
import itertools
import json
import logging
import multiprocessing as mp
import sys
import traceback
from pathlib import Path
from pydoc import locate
from tempfile import TemporaryDirectory
from typing import TYPE_CHECKING, Callable, Iterator, Optional, Type
import numpy as np
import gluonts
from gluonts.core import fqname_for
from gluonts.core.component import equals, from_hyperparameters, validated
from gluonts.core.serde import dump_json, load_json
from gluonts.dataset.common import DataEntry, Dataset
from gluonts.exceptions import GluonTSException
from gluonts.model.forecast import Forecast
if TYPE_CHECKING: # avoid circular import
from gluonts.model.estimator import Estimator # noqa
OutputTransform = Callable[[DataEntry, np.ndarray], np.ndarray]
class Predictor:
"""
Abstract class representing predictor objects.
Parameters
----------
prediction_length
Prediction horizon.
freq
Frequency of the predicted data.
"""
__version__: str = gluonts.__version__
def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
"""
Compute forecasts for the time series in the provided dataset.
This method is not implemented in this abstract class; please
use one of the subclasses.
Parameters
----------
dataset
The dataset containing the time series to predict.
Returns
-------
Iterator[Forecast]
Iterator over the forecasts, in the same order as the dataset
iterable was provided.
"""
raise NotImplementedError
@classmethod
def deserialize(cls, path: Path, **kwargs) -> "Predictor":
"""
Load a serialized predictor from the given path
Parameters
----------
path
Path to the serialized files predictor.
**kwargs
Optional context/device parameter to be used with the predictor.
If nothing is passed, the GPU will be used if available and the CPU otherwise.
"""
# deserialize Predictor type
with (path / "type.txt").open("r") as fp:
tpe = locate(fp.readline())
# ensure that predictor_cls is a subtype of Predictor
if not issubclass(tpe, Predictor):
raise IOError(
f"Class {fqname_for(tpe)} is not "
f"a subclass of {fqname_for(Predictor)}"
)
# call deserialize() for the concrete Predictor type
return tpe.deserialize(path, **kwargs)
class RepresentablePredictor(Predictor):
"""
An abstract predictor that can be subclassed by models that are not based
on Gluon. Subclasses should have @validated() constructors.
(De)serialization and value equality are all implemented on top of the
@validated() logic.
Parameters
----------
prediction_length
Prediction horizon.
freq
Frequency of the predicted data.
"""
def __eq__(self, that):
"""
Two RepresentablePredictor instances are considered equal if they
have the same constructor arguments.
"""
return equals(self, that)
def _worker_loop(
predictor_path: Path,
input_queue: mp.Queue,
output_queue: mp.Queue,
worker_id,
**kwargs,
):
"""
Worker loop for multiprocessing Predictor.
Loads the predictor serialized in predictor_path
reads inputs from input_queue and writes forecasts to output_queue
"""
predictor = Predictor.deserialize(predictor_path)
while True:
idx, data_chunk = input_queue.get()
if idx is None:
output_queue.put((None, None, None))
break
try:
result = list(predictor.predict(data_chunk, **kwargs))
except Exception:
we = WorkerError(
"".join(traceback.format_exception(*sys.exc_info()))
)
output_queue.put((we, None, None))
break
output_queue.put((idx, worker_id, result))
class ParallelizedPredictor(Predictor):
"""
Runs multiple instances (workers) of a predictor in parallel.
Exceptions are propagated from the workers.
Note that there is currently an issue with tqdm that will cause things
to hang if the ParallelizedPredictor is used with tqdm and an exception
occurs during prediction.
https://github.com/tqdm/tqdm/issues/548
Parameters
----------
base_predictor
A representable predictor that will be used
num_workers
Number of workers (processes) to use. If set to
None, one worker per CPU will be used.
chunk_size
Number of items to pass per call
"""
class Localizer(Predictor):
"""
A Predictor that uses an estimator to train a local model per time series and
immediately calls this to predict.
Parameters
----------
estimator
The estimator object to train on each dataset entry at prediction time.
"""
| 32.942721
| 102
| 0.606897
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import functools
import itertools
import json
import logging
import multiprocessing as mp
import sys
import traceback
from pathlib import Path
from pydoc import locate
from tempfile import TemporaryDirectory
from typing import TYPE_CHECKING, Callable, Iterator, Optional, Type
import numpy as np
import gluonts
from gluonts.core import fqname_for
from gluonts.core.component import equals, from_hyperparameters, validated
from gluonts.core.serde import dump_json, load_json
from gluonts.dataset.common import DataEntry, Dataset
from gluonts.exceptions import GluonTSException
from gluonts.model.forecast import Forecast
if TYPE_CHECKING: # avoid circular import
from gluonts.model.estimator import Estimator # noqa
OutputTransform = Callable[[DataEntry, np.ndarray], np.ndarray]
class Predictor:
"""
Abstract class representing predictor objects.
Parameters
----------
prediction_length
Prediction horizon.
freq
Frequency of the predicted data.
"""
__version__: str = gluonts.__version__
def __init__(
self, prediction_length: int, freq: str, lead_time: int = 0
) -> None:
assert (
prediction_length > 0
), "The value of `prediction_length` should be > 0"
assert lead_time >= 0, "The value of `lead_time` should be >= 0"
self.prediction_length = prediction_length
self.freq = freq
self.lead_time = lead_time
def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
"""
Compute forecasts for the time series in the provided dataset.
This method is not implemented in this abstract class; please
use one of the subclasses.
Parameters
----------
dataset
The dataset containing the time series to predict.
Returns
-------
Iterator[Forecast]
Iterator over the forecasts, in the same order as the dataset
iterable was provided.
"""
raise NotImplementedError
def serialize(self, path: Path) -> None:
# serialize Predictor type
with (path / "type.txt").open("w") as fp:
fp.write(fqname_for(self.__class__))
with (path / "version.json").open("w") as fp:
json.dump(
{"model": self.__version__, "gluonts": gluonts.__version__}, fp
)
@classmethod
def deserialize(cls, path: Path, **kwargs) -> "Predictor":
"""
Load a serialized predictor from the given path
Parameters
----------
path
Path to the serialized files predictor.
**kwargs
Optional context/device parameter to be used with the predictor.
If nothing is passed, the GPU will be used if available and the CPU otherwise.
"""
# deserialize Predictor type
with (path / "type.txt").open("r") as fp:
tpe = locate(fp.readline())
# ensure that predictor_cls is a subtype of Predictor
if not issubclass(tpe, Predictor):
raise IOError(
f"Class {fqname_for(tpe)} is not "
f"a subclass of {fqname_for(Predictor)}"
)
# call deserialize() for the concrete Predictor type
return tpe.deserialize(path, **kwargs)
@classmethod
def from_hyperparameters(cls, **hyperparameters):
return from_hyperparameters(cls, **hyperparameters)
@classmethod
def derive_auto_fields(cls, train_iter):
return {}
@classmethod
def from_inputs(cls, train_iter, **params):
# auto_params usually include `use_feat_dynamic_real`, `use_feat_static_cat` and `cardinality`
auto_params = cls.derive_auto_fields(train_iter)
# user specified 'params' will take precedence:
params = {**auto_params, **params}
return cls.from_hyperparameters(**params)
class RepresentablePredictor(Predictor):
"""
An abstract predictor that can be subclassed by models that are not based
on Gluon. Subclasses should have @validated() constructors.
(De)serialization and value equality are all implemented on top of the
@validated() logic.
Parameters
----------
prediction_length
Prediction horizon.
freq
Frequency of the predicted data.
"""
@validated()
def __init__(
self, prediction_length: int, freq: str, lead_time: int = 0
) -> None:
super().__init__(
freq=freq, lead_time=lead_time, prediction_length=prediction_length
)
def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
for item in dataset:
yield self.predict_item(item)
def predict_item(self, item: DataEntry) -> Forecast:
raise NotImplementedError
def __eq__(self, that):
"""
Two RepresentablePredictor instances are considered equal if they
have the same constructor arguments.
"""
return equals(self, that)
def serialize(self, path: Path) -> None:
# call Predictor.serialize() in order to serialize the class name
super().serialize(path)
with (path / "predictor.json").open("w") as fp:
print(dump_json(self), file=fp)
@classmethod
def deserialize(cls, path: Path) -> "RepresentablePredictor":
with (path / "predictor.json").open("r") as fp:
return load_json(fp.read())
class WorkerError:
def __init__(self, msg):
self.msg = msg
def _worker_loop(
predictor_path: Path,
input_queue: mp.Queue,
output_queue: mp.Queue,
worker_id,
**kwargs,
):
"""
Worker loop for multiprocessing Predictor.
Loads the predictor serialized in predictor_path
reads inputs from input_queue and writes forecasts to output_queue
"""
predictor = Predictor.deserialize(predictor_path)
while True:
idx, data_chunk = input_queue.get()
if idx is None:
output_queue.put((None, None, None))
break
try:
result = list(predictor.predict(data_chunk, **kwargs))
except Exception:
we = WorkerError(
"".join(traceback.format_exception(*sys.exc_info()))
)
output_queue.put((we, None, None))
break
output_queue.put((idx, worker_id, result))
class ParallelizedPredictor(Predictor):
"""
Runs multiple instances (workers) of a predictor in parallel.
Exceptions are propagated from the workers.
Note that there is currently an issue with tqdm that will cause things
to hang if the ParallelizedPredictor is used with tqdm and an exception
occurs during prediction.
https://github.com/tqdm/tqdm/issues/548
Parameters
----------
base_predictor
A representable predictor that will be used
num_workers
Number of workers (processes) to use. If set to
None, one worker per CPU will be used.
chunk_size
Number of items to pass per call
"""
def __init__(
self,
base_predictor: Predictor,
num_workers: Optional[int] = None,
chunk_size=1,
) -> None:
super().__init__(
freq=base_predictor.freq,
lead_time=base_predictor.lead_time,
prediction_length=base_predictor.prediction_length,
)
self._base_predictor = base_predictor
self._num_workers = (
num_workers if num_workers is not None else mp.cpu_count()
)
self._chunk_size = chunk_size
self._num_running_workers = 0
self._input_queues = []
self._output_queue = None
def _grouper(self, iterable, n):
iterator = iter(iterable)
group = tuple(itertools.islice(iterator, n))
while group:
yield group
group = tuple(itertools.islice(iterator, n))
def terminate(self):
for q in self._input_queues:
q.put((None, None))
for w in self._workers:
w.terminate()
for i, w in enumerate(self._workers):
w.join()
def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
with TemporaryDirectory() as tempdir:
predictor_path = Path(tempdir)
self._base_predictor.serialize(predictor_path)
# TODO: Consider using shared memory for the data transfer.
self._input_queues = [mp.Queue() for _ in range(self._num_workers)]
self._output_queue = mp.Queue()
workers = []
for worker_id, in_q in enumerate(self._input_queues):
worker = mp.Process(
target=_worker_loop,
args=(predictor_path, in_q, self._output_queue, worker_id),
kwargs=kwargs,
)
worker.daemon = True
worker.start()
workers.append(worker)
self._num_running_workers += 1
self._workers = workers
chunked_data = self._grouper(dataset, self._chunk_size)
self._send_idx = 0
self._next_idx = 0
self._data_buffer = {}
worker_ids = list(range(self._num_workers))
def receive():
idx, worker_id, result = self._output_queue.get()
if isinstance(idx, WorkerError):
self._num_running_workers -= 1
self.terminate()
raise Exception(idx.msg)
if idx is not None:
self._data_buffer[idx] = result
return idx, worker_id, result
def get_next_from_buffer():
while self._next_idx in self._data_buffer:
result_batch = self._data_buffer.pop(self._next_idx)
self._next_idx += 1
for result in result_batch:
yield result
def send(worker_id, chunk):
q = self._input_queues[worker_id]
q.put((self._send_idx, chunk))
self._send_idx += 1
try:
# prime the queues
for wid in worker_ids:
chunk = next(chunked_data)
send(wid, chunk)
while True:
idx, wid, result = receive()
for res in get_next_from_buffer():
yield res
chunk = next(chunked_data)
send(wid, chunk)
except StopIteration:
# signal workers end of data
for q in self._input_queues:
q.put((None, None))
# collect any outstanding results
while self._num_running_workers > 0:
idx, worker_id, result = receive()
if idx is None:
self._num_running_workers -= 1
continue
for res in get_next_from_buffer():
yield res
assert len(self._data_buffer) == 0
assert self._send_idx == self._next_idx
class Localizer(Predictor):
"""
A Predictor that uses an estimator to train a local model per time series and
immediately calls this to predict.
Parameters
----------
estimator
The estimator object to train on each dataset entry at prediction time.
"""
def __init__(self, estimator: "Estimator"):
super().__init__(
freq=estimator.freq,
lead_time=estimator.lead_time,
prediction_length=estimator.prediction_length,
)
self.estimator = estimator
def predict(self, dataset: Dataset, **kwargs) -> Iterator[Forecast]:
logger = logging.getLogger(__name__)
for i, ts in enumerate(dataset, start=1):
logger.info(f"training for time series {i} / {len(dataset)}")
trained_pred = self.estimator.train([ts])
logger.info(f"predicting for time series {i} / {len(dataset)}")
yield from trained_pred.predict([ts], **kwargs)
class FallbackPredictor(Predictor):
@classmethod
def from_predictor(
cls, base: RepresentablePredictor, **overrides
) -> Predictor:
# Create predictor based on an existing predictor.
# This lets us create a MeanPredictor as a fallback on the fly.
return cls.from_hyperparameters(
**getattr(base, "__init_args__"), **overrides
)
def fallback(fallback_cls: Type[FallbackPredictor]):
def decorator(predict_item):
@functools.wraps(predict_item)
def fallback_predict(self, item: DataEntry) -> Forecast:
try:
return predict_item(self, item)
except GluonTSException:
raise
except Exception:
logging.warning(
f"Base predictor failed with: {traceback.format_exc()}"
)
fallback_predictor = fallback_cls.from_predictor(self)
return fallback_predictor.predict_item(item)
return fallback_predict
return decorator
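# Illustrative sketch only, not part of the original module: wiring the
# fallback decorator above onto a hypothetical RepresentablePredictor
# subclass. _ExampleMeanFallback, _ExamplePrimary and their behaviour are
# assumptions for the sketch; only the decorator usage mirrors the code above.
class _ExampleMeanFallback(RepresentablePredictor, FallbackPredictor):
    @validated()
    def __init__(self, prediction_length: int, freq: str) -> None:
        super().__init__(prediction_length=prediction_length, freq=freq)

    def predict_item(self, item: DataEntry) -> Forecast:
        # A real fallback would return a cheap forecast here (for example a
        # constant mean forecast); the sketch leaves it unimplemented.
        raise NotImplementedError


class _ExamplePrimary(RepresentablePredictor):
    @validated()
    def __init__(self, prediction_length: int, freq: str) -> None:
        super().__init__(prediction_length=prediction_length, freq=freq)

    @fallback(_ExampleMeanFallback)
    def predict_item(self, item: DataEntry) -> Forecast:
        # On any non-GluonTSException error raised here, the decorator above
        # rebuilds this predictor's __init_args__ into an _ExampleMeanFallback
        # and retries predict_item on that instance.
        raise RuntimeError("primary model failed")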
| 7,521
| 54
| 522
|
ab2c952692c66b0a5c9210538c5e5f17b73574c5
| 14,213
|
py
|
Python
|
c7n/actions/securityhub.py
|
edonkor1/cloud-custodian
|
6f54735acd071b6fc6a0cca851d36e1a1fa46aa0
|
[
"Apache-2.0"
] | null | null | null |
c7n/actions/securityhub.py
|
edonkor1/cloud-custodian
|
6f54735acd071b6fc6a0cca851d36e1a1fa46aa0
|
[
"Apache-2.0"
] | null | null | null |
c7n/actions/securityhub.py
|
edonkor1/cloud-custodian
|
6f54735acd071b6fc6a0cca851d36e1a1fa46aa0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from datetime import datetime
from dateutil.tz import tzutc
import hashlib
import jmespath
import json
from .core import BaseAction
from c7n.utils import type_schema, local_session, chunks, dumps, filter_empty
from c7n.manager import resources as aws_resources
from c7n.version import version
FindingTypes = {
"Software and Configuration Checks": [
"Vulnerabilities",
"Vulnerabilities/CVE",
"AWS Security Best Practices",
"AWS Security Best Practices/Network Reachability",
"Industry and Regulatory Standards",
"Industry and Regulatory Standards/CIS Host Hardening Benchmarks",
"Industry and Regulatory Standards/CIS AWS Foundations Benchmark",
"Industry and Regulatory Standards/PCI-DSS Controls",
"Industry and Regulatory Standards/Cloud Security Alliance Controls",
"Industry and Regulatory Standards/ISO 90001 Controls",
"Industry and Regulatory Standards/ISO 27001 Controls",
"Industry and Regulatory Standards/ISO 27017 Controls",
"Industry and Regulatory Standards/ISO 27018 Controls",
"Industry and Regulatory Standards/SOC 1",
"Industry and Regulatory Standards/SOC 2",
"Industry and Regulatory Standards/HIPAA Controls (USA)",
"Industry and Regulatory Standards/NIST 800-53 Controls (USA)",
"Industry and Regulatory Standards/NIST CSF Controls (USA)",
"Industry and Regulatory Standards/IRAP Controls (Australia)",
"Industry and Regulatory Standards/K-ISMS Controls (Korea)",
"Industry and Regulatory Standards/MTCS Controls (Singapore)",
"Industry and Regulatory Standards/FISC Controls (Japan)",
"Industry and Regulatory Standards/My Number Act Controls (Japan)",
"Industry and Regulatory Standards/ENS Controls (Spain)",
"Industry and Regulatory Standards/Cyber Essentials Plus Controls (UK)",
"Industry and Regulatory Standards/G-Cloud Controls (UK)",
"Industry and Regulatory Standards/C5 Controls (Germany)",
"Industry and Regulatory Standards/IT-Grundschutz Controls (Germany)",
"Industry and Regulatory Standards/GDPR Controls (Europe)",
"Industry and Regulatory Standards/TISAX Controls (Europe)",
],
"TTPs": [
"Initial Access",
"Execution",
"Persistence",
"Privilege Escalation",
"Defense Evasion",
"Credential Access",
"Discovery",
"Lateral Movement",
"Collection",
"Command and Control",
],
"Effects": [
"Data Exposure",
"Data Exfiltration",
"Data Destruction",
"Denial of Service",
"Resource Consumption",
],
}
# Mostly undocumented value size limit
SECHUB_VALUE_SIZE_LIMIT = 1024
def build_vocabulary():
    vocab = []
    for ns, quals in FindingTypes.items():
        for q in quals:
            vocab.append("{}/{}".format(ns, q))
    return vocab
class PostFinding(BaseAction):
"""Report a finding to AWS Security Hub.
Custodian acts as a finding provider, allowing users to craft
policies that report to AWS Security Hub.
For resources that are taggable, we will tag the resource with an identifier
such that further findings generate updates.
Example: generate a finding for accounts that don't have shield enabled.
:example:
.. code-block:: yaml
policies:
- name: account-shield-enabled
resource: account
filters:
- shield-enabled
actions:
- type: post-finding
severity_normalized: 6
types:
- "Software and Configuration Checks/Industry and Regulatory Standards/NIST CSF Controls (USA)"
recommendation: "Enable shield"
recommendation_url: "https://www.example.com/policies/AntiDDoS.html"
confidence: 100
compliance_status: FAILED
""" # NOQA
FindingVersion = "2018-10-08"
ProductName = "default"
permissions = ('securityhub:BatchImportFindings',)
schema_alias = True
schema = type_schema(
"post-finding",
required=["types"],
title={"type": "string"},
severity={"type": "number", 'default': 0},
severity_normalized={"type": "number", "min": 0, "max": 100, 'default': 0},
confidence={"type": "number", "min": 0, "max": 100},
criticality={"type": "number", "min": 0, "max": 100},
# Cross region aggregation
region={'type': 'string', 'description': 'cross-region aggregation target'},
recommendation={"type": "string"},
recommendation_url={"type": "string"},
fields={"type": "object"},
batch_size={'type': 'integer', 'minimum': 1, 'maximum': 10},
types={
"type": "array",
"items": {"type": "string", "enum": build_vocabulary()},
},
compliance_status={
"type": "string",
"enum": ["PASSED", "WARNING", "FAILED", "NOT_AVAILABLE"],
},
)
NEW_FINDING = 'New'
aws_resources.subscribe(
aws_resources.EVENT_FINAL, OtherResourcePostFinding.register_resource)
| 36.821244
| 110
| 0.576585
|
# Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from datetime import datetime
from dateutil.tz import tzutc
import hashlib
import jmespath
import json
from .core import BaseAction
from c7n.utils import type_schema, local_session, chunks, dumps, filter_empty
from c7n.manager import resources as aws_resources
from c7n.version import version
FindingTypes = {
"Software and Configuration Checks": [
"Vulnerabilities",
"Vulnerabilities/CVE",
"AWS Security Best Practices",
"AWS Security Best Practices/Network Reachability",
"Industry and Regulatory Standards",
"Industry and Regulatory Standards/CIS Host Hardening Benchmarks",
"Industry and Regulatory Standards/CIS AWS Foundations Benchmark",
"Industry and Regulatory Standards/PCI-DSS Controls",
"Industry and Regulatory Standards/Cloud Security Alliance Controls",
"Industry and Regulatory Standards/ISO 90001 Controls",
"Industry and Regulatory Standards/ISO 27001 Controls",
"Industry and Regulatory Standards/ISO 27017 Controls",
"Industry and Regulatory Standards/ISO 27018 Controls",
"Industry and Regulatory Standards/SOC 1",
"Industry and Regulatory Standards/SOC 2",
"Industry and Regulatory Standards/HIPAA Controls (USA)",
"Industry and Regulatory Standards/NIST 800-53 Controls (USA)",
"Industry and Regulatory Standards/NIST CSF Controls (USA)",
"Industry and Regulatory Standards/IRAP Controls (Australia)",
"Industry and Regulatory Standards/K-ISMS Controls (Korea)",
"Industry and Regulatory Standards/MTCS Controls (Singapore)",
"Industry and Regulatory Standards/FISC Controls (Japan)",
"Industry and Regulatory Standards/My Number Act Controls (Japan)",
"Industry and Regulatory Standards/ENS Controls (Spain)",
"Industry and Regulatory Standards/Cyber Essentials Plus Controls (UK)",
"Industry and Regulatory Standards/G-Cloud Controls (UK)",
"Industry and Regulatory Standards/C5 Controls (Germany)",
"Industry and Regulatory Standards/IT-Grundschutz Controls (Germany)",
"Industry and Regulatory Standards/GDPR Controls (Europe)",
"Industry and Regulatory Standards/TISAX Controls (Europe)",
],
"TTPs": [
"Initial Access",
"Execution",
"Persistence",
"Privilege Escalation",
"Defense Evasion",
"Credential Access",
"Discovery",
"Lateral Movement",
"Collection",
"Command and Control",
],
"Effects": [
"Data Exposure",
"Data Exfiltration",
"Data Destruction",
"Denial of Service",
"Resource Consumption",
],
}
# Mostly undocumented value size limit
SECHUB_VALUE_SIZE_LIMIT = 1024
def build_vocabulary():
vocab = []
for ns, quals in FindingTypes.items():
for q in quals:
vocab.append("{}/{}".format(ns, q))
return vocab
class PostFinding(BaseAction):
"""Report a finding to AWS Security Hub.
Custodian acts as a finding provider, allowing users to craft
policies that report to AWS Security Hub.
For resources that are taggable, we will tag the resource with an identifier
such that further findings generate updates.
Example: generate a finding for accounts that don't have shield enabled.
:example:
.. code-block:: yaml
policies:
- name: account-shield-enabled
resource: account
filters:
- shield-enabled
actions:
- type: post-finding
severity_normalized: 6
types:
- "Software and Configuration Checks/Industry and Regulatory Standards/NIST CSF Controls (USA)"
recommendation: "Enable shield"
recommendation_url: "https://www.example.com/policies/AntiDDoS.html"
confidence: 100
compliance_status: FAILED
""" # NOQA
FindingVersion = "2018-10-08"
ProductName = "default"
permissions = ('securityhub:BatchImportFindings',)
schema_alias = True
schema = type_schema(
"post-finding",
required=["types"],
title={"type": "string"},
severity={"type": "number", 'default': 0},
severity_normalized={"type": "number", "min": 0, "max": 100, 'default': 0},
confidence={"type": "number", "min": 0, "max": 100},
criticality={"type": "number", "min": 0, "max": 100},
# Cross region aggregation
region={'type': 'string', 'description': 'cross-region aggregation target'},
recommendation={"type": "string"},
recommendation_url={"type": "string"},
fields={"type": "object"},
batch_size={'type': 'integer', 'minimum': 1, 'maximum': 10},
types={
"type": "array",
"items": {"type": "string", "enum": build_vocabulary()},
},
compliance_status={
"type": "string",
"enum": ["PASSED", "WARNING", "FAILED", "NOT_AVAILABLE"],
},
)
NEW_FINDING = 'New'
def get_finding_tag(self, resource):
finding_tag = None
tags = resource.get('Tags', [])
finding_key = '{}:{}'.format('c7n:FindingId',
self.data.get('title', self.manager.ctx.policy.name))
# Support Tags as dictionary
if isinstance(tags, dict):
return tags.get(finding_key)
# Support Tags as list of {'Key': 'Value'}
for t in tags:
key = t['Key']
value = t['Value']
if key == finding_key:
finding_tag = value
return finding_tag
def group_resources(self, resources):
grouped_resources = {}
for r in resources:
finding_tag = self.get_finding_tag(r) or self.NEW_FINDING
grouped_resources.setdefault(finding_tag, []).append(r)
return grouped_resources
def process(self, resources, event=None):
region_name = self.data.get('region', self.manager.config.region)
client = local_session(
self.manager.session_factory).client(
"securityhub", region_name=region_name)
now = datetime.utcnow().replace(tzinfo=tzutc()).isoformat()
# default batch size to one to work around security hub console issue
# which only shows a single resource in a finding.
batch_size = self.data.get('batch_size', 1)
stats = Counter()
for key, grouped_resources in self.group_resources(resources).items():
for resource_set in chunks(grouped_resources, batch_size):
stats['Finding'] += 1
if key == self.NEW_FINDING:
finding_id = None
created_at = now
updated_at = now
else:
finding_id, created_at = self.get_finding_tag(
resource_set[0]).split(':', 1)
updated_at = now
finding = self.get_finding(
resource_set, finding_id, created_at, updated_at)
import_response = client.batch_import_findings(
Findings=[finding])
if import_response['FailedCount'] > 0:
stats['Failed'] += import_response['FailedCount']
self.log.error(
"import_response=%s" % (import_response))
if key == self.NEW_FINDING:
stats['New'] += len(resource_set)
# Tag resources with new finding ids
tag_action = self.manager.action_registry.get('tag')
if tag_action is None:
continue
tag_action({
'key': '{}:{}'.format(
'c7n:FindingId',
self.data.get(
'title', self.manager.ctx.policy.name)),
'value': '{}:{}'.format(
finding['Id'], created_at)},
self.manager).process(resource_set)
else:
stats['Update'] += len(resource_set)
self.log.debug(
"policy:%s securityhub %d findings resources %d new %d updated %d failed",
self.manager.ctx.policy.name,
stats['Finding'],
stats['New'],
stats['Update'],
stats['Failed'])
def get_finding(self, resources, existing_finding_id, created_at, updated_at):
policy = self.manager.ctx.policy
model = self.manager.resource_type
region = self.data.get('region', self.manager.config.region)
if existing_finding_id:
finding_id = existing_finding_id
else:
finding_id = '{}/{}/{}/{}'.format(
self.manager.config.region,
self.manager.config.account_id,
hashlib.md5(json.dumps(
policy.data).encode('utf8')).hexdigest(),
hashlib.md5(json.dumps(list(sorted(
[r[model.id] for r in resources]))).encode(
'utf8')).hexdigest())
finding = {
"SchemaVersion": self.FindingVersion,
"ProductArn": "arn:aws:securityhub:{}:{}:product/{}/{}".format(
region,
self.manager.config.account_id,
self.manager.config.account_id,
self.ProductName,
),
"AwsAccountId": self.manager.config.account_id,
"Description": self.data.get(
"description", policy.data.get("description", "")
).strip(),
"Title": self.data.get("title", policy.name),
'Id': finding_id,
"GeneratorId": policy.name,
'CreatedAt': created_at,
'UpdatedAt': updated_at,
"RecordState": "ACTIVE",
}
severity = {'Product': 0, 'Normalized': 0}
if self.data.get("severity") is not None:
severity["Product"] = self.data["severity"]
if self.data.get("severity_normalized") is not None:
severity["Normalized"] = self.data["severity_normalized"]
if severity:
finding["Severity"] = severity
recommendation = {}
if self.data.get("recommendation"):
recommendation["Text"] = self.data["recommendation"]
if self.data.get("recommendation_url"):
recommendation["Url"] = self.data["recommendation_url"]
if recommendation:
finding["Remediation"] = {"Recommendation": recommendation}
if "confidence" in self.data:
finding["Confidence"] = self.data["confidence"]
if "criticality" in self.data:
finding["Criticality"] = self.data["criticality"]
if "compliance_status" in self.data:
finding["Compliance"] = {"Status": self.data["compliance_status"]}
fields = {
'resource': policy.resource_type,
'ProviderName': 'CloudCustodian',
'ProviderVersion': version
}
if "fields" in self.data:
fields.update(self.data["fields"])
else:
tags = {}
for t in policy.tags:
if ":" in t:
k, v = t.split(":", 1)
else:
k, v = t, ""
tags[k] = v
fields.update(tags)
if fields:
finding["ProductFields"] = fields
finding_resources = []
for r in resources:
finding_resources.append(self.format_resource(r))
finding["Resources"] = finding_resources
finding["Types"] = list(self.data["types"])
return filter_empty(finding)
def format_resource(self, r):
raise NotImplementedError("subclass responsibility")
class OtherResourcePostFinding(PostFinding):
fields = ()
def format_resource(self, r):
details = {}
for k in r:
if isinstance(k, (list, dict)):
continue
details[k] = r[k]
for f in self.fields:
value = jmespath.search(f['expr'], r)
if not value:
continue
details[f['key']] = value
for k, v in details.items():
if isinstance(v, datetime):
v = v.isoformat()
elif isinstance(v, (list, dict)):
v = dumps(v)
elif isinstance(v, (int, float, bool)):
v = str(v)
else:
continue
details[k] = v[:SECHUB_VALUE_SIZE_LIMIT]
details['c7n:resource-type'] = self.manager.type
other = {
'Type': 'Other',
'Id': self.manager.get_arns([r])[0],
'Region': self.manager.config.region,
'Details': {'Other': filter_empty(details)}
}
tags = {t['Key']: t['Value'] for t in r.get('Tags', [])}
if tags:
other['Tags'] = tags
return other
@classmethod
def register_resource(klass, registry, event):
for rtype, resource_manager in registry.items():
if not resource_manager.has_arn():
continue
if 'post-finding' in resource_manager.action_registry:
continue
resource_manager.action_registry.register('post-finding', klass)
aws_resources.subscribe(
aws_resources.EVENT_FINAL, OtherResourcePostFinding.register_resource)
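# Illustrative sketch only, not part of the original module: what the
# build_vocabulary() helper above produces and how a policy's `types` value
# maps onto it. The specific type strings checked here are just examples.
def _example_vocabulary():
    vocab = build_vocabulary()
    # Each entry is "<namespace>/<qualifier>", built from FindingTypes above,
    # e.g. "TTPs/Initial Access" or "Effects/Data Exposure".
    assert "TTPs/Initial Access" in vocab
    assert "Effects/Data Exposure" in vocab
    return vocab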
| 8,282
| 111
| 181
|
c072119317e8f50050b11b6d6e6ffec573282f73
| 640
|
py
|
Python
|
daemon/core/gui/graph/tags.py
|
b00ga/core
|
e0842197e389f3f14c73dc5db6cc26f78e665f62
|
[
"BSD-2-Clause"
] | null | null | null |
daemon/core/gui/graph/tags.py
|
b00ga/core
|
e0842197e389f3f14c73dc5db6cc26f78e665f62
|
[
"BSD-2-Clause"
] | null | null | null |
daemon/core/gui/graph/tags.py
|
b00ga/core
|
e0842197e389f3f14c73dc5db6cc26f78e665f62
|
[
"BSD-2-Clause"
] | null | null | null |
ANNOTATION = "annotation"
GRIDLINE = "gridline"
SHAPE = "shape"
SHAPE_TEXT = "shapetext"
EDGE = "edge"
LINK_LABEL = "linklabel"
WIRELESS_EDGE = "wireless"
ANTENNA = "antenna"
NODE_LABEL = "nodename"
NODE = "node"
WALLPAPER = "wallpaper"
SELECTION = "selectednodes"
MARKER = "marker"
ORGANIZE_TAGS = [
WALLPAPER,
GRIDLINE,
SHAPE,
SHAPE_TEXT,
EDGE,
WIRELESS_EDGE,
LINK_LABEL,
ANTENNA,
NODE,
NODE_LABEL,
SELECTION,
MARKER,
]
RESET_TAGS = [
EDGE,
NODE,
NODE_LABEL,
WALLPAPER,
LINK_LABEL,
ANTENNA,
WIRELESS_EDGE,
SELECTION,
SHAPE,
SHAPE_TEXT,
MARKER,
]
| 15.609756
| 27
| 0.640625
|
ANNOTATION = "annotation"
GRIDLINE = "gridline"
SHAPE = "shape"
SHAPE_TEXT = "shapetext"
EDGE = "edge"
LINK_LABEL = "linklabel"
WIRELESS_EDGE = "wireless"
ANTENNA = "antenna"
NODE_LABEL = "nodename"
NODE = "node"
WALLPAPER = "wallpaper"
SELECTION = "selectednodes"
MARKER = "marker"
ORGANIZE_TAGS = [
WALLPAPER,
GRIDLINE,
SHAPE,
SHAPE_TEXT,
EDGE,
WIRELESS_EDGE,
LINK_LABEL,
ANTENNA,
NODE,
NODE_LABEL,
SELECTION,
MARKER,
]
RESET_TAGS = [
EDGE,
NODE,
NODE_LABEL,
WALLPAPER,
LINK_LABEL,
ANTENNA,
WIRELESS_EDGE,
SELECTION,
SHAPE,
SHAPE_TEXT,
MARKER,
]
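# Illustrative sketch only, not part of the original module: one way the tag
# constants above could be used with a tkinter canvas. The canvas object and
# the idea that ORGANIZE_TAGS encodes back-to-front stacking order are
# assumptions for this sketch, not something this module states.
def organize_canvas(canvas):
    """Raise tagged items so later tags in ORGANIZE_TAGS end up on top."""
    for tag in ORGANIZE_TAGS:
        canvas.tag_raise(tag)


def clear_canvas(canvas):
    """Delete every item carrying one of the RESET_TAGS."""
    for tag in RESET_TAGS:
        canvas.delete(tag)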
| 0
| 0
| 0
|
24804bfc195e51973e66a3f559e01d09536750e9
| 17,099
|
py
|
Python
|
custom_components/sun2/sensor.py
|
Nag94/HomeAssistantConfig
|
d5f806e05be8d92bf487c58322d20cd9b08c6b98
|
[
"Unlicense"
] | 97
|
2019-06-19T19:06:53.000Z
|
2022-03-30T06:58:06.000Z
|
custom_components/sun2/sensor.py
|
Nag94/HomeAssistantConfig
|
d5f806e05be8d92bf487c58322d20cd9b08c6b98
|
[
"Unlicense"
] | 42
|
2019-06-19T20:20:45.000Z
|
2022-03-31T13:02:35.000Z
|
custom_components/sun2/sensor.py
|
Nag94/HomeAssistantConfig
|
d5f806e05be8d92bf487c58322d20cd9b08c6b98
|
[
"Unlicense"
] | 20
|
2020-04-18T19:28:47.000Z
|
2022-03-06T18:23:09.000Z
|
"""Sun2 Sensor."""
from datetime import datetime, time, timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_ELEVATION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_MONITORED_CONDITIONS,
CONF_TIME_ZONE,
DEVICE_CLASS_TIMESTAMP,
)
from homeassistant.core import callback
from homeassistant.util import dt as dt_util
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util import slugify
from .helpers import (
async_init_astral_loc,
astral_event,
get_local_info,
nearest_second,
SIG_LOC_UPDATED,
)
_LOGGER = logging.getLogger(__name__)
_SOLAR_DEPRESSIONS = ("astronomical", "civil", "nautical")
_ELEV_RND = 0.5
_ELEV_MAX_ERR = 0.02
_DELTA = timedelta(minutes=5)
_ONE_DAY = timedelta(days=1)
ATTR_NEXT_CHANGE = "next_change"
class Sun2Sensor(Entity):
"""Sun2 Sensor."""
def __init__(self, hass, sensor_type, icon, info, default_solar_depression=0):
"""Initialize sensor."""
self.hass = hass
if any(sol_dep in sensor_type for sol_dep in _SOLAR_DEPRESSIONS):
self._solar_depression, self._event = sensor_type.rsplit("_", 1)
else:
self._solar_depression = default_solar_depression
self._event = sensor_type
self._icon = icon
self._name = self._orig_name = sensor_type.replace("_", " ").title()
self._state = None
self._yesterday = None
self._today = None
self._tomorrow = None
self._use_local_info = info is None
if self._use_local_info:
self._info = get_local_info(hass)
else:
self._info = info
self._unsub_loc_updated = None
self._unsub_update = None
@property
def should_poll(self):
"""Do not poll."""
return False
@property
def name(self):
"""Return the name of the entity."""
return self._name
@property
def state(self):
"""Return the state of the entity."""
return self._state
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return self._device_state_attributes()
@property
def icon(self):
"""Return the icon to use in the frontend."""
return self._icon
async def async_loc_updated(self):
"""Location updated."""
self._loc_updated()
async def async_added_to_hass(self):
"""Subscribe to update signal and set up fixed updating."""
slug = slugify(self._orig_name)
object_id = self.entity_id.split('.')[1]
if slug != object_id and object_id.endswith(slug):
prefix = object_id[:-len(slug)].replace("_", " ").strip().title()
self._name = f"{prefix} {self._orig_name}"
if self._use_local_info:
self._unsub_loc_updated = async_dispatcher_connect(
self.hass, SIG_LOC_UPDATED, self.async_loc_updated
)
self._setup_fixed_updating()
async def async_will_remove_from_hass(self):
"""Disconnect from update signal and cancel fixed updating."""
if self._unsub_loc_updated:
self._unsub_loc_updated()
if self._unsub_update:
self._unsub_update()
self._name = self._orig_name
async def async_update(self):
"""Update state."""
self._update()
class Sun2PointInTimeSensor(Sun2Sensor):
"""Sun2 Point in Time Sensor."""
def __init__(self, hass, sensor_type, icon, info):
"""Initialize sensor."""
super().__init__(hass, sensor_type, icon, info, "civil")
@property
def device_class(self):
"""Return the class of this device."""
return DEVICE_CLASS_TIMESTAMP
class Sun2PeriodOfTimeSensor(Sun2Sensor):
"""Sun2 Period of Time Sensor."""
def __init__(self, hass, sensor_type, icon, info):
"""Initialize sensor."""
super().__init__(hass, sensor_type, icon, info, 0.833)
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return "hr"
class Sun2MinMaxElevationSensor(Sun2Sensor):
"""Sun2 Min/Max Elevation Sensor."""
def __init__(self, hass, sensor_type, icon, info, is_min):
"""Initialize sensor."""
super().__init__(hass, sensor_type, icon, info)
self._event = "solar_midnight" if is_min else "solar_noon"
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return "°"
class Sun2MinElevationSensor(Sun2MinMaxElevationSensor):
"""Sun2 Min Elevation Sensor."""
def __init__(self, hass, sensor_type, icon, info):
"""Initialize sensor."""
super().__init__(hass, sensor_type, icon, info, is_min=True)
class Sun2MaxElevationSensor(Sun2MinMaxElevationSensor):
"""Sun2 Max Elevation Sensor."""
def __init__(self, hass, sensor_type, icon, info):
"""Initialize sensor."""
super().__init__(hass, sensor_type, icon, info, is_min=False)
class Sun2ElevationSensor(Sun2Sensor):
"""Sun2 Elevation Sensor."""
def __init__(self, hass, sensor_type, icon, info):
"""Initialize sensor."""
super().__init__(hass, sensor_type, icon, info)
self._reset()
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {ATTR_NEXT_CHANGE: self._next_change}
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return "°"
def _loc_updated(self):
"""Location updated."""
self._reset()
super()._loc_updated()
_SENSOR_TYPES = {
# Points in time
"solar_midnight": (Sun2PointInTimeSensor, "mdi:weather-night"),
"astronomical_dawn": (Sun2PointInTimeSensor, "mdi:weather-sunset-up"),
"nautical_dawn": (Sun2PointInTimeSensor, "mdi:weather-sunset-up"),
"dawn": (Sun2PointInTimeSensor, "mdi:weather-sunset-up"),
"sunrise": (Sun2PointInTimeSensor, "mdi:weather-sunset-up"),
"solar_noon": (Sun2PointInTimeSensor, "mdi:weather-sunny"),
"sunset": (Sun2PointInTimeSensor, "mdi:weather-sunset-down"),
"dusk": (Sun2PointInTimeSensor, "mdi:weather-sunset-down"),
"nautical_dusk": (Sun2PointInTimeSensor, "mdi:weather-sunset-down"),
"astronomical_dusk": (Sun2PointInTimeSensor, "mdi:weather-sunset-down"),
# Time periods
"daylight": (Sun2PeriodOfTimeSensor, "mdi:weather-sunny"),
"civil_daylight": (Sun2PeriodOfTimeSensor, "mdi:weather-sunny"),
"nautical_daylight": (Sun2PeriodOfTimeSensor, "mdi:weather-sunny"),
"astronomical_daylight": (Sun2PeriodOfTimeSensor, "mdi:weather-sunny"),
"night": (Sun2PeriodOfTimeSensor, "mdi:weather-night"),
"civil_night": (Sun2PeriodOfTimeSensor, "mdi:weather-night"),
"nautical_night": (Sun2PeriodOfTimeSensor, "mdi:weather-night"),
"astronomical_night": (Sun2PeriodOfTimeSensor, "mdi:weather-night"),
# Min/Max elevation
"min_elevation": (Sun2MinElevationSensor, "mdi:weather-night"),
"max_elevation": (Sun2MaxElevationSensor, "mdi:weather-sunny"),
# Elevation
"elevation": (Sun2ElevationSensor, "mdi:weather-sunny"),
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MONITORED_CONDITIONS): vol.All(
cv.ensure_list, [vol.In(_SENSOR_TYPES)]
),
vol.Inclusive(CONF_LATITUDE, "location"): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, "location"): cv.longitude,
vol.Inclusive(CONF_TIME_ZONE, "location"): cv.time_zone,
vol.Inclusive(CONF_ELEVATION, "location"): vol.Coerce(float),
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up sensors."""
if CONF_LATITUDE in config:
info = (
config[CONF_LATITUDE],
config[CONF_LONGITUDE],
config[CONF_TIME_ZONE],
config[CONF_ELEVATION],
)
else:
info = None
async_add_entities(
[
_SENSOR_TYPES[event][0](hass, event, _SENSOR_TYPES[event][1], info)
for event in config[CONF_MONITORED_CONDITIONS]
],
True,
)
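# Illustrative sketch only, not part of the original module: how the
# _SENSOR_TYPES table above maps a monitored condition onto a sensor class
# and icon. The hass object and the choice to pass info=None (use the local
# Home Assistant location) are assumptions for the sketch.
def _example_build_sensor(hass, condition="sunrise"):
    sensor_cls, icon = _SENSOR_TYPES[condition]
    # For "sunrise" this yields a Sun2PointInTimeSensor with a sunset-up icon.
    return sensor_cls(hass, condition, icon, None)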
| 33.926587
| 87
| 0.62606
|
"""Sun2 Sensor."""
from datetime import datetime, time, timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_ELEVATION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_MONITORED_CONDITIONS,
CONF_TIME_ZONE,
DEVICE_CLASS_TIMESTAMP,
)
from homeassistant.core import callback
from homeassistant.util import dt as dt_util
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util import slugify
from .helpers import (
async_init_astral_loc,
astral_event,
get_local_info,
nearest_second,
SIG_LOC_UPDATED,
)
_LOGGER = logging.getLogger(__name__)
_SOLAR_DEPRESSIONS = ("astronomical", "civil", "nautical")
_ELEV_RND = 0.5
_ELEV_MAX_ERR = 0.02
_DELTA = timedelta(minutes=5)
_ONE_DAY = timedelta(days=1)
ATTR_NEXT_CHANGE = "next_change"
def next_midnight(dt):
return datetime.combine(dt.date() + _ONE_DAY, time(), dt.tzinfo)
class Sun2Sensor(Entity):
"""Sun2 Sensor."""
def __init__(self, hass, sensor_type, icon, info, default_solar_depression=0):
"""Initialize sensor."""
self.hass = hass
if any(sol_dep in sensor_type for sol_dep in _SOLAR_DEPRESSIONS):
self._solar_depression, self._event = sensor_type.rsplit("_", 1)
else:
self._solar_depression = default_solar_depression
self._event = sensor_type
self._icon = icon
self._name = self._orig_name = sensor_type.replace("_", " ").title()
self._state = None
self._yesterday = None
self._today = None
self._tomorrow = None
self._use_local_info = info is None
if self._use_local_info:
self._info = get_local_info(hass)
else:
self._info = info
self._unsub_loc_updated = None
self._unsub_update = None
@property
def _info(self):
return self.__info
@_info.setter
def _info(self, info):
self.__info = info
self._tzinfo = dt_util.get_time_zone(info[2])
async_init_astral_loc(self.hass, info)
@property
def should_poll(self):
"""Do not poll."""
return False
@property
def name(self):
"""Return the name of the entity."""
return self._name
@property
def state(self):
"""Return the state of the entity."""
return self._state
def _device_state_attributes(self):
return {
"yesterday": self._yesterday,
"today": self._today,
"tomorrow": self._tomorrow,
}
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return self._device_state_attributes()
@property
def icon(self):
"""Return the icon to use in the frontend."""
return self._icon
def _setup_fixed_updating(self):
# Default behavior is to update every midnight.
# Override for sensor types that should update at a different time, or
# that have a more dynamic update schedule. In the latter case, override
# this with a method that does nothing and set up the update at the end
# of an overridden _update instead.
@callback
def async_update_at_midnight(now):
next_midn = next_midnight(now.astimezone(self._tzinfo))
self._unsub_update = async_track_point_in_utc_time(
self.hass, async_update_at_midnight, next_midn
)
self.async_schedule_update_ha_state(True)
next_midn = next_midnight(dt_util.now(self._tzinfo))
self._unsub_update = async_track_point_in_utc_time(
self.hass, async_update_at_midnight, next_midn
)
def _loc_updated(self):
if self._unsub_update:
self._unsub_update()
self._unsub_update = None
self._info = get_local_info(self.hass)
self._setup_fixed_updating()
self.async_schedule_update_ha_state(True)
async def async_loc_updated(self):
"""Location updated."""
self._loc_updated()
async def async_added_to_hass(self):
"""Subscribe to update signal and set up fixed updating."""
slug = slugify(self._orig_name)
object_id = self.entity_id.split('.')[1]
if slug != object_id and object_id.endswith(slug):
prefix = object_id[:-len(slug)].replace("_", " ").strip().title()
self._name = f"{prefix} {self._orig_name}"
if self._use_local_info:
self._unsub_loc_updated = async_dispatcher_connect(
self.hass, SIG_LOC_UPDATED, self.async_loc_updated
)
self._setup_fixed_updating()
async def async_will_remove_from_hass(self):
"""Disconnect from update signal and cancel fixed updating."""
if self._unsub_loc_updated:
self._unsub_loc_updated()
if self._unsub_update:
self._unsub_update()
self._name = self._orig_name
def _get_astral_event(self, event, date_or_dt):
return astral_event(self._info, event, date_or_dt, self._solar_depression)
def _get_data(self, date_or_dt):
return self._get_astral_event(self._event, date_or_dt)
def _update(self):
today = dt_util.now(self._tzinfo).date()
self._yesterday = self._get_data(today - _ONE_DAY)
self._state = self._today = self._get_data(today)
self._tomorrow = self._get_data(today + _ONE_DAY)
async def async_update(self):
"""Update state."""
self._update()
class Sun2PointInTimeSensor(Sun2Sensor):
"""Sun2 Point in Time Sensor."""
def __init__(self, hass, sensor_type, icon, info):
"""Initialize sensor."""
super().__init__(hass, sensor_type, icon, info, "civil")
@property
def device_class(self):
"""Return the class of this device."""
return DEVICE_CLASS_TIMESTAMP
def _update(self):
super()._update()
if self._state != "none":
self._state = self._state.isoformat()
def _hours_to_hms(hours):
try:
return str(timedelta(hours=hours)).split(".")[0]
except TypeError:
return None
class Sun2PeriodOfTimeSensor(Sun2Sensor):
"""Sun2 Period of Time Sensor."""
def __init__(self, hass, sensor_type, icon, info):
"""Initialize sensor."""
super().__init__(hass, sensor_type, icon, info, 0.833)
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return "hr"
def _device_state_attributes(self):
data = super()._device_state_attributes()
data.update(
{
"yesterday_hms": _hours_to_hms(data["yesterday"]),
"today_hms": _hours_to_hms(data["today"]),
"tomorrow_hms": _hours_to_hms(data["tomorrow"]),
}
)
return data
def _get_data(self, date_or_dt):
if "daylight" in self._event:
start = self._get_astral_event("dawn", date_or_dt)
end = self._get_astral_event("dusk", date_or_dt)
else:
start = self._get_astral_event("dusk", date_or_dt)
end = self._get_astral_event("dawn", date_or_dt + _ONE_DAY)
if "none" in (start, end):
return None
return (end - start).total_seconds() / 3600
def _update(self):
super()._update()
if self._state is not None:
self._state = round(self._state, 3)
class Sun2MinMaxElevationSensor(Sun2Sensor):
"""Sun2 Min/Max Elevation Sensor."""
def __init__(self, hass, sensor_type, icon, info, is_min):
"""Initialize sensor."""
super().__init__(hass, sensor_type, icon, info)
self._event = "solar_midnight" if is_min else "solar_noon"
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return "°"
def _get_data(self, date_or_dt):
event_time = self._get_astral_event(self._event, date_or_dt)
return self._get_astral_event("solar_elevation", event_time)
def _update(self):
super()._update()
if self._state is not None:
self._state = round(self._state, 3)
class Sun2MinElevationSensor(Sun2MinMaxElevationSensor):
"""Sun2 Min Elevation Sensor."""
def __init__(self, hass, sensor_type, icon, info):
"""Initialize sensor."""
super().__init__(hass, sensor_type, icon, info, is_min=True)
class Sun2MaxElevationSensor(Sun2MinMaxElevationSensor):
"""Sun2 Max Elevation Sensor."""
def __init__(self, hass, sensor_type, icon, info):
"""Initialize sensor."""
super().__init__(hass, sensor_type, icon, info, is_min=False)
def _nearest_multiple(value, multiple):
return int(round(value / multiple)) * multiple
def _calc_nxt_time(time0, elev0, time1, elev1, trg_elev):
return nearest_second(
time0 + (time1 - time0) * ((trg_elev - elev0) / (elev1 - elev0))
)
class Sun2ElevationSensor(Sun2Sensor):
"""Sun2 Elevation Sensor."""
def __init__(self, hass, sensor_type, icon, info):
"""Initialize sensor."""
super().__init__(hass, sensor_type, icon, info)
self._reset()
def _reset(self):
self._prv_sol_midn = None
self._sol_noon = None
self._sol_midn = None
self._prv_time = None
self._prv_elev = None
self._next_change = None
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {ATTR_NEXT_CHANGE: self._next_change}
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return "°"
def _loc_updated(self):
"""Location updated."""
self._reset()
super()._loc_updated()
def _setup_fixed_updating(self):
pass
def _get_nxt_time(self, time1, elev1, trg_elev, min_time, max_time):
if self._prv_time < min_time:
return None
time0 = self._prv_time
elev0 = self._prv_elev
nxt_elev = trg_elev + 1.5 * _ELEV_MAX_ERR
while abs(nxt_elev - trg_elev) >= _ELEV_MAX_ERR:
try:
nxt_time = _calc_nxt_time(time0, elev0, time1, elev1, trg_elev)
except ZeroDivisionError:
return None
if nxt_time < min_time or nxt_time > max_time:
return None
if nxt_time in (time0, time1):
break
nxt_elev = astral_event(self._info, "solar_elevation", nxt_time)
if nxt_time > time1:
time0 = time1
elev0 = elev1
time1 = nxt_time
elev1 = nxt_elev
elif elev0 < trg_elev < nxt_elev or elev0 > trg_elev > nxt_elev:
time1 = nxt_time
elev1 = nxt_elev
else:
time0 = nxt_time
elev0 = nxt_elev
return nxt_time
def _set_nxt_time(self, cur_time):
if self._sol_noon - _DELTA <= cur_time < self._sol_noon:
return self._sol_noon
elif self._sol_midn - _DELTA <= cur_time:
return self._sol_midn
else:
return cur_time + _DELTA
def _update(self):
# Astral package ignores microseconds, so round to nearest second
# before continuing.
cur_time = nearest_second(dt_util.now(self._tzinfo))
cur_elev = astral_event(self._info, "solar_elevation", cur_time)
self._state = f"{cur_elev:0.1f}"
_LOGGER.debug("Raw elevation = %f -> %s", cur_elev, self._state)
# Find the next solar midnight AFTER the current time, and the solar noon and
# solar midnight that precede it. This only needs to be done once a day when we
# reach or pass the previously determined solar midnight.
if not self._sol_midn or cur_time >= self._sol_midn:
date = cur_time.date()
# solar_midnight() returns the solar midnight (which is when the
# sun reaches its lowest point) nearest to the start of today. Note
# that it may have occurred yesterday.
self._sol_midn = astral_event(self._info, "solar_midnight", date)
while self._sol_midn <= cur_time:
date += _ONE_DAY
self._sol_midn = astral_event(self._info, "solar_midnight", date)
self._sol_noon = astral_event(self._info, "solar_noon", date - _ONE_DAY)
self._prv_sol_midn = astral_event(
self._info, "solar_midnight", date - _ONE_DAY
)
_LOGGER.debug(
"Solar midnight/noon/midnight: %s/%0.2f, %s/%0.2f, %s/%0.2f",
self._prv_sol_midn,
astral_event(self._info, "solar_elevation", self._prv_sol_midn),
self._sol_noon,
astral_event(self._info, "solar_elevation", self._sol_noon),
self._sol_midn,
astral_event(self._info, "solar_elevation", self._sol_midn),
)
if self._prv_time:
# Extrapolate based on previous point and current point to find
# next point.
rnd_elev = _nearest_multiple(cur_elev, _ELEV_RND)
if cur_time < self._sol_noon:
nxt_time = self._get_nxt_time(
cur_time,
cur_elev,
rnd_elev + _ELEV_RND,
self._prv_sol_midn,
self._sol_noon,
)
else:
nxt_time = self._get_nxt_time(
cur_time,
cur_elev,
rnd_elev - _ELEV_RND,
self._sol_noon,
self._sol_midn,
)
else:
nxt_time = None
if not nxt_time:
nxt_time = self._set_nxt_time(cur_time)
self._prv_time = cur_time
self._prv_elev = cur_elev
self._next_change = dt_util.as_local(nxt_time)
@callback
def async_update(now):
self._unsub_update = None
self.async_schedule_update_ha_state(True)
self._unsub_update = async_track_point_in_utc_time(
self.hass, async_update, nxt_time
)
_SENSOR_TYPES = {
# Points in time
"solar_midnight": (Sun2PointInTimeSensor, "mdi:weather-night"),
"astronomical_dawn": (Sun2PointInTimeSensor, "mdi:weather-sunset-up"),
"nautical_dawn": (Sun2PointInTimeSensor, "mdi:weather-sunset-up"),
"dawn": (Sun2PointInTimeSensor, "mdi:weather-sunset-up"),
"sunrise": (Sun2PointInTimeSensor, "mdi:weather-sunset-up"),
"solar_noon": (Sun2PointInTimeSensor, "mdi:weather-sunny"),
"sunset": (Sun2PointInTimeSensor, "mdi:weather-sunset-down"),
"dusk": (Sun2PointInTimeSensor, "mdi:weather-sunset-down"),
"nautical_dusk": (Sun2PointInTimeSensor, "mdi:weather-sunset-down"),
"astronomical_dusk": (Sun2PointInTimeSensor, "mdi:weather-sunset-down"),
# Time periods
"daylight": (Sun2PeriodOfTimeSensor, "mdi:weather-sunny"),
"civil_daylight": (Sun2PeriodOfTimeSensor, "mdi:weather-sunny"),
"nautical_daylight": (Sun2PeriodOfTimeSensor, "mdi:weather-sunny"),
"astronomical_daylight": (Sun2PeriodOfTimeSensor, "mdi:weather-sunny"),
"night": (Sun2PeriodOfTimeSensor, "mdi:weather-night"),
"civil_night": (Sun2PeriodOfTimeSensor, "mdi:weather-night"),
"nautical_night": (Sun2PeriodOfTimeSensor, "mdi:weather-night"),
"astronomical_night": (Sun2PeriodOfTimeSensor, "mdi:weather-night"),
# Min/Max elevation
"min_elevation": (Sun2MinElevationSensor, "mdi:weather-night"),
"max_elevation": (Sun2MaxElevationSensor, "mdi:weather-sunny"),
# Elevation
"elevation": (Sun2ElevationSensor, "mdi:weather-sunny"),
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MONITORED_CONDITIONS): vol.All(
cv.ensure_list, [vol.In(_SENSOR_TYPES)]
),
vol.Inclusive(CONF_LATITUDE, "location"): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, "location"): cv.longitude,
vol.Inclusive(CONF_TIME_ZONE, "location"): cv.time_zone,
vol.Inclusive(CONF_ELEVATION, "location"): vol.Coerce(float),
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up sensors."""
if CONF_LATITUDE in config:
info = (
config[CONF_LATITUDE],
config[CONF_LONGITUDE],
config[CONF_TIME_ZONE],
config[CONF_ELEVATION],
)
else:
info = None
async_add_entities(
[
_SENSOR_TYPES[event][0](hass, event, _SENSOR_TYPES[event][1], info)
for event in config[CONF_MONITORED_CONDITIONS]
],
True,
)
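A minimal sketch of the _calc_nxt_time helper that _get_nxt_time calls above. The helper is defined earlier in this file and is not shown here; this reconstruction from its call site is an assumption, not the original implementation. It interpolates linearly between two (time, elevation) samples and raises ZeroDivisionError when the elevations are equal, which the caller catches.

def _calc_nxt_time(time0, elev0, time1, elev1, trg_elev):
    # times are datetimes, elevations are floats; solve for the time at which
    # the straight line through (time0, elev0) and (time1, elev1) reaches trg_elev
    return time0 + (time1 - time0) * ((trg_elev - elev0) / (elev1 - elev0))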
| 8,004
| 0
| 603
|
a1eadad05f7982e2c83e9588ec9bbf91d80cb087
| 780
|
py
|
Python
|
backend/applications/models/abstract/allable.py
|
simonfong6/applications
|
dea9e324f33b3647ab8e1fcf01f67b37e20e37a9
|
[
"MIT"
] | null | null | null |
backend/applications/models/abstract/allable.py
|
simonfong6/applications
|
dea9e324f33b3647ab8e1fcf01f67b37e20e37a9
|
[
"MIT"
] | null | null | null |
backend/applications/models/abstract/allable.py
|
simonfong6/applications
|
dea9e324f33b3647ab8e1fcf01f67b37e20e37a9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Allable
"""
from applications.database.table import Table
from .base import Base
from .buildable import Buildable
from .jsonable import Jsonable
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-a', '--arg1',
help="An argument.",
type=str,
default='default')
args = parser.parse_args()
main(args)
| 17.727273
| 50
| 0.580769
|
#!/usr/bin/env python3
"""
Allable
"""
from applications.database.table import Table
from .base import Base
from .buildable import Buildable
from .jsonable import Jsonable
class Allable(
Buildable,
Jsonable
):
@classmethod
def all(cls, json=False):
items = cls.table.get_all()
objs = [cls.build(item) for item in items]
if json:
objs = [obj.json() for obj in objs]
return objs
def main(args):
pass
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-a', '--arg1',
help="An argument.",
type=str,
default='default')
args = parser.parse_args()
main(args)
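A hypothetical usage sketch of the Allable mixin (not part of the repository; the model name, table name, and Table constructor call are assumptions). A concrete model attaches its backing table, and all() fetches every row, builds model instances, and optionally returns their JSON forms.

from applications.database.table import Table
from applications.models.abstract.allable import Allable

class User(Allable):
    # assumed: concrete models expose their backing table at class level
    table = Table('users')

users = User.all()                # list of built User objects
users_json = User.all(json=True)  # same rows, serialized via each object's .json()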
| 182
| 68
| 46
|
a554c06e0f65bba35d7853d913b3a09e1943fe3a
| 8,166
|
py
|
Python
|
tests/test_views.py
|
gradel/aldryn-forms
|
a53e751078e810436504a175101f3216649d21cd
|
[
"BSD-3-Clause"
] | 24
|
2019-03-31T17:27:37.000Z
|
2021-12-14T22:38:10.000Z
|
tests/test_views.py
|
gradel/aldryn-forms
|
a53e751078e810436504a175101f3216649d21cd
|
[
"BSD-3-Clause"
] | 126
|
2015-01-21T19:39:42.000Z
|
2019-01-09T06:50:45.000Z
|
tests/test_views.py
|
gradel/aldryn-forms
|
a53e751078e810436504a175101f3216649d21cd
|
[
"BSD-3-Clause"
] | 44
|
2015-08-25T08:19:45.000Z
|
2019-01-03T12:34:18.000Z
|
import sys
from distutils.version import LooseVersion
from unittest import skipIf, skipUnless
from django import VERSION as DJANGO_VERSION
from django.urls import clear_url_caches
import cms
from cms.api import add_plugin, create_page
from cms.appresolver import clear_app_resolvers
from cms.test_utils.testcases import CMSTestCase
# Version guards: DJANGO_111 means Django >= 1.11; CMS_3_6 means django CMS < 4.0.
DJANGO_111 = DJANGO_VERSION[:2] >= (1, 11)
CMS_3_6 = LooseVersion(cms.__version__) < LooseVersion('4.0')
| 28.354167
| 104
| 0.563189
|
import sys
from distutils.version import LooseVersion
from unittest import skipIf, skipUnless
from django import VERSION as DJANGO_VERSION
from django.urls import clear_url_caches
import cms
from cms.api import add_plugin, create_page
from cms.appresolver import clear_app_resolvers
from cms.test_utils.testcases import CMSTestCase
# Version guards: DJANGO_111 means Django >= 1.11; CMS_3_6 means django CMS < 4.0.
DJANGO_111 = DJANGO_VERSION[:2] >= (1, 11)
CMS_3_6 = LooseVersion(cms.__version__) < LooseVersion('4.0')
class SubmitFormViewTest(CMSTestCase):
def setUp(self):
self.APP_MODULE = 'aldryn_forms.cms_apps.FormsApp'
clear_app_resolvers()
clear_url_caches()
if self.APP_MODULE in sys.modules:
del sys.modules[self.APP_MODULE]
self.page = create_page(
'tpage',
'test_page.html',
'en',
published=True,
apphook='FormsApp',
)
try:
self.placeholder = self.page.placeholders.get(slot='content')
except AttributeError:
self.placeholder = self.page.get_placeholders('en').get(slot='content')
self.redirect_url = 'http://www.google.com'
plugin_data = {
'redirect_type': 'redirect_to_url',
'url': self.redirect_url,
}
self.form_plugin = add_plugin(self.placeholder, 'FormPlugin', 'en', **plugin_data) # noqa: E501
add_plugin(
self.placeholder,
'SubmitButton',
'en',
target=self.form_plugin,
label='Submit',
)
self.form_plugin.action_backend = 'default'
self.form_plugin.save()
if CMS_3_6:
self.page.publish('en')
self.reload_urls()
self.apphook_clear()
def tearDown(self):
clear_app_resolvers()
clear_url_caches()
if self.APP_MODULE in sys.modules:
del sys.modules[self.APP_MODULE]
self.reload_urls()
self.apphook_clear()
def reload_urls(self):
from django.conf import settings
url_modules = [
'cms.urls',
self.APP_MODULE,
settings.ROOT_URLCONF,
]
clear_app_resolvers()
clear_url_caches()
for module in url_modules:
if module in sys.modules:
del sys.modules[module]
@skipUnless(DJANGO_111, 'Django>=1.11')
def test_form_view_and_submission_with_apphook_django_gte_111(self):
if CMS_3_6:
public_page = self.page.publisher_public
else:
public_page = self.page
try:
public_placeholder = public_page.placeholders.first()
except AttributeError:
public_placeholder = public_page.get_placeholders('en').first()
public_page_form_plugin = (
public_placeholder
.cmsplugin_set
.filter(plugin_type='FormPlugin')
.first()
)
response = self.client.get(self.page.get_absolute_url('en'))
input_string = '<input type="hidden" name="form_plugin_id" value="{}"'
self.assertContains(response, input_string.format(public_page_form_plugin.id)) # noqa: E501
response = self.client.post(self.page.get_absolute_url('en'), {
'form_plugin_id': public_page_form_plugin.id,
})
self.assertRedirects(response, self.redirect_url, fetch_redirect_response=False) # noqa: E501
@skipIf(DJANGO_111, 'Django<1.11')
def test_form_view_and_submission_with_apphook_django_lt_111(self):
public_placeholder = (
self
.page
.publisher_public
.placeholders
.first()
)
public_page_form_plugin = (
public_placeholder
.cmsplugin_set
.filter(plugin_type='FormPlugin')
.first()
)
response = self.client.get(self.page.get_absolute_url('en'))
input_string = '<input type="hidden" name="form_plugin_id" value="{}"'
self.assertContains(response, input_string.format(public_page_form_plugin.id)) # noqa: E501
response = self.client.post(self.page.get_absolute_url('en'), {
'form_plugin_id': public_page_form_plugin.id,
})
self.assertRedirects(response, self.redirect_url, fetch_redirect_response=False) # noqa: E501
def test_view_submit_one_form_instead_multiple(self):
"""Test checks if only one form is send instead of multiple on page together"""
page = create_page(
'multiple forms',
'test_page.html',
'en',
published=True,
apphook='FormsApp',
)
placeholder = page.placeholders.get(slot='content')
form_plugin = add_plugin(
placeholder,
'FormPlugin',
'en',
) # noqa: E501
add_plugin(
placeholder,
'EmailField',
'en',
name='email_1',
required=True,
target=form_plugin,
label='Submit',
)
add_plugin(
placeholder,
'SubmitButton',
'en',
target=form_plugin,
label='Submit',
)
form_plugin.action_backend = 'default'
form_plugin.save()
plugin_data2 = {
'redirect_type': 'redirect_to_url',
'url': 'https://google.com/',
}
form_plugin2 = add_plugin(
placeholder,
'FormPlugin',
'en',
**plugin_data2
) # noqa: E501
add_plugin(
placeholder,
'SubmitButton',
'en',
target=form_plugin2,
label='Submit',
)
form_plugin2.action_backend = 'default'
form_plugin2.save()
page.publish('en')
self.reload_urls()
self.apphook_clear()
response = self.client.post(page.get_absolute_url('en'), {
'form_plugin_id': form_plugin2.id,
'email_1': 'test@test',
})
self.assertRedirects(response, plugin_data2['url'], fetch_redirect_response=False) # noqa: E501
def test_view_submit_one_valid_form_instead_multiple(self):
"""Test checks if only one form is validated instead multiple on a page"""
page = create_page(
'multiple forms',
'test_page.html',
'en',
published=True,
apphook='FormsApp',
)
placeholder = page.placeholders.get(slot='content')
form_plugin = add_plugin(
placeholder,
'FormPlugin',
'en',
) # noqa: E501
add_plugin(
placeholder,
'EmailField',
'en',
name='email_1',
required=True,
target=form_plugin,
)
add_plugin(
placeholder,
'SubmitButton',
'en',
target=form_plugin,
label='Submit',
)
form_plugin.action_backend = 'default'
form_plugin.save()
form_plugin2 = add_plugin(
placeholder,
'FormPlugin',
'en',
) # noqa: E501
add_plugin(
placeholder,
'EmailField',
'en',
name='email_2',
required=True,
target=form_plugin2,
)
add_plugin(
placeholder,
'SubmitButton',
'en',
target=form_plugin2,
label='Submit',
)
form_plugin2.action_backend = 'default'
form_plugin2.save()
page.publish('en')
self.reload_urls()
self.apphook_clear()
response = self.client.post(page.get_absolute_url('en'), {
'form_plugin_id': form_plugin2.id,
'email_2': 'test@test',
})
email_field = '<input type="email" name="{name}"'
self.assertContains(response, email_field.format(name='email_1'))
self.assertContains(response, email_field.format(name='email_2'))
| 3,649
| 4,017
| 23
|
4988e3e2f37fae2c39d045aac6365b9829ae00a4
| 478
|
py
|
Python
|
deepl/main.py
|
bitplus/deepl-cli
|
da7c76f1ea20977275d009552c1c02f756cbf32b
|
[
"MIT"
] | 56
|
2020-10-30T15:05:32.000Z
|
2022-03-27T21:16:58.000Z
|
deepl/main.py
|
bitplus/deepl-cli
|
da7c76f1ea20977275d009552c1c02f756cbf32b
|
[
"MIT"
] | 38
|
2020-07-13T06:22:35.000Z
|
2022-03-31T20:16:28.000Z
|
deepl/main.py
|
bitplus/deepl-cli
|
da7c76f1ea20977275d009552c1c02f756cbf32b
|
[
"MIT"
] | 10
|
2020-07-28T18:40:22.000Z
|
2022-03-17T11:07:17.000Z
|
#!/usr/bin/env python3
import sys
from . import deepl
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
| 20.782609
| 64
| 0.604603
|
#!/usr/bin/env python3
import sys
from . import deepl
def main() -> None:
t = deepl.DeepLCLI()
t.chk_cmdargs()
t.fr_lang, t.to_lang = sys.argv[1].split(':')
script = sys.stdin.read()
print('Translating...', end='', file=sys.stderr, flush=True)
result = t.translate(script)
print('\033[1K\033[G', end='', file=sys.stderr, flush=True)
print(result)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
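A usage note inferred from the code above rather than from project documentation: main() takes the language pair as the first CLI argument ("from:to") and reads the text to translate from stdin. The entry-point name below is an assumption, and the call needs network access to DeepL.

# Assumed shell invocation:
#   echo "Hello, world." | python -m deepl en:ja
import io
import sys
from deepl.main import main

sys.argv[1:] = ['en:ja']                  # translate English -> Japanese
sys.stdin = io.StringIO('Hello, world.')  # text to translate
main()                                    # prints the translation to stdout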
| 303
| 0
| 23
|
9447782acc0cd1ca418445929d22ee41dc741757
| 3,101
|
py
|
Python
|
babyrobot/src/speech_features/src/speech_feature_server.py
|
babyrobot-eu/core-modules
|
7e8c006c40153fb649208c9a78fc71aa70243f69
|
[
"MIT"
] | 1
|
2019-02-07T15:32:06.000Z
|
2019-02-07T15:32:06.000Z
|
babyrobot/src/speech_features/src/speech_feature_server.py
|
babyrobot-eu/core-modules
|
7e8c006c40153fb649208c9a78fc71aa70243f69
|
[
"MIT"
] | 9
|
2020-01-28T22:09:41.000Z
|
2022-03-11T23:39:17.000Z
|
babyrobot/src/speech_features/src/speech_feature_server.py
|
babyrobot-eu/core-modules
|
7e8c006c40153fb649208c9a78fc71aa70243f69
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import csv
import os
import rospy
import uuid
from babyrobot.lib import utils as br_utils
from babyrobot.speech_features import config as sf_config
from babyrobot_msgs.msg import SpeechFeatures, Feature
from babyrobot_msgs.srv import SpeechFeatureExtraction
from babyrobot_msgs.srv import SpeechFeatureExtractionResponse
def handle_speech_features(req):
'''
Extract speech features upon a client request.
Args:
req: A request object containing the following fields:
audio_segment
            opensmile_conf
response_format
metadata
Returns:
A SpeechFeatureExtractionResponse containing a SpeechFeatures
ROS message.
'''
br_utils.write_wav(req.audio_segment.clip, sf_config.TEMP_FILE.WAV)
if req.response_format == 'arff':
out_file = sf_config.TEMP_FILE.ARFF_OUTPUT
out_file_cmd = '--arffoutput {}'.format(out_file)
else:
out_file = sf_config.TEMP_FILE.CSV_OUTPUT
out_file_cmd = '--csvoutput {}'.format(out_file)
opensmile_conf = os.path.join(sf_config.OPENSMILE.CONFIG_DIR,
req.opensmile_conf)
if not opensmile_conf.endswith('.conf'):
opensmile_conf += '.conf'
cmd = '{0}/SMILExtract -C {1} -I {2} {3}'.format(
sf_config.OPENSMILE.ROOT_DIR,
opensmile_conf,
sf_config.TEMP_FILE.WAV,
out_file_cmd)
#rospy.loginfo('Extracting features using "{}"'.format(cmd))
ret_code, stdout, stderr = br_utils.run_cmd(cmd)
if ret_code != 0:
rospy.logerr('Failed to extract features. Returning empty message')
rospy.logerr(stdout)
rospy.logerr(stderr)
return SpeechFeatureExtractionResponse(SpeechFeatures())
msg = SpeechFeatures()
msg.related_segment_id = req.audio_segment.header.id
msg.header.id = str(uuid.uuid1())
msg.header.timestamp = rospy.Time.now()
with open(out_file, 'r') as out_fd:
if req.response_format == 'arff':
msg.arff_file = out_fd.read()
elif req.response_format in ['list', '']:
reader = csv.DictReader(out_fd)
data = reader.next()
msg.features = []
for d in data.iteritems():
feat = Feature()
feat.feature_name = d[0]
feat.feature_value = float(d[1])
msg.features.append(feat)
else:
rospy.logerr('Invalid response format. Returning empty message')
return SpeechFeatureExtractionResponse(SpeechFeatures())
return SpeechFeatureExtractionResponse(msg)
def speech_features_server():
'''
Initialize a ROS node and run the SpeechFeatureExtraction service
Args:
Returns:
'''
rospy.init_node(sf_config.ROS_CONFIG.SERVER_NODE)
rospy.Service(sf_config.ROS_CONFIG.SERVICE_NAME,
SpeechFeatureExtraction,
handle_speech_features)
#rospy.loginfo("Speech Features server started.")
rospy.spin()
if __name__ == "__main__":
speech_features_server()
| 32.302083
| 76
| 0.656885
|
#!/usr/bin/env python
import csv
import os
import rospy
import uuid
from babyrobot.lib import utils as br_utils
from babyrobot.speech_features import config as sf_config
from babyrobot_msgs.msg import SpeechFeatures, Feature
from babyrobot_msgs.srv import SpeechFeatureExtraction
from babyrobot_msgs.srv import SpeechFeatureExtractionResponse
def handle_speech_features(req):
'''
Extract speech features upon a client request.
Args:
req: A request object containing the following fields:
audio_segment
            opensmile_conf
response_format
metadata
Returns:
A SpeechFeatureExtractionResponse containing a SpeechFeatures
ROS message.
'''
br_utils.write_wav(req.audio_segment.clip, sf_config.TEMP_FILE.WAV)
if req.response_format == 'arff':
out_file = sf_config.TEMP_FILE.ARFF_OUTPUT
out_file_cmd = '--arffoutput {}'.format(out_file)
else:
out_file = sf_config.TEMP_FILE.CSV_OUTPUT
out_file_cmd = '--csvoutput {}'.format(out_file)
opensmile_conf = os.path.join(sf_config.OPENSMILE.CONFIG_DIR,
req.opensmile_conf)
if not opensmile_conf.endswith('.conf'):
opensmile_conf += '.conf'
cmd = '{0}/SMILExtract -C {1} -I {2} {3}'.format(
sf_config.OPENSMILE.ROOT_DIR,
opensmile_conf,
sf_config.TEMP_FILE.WAV,
out_file_cmd)
#rospy.loginfo('Extracting features using "{}"'.format(cmd))
ret_code, stdout, stderr = br_utils.run_cmd(cmd)
if ret_code != 0:
rospy.logerr('Failed to extract features. Returning empty message')
rospy.logerr(stdout)
rospy.logerr(stderr)
return SpeechFeatureExtractionResponse(SpeechFeatures())
msg = SpeechFeatures()
msg.related_segment_id = req.audio_segment.header.id
msg.header.id = str(uuid.uuid1())
msg.header.timestamp = rospy.Time.now()
with open(out_file, 'r') as out_fd:
if req.response_format == 'arff':
msg.arff_file = out_fd.read()
elif req.response_format in ['list', '']:
reader = csv.DictReader(out_fd)
data = reader.next()
msg.features = []
for d in data.iteritems():
feat = Feature()
feat.feature_name = d[0]
feat.feature_value = float(d[1])
msg.features.append(feat)
else:
rospy.logerr('Invalid response format. Returning empty message')
return SpeechFeatureExtractionResponse(SpeechFeatures())
return SpeechFeatureExtractionResponse(msg)
def speech_features_server():
'''
Initialize a ROS node and run the SpeechFeatureExtraction service
Args:
Returns:
'''
rospy.init_node(sf_config.ROS_CONFIG.SERVER_NODE)
rospy.Service(sf_config.ROS_CONFIG.SERVICE_NAME,
SpeechFeatureExtraction,
handle_speech_features)
#rospy.loginfo("Speech Features server started.")
rospy.spin()
if __name__ == "__main__":
speech_features_server()
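For illustration, a minimal client-side sketch of calling this service. It is not part of the original module, and the request construction is only described in a comment because the .srv definition is not shown here.

import rospy
from babyrobot.speech_features import config as sf_config
from babyrobot_msgs.srv import SpeechFeatureExtraction

rospy.init_node('speech_features_client')
rospy.wait_for_service(sf_config.ROS_CONFIG.SERVICE_NAME)
extract = rospy.ServiceProxy(sf_config.ROS_CONFIG.SERVICE_NAME,
                             SpeechFeatureExtraction)
# Build a request carrying audio_segment, opensmile_conf, response_format
# ('arff', 'list' or '') and metadata, then call: response = extract(request)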
| 0
| 0
| 0
|
0193adfbe712d27ed14b6903b2a69b42e44904be
| 473
|
py
|
Python
|
2015/task_2/first.py
|
romanthekat/advent_of_code
|
d9005d9824ab7aadd6bc93fd88421f6fdc95520e
|
[
"Apache-2.0"
] | null | null | null |
2015/task_2/first.py
|
romanthekat/advent_of_code
|
d9005d9824ab7aadd6bc93fd88421f6fdc95520e
|
[
"Apache-2.0"
] | 5
|
2016-07-03T19:07:55.000Z
|
2019-12-10T19:24:25.000Z
|
2015/task_2/first.py
|
EvilKhaosKat/advent_of_code
|
d9005d9824ab7aadd6bc93fd88421f6fdc95520e
|
[
"Apache-2.0"
] | null | null | null |
total = 0
with open("input.txt") as f:
total = sum(calculate_paper(gift) for gift in f.readlines())
print(total)
| 19.708333
| 66
| 0.575053
|
def get_smallest_area(l, w, h):
sizes = [l, w, h]
sizes.remove(max(sizes))
return sizes[0] * sizes[1]
def calculate_paper(gift):
l, w, h = map(int, gift.split("x"))
return get_surface_area(h, l, w) + get_smallest_area(l, w, h)
def get_surface_area(l, w, h):
return 2 * l * w + 2 * w * h + 2 * h * l
total = 0
with open("input.txt") as f:
total = sum(calculate_paper(gift) for gift in f.readlines())
print(total)
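A quick worked check of the two functions above, using the puzzle's published examples: a 2x3x4 present needs 2*6 + 2*12 + 2*8 = 52 square feet of paper plus 6 square feet of slack (the area of its smallest side), and a 1x1x10 present needs 42 + 1 = 43.

assert calculate_paper("2x3x4") == 58
assert calculate_paper("1x1x10") == 43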
| 269
| 0
| 73
|
f7a2819ac7ec77e96ae6a88ef722bf74ac99b857
| 17,937
|
py
|
Python
|
avro_json_serializer/test/test_avro_json_serializer.py
|
rushton/python-avro-json-serializer
|
52549a0d5958b08b0fca3e419eaefc3e0c6ec99c
|
[
"Apache-2.0"
] | 126
|
2015-02-26T20:20:04.000Z
|
2022-03-25T19:36:22.000Z
|
avro_json_serializer/test/test_avro_json_serializer.py
|
rushton/python-avro-json-serializer
|
52549a0d5958b08b0fca3e419eaefc3e0c6ec99c
|
[
"Apache-2.0"
] | 15
|
2016-06-01T17:26:58.000Z
|
2022-03-17T20:40:37.000Z
|
avro_json_serializer/test/test_avro_json_serializer.py
|
rushton/python-avro-json-serializer
|
52549a0d5958b08b0fca3e419eaefc3e0c6ec99c
|
[
"Apache-2.0"
] | 39
|
2015-01-20T22:12:05.000Z
|
2022-02-18T03:23:41.000Z
|
# -*- coding: utf-8 -*-
# (c) [2014] LinkedIn Corp. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import avro.io
import avro.schema
import six
from unittest import TestCase
if six.PY2:
from avro.schema import make_avsc_object
else:
from avro.schema import SchemaFromJSONData as make_avsc_object
long = int
from avro_json_serializer import AvroJsonSerializer, AvroJsonDeserializer
| 34.560694
| 233
| 0.53543
|
# -*- coding: utf-8 -*-
# (c) [2014] LinkedIn Corp. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import avro.io
import avro.schema
import six
from unittest import TestCase
if six.PY2:
from avro.schema import make_avsc_object
else:
from avro.schema import SchemaFromJSONData as make_avsc_object
long = int
from avro_json_serializer import AvroJsonSerializer, AvroJsonDeserializer
class TestAvroJsonSerializer(TestCase):
FIELD_ENUM = {
"name": "fruit",
"type": {
"name": "Fruit",
"type": "enum",
"symbols": [
"ORANGE",
"APPLE",
"PINEAPPLE"
]
}
}
FIELD_INT = {
"name": "fint",
"type": "int"
}
FIELD_LONG = {
"name": "flong",
"type": "long"
}
FIELD_FLOAT = {
"name": "ffloat",
"type": "float"
}
FIELD_DOUBLE = {
"name": "fdouble",
"type": "double"
}
FIELD_STRING = {
"name": "fstring",
"type": "string"
}
FIELD_ARRAY_INT = {
"type": {"type": "array", "items": "int"},
"name": "intarr"
}
FIELD_MAP_INT = {
"type": {"type": "map", "values": "int"},
"name": "intmap"
}
FIELD_FIXED = {
"type": {
"name": "fixed_16",
"size": 16,
"type": "fixed"
},
"size": 16,
"name": "ffixed"
}
FIELD_RECORD = {
"type": {
"name": "Rec",
"fields": [{
"name": "subfint",
"type": "int"
}],
"type": "record"
},
"name": "frec"
}
FIELD_UNION_NULL_INT = {
"name": "funion_null",
"type": [
"int",
"null"
]
}
FIELD_UNION_RECORDS = {
"name": "funion_rec",
"type": [
{
"type": "record",
"name": "rec1",
"fields": [
{
"name": "field",
"type": "int"
}
]
},
{
"type": "record",
"namespace": "example.avro",
"name": "rec2",
"fields": [
{
"name": "field",
"type": "string"
}
]
}
]
}
ALL_FIELDS_SCHEMA = {
"type": "record",
"name": "all_field",
"fields": [
FIELD_ENUM,
FIELD_INT,
FIELD_LONG,
FIELD_STRING,
FIELD_FIXED,
FIELD_RECORD,
FIELD_UNION_NULL_INT,
FIELD_FLOAT,
FIELD_DOUBLE,
FIELD_ARRAY_INT,
FIELD_MAP_INT
],
"namespace": "com.some.thing"
}
UNION_FIELDS_SCHEMA = {
"type": "record",
"name": "unions",
"fields": [
FIELD_UNION_NULL_INT
]
}
UNION_RECORDS_SCHEMA = {
"type": "record",
"name": "unions",
"fields": [
FIELD_UNION_RECORDS
]
}
VALID_DATA_ALL_FIELDS = {
"fruit": "ORANGE",
"fint": 1,
"flong": long(1),
"ffloat": 1.0,
"fdouble": 2.0,
"fstring": "hi there",
"ffixed": b"1234567890123456",
"frec": {
"subfint": 2
},
"funion_null": None,
"intarr": [1, 2, 3],
"intmap": {"one": 1}
}
# unions can't be serialized directly; must be in a record
INDIVIDUALLY_SERIALIZABLE = list(ALL_FIELDS_SCHEMA['fields'])
INDIVIDUALLY_SERIALIZABLE.remove(FIELD_UNION_NULL_INT)
def test_all_supported_types(self):
avro_schema = make_avsc_object(self.ALL_FIELDS_SCHEMA, avro.schema.Names())
data = self.VALID_DATA_ALL_FIELDS
avro_json = AvroJsonSerializer(avro_schema).to_json(data)
self.assertEquals(avro_json, """{"fruit":"ORANGE","fint":1,"flong":1,"fstring":"hi there","ffixed":"1234567890123456","frec":{"subfint":2},"funion_null":null,"ffloat":1.0,"fdouble":2.0,"intarr":[1,2,3],"intmap":{"one":1}}""")
json_data = AvroJsonDeserializer(avro_schema).from_json(avro_json)
self.assertEquals(json_data, data)
def test_individually_allowed_fields_separately(self):
for field in self.INDIVIDUALLY_SERIALIZABLE:
# unwrap enum, fixed, array, and map but save the name for value lookup
name = field['name']
if isinstance(field['type'], dict):
field = field['type']
avro_schema = make_avsc_object(field, avro.schema.Names())
data = self.VALID_DATA_ALL_FIELDS[name]
avro_json = AvroJsonSerializer(avro_schema).to_json(data)
json_data = AvroJsonDeserializer(avro_schema).from_json(avro_json)
self.assertEquals(json_data, data)
def test_fails_validation(self):
avro_schema = make_avsc_object(self.ALL_FIELDS_SCHEMA, avro.schema.Names())
data = dict(self.VALID_DATA_ALL_FIELDS)
data["ffloat"] = "hi"
serializer = AvroJsonSerializer(avro_schema)
self.assertRaises(avro.io.AvroTypeException, serializer.to_json, data)
def test_union_serialization_null(self):
avro_schema = make_avsc_object(self.UNION_FIELDS_SCHEMA, avro.schema.Names())
data = {
"funion_null": None
}
avro_json = AvroJsonSerializer(avro_schema).to_json(data)
self.assertEquals(avro_json, """{"funion_null":null}""")
json_data = AvroJsonDeserializer(avro_schema).from_json(avro_json)
self.assertEquals(json_data, data)
def test_union_serialization_not_null(self):
avro_schema = make_avsc_object(self.UNION_FIELDS_SCHEMA, avro.schema.Names())
data = {
"funion_null": 1
}
avro_json = AvroJsonSerializer(avro_schema).to_json(data)
self.assertEquals(avro_json, """{"funion_null":{"int":1}}""")
json_data = AvroJsonDeserializer(avro_schema).from_json(avro_json)
self.assertEquals(json_data, data)
def test_union_serialization_invalid(self):
avro_schema = make_avsc_object(self.UNION_FIELDS_SCHEMA, avro.schema.Names())
data = {
"funion_null": "hi"
}
serializer = AvroJsonSerializer(avro_schema)
self.assertRaises(avro.io.AvroTypeException, serializer.to_json, data)
def test_records_union(self):
avro_schema = make_avsc_object(self.UNION_RECORDS_SCHEMA, avro.schema.Names())
data = {
"funion_rec": {
"field": 1
}
}
avro_json = AvroJsonSerializer(avro_schema).to_json(data)
self.assertEquals(avro_json, """{"funion_rec":{"rec1":{"field":1}}}""")
json_data = AvroJsonDeserializer(avro_schema).from_json(avro_json)
self.assertEquals(json_data, data)
data_another_record = {
"funion_rec": {
"field": "hi"
}
}
another_record_json = AvroJsonSerializer(avro_schema).to_json(data_another_record)
self.assertEquals(another_record_json, """{"funion_rec":{"example.avro.rec2":{"field":"hi"}}}""")
another_json_data = AvroJsonDeserializer(avro_schema).from_json(another_record_json)
self.assertEquals(another_json_data, data_another_record)
def test_map(self):
schema_dict = {
"type": "record",
"name": "rec",
"fields": [
self.FIELD_MAP_INT
]
}
data = {
"intmap": {
"one": 1,
"two": 2
}
}
unicode_dict = {
'intmap': {
'one': 1,
u'two': 2
}
}
avro_schema = make_avsc_object(schema_dict, avro.schema.Names())
avro_json = AvroJsonSerializer(avro_schema).to_json(data)
# Dictionaries are unsorted
self.assertIn(avro_json, ("""{"intmap":{"one":1,"two":2}}""", """{"intmap":{"two":2,"one":1}}"""))
deserializer = AvroJsonDeserializer(avro_schema)
json_data = deserializer.from_json(avro_json)
self.assertEquals(json_data, data)
mixed_unicode = deserializer.from_dict(unicode_dict)
self.assertEquals(mixed_unicode, data)
def test_array(self):
schema_dict = {
"type": "record",
"name": "rec",
"fields": [
self.FIELD_ARRAY_INT
]
}
data = {
"intarr": [1, 2, 3]
}
avro_schema = make_avsc_object(schema_dict, avro.schema.Names())
avro_json = AvroJsonSerializer(avro_schema).to_json(data)
self.assertEquals(avro_json, """{"intarr":[1,2,3]}""")
json_data = AvroJsonDeserializer(avro_schema).from_json(avro_json)
self.assertEquals(json_data, data)
def test_user_record(self):
"""
This schema example is from documentation http://avro.apache.org/docs/1.7.6/gettingstartedpython.html
"""
schema_dict = {
"namespace": "example.avro",
"type": "record",
"name": "User",
"fields": [
{"name": "name", "type": "string"},
{"name": "favorite_number", "type": ["int", "null"]},
{"name": "favorite_color", "type": ["string", "null"]}
]
}
avro_schema = make_avsc_object(schema_dict, avro.schema.Names())
serializer = AvroJsonSerializer(avro_schema)
deserializer = AvroJsonDeserializer(avro_schema)
alyssa = {"name": "Alyssa", "favorite_number": 256}
alyssa_full = {"name": "Alyssa", "favorite_number": 256, "favorite_color": None}
alyssa_json = """{"name":"Alyssa","favorite_number":{"int":256},"favorite_color":null}"""
self.assertEquals(serializer.to_json(alyssa), alyssa_json)
self.assertEquals(deserializer.from_json(alyssa_json), alyssa_full)
ben = {"name": "Ben", "favorite_number": 7, "favorite_color": "red"}
ben_json = """{"name":"Ben","favorite_number":{"int":7},"favorite_color":{"string":"red"}}"""
self.assertEquals(serializer.to_json(ben), ben_json)
self.assertEquals(deserializer.from_json(ben_json), ben)
lion = {"name": "Lion"}
lion_full = {"name": "Lion", "favorite_number": None, "favorite_color": None}
lion_json = """{"name":"Lion","favorite_number":null,"favorite_color":null}"""
self.assertEquals(serializer.to_json(lion), lion_json)
self.assertEquals(deserializer.from_json(lion_json), lion_full)
def test_nested_union_records(self):
schema_dict = {
"namespace": "nested",
"name": "OuterType",
"type": "record",
"fields": [{
"name": "outer",
"type": ["null", {
"name": "MiddleType",
"type": "record",
"fields": [{
"name": "middle",
"type": ["null", {
"name": "InnerType",
"type": "record",
"fields": [{
"name": "inner",
"type": "int"
}]
}]
}]
}]
}]
}
data1 = {"outer": {"middle": {"inner": 1}}}
data2 = {"outer": {"middle": None}}
avro1 = """{"outer":{"nested.MiddleType":{"middle":{"nested.InnerType":{"inner":1}}}}}"""
avro2 = """{"outer":{"nested.MiddleType":{"middle":null}}}"""
avro_schema = make_avsc_object(schema_dict, avro.schema.Names())
serializer = AvroJsonSerializer(avro_schema)
self.assertEquals(serializer.to_json(data1), avro1)
self.assertEquals(serializer.to_json(data2), avro2)
deserializer = AvroJsonDeserializer(avro_schema)
self.assertEquals(deserializer.from_json(avro1), data1)
self.assertEquals(deserializer.from_json(avro2), data2)
def test_fixed_non_ascii(self):
schema_dict = {
"namespace": "example.avro",
"type": "record",
"name": "WithFixed",
"fields": [
self.FIELD_FIXED
]
}
data = {"ffixed": b"(~^\xfbzoW\x13p\x19!4\x0b+\x00\x00"}
avro_schema = make_avsc_object(schema_dict, avro.schema.Names())
serializer = AvroJsonSerializer(avro_schema)
avro_json = serializer.to_json(data)
self.assertEquals(avro_json, """{"ffixed":"(~^\\u00fbzoW\\u0013p\\u0019!4\\u000b+\\u0000\\u0000"}""")
json_data = AvroJsonDeserializer(avro_schema).from_json(avro_json)
self.assertEquals(json_data, data)
def test_fixed_ascii(self):
schema_dict = {
"namespace": "example.avro",
"type": "record",
"name": "WithFixed",
"fields": [
self.FIELD_FIXED
]
}
data = {"ffixed": b"fixed text here!"}
avro_schema = make_avsc_object(schema_dict, avro.schema.Names())
serializer = AvroJsonSerializer(avro_schema)
avro_json = serializer.to_json(data)
self.assertEquals(avro_json, """{"ffixed":"fixed text here!"}""")
json_data = AvroJsonDeserializer(avro_schema).from_json(avro_json)
self.assertEquals(json_data, data)
def test_bytes_field_non_ascii(self):
schema_dict = {
"namespace": "example.avro",
"type": "record",
"name": "WithFixed",
"fields": [
{
"type": "bytes",
"name": "fbytes"
}
]
}
data = {"fbytes": b"(~^\xfbzoW\x13p\x19!4\x0b+\x00\x00\x0b+\x00\x00"}
avro_schema = make_avsc_object(schema_dict, avro.schema.Names())
serializer = AvroJsonSerializer(avro_schema)
avro_json = serializer.to_json(data)
self.assertEquals(avro_json, """{"fbytes":"(~^\\u00fbzoW\\u0013p\\u0019!4\\u000b+\\u0000\\u0000\\u000b+\\u0000\\u0000"}""")
json_data = AvroJsonDeserializer(avro_schema).from_json(avro_json)
self.assertEquals(json_data, data)
def test_bytes_field_ascii(self):
schema_dict = {
"namespace": "example.avro",
"type": "record",
"name": "WithFixed",
"fields": [
{
"type": "bytes",
"name": "fbytes"
}
]
}
data = {"fbytes": b"this is some long bytes field"}
avro_schema = make_avsc_object(schema_dict, avro.schema.Names())
serializer = AvroJsonSerializer(avro_schema)
avro_json = serializer.to_json(data)
self.assertEquals(avro_json, """{"fbytes":"this is some long bytes field"}""")
json_data = AvroJsonDeserializer(avro_schema).from_json(avro_json)
self.assertEquals(json_data, data)
class TestAvroJsonDeserializer(TestCase):
def test_missing_nullable_field(self):
schema_dict = {
"type": "record",
"name": "WithDefault",
"fields": [
{
"type": "string",
"name": "name"
},
{
"type": ["null", "int"],
"name": "version",
"default": None
}
]
}
avro_json = """{"name":"mcnameface"}"""
avro_schema = make_avsc_object(schema_dict, avro.schema.Names())
deserializer = AvroJsonDeserializer(avro_schema)
self.assertRaises(avro.io.AvroTypeException, deserializer.from_json, avro_json)
def test_unknown_fields_are_ignored(self):
schema_dict = {
"type": "record",
"name": "BasicName",
"fields": [
{
"type": "string",
"name": "name"
}
]
}
avro_json = """{"name":"todd","age":1}"""
avro_schema = make_avsc_object(schema_dict, avro.schema.Names())
json_data = AvroJsonDeserializer(avro_schema).from_json(avro_json)
self.assertEquals(json_data, {"name": "todd"})
def test_dict_with_unicode_bytes(self):
schema_dict = {
"namespace": "example.avro",
"type": "record",
"name": "WithBytes",
"fields": [
{
"type": "bytes",
"name": "fbytes"
}
]
}
# byte arrays should be left alone
byte_data = {"fbytes": b"(~^\xfbzoW\x13p\x19!4\x0b+\x00\x00\x0b+\x00\x00"}
avro_schema = make_avsc_object(schema_dict, avro.schema.Names())
self.assertEquals(AvroJsonDeserializer(avro_schema).from_dict(byte_data), byte_data)
# unicode strings should be turned into iso-8859-1 bytes
iso8859_data = {'fbytes': b"(~^\xfbzoW\x13p\x19!4\x0b+\x00\x00"}
unicode_data = {u'fbytes': u'(~^\xfbzoW\x13p\x19!4\x0b+\x00\x00'}
self.assertEquals(AvroJsonDeserializer(avro_schema).from_dict(unicode_data), iso8859_data)
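A minimal round-trip sketch distilled from the tests above; the schema and values here are made up for illustration.

import avro.schema
from avro_json_serializer import AvroJsonSerializer, AvroJsonDeserializer
try:                                      # same py2/py3 switch as the tests use
    from avro.schema import make_avsc_object
except ImportError:
    from avro.schema import SchemaFromJSONData as make_avsc_object

schema = make_avsc_object(
    {"type": "record", "name": "Pair", "fields": [
        {"name": "key", "type": "string"},
        {"name": "value", "type": "int"}]},
    avro.schema.Names())
record = {"key": "a", "value": 1}
as_json = AvroJsonSerializer(schema).to_json(record)   # '{"key":"a","value":1}'
assert AvroJsonDeserializer(schema).from_json(as_json) == record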
| 11,461
| 5,541
| 126
|
37d5a92d4d65aceb39da6abee706d1d900f41616
| 74
|
py
|
Python
|
src/vve_cli/__init__.py
|
buckw6eat/vve_cli
|
0efd238818ac7f620c4707542aa815110777c69a
|
[
"MIT"
] | null | null | null |
src/vve_cli/__init__.py
|
buckw6eat/vve_cli
|
0efd238818ac7f620c4707542aa815110777c69a
|
[
"MIT"
] | null | null | null |
src/vve_cli/__init__.py
|
buckw6eat/vve_cli
|
0efd238818ac7f620c4707542aa815110777c69a
|
[
"MIT"
] | null | null | null |
from vve_cli.main import run
__version__ = "0.0.1"
__all__ = ["run"]
| 14.8
| 29
| 0.648649
|
from vve_cli.main import run
__version__ = "0.0.1"
__all__ = ["run"]
| 0
| 0
| 0
|
3ec7091ec5c1783e0cf99f2f40d55b372d54f92b
| 134
|
py
|
Python
|
graph_db/driver/null/__init__.py
|
josegomezr/graph_db
|
1ef286c9afdd4fd18559cccee9456dbc72ba7a8d
|
[
"Apache-2.0"
] | 4
|
2015-11-19T01:22:19.000Z
|
2020-09-05T03:03:24.000Z
|
graph_db/driver/null/__init__.py
|
josegomezr/graph_db
|
1ef286c9afdd4fd18559cccee9456dbc72ba7a8d
|
[
"Apache-2.0"
] | 1
|
2016-03-10T01:11:03.000Z
|
2016-03-10T01:11:03.000Z
|
graph_db/driver/null/__init__.py
|
josegomezr/graph_db
|
1ef286c9afdd4fd18559cccee9456dbc72ba7a8d
|
[
"Apache-2.0"
] | 1
|
2016-03-08T00:03:18.000Z
|
2016-03-08T00:03:18.000Z
|
from . import driver
from ... import exceptions
def Factory(settings):
"""@todo docstring"""
return driver.DBDriver(settings)
| 22.333333
| 36
| 0.708955
|
from . import driver
from ... import exceptions
def Factory(settings):
"""@todo docstring"""
return driver.DBDriver(settings)
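Hypothetical usage (the contents of the settings dict are an assumption; a null driver presumably ignores them):

from graph_db.driver.null import Factory

db = Factory({})  # returns the package's no-op driver.DBDriver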
| 0
| 0
| 0
|
0ac7b5a001174a156fb9a33cfe506c862fca047e
| 18,781
|
py
|
Python
|
cpp_faster_fifo/cpp_lib/googletest/googletest-1.10.0/googletest/test/gtest_xml_output_unittest.py
|
leikinman/faster-fifo
|
a8d472247640de592c3b332cc0e3068be50f083b
|
[
"MIT"
] | 72
|
2020-04-09T01:43:31.000Z
|
2022-03-24T17:08:31.000Z
|
cpp_faster_fifo/cpp_lib/googletest/googletest-1.10.0/googletest/test/gtest_xml_output_unittest.py
|
leikinman/faster-fifo
|
a8d472247640de592c3b332cc0e3068be50f083b
|
[
"MIT"
] | 26
|
2020-04-09T02:33:14.000Z
|
2022-03-31T11:41:21.000Z
|
cpp_faster_fifo/cpp_lib/googletest/googletest-1.10.0/googletest/test/gtest_xml_output_unittest.py
|
leikinman/faster-fifo
|
a8d472247640de592c3b332cc0e3068be50f083b
|
[
"MIT"
] | 13
|
2020-04-08T04:10:45.000Z
|
2022-02-08T07:10:38.000Z
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
import datetime
import errno
import os
import re
import sys
from xml.dom import minidom
import gtest_test_utils
import gtest_xml_test_utils
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = '--gtest_output'
GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml'
GTEST_PROGRAM_NAME = 'gtest_xml_output_unittest_'
# The flag indicating stacktraces are not supported
NO_STACKTRACE_SUPPORT_FLAG = '--no_stacktrace_support'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
SUPPORTS_STACK_TRACES = NO_STACKTRACE_SUPPORT_FLAG not in sys.argv
if SUPPORTS_STACK_TRACES:
STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
STACK_TRACE_TEMPLATE = ''
# unittest.main() can't handle unknown flags
sys.argv.remove(NO_STACKTRACE_SUPPORT_FLAG)
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="24" failures="4" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="Succeeds" status="run" result="completed" time="*" timestamp="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="Fails" status="run" result="completed" time="*" timestamp="*" classname="FailedTest">
<failure message="gtest_xml_output_unittest_.cc:*
Expected equality of these values:
 1
 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected equality of these values:
1
2%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*" timestamp="*">
<testcase name="Succeeds" status="run" result="completed" time="*" timestamp="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" result="completed" time="*" timestamp="*" classname="MixedResultTest">
<failure message="gtest_xml_output_unittest_.cc:*
Expected equality of these values:
 1
 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected equality of these values:
1
2%(stack)s]]></failure>
<failure message="gtest_xml_output_unittest_.cc:*
Expected equality of these values:
 2
 3" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected equality of these values:
2
3%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" result="suppressed" time="*" timestamp="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="OutputsCData" status="run" result="completed" time="*" timestamp="*" classname="XmlQuotingTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="InvalidCharactersInMessage" status="run" result="completed" time="*" timestamp="*" classname="InvalidCharactersTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*" timestamp="*">
<testcase name="DISABLED_test_not_run" status="notrun" result="suppressed" time="*" timestamp="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="SkippedTest" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="Skipped" status="run" result="skipped" time="*" timestamp="*" classname="SkippedTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*" timestamp="*" SetUpTestSuite="yes" TearDownTestSuite="aye">
<testcase name="OneProperty" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest">
<properties>
<property name="key_1" value="1"/>
</properties>
</testcase>
<testcase name="IntValuedProperty" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest">
<properties>
<property name="key_int" value="1"/>
</properties>
</testcase>
<testcase name="ThreeProperties" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest">
<properties>
<property name="key_1" value="1"/>
<property name="key_2" value="2"/>
<property name="key_3" value="3"/>
</properties>
</testcase>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest">
<properties>
<property name="key_1" value="2"/>
</properties>
</testcase>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="RecordProperty" status="run" result="completed" time="*" timestamp="*" classname="NoFixtureTest">
<properties>
<property name="key" value="1"/>
</properties>
</testcase>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" result="completed" time="*" timestamp="*" classname="NoFixtureTest">
<properties>
<property name="key_for_utility_int" value="1"/>
</properties>
</testcase>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" result="completed" time="*" timestamp="*" classname="NoFixtureTest">
<properties>
<property name="key_for_utility_string" value="1"/>
</properties>
</testcase>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" />
<testcase name="HasValueParamAttribute/1" value_param="42" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" />
</testsuite>
<testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" result="completed" time="*" timestamp="*" classname="TypedTest/0" />
</testsuite>
<testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" result="completed" time="*" timestamp="*" classname="TypedTest/1" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestSuite/0" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" result="completed" time="*" timestamp="*" classname="Single/TypeParameterizedTestSuite/0" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestSuite/1" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" result="completed" time="*" timestamp="*" classname="Single/TypeParameterizedTestSuite/1" />
</testsuite>
</testsuites>""" % {
'stack': STACK_TRACE_TEMPLATE
}
EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0"
errors="0" time="*" timestamp="*">
<testcase name="Succeeds" status="run" result="completed" time="*" timestamp="*" classname="SuccessfulTest"/>
</testsuite>
</testsuites>"""
EXPECTED_SHARDED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="3" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="Succeeds" status="run" result="completed" time="*" timestamp="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" SetUpTestSuite="yes" TearDownTestSuite="aye">
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest">
<properties>
<property name="key_1" value="2"/>
</properties>
</testcase>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" />
</testsuite>
</testsuites>"""
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests">
</testsuites>"""
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)
SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
[GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
"""
Unit test for Google Test's XML output functionality.
"""
# This test currently breaks on platforms that do not support typed and
# type-parameterized tests, so we don't run it under them.
if SUPPORTS_TYPED_TESTS:
def testNonEmptyXmlOutput(self):
"""
Runs a test program that generates a non-empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)
def testEmptyXmlOutput(self):
"""Verifies XML output for a Google Test binary without actual tests.
Runs a test program that generates an empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_EMPTY_XML, 0)
def testTimestampValue(self):
"""Checks whether the timestamp attribute in the XML output is valid.
Runs a test program that generates an empty XML output, and checks if
the timestamp attribute in the testsuites tag is valid.
"""
actual = self._GetXmlOutput('gtest_no_test_unittest', [], {}, 0)
date_time_str = actual.documentElement.getAttributeNode('timestamp').value
# datetime.strptime() is only available in Python 2.5+ so we have to
# parse the expected datetime manually.
match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
    self.assertTrue(
        match,
        'XML datetime string %s has incorrect format' % date_time_str)
date_time_from_xml = datetime.datetime(
year=int(match.group(1)), month=int(match.group(2)),
day=int(match.group(3)), hour=int(match.group(4)),
minute=int(match.group(5)), second=int(match.group(6)))
time_delta = abs(datetime.datetime.now() - date_time_from_xml)
# timestamp value should be near the current local time
self.assertTrue(time_delta < datetime.timedelta(seconds=600),
'time_delta is %s' % time_delta)
actual.unlink()
def testDefaultOutputFile(self):
"""
Confirms that Google Test produces an XML output file with the expected
default name if no name is explicitly specified.
"""
output_file = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_DEFAULT_OUTPUT_FILE)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
'gtest_no_test_unittest')
try:
os.remove(output_file)
except OSError:
e = sys.exc_info()[1]
if e.errno != errno.ENOENT:
raise
p = gtest_test_utils.Subprocess(
[gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG],
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
self.assert_(os.path.isfile(output_file))
def testSuppressedXmlOutput(self):
"""
Tests that no XML file is generated if the default XML listener is
shut down before RUN_ALL_TESTS is invoked.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + 'out.xml')
if os.path.isfile(xml_path):
os.remove(xml_path)
command = [GTEST_PROGRAM_PATH,
'%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path),
'--shut_down_xml']
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
# p.signal is available only if p.terminated_by_signal is True.
self.assertFalse(
p.terminated_by_signal,
'%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(1, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, 1))
self.assert_(not os.path.isfile(xml_path))
def testFilteredTestXmlOutput(self):
"""Verifies XML output when a filter is applied.
Runs a test program that executes only some tests and verifies that
non-selected tests do not show up in the XML output.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0,
extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])
def testShardedTestXmlOutput(self):
"""Verifies XML output when run using multiple shards.
Runs a test program that executes only one shard and verifies that tests
from other shards do not show up in the XML output.
"""
self._TestXmlOutput(
GTEST_PROGRAM_NAME,
EXPECTED_SHARDED_TEST_XML,
0,
extra_env={SHARD_INDEX_ENV_VAR: '0',
TOTAL_SHARDS_ENV_VAR: '10'})
def _GetXmlOutput(self, gtest_prog_name, extra_args, extra_env,
expected_exit_code):
"""
Returns the xml output generated by running the program gtest_prog_name.
Furthermore, the program's exit code must be expected_exit_code.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
gtest_prog_name + 'out.xml')
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] +
extra_args)
environ_copy = os.environ.copy()
if extra_env:
environ_copy.update(extra_env)
p = gtest_test_utils.Subprocess(command, env=environ_copy)
if p.terminated_by_signal:
self.assert_(False,
'%s was killed by signal %d' % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(expected_exit_code, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, expected_exit_code))
actual = minidom.parse(xml_path)
return actual
def _TestXmlOutput(self, gtest_prog_name, expected_xml,
expected_exit_code, extra_args=None, extra_env=None):
"""
Asserts that the XML document generated by running the program
gtest_prog_name matches expected_xml, a string containing another
XML document. Furthermore, the program's exit code must be
expected_exit_code.
"""
actual = self._GetXmlOutput(gtest_prog_name, extra_args or [],
extra_env or {}, expected_exit_code)
expected = minidom.parseString(expected_xml)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
gtest_test_utils.Main()
| 48.15641
| 225
| 0.694159
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
import datetime
import errno
import os
import re
import sys
from xml.dom import minidom
import gtest_test_utils
import gtest_xml_test_utils
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = '--gtest_output'
GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml'
GTEST_PROGRAM_NAME = 'gtest_xml_output_unittest_'
# The flag indicating stacktraces are not supported
NO_STACKTRACE_SUPPORT_FLAG = '--no_stacktrace_support'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
SUPPORTS_STACK_TRACES = NO_STACKTRACE_SUPPORT_FLAG not in sys.argv
if SUPPORTS_STACK_TRACES:
STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
STACK_TRACE_TEMPLATE = ''
# unittest.main() can't handle unknown flags
sys.argv.remove(NO_STACKTRACE_SUPPORT_FLAG)
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="24" failures="4" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="Succeeds" status="run" result="completed" time="*" timestamp="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="Fails" status="run" result="completed" time="*" timestamp="*" classname="FailedTest">
<failure message="gtest_xml_output_unittest_.cc:*
Expected equality of these values:
 1
 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected equality of these values:
1
2%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*" timestamp="*">
<testcase name="Succeeds" status="run" result="completed" time="*" timestamp="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" result="completed" time="*" timestamp="*" classname="MixedResultTest">
<failure message="gtest_xml_output_unittest_.cc:*
Expected equality of these values:
 1
 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected equality of these values:
1
2%(stack)s]]></failure>
<failure message="gtest_xml_output_unittest_.cc:*
Expected equality of these values:
 2
 3" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Expected equality of these values:
2
3%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" result="suppressed" time="*" timestamp="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="OutputsCData" status="run" result="completed" time="*" timestamp="*" classname="XmlQuotingTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="InvalidCharactersInMessage" status="run" result="completed" time="*" timestamp="*" classname="InvalidCharactersTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*" timestamp="*">
<testcase name="DISABLED_test_not_run" status="notrun" result="suppressed" time="*" timestamp="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="SkippedTest" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="Skipped" status="run" result="skipped" time="*" timestamp="*" classname="SkippedTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*" timestamp="*" SetUpTestSuite="yes" TearDownTestSuite="aye">
<testcase name="OneProperty" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest">
<properties>
<property name="key_1" value="1"/>
</properties>
</testcase>
<testcase name="IntValuedProperty" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest">
<properties>
<property name="key_int" value="1"/>
</properties>
</testcase>
<testcase name="ThreeProperties" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest">
<properties>
<property name="key_1" value="1"/>
<property name="key_2" value="2"/>
<property name="key_3" value="3"/>
</properties>
</testcase>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest">
<properties>
<property name="key_1" value="2"/>
</properties>
</testcase>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="RecordProperty" status="run" result="completed" time="*" timestamp="*" classname="NoFixtureTest">
<properties>
<property name="key" value="1"/>
</properties>
</testcase>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" result="completed" time="*" timestamp="*" classname="NoFixtureTest">
<properties>
<property name="key_for_utility_int" value="1"/>
</properties>
</testcase>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" result="completed" time="*" timestamp="*" classname="NoFixtureTest">
<properties>
<property name="key_for_utility_string" value="1"/>
</properties>
</testcase>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" />
<testcase name="HasValueParamAttribute/1" value_param="42" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" />
</testsuite>
<testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" result="completed" time="*" timestamp="*" classname="TypedTest/0" />
</testsuite>
<testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" result="completed" time="*" timestamp="*" classname="TypedTest/1" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestSuite/0" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" result="completed" time="*" timestamp="*" classname="Single/TypeParameterizedTestSuite/0" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestSuite/1" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" result="completed" time="*" timestamp="*" classname="Single/TypeParameterizedTestSuite/1" />
</testsuite>
</testsuites>""" % {
'stack': STACK_TRACE_TEMPLATE
}
EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0"
errors="0" time="*" timestamp="*">
<testcase name="Succeeds" status="run" result="completed" time="*" timestamp="*" classname="SuccessfulTest"/>
</testsuite>
</testsuites>"""
EXPECTED_SHARDED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="3" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="Succeeds" status="run" result="completed" time="*" timestamp="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" SetUpTestSuite="yes" TearDownTestSuite="aye">
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" result="completed" time="*" timestamp="*" classname="PropertyRecordingTest">
<properties>
<property name="key_1" value="2"/>
</properties>
</testcase>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" result="completed" time="*" timestamp="*" classname="Single/ValueParamTest" />
</testsuite>
</testsuites>"""
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests">
</testsuites>"""
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)
SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
[GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
"""
Unit test for Google Test's XML output functionality.
"""
# This test currently breaks on platforms that do not support typed and
# type-parameterized tests, so we don't run it under them.
if SUPPORTS_TYPED_TESTS:
def testNonEmptyXmlOutput(self):
"""
Runs a test program that generates a non-empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)
def testEmptyXmlOutput(self):
"""Verifies XML output for a Google Test binary without actual tests.
Runs a test program that generates an empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_EMPTY_XML, 0)
def testTimestampValue(self):
"""Checks whether the timestamp attribute in the XML output is valid.
Runs a test program that generates an empty XML output, and checks if
the timestamp attribute in the testsuites tag is valid.
"""
actual = self._GetXmlOutput('gtest_no_test_unittest', [], {}, 0)
date_time_str = actual.documentElement.getAttributeNode('timestamp').value
# datetime.strptime() is only available in Python 2.5+ so we have to
# parse the expected datetime manually.
match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
    self.assertTrue(
        match,
        'XML datetime string %s has incorrect format' % date_time_str)
date_time_from_xml = datetime.datetime(
year=int(match.group(1)), month=int(match.group(2)),
day=int(match.group(3)), hour=int(match.group(4)),
minute=int(match.group(5)), second=int(match.group(6)))
time_delta = abs(datetime.datetime.now() - date_time_from_xml)
# timestamp value should be near the current local time
self.assertTrue(time_delta < datetime.timedelta(seconds=600),
'time_delta is %s' % time_delta)
actual.unlink()
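  # Note: where datetime.strptime() is available, the manual regex parse above
  # could likely be replaced with something like
  #   datetime.datetime.strptime(date_time_str, '%Y-%m-%dT%H:%M:%S')
  # the regex is kept only for compatibility with very old interpreters.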
def testDefaultOutputFile(self):
"""
Confirms that Google Test produces an XML output file with the expected
default name if no name is explicitly specified.
"""
output_file = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_DEFAULT_OUTPUT_FILE)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
'gtest_no_test_unittest')
try:
os.remove(output_file)
except OSError:
e = sys.exc_info()[1]
if e.errno != errno.ENOENT:
raise
p = gtest_test_utils.Subprocess(
[gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG],
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
self.assert_(os.path.isfile(output_file))
def testSuppressedXmlOutput(self):
"""
Tests that no XML file is generated if the default XML listener is
shut down before RUN_ALL_TESTS is invoked.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + 'out.xml')
if os.path.isfile(xml_path):
os.remove(xml_path)
command = [GTEST_PROGRAM_PATH,
'%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path),
'--shut_down_xml']
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
# p.signal is available only if p.terminated_by_signal is True.
self.assertFalse(
p.terminated_by_signal,
'%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(1, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, 1))
self.assert_(not os.path.isfile(xml_path))
def testFilteredTestXmlOutput(self):
"""Verifies XML output when a filter is applied.
Runs a test program that executes only some tests and verifies that
non-selected tests do not show up in the XML output.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0,
extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])
def testShardedTestXmlOutput(self):
"""Verifies XML output when run using multiple shards.
Runs a test program that executes only one shard and verifies that tests
from other shards do not show up in the XML output.
"""
self._TestXmlOutput(
GTEST_PROGRAM_NAME,
EXPECTED_SHARDED_TEST_XML,
0,
extra_env={SHARD_INDEX_ENV_VAR: '0',
TOTAL_SHARDS_ENV_VAR: '10'})
def _GetXmlOutput(self, gtest_prog_name, extra_args, extra_env,
expected_exit_code):
"""
Returns the xml output generated by running the program gtest_prog_name.
Furthermore, the program's exit code must be expected_exit_code.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
gtest_prog_name + 'out.xml')
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] +
extra_args)
environ_copy = os.environ.copy()
if extra_env:
environ_copy.update(extra_env)
p = gtest_test_utils.Subprocess(command, env=environ_copy)
if p.terminated_by_signal:
self.assert_(False,
'%s was killed by signal %d' % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(expected_exit_code, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, expected_exit_code))
actual = minidom.parse(xml_path)
return actual
def _TestXmlOutput(self, gtest_prog_name, expected_xml,
expected_exit_code, extra_args=None, extra_env=None):
"""
Asserts that the XML document generated by running the program
gtest_prog_name matches expected_xml, a string containing another
XML document. Furthermore, the program's exit code must be
expected_exit_code.
"""
actual = self._GetXmlOutput(gtest_prog_name, extra_args or [],
extra_env or {}, expected_exit_code)
expected = minidom.parseString(expected_xml)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
gtest_test_utils.Main()
| 0
| 0
| 0
|
b9e3aa32da7570f3bf7048bb5bf62a38902e8fb3
| 7,796
|
py
|
Python
|
3_MNIST/4_5_fc_layers_relu_lrdeclay.py
|
ray-g/TensorFlow-Examples
|
9f0360f0d7f30ffc36e34ea42606da3300e26df2
|
[
"MIT"
] | null | null | null |
3_MNIST/4_5_fc_layers_relu_lrdeclay.py
|
ray-g/TensorFlow-Examples
|
9f0360f0d7f30ffc36e34ea42606da3300e26df2
|
[
"MIT"
] | null | null | null |
3_MNIST/4_5_fc_layers_relu_lrdeclay.py
|
ray-g/TensorFlow-Examples
|
9f0360f0d7f30ffc36e34ea42606da3300e26df2
|
[
"MIT"
] | null | null | null |
import os
import inspect
import shutil
import math
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data as mnist_data
tf.set_random_seed(0)
# Calculate LOG_DIR according to current file
CUR_FILE = inspect.getfile(inspect.currentframe())
LOG_DIR = os.path.join(
os.path.dirname(os.path.abspath(CUR_FILE)), 'logs',
os.path.splitext(os.path.basename(CUR_FILE))[0])
LOG_DIR_TRAIN = os.path.join(LOG_DIR, 'train')
LOG_DIR_VALID = os.path.join(LOG_DIR, 'valid')
LOG_DIR_TEST = os.path.join(LOG_DIR, 'test')
# Check whether LOG_DIR is empty; if it is not, ask before cleaning it.
clean_logs(LOG_DIR, False)
print('TensorFlow Version: ' + tf.__version__)
######################################################################
# Main
_DEBUG_GRAPH = False
MNIST_WIDTH = 28
MNIST_HEIGHT = 28
MNIST_CHANNEL = 1
NUM_CLASSES = 10
BATCH_SIZE = 100
EPOCH_NUM = 20
LEARNING_RATE = 5e-3
TRAIN_SIZE = 55000
VALID_SIZE = 5000
TEST_SIZE = 10000
######################################################################
# The Model
# neural network with 5 layers
#
# · · · · · · · · · · (input data, flattened pixels) X [batch, 784] # 784 = 28*28
# \x/x\x/x\x/x\x/x\x/ -- fully connected layer (relu) W1 [784, 200] B1[200]
# · · · · · · · · · Y1 [batch, 200]
# \x/x\x/x\x/x\x/ -- fully connected layer (relu) W2 [200, 100] B2[100]
# · · · · · · · Y2 [batch, 100]
# \x/x\x/x\x/ -- fully connected layer (relu) W3 [100, 60] B3[60]
# · · · · · Y3 [batch, 60]
# \x/x\x/ -- fully connected layer (relu) W4 [60, 30] B4[30]
# · · · Y4 [batch, 30]
# \x/ -- fully connected layer (softmax) W5 [30, 10] B5[10]
# · Y5 [batch, 10]
#
######################################################################
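# The layers below are stacked with a create_fc_layer helper; a minimal version
# consistent with the fully connected ReLU stack sketched above (truncated
# normal weights, small positive biases, per-layer summaries) could look like:
def create_fc_layer(inputs, size_in, size_out, stddev=0.1, name='fc'):
    with tf.name_scope(name):
        W = tf.Variable(tf.truncated_normal([size_in, size_out], stddev=stddev), name='Weight')
        b = tf.Variable(tf.ones([size_out]) / 10, name='Bias')
        act = tf.nn.relu(tf.matmul(inputs, W) + b)
        tf.summary.histogram('Weight', W)
        tf.summary.histogram('Bias', b)
        return act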
mnist = mnist_data.read_data_sets("data", one_hot=True, reshape=False)
with tf.name_scope('Input'):
x = tf.placeholder(tf.float32, [None, MNIST_HEIGHT, MNIST_WIDTH, MNIST_CHANNEL], name='X')
y = tf.placeholder(tf.float32, [None, NUM_CLASSES], name='Y')
x_flatten = tf.reshape(x, [-1, MNIST_HEIGHT*MNIST_WIDTH], name='X_Flatten')
last_output = x_flatten
layer_size = [MNIST_HEIGHT*MNIST_WIDTH, 200, 100, 60, 30]
for layer in range(4):
name = "layer_{}".format(layer)
last_output = create_fc_layer(last_output, layer_size[layer], layer_size[layer+1], name=name)
with tf.name_scope('Output'):
W = tf.Variable(tf.truncated_normal([layer_size[-1], NUM_CLASSES], stddev=0.1), name='Weights')
b = tf.Variable(tf.zeros([NUM_CLASSES]), name='Biases')
logits = tf.matmul(last_output, W) + b
    y_pred = tf.argmax(logits, axis=1)
tf.summary.histogram('weight', W)
tf.summary.histogram('bias', b)
tf.summary.histogram('logits', logits)
with tf.name_scope('Loss'):
# loss function: cross-entropy = - sum( Y_true * log(Y_pred) )
xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)
xent = tf.reduce_mean(xent) * BATCH_SIZE
tf.summary.scalar('xent', xent)
with tf.name_scope('Accuracy'):
correct_pred = tf.equal(tf.argmax(logits, axis=1), tf.argmax(y, axis=1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
tf.summary.scalar('accuracy', accuracy)
with tf.name_scope('Optimizer'):
lr = tf.placeholder(tf.float32, name='lr')
optimize = tf.train.AdamOptimizer(lr).minimize(xent)
with tf.name_scope('Status'):
global_step = tf.Variable(tf.constant(0), 'step')
with tf.name_scope('Summaries'):
all_summaries = tf.summary.merge_all()
with tf.name_scope('Global_Ops'):
init = tf.global_variables_initializer()
inc_step = global_step.assign_add(1)
if _DEBUG_GRAPH:
writer = tf.summary.FileWriter(LOG_DIR, tf.get_default_graph())
writer.flush()
writer.close()
else:
with tf.Session() as sess:
train_writer = tf.summary.FileWriter(LOG_DIR_TRAIN, sess.graph)
# valid_writer = tf.summary.FileWriter(LOG_DIR_VALID, sess.graph)
test_writer = tf.summary.FileWriter(LOG_DIR_TEST, sess.graph)
sess.run(init)
best_acc = 0
for epoch in range(EPOCH_NUM + 1):
for i in range(TRAIN_SIZE//BATCH_SIZE + 1):
max_lr = 3e-3
min_lr = 1e-4
declay_speed = 2000.
learning_rate = min_lr + (max_lr - min_lr) * math.exp(-i/declay_speed)
# Train
x_batch, y_batch = mnist.train.next_batch(BATCH_SIZE)
step_, _ = sess.run([inc_step, optimize], feed_dict = {x: x_batch, y: y_batch, lr: learning_rate})
if step_ % 10 == 0 or step_ == 1:
acc_, xent_, summ_ = sess.run([accuracy, xent, all_summaries], feed_dict = {x: x_batch, y: y_batch})
print("Train Accuracy: {}, Train loss: {}, step: {}, epoch: {}, iter: {}".format(acc_, xent_, step_, epoch, i))
train_writer.add_summary(summ_, global_step=step_)
if step_ % 100 == 0 or step_ == 1:
# acc_, xent_, summ_ = sess.run([accuracy, xent, all_summaries], feed_dict = {x: mnist.validation.images, y: mnist.validation.labels})
# print("Validation Accuracy: {}, Validation loss: {}, step: {}, epoch: {}, iter: {}".format(acc_, xent_, step_, epoch, i))
# valid_writer.add_summary(summ_, global_step=step_)
acc_, xent_, summ_ = sess.run([accuracy, xent, all_summaries], feed_dict = {x: mnist.test.images, y: mnist.test.labels})
test_writer.add_summary(summ_, global_step=step_)
if acc_ > best_acc:
best_acc = acc_
print("******** Epoch: {} ********: Test Accuracy: {}, Test Loss: {}".format(epoch, acc_, xent_))
print("All Done. Best accuracy: {}".format(best_acc))
train_writer.flush()
train_writer.close()
test_writer.flush()
test_writer.close()
# End of Main
######################################################################
# Automatically show TensorBoard if needed.
show_tensorboard(True)
| 40.604167
| 155
| 0.557081
|
import os
import inspect
import shutil
import math
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data as mnist_data
tf.set_random_seed(0)
# Calculate LOG_DIR according to current file
CUR_FILE = inspect.getfile(inspect.currentframe())
LOG_DIR = os.path.join(
os.path.dirname(os.path.abspath(CUR_FILE)), 'logs',
os.path.splitext(os.path.basename(CUR_FILE))[0])
LOG_DIR_TRAIN = os.path.join(LOG_DIR, 'train')
LOG_DIR_VALID = os.path.join(LOG_DIR, 'valid')
LOG_DIR_TEST = os.path.join(LOG_DIR, 'test')
# Check whether LOG_DIR is empty; if it is not, ask before cleaning it.
def clean_logs(logdir, ask=True):
if logdir == None or len(logdir) < 4:
return
if os.path.exists(logdir) and len(os.listdir(logdir)) > 0:
if ask:
answer = input('Log Folder: ' + logdir + ' is not empty. Clean it? [y/N]')
if answer in ['Y', 'y']:
shutil.rmtree(logdir)
else:
shutil.rmtree(logdir)
clean_logs(LOG_DIR, False)
print('TensorFlow Version: ' + tf.__version__)
######################################################################
# Main
_DEBUG_GRAPH = False
MNIST_WIDTH = 28
MNIST_HEIGHT = 28
MNIST_CHANNEL = 1
NUM_CLASSES = 10
BATCH_SIZE = 100
EPOCH_NUM = 20
LEARNING_RATE = 5e-3
TRAIN_SIZE = 55000
VALID_SIZE = 5000
TEST_SIZE = 10000
######################################################################
# The Model
# neural network with 5 layers
#
# · · · · · · · · · · (input data, flattened pixels) X [batch, 784] # 784 = 28*28
# \x/x\x/x\x/x\x/x\x/ -- fully connected layer (relu) W1 [784, 200] B1[200]
# · · · · · · · · · Y1 [batch, 200]
# \x/x\x/x\x/x\x/ -- fully connected layer (relu) W2 [200, 100] B2[100]
# · · · · · · · Y2 [batch, 100]
# \x/x\x/x\x/ -- fully connected layer (relu) W3 [100, 60] B3[60]
# · · · · · Y3 [batch, 60]
# \x/x\x/ -- fully connected layer (relu) W4 [60, 30] B4[30]
# · · · Y4 [batch, 30]
# \x/ -- fully connected layer (softmax) W5 [30, 10] B5[10]
# · Y5 [batch, 10]
#
######################################################################
mnist = mnist_data.read_data_sets("data", one_hot=True, reshape=False)
def create_fc_layer(inputs, size_in, size_out, stddev=0.1, name='fc'):
with tf.name_scope(name):
W = tf.Variable(tf.truncated_normal([size_in, size_out], stddev=stddev), name='Weight')
b = tf.Variable(tf.ones([size_out])/10, name='Bias')
act = tf.nn.relu(tf.matmul(inputs, W) + b)
tf.summary.histogram('Weight', W)
tf.summary.histogram('Bias', b)
return act
with tf.name_scope('Input'):
x = tf.placeholder(tf.float32, [None, MNIST_HEIGHT, MNIST_WIDTH, MNIST_CHANNEL], name='X')
y = tf.placeholder(tf.float32, [None, NUM_CLASSES], name='Y')
x_flatten = tf.reshape(x, [-1, MNIST_HEIGHT*MNIST_WIDTH], name='X_Flatten')
last_output = x_flatten
layer_size = [MNIST_HEIGHT*MNIST_WIDTH, 200, 100, 60, 30]
for layer in range(4):
name = "layer_{}".format(layer)
last_output = create_fc_layer(last_output, layer_size[layer], layer_size[layer+1], name=name)
with tf.name_scope('Output'):
W = tf.Variable(tf.truncated_normal([layer_size[-1], NUM_CLASSES], stddev=0.1), name='Weights')
b = tf.Variable(tf.zeros([NUM_CLASSES]), name='Biases')
logits = tf.matmul(last_output, W) + b
    y_pred = tf.argmax(logits, axis=1)
tf.summary.histogram('weight', W)
tf.summary.histogram('bias', b)
tf.summary.histogram('logits', logits)
with tf.name_scope('Loss'):
# loss function: cross-entropy = - sum( Y_true * log(Y_pred) )
xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)
xent = tf.reduce_mean(xent) * BATCH_SIZE
tf.summary.scalar('xent', xent)
with tf.name_scope('Accuracy'):
correct_pred = tf.equal(tf.argmax(logits, axis=1), tf.argmax(y, axis=1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
tf.summary.scalar('accuracy', accuracy)
with tf.name_scope('Optimizer'):
lr = tf.placeholder(tf.float32, name='lr')
optimize = tf.train.AdamOptimizer(lr).minimize(xent)
with tf.name_scope('Status'):
global_step = tf.Variable(tf.constant(0), 'step')
with tf.name_scope('Summaries'):
all_summaries = tf.summary.merge_all()
with tf.name_scope('Global_Ops'):
init = tf.global_variables_initializer()
inc_step = global_step.assign_add(1)
if _DEBUG_GRAPH:
writer = tf.summary.FileWriter(LOG_DIR, tf.get_default_graph())
writer.flush()
writer.close()
else:
with tf.Session() as sess:
train_writer = tf.summary.FileWriter(LOG_DIR_TRAIN, sess.graph)
# valid_writer = tf.summary.FileWriter(LOG_DIR_VALID, sess.graph)
test_writer = tf.summary.FileWriter(LOG_DIR_TEST, sess.graph)
sess.run(init)
best_acc = 0
for epoch in range(EPOCH_NUM + 1):
for i in range(TRAIN_SIZE//BATCH_SIZE + 1):
max_lr = 3e-3
min_lr = 1e-4
declay_speed = 2000.
learning_rate = min_lr + (max_lr - min_lr) * math.exp(-i/declay_speed)
# Train
x_batch, y_batch = mnist.train.next_batch(BATCH_SIZE)
step_, _ = sess.run([inc_step, optimize], feed_dict = {x: x_batch, y: y_batch, lr: learning_rate})
if step_ % 10 == 0 or step_ == 1:
acc_, xent_, summ_ = sess.run([accuracy, xent, all_summaries], feed_dict = {x: x_batch, y: y_batch})
print("Train Accuracy: {}, Train loss: {}, step: {}, epoch: {}, iter: {}".format(acc_, xent_, step_, epoch, i))
train_writer.add_summary(summ_, global_step=step_)
if step_ % 100 == 0 or step_ == 1:
# acc_, xent_, summ_ = sess.run([accuracy, xent, all_summaries], feed_dict = {x: mnist.validation.images, y: mnist.validation.labels})
# print("Validation Accuracy: {}, Validation loss: {}, step: {}, epoch: {}, iter: {}".format(acc_, xent_, step_, epoch, i))
# valid_writer.add_summary(summ_, global_step=step_)
acc_, xent_, summ_ = sess.run([accuracy, xent, all_summaries], feed_dict = {x: mnist.test.images, y: mnist.test.labels})
test_writer.add_summary(summ_, global_step=step_)
if acc_ > best_acc:
best_acc = acc_
print("******** Epoch: {} ********: Test Accuracy: {}, Test Loss: {}".format(epoch, acc_, xent_))
print("All Done. Best accuracy: {}".format(best_acc))
train_writer.flush()
train_writer.close()
test_writer.flush()
test_writer.close()
# End of Main
######################################################################
# Automatically show TensorBoard if needed.
def show_tensorboard(auto_show=False):
cmd = 'tensorboard --logdir=train:' + LOG_DIR_TRAIN + ',validation:' + LOG_DIR_VALID + ',test:' + LOG_DIR_TEST
if auto_show:
answer = input('Show tensorboard? [Y/n]')
if not answer in ['N', 'n']:
os.system(cmd)
else:
print("\nRun this command to see logs:\n" + cmd)
show_tensorboard(True)
| 1,104
| 0
| 71
|
0a194f9bd131b7ce2b90308f5d00bdcf14b64528
| 147
|
py
|
Python
|
textract-pipeline/node_modules/aws-cdk/lib/init-templates/app/python/app.template.py
|
musa-b/amazon-textract-serverless-large-scale-document-processing
|
eb628684bc661c9a1fdde4d9d5032b5b3632eece
|
[
"Apache-2.0"
] | null | null | null |
textract-pipeline/node_modules/aws-cdk/lib/init-templates/app/python/app.template.py
|
musa-b/amazon-textract-serverless-large-scale-document-processing
|
eb628684bc661c9a1fdde4d9d5032b5b3632eece
|
[
"Apache-2.0"
] | null | null | null |
textract-pipeline/node_modules/aws-cdk/lib/init-templates/app/python/app.template.py
|
musa-b/amazon-textract-serverless-large-scale-document-processing
|
eb628684bc661c9a1fdde4d9d5032b5b3632eece
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
from aws_cdk import cdk
from %name%.%name%_stack import PyStack
app = cdk.App()
PyStack(app, "%name%-cdk-1")
app.run()
| 12.25
| 39
| 0.680272
|
#!/usr/bin/env python3
from aws_cdk import cdk
from %name%.%name%_stack import PyStack
app = cdk.App()
PyStack(app, "%name%-cdk-1")
app.run()
| 0
| 0
| 0
|
33dfded66f6f6ed12c10687161b04860c3c771dc
| 372
|
py
|
Python
|
oembed/tests/tests/__init__.py
|
EightMedia/djangoembed
|
ee325f7375c48405f9c3e7e2c0fa7f5a08fafd48
|
[
"MIT"
] | 8
|
2015-02-06T19:18:49.000Z
|
2021-01-01T05:46:02.000Z
|
oembed/tests/tests/__init__.py
|
ericholscher/djangoembed
|
8d6c3edcde782285076445577c4a2ad1c96a0350
|
[
"MIT"
] | null | null | null |
oembed/tests/tests/__init__.py
|
ericholscher/djangoembed
|
8d6c3edcde782285076445577c4a2ad1c96a0350
|
[
"MIT"
] | 5
|
2015-03-15T11:41:26.000Z
|
2018-03-08T09:45:26.000Z
|
from oembed.tests.tests.consumer import *
from oembed.tests.tests.models import *
from oembed.tests.tests.parsers import *
from oembed.tests.tests.providers import *
from oembed.tests.tests.resources import *
from oembed.tests.tests.sites import *
from oembed.tests.tests.templatetags import *
from oembed.tests.tests.utils import *
from oembed.tests.tests.views import *
| 37.2
| 45
| 0.806452
|
from oembed.tests.tests.consumer import *
from oembed.tests.tests.models import *
from oembed.tests.tests.parsers import *
from oembed.tests.tests.providers import *
from oembed.tests.tests.resources import *
from oembed.tests.tests.sites import *
from oembed.tests.tests.templatetags import *
from oembed.tests.tests.utils import *
from oembed.tests.tests.views import *
| 0
| 0
| 0
|
97e99c470df1390f7aea25ce8ca02429083c1569
| 2,097
|
py
|
Python
|
my_app/io/cts_parser.py
|
gedoensmanagement/Transkribus_spell_checker
|
1413f1de137e786a4f078e6806b2e856120e78ea
|
[
"MIT"
] | null | null | null |
my_app/io/cts_parser.py
|
gedoensmanagement/Transkribus_spell_checker
|
1413f1de137e786a4f078e6806b2e856120e78ea
|
[
"MIT"
] | null | null | null |
my_app/io/cts_parser.py
|
gedoensmanagement/Transkribus_spell_checker
|
1413f1de137e786a4f078e6806b2e856120e78ea
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
A collection of functions that can extract additional data fields from
the work and passage fields of a Cts object. Which additional fields
are possible depends on the namespace. The "parse_cts" function recognizes
the namespace and decides which function to use for the extraction.
At the end of the whole process the Cts object with its additional data fields
will be returned.
Created on Thu Sep 17 17:58:49 2020
@author: muell018
"""
import re
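# The docstring above describes a dispatcher that picks the right parser from
# the Cts object's namespace; a minimal version (assuming the namespace values
# 'zt', 'tr' and 'zs' map to the parsers defined below) could look like:
def parse_cts(cts_object):
    namespaces = {'zt': zotero_parser,
                  'tr': transkribus_parser,
                  'zs': censorship_parser}
    return namespaces[cts_object.namespace](cts_object)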
def zotero_parser(zt):
""" Treats the Cts object as a Zotero cts object. """
#print("zotero_parser")
#print(zt)
if zt.m.group(4) != '':
zt.mode = zt.m.group(4)
zt.number = zt.m.group(6)
#print(f"Zotero extras: mode = {zt.mode}, number = {zt.number}")
return zt
def transkribus_parser(tr):
""" Treats the Cts object as a Transkribus cts object. """
#print("transkribus_parser")
#print(tr)
tr.col = tr.m.group(2)
tr.doc = tr.m.group(3)
tr.page = tr.m.group(5)
#print(f"Transkribus extras:\ncol = {tr.col}\ndoc = {tr.doc}\npage = {tr.page}")
if tr.m.group(6) != '':
tr.rl = ''
p = re.compile(r'r(\d?)l?(\d?)')
m = p.match(tr.m.group(6))
if m.group(1):
tr.region = m.group(1)
tr.rl += "r"+tr.region
#print(f"region = {tr.region}")
if m.group(2):
tr.line = m.group(2)
tr.rl += "l"+tr.line
#print(f"line = {tr.line}")
if tr.subreference != '':
tr.word = tr.subreference
#print(f"word = {tr.word}")
return tr
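# Example: a group(6) fragment such as 'r3l7' makes the block above set
# tr.region = '3', tr.line = '7' and tr.rl = 'r3l7'; a non-empty subreference
# is copied into tr.word.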
def censorship_parser(zs):
""" Treats the Cts object as a censorship cts. """
#print("censorship_parser: yet to be programmed... ")
if __name__ == "__main__":
main()
| 29.535211
| 85
| 0.570815
|
# -*- coding: utf-8 -*-
"""
A collection of functions that can extract additional data fields from
the work and passage fields of a Cts object. Which additional fields
are possible depends on the namespace. The "parse_cts" function recognizes
the namespace and decides which function to use for the extraction.
At the end of the whole process the Cts object with its additional data fields
will be returned.
Created on Thu Sep 17 17:58:49 2020
@author: muell018
"""
import re
def parse_cts(cts_object):
namespaces = {'zt': zotero_parser,
'tr': transkribus_parser,
'zs': censorship_parser}
namespaces[cts_object.namespace](cts_object)
def zotero_parser(zt):
""" Treats the Cts object as a Zotero cts object. """
#print("zotero_parser")
#print(zt)
if zt.m.group(4) != '':
zt.mode = zt.m.group(4)
zt.number = zt.m.group(6)
#print(f"Zotero extras: mode = {zt.mode}, number = {zt.number}")
return zt
def transkribus_parser(tr):
""" Treats the Cts object as a Transkribus cts object. """
#print("transkribus_parser")
#print(tr)
tr.col = tr.m.group(2)
tr.doc = tr.m.group(3)
tr.page = tr.m.group(5)
#print(f"Transkribus extras:\ncol = {tr.col}\ndoc = {tr.doc}\npage = {tr.page}")
if tr.m.group(6) != '':
tr.rl = ''
p = re.compile(r'r(\d?)l?(\d?)')
m = p.match(tr.m.group(6))
if m.group(1):
tr.region = m.group(1)
tr.rl += "r"+tr.region
#print(f"region = {tr.region}")
if m.group(2):
tr.line = m.group(2)
tr.rl += "l"+tr.line
#print(f"line = {tr.line}")
if tr.subreference != '':
tr.word = tr.subreference
#print(f"word = {tr.word}")
return tr
def censorship_parser(zs):
""" Treats the Cts object as a censorship cts. """
#print("censorship_parser: yet to be programmed... ")
def main():
pass
if __name__ == "__main__":
main()
| 190
| 0
| 54
|
f12ba53f4df48091653b5508515d7229865bd54b
| 3,647
|
py
|
Python
|
handlers/drive_handler.py
|
google/b-con
|
365ba51cef6fbdd05ceb410ce6fad3e542a65cf4
|
[
"Apache-2.0"
] | 4
|
2020-08-27T07:00:09.000Z
|
2021-10-21T00:43:36.000Z
|
handlers/drive_handler.py
|
google/b-con
|
365ba51cef6fbdd05ceb410ce6fad3e542a65cf4
|
[
"Apache-2.0"
] | null | null | null |
handlers/drive_handler.py
|
google/b-con
|
365ba51cef6fbdd05ceb410ce6fad3e542a65cf4
|
[
"Apache-2.0"
] | 2
|
2020-09-15T04:17:28.000Z
|
2020-09-15T04:23:27.000Z
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handler for accessing files and folders on sharepoint.
This handler includes all the functions needed to access and modify files and
folders on sharepoint.
"""
import glob
import io
import os
import pathlib
import zipfile
from typing import List
from absl import app
from absl import flags
from absl import logging
from googleapiclient import http
from utils import config
from utils import service_account_credentials
FLAGS = flags.FLAGS
def _download_file(service: object, file_id: str, file_name: str,
download_path: str):
"""Download files to the specified path."""
request = service.files().get_media(fileId=file_id)
fh = io.BytesIO()
downloader = http.MediaIoBaseDownload(fh, request)
done = False
while not done:
status, done = downloader.next_chunk()
with open(f'{download_path}/{file_name}', 'wb') as f:
f.write(fh.getvalue())
def download_invoices(folder_id: str, download_path: str):
"""Downloads invoices from the given drive folder id to the specified path."""
# Check that the destination folder exists.
pathlib.Path(download_path).mkdir(parents=True, exist_ok=True)
service = service_account_credentials.get_drive_service()
files = service.files().list(
q=f'(\'{folder_id}\' in parents) and (mimeType != \'application/vnd.google-apps.folder\')',
).execute()
for f in files['files']:
_download_file(service, f['id'], f['name'], download_path)
def extract_zip_files(download_path: str, extract_path: str):
"""Extracts the zip files of the invoices downloaded."""
for f in glob.glob(os.path.join(download_path, '*.zip')):
with zipfile.ZipFile(f) as zf:
zf.extractall(extract_path)
def get_files(extract_path: str):
"""Fetch the paths to all the files."""
files = []
for f in glob.glob(os.path.join(extract_path, '*.csv')):
files.append(f)
return files
def delete_downloaded_files(folder_paths: List[str]):
"""Deletes downloaded files."""
for folder_path in folder_paths:
for f in glob.glob(os.path.join(folder_path, '*')):
logging.info('Deleting %s', f)
pathlib.Path(f).unlink()
def _move_drive_file_to_completed(service: object, file_obj: object,
completed_folder_id: str):
"""Moves the file object to the completed drive folder."""
file_id = file_obj['id']
previous_parents = ','.join(file_obj['parents'])
service.files().update(
fileId=file_id,
addParents=completed_folder_id,
removeParents=previous_parents,
fields='id, parents',
).execute()
def mark_drive_files_completed(folder_id, completed_folder_id):
"""Marks all the files in the folder as completed."""
logging.info('Marking all drive files completed.')
service = service_account_credentials.get_drive_service()
files = service.files().list(
q=f'(\'{folder_id}\' in parents) and (mimeType != \'application/vnd.google-apps.folder\')',
fields='files(id, parents)',
).execute()
for f in files['files']:
_move_drive_file_to_completed(service, f, completed_folder_id)
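# A possible end-to-end flow with the helpers above (the folder ids and local
# paths here are illustrative placeholders, not values defined in this module):
#
#   download_invoices('SOURCE_FOLDER_ID', '/tmp/invoices')
#   extract_zip_files('/tmp/invoices', '/tmp/invoices/extracted')
#   csv_files = get_files('/tmp/invoices/extracted')
#   mark_drive_files_completed('SOURCE_FOLDER_ID', 'COMPLETED_FOLDER_ID')
#   delete_downloaded_files(['/tmp/invoices', '/tmp/invoices/extracted'])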
| 31.439655
| 97
| 0.717302
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handler for accessing files and folders on sharepoint.
This handler includes all the functions needed to access and modify files and
folders on sharepoint.
"""
import glob
import io
import os
import pathlib
import zipfile
from typing import List
from absl import app
from absl import flags
from absl import logging
from googleapiclient import http
from utils import config
from utils import service_account_credentials
FLAGS = flags.FLAGS
def _download_file(service: object, file_id: str, file_name: str,
download_path: str):
"""Download files to the specified path."""
request = service.files().get_media(fileId=file_id)
fh = io.BytesIO()
downloader = http.MediaIoBaseDownload(fh, request)
done = False
while not done:
status, done = downloader.next_chunk()
with open(f'{download_path}/{file_name}', 'wb') as f:
f.write(fh.getvalue())
def download_invoices(folder_id: str, download_path: str):
"""Downloads invoices from the given drive folder id to the specified path."""
# Check that the destination folder exists.
pathlib.Path(download_path).mkdir(parents=True, exist_ok=True)
service = service_account_credentials.get_drive_service()
files = service.files().list(
q=f'(\'{folder_id}\' in parents) and (mimeType != \'application/vnd.google-apps.folder\')',
).execute()
for f in files['files']:
_download_file(service, f['id'], f['name'], download_path)
def extract_zip_files(download_path: str, extract_path: str):
"""Extracts the zip files of the invoices downloaded."""
for f in glob.glob(os.path.join(download_path, '*.zip')):
with zipfile.ZipFile(f) as zf:
zf.extractall(extract_path)
def get_files(extract_path: str):
"""Fetch the paths to all the files."""
files = []
for f in glob.glob(os.path.join(extract_path, '*.csv')):
files.append(f)
return files
def delete_downloaded_files(folder_paths: List[str]):
"""Deletes downloaded files."""
for folder_path in folder_paths:
for f in glob.glob(os.path.join(folder_path, '*')):
logging.info('Deleting %s', f)
pathlib.Path(f).unlink()
def _move_drive_file_to_completed(service: object, file_obj: object,
completed_folder_id: str):
"""Moves the file object to the completed drive folder."""
file_id = file_obj['id']
previous_parents = ','.join(file_obj['parents'])
service.files().update(
fileId=file_id,
addParents=completed_folder_id,
removeParents=previous_parents,
fields='id, parents',
).execute()
def mark_drive_files_completed(folder_id, completed_folder_id):
"""Marks all the files in the folder as completed."""
logging.info('Marking all drive files completed.')
service = service_account_credentials.get_drive_service()
files = service.files().list(
q=f'(\'{folder_id}\' in parents) and (mimeType != \'application/vnd.google-apps.folder\')',
fields='files(id, parents)',
).execute()
for f in files['files']:
_move_drive_file_to_completed(service, f, completed_folder_id)
| 0
| 0
| 0
|
117de2060310920f81abc8439d94be701d21ee15
| 335
|
py
|
Python
|
Testes/type_set.py
|
Renanrbsc/PadawanPython
|
6dc06a502d59127d0f180847e19b40c581baddd0
|
[
"MIT"
] | null | null | null |
Testes/type_set.py
|
Renanrbsc/PadawanPython
|
6dc06a502d59127d0f180847e19b40c581baddd0
|
[
"MIT"
] | null | null | null |
Testes/type_set.py
|
Renanrbsc/PadawanPython
|
6dc06a502d59127d0f180847e19b40c581baddd0
|
[
"MIT"
] | null | null | null |
import random
# Run 1000 tests:
for _ in range(1000):
    # Generate a set from 10 random integers between 0 and 9:
a = set(random.randint(0, 9) for __ in range(10))
    # Check whether the set is equal to its sorted list:
if list(a) != sorted(a):
        # If they differ, print the set:
print(a)
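# Note: CPython hashes small non-negative ints to themselves, so a set of
# values 0-9 usually stores them in ascending slots and iterates in sorted
# order; the print above only fires for the occasional table layout where
# wrap-around collisions break that order.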
| 25.769231
| 68
| 0.644776
|
import random
# Run 1000 tests:
for _ in range(1000):
    # Generate a set from 10 random integers between 0 and 9:
a = set(random.randint(0, 9) for __ in range(10))
    # Check whether the set is equal to its sorted list:
if list(a) != sorted(a):
        # If they differ, print the set:
print(a)
| 0
| 0
| 0
|
173b1c09a24ba29b73e806482460e8a77d27c133
| 4,051
|
py
|
Python
|
alipay/aop/api/domain/AlipayOverseasTaxNeworderStatusSyncModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/domain/AlipayOverseasTaxNeworderStatusSyncModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/domain/AlipayOverseasTaxNeworderStatusSyncModel.py
|
antopen/alipay-sdk-python-all
|
8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
| 30.923664
| 87
| 0.602567
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayOverseasTaxNeworderStatusSyncModel(object):
def __init__(self):
self._doc_id = None
self._extend_param = None
self._status = None
self._status_change_time = None
self._status_msg = None
self._tax_order_no = None
self._tax_payment_no = None
@property
def doc_id(self):
return self._doc_id
@doc_id.setter
def doc_id(self, value):
self._doc_id = value
@property
def extend_param(self):
return self._extend_param
@extend_param.setter
def extend_param(self, value):
self._extend_param = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def status_change_time(self):
return self._status_change_time
@status_change_time.setter
def status_change_time(self, value):
self._status_change_time = value
@property
def status_msg(self):
return self._status_msg
@status_msg.setter
def status_msg(self, value):
self._status_msg = value
@property
def tax_order_no(self):
return self._tax_order_no
@tax_order_no.setter
def tax_order_no(self, value):
self._tax_order_no = value
@property
def tax_payment_no(self):
return self._tax_payment_no
@tax_payment_no.setter
def tax_payment_no(self, value):
self._tax_payment_no = value
def to_alipay_dict(self):
params = dict()
if self.doc_id:
if hasattr(self.doc_id, 'to_alipay_dict'):
params['doc_id'] = self.doc_id.to_alipay_dict()
else:
params['doc_id'] = self.doc_id
if self.extend_param:
if hasattr(self.extend_param, 'to_alipay_dict'):
params['extend_param'] = self.extend_param.to_alipay_dict()
else:
params['extend_param'] = self.extend_param
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
if self.status_change_time:
if hasattr(self.status_change_time, 'to_alipay_dict'):
params['status_change_time'] = self.status_change_time.to_alipay_dict()
else:
params['status_change_time'] = self.status_change_time
if self.status_msg:
if hasattr(self.status_msg, 'to_alipay_dict'):
params['status_msg'] = self.status_msg.to_alipay_dict()
else:
params['status_msg'] = self.status_msg
if self.tax_order_no:
if hasattr(self.tax_order_no, 'to_alipay_dict'):
params['tax_order_no'] = self.tax_order_no.to_alipay_dict()
else:
params['tax_order_no'] = self.tax_order_no
if self.tax_payment_no:
if hasattr(self.tax_payment_no, 'to_alipay_dict'):
params['tax_payment_no'] = self.tax_payment_no.to_alipay_dict()
else:
params['tax_payment_no'] = self.tax_payment_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOverseasTaxNeworderStatusSyncModel()
if 'doc_id' in d:
o.doc_id = d['doc_id']
if 'extend_param' in d:
o.extend_param = d['extend_param']
if 'status' in d:
o.status = d['status']
if 'status_change_time' in d:
o.status_change_time = d['status_change_time']
if 'status_msg' in d:
o.status_msg = d['status_msg']
if 'tax_order_no' in d:
o.tax_order_no = d['tax_order_no']
if 'tax_payment_no' in d:
o.tax_payment_no = d['tax_payment_no']
return o
| 3,140
| 773
| 23
|
dd0d1e45a6b03586912ed2af58c3ca9521a86418
| 618
|
py
|
Python
|
migrations/versions/4b01613bfbed_.py
|
J4LP/J4OAuth
|
ca757958f1e7069f08e0dae3becd70b90507c871
|
[
"MIT"
] | 1
|
2015-12-15T03:17:15.000Z
|
2015-12-15T03:17:15.000Z
|
migrations/versions/4b01613bfbed_.py
|
J4LP/J4OAuth
|
ca757958f1e7069f08e0dae3becd70b90507c871
|
[
"MIT"
] | null | null | null |
migrations/versions/4b01613bfbed_.py
|
J4LP/J4OAuth
|
ca757958f1e7069f08e0dae3becd70b90507c871
|
[
"MIT"
] | 1
|
2015-12-15T03:17:19.000Z
|
2015-12-15T03:17:19.000Z
|
"""empty message
Revision ID: 4b01613bfbed
Revises: 406cccb640c3
Create Date: 2014-02-07 00:29:22.150808
"""
# revision identifiers, used by Alembic.
revision = '4b01613bfbed'
down_revision = '406cccb640c3'
from alembic import op
import sqlalchemy as sa
| 22.888889
| 88
| 0.697411
|
"""empty message
Revision ID: 4b01613bfbed
Revises: 406cccb640c3
Create Date: 2014-02-07 00:29:22.150808
"""
# revision identifiers, used by Alembic.
revision = '4b01613bfbed'
down_revision = '406cccb640c3'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('client', sa.Column('homepage', sa.String(length=255), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('client', 'homepage')
### end Alembic commands ###
| 312
| 0
| 46
|
963b388fd46ae6be2ff0ea7cc00541ff85694744
| 529
|
py
|
Python
|
django_auth2/tasks.py
|
Nick1994209/django-auth2
|
b8678f06ade985d2b5b0606e6e49bd9d2a49931a
|
[
"MIT"
] | null | null | null |
django_auth2/tasks.py
|
Nick1994209/django-auth2
|
b8678f06ade985d2b5b0606e6e49bd9d2a49931a
|
[
"MIT"
] | null | null | null |
django_auth2/tasks.py
|
Nick1994209/django-auth2
|
b8678f06ade985d2b5b0606e6e49bd9d2a49931a
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import # for python 2.7
from django.conf import settings
from django.core.mail import send_mail as django_send_mail
try:
from celery.task import task
except ImportError:
task = func_add_delay
@task
| 24.045455
| 78
| 0.720227
|
from __future__ import absolute_import # for python 2.7
from django.conf import settings
from django.core.mail import send_mail as django_send_mail
try:
from celery.task import task
except ImportError:
def func_add_delay(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.delay = func
return wrapper
task = func_add_delay
@task
def send_mail(subject, message, to_emails):
django_send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, to_emails)
| 237
| 0
| 48
|
c603a2f92a55764ce391c8c6d64d7603bd21bea4
| 2,938
|
py
|
Python
|
main/summary.py
|
tucan9389/MobileHumanPose
|
94183778c8384a2412729fec179e66ca2cd15b60
|
[
"MIT"
] | 137
|
2021-04-13T14:33:32.000Z
|
2022-03-24T22:28:15.000Z
|
main/summary.py
|
tucan9389/MobileHumanPose
|
94183778c8384a2412729fec179e66ca2cd15b60
|
[
"MIT"
] | 27
|
2021-05-16T08:52:03.000Z
|
2022-03-30T11:49:38.000Z
|
main/summary.py
|
tucan9389/MobileHumanPose
|
94183778c8384a2412729fec179e66ca2cd15b60
|
[
"MIT"
] | 13
|
2021-04-13T17:18:28.000Z
|
2022-03-22T12:49:03.000Z
|
import torch
import argparse
import os
import os.path as osp
import torch.backends.cudnn as cudnn
from torchsummary import summary
from torch.nn.parallel.data_parallel import DataParallel
from config import cfg
from model import get_pose_net
from thop import profile
from thop import clever_format
from ptflops import get_model_complexity_info
# argument parsing
args = parse_args()
cfg.set_args(args.gpu_ids)
cudnn.benchmark = True
# joint set
joint_num = args.joint
joints_name = ('Head_top', 'Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Pelvis', 'Spine', 'Head', 'R_Hand', 'L_Hand', 'R_Toe', 'L_Toe')
flip_pairs = ( (2, 5), (3, 6), (4, 7), (8, 11), (9, 12), (10, 13), (17, 18), (19, 20) )
if joint_num == 18:
skeleton = ( (0, 7), (7, 8), (8, 9), (9, 10), (8, 11), (11, 12), (12, 13), (8, 14), (14, 15), (15, 16), (0, 1), (1, 2), (2, 3), (0, 4), (4, 5), (5, 6) )
if joint_num == 21:
skeleton = ( (0, 16), (16, 1), (1, 15), (15, 14), (14, 8), (14, 11), (8, 9), (9, 10), (10, 19), (11, 12), (12, 13), (13, 20), (1, 2), (2, 3), (3, 4), (4, 17), (1, 5), (5, 6), (6, 7), (7, 18) )
# snapshot load
model_path = os.path.join(cfg.model_dir, 'snapshot_%d.pth.tar' % args.test_epoch)
assert osp.exists(model_path), 'Cannot find model at ' + model_path
model = get_pose_net(args.backbone, args.frontbone, False, joint_num)
model = DataParallel(model).cuda()
ckpt = torch.load(model_path)
model.load_state_dict(ckpt['network'])
single_model = model.module
summary(single_model, (3, 256, 256))
input = torch.randn(1, 3, 256, 256).cuda()
macs, params = profile(single_model, inputs=(input,))
macs, params = clever_format([macs, params], "%.3f")
flops, params1 = get_model_complexity_info(single_model, (3, 256, 256),as_strings=True, print_per_layer_stat=False)
print('{:<30} {:<8}'.format('Computational complexity: ', flops))
print('{:<30} {:<8}'.format('Computational complexity: ', macs))
print('{:<30} {:<8}'.format('Number of parameters: ', params))
print('{:<30} {:<8}'.format('Number of parameters: ', params1))
| 41.971429
| 233
| 0.642954
|
import torch
import argparse
import os
import os.path as osp
import torch.backends.cudnn as cudnn
from torchsummary import summary
from torch.nn.parallel.data_parallel import DataParallel
from config import cfg
from model import get_pose_net
from thop import profile
from thop import clever_format
from ptflops import get_model_complexity_info
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=str, dest='gpu_ids')
parser.add_argument('--epoch', type=int, dest='test_epoch')
parser.add_argument('--jointnum', type=int, dest='joint')
parser.add_argument('--backbone', type=str, dest='backbone')
args = parser.parse_args()
# test gpus
if not args.gpu_ids:
assert 0, print("Please set proper gpu ids")
if not args.joint:
assert print("please insert number of joint")
if '-' in args.gpu_ids:
gpus = args.gpu_ids.split('-')
gpus[0] = 0 if not gpus[0].isdigit() else int(gpus[0])
gpus[1] = len(mem_info()) if not gpus[1].isdigit() else int(gpus[1]) + 1
args.gpu_ids = ','.join(map(lambda x: str(x), list(range(*gpus))))
return args
# argument parsing
args = parse_args()
cfg.set_args(args.gpu_ids)
cudnn.benchmark = True
# joint set
joint_num = args.joint
joints_name = ('Head_top', 'Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee', 'L_Ankle', 'Pelvis', 'Spine', 'Head', 'R_Hand', 'L_Hand', 'R_Toe', 'L_Toe')
flip_pairs = ( (2, 5), (3, 6), (4, 7), (8, 11), (9, 12), (10, 13), (17, 18), (19, 20) )
if joint_num == 18:
skeleton = ( (0, 7), (7, 8), (8, 9), (9, 10), (8, 11), (11, 12), (12, 13), (8, 14), (14, 15), (15, 16), (0, 1), (1, 2), (2, 3), (0, 4), (4, 5), (5, 6) )
if joint_num == 21:
skeleton = ( (0, 16), (16, 1), (1, 15), (15, 14), (14, 8), (14, 11), (8, 9), (9, 10), (10, 19), (11, 12), (12, 13), (13, 20), (1, 2), (2, 3), (3, 4), (4, 17), (1, 5), (5, 6), (6, 7), (7, 18) )
# snapshot load
model_path = os.path.join(cfg.model_dir, 'snapshot_%d.pth.tar' % args.test_epoch)
assert osp.exists(model_path), 'Cannot find model at ' + model_path
model = get_pose_net(args.backbone, args.frontbone, False, joint_num)
model = DataParallel(model).cuda()
ckpt = torch.load(model_path)
model.load_state_dict(ckpt['network'])
single_model = model.module
summary(single_model, (3, 256, 256))
input = torch.randn(1, 3, 256, 256).cuda()
macs, params = profile(single_model, inputs=(input,))
macs, params = clever_format([macs, params], "%.3f")
flops, params1 = get_model_complexity_info(single_model, (3, 256, 256),as_strings=True, print_per_layer_stat=False)
print('{:<30} {:<8}'.format('Computational complexity: ', flops))
print('{:<30} {:<8}'.format('Computational complexity: ', macs))
print('{:<30} {:<8}'.format('Number of parameters: ', params))
print('{:<30} {:<8}'.format('Number of parameters: ', params1))
| 792
| 0
| 23
|
eadd411cee7bade1921682585bda0bf351b1063a
| 3,302
|
py
|
Python
|
fastflix/models/encode.py
|
benedicteb/FastFlix
|
45208b7c74a21758cb528c949422effcd0c01f44
|
[
"MIT"
] | null | null | null |
fastflix/models/encode.py
|
benedicteb/FastFlix
|
45208b7c74a21758cb528c949422effcd0c01f44
|
[
"MIT"
] | null | null | null |
fastflix/models/encode.py
|
benedicteb/FastFlix
|
45208b7c74a21758cb528c949422effcd0c01f44
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from dataclasses import dataclass, field
from pathlib import Path
from typing import List, Union
from fastflix.models.base import BaseDataClass
@dataclass
@dataclass
@dataclass
@dataclass
@dataclass
@dataclass
@dataclass
@dataclass
@dataclass
@dataclass
@dataclass
@dataclass
@dataclass
| 22.161074
| 56
| 0.633253
|
# -*- coding: utf-8 -*-
from dataclasses import dataclass, field
from pathlib import Path
from typing import List, Union
from fastflix.models.base import BaseDataClass
@dataclass
class AudioTrack(BaseDataClass):
index: int
outdex: int
codec: str = ""
downmix: int = 0
title: str = ""
language: str = ""
conversion_bitrate: str = ""
conversion_codec: str = ""
@dataclass
class SubtitleTrack(BaseDataClass):
index: int
outdex: int
disposition: str = ""
burn_in: bool = False
language: str = ""
@dataclass
class AttachmentTrack(BaseDataClass):
outdex: int
index: Union[int, None] = None
attachment_type: str = "cover"
file_path: Union[Path, None] = None
filename: Union[str, None] = None
@dataclass
class EncoderSettings(BaseDataClass):
max_muxing_queue_size: str = "1024"
pix_fmt: str = "yuv420p10le"
extra: str = ""
@dataclass
class x265Settings(EncoderSettings):
name = "HEVC (x265)" # MUST match encoder main.name
preset: str = "medium"
intra_encoding: bool = False
profile: str = "default"
tune: str = "default"
hdr10: bool = False
hdr10_opt: bool = False
dhdr10_opt: bool = False
repeat_headers: bool = False
aq_mode: int = 2
hdr10plus_metadata: str = ""
crf: Union[int, None] = None
bitrate: Union[str, None] = None
x265_params: List[str] = field(default_factory=list)
bframes: int = 4
lossless: bool = False
b_adapt: int = 2
intra_refresh: bool = False
intra_smoothing: bool = True
frame_threads: int = 0
@dataclass
class x264Settings(EncoderSettings):
name = "AVC (x264)"
preset: str = "medium"
profile: str = "default"
tune: str = "default"
pix_fmt: str = "yuv420p"
crf: Union[int, None] = None
bitrate: Union[str, None] = None
@dataclass
class rav1eSettings(EncoderSettings):
name = "AV1 (rav1e)"
speed: str = "-1"
tile_columns: str = "-1"
tile_rows: str = "-1"
tiles: str = "0"
single_pass: bool = False
qp: Union[int, None] = None
bitrate: Union[str, None] = None
@dataclass
class SVTAV1Settings(EncoderSettings):
name = "AV1 (SVT AV1)"
tile_columns: str = "0"
tile_rows: str = "0"
tier: str = "main"
# scene_detection: str = "false"
single_pass: bool = False
speed: str = "7"
qp: Union[int, None] = None
bitrate: Union[str, None] = None
@dataclass
class VP9Settings(EncoderSettings):
name = "VP9"
profile: int = 2
quality: str = "good"
speed: str = "0"
row_mt: int = 0
single_pass: bool = False
crf: Union[int, None] = None
bitrate: Union[str, None] = None
@dataclass
class AOMAV1Settings(EncoderSettings):
name = "AV1 (AOM)"
tile_columns: str = "0"
tile_rows: str = "0"
usage: str = "good"
row_mt: str = "enabled"
cpu_used: str = "4"
crf: Union[int, None] = None
bitrate: Union[str, None] = None
@dataclass
class WebPSettings(EncoderSettings):
name = "WebP"
lossless: str = "0"
compression: str = "3"
preset: str = "none"
qscale: int = 15
@dataclass
class GIFSettings(EncoderSettings):
name = "GIF"
fps: int = 15
dither: str = "sierra2_4a"
@dataclass
class CopySettings(EncoderSettings):
name = "Copy"
| 0
| 2,678
| 286
|
8929dca934368386b0442cdca9cb8e9309b1777f
| 17,130
|
py
|
Python
|
scripts/featureGenerator.py
|
dcompgriff/cs839_Entity_Extractor
|
a117256061cc75850c1da1ce837a2992c15db0fb
|
[
"MIT"
] | null | null | null |
scripts/featureGenerator.py
|
dcompgriff/cs839_Entity_Extractor
|
a117256061cc75850c1da1ce837a2992c15db0fb
|
[
"MIT"
] | null | null | null |
scripts/featureGenerator.py
|
dcompgriff/cs839_Entity_Extractor
|
a117256061cc75850c1da1ce837a2992c15db0fb
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import argparse
import glob
import os
import time
import re
from multiprocessing import Pool
'''
****************************************************************
GLOBAL VARIABLES
****************************************************************
'''
MAX_ENTITY_LENGTH = 20
MAX_ENTITY_WORD_LENGTH = 8
NUM_FEATURES = 20
globalVerbSet = set()
with open('../data/verbs.txt', 'r') as f:
for line in f:
globalVerbSet.add(line.strip())
instituteKeywords = re.compile(r'\b(Inc|Incorporation|Corp|Corporation|Institute|\
University|School|College|Department|Org|Organization|Times|Committee|Foundation|\
Party|Agency|Council|News)\b', re.I)
badKeywords = re.compile(r'\b(the|an|as|be|am|is|are|was|were|has|have|had|\
at|from|to|in|under|before|after|near|far|away|\
that|them|there|their|they|his|her|hers|him|it|you|your|yours|\
ceo|chairman|founder|head|director|\
email)\b', re.I)
# Allow . - _ & : ' ` and " inside entity words, as these can appear in the marked-up text.
badpunc = re.compile(r'\~|\!|\@|\#|\$|\%|\^|\*|\(|\)|\+|\=|\{|\}|\[|\]|\;|\<|\>|\,|\?|\/|\\')
endSentence = re.compile(r'(\d\s*\.\s*\D)|([a-z]\s*\.)')
domain = re.compile(r'\.\s*(com|org)\b')
globalCount = 0
missedTuples = []
'''
****************************************************************
PROGRAM FUNCTION SCRIPTS
****************************************************************
'''
'''
This function accepts a list of strings, where each
string represents a single line in a text file (which has
been pre-processed with at most one word on each
line).
@:param fileContents List with every line of the file contents.
@:return A pandas dataframe object with one row per candidate string
(columns: rawString, file, start, end, string, wordCount, label).
'''
'''
Feature list:
F0: "[The]" occurs 1 or two lines before string.
F1: Number of capitol Letters.
F2: Verb occurs 1 or two lines after the string.
F3: Total character length
F4: Total number of words
F5: Number of capitol letters before the string.
F5: Number of capitol letters in line after this string.
F6: "on" comes before
F7: "called" comes before # shouldn't the verb have taken care of it?
F8: "they" comes after
F9: .?! comes in the middle of and entry# should no longer be true ever
F10: Number of "."s
F11: "," is in the raw string "NOTE: This feature reliably improves precision!", #should no longer be True ever
F12: "," is in the first or last raw string position "NOTE: This feature reliably improves precision!", #should no lenger be True ever
F13: "." is in the first or last raw string position.
F14: "as", "a", "an" is in the raw string., # Invalid as discussed, to be removed
F15: The faction of the number of words where only the first character is capitalized to all words.
F16: The rawString has a Single capitalized word after it.
F17: Contains a keyword
F18: fraction of capital letters to wordCount
F19: Contains bad punctuation in raw string.
Each "tuple" object is a Pandas series with first entry tuple[0] the index, and
all following entries the entries of each row from the string tuples dataframe.
'''
'''
****************************************************************
PROGRAM RUNNING AND MANAGEMENT SCRIPTS
****************************************************************
'''
'''
For each file in the directory provided to the program, generate all of the
possible feature sets.
'''
if __name__ == '__main__':
#Parse command line arguments
parser = argparse.ArgumentParser(description="""Fake news feature generator. Generates features from files
    whose words have been split across multiple lines. It also handles files where entities have been pre-marked.""")
parser.add_argument('FileFolder', metavar='f', type=str)
parser.add_argument('Mode', metavar='m', type=str, help="""U is for update, and C is for create""")
parser.add_argument('UpdateListString', metavar='--l', type=str, default="", help="""Use a string 'F0 F12 F13' of features, or '' empty string if no features. """)
args = parser.parse_args()
main(args)
| 39.837209
| 167
| 0.588266
|
import numpy as np
import pandas as pd
import argparse
import glob
import os
import time
import re
from multiprocessing import Pool
'''
****************************************************************
GLOBAL VARIABLES
****************************************************************
'''
MAX_ENTITY_LENGTH = 20
MAX_ENTITY_WORD_LENGTH = 8
NUM_FEATURES = 20
globalVerbSet = set()
with open('../data/verbs.txt', 'r') as f:
for line in f:
globalVerbSet.add(line.strip())
instituteKeywords = re.compile(r'\b(Inc|Incorporation|Corp|Corporation|Institute|\
University|School|College|Department|Org|Organization|Times|Committee|Foundation|\
Party|Agency|Council|News)\b', re.I)
badKeywords = re.compile(r'\b(the|an|as|be|am|is|are|was|were|has|have|had|\
at|from|to|in|under|before|after|near|far|away|\
that|them|there|their|they|his|her|hers|him|it|you|your|yours|\
ceo|chairman|founder|head|director|\
email)\b', re.I)
#allow . - _ & : ' ` and " inside entity words. As these are there while marking up
badpunc = re.compile(r'\~|\!|\@|\#|\$|\%|\^|\*|\(|\)|\+|\=|\{|\}|\[|\]|\;|\<|\>|\,|\?|\/|\\')
endSentence = re.compile(r'(\d\s*\.\s*\D)|([a-z]\s*\.)')
domain = re.compile(r'\.\s*(com|org)\b')
globalCount = 0
missedTuples = []
'''
****************************************************************
PROGRAM FUNCTION SCRIPTS
****************************************************************
'''
'''
This function accepts a list of strings, where each
string represents a single line in a text file (which has
been pre-processed with at most one word on each
line).
@:param fileContents List with every line of the file contents.
@:return A pandas dataframe object with one row per candidate string
(columns: rawString, file, start, end, string, wordCount, label).
'''
def generateStringTuples(fileContents, fileName):
global globalCount
# Create initial pandas dataframe for data objects.
# rawString: as read form the file after removing entity markers
# string: after stripping punctuations from inside rawString
# wordCount: number of words in 'string' field
# start, end: index in file
# class: class label if marked entity
#tupleDF = pd.DataFrame(columns=['rawString', 'file', 'start', 'end', 'string', 'wordCount' 'class'])
# Create native python list for appending to, which is faster than pandas DF append or concat.
tupleList = []
reg = re.compile(r'[a-zA-Z0-9_\’\']+')# use to strip inner punctuations, except _ and \’
tupleColumns=['rawString', 'file', 'start', 'end', 'string', 'wordCount', 'label']
global missedTuples
for entityLength in range(1, MAX_ENTITY_LENGTH):
for i in range(len(fileContents)-entityLength):#reversed order here to prevent i+entityLength overflow
# For each possible entityLength, generate string from each index.
# Strip punctuations in order to get wordCount
# make tuples only from those whose word count is <= MAX_ENTITY_WORD_LENGTH, >=0 and unique
try:
tuple = ['', fileName, i, i+entityLength, '', 0, '-']
entityList = list(map(lambda item: str(item).strip(), fileContents[i:i+entityLength]))
# Set class to positive if '<[>' in first list word, and '<]>' in last word in list.
if '<[>' in entityList[0].strip() and '<]>' in entityList[-1].strip():
# If '<[>' and '<]>' appear in any other places internally in the string, then the
# string isn't a single entity, and is actually two entities that have been grouped
# together. Ex '<[>Project Veritas<]> shows how the <[>Clinton campaign<]>'.
# Count the number of times left and right tags occur in the string.
lCount = 0#sum(map(lambda item: 1 if '<[>' in item else 0, entityList))
rCount = 0#sum(map(lambda item: 1 if '<]>' in item else 0, entityList))
for cStr in entityList:
if '<[>' in cStr:
lCount += 1
if '<]>' in cStr:
rCount += 1
if lCount + rCount == 2:
tuple[-1] = '+'
globalCount += 1
else:
tuple[-1] = '-'
# Remove any entity tags from the string.
entityList = list(map(lambda item: item.replace('<[>', ''), entityList))
entityList = list(map(lambda item: item.replace('<]>', ''), entityList))
# Update the rest of the tuple information.
tuple[0] = ' '.join(entityList).strip()#rawString
#groups of only continuous alpha numeric characters. Not including '.' as a separate group.
words = re.findall(reg, tuple[0])
tuple[4] = ' '.join(words).strip()# string after stripping inner punctuations
tuple[5] = len(words)# wordCount
#################################
# PRE-PROCESSING RULES
#################################
#if ',' in tuple[0].strip().split()[0] or ',' in tuple[0].strip().split()[-1]:
# continue
# #if ('.' in tuple[0].strip().split()[0] or '.' in tuple[0].strip().split()[-1]) and len(entityList):
# # continue
# if ('-' in tuple[0].strip()):
# continue
#if ('(' in tuple[0].strip() or ')' in tuple[0].strip()):
# continue
# if 'as' in tuple[0].lower() or 'a' in tuple[0].lower() or 'an' in tuple[0].lower():
# continue
failed = False# use this to remove negative entries
#empty or too long remaining string
failed = failed or tuple[5]==0 or tuple[5]>MAX_ENTITY_WORD_LENGTH
#begins with a .
failed = failed or tuple[0][0]=='.'
#full tuple contains any unwanted punctuations
failed = failed or len(re.findall(badpunc, tuple[0]))>0
                #Want at least 2 English chars. Removes number-only cases
failed = failed or len(re.findall(r'[a-zA-Z]', tuple[4]))<2
#Looks like end of a sentence, except when a domain name
failed = failed or len(re.findall(endSentence, tuple[0])) - len(re.findall(domain, tuple[0]))>0
#contains a bad keyword
failed = failed or len(re.findall(badKeywords, tuple[4]))
if failed:
if tuple[-1] == '+': missedTuples.append(tuple)
continue
tupleList.append(tuple)
except IndexError:
continue
return pd.DataFrame(tupleList, columns=tupleColumns)
def F0(tuple, fileContents):
try:
if fileContents[tuple.start - 1].strip().lower() == 'the' or fileContents[tuple.start - 2].strip().lower() == 'the':
return 1
else:
return 0
except IndexError:
return 0
def F1(tuple, fileContents):
return sum(1 for char in tuple.string if char.isupper())
def F2(tuple, fileContents):
try:
if fileContents[tuple.end].strip().lower() in globalVerbSet:
return 1
else:
return 0
except IndexError:
return 0
def F3(tuple, fileContents):
return len(tuple.string.strip())
def F4(tuple, fileContents):
return tuple.wordCount#len(tuple.string.strip().split())
def F5(tuple, fileContents):
try:
return sum(1 for char in fileContents[tuple.start - 1] if char.isupper())
except:
return -1
def F6(tuple, fileContents):
try:
if fileContents[tuple.start - 1].strip().lower() == 'on':
return 1
else:
return 0
except IndexError:
return 0
def F7(tuple, fileContents):
try:
if fileContents[tuple.start - 1].strip().lower() == 'called':
return 1
else:
return 0
except IndexError:
return 0
def F8(tuple, fileContents):
try:
if fileContents[tuple.end].strip().lower() == 'they':
return 1
else:
return 0
except IndexError:
return 0
def F9(tuple, fileContents):
try:
if "." in tuple.rawString.split()[1:-1] or "!" in tuple.rawString.split()[1:-1] or "?" in tuple.rawString.split()[1:-1]:
return 1
else:
return 0
except IndexError:
return 0
def F10(tuple, fileContents):
return tuple.rawString.count('.')
def F11(tuple, fileContents):
if ',' in tuple.rawString:
return 1
else:
return 0
def F12(tuple, fileContents):
if ',' in tuple.rawString.strip().split()[0] or ',' in tuple.rawString.strip().split()[-1]:
return 1
else:
return 0
def F13(tuple, fileContents):
if '.' in tuple.rawString.strip().split()[0] or '.' in tuple.rawString.strip().split()[-1]:
return 1
else:
return 0
def F14(tuple, fileContents):
if 'as' in tuple.rawString.lower() or 'a' in tuple.rawString.lower() or 'an' in tuple.rawString.lower():
return 1
else:
return 0
def F15(tuple, fileContents):
count = 0
for word in tuple.rawString.strip().split():
if word[0].isupper() and word[1:] == word[1:].lower():
count += 1
return count / len(tuple.rawString.strip().split())
def F16(tuple, fileContents):
try:
if fileContents[tuple.end][0].isupper() and fileContents[tuple.end][1:] == fileContents[tuple.end][1:].lower():
return 1
else:
return 0
except:
return 0
def F17(tuple, fileContents):
return 1 if len(re.findall(instituteKeywords, tuple.string))>0 else 0#case ignoring search criteria
def F18(tuple, fileContents):
try:
return sum(1 for char in tuple.string if char.isupper())*1.0/tuple.wordCount
except:
return -1
def F19(tuple, fileContents):
if ":" in tuple.rawString or "-" in tuple.rawString or '"' in tuple.rawString or "&" in tuple.rawString:
return 1
else:
return 0
'''
Feature list:
F0: "[The]" occurs 1 or two lines before string.
F1: Number of capitol Letters.
F2: Verb occurs 1 or two lines after the string.
F3: Total character length
F4: Total number of words
F5: Number of capitol letters before the string.
F5: Number of capitol letters in line after this string.
F6: "on" comes before
F7: "called" comes before # shouldn't the verb have taken care of it?
F8: "they" comes after
F9: .?! comes in the middle of and entry# should no longer be true ever
F10: Number of "."s
F11: "," is in the raw string "NOTE: This feature reliably improves precision!", #should no longer be True ever
F12: "," is in the first or last raw string position "NOTE: This feature reliably improves precision!", #should no lenger be True ever
F13: "." is in the first or last raw string position.
F14: "as", "a", "an" is in the raw string., # Invalid as discussed, to be removed
F15: The faction of the number of words where only the first character is capitalized to all words.
F16: The rawString has a Single capitalized word after it.
F17: Contains a keyword
F18: fraction of capital letters to wordCount
F19: Contains bad punctuation in raw string.
Each "tuple" object is a Pandas series with first entry tuple[0] the index, and
all following entries the entries of each row from the string tuples dataframe.
'''
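# Illustrative sketch, not part of the original script: each F<i> defined above takes one
# row of the tuples dataframe plus the raw file lines, e.g. (hypothetical names):
#   row = next(tuplesDF.itertuples())
#   capitals = F1(row, fileContents)        # number of capital letters in row.string
#   verb_follows = F2(row, fileContents)    # 1 if the following file line is a known verb
# generateFeaturesFromFile below loops F0..F19 over every row and appends the label column.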
def generateFeaturesFromFile(fileContents, fileName):
tuplesDF = generateStringTuples(fileContents, fileName)
allFeaturesList = []
# Call each feature generation function on each dataframe tuple.
for i in range(0, NUM_FEATURES):
featureList = []
for tuple in tuplesDF.itertuples():
featureList.append(eval('F' + str(i) + '(tuple, fileContents)'))
allFeaturesList.append(featureList)
allFeaturesList.append(tuplesDF['label'].tolist())
# TODO: write to a csv file the entire matrix of examples and features. Randomize. Remove some to ensure almost even split b/w + and -
return pd.DataFrame(np.array(allFeaturesList).T, columns=['F' + str(i) for i in range(NUM_FEATURES)] + ['label']), tuplesDF
def updateFeaturesFromFile(fileContents, fileName, functionName):
tuplesDF = generateStringTuples(fileContents, fileName)
featureList = []
for tuple in tuplesDF.itertuples():
featureList.append(eval(functionName + '(tuple, fileContents)'))
return featureList
'''
****************************************************************
PROGRAM RUNNING AND MANAGEMENT SCRIPTS
****************************************************************
'''
'''
For each file in the directory provided to the program, generate all of the
possible feature sets.
'''
def main(args):
if args.Mode == "C":
# Get sorted file list names from the given directory.
fileList = sorted(filter(lambda item: '.txt' in str(item), os.listdir(args.FileFolder)), key=lambda item: int(item.split('_')[0]))
startTime = time.time()
global missedTuples
missedTuples = []
global globalCount
globalCount = 0
fullDF = pd.DataFrame(columns=['F' + str(i) for i in range(NUM_FEATURES)] + ['label'])
tuplesDF = pd.DataFrame(columns=['rawString', 'file', 'start', 'end', 'string', 'wordCount', 'label'])
# For each file, parse into tuples, then parse into features, and create a full pandas data frame object.
print('Performing featurization...')
for file in fileList:
if '.txt' in file:
with open(args.FileFolder + file, "r", encoding="ISO-8859-1") as f:
print(file)
fileDF, fileTuplesDF = generateFeaturesFromFile(f.readlines(), file)
fullDF = pd.concat([fullDF, fileDF])
tuplesDF = pd.concat([tuplesDF, fileTuplesDF])
endTime = time.time()
print(fullDF.shape)
print('Done!')
print("Total time to run: %s seconds." %str(endTime-startTime))
# Save the entire pandas data frame object of features and classes.
print('Saving the full dataframe...')
fullDF.to_csv('../data/featurized_instances.csv')
# Update tuples index to full data set.
tuplesDF.index = pd.Series(list(range(0, fullDF.shape[0])))
tuplesDF.to_csv('../data/tuples_instances.csv')
print('Done!')
print(globalCount)
if len(missedTuples)>0:
print("Missed", len(missedTuples), "items overall")
elif args.Mode == "U":
fullDF = pd.read_csv('../data/featurized_instances.csv', index_col=0)
tuplesDF = pd.read_csv('../data/tuples_instances.csv', index_col=0)
fileList = sorted(filter(lambda item: '.txt' in str(item), os.listdir(args.FileFolder)), key=lambda item: int(item.split('_')[0]))
# For each file, parse into tuples, then parse into features, and create a full pandas data frame object.
print('Performing featurization...')
startTime = time.time()
for functionName in args.UpdateListString.strip().split():
print(functionName)
featureList = []
for file in fileList:
if '.txt' in file:
print(file)
with open(args.FileFolder + file, "r", encoding="ISO-8859-1") as f:
newList = updateFeaturesFromFile(f.readlines(), file, functionName)
featureList.extend(newList)
# All features for current function have been generated, so update the full data frame.
fullDF.loc[:, functionName] = pd.Series(featureList, index=fullDF.index)
endTime = time.time()
print('Done!')
print("Total time to run: %s seconds." % str(endTime - startTime))
columnsList = list(fullDF.columns)
columnsList.remove('label')
fullDF = fullDF[columnsList + ['label']]
# Save the entire pandas data frame object of features and classes.
print('Saving the full dataframe...')
fullDF.to_csv('../data/featurized_instances.csv')
tuplesDF.to_csv('../data/tuples_instances.csv')
if __name__ == '__main__':
#Parse command line arguments
parser = argparse.ArgumentParser(description="""Fake news feature generator. Generates features from files
    whose words have been split across multiple lines. It also handles files where entities have been pre-marked.""")
parser.add_argument('FileFolder', metavar='f', type=str)
parser.add_argument('Mode', metavar='m', type=str, help="""U is for update, and C is for create""")
parser.add_argument('UpdateListString', metavar='--l', type=str, default="", help="""Use a string 'F0 F12 F13' of features, or '' empty string if no features. """)
args = parser.parse_args()
main(args)
| 12,510
| 0
| 549
|
27ea8f89b590262da723db9961dd54d30a7fc92d
| 9,134
|
py
|
Python
|
zodiacy/corpus.py
|
greenify/zodiacy
|
faf46a10b9b70869cb4caca02027921f1418cfcf
|
[
"MIT"
] | 1
|
2015-10-16T10:24:53.000Z
|
2015-10-16T10:24:53.000Z
|
zodiacy/corpus.py
|
greenify/zodiacy
|
faf46a10b9b70869cb4caca02027921f1418cfcf
|
[
"MIT"
] | null | null | null |
zodiacy/corpus.py
|
greenify/zodiacy
|
faf46a10b9b70869cb4caca02027921f1418cfcf
|
[
"MIT"
] | null | null | null |
import collections
import logging
from math import sqrt
from wordnik import swagger, WordApi
from .utils import weighted_choice
"""corpus.py: Generates horoscopes based provided corpuses"""
__author__ = "Project Zodiacy"
__copyright__ = "Copyright 2015, Project Zodiacy"
logger = logging.getLogger('root')
class Corpus():
"""
Generates a corpus from the provided database
Args:
zodiac_sign: select only entries for a specific zodiac sign
keyword: select only entries for a specific keyword
with_rating: weight entries after a predefined rating
threshold: minimal amount of entries needed for a valid corpus
with_synonyms: query wordnik for synonyms
wordnik_api_url: Wordnik API URL
wordnik_api_key: Wordnik API Key
"""
def __str__(self):
""" dumps the class on print - useful for debugging """
return str(self.__dict__)
def _get_zodiac_sign(self, zodiac_sign=None):
""" converts the string representation of a zodiac sign into a ordinal one
Arguments:
zodiac_sign: sign as string
Returns:
ordinal zodiac sign (from 0 to 12)
"""
zodiac_signs = dict(zip(['general', 'aries', 'taurus', 'gemini', 'cancer', 'leo', 'virgo',
'libra', 'scorpio', 'sagittarius', 'capricorn', 'aquarius', 'pisces'], range(13)))
if zodiac_sign not in zodiac_signs:
if zodiac_sign is not None:
raise ValueError('Invalid zodiac sign')
else:
return zodiac_signs[zodiac_sign]
def _add_filter(self, filter_sql, filter_value):
""" help method to add a new filter
Arguments:
filter_sql: SQL string (with '?' for parameters)
filter_value: parameters to bind to the SQL string (either a single value or list)
"""
self._filters.append(filter_sql)
if isinstance(filter_value, collections.MutableSequence):
assert filter_sql.count("?") == len(filter_value)
self._filters_values += filter_value
else:
assert filter_sql.count("?") == 1
self._filters_values.append(filter_value)
def _create_filters(self):
""" builds all filters """
self._filters = []
self._filters_values = []
if self.zodiac_sign is not None:
self._add_filter("sign=?", self.zodiac_sign_ordinal)
if self.with_synonyms:
present_synonyms = self.get_present_synonyms()
if len(present_synonyms) == 0:
logger.warn("No related synonyms found")
present_synonyms.append(self.keyword)
synonyms_sql_array = ','.join(('?' for _ in present_synonyms))
self._add_filter('keyword in (%s)' %
synonyms_sql_array, present_synonyms)
elif self.keyword is not None:
# synonyms is already filtering on keyword
self._add_filter("keyword=?", self.keyword)
def _build_filters(self):
""" concatenates all available filter to SQL """
filters = ""
if len(self._filters) > 0:
filters += " WHERE "
filters += " AND ".join(self._filters)
return filters
def _execute_and_log(self, base_stmt, values=[]):
""" execute logs the entire SQL string
This is expensive as we need to make a request to our SQLite database.
Hence it is only performed when the debugging is enabled - the level
of the root logger needs to be logging.DEBUG or less"""
if logger.getEffectiveLevel() <= logging.DEBUG:
sql_with_vals = base_stmt
if len(values) > 0:
self.cursor.execute(
"SELECT " + ", ".join(["quote(?)" for i in values]), values)
quoted_values = self.cursor.fetchone()
for quoted_value in quoted_values:
sql_with_vals = sql_with_vals.replace(
'?', str(quoted_value), 1)
logger.debug("query: %s", sql_with_vals)
self.cursor.execute(base_stmt, values)
def _execute_query(self):
""" Builds and executes the SQL query to fetch the corpus """
columns = 'interp'
columns += ',rating' if self.with_rating or self.with_synonyms else ''
columns += ',keyword' if self.keyword else ''
base_stmt = 'SELECT %s from %s' % (columns, self._table_name)
base_stmt += self._build_filters()
self._execute_and_log(base_stmt, self._filters_values)
def _count_entries(self):
""" Returns the number of found entries in the database
Reason:
cursor.rowcount returns -1 until all results have been fetched
"""
base_stmt = 'SELECT COUNT(*) from %s' % self._table_name
base_stmt += self._build_filters()
self.cursor.execute(base_stmt, self._filters_values)
return self.cursor.fetchone()[0]
def _build(self):
""" Returns a cursor with all horoscopes for the given parameters """
self._create_filters()
nr_entries = self._count_entries()
if nr_entries < self.threshold:
raise ValueError("Found %d matches" % nr_entries)
logger.debug("%d entries found in corpus db", nr_entries)
self._execute_query()
def __iter__(self):
""" Lazy corpus iterator """
self._build()
return self
def __next__(self):
""" returns the corpus lazy """
row = next(self.cursor, None)
if row is None or row[0] is None:
# maybe someone wants to access the results again
raise StopIteration
rating = None
if self.with_rating:
rating = row[1]
if self.with_synonyms:
if row[0] is None:
# filter invalid entries
logger.debug("invalid row %s", row)
return self.__next__()
if row[0] == self.keyword or len(self.synonyms) == 0:
rating = row[1]
else:
rating = self.synonym_influence * \
row[1] * sqrt(len(self.synonyms))
if rating is None:
return (row[0],)
else:
return (row[0], rating)
def list_keywords(self):
""" lists all available keywords """
self._execute_and_log(("SELECT keyword, count(*) as count FROM horoscopes "
"WHERE length(keyword) > 0 GROUP BY keyword ORDER BY count desc"))
return self.cursor.fetchall()
def select_keyword_range(self, min_val=0, max_val=1, val=1):
""" evenly maps a val with min and max to the keywords
        As the mapping is surjective, the selected slice is then sampled with weighted_choice"""
kws = [k for k in self.list_keywords() if k[1] >= self.threshold]
val_range = max_val - min_val
factor = int(len(kws) / val_range)
upper_end = val * factor
if upper_end < factor:
upper_end = factor
lower_end = upper_end - factor
self.keyword = weighted_choice(kws[lower_end:upper_end])
logger.debug("moon keyword selected: %s", self.keyword)
def _get_synonyms(self, keyword):
""" Queries Wordnik for synonyms """
client = swagger.ApiClient(self.wordnik_api_key, self.wordnik_api_url)
word_api = WordApi.WordApi(client)
words = word_api.getRelatedWords(keyword, relationshipTypes='synonym')
if words is None or len(words) == 0:
return []
else:
return words[0].words
def get_present_synonyms(self):
""" Compares Wordnik result with present synonyms in DB
Returns:
List of synonyms occurring in the database
"""
self.synonyms = self._get_synonyms(self.keyword)
logger.debug("found %d synonyms", len(self.synonyms))
if len(self.synonyms) > 0:
self._execute_and_log('SELECT keyword FROM horoscopes WHERE keyword IN (%s) GROUP BY keyword' %
','.join('?' for _ in self.synonyms), tuple(self.synonyms))
return [row[0] for row in self.cursor if row is not None]
else:
return []
| 38.868085
| 115
| 0.600285
|
import collections
import logging
from math import sqrt
from wordnik import swagger, WordApi
from .utils import weighted_choice
"""corpus.py: Generates horoscopes based provided corpuses"""
__author__ = "Project Zodiacy"
__copyright__ = "Copyright 2015, Project Zodiacy"
logger = logging.getLogger('root')
class Corpus():
"""
Generates a corpus from the provided database
Args:
zodiac_sign: select only entries for a specific zodiac sign
keyword: select only entries for a specific keyword
with_rating: weight entries after a predefined rating
threshold: minimal amount of entries needed for a valid corpus
with_synonyms: query wordnik for synonyms
wordnik_api_url: Wordnik API URL
wordnik_api_key: Wordnik API Key
"""
def __init__(self, conn, with_rating=False, with_synonyms=False,
zodiac_sign=None, keyword=None, threshold=5,
wordnik_api_url=None, wordnik_api_key=None):
kws = locals()
for key, val in kws.items():
if key != "conn":
self.__dict__[key] = val
assert conn is not None
self.cursor = conn.cursor()
self.synonym_influence = 0.2
self.zodiac_sign_ordinal = self._get_zodiac_sign(self.zodiac_sign)
self._table_name = "horoscopes"
def __str__(self):
""" dumps the class on print - useful for debugging """
return str(self.__dict__)
def _get_zodiac_sign(self, zodiac_sign=None):
""" converts the string representation of a zodiac sign into a ordinal one
Arguments:
zodiac_sign: sign as string
Returns:
ordinal zodiac sign (from 0 to 12)
"""
zodiac_signs = dict(zip(['general', 'aries', 'taurus', 'gemini', 'cancer', 'leo', 'virgo',
'libra', 'scorpio', 'sagittarius', 'capricorn', 'aquarius', 'pisces'], range(13)))
if zodiac_sign not in zodiac_signs:
if zodiac_sign is not None:
raise ValueError('Invalid zodiac sign')
else:
return zodiac_signs[zodiac_sign]
def _add_filter(self, filter_sql, filter_value):
""" help method to add a new filter
Arguments:
filter_sql: SQL string (with '?' for parameters)
filter_value: parameters to bind to the SQL string (either a single value or list)
"""
self._filters.append(filter_sql)
if isinstance(filter_value, collections.MutableSequence):
assert filter_sql.count("?") == len(filter_value)
self._filters_values += filter_value
else:
assert filter_sql.count("?") == 1
self._filters_values.append(filter_value)
def _create_filters(self):
""" builds all filters """
self._filters = []
self._filters_values = []
if self.zodiac_sign is not None:
self._add_filter("sign=?", self.zodiac_sign_ordinal)
if self.with_synonyms:
present_synonyms = self.get_present_synonyms()
if len(present_synonyms) == 0:
logger.warn("No related synonyms found")
present_synonyms.append(self.keyword)
synonyms_sql_array = ','.join(('?' for _ in present_synonyms))
self._add_filter('keyword in (%s)' %
synonyms_sql_array, present_synonyms)
elif self.keyword is not None:
# synonyms is already filtering on keyword
self._add_filter("keyword=?", self.keyword)
def _build_filters(self):
""" concatenates all available filter to SQL """
filters = ""
if len(self._filters) > 0:
filters += " WHERE "
filters += " AND ".join(self._filters)
return filters
def _execute_and_log(self, base_stmt, values=[]):
""" execute logs the entire SQL string
This is expensive as we need to make a request to our SQLite database.
Hence it is only performed when the debugging is enabled - the level
of the root logger needs to be logging.DEBUG or less"""
if logger.getEffectiveLevel() <= logging.DEBUG:
sql_with_vals = base_stmt
if len(values) > 0:
self.cursor.execute(
"SELECT " + ", ".join(["quote(?)" for i in values]), values)
quoted_values = self.cursor.fetchone()
for quoted_value in quoted_values:
sql_with_vals = sql_with_vals.replace(
'?', str(quoted_value), 1)
logger.debug("query: %s", sql_with_vals)
self.cursor.execute(base_stmt, values)
def _execute_query(self):
""" Builds and executes the SQL query to fetch the corpus """
columns = 'interp'
columns += ',rating' if self.with_rating or self.with_synonyms else ''
columns += ',keyword' if self.keyword else ''
base_stmt = 'SELECT %s from %s' % (columns, self._table_name)
base_stmt += self._build_filters()
self._execute_and_log(base_stmt, self._filters_values)
def _count_entries(self):
""" Returns the number of found entries in the database
Reason:
cursor.rowcount returns -1 until all results have been fetched
"""
base_stmt = 'SELECT COUNT(*) from %s' % self._table_name
base_stmt += self._build_filters()
self.cursor.execute(base_stmt, self._filters_values)
return self.cursor.fetchone()[0]
def _build(self):
""" Returns a cursor with all horoscopes for the given parameters """
self._create_filters()
nr_entries = self._count_entries()
if nr_entries < self.threshold:
raise ValueError("Found %d matches" % nr_entries)
logger.debug("%d entries found in corpus db", nr_entries)
self._execute_query()
def __iter__(self):
""" Lazy corpus iterator """
self._build()
return self
def __next__(self):
""" returns the corpus lazy """
row = next(self.cursor, None)
if row is None or row[0] is None:
# maybe someone wants to access the results again
raise StopIteration
rating = None
if self.with_rating:
rating = row[1]
if self.with_synonyms:
if row[0] is None:
# filter invalid entries
logger.debug("invalid row %s", row)
return self.__next__()
if row[0] == self.keyword or len(self.synonyms) == 0:
rating = row[1]
else:
rating = self.synonym_influence * \
row[1] * sqrt(len(self.synonyms))
if rating is None:
return (row[0],)
else:
return (row[0], rating)
def random_keyword(self):
valid_keywords = [k for k in self.list_keywords() if k[1] >= self.threshold]
self.keyword = weighted_choice(valid_keywords)
logger.debug("keyword selected: %s", self.keyword)
def list_keywords(self):
""" lists all available keywords """
self._execute_and_log(("SELECT keyword, count(*) as count FROM horoscopes "
"WHERE length(keyword) > 0 GROUP BY keyword ORDER BY count desc"))
return self.cursor.fetchall()
def select_keyword_range(self, min_val=0, max_val=1, val=1):
""" evenly maps a val with min and max to the keywords
        As the mapping is surjective, the selected slice is then sampled with weighted_choice"""
kws = [k for k in self.list_keywords() if k[1] >= self.threshold]
val_range = max_val - min_val
factor = int(len(kws) / val_range)
upper_end = val * factor
if upper_end < factor:
upper_end = factor
lower_end = upper_end - factor
self.keyword = weighted_choice(kws[lower_end:upper_end])
logger.debug("moon keyword selected: %s", self.keyword)
def _get_synonyms(self, keyword):
""" Queries Wordnik for synonyms """
client = swagger.ApiClient(self.wordnik_api_key, self.wordnik_api_url)
word_api = WordApi.WordApi(client)
words = word_api.getRelatedWords(keyword, relationshipTypes='synonym')
if words is None or len(words) == 0:
return []
else:
return words[0].words
def get_present_synonyms(self):
""" Compares Wordnik result with present synonyms in DB
Returns:
List of synonyms occurring in the database
"""
self.synonyms = self._get_synonyms(self.keyword)
logger.debug("found %d synonyms", len(self.synonyms))
if len(self.synonyms) > 0:
self._execute_and_log('SELECT keyword FROM horoscopes WHERE keyword IN (%s) GROUP BY keyword' %
','.join('?' for _ in self.synonyms), tuple(self.synonyms))
return [row[0] for row in self.cursor if row is not None]
else:
return []
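# Illustrative usage sketch, not part of the original module, assuming an open sqlite3
# connection `conn` whose `horoscopes` table matches the schema queried above:
#   corpus = Corpus(conn, with_rating=True, zodiac_sign='leo', keyword='love')
#   for interp, rating in corpus:   # Corpus iterates lazily over (interp, rating) rows
#       print(rating, interp)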
| 721
| 0
| 54
|
f49d473cc0d800aa406361c6dce4b00757bccf54
| 2,906
|
py
|
Python
|
remedy/remedy.py
|
4n6ir/remedy-delete-default-vpcs
|
ce1250140c7f3c967646c9be5ef09dde88c191b0
|
[
"Apache-2.0"
] | null | null | null |
remedy/remedy.py
|
4n6ir/remedy-delete-default-vpcs
|
ce1250140c7f3c967646c9be5ef09dde88c191b0
|
[
"Apache-2.0"
] | null | null | null |
remedy/remedy.py
|
4n6ir/remedy-delete-default-vpcs
|
ce1250140c7f3c967646c9be5ef09dde88c191b0
|
[
"Apache-2.0"
] | null | null | null |
import boto3
import json
import logging
import os
logger = logging.getLogger()
logger.setLevel(logging.INFO)
| 44.707692
| 93
| 0.399862
|
import boto3
import json
import logging
import os
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def handler(event, context):
client = boto3.client('ec2')
regions = client.describe_regions()
for region in regions['Regions']:
ec2_client = boto3.client('ec2', region_name=region['RegionName'])
paginator = ec2_client.get_paginator('describe_vpcs')
response_iterator = paginator.paginate()
for page in response_iterator:
if len(page['Vpcs']) > 0:
for item in page['Vpcs']:
if item['IsDefault'] is True:
paginator2 = ec2_client.get_paginator('describe_internet_gateways')
response_iterator2 = paginator2.paginate()
for page2 in response_iterator2:
for item2 in page2['InternetGateways']:
if len(page2['InternetGateways']) > 0:
if item2['Attachments'][0]['VpcId'] == item['VpcId']:
try:
ec2_client.detach_internet_gateway(
InternetGatewayId=item2['InternetGatewayId'],
VpcId=item['VpcId']
)
ec2_client.delete_internet_gateway(
InternetGatewayId=item2['InternetGatewayId']
)
except:
logger.info('USED '+str(item2))
paginator3 = ec2_client.get_paginator('describe_subnets')
response_iterator3 = paginator3.paginate()
for page3 in response_iterator3:
for item3 in page3['Subnets']:
if len(page3['Subnets']) > 0:
if item3['VpcId'] == item['VpcId']:
try:
ec2_client.delete_subnet(
SubnetId=item3['SubnetId']
)
except:
logger.info('USED '+str(item3))
try:
ec2_client.delete_vpc(
VpcId=item['VpcId']
)
except:
logger.info('USED '+str(item))
pass
return {
'statusCode': 200,
'body': json.dumps('Delete Default VPCs')
}
| 2,773
| 0
| 23
|
39869b293ee78812692ef8612d361826ac2281dd
| 5,815
|
py
|
Python
|
papers/BLS/BLSBasic/BLS.py
|
mindspore-ai/contrib
|
85dccac7a2ba6e962092ecd51aefd962d7f2aeac
|
[
"Apache-2.0"
] | 2
|
2021-11-10T06:16:55.000Z
|
2022-02-22T11:30:04.000Z
|
papers/BLS/BLSBasic/BLS.py
|
mindspore-ai/contrib
|
85dccac7a2ba6e962092ecd51aefd962d7f2aeac
|
[
"Apache-2.0"
] | null | null | null |
papers/BLS/BLSBasic/BLS.py
|
mindspore-ai/contrib
|
85dccac7a2ba6e962092ecd51aefd962d7f2aeac
|
[
"Apache-2.0"
] | 1
|
2022-03-22T06:03:15.000Z
|
2022-03-22T06:03:15.000Z
|
import numpy
import numpy as np
from mindspore import Tensor, dtype
from mindspore.train.serialization import export, save_checkpoint
import mindspore.dataset as ds
import mindspore.context as context
import mindspore.ops as ops
import mindspore.nn as N
import mindspore.numpy as mnp
| 44.389313
| 115
| 0.692863
|
import numpy
import numpy as np
from mindspore import Tensor, dtype
from mindspore.train.serialization import export, save_checkpoint
import mindspore.dataset as ds
import mindspore.context as context
import mindspore.ops as ops
import mindspore.nn as N
import mindspore.numpy as mnp
class BLSBasicTrain(N.Cell):
def __init__(self) -> None:
super(BLSBasicTrain, self).__init__()
self.s = 0.8
self.c = 2 ** -15
self.n1 = 10
self.n2 = 10
self.n3 = 20
self.y_max = 1
self.y_min = 0
self.iterations = 2
        # ops used to produce the training results
self.argmax_op = ops.Argmax()
self.sign_op = ops.Sign()
self.select_op = ops.Select()
self.accuracy_op = N.Accuracy('classification')
def construct(self, _train_data, _train_label):
output, weight, _, _, _, _, _ = self.train(_train_data, _train_label)
return output, weight
def train(self, x, y):
standardized_data = self.standardize_input(x)
feature, mapped_features, _, _ = self.generate_mapped_features(standardized_data)
feature_with_bias = self.enhance_layer_input(feature)
enhance_layer_weight = self.generate_random_weight_of_enhance_layer()
enhance_layer_output, shrink_parameter = self.enhance_layer_output(feature_with_bias, enhance_layer_weight)
output, output_weight = self.final_output(feature, enhance_layer_output, y)
return output, output_weight, mapped_features, _, _, enhance_layer_weight, shrink_parameter
def generate_mapped_features(self, standardized_train_x):
feature = self.input_features(standardized_train_x)
output = []
weight = mnp.full((self.n2, feature.shape[1], self.n1), 0.0)
max_list = mnp.full((self.n2, self.n1), 0.0)
min_list = mnp.full((self.n2, self.n1), 0.0)
for i in range(self.n2):
            # generate random weights for this window
            weight_of_each_window = self.generate_random_weight_of_window(standardized_train_x, i)
            # generate the window features
            temp_feature_of_each_window = mnp.matmul(feature, weight_of_each_window)
            # rescale to [-1, 1]
            feature_of_each_window, _, _ = self.mapminmax(temp_feature_of_each_window, -1.0, 1.0)
            # compute the final window weights via sparse learning
            beta = self.sparse_bls(feature_of_each_window, feature)
            # compute the window output T1
            output_of_each_window_next = self.window_output(feature, beta)
            # rescale to [0, 1]; keep the per-window extrema in separate names so the
            # preallocated max_list/min_list arrays are not shadowed
            output_of_each_window_next, window_max, window_min = self.mapminmax(output_of_each_window_next, 0.0, 1.0)
            # concatenate the window outputs
            output = self.concat_window_output(output, output_of_each_window_next)
            # update the output weights and the per-window extrema
            weight[i] = beta
            max_list[i] = window_max
            min_list[i] = window_min
output = self.stack_window_output(output)
return output, weight, max_list, min_list
def generate_random_weight_of_enhance_layer(self):
weight = []
uniform = ops.UniformReal(seed=2)
rand = uniform((self.n2 * self.n1 + 1, self.n3), 0.0, 1.0)
weight.append(self.orthonormalize(2 * rand - mnp.full(rand.shape, 1.0)))
return mnp.stack(weight, axis=1)
def final_output(self, _output_of_feature_mapping_layer, _output_of_enhance_layer, _train_label):
        # concatenate the mapped-feature and enhancement-layer outputs to form T3
concat = mnp.concatenate((_output_of_feature_mapping_layer, _output_of_enhance_layer), axis=1)
weight = self.pseudo_inverse(concat, _train_label)
        # produce the training output
output = self.output_layer(concat, weight)
return output, weight
def generate_random_weight_of_window(self, standardized_x, i):
uniform = ops.UniformReal(seed=2)
        weight = 2.0 * uniform((standardized_x.shape[1] + 1, self.n1)) - 1.0  # weight coefficients for each window; the last row is the bias
return weight
def input_features(self, standardized_train_x):
ones = mnp.full((standardized_train_x.shape[0], 1), 0.1)
feature_of_input_data_with_bias = mnp.concatenate((standardized_train_x, ones), axis=1)
return feature_of_input_data_with_bias
def window_output(self, feature_of_input_data_with_bias, beta):
output_of_each_window = mnp.matmul(feature_of_input_data_with_bias, beta)
return output_of_each_window
def concat_window_output(self, output_of_feature_mapping_layer, t1):
output_of_feature_mapping_layer.append(t1)
return output_of_feature_mapping_layer
def stack_window_output(self, output_of_feature_mapping_layer):
res = mnp.stack(output_of_feature_mapping_layer, axis=1)
res = mnp.reshape(res, (res.shape[0], -1))
return res
def enhance_layer_input(self, mapped_feature):
data_concat_second = mnp.full((mapped_feature.shape[0], 1), 0.1)
res = mnp.concatenate((mapped_feature, data_concat_second), axis=1)
return res
def enhance_layer_output(self, _input_of_enhance_layer_with_bias, _weight_of_enhance_layer):
res_squeeze_input0 = mnp.squeeze(_input_of_enhance_layer_with_bias)
res_squeeze_input1 = mnp.squeeze(_weight_of_enhance_layer)
res_matmul = mnp.matmul(res_squeeze_input0, res_squeeze_input1)
res_reduce_max = mnp.amax(res_matmul)
shrink_parameter = self.s * mnp.full(res_reduce_max.shape, 1.0) / res_reduce_max
res_tanh = mnp.tanh(res_matmul * shrink_parameter)
return res_tanh, shrink_parameter
def pseudo_inverse(self, _concatenate_of_two_layer, _train_y):
pseudo_inverse = self.pinv(_concatenate_of_two_layer, self.c)
new_output_weight = mnp.matmul(pseudo_inverse, _train_y)
return new_output_weight
def output_layer(self, concatenate_of_two_layer, output_weight):
output_of_result = mnp.matmul(concatenate_of_two_layer, output_weight)
return output_of_result
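    # Illustrative usage sketch, not part of the original file, assuming the helper
    # methods referenced above (standardize_input, mapminmax, sparse_bls, orthonormalize,
    # pinv) are defined elsewhere in this module and x, y are float32 Tensors:
    #   model = BLSBasicTrain()
    #   output, weight = model.construct(x, y)
    # output is the fitted training prediction; weight is the pseudo-inverse solution for
    # the concatenated mapped + enhancement features.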
| 5,270
| 7
| 427
|
ced8f7377ff6afeb026e4db4b64351647f6b9de4
| 8,333
|
py
|
Python
|
tests/zquantum/core/wip/circuits/_gates_test.py
|
bartubisgin/z-quantum-core
|
b61aef12cc86f0a8234229b9b26b21cde950d6f1
|
[
"Apache-2.0"
] | null | null | null |
tests/zquantum/core/wip/circuits/_gates_test.py
|
bartubisgin/z-quantum-core
|
b61aef12cc86f0a8234229b9b26b21cde950d6f1
|
[
"Apache-2.0"
] | null | null | null |
tests/zquantum/core/wip/circuits/_gates_test.py
|
bartubisgin/z-quantum-core
|
b61aef12cc86f0a8234229b9b26b21cde950d6f1
|
[
"Apache-2.0"
] | 1
|
2022-03-19T02:23:53.000Z
|
2022-03-19T02:23:53.000Z
|
"""Test cases for _gates module."""
from unittest.mock import Mock
import pytest
import sympy
from zquantum.core.wip.circuits import _builtin_gates
from zquantum.core.wip.circuits._gates import GateOperation, MatrixFactoryGate
GATES_REPRESENTATIVES = [
_builtin_gates.X,
_builtin_gates.Y,
_builtin_gates.Z,
_builtin_gates.T,
_builtin_gates.H,
_builtin_gates.I,
_builtin_gates.RX(sympy.Symbol("theta")),
_builtin_gates.RY(0.5),
_builtin_gates.RZ(0),
_builtin_gates.PHASE(sympy.pi / 5),
_builtin_gates.CZ,
_builtin_gates.CNOT,
_builtin_gates.SWAP,
_builtin_gates.ISWAP,
_builtin_gates.XX(sympy.cos(sympy.Symbol("phi"))),
_builtin_gates.YY(sympy.pi),
_builtin_gates.ZZ(sympy.Symbol("x") + sympy.Symbol("y")),
_builtin_gates.CPHASE(1.5),
]
@pytest.mark.parametrize("gate", GATES_REPRESENTATIVES)
@pytest.mark.parametrize("gate", GATES_REPRESENTATIVES)
| 37.200893
| 88
| 0.677667
|
"""Test cases for _gates module."""
from unittest.mock import Mock
import pytest
import sympy
from zquantum.core.wip.circuits import _builtin_gates
from zquantum.core.wip.circuits._gates import GateOperation, MatrixFactoryGate
GATES_REPRESENTATIVES = [
_builtin_gates.X,
_builtin_gates.Y,
_builtin_gates.Z,
_builtin_gates.T,
_builtin_gates.H,
_builtin_gates.I,
_builtin_gates.RX(sympy.Symbol("theta")),
_builtin_gates.RY(0.5),
_builtin_gates.RZ(0),
_builtin_gates.PHASE(sympy.pi / 5),
_builtin_gates.CZ,
_builtin_gates.CNOT,
_builtin_gates.SWAP,
_builtin_gates.ISWAP,
_builtin_gates.XX(sympy.cos(sympy.Symbol("phi"))),
_builtin_gates.YY(sympy.pi),
_builtin_gates.ZZ(sympy.Symbol("x") + sympy.Symbol("y")),
_builtin_gates.CPHASE(1.5),
]
def example_one_qubit_matrix_factory(a, b):
return sympy.Matrix([[a, b], [b, a]])
def example_two_qubit_matrix_factory(a, b, c):
return sympy.Matrix([[a, 0, 0, 0], [0, b, 0, 0], [0, 0, c, 0], [0, 0, 0, 1]])
class TestMatrixFactoryGate:
@pytest.mark.parametrize(
"params, factory, num_qubits",
[
((0.5, sympy.Symbol("theta")), example_one_qubit_matrix_factory, 1),
(
(sympy.Symbol("alpha"), sympy.Symbol("beta"), 1),
example_two_qubit_matrix_factory,
2,
),
],
)
def test_constructs_its_matrix_by_calling_factory_with_bound_parameter(
self, params, factory, num_qubits
):
wrapped_factory = Mock(wraps=factory)
gate = MatrixFactoryGate("U", wrapped_factory, params, num_qubits)
assert gate.matrix == factory(*params)
wrapped_factory.assert_called_once_with(*params)
def test_binding_parameters_creates_new_instance_with_substituted_free_params(self):
gamma, theta, x, y = sympy.symbols("gamma, theta, x, y")
params = (theta, x + y)
gate = MatrixFactoryGate("U", example_one_qubit_matrix_factory, params, 1)
new_gate = gate.bind({theta: 0.5, x: gamma, y: 3})
assert new_gate.name == gate.name
assert new_gate.matrix_factory == gate.matrix_factory
assert new_gate.num_qubits == gate.num_qubits
assert new_gate.params == (0.5, gamma + 3)
def test_binding_parameters_with_symbol_outside_of_free_symbols_does_not_raise(
self,
):
gamma, theta = sympy.symbols("gamma, theta")
params = (theta, 2 * theta)
gate = MatrixFactoryGate("U", example_one_qubit_matrix_factory, params, 1)
new_gate = gate.bind({gamma: 0.5, theta: 1})
assert new_gate.params == (1, 2)
def test_binding_parameters_does_not_change_parameters_without_free_symbols(self):
theta = sympy.Symbol("theta")
gate = MatrixFactoryGate("V", example_one_qubit_matrix_factory, (1, 2), 1)
new_gate = gate.bind({theta: 5.0})
assert new_gate.params == (1, 2)
def test_replace_parameters_correctly_gives_instance_with_correctly_set_parameters(
self,
):
theta = sympy.Symbol("theta")
gate = MatrixFactoryGate("V", example_one_qubit_matrix_factory, (1, 2), 1)
new_gate = gate.replace_params((theta, 0.5))
assert new_gate == MatrixFactoryGate(
"V", example_one_qubit_matrix_factory, (theta, 0.5), 1
)
def test_daggers_matrix_is_adjoint_of_original_gates_matrix(self):
gate = MatrixFactoryGate("V", example_one_qubit_matrix_factory, (1, 2), 1)
assert gate.dagger.matrix == gate.matrix.adjoint()
def test_dagger_has_the_same_params_and_num_qubits_as_wrapped_gate(self):
gate = MatrixFactoryGate(
"U", example_two_qubit_matrix_factory, (0.5, 0.1, sympy.Symbol("a")), 2
)
assert gate.dagger.num_qubits == gate.num_qubits
assert gate.dagger.params == gate.params
def test_dagger_of_hermitian_gate_is_the_same_gate(self):
gate = MatrixFactoryGate(
"V", example_one_qubit_matrix_factory, (1, 0), 1, is_hermitian=True
)
assert gate.dagger is gate
def test_binding_gates_in_dagger_is_propagated_to_wrapped_gate(self):
theta = sympy.Symbol("theta")
gate = MatrixFactoryGate("V", example_one_qubit_matrix_factory, (theta, 0), 1)
assert gate.dagger.bind({theta: 0.5}) == gate.bind({theta: 0.5}).dagger
def test_dagger_of_dagger_is_the_same_as_original_gate(self):
gate = MatrixFactoryGate("V", example_one_qubit_matrix_factory, (1, 0), 1)
assert gate.dagger.dagger is gate
def test_applying_dagger_and_replacing_parameters_commutes(self):
gate = MatrixFactoryGate("V", example_one_qubit_matrix_factory, (1, 0), 1)
new_params = (sympy.Symbol("theta"), 4.2)
assert (
gate.dagger.replace_params(new_params)
== gate.replace_params(new_params).dagger
)
def test_applying_gate_returns_operation_with_correct_gate_and_indices(self):
theta = sympy.Symbol("theta")
gamma = sympy.Symbol("gamma")
gate = MatrixFactoryGate(
"A", example_two_qubit_matrix_factory, (theta, gamma, 42), 2
)
operation = gate(4, 1)
assert operation.gate == gate
assert operation.qubit_indices == (4, 1)
@pytest.mark.parametrize("gate", GATES_REPRESENTATIVES)
class TestControlledGate:
def test_num_qubits_equal_to_wrapped_gates_num_qubits_plus_num_controlled_qubits(
self, gate
):
assert gate.controlled(3).num_qubits == gate.num_qubits + 3
def test_has_matrix_with_eye_and_wrapped_gates_matrix_as_bottom_left_block(
self, gate
):
controlled_gate = gate.controlled(2)
n = gate.matrix.shape[0]
assert gate.matrix.shape[1] == n
assert controlled_gate.matrix[0:-n, 0:-n] == sympy.eye(
2 ** controlled_gate.num_qubits - n
)
assert controlled_gate.matrix[-n:, -n:] == gate.matrix
def test_controlled_of_controlled_gate_has_summed_number_of_control_qubits(
self, gate
):
controlled_gate = gate.controlled(2)
double_controlled_gate = controlled_gate.controlled(3)
assert double_controlled_gate.wrapped_gate == gate
assert double_controlled_gate.num_qubits == gate.num_qubits + 2 + 3
assert double_controlled_gate.num_control_qubits == 2 + 3
assert double_controlled_gate.matrix.shape == 2 * (
2 ** (gate.num_qubits + 2 + 3),
)
def test_has_the_same_parameters_as_wrapped_gate(self, gate):
controlled_gate = gate.controlled(4)
assert controlled_gate.params == gate.params
def test_dagger_of_controlled_gate_is_controlled_gate_wrapping_dagger(self, gate):
controlled_gate = gate.controlled(4)
assert controlled_gate.dagger == gate.dagger.controlled(4)
def test_binding_parameters_in_control_gate_is_propagated_to_wrapped_gate(
self, gate
):
controlled_gate = gate.controlled(2)
symbols_map = {sympy.Symbol("theta"): 0.5, sympy.Symbol("x"): 3}
assert controlled_gate.bind(symbols_map) == gate.bind(symbols_map).controlled(2)
def test_constructing_controlled_gate_and_replacing_parameters_commute(self, gate):
controlled_gate = gate.controlled(2)
new_params = tuple(3 * param for param in controlled_gate.params)
assert controlled_gate.replace_params(new_params) == gate.replace_params(
new_params
).controlled(2)
@pytest.mark.parametrize("gate", GATES_REPRESENTATIVES)
class TestGateOperation:
def test_bound_symbols_are_not_present_in_gate_parameters(self, gate):
op = GateOperation(gate, tuple(range(gate.num_qubits)))
symbols_map = {sympy.Symbol("phi"): 0.5, sympy.Symbol("y"): 1.1}
assert all(
symbol not in sympy.sympify(param).atoms(sympy.Symbol)
for symbol in symbols_map
for param in op.bind(symbols_map).params
)
def test_replacing_parameters_constructs_operation_of_gate_with_new_parameters(
self, gate
):
op = GateOperation(gate, tuple(range(gate.num_qubits)))
new_params = tuple(-1 * param for param in op.params)
assert op.replace_params(new_params).params == new_params
| 6,372
| 678
| 354
|
5a4509ded52d8b17265869fdb30826814374cffa
| 2,138
|
py
|
Python
|
8-puzzle-game/manhattan.py
|
itepifanio/jogo-dos-8-numeros
|
692b84f9b48fef5d8da9afba67adb1a1f9c13b28
|
[
"MIT"
] | null | null | null |
8-puzzle-game/manhattan.py
|
itepifanio/jogo-dos-8-numeros
|
692b84f9b48fef5d8da9afba67adb1a1f9c13b28
|
[
"MIT"
] | null | null | null |
8-puzzle-game/manhattan.py
|
itepifanio/jogo-dos-8-numeros
|
692b84f9b48fef5d8da9afba67adb1a1f9c13b28
|
[
"MIT"
] | null | null | null |
from game.game import Game
from heapq import heappush, heappop
import time
import itertools
import time
if __name__ == '__main__':
algorithm = Manhanttan()
algorithm.run()
| 30.542857
| 89
| 0.563143
|
from game.game import Game
from heapq import heappush, heappop
import time
import itertools
import time
class Manhanttan(Game):
def heuristic(self, a):
result = 0
node = list(itertools.chain(*a))
for current, target in enumerate(node):
currentRow = int(current/3)
currentColumn = current%3
targetRow = int(target/3)
targetColumn = target%3
result += abs(currentRow-targetRow) + abs(currentColumn-targetColumn)
return result
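    # Illustrative worked example, not part of the original file: heuristic() sums the
    # Manhattan distance between each tile's current cell and its goal cell on the 3x3
    # board. A tile with value 8 sitting at index 2 (row 0, col 2) contributes
    # abs(0 - 2) + abs(2 - 2) = 2, since value 8 belongs at index 8 (row 2, col 2).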
def run(self):
self.nodeList.append(self.startNode)
self.visitedList.append(self.startNode)
t0 = time.time()
found = False
while (not found and len(self.nodeList) != 0):
fList = []
for node in self.nodeList:
h = self.heuristic(node)
g = len(node)
f = g+h
heappush(fList, (f, node))
currentNode = self.nodeList.pop(
self.nodeList.index(
heappop(fList)[1]
)
)
blankIndex = self.getBlankIndexes(currentNode)
if self.board.canMoveTop(blankIndex):
topNode = self.board.top(currentNode, blankIndex[0], blankIndex[1])
found = self.checkFinal(topNode)
if self.board.canMoveLeft(blankIndex) and found == False:
leftNode = self.board.left(currentNode, blankIndex[0], blankIndex[1])
found = self.checkFinal(leftNode)
if self.board.canMoveRight(blankIndex) and found == False:
rightNode = self.board.right(currentNode, blankIndex[0], blankIndex[1])
found = self.checkFinal(rightNode)
if self.board.canMoveBottom(blankIndex) and found == False:
bottomNode = self.board.bottom(currentNode, blankIndex[0], blankIndex[1])
found = self.checkFinal(bottomNode)
t1 = time.time()
print('Time:', t1-t0)
print('------')
if __name__ == '__main__':
algorithm = Manhanttan()
algorithm.run()
| 1,870
| 2
| 80
|
8747cd2d839a44ad5113082afaedeed9aa0cc8f8
| 9,839
|
py
|
Python
|
benchmark_utils.py
|
neyudin/AcceleratedGNMethodEquations
|
b5606f98221d72ff3e9ebb8ce0709981db08cefd
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark_utils.py
|
neyudin/AcceleratedGNMethodEquations
|
b5606f98221d72ff3e9ebb8ce0709981db08cefd
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark_utils.py
|
neyudin/AcceleratedGNMethodEquations
|
b5606f98221d72ff3e9ebb8ce0709981db08cefd
|
[
"BSD-3-Clause"
] | null | null | null |
from optimizers import *
import gc
import time
def experiment_runner(args, x_0_dict):
"""
Runner routine which performs the whole experiment set.
Parameters
----------
args : populated namespace object from ArgumentParser
        Parsed command-line arguments configuring the experiment grid (dimensions, starts, iteration budget, step parameters, verbosity).
x_0_dict : dict
The dictionary of initial points x.
Returns
-------
dict
Aggregated experiment data.
"""
gc.enable()
gc.collect()
exp_res_dict = dict()
if args.verbose:
print("Started DetGNM!")
exp_res_dict['DetGNM'] = dict()
for oracle_class, name in [(NesterovSkokovOracle, 'Nesterov-Skokov'), (HatOracle, 'Hat'), (PLOracle, 'PL')]:
if args.verbose:
print('Oracle:', name)
exp_res_dict['DetGNM'][name] = dict()
for n in args.n_dims:
if args.verbose:
print(' n:', n)
exp_res_dict['DetGNM'][name][n] = dict()
for i in range(args.n_starts):
if args.verbose:
print(' start #:', i + 1)
start = time.time()
_, f_vals, nabla_f_2_norm_vals, _, _ = DetGNM(oracle_class(n), args.N_iter, x_0_dict[n][i], args.L_0, True, None)
start = time.time() - start
exp_res_dict['DetGNM'][name][n][i] = {'f_vals': f_vals, 'nabla_f_2_norm_vals': nabla_f_2_norm_vals, 'avg_time_s': start / len(f_vals), 'time_s': start}
del _, f_vals, nabla_f_2_norm_vals, start
gc.collect()
if args.verbose:
print("Started ArmijoAccDetGNM!")
exp_res_dict['ArmijoAccDetGNM'] = dict()
for oracle_class, name in [(NesterovSkokovOracle, 'Nesterov-Skokov'), (HatOracle, 'Hat'), (PLOracle, 'PL')]:
if args.verbose:
print('Oracle:', name)
exp_res_dict['ArmijoAccDetGNM'][name] = dict()
for n in args.n_dims:
if args.verbose:
print(' n:', n)
exp_res_dict['ArmijoAccDetGNM'][name][n] = dict()
for pair_num, (c1, c2) in enumerate(zip(args.c1_list, args.c2_list)):
if args.verbose:
print(' c1 = {:.4f}, c2 = {:.4f}:'.format(c1, c2))
exp_res_dict['ArmijoAccDetGNM'][name][n][pair_num] = dict()
for i in range(args.n_starts):
if args.verbose:
print(' start #:', i + 1)
start = time.time()
_, f_vals, nabla_f_2_norm_vals, _, _, local_steps_list, spec_steps_list = AccDetGNM(oracle_class(n), args.N_iter, x_0_dict[n][i], args.L_0, True, None, "Armijo", c1=c1, c2=c2)
start = time.time() - start
exp_res_dict['ArmijoAccDetGNM'][name][n][pair_num][i] = {'f_vals': f_vals,
'nabla_f_2_norm_vals': nabla_f_2_norm_vals,
'local_steps_list': local_steps_list,
'spec_steps_list': spec_steps_list,
'avg_time_s': start / len(f_vals),
'time_s': start}
del _, f_vals, nabla_f_2_norm_vals, local_steps_list, spec_steps_list, start
gc.collect()
if args.verbose:
print("Started ExtrapolationAccDetGNM!")
exp_res_dict['ExtrapolationAccDetGNM'] = dict()
for oracle_class, name in [(NesterovSkokovOracle, 'Nesterov-Skokov'), (HatOracle, 'Hat'), (PLOracle, 'PL')]:
if args.verbose:
print('Oracle:', name)
exp_res_dict['ExtrapolationAccDetGNM'][name] = dict()
for n in args.n_dims:
if args.verbose:
print(' n:', n)
exp_res_dict['ExtrapolationAccDetGNM'][name][n] = dict()
for i in range(args.n_starts):
if args.verbose:
print(' start #:', i + 1)
start = time.time()
_, f_vals, nabla_f_2_norm_vals, _, _, n_iter_list = AccDetGNM(oracle_class(n), args.N_iter, x_0_dict[n][i], args.L_0, True, None, "Extrapolation")
start = time.time() - start
exp_res_dict['ExtrapolationAccDetGNM'][name][n][i] = {'f_vals': f_vals,
'nabla_f_2_norm_vals': nabla_f_2_norm_vals,
'n_iter_list': n_iter_list,
'avg_time_s': start / len(f_vals),
'time_s': start}
del _, f_vals, nabla_f_2_norm_vals, n_iter_list, start
gc.collect()
if args.verbose:
print("Started InterpolationAccDetGNM!")
exp_res_dict['InterpolationAccDetGNM'] = dict()
for oracle_class, name in [(NesterovSkokovOracle, 'Nesterov-Skokov'), (HatOracle, 'Hat'), (PLOracle, 'PL')]:
if args.verbose:
print('Oracle:', name)
exp_res_dict['InterpolationAccDetGNM'][name] = dict()
for n in args.n_dims:
if args.verbose:
print(' n:', n)
exp_res_dict['InterpolationAccDetGNM'][name][n] = dict()
for n_points in args.n_points_list:
if args.verbose:
print(' n_points:', n_points)
exp_res_dict['InterpolationAccDetGNM'][name][n][n_points] = dict()
for i in range(args.n_starts):
if args.verbose:
print(' start #:', i + 1)
start = time.time()
_, f_vals, nabla_f_2_norm_vals, _, _ = AccDetGNM(oracle_class(n), args.N_iter, x_0_dict[n][i], args.L_0, True, None, "Interpolation")
start = time.time() - start
exp_res_dict['InterpolationAccDetGNM'][name][n][n_points][i] = {'f_vals': f_vals,
'nabla_f_2_norm_vals': nabla_f_2_norm_vals,
'avg_time_s': start / len(f_vals),
'time_s': start}
del _, f_vals, nabla_f_2_norm_vals, start
gc.collect()
if args.verbose:
print("Started SamplingAccDetGNM!")
exp_res_dict['SamplingAccDetGNM'] = dict()
for oracle_class, name in [(NesterovSkokovOracle, 'Nesterov-Skokov'), (HatOracle, 'Hat'), (PLOracle, 'PL')]:
if args.verbose:
print('Oracle:', name)
exp_res_dict['SamplingAccDetGNM'][name] = dict()
for n in args.n_dims:
if args.verbose:
print(' n:', n)
exp_res_dict['SamplingAccDetGNM'][name][n] = dict()
for n_points in args.n_points_list:
if args.verbose:
print(' n_points:', n_points)
exp_res_dict['SamplingAccDetGNM'][name][n][n_points] = dict()
for i in range(args.n_starts):
if args.verbose:
print(' start #:', i + 1)
start = time.time()
_, f_vals, nabla_f_2_norm_vals, _, _ = AccDetGNM(oracle_class(n), args.N_iter, x_0_dict[n][i], args.L_0, True, None, "Sampling")
start = time.time() - start
exp_res_dict['SamplingAccDetGNM'][name][n][n_points][i] = {'f_vals': f_vals,
'nabla_f_2_norm_vals': nabla_f_2_norm_vals,
'avg_time_s': start / len(f_vals),
'time_s': start}
del _, f_vals, nabla_f_2_norm_vals, start
gc.collect()
if args.verbose:
print("Started GoldenRatioAccDetGNM!")
exp_res_dict['GoldenRatioAccDetGNM'] = dict()
for oracle_class, name in [(NesterovSkokovOracle, 'Nesterov-Skokov'), (HatOracle, 'Hat'), (PLOracle, 'PL')]:
if args.verbose:
print('Oracle:', name)
exp_res_dict['GoldenRatioAccDetGNM'][name] = dict()
for n in args.n_dims:
if args.verbose:
print(' n:', n)
exp_res_dict['GoldenRatioAccDetGNM'][name][n] = dict()
for i in range(args.n_starts):
if args.verbose:
print(' start #:', i + 1)
start = time.time()
_, f_vals, nabla_f_2_norm_vals, _, _, n_iter_list = AccDetGNM(oracle_class(n), args.N_iter, x_0_dict[n][i], args.L_0, True, None, "GoldenRatio")
start = time.time() - start
exp_res_dict['GoldenRatioAccDetGNM'][name][n][i] = {'f_vals': f_vals,
'nabla_f_2_norm_vals': nabla_f_2_norm_vals,
'n_iter_list': n_iter_list,
'avg_time_s': start / len(f_vals),
'time_s': start}
del _, f_vals, nabla_f_2_norm_vals, n_iter_list, start
gc.collect()
return exp_res_dict
| 53.472826
| 195
| 0.476979
|
from optimizers import *
import gc
import time
def experiment_runner(args, x_0_dict):
"""
Runner routine which performs the whole experiment set.
Parameters
----------
args : populated namespace object from ArgumentParser
        Parsed command-line arguments describing the experiment configuration (dimensions, starts, iteration budget, etc.).
x_0_dict : dict
The dictionary of initial points x.
Returns
-------
dict
Aggregated experiment data.
"""
gc.enable()
gc.collect()
exp_res_dict = dict()
if args.verbose:
print("Started DetGNM!")
exp_res_dict['DetGNM'] = dict()
for oracle_class, name in [(NesterovSkokovOracle, 'Nesterov-Skokov'), (HatOracle, 'Hat'), (PLOracle, 'PL')]:
if args.verbose:
print('Oracle:', name)
exp_res_dict['DetGNM'][name] = dict()
for n in args.n_dims:
if args.verbose:
print(' n:', n)
exp_res_dict['DetGNM'][name][n] = dict()
for i in range(args.n_starts):
if args.verbose:
print(' start #:', i + 1)
start = time.time()
_, f_vals, nabla_f_2_norm_vals, _, _ = DetGNM(oracle_class(n), args.N_iter, x_0_dict[n][i], args.L_0, True, None)
start = time.time() - start
exp_res_dict['DetGNM'][name][n][i] = {'f_vals': f_vals, 'nabla_f_2_norm_vals': nabla_f_2_norm_vals, 'avg_time_s': start / len(f_vals), 'time_s': start}
del _, f_vals, nabla_f_2_norm_vals, start
gc.collect()
if args.verbose:
print("Started ArmijoAccDetGNM!")
exp_res_dict['ArmijoAccDetGNM'] = dict()
for oracle_class, name in [(NesterovSkokovOracle, 'Nesterov-Skokov'), (HatOracle, 'Hat'), (PLOracle, 'PL')]:
if args.verbose:
print('Oracle:', name)
exp_res_dict['ArmijoAccDetGNM'][name] = dict()
for n in args.n_dims:
if args.verbose:
print(' n:', n)
exp_res_dict['ArmijoAccDetGNM'][name][n] = dict()
for pair_num, (c1, c2) in enumerate(zip(args.c1_list, args.c2_list)):
if args.verbose:
print(' c1 = {:.4f}, c2 = {:.4f}:'.format(c1, c2))
exp_res_dict['ArmijoAccDetGNM'][name][n][pair_num] = dict()
for i in range(args.n_starts):
if args.verbose:
print(' start #:', i + 1)
start = time.time()
_, f_vals, nabla_f_2_norm_vals, _, _, local_steps_list, spec_steps_list = AccDetGNM(oracle_class(n), args.N_iter, x_0_dict[n][i], args.L_0, True, None, "Armijo", c1=c1, c2=c2)
start = time.time() - start
exp_res_dict['ArmijoAccDetGNM'][name][n][pair_num][i] = {'f_vals': f_vals,
'nabla_f_2_norm_vals': nabla_f_2_norm_vals,
'local_steps_list': local_steps_list,
'spec_steps_list': spec_steps_list,
'avg_time_s': start / len(f_vals),
'time_s': start}
del _, f_vals, nabla_f_2_norm_vals, local_steps_list, spec_steps_list, start
gc.collect()
if args.verbose:
print("Started ExtrapolationAccDetGNM!")
exp_res_dict['ExtrapolationAccDetGNM'] = dict()
for oracle_class, name in [(NesterovSkokovOracle, 'Nesterov-Skokov'), (HatOracle, 'Hat'), (PLOracle, 'PL')]:
if args.verbose:
print('Oracle:', name)
exp_res_dict['ExtrapolationAccDetGNM'][name] = dict()
for n in args.n_dims:
if args.verbose:
print(' n:', n)
exp_res_dict['ExtrapolationAccDetGNM'][name][n] = dict()
for i in range(args.n_starts):
if args.verbose:
print(' start #:', i + 1)
start = time.time()
_, f_vals, nabla_f_2_norm_vals, _, _, n_iter_list = AccDetGNM(oracle_class(n), args.N_iter, x_0_dict[n][i], args.L_0, True, None, "Extrapolation")
start = time.time() - start
exp_res_dict['ExtrapolationAccDetGNM'][name][n][i] = {'f_vals': f_vals,
'nabla_f_2_norm_vals': nabla_f_2_norm_vals,
'n_iter_list': n_iter_list,
'avg_time_s': start / len(f_vals),
'time_s': start}
del _, f_vals, nabla_f_2_norm_vals, n_iter_list, start
gc.collect()
if args.verbose:
print("Started InterpolationAccDetGNM!")
exp_res_dict['InterpolationAccDetGNM'] = dict()
for oracle_class, name in [(NesterovSkokovOracle, 'Nesterov-Skokov'), (HatOracle, 'Hat'), (PLOracle, 'PL')]:
if args.verbose:
print('Oracle:', name)
exp_res_dict['InterpolationAccDetGNM'][name] = dict()
for n in args.n_dims:
if args.verbose:
print(' n:', n)
exp_res_dict['InterpolationAccDetGNM'][name][n] = dict()
for n_points in args.n_points_list:
if args.verbose:
print(' n_points:', n_points)
exp_res_dict['InterpolationAccDetGNM'][name][n][n_points] = dict()
for i in range(args.n_starts):
if args.verbose:
print(' start #:', i + 1)
start = time.time()
_, f_vals, nabla_f_2_norm_vals, _, _ = AccDetGNM(oracle_class(n), args.N_iter, x_0_dict[n][i], args.L_0, True, None, "Interpolation")
start = time.time() - start
exp_res_dict['InterpolationAccDetGNM'][name][n][n_points][i] = {'f_vals': f_vals,
'nabla_f_2_norm_vals': nabla_f_2_norm_vals,
'avg_time_s': start / len(f_vals),
'time_s': start}
del _, f_vals, nabla_f_2_norm_vals, start
gc.collect()
if args.verbose:
print("Started SamplingAccDetGNM!")
exp_res_dict['SamplingAccDetGNM'] = dict()
for oracle_class, name in [(NesterovSkokovOracle, 'Nesterov-Skokov'), (HatOracle, 'Hat'), (PLOracle, 'PL')]:
if args.verbose:
print('Oracle:', name)
exp_res_dict['SamplingAccDetGNM'][name] = dict()
for n in args.n_dims:
if args.verbose:
print(' n:', n)
exp_res_dict['SamplingAccDetGNM'][name][n] = dict()
for n_points in args.n_points_list:
if args.verbose:
print(' n_points:', n_points)
exp_res_dict['SamplingAccDetGNM'][name][n][n_points] = dict()
for i in range(args.n_starts):
if args.verbose:
print(' start #:', i + 1)
start = time.time()
_, f_vals, nabla_f_2_norm_vals, _, _ = AccDetGNM(oracle_class(n), args.N_iter, x_0_dict[n][i], args.L_0, True, None, "Sampling")
start = time.time() - start
exp_res_dict['SamplingAccDetGNM'][name][n][n_points][i] = {'f_vals': f_vals,
'nabla_f_2_norm_vals': nabla_f_2_norm_vals,
'avg_time_s': start / len(f_vals),
'time_s': start}
del _, f_vals, nabla_f_2_norm_vals, start
gc.collect()
if args.verbose:
print("Started GoldenRatioAccDetGNM!")
exp_res_dict['GoldenRatioAccDetGNM'] = dict()
for oracle_class, name in [(NesterovSkokovOracle, 'Nesterov-Skokov'), (HatOracle, 'Hat'), (PLOracle, 'PL')]:
if args.verbose:
print('Oracle:', name)
exp_res_dict['GoldenRatioAccDetGNM'][name] = dict()
for n in args.n_dims:
if args.verbose:
print(' n:', n)
exp_res_dict['GoldenRatioAccDetGNM'][name][n] = dict()
for i in range(args.n_starts):
if args.verbose:
print(' start #:', i + 1)
start = time.time()
_, f_vals, nabla_f_2_norm_vals, _, _, n_iter_list = AccDetGNM(oracle_class(n), args.N_iter, x_0_dict[n][i], args.L_0, True, None, "GoldenRatio")
start = time.time() - start
exp_res_dict['GoldenRatioAccDetGNM'][name][n][i] = {'f_vals': f_vals,
'nabla_f_2_norm_vals': nabla_f_2_norm_vals,
'n_iter_list': n_iter_list,
'avg_time_s': start / len(f_vals),
'time_s': start}
del _, f_vals, nabla_f_2_norm_vals, n_iter_list, start
gc.collect()
return exp_res_dict
| 0
| 0
| 0
|
3f735b40adc91055ec645c76cf232232a3b35167
| 344
|
py
|
Python
|
testing/mpy_dummy/utime.py
|
dpm76/Microvacbot
|
b89c7e7e0e1e7b21d946d2f6a312c217c08badd6
|
[
"MIT"
] | 1
|
2020-04-02T14:32:53.000Z
|
2020-04-02T14:32:53.000Z
|
testing/mpy_dummy/utime.py
|
dpm76/Microvacbot
|
b89c7e7e0e1e7b21d946d2f6a312c217c08badd6
|
[
"MIT"
] | null | null | null |
testing/mpy_dummy/utime.py
|
dpm76/Microvacbot
|
b89c7e7e0e1e7b21d946d2f6a312c217c08badd6
|
[
"MIT"
] | null | null | null |
import time
| 11.096774
| 35
| 0.517442
|
import time
def sleep(s):
time.sleep(s)
def sleep_ms(ms):
time.sleep(ms/1e3)
def sleep_us(us):
time.sleep(us/1e6)
def ticks_us():
    return time.time() * 1e6
def ticks_ms():
    return time.time() * 1e3
def ticks_diff(timeEnd, timeStart):
return timeEnd-timeStart
| 153
| 0
| 154
|
51311d98800bb3f9984f334fadd5802ed4789387
| 509
|
py
|
Python
|
docs/components_page/components/button/usage.py
|
glsdown/dash-bootstrap-components
|
0ebea4f7de43975f6e3a2958359c4480ae1d4927
|
[
"Apache-2.0"
] | 776
|
2019-02-07T19:36:59.000Z
|
2022-03-31T05:53:04.000Z
|
docs/components_page/components/button/usage.py
|
glsdown/dash-bootstrap-components
|
0ebea4f7de43975f6e3a2958359c4480ae1d4927
|
[
"Apache-2.0"
] | 350
|
2019-02-05T10:42:19.000Z
|
2022-03-31T19:23:35.000Z
|
docs/components_page/components/button/usage.py
|
glsdown/dash-bootstrap-components
|
0ebea4f7de43975f6e3a2958359c4480ae1d4927
|
[
"Apache-2.0"
] | 219
|
2019-02-10T13:46:25.000Z
|
2022-03-23T17:03:39.000Z
|
import dash_bootstrap_components as dbc
from dash import Input, Output, html
button = html.Div(
[
dbc.Button(
"Click me", id="example-button", className="me-2", n_clicks=0
),
html.Span(id="example-output", style={"verticalAlign": "middle"}),
]
)
@app.callback(
Output("example-output", "children"), [Input("example-button", "n_clicks")]
)
| 23.136364
| 79
| 0.607073
|
import dash_bootstrap_components as dbc
from dash import Input, Output, html
button = html.Div(
[
dbc.Button(
"Click me", id="example-button", className="me-2", n_clicks=0
),
html.Span(id="example-output", style={"verticalAlign": "middle"}),
]
)
@app.callback(
Output("example-output", "children"), [Input("example-button", "n_clicks")]
)
def on_button_click(n):
if n is None:
return "Not clicked."
else:
return f"Clicked {n} times."
| 97
| 0
| 22
|
2946bf995e423ab9f90fecaffdfbde0077702bb0
| 284
|
py
|
Python
|
blackdog/forms.py
|
UncleGoogle/dafipost
|
5e19d6a69dde9b7e5267bbdba680906bdb5e56eb
|
[
"MIT"
] | null | null | null |
blackdog/forms.py
|
UncleGoogle/dafipost
|
5e19d6a69dde9b7e5267bbdba680906bdb5e56eb
|
[
"MIT"
] | 1
|
2021-02-08T01:44:32.000Z
|
2021-02-08T01:44:32.000Z
|
blackdog/forms.py
|
UncleGoogle/dafipost
|
5e19d6a69dde9b7e5267bbdba680906bdb5e56eb
|
[
"MIT"
] | null | null | null |
from django import forms
from .models import Bark
| 20.285714
| 78
| 0.623239
|
from django import forms
from .models import Bark
class BarkForm(forms.ModelForm):
class Meta:
model = Bark
fields = ['content']
def clean_views(self):
"""Every new form or updated form is submitted we reset the counter"""
return 0
| 0
| 205
| 23
|
4fef830c6935b201ebe3270607f09b48471f7f26
| 4,094
|
py
|
Python
|
python/functions/calculateInterest.py
|
Z88897050/emulateSamples
|
57a519422e0e15aadc301470ac6a8397848d93c3
|
[
"Apache-2.0"
] | null | null | null |
python/functions/calculateInterest.py
|
Z88897050/emulateSamples
|
57a519422e0e15aadc301470ac6a8397848d93c3
|
[
"Apache-2.0"
] | 1
|
2021-09-21T14:38:33.000Z
|
2021-09-21T14:38:33.000Z
|
python/functions/calculateInterest.py
|
Z88897050/emulateSamples
|
57a519422e0e15aadc301470ac6a8397848d93c3
|
[
"Apache-2.0"
] | 1
|
2019-05-17T11:31:47.000Z
|
2019-05-17T11:31:47.000Z
|
# Loan calculations for equal-principal and equal-installment (annuity) repayment
import math
area = float(input('Enter the floor area (m^2): '))
unit_price = float(input('Enter the unit price (yuan): '))
year_interest = float(input('Enter the annual loan interest rate (%): '))
# '''
# File of monthly repayments under the equal-principal method, formatted as:
# Month 1, 5700.79 (yuan)
# Month 2, 5691.69 (yuan)
# Month 3, 5682.59 (yuan)
# Month 4, 5673.5 (yuan)
# ......
#
# '''
#
#
# # Read the file contents, i.e. each month's repayment data
# def get_average_capital():
#     file_path = input('Enter the path of the equal-principal repayment file: ')
# average_capital_data = []
# file_object = open(file_path, 'r')
# try:
# for line in file_object:
# average_capital_data.append(float(line[line.find(',') + 1:line.find('(')]))
# finally:
# file_object.close()
# print(average_capital_data)
# return average_capital_data
'''
Equal-principal repayment method:
Let A = loan amount, B = monthly interest rate, C = number of monthly payments, D = monthly payment, E = total interest paid
Principal repaid each month = loan amount / number of payments = A/C
Interest for month 1 = remaining principal * monthly rate = A*B
Interest for month 2: (loan amount - principal already repaid in month 1) * monthly rate = (A - A/C)*B
Interest for month 3: (A - 2*A/C)*B
...
Interest for month C: (A - (C-1)*A/C)*B
Summing the above: E = B*(A*C - [A + 2A + 3A + ... + (C-1)A]/C) = B*(A*C - A*(C-1)/2) = A*B*(C+1)/2
Payment in month n: D = principal + interest for month n = A/C + (A - (n-1)*A/C)*B = (1 + B*(C-n+1))*A/C
'''
# Equal-principal calculation, 30 years by default
'''
Equal-installment (annuity) repayment method:
Let A = loan amount, B = monthly interest rate, C = number of monthly payments, D = monthly payment, E = total interest paid
Interest for month 1 (loan amount * monthly rate): A*B
Interest for month 2 ((loan amount - principal repaid in month 1) * monthly rate): (A - (D - A*B))*B = (A*B - D)*(1+B)^1 + D
Interest for month 3: {A - (D - A*B) - [D - (A*B - D)*(1+B)^1 - D]}*B = (A*B - D)*(1+B)^2 + D
Interest for month 4: (A*B - D)*(1+B)^3 + D
...
Interest for month C: (A*B - D)*(1+B)^(C-1) + D
Summing the above: E = (A*B - D)*[(1+B)^0 + (1+B)^1 + ... + (1+B)^(C-1)] + C*D = (A*B - D)*((1+B)^C - 1)/B + C*D
The total interest also satisfies E = C*D - A; setting the two expressions equal gives
Monthly payment: D = A*B*(1+B)^C / ((1+B)^C - 1)
Total interest paid: E = C*D - A = C*A*B*(1+B)^C / ((1+B)^C - 1) - A
Total repayment: E + A = C*A*B*(1+B)^C / ((1+B)^C - 1)
Note: a^b denotes a to the power b.
'''
# Equal-installment calculation, 30 years by default
total = unit_price * area
loan = int((total - total * 0.3) / 10000) * 10000
first_pay = total - loan
print('Total house price:', total, 'Loan amount:', loan, 'Down payment:', first_pay)
print("=============== Total repaid so far, equal principal (principal + interest) ================")
for j in range(1, 31):
# print(first_pay + prepayment_average_capital(j), first_pay + prepayment_average_capital_plus_interest(j), total)
print(first_pay + prepayment_average_capital(j))
print("===============等额本息已还款总额(本金+利息)=================")
for k in range(1, 31):
print(first_pay + prepayment_average_capital_plus_interest(k))
print("===============等额本息已还款总额-等额本金已还款总额======")
for m in range(1, 31):
print(prepayment_average_capital_plus_interest(m) - prepayment_average_capital(m))
print("===============一次性还清剩余贷款本金======")
for n in range(1, 31):
print(loan - (n / 30) * loan)
print("===============总房价=================")
for l in range(1, 31):
print(total)
print("===============等额本金总还款额======")
for o in range(1, 31):
t1 = first_pay + prepayment_average_capital(o)
t2 = loan - (o / 30) * loan
print(t1 + t2)
print("===============等额本息总还款额======")
for p in range(1, 31):
t1 = first_pay + prepayment_average_capital_plus_interest(p)
t2 = loan - (p / 30) * loan
print(t1 + t2)
# # Given a number of years, compute what has already been paid to the bank under equal principal (default 15 years, early payoff)
# prepayment_average_capital_data = prepayment_average_capital()
# # Given a number of years, compute what has already been paid to the bank under equal installment (default 15 years, early payoff)
# prepayment_average_capital_plus_interest_data = prepayment_average_capital_plus_interest()
#
# print('Total outlay, equal principal:', first_pay + prepayment_average_capital_data)
# print('Total outlay, equal installment:', first_pay + prepayment_average_capital_plus_interest_data)
# print('Interest saved:', prepayment_average_capital_plus_interest_data - prepayment_average_capital_data)
| 27.662162
| 118
| 0.610405
|
# Loan calculations for equal-principal and equal-installment (annuity) repayment
import math
area = float(input('Enter the floor area (m^2): '))
unit_price = float(input('Enter the unit price (yuan): '))
year_interest = float(input('Enter the annual loan interest rate (%): '))
# '''
# File of monthly repayments under the equal-principal method, formatted as:
# Month 1, 5700.79 (yuan)
# Month 2, 5691.69 (yuan)
# Month 3, 5682.59 (yuan)
# Month 4, 5673.5 (yuan)
# ......
#
# '''
#
#
# # Read the file contents, i.e. each month's repayment data
# def get_average_capital():
#     file_path = input('Enter the path of the equal-principal repayment file: ')
# average_capital_data = []
# file_object = open(file_path, 'r')
# try:
# for line in file_object:
# average_capital_data.append(float(line[line.find(',') + 1:line.find('(')]))
# finally:
# file_object.close()
# print(average_capital_data)
# return average_capital_data
'''
Equal-principal repayment method:
Let A = loan amount, B = monthly interest rate, C = number of monthly payments, D = monthly payment, E = total interest paid
Principal repaid each month = loan amount / number of payments = A/C
Interest for month 1 = remaining principal * monthly rate = A*B
Interest for month 2: (loan amount - principal already repaid in month 1) * monthly rate = (A - A/C)*B
Interest for month 3: (A - 2*A/C)*B
...
Interest for month C: (A - (C-1)*A/C)*B
Summing the above: E = B*(A*C - [A + 2A + 3A + ... + (C-1)A]/C) = B*(A*C - A*(C-1)/2) = A*B*(C+1)/2
Payment in month n: D = principal + interest for month n = A/C + (A - (n-1)*A/C)*B = (1 + B*(C-n+1))*A/C
'''
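# A hedged worked example of the equal-principal formulas above (added for
# illustration; the loan figures are made-up sample values, independent of the
# user input read elsewhere in this script):
_ex_A = 500000.0                      # sample loan amount (yuan)
_ex_B = 0.05 / 12                     # sample monthly rate (5% annual)
_ex_C = 360                           # number of monthly payments (30 years)
_ex_total_interest = _ex_A * _ex_B * (_ex_C + 1) / 2        # E = A*B*(C+1)/2
_ex_month_1_payment = (1 + _ex_B * _ex_C) * _ex_A / _ex_C   # D for n = 1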
def get_average_capital():
average_capital_data = []
for i in range(0, 360):
average_capital_data.append((loan / 360) + ((loan - loan * i / 360) * (year_interest / (12 * 100))))
return average_capital_data
# Equal-principal calculation, 30 years by default
def prepayment_average_capital(year=30):
arr_float = get_average_capital()
count = 0
pay = 0
# pay = loan - (year / 30) * loan
for i in arr_float:
pay += i
count += 1
if count == year * 12:
break
return pay
'''
Equal-installment (annuity) repayment method:
Let A = loan amount, B = monthly interest rate, C = number of monthly payments, D = monthly payment, E = total interest paid
Interest for month 1 (loan amount * monthly rate): A*B
Interest for month 2 ((loan amount - principal repaid in month 1) * monthly rate): (A - (D - A*B))*B = (A*B - D)*(1+B)^1 + D
Interest for month 3: {A - (D - A*B) - [D - (A*B - D)*(1+B)^1 - D]}*B = (A*B - D)*(1+B)^2 + D
Interest for month 4: (A*B - D)*(1+B)^3 + D
...
Interest for month C: (A*B - D)*(1+B)^(C-1) + D
Summing the above: E = (A*B - D)*[(1+B)^0 + (1+B)^1 + ... + (1+B)^(C-1)] + C*D = (A*B - D)*((1+B)^C - 1)/B + C*D
The total interest also satisfies E = C*D - A; setting the two expressions equal gives
Monthly payment: D = A*B*(1+B)^C / ((1+B)^C - 1)
Total interest paid: E = C*D - A = C*A*B*(1+B)^C / ((1+B)^C - 1) - A
Total repayment: E + A = C*A*B*(1+B)^C / ((1+B)^C - 1)
Note: a^b denotes a to the power b.
'''
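# A hedged worked example of the annuity formula above (added for illustration;
# the figures are made-up sample values, independent of the user input):
import math as _ex_math
_ex_A2 = 500000.0                     # sample loan amount (yuan)
_ex_B2 = 0.05 / 12                    # sample monthly rate (5% annual)
_ex_C2 = 360                          # number of monthly payments (30 years)
_ex_growth = _ex_math.pow(1 + _ex_B2, _ex_C2)              # (1+B)^C
_ex_D2 = _ex_A2 * _ex_B2 * _ex_growth / (_ex_growth - 1)   # monthly payment D
_ex_E2 = _ex_C2 * _ex_D2 - _ex_A2                          # total interest E = C*D - A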
# Equal-installment calculation, 30 years by default
def prepayment_average_capital_plus_interest(year=30):
# pay = loan - (year / 30) * loan
pay = 0
month_interest = year_interest / (12 * 100)
month_pay = (loan * month_interest * math.pow((1 + month_interest), 360)) / (
math.pow((1 + month_interest), 360) - 1)
for i in range(1, year * 12 + 1):
pay += month_pay
return pay
total = unit_price * area
loan = int((total - total * 0.3) / 10000) * 10000
first_pay = total - loan
print('Total house price:', total, 'Loan amount:', loan, 'Down payment:', first_pay)
print("=============== Total repaid so far, equal principal (principal + interest) ================")
for j in range(1, 31):
# print(first_pay + prepayment_average_capital(j), first_pay + prepayment_average_capital_plus_interest(j), total)
print(first_pay + prepayment_average_capital(j))
print("===============等额本息已还款总额(本金+利息)=================")
for k in range(1, 31):
print(first_pay + prepayment_average_capital_plus_interest(k))
print("===============等额本息已还款总额-等额本金已还款总额======")
for m in range(1, 31):
print(prepayment_average_capital_plus_interest(m) - prepayment_average_capital(m))
print("===============一次性还清剩余贷款本金======")
for n in range(1, 31):
print(loan - (n / 30) * loan)
print("===============总房价=================")
for l in range(1, 31):
print(total)
print("===============等额本金总还款额======")
for o in range(1, 31):
t1 = first_pay + prepayment_average_capital(o)
t2 = loan - (o / 30) * loan
print(t1 + t2)
print("===============等额本息总还款额======")
for p in range(1, 31):
t1 = first_pay + prepayment_average_capital_plus_interest(p)
t2 = loan - (p / 30) * loan
print(t1 + t2)
# # Given a number of years, compute what has already been paid to the bank under equal principal (default 15 years, early payoff)
# prepayment_average_capital_data = prepayment_average_capital()
# # Given a number of years, compute what has already been paid to the bank under equal installment (default 15 years, early payoff)
# prepayment_average_capital_plus_interest_data = prepayment_average_capital_plus_interest()
#
# print('Total outlay, equal principal:', first_pay + prepayment_average_capital_data)
# print('Total outlay, equal installment:', first_pay + prepayment_average_capital_plus_interest_data)
# print('Interest saved:', prepayment_average_capital_plus_interest_data - prepayment_average_capital_data)
| 789
| 0
| 68
|
10f7ff6712dc6b5dcc4a16b9b8b4446dac1019fe
| 4,949
|
py
|
Python
|
cmsplugin_blog_categories/migrations/0001_initial.py
|
bitmazk/cmsplugin-blog-categories
|
05e2fa3d50a8501f3f3f9cab784269838079cc37
|
[
"MIT"
] | null | null | null |
cmsplugin_blog_categories/migrations/0001_initial.py
|
bitmazk/cmsplugin-blog-categories
|
05e2fa3d50a8501f3f3f9cab784269838079cc37
|
[
"MIT"
] | 3
|
2020-02-11T22:01:45.000Z
|
2021-06-10T17:38:13.000Z
|
cmsplugin_blog_categories/migrations/0001_initial.py
|
bitmazk/cmsplugin-blog-categories
|
05e2fa3d50a8501f3f3f9cab784269838079cc37
|
[
"MIT"
] | null | null | null |
# flake8: noqa
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
| 53.793478
| 137
| 0.62255
|
# flake8: noqa
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Category'
db.create_table('cmsplugin_blog_categories_category', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('creation_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=512)),
))
db.send_create_signal('cmsplugin_blog_categories', ['Category'])
# Adding model 'CategoryTitle'
db.create_table('cmsplugin_blog_categories_categorytitle', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=256)),
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cmsplugin_blog_categories.Category'])),
('language', self.gf('django.db.models.fields.CharField')(max_length=5)),
))
db.send_create_signal('cmsplugin_blog_categories', ['CategoryTitle'])
# Adding model 'EntryCategory'
db.create_table('cmsplugin_blog_categories_entrycategory', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('entry', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cmsplugin_blog.Entry'])),
('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cmsplugin_blog_categories.Category'])),
))
db.send_create_signal('cmsplugin_blog_categories', ['EntryCategory'])
# Adding unique constraint on 'EntryCategory', fields ['entry', 'category']
db.create_unique('cmsplugin_blog_categories_entrycategory', ['entry_id', 'category_id'])
def backwards(self, orm):
# Removing unique constraint on 'EntryCategory', fields ['entry', 'category']
db.delete_unique('cmsplugin_blog_categories_entrycategory', ['entry_id', 'category_id'])
# Deleting model 'Category'
db.delete_table('cmsplugin_blog_categories_category')
# Deleting model 'CategoryTitle'
db.delete_table('cmsplugin_blog_categories_categorytitle')
# Deleting model 'EntryCategory'
db.delete_table('cmsplugin_blog_categories_entrycategory')
models = {
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cmsplugin_blog.entry': {
'Meta': {'ordering': "('-pub_date',)", 'object_name': 'Entry'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'placeholders': ('djangocms_utils.fields.M2MPlaceholderField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'tags': ('tagging.fields.TagField', [], {})
},
'cmsplugin_blog_categories.category': {
'Meta': {'object_name': 'Category'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '512'})
},
'cmsplugin_blog_categories.categorytitle': {
'Meta': {'object_name': 'CategoryTitle'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmsplugin_blog_categories.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'cmsplugin_blog_categories.entrycategory': {
'Meta': {'unique_together': "(('entry', 'category'),)", 'object_name': 'EntryCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmsplugin_blog_categories.Category']"}),
'entry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmsplugin_blog.Entry']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['cmsplugin_blog_categories']
| 2,250
| 2,530
| 23
|
715bfc9c37739cc5dc1ee47ee653d0ae23d340ef
| 5,326
|
py
|
Python
|
src/analyze/track/analyze_race.py
|
kishorekolli/deep_racer_guru
|
0a0a56103f395f958e8177ee0bd5ae1481f93d98
|
[
"MIT"
] | 9
|
2020-07-31T03:04:24.000Z
|
2021-11-02T13:44:11.000Z
|
src/analyze/track/analyze_race.py
|
kishorekolli/deep_racer_guru
|
0a0a56103f395f958e8177ee0bd5ae1481f93d98
|
[
"MIT"
] | 137
|
2020-08-04T08:04:11.000Z
|
2021-11-10T10:35:58.000Z
|
src/analyze/track/analyze_race.py
|
kishorekolli/deep_racer_guru
|
0a0a56103f395f958e8177ee0bd5ae1481f93d98
|
[
"MIT"
] | 11
|
2020-06-24T23:38:38.000Z
|
2021-11-02T14:42:31.000Z
|
#
# DeepRacer Guru
#
# Version 3.0 onwards
#
# Copyright (c) 2021 dmh23
#
import threading
import time
import src.utils.geometry as geometry
import tkinter as tk
from src.analyze.track.track_analyzer import TrackAnalyzer
from src.episode.episode import Episode
from src.graphics.track_graphics import TrackGraphics
from src.analyze.core.controls import VideoControls, LapTimeControl
from src.personalize.configuration.appearance import RACE_COLOURS
| 37.507042
| 111
| 0.665039
|
#
# DeepRacer Guru
#
# Version 3.0 onwards
#
# Copyright (c) 2021 dmh23
#
import threading
import time
import src.utils.geometry as geometry
import tkinter as tk
from src.analyze.track.track_analyzer import TrackAnalyzer
from src.episode.episode import Episode
from src.graphics.track_graphics import TrackGraphics
from src.analyze.core.controls import VideoControls, LapTimeControl
from src.personalize.configuration.appearance import RACE_COLOURS
class AnalyzeRace(TrackAnalyzer):
def __init__(self, guru_parent_redraw, track_graphics :TrackGraphics, control_frame :tk.Frame):
super().__init__(guru_parent_redraw, track_graphics, control_frame)
self._video_controls = VideoControls(self._button_pressed, control_frame)
self._display_lap_time = LapTimeControl(control_frame)
self._timer = AnalyzeRace.Timer(self._draw)
# self._race_episode = self.all_episodes[0]
def build_control_frame(self, control_frame):
self._display_lap_time.add_to_control_frame()
self._video_controls.add_to_control_frame()
def _button_pressed(self, button_type):
if button_type == VideoControls.STOP:
self._timer.stop()
elif button_type == VideoControls.RESET:
self._timer.reset()
elif button_type == VideoControls.PLAY:
self._timer.play()
def redraw(self):
self._timer.redraw()
def warning_filtered_episodes_changed(self):
self._timer.reset(False)
def warning_lost_control(self):
self._timer.stop()
def _draw(self, simulation_time):
self._display_lap_time.show_time(simulation_time)
self.track_graphics.prepare_to_remove_old_cars()
all_done = True
if self.filtered_episodes:
for i, episode in enumerate(self.filtered_episodes[0:len(RACE_COLOURS)]):
self._draw_episode_car(episode, simulation_time, RACE_COLOURS[i])
if simulation_time < episode.time_taken:
all_done = False
self.track_graphics.remove_cars()
if all_done:
self._timer.soft_stop()
def _draw_episode_car(self, episode: Episode, simulation_time: float, colour: str):
event_index = episode.get_latest_event_index_on_or_before(simulation_time)
before_event = episode.events[event_index]
if event_index == len(episode.events) - 1:
self.track_graphics.draw_car(before_event.x, before_event.y, colour, before_event.heading)
else:
after_event = episode.events[event_index + 1]
event_x_gap = after_event.x - before_event.x
event_y_gap = after_event.y - before_event.y
event_time_gap = after_event.time_elapsed - before_event.time_elapsed
event_heading_gap = geometry.get_turn_between_directions(before_event.heading, after_event.heading)
ratio = (simulation_time - before_event.time_elapsed) / event_time_gap
x = before_event.x + ratio * event_x_gap
y = before_event.y + ratio * event_y_gap
heading = geometry.get_angle_in_proper_range(before_event.heading + ratio * event_heading_gap)
self.track_graphics.draw_car(x, y, colour, heading)
class Timer:
def __init__(self, redraw_callback: callable):
self._machine_start_time = 0.0
self._simulation_start_time = 0.0
self._simulation_stop_time = 0.0
self._keep_running = False
self._is_still_running = False
self._thread = None
self._redraw_callback = redraw_callback
def stop(self):
if self._keep_running:
self._keep_running = False
if self._is_still_running:
self._thread.join(0.2)
def soft_stop(self):
self._keep_running = False
def play(self):
if not self._keep_running and not self._is_still_running:
self._keep_running = True
self._thread = threading.Thread(target=self._run_until_stopped)
self._thread.daemon = True # Set as daemon so thread is killed if main GUI is closed
self._thread.start()
def reset(self, redraw=True):
self.stop()
self._simulation_stop_time = 0.0
self._simulation_start_time = 0.0
if redraw:
self._redraw_callback(0.0)
def redraw(self):
if self._is_still_running:
self._redraw_callback(self.get_current_simulation_time())
else:
self._redraw_callback(self._simulation_stop_time)
def _run_until_stopped(self):
self._is_still_running = True
self._simulation_start_time = self._simulation_stop_time
self._machine_start_time = time.time()
while self._keep_running:
simulation_time = self.get_current_simulation_time()
self._redraw_callback(simulation_time)
time.sleep(0.02)
self._simulation_stop_time = self.get_current_simulation_time()
self._is_still_running = False
def get_current_simulation_time(self):
return time.time() - self._machine_start_time + self._simulation_start_time
| 4,356
| 493
| 23
|
f0e86e901437c0295d3035b8e8488571c49c3943
| 1,821
|
py
|
Python
|
algospot/lec11/[cutz]gamecover.py
|
cutz-j/AlgorithmStudy
|
de0f81220e29bd5e109d174800f507b12a3bee36
|
[
"MIT"
] | 3
|
2019-11-26T14:31:01.000Z
|
2020-01-10T18:19:46.000Z
|
algospot/lec11/[cutz]gamecover.py
|
cutz-j/AlgorithmStudy
|
de0f81220e29bd5e109d174800f507b12a3bee36
|
[
"MIT"
] | null | null | null |
algospot/lec11/[cutz]gamecover.py
|
cutz-j/AlgorithmStudy
|
de0f81220e29bd5e109d174800f507b12a3bee36
|
[
"MIT"
] | null | null | null |
import sys
cover = [[[0, 0], [1, 0], [0, 1]],
[[0, 0], [0, 1], [1, 1]],
[[0, 0], [1, 0], [1, 1]],
[[0, 0], [1, 0], [1, -1]]]
#rl = lambda: sys.stdin.readline()
rl = input
C = int(rl())
for _ in range(C):
H, W = map(int, rl().split())
block_list = []
white, black = 0, 0
for __ in range(H):
block_row = rl()
tmp_list = []
for b in block_row:
if b == '#':
black += 1
tmp_list.append(1)
else:
white += 1
tmp_list.append(0)
block_list.append(tmp_list)
if white % 3 != 0:
print(0)
continue
if black == (W*H):
print(1)
continue
print(cover_block(block_list))
| 21.939759
| 62
| 0.409665
|
import sys
cover = [[[0, 0], [1, 0], [0, 1]],
[[0, 0], [0, 1], [1, 1]],
[[0, 0], [1, 0], [1, 1]],
[[0, 0], [1, 0], [1, -1]]]
def set_block(board, y, x, cover_type, delta):
# delta 1 --> cover // -1 --> clear
ok = True
for i in range(3):
new_x = x + cover[cover_type][i][1]
new_y = y + cover[cover_type][i][0]
        # the block would run off the board
if new_y < 0 or new_y >= H or new_x < 0 or new_x >= W:
ok = False
        # the cell would be covered more than once
else:
board[new_y][new_x] += delta
if board[new_y][new_x] > 1:
ok = False
return ok
def cover_block(board):
x, y = -1, -1
for i in range(H):
for j in range(W):
if board[i][j] == 0:
x = j
y = i
break
if y != -1:
break
    # base case: no uncovered cell remains
if y == -1:
return 1
ret = 0
    # try all four block orientations
for t in range(4):
        # place the block
if set_block(board, y, x, t, 1) == True:
ret += cover_block(board)
        # remove the block
set_block(board, y, x, t, -1)
return ret
#rl = lambda: sys.stdin.readline()
rl = input
C = int(rl())
for _ in range(C):
H, W = map(int, rl().split())
block_list = []
white, black = 0, 0
for __ in range(H):
block_row = rl()
tmp_list = []
for b in block_row:
if b == '#':
black += 1
tmp_list.append(1)
else:
white += 1
tmp_list.append(0)
block_list.append(tmp_list)
if white % 3 != 0:
print(0)
continue
if black == (W*H):
print(1)
continue
print(cover_block(block_list))
| 1,026
| 0
| 46
|
64c67a4ed2acbb4051af77291436c1f297af4078
| 1,167
|
py
|
Python
|
cmsplugin_soundcloud/cms_plugins.py
|
misli/cmsplugin-soundcloud
|
d50e6fd1fd95916eb5a396fd9a437483d96626b3
|
[
"BSD-3-Clause"
] | null | null | null |
cmsplugin_soundcloud/cms_plugins.py
|
misli/cmsplugin-soundcloud
|
d50e6fd1fd95916eb5a396fd9a437483d96626b3
|
[
"BSD-3-Clause"
] | 1
|
2021-05-13T15:23:09.000Z
|
2021-06-29T09:25:36.000Z
|
cmsplugin_soundcloud/cms_plugins.py
|
misli/cmsplugin-soundcloud
|
d50e6fd1fd95916eb5a396fd9a437483d96626b3
|
[
"BSD-3-Clause"
] | 1
|
2017-06-01T15:46:46.000Z
|
2017-06-01T15:46:46.000Z
|
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .models import SoundCloud, COLORS
from django.conf import settings
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
# use CMSPLUGIN_SOUNDCLOUD_PARAMS to override PARAMS
PARAMS = getattr(settings, 'CMSPLUGIN_SOUNDCLOUD_PARAMS',
'width="100%" height="166" scrolling="no" frameborder="no"')
plugin_pool.register_plugin(SoundCloudPlugin)
| 30.710526
| 78
| 0.709512
|
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .models import SoundCloud, COLORS
from django.conf import settings
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
# use CMSPLUGIN_SOUNDCLOUD_PARAMS to override PARAMS
PARAMS = getattr(settings, 'CMSPLUGIN_SOUNDCLOUD_PARAMS',
'width="100%" height="166" scrolling="no" frameborder="no"')
class SoundCloudPlugin(CMSPluginBase):
model = SoundCloud
name = _('SoundCloud')
text_enabled = True
render_template = 'cmsplugin_soundcloud/cmsplugin_soundcloud.html'
def render(self, context, instance, placeholder):
context.update({'plugin_soundcloud': instance})
return context
def icon_src(self, instance):
return instance.thumbnail_url
#def get_form(self, request, obj=None, **kwargs):
# if obj:
# kwargs['exclude'] = ['url']
# else:
# kwargs['exclude'] = ['title', 'description', 'thumbnail_url']
# return super(SoundCloudPlugin, self).get_form(request, obj, **kwargs)
plugin_pool.register_plugin(SoundCloudPlugin)
| 153
| 498
| 23
|
43cbe788101c59dea633374ac2936b60b77131a3
| 408
|
py
|
Python
|
1/find_inimum_in_rotated_sorted_array.py
|
IronCore864/leetcode
|
a62a4cdde9814ae48997176debcaad537f7ad01f
|
[
"Apache-2.0"
] | 4
|
2018-03-07T02:56:03.000Z
|
2021-06-15T05:43:31.000Z
|
1/find_inimum_in_rotated_sorted_array.py
|
IronCore864/leetcode
|
a62a4cdde9814ae48997176debcaad537f7ad01f
|
[
"Apache-2.0"
] | null | null | null |
1/find_inimum_in_rotated_sorted_array.py
|
IronCore864/leetcode
|
a62a4cdde9814ae48997176debcaad537f7ad01f
|
[
"Apache-2.0"
] | 1
|
2021-09-02T12:05:15.000Z
|
2021-09-02T12:05:15.000Z
|
s = Solution()
print(s.findMin([4, 5, 5, 6, 7, 0, 1, 2, 2, 2]))
| 20.4
| 47
| 0.360294
|
class Solution(object):
def findMin(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
i = 0
j = len(nums) - 1
while i < j:
            m = i + (j - i) // 2
if nums[m] > nums[j]:
i = m + 1
else:
j = m
return nums[i]
s = Solution()
print(s.findMin([4, 5, 5, 6, 7, 0, 1, 2, 2, 2]))
| 0
| 321
| 22
|
e965a8d33c910c1887e79ac768c1271d3fda827c
| 3,711
|
py
|
Python
|
qiling/qiling/os/posix/syscall/fcntl.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | 2
|
2021-05-05T12:03:01.000Z
|
2021-06-04T14:27:15.000Z
|
qiling/qiling/os/posix/syscall/fcntl.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | null | null | null |
qiling/qiling/os/posix/syscall/fcntl.py
|
mrTavas/owasp-fstm-auto
|
6e9ff36e46d885701c7419db3eca15f12063a7f3
|
[
"CC0-1.0"
] | 2
|
2021-05-05T12:03:09.000Z
|
2021-06-04T14:27:21.000Z
|
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
from qiling.const import *
from qiling.os.linux.thread import *
from qiling.const import *
from qiling.os.posix.filestruct import *
from qiling.os.filestruct import *
from qiling.os.posix.const_mapping import *
from qiling.exception import *
| 27.488889
| 118
| 0.61924
|
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
from qiling.const import *
from qiling.os.linux.thread import *
from qiling.const import *
from qiling.os.posix.filestruct import *
from qiling.os.filestruct import *
from qiling.os.posix.const_mapping import *
from qiling.exception import *
def ql_syscall_open(ql, filename, flags, mode, *args, **kw):
path = ql.mem.string(filename)
real_path = ql.os.path.transform_to_real_path(path)
relative_path = ql.os.path.transform_to_relative_path(path)
flags = flags & 0xffffffff
mode = mode & 0xffffffff
for i in range(256):
if ql.os.fd[i] == 0:
idx = i
break
else:
idx = -1
if idx == -1:
# errno ENOMEM Insufficient kernel memory was available.
regreturn = -12
else:
try:
if ql.archtype== QL_ARCH.ARM:
mode = 0
flags = ql_open_flag_mapping(ql, flags)
ql.os.fd[idx] = ql.os.fs_mapper.open_ql_file(path, flags, mode)
regreturn = idx
except QlSyscallError as e:
regreturn = - e.errno
ql.log.debug("open(%s, %s, 0o%o) = %d" % (relative_path, open_flags_mapping(flags, ql.archtype), mode, regreturn))
if regreturn >= 0 and regreturn != 2:
ql.log.debug("File Found: %s" % real_path)
else:
ql.log.debug("File Not Found %s" % real_path)
return regreturn
def ql_syscall_openat(ql, openat_fd, openat_path, openat_flags, openat_mode, *args, **kw):
openat_fd = ql.unpacks(ql.pack(openat_fd))
openat_path = ql.mem.string(openat_path)
real_path = ql.os.path.transform_to_real_path(openat_path)
relative_path = ql.os.path.transform_to_relative_path(openat_path)
openat_flags = openat_flags & 0xffffffff
openat_mode = openat_mode & 0xffffffff
for i in range(256):
if ql.os.fd[i] == 0:
idx = i
break
else:
idx = -1
if idx == -1:
regreturn = -1
else:
try:
if ql.archtype== QL_ARCH.ARM:
mode = 0
openat_flags = ql_open_flag_mapping(ql, openat_flags)
ql.os.fd[idx] = ql.os.fs_mapper.open_ql_file(openat_path, openat_flags, openat_mode)
regreturn = idx
except QlSyscallError:
regreturn = -1
ql.log.debug("openat(%d, %s, %s, 0o%o) = %d" % (
openat_fd, relative_path, open_flags_mapping(openat_flags, ql.archtype), openat_mode, regreturn))
if regreturn >= 0 and regreturn != 2:
ql.log.debug("File Found: %s" % real_path)
else:
ql.log.debug("File Not Found %s" % real_path)
return regreturn
def ql_syscall_fcntl(ql, fcntl_fd, fcntl_cmd, *args, **kw):
F_SETFD = 2
F_GETFL = 3
F_SETFL = 4
regreturn = 0
if fcntl_cmd == F_SETFD:
regreturn = 0
elif fcntl_cmd == F_GETFL:
regreturn = 2
elif fcntl_cmd == F_SETFL:
regreturn = 0
return regreturn
def ql_syscall_fcntl64(ql, fcntl_fd, fcntl_cmd, fcntl_arg, *args, **kw):
F_GETFD = 1
F_SETFD = 2
F_GETFL = 3
F_SETFL = 4
if fcntl_cmd == F_GETFL:
regreturn = 2
elif fcntl_cmd == F_SETFL:
if isinstance(ql.os.fd[fcntl_fd], ql_socket):
ql.os.fd[fcntl_fd].fcntl(fcntl_cmd, fcntl_arg)
regreturn = 0
elif fcntl_cmd == F_GETFD:
regreturn = 2
elif fcntl_cmd == F_SETFD:
regreturn = 0
else:
regreturn = 0
return regreturn
def ql_syscall_flock(ql, flock_fd, flock_operation, *args, **kw):
# Should always return 0, we don't need a actual file lock
regreturn = 0
return regreturn
| 3,245
| 0
| 115
|
4e431cabe02f617dd38da093c80ad9c5404af08a
| 2,904
|
py
|
Python
|
test/unit/mysql_rep_admin/add_miss_slaves.py
|
mjpernot/mysql-rep-admin
|
08b8f5daf28bc3e462bd72968842f2c44161c084
|
[
"MIT"
] | null | null | null |
test/unit/mysql_rep_admin/add_miss_slaves.py
|
mjpernot/mysql-rep-admin
|
08b8f5daf28bc3e462bd72968842f2c44161c084
|
[
"MIT"
] | null | null | null |
test/unit/mysql_rep_admin/add_miss_slaves.py
|
mjpernot/mysql-rep-admin
|
08b8f5daf28bc3e462bd72968842f2c44161c084
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Classification (U)
"""Program: add_miss_slaves.py
Description: Unit testing of add_miss_slaves in mysql_rep_admin.py.
Usage:
test/unit/mysql_rep_admin/add_miss_slaves.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
# Local
sys.path.append(os.getcwd())
import mysql_rep_admin
import version
__version__ = version.__version__
class MasterRep(object):
"""Class: MasterRep
Description: Class stub holder for mysql_class.MasterRep class.
Methods:
__init__
show_slv_hosts
"""
def __init__(self):
"""Method: __init__
Description: Class initialization.
Arguments:
"""
self.name = "Master_Name"
self.slv_hosts = [{"Host": "slave1"}, {"Host": "slave2"},
{"Host": "slave3"}]
def show_slv_hosts(self):
"""Method: show_slv_hosts
        Description: Stub method holder for MasterRep.show_slv_hosts.
Arguments:
"""
return self.slv_hosts
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp
test_no_slv_miss
test_one_slv_miss
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.master = MasterRep()
self.outdata = {"Slaves": [{"Name": "slave1", "LagTime": None},
{"Name": "slave2", "LagTime": None}]}
self.outdata2 = {"Slaves": [{"Name": "slave1", "LagTime": None},
{"Name": "slave2", "LagTime": None},
{"Name": "slave3", "LagTime": None}]}
self.final_list = {"Slaves": [{"Name": "slave1", "LagTime": None},
{"Name": "slave2", "LagTime": None},
{"Name": "slave3", "LagTime": None}]}
def test_no_slv_miss(self):
"""Function: test_no_slv_miss
Description: Test with no slave missing.
Arguments:
"""
self.assertEqual(mysql_rep_admin.add_miss_slaves(self.master,
self.outdata2),
self.final_list)
def test_one_slv_miss(self):
"""Function: test_one_slv_miss
Description: Test with one slave missing.
Arguments:
"""
self.assertEqual(mysql_rep_admin.add_miss_slaves(self.master,
self.outdata),
self.final_list)
if __name__ == "__main__":
unittest.main()
| 20.892086
| 75
| 0.539945
|
#!/usr/bin/python
# Classification (U)
"""Program: add_miss_slaves.py
Description: Unit testing of add_miss_slaves in mysql_rep_admin.py.
Usage:
test/unit/mysql_rep_admin/add_miss_slaves.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
# Local
sys.path.append(os.getcwd())
import mysql_rep_admin
import version
__version__ = version.__version__
class MasterRep(object):
"""Class: MasterRep
Description: Class stub holder for mysql_class.MasterRep class.
Methods:
__init__
show_slv_hosts
"""
def __init__(self):
"""Method: __init__
Description: Class initialization.
Arguments:
"""
self.name = "Master_Name"
self.slv_hosts = [{"Host": "slave1"}, {"Host": "slave2"},
{"Host": "slave3"}]
def show_slv_hosts(self):
"""Method: show_slv_hosts
        Description: Stub method holder for MasterRep.show_slv_hosts.
Arguments:
"""
return self.slv_hosts
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp
test_no_slv_miss
test_one_slv_miss
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.master = MasterRep()
self.outdata = {"Slaves": [{"Name": "slave1", "LagTime": None},
{"Name": "slave2", "LagTime": None}]}
self.outdata2 = {"Slaves": [{"Name": "slave1", "LagTime": None},
{"Name": "slave2", "LagTime": None},
{"Name": "slave3", "LagTime": None}]}
self.final_list = {"Slaves": [{"Name": "slave1", "LagTime": None},
{"Name": "slave2", "LagTime": None},
{"Name": "slave3", "LagTime": None}]}
def test_no_slv_miss(self):
"""Function: test_no_slv_miss
Description: Test with no slave missing.
Arguments:
"""
self.assertEqual(mysql_rep_admin.add_miss_slaves(self.master,
self.outdata2),
self.final_list)
def test_one_slv_miss(self):
"""Function: test_one_slv_miss
Description: Test with one slave missing.
Arguments:
"""
self.assertEqual(mysql_rep_admin.add_miss_slaves(self.master,
self.outdata),
self.final_list)
if __name__ == "__main__":
unittest.main()
| 0
| 0
| 0
|
99017e06c8585e5388a22a35b7a3f448afce5ddd
| 9,116
|
py
|
Python
|
SUnCNN_DC2.py
|
BehnoodRasti/SUnCNN
|
f19245cd7fc0e142bfc66c9a809444d22ed7da36
|
[
"Apache-2.0"
] | 3
|
2021-11-17T09:34:15.000Z
|
2022-03-26T11:51:48.000Z
|
SUnCNN_DC2.py
|
BehnoodRasti/SUnCNN
|
f19245cd7fc0e142bfc66c9a809444d22ed7da36
|
[
"Apache-2.0"
] | null | null | null |
SUnCNN_DC2.py
|
BehnoodRasti/SUnCNN
|
f19245cd7fc0e142bfc66c9a809444d22ed7da36
|
[
"Apache-2.0"
] | 2
|
2022-02-24T12:41:02.000Z
|
2022-03-30T12:26:21.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 4 17:53:50 2020
@author: behnood
"""
from __future__ import print_function
import matplotlib.pyplot as plt
#%matplotlib inline
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '3'
import numpy as np
from models import *
import torch
import torch.optim
from skimage.measure import compare_psnr
from skimage.measure import compare_mse
from utils.denoising_utils import *
from skimage._shared import *
from skimage.util import *
from skimage.metrics.simple_metrics import _as_floats
from skimage.metrics.simple_metrics import mean_squared_error
from UtilityMine import *
from utils.sr_utils import tv_loss
from numpy import linalg as LA
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark =True
dtype = torch.cuda.FloatTensor
PLOT = False
import scipy.io
#%%
#%%
fname2 = "Data/DC2/Y_clean.mat"
mat2 = scipy.io.loadmat(fname2)
img_np_gt = mat2["Y_clean"]
img_np_gt = img_np_gt.transpose(2,0,1)
[p1, nr1, nc1] = img_np_gt.shape
#%%
fname3 = "Data/DC2/XT.mat"
mat3 = scipy.io.loadmat(fname3)
A_true_np = mat3["XT"]
#%%
fname4 = "Data/DC2/EE.mat"
mat4 = scipy.io.loadmat(fname4)
EE = mat4["EE"]
#%%
LibS=EE.shape[1]
#%%
npar=np.zeros((1,3))
npar[0,0]=14.7
npar[0,1]=46.5
npar[0,2]=147
#npar[0,3]=367
tol1=npar.shape[1]
tol2=1
save_result=False
import time
from tqdm import tqdm
rmax=9
for fi in tqdm(range(tol1)):
for fj in tqdm(range(tol2)):
#%%
img_np_gt=np.clip(img_np_gt, 0, 1)
img_noisy_np = add_noise(img_np_gt, 1/npar[0,fi])#11.55 20 dB, 36.7 30 dB, 116.5 40 dB
print(compare_snr(img_np_gt, img_noisy_np))
img_resh=np.reshape(img_noisy_np,(p1,nr1*nc1))
V, SS, U = scipy.linalg.svd(img_resh, full_matrices=False)
PC=np.diag(SS)@U
img_resh_DN=V[:,:rmax]@PC[:rmax,:]
img_noisy_np=np.reshape(np.clip(img_resh_DN, 0, 1),(p1,nr1,nc1))
INPUT = 'noise' # 'meshgrid'
pad = 'reflection'
need_bias=True
OPT_OVER = 'net' # 'net,input'
#
reg_noise_std = 0.0
LR1 = 0.001
OPTIMIZER1='adam'# 'RMSprop'#'adam' # 'LBFGS'
show_every = 500
exp_weight=0.99
if fi==0:
num_iter1 = 4000
elif fi==1:
num_iter1 = 8000
elif fi==2:
num_iter1 = 12000
input_depth =img_noisy_np.shape[0]
net1 = CAE_AbEst()
net1.cuda()
print(net1)
# Compute number of parameters
s = sum([np.prod(list(p11.size())) for p11 in net1.parameters()]);
print ('Number of params: %d' % s)
# Loss
mse = torch.nn.MSELoss().type(dtype)
img_noisy_torch = np_to_torch(img_noisy_np).type(dtype)
# if fk==0:
net_input1 = get_noise(input_depth, INPUT,
(img_noisy_np.shape[1], img_noisy_np.shape[2])).type(dtype).detach()
# net_input1 = img_noisy_torch
E_torch = np_to_torch(EE).type(dtype)
#%%
net_input_saved = net_input1.detach().clone()
noise = net_input1.detach().clone()
out_avg = None
out_HR_avg= None
last_net = None
RMSE_LR_last = 0
loss=np.zeros((num_iter1,1))
AE=np.zeros((num_iter1,1))
i = 0
p11 = get_params(OPT_OVER, net1, net_input1)
optimize(OPTIMIZER1, p11, closure1, LR1, num_iter1)
if 1:
out_LR_np = out_LR.detach().cpu().squeeze().numpy()
out_avg_np = out_avg.detach().cpu().squeeze().numpy()
MAE_LR_avg= 100*np.mean(abs(A_true_np.astype(np.float32)- np.clip(out_avg_np, 0, 1)))
MAE_LR= 100*np.mean(abs(A_true_np.astype(np.float32)- np.clip(out_LR_np, 0, 1)))
SRE=10*np.log10(LA.norm(A_true_np.astype(np.float32).reshape((EE.shape[1],nr1*nc1)),'fro')/LA.norm((A_true_np.astype(np.float32)- np.clip(out_LR_np, 0, 1)).reshape((EE.shape[1],nr1*nc1)),'fro'))
SRE_avg=10*np.log10(LA.norm(A_true_np.astype(np.float32).reshape((EE.shape[1],nr1*nc1)),'fro')/LA.norm((A_true_np.astype(np.float32)- np.clip(out_avg_np, 0, 1)).reshape((EE.shape[1],nr1*nc1)),'fro'))
print ('Iteration %05d MAE_LR: %f MAE_LR_avg: %f SRE: %f SRE_avg: %f ' % (i, MAE_LR, MAE_LR_avg, SRE, SRE_avg), '\r', end='')
# if save_result is True:
# scipy.io.savemat("C:/Users/behnood/Desktop/Sparse Unmixing/Results/Sim2/demo1/10runs/out_avg_np%01d%01d.mat" % (fi+2,fj+1),
# {'out_avg_np%01d%01d' % (fi+2, fj+1):out_avg_np.transpose(1,2,0)})
# scipy.io.savemat("C:/Users/behnood/Desktop/Sparse Unmixing/Results/Sim2/demo1/10runs/out_LR_np%01d%01d.mat" % (fi+2,fj+1),
# {'out_LR_np%01d%01d' % (fi+2, fj+1):out_LR_np.transpose(1,2,0)})
#%%
| 41.625571
| 213
| 0.554739
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 4 17:53:50 2020
@author: behnood
"""
from __future__ import print_function
import matplotlib.pyplot as plt
#%matplotlib inline
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '3'
import numpy as np
from models import *
import torch
import torch.optim
from skimage.measure import compare_psnr
from skimage.measure import compare_mse
from utils.denoising_utils import *
from skimage._shared import *
from skimage.util import *
from skimage.metrics.simple_metrics import _as_floats
from skimage.metrics.simple_metrics import mean_squared_error
from UtilityMine import *
from utils.sr_utils import tv_loss
from numpy import linalg as LA
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark =True
dtype = torch.cuda.FloatTensor
PLOT = False
import scipy.io
#%%
#%%
fname2 = "Data/DC2/Y_clean.mat"
mat2 = scipy.io.loadmat(fname2)
img_np_gt = mat2["Y_clean"]
img_np_gt = img_np_gt.transpose(2,0,1)
[p1, nr1, nc1] = img_np_gt.shape
#%%
fname3 = "Data/DC2/XT.mat"
mat3 = scipy.io.loadmat(fname3)
A_true_np = mat3["XT"]
#%%
fname4 = "Data/DC2/EE.mat"
mat4 = scipy.io.loadmat(fname4)
EE = mat4["EE"]
#%%
LibS=EE.shape[1]
#%%
npar=np.zeros((1,3))
npar[0,0]=14.7
npar[0,1]=46.5
npar[0,2]=147
#npar[0,3]=367
tol1=npar.shape[1]
tol2=1
save_result=False
import time
from tqdm import tqdm
rmax=9
for fi in tqdm(range(tol1)):
for fj in tqdm(range(tol2)):
#%%
img_np_gt=np.clip(img_np_gt, 0, 1)
img_noisy_np = add_noise(img_np_gt, 1/npar[0,fi])#11.55 20 dB, 36.7 30 dB, 116.5 40 dB
print(compare_snr(img_np_gt, img_noisy_np))
img_resh=np.reshape(img_noisy_np,(p1,nr1*nc1))
V, SS, U = scipy.linalg.svd(img_resh, full_matrices=False)
PC=np.diag(SS)@U
img_resh_DN=V[:,:rmax]@PC[:rmax,:]
img_noisy_np=np.reshape(np.clip(img_resh_DN, 0, 1),(p1,nr1,nc1))
INPUT = 'noise' # 'meshgrid'
pad = 'reflection'
need_bias=True
OPT_OVER = 'net' # 'net,input'
#
reg_noise_std = 0.0
LR1 = 0.001
OPTIMIZER1='adam'# 'RMSprop'#'adam' # 'LBFGS'
show_every = 500
exp_weight=0.99
if fi==0:
num_iter1 = 4000
elif fi==1:
num_iter1 = 8000
elif fi==2:
num_iter1 = 12000
input_depth =img_noisy_np.shape[0]
class CAE_AbEst(nn.Module):
def __init__(self):
super(CAE_AbEst, self).__init__()
self.conv1 = nn.Sequential(
UnmixArch(
input_depth, EE.shape[1],
# num_channels_down = [8, 16, 32, 64, 128],
# num_channels_up = [8, 16, 32, 64, 128],
# num_channels_skip = [4, 4, 4, 4, 4],
num_channels_down = [ 256],
num_channels_up = [ 256],
num_channels_skip = [ 4],
filter_size_up = 3,filter_size_down = 3, filter_skip_size=1,
upsample_mode='bilinear', # downsample_mode='avg',
need1x1_up=True,
need_sigmoid=True, need_bias=True, pad=pad, act_fun='ReLU').type(dtype)
)
def forward(self, x):
x = self.conv1(x)
return x
net1 = CAE_AbEst()
net1.cuda()
print(net1)
# Compute number of parameters
s = sum([np.prod(list(p11.size())) for p11 in net1.parameters()]);
print ('Number of params: %d' % s)
# Loss
mse = torch.nn.MSELoss().type(dtype)
img_noisy_torch = np_to_torch(img_noisy_np).type(dtype)
# if fk==0:
net_input1 = get_noise(input_depth, INPUT,
(img_noisy_np.shape[1], img_noisy_np.shape[2])).type(dtype).detach()
# net_input1 = img_noisy_torch
E_torch = np_to_torch(EE).type(dtype)
#%%
net_input_saved = net_input1.detach().clone()
noise = net_input1.detach().clone()
out_avg = None
out_HR_avg= None
last_net = None
RMSE_LR_last = 0
loss=np.zeros((num_iter1,1))
AE=np.zeros((num_iter1,1))
i = 0
def closure1():
global i, RMSE_LR, RMSE_LR_ave, RMSE_HR, out_LR_np, out_avg_np, out_LR\
, out_avg,out_HR_np, out_HR_avg, out_HR_avg_np, RMSE_LR_last, last_net\
, net_input,RMSE_LR_avg,RMSE_HR_avg,RE_HR_avg, RE_HR, Eest,loss,AE\
, MAE_LR,MAE_LR_avg,MAE_HR,MAE_HR_avg
if reg_noise_std > 0:
net_input = net_input_saved + (noise.normal_() * reg_noise_std)
out_LR = net1(net_input1)
out_HR=torch.mm(E_torch.view(p1,LibS),out_LR.view(LibS,nr1*nc1))
# Smoothing
if out_avg is None:
out_avg = out_LR.detach()
out_HR_avg = out_HR.detach()
else:
out_avg = out_avg * exp_weight + out_LR.detach() * (1 - exp_weight)
out_HR_avg = out_HR_avg * exp_weight + out_HR.detach() * (1 - exp_weight)
#%%
out_HR=out_HR.view((1,p1,nr1,nc1))
total_loss = mse(img_noisy_torch, out_HR)
total_loss.backward()
if 1:
out_LR_np = out_LR.detach().cpu().squeeze().numpy()
out_avg_np = out_avg.detach().cpu().squeeze().numpy()
SRE=10*np.log10(LA.norm(A_true_np.astype(np.float32).reshape((EE.shape[1],nr1*nc1)),'fro')/LA.norm((A_true_np.astype(np.float32)- np.clip(out_LR_np, 0, 1)).reshape((EE.shape[1],nr1*nc1)),'fro'))
SRE_avg=10*np.log10(LA.norm(A_true_np.astype(np.float32).reshape((EE.shape[1],nr1*nc1)),'fro')/LA.norm((A_true_np.astype(np.float32)- np.clip(out_avg_np, 0, 1)).reshape((EE.shape[1],nr1*nc1)),'fro'))
MAE_LR= 100*np.mean(abs(A_true_np.astype(np.float32)- np.clip(out_LR_np, 0, 1)))
MAE_LR_avg= 100*np.mean(abs(A_true_np.astype(np.float32)- np.clip(out_avg_np, 0, 1)))
print ('Iteration %05d Loss %f MAE_LR: %f MAE_LR_avg: %f SRE: %f SRE_avg: %f' % (i, total_loss.item(), MAE_LR, MAE_LR_avg, SRE, SRE_avg), '\r', end='')
if PLOT and i % show_every == 0:
out_LR_np = torch_to_np(out_LR)
out_avg_np = torch_to_np(out_avg)
# plot_image_grid([np.clip(out_np, 0, 1),
# np.clip(torch_to_np(out_avg), 0, 1)], factor=figsize, nrow=1)
# out_LR_np = np.clip(out_LR_np, 0, 1)
# out_avg_np = np.clip(out_avg_np, 0, 1)
# f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=(10,10))
# ax1.imshow(np.stack((out_LR_np[2,:,:],out_LR_np[1,:,:],out_LR_np[0,:,:]),2))
# ax2.imshow(np.stack((out_avg_np[2,:,:],out_avg_np[1,:,:],out_avg_np[0,:,:]),2))
# ax3.imshow(np.stack((A_true_np[2,:,:],A_true_np[1,:,:],A_true_np[0,:,:]),2))
# plt.show()
plt.plot(out_LR_np.reshape(LibS,nr1*nc1))
loss[i]=total_loss.item()
i += 1
return total_loss
p11 = get_params(OPT_OVER, net1, net_input1)
optimize(OPTIMIZER1, p11, closure1, LR1, num_iter1)
if 1:
out_LR_np = out_LR.detach().cpu().squeeze().numpy()
out_avg_np = out_avg.detach().cpu().squeeze().numpy()
MAE_LR_avg= 100*np.mean(abs(A_true_np.astype(np.float32)- np.clip(out_avg_np, 0, 1)))
MAE_LR= 100*np.mean(abs(A_true_np.astype(np.float32)- np.clip(out_LR_np, 0, 1)))
SRE=10*np.log10(LA.norm(A_true_np.astype(np.float32).reshape((EE.shape[1],nr1*nc1)),'fro')/LA.norm((A_true_np.astype(np.float32)- np.clip(out_LR_np, 0, 1)).reshape((EE.shape[1],nr1*nc1)),'fro'))
SRE_avg=10*np.log10(LA.norm(A_true_np.astype(np.float32).reshape((EE.shape[1],nr1*nc1)),'fro')/LA.norm((A_true_np.astype(np.float32)- np.clip(out_avg_np, 0, 1)).reshape((EE.shape[1],nr1*nc1)),'fro'))
print ('Iteration %05d MAE_LR: %f MAE_LR_avg: %f SRE: %f SRE_avg: %f ' % (i, MAE_LR, MAE_LR_avg, SRE, SRE_avg), '\r', end='')
# if save_result is True:
# scipy.io.savemat("C:/Users/behnood/Desktop/Sparse Unmixing/Results/Sim2/demo1/10runs/out_avg_np%01d%01d.mat" % (fi+2,fj+1),
# {'out_avg_np%01d%01d' % (fi+2, fj+1):out_avg_np.transpose(1,2,0)})
# scipy.io.savemat("C:/Users/behnood/Desktop/Sparse Unmixing/Results/Sim2/demo1/10runs/out_LR_np%01d%01d.mat" % (fi+2,fj+1),
# {'out_LR_np%01d%01d' % (fi+2, fj+1):out_LR_np.transpose(1,2,0)})
#%%
| 4,026
| 6
| 142
|
75b9590a65adb6b1e657e5a4109bd8c90e601615
| 379
|
py
|
Python
|
Curso de Python/numeros_primos.py
|
Cazcode/Curso_python
|
cbaaedacec8801da2d62da4c3aadce052f04bd64
|
[
"MIT"
] | null | null | null |
Curso de Python/numeros_primos.py
|
Cazcode/Curso_python
|
cbaaedacec8801da2d62da4c3aadce052f04bd64
|
[
"MIT"
] | null | null | null |
Curso de Python/numeros_primos.py
|
Cazcode/Curso_python
|
cbaaedacec8801da2d62da4c3aadce052f04bd64
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
run()
| 18.047619
| 46
| 0.522427
|
def es_primo(number):
    if number < 2:
        return False
    if number == 2:
        return True
    if number % 2 == 0:
        return False
    for i in range(3, number):
        if number % i == 0:
            return False
    return True
def run():
    number = int(input('Enter a number: '))
    if es_primo(number):
        print('Is prime')
    else:
        print('Not prime')
if __name__ == '__main__':
run()
| 296
| 0
| 45
|
813f1d0c8d016d91369c44e1d9a6e6b88e01a527
| 23,140
|
py
|
Python
|
fdc/fdc.py
|
alexandreday/Fast_Density_Clustering
|
91e7ee0ccf2b297b40747823302a21cd70d59dc9
|
[
"BSD-3-Clause"
] | 7
|
2019-11-02T02:07:17.000Z
|
2022-01-25T10:50:09.000Z
|
fdc/fdc.py
|
alexandreday/Fast_Density_Clustering
|
91e7ee0ccf2b297b40747823302a21cd70d59dc9
|
[
"BSD-3-Clause"
] | 1
|
2021-09-21T16:47:37.000Z
|
2022-01-30T00:58:11.000Z
|
fdc/fdc.py
|
alexandreday/Fast_Density_Clustering
|
91e7ee0ccf2b297b40747823302a21cd70d59dc9
|
[
"BSD-3-Clause"
] | 7
|
2017-12-05T03:18:08.000Z
|
2021-12-20T19:10:49.000Z
|
'''
Created : Jan 16, 2017
Last major update : June 29, 2017
@author: Alexandre Day
Purpose:
Fast density clustering
'''
import numpy as np
import time
from numpy.random import random
import sys, os
from .density_estimation import KDE
import pickle
from collections import OrderedDict as OD
from sklearn.neighbors import NearestNeighbors
import multiprocessing
class FDC:
""" Fast Density Clustering via kernel density modelling for low-dimensional data (D <~ 8)
Parameters
----------
nh_size : int, optional (default = 'auto')
Neighborhood size. This is the scale used for identifying the initial modes in the density distribution, regardless
of the covariance. If a point has the maximum density among it's nh_size neighbors, it is marked as
a potential cluster center. 'auto' means that the nh_size is scaled with number of samples. We
use nh_size = 100 for 10000 samples. The minimum neighborhood size is set to 10.
eta : float, optional (default = 0.4)
Noise threshold used to merge clusters. This is done by quenching directly to the specified noise threshold
(as opposed to progressively coarse-graining). The noise threshold determines the extended
neighborhood of cluster centers. Points that have a relative density difference of less than
"noise_threshold" and that are density-reachable, are part of the extended neighborhood.
random_state: int, optional (default = 0)
        Random seed for the random number generator. By default, the
        method generates the same results. This seed is used for
        the cross-validation (set partitions), which will in turn affect the bandwidth value.
test_ratio_size: float, optional (default = 0.8)
Ratio size of the test set used when performing maximum likelihood estimation.
In order to have smooth density estimations (prevent overfitting), it is recommended to
use a large test_ratio_size (closer to 1.0) rather than a small one.
verbose: int, optional (default = 1)
Set to 0 if you don't want to see print to screen.
bandwidth: float, optional (default = None)
If you want the bandwidth for kernel density to be set automatically or want to set it yourself.
By default it is set automatically.
merge: bool, optional (default = True)
Optional merging at zero noise threshold, merges overlapping minimal clusters
atol: float, optional (default = 0.000005)
kernel density estimate precision parameter. determines the precision used for kde.
smaller values lead to slower execution but better precision
rtol: float, optional (default = 0.00005)
kernel density estimate precision parameter. determines the precision used for kde.
smaller values lead to slower execution but better precision
xtol: float, optional (default = 0.01)
precision parameter for optimizing the bandwidth using maximum likelihood on a test set
search_size: int, optional (default = 20)
when performing search over neighborhoods, size of each local neighborhood to check when
expanding. This drastically slows the coarse-graining if chosen to be too big !
kernel: str, optional (default='gaussian')
Type of Kernel to use for density estimates. Other options are {'epanechnikov'|'linear','tophat'}.
"""
def fit(self, X):
""" Performs density clustering on given data set
Parameters
----------
X : array, (n_sample, n_feature)
Data to cluster.
Returns
----------
self : fdc object
To obtain new cluster labels use self.cluster_label
"""
t = time.time()
self.X = X # shallow copy
self.n_sample = X.shape[0]
if self.n_sample < 10:
assert False, "Too few samples for computing densities !"
if self.nh_size == 'auto':  # equality, not identity, for string comparison
self.nh_size = max([int(25*np.log10(self.n_sample)), 10])
if self.search_size > self.nh_size:
self.search_size = self.nh_size
if self.verbose == 0:
blockPrint()
self.display_main_parameters()
print("[fdc] Starting clustering with n=%i samples..." % X.shape[0])
start = time.time()
print("[fdc] Fitting kernel model for density estimation ...")
self.fit_density(X)
#print("here")
print("[fdc] Finding centers ...")
self.compute_delta(X, self.rho)
print("[fdc] Found %i potential centers ..." % self.idx_centers_unmerged.shape[0])
# temporary idx for the centers :
self.idx_centers = self.idx_centers_unmerged
self.cluster_label = assign_cluster(self.idx_centers_unmerged, self.nn_delta, self.density_graph)
if self.merge: # usually by default one should perform this minimal merging ..
print("[fdc] Merging overlapping minimal clusters ...")
self.check_cluster_stability_fast(X, 0.) # given
if self.eta >= 1e-3 :
print("[fdc] Iterating up to specified noise threshold ...")
self.check_cluster_stability_fast(X, self.eta) # merging 'unstable' clusters
print("[fdc] Done in %.3f s" % (time.time()-start))
enablePrint()
return self
def save(self, name=None):
""" Saves current model to specified path 'name' """
if name is None:
fname = self.make_file_name()
else:
fname = name
fopen = open(fname,'wb')
pickle.dump(self,fopen)
fopen.close()
return fname
def f_tmp(self, X_, i_):
"""evaluating density and keeping track of threading order"""
return (i_, self.density_model.evaluate_density(X_))
#@profile
def coarse_grain(self, noise_iterable):
"""Starting from an initial noise scale, progressively merges clusters.
If specified, saves the cluster assignments at every level of the coarse graining.
Parameters
-----------
noise_iterable : iterable of floats
Should be an iterable containing noise values at which to perform coarse graining. Usually
one should start from 0 and go to larger values by small increments. The whole clustering
information is stored in self.hierarchy
Return
------
self
"""
if self.verbose == 0:
blockPrint()
print("[fdc] Coarse graining until desired noise threshold ...")
noise_range = [n for n in noise_iterable]
#hierarchy = []
self.max_noise = -1
n_cluster = 0
# note to self, if no merger is done, no need to store hierarchy ... just work with noise_range dict ...
for nt in noise_range:
if self.n_cluster_init is not None:
if len(self.idx_centers) <= self.n_cluster_init:
print("[fdc.py] Reached number of specified clusters [= %i] (or close to), n_cluster = %i"%(self.n_cluster_init,len(self.idx_centers)))
break
self.check_cluster_stability_fast(self.X, eta = nt)
#hierarchy.append(OD({'idx_centers': self.idx_centers, 'cluster_labels': self.cluster_label})) # -> the only required information <-
if len(self.idx_centers) != n_cluster:
n_cluster = len(self.idx_centers)
self.max_noise = nt
#self.hierarchy = hierarchy
self.noise_range = noise_range
self.noise_threshold = noise_range[-1]
enablePrint()
return self
#@profile
def compute_delta(self, X, rho = None):
"""
Purpose:
Computes distance to nearest-neighbor with higher density
Return:
delta,nn_delta,idx_centers,density_graph
:delta: distance to n.n. with higher density (within some neighborhood cutoff)
:nn_delta: index of n.n. with ...
:idx_centers: list of points that have the largest density in their neighborhood cutoff
:density_graph: for every point, list of points are incoming (via the density gradient)
"""
if rho is None:
rho = self.rho
n_sample, n_feature = X.shape
maxdist = np.linalg.norm([np.max(X[:,i])-np.min(X[:,i]) for i in range(n_feature)])
delta = maxdist*np.ones(n_sample, dtype=np.float)
nn_delta = np.ones(n_sample, dtype=np.int)
density_graph = [[] for i in range(n_sample)] # store incoming leaves
### ----------->
nn_list = self.nn_list # restricted over neighborhood (nh_size)
### ----------->
for i in range(n_sample):
idx = index_greater(rho[nn_list[i]])
if idx:
density_graph[nn_list[i,idx]].append(i)
nn_delta[i] = nn_list[i,idx]
delta[i] = self.nn_dist[i,idx]
else:
nn_delta[i]=-1
idx_centers=np.array(range(n_sample))[delta > 0.999*maxdist]
self.delta = delta
self.nn_delta = nn_delta
self.idx_centers_unmerged = idx_centers
self.density_graph = density_graph
return self
def estimate_eta(self):
""" Based on the density distribution, computes a scale for eta
Need more experimenting, this is not quite working ...
"""
from matplotlib import pyplot as plt
idx = int(self.n_sample/10.)
idx = np.argsort(self.rho)[:-5*idx]#[2:idx:4*idx]
drho = []
for i in idx:
rho_init = self.rho[i]
nn_i = self.nn_delta[i]
while nn_i != -1:
rho_c = self.rho[nn_i]
nn_i = self.nn_delta[nn_i]
drho.append(rho_c- rho_init)
""" plt.his(drho,bins=60)
plt.show()
exit() """
eta = np.mean(drho)#+0.5*np.std(drho)
self.cout("Using std eta of %.3f"%eta)
return eta
""" def get_cluster_info(self, eta = None):
if eta is None:
return self.cluster_label, self.idx_centers
else:
pos = np.argmin(np.abs(np.array(self.noise_range)-eta))
#delta_ = self.noise_range[pos]
#idx_centers = self.hierarchy[pos]['idx_centers']
cluster_label = self.hierarchy[pos]['cluster_labels']
idx_center = self.hierarchy[pos]['idx_centers']
return cluster_label, idx_center """
""" def update_labels(self, idx_centers, cluster_label):
self.idx_centers = idx_centers
self.cluster_label = cluster_label """
#@profile
def find_NH_tree_search(self, idx, eta, cluster_label):
"""
Function for searching for nearest neighbors within some density threshold.
NH should be an empty set for the initial function call.
Note to myself : lots of optimization, this is pretty time/memory consuming !
Parameters
-----------
idx : int
index of the cluster centroid to start from
eta : float
maximum density you can spill over (this is "density_center - eta")
cluster_label: array of int
cluster label for every datapoint.
Returns
-----------
List of points in the neighborhood of point idx : 1D array
"""
rho = self.rho
zero_array = np.zeros(len(self.nn_list),dtype=bool)
nn_list = self.nn_list
zero_array[nn_list[idx, :self.search_size]] = True
new_leaves = zero_array
is_NH = (rho > eta) & (new_leaves)
current_label = cluster_label[idx]
# This could probably be improved, but at least it's fully vectorized and scalable (NlogN in time and N in memory)
while True:
update = False
leaves=np.copy(new_leaves)
#y_leave = cluster_label[leaves]
leaves_cluster = (leaves) & (cluster_label == current_label)
new_leaves=np.zeros(len(self.nn_list), dtype=bool)
nn_leaf = np.unique(nn_list[leaves_cluster][:self.search_size].flatten())
res = nn_leaf[is_NH[nn_leaf]==False]
pos = np.where(rho[res] > eta)[0]
if len(pos) > 0: update=True
is_NH[res[pos]] = True
new_leaves[res[pos]] = True
if update is False:
break
return np.where(is_NH)[0]
def find_NH_tree_search_v1(self, idx, eta, cluster_label):
"""
Function for searching for nearest neighbors within
some density threshold.
NH should be an empty set for the initial function call.
Note to myself : lots of optimization, this is pretty time consuming !
Returns
-----------
List of points in the neighborhood of point idx : 1D array
"""
rho = self.rho
nn_list = self.nn_list
new_leaves=nn_list[idx][:self.search_size]
is_NH = np.zeros(len(self.nn_list),dtype=np.int)
is_NH[new_leaves[rho[new_leaves] > eta]] = 1
current_label = cluster_label[idx]
# ideally here we cythonize what's below... this is highly ineficient ...
while True:
update = False
leaves=np.hstack(new_leaves)
new_leaves=[]
y_leave = cluster_label[leaves]
leaves_cluster = leaves[y_leave == current_label]
nn_leaf = nn_list[leaves_cluster]
for i in range(1, self.search_size):
res = nn_leaf[is_NH[nn_leaf[:,i]] == 0, i]
pos = np.where(rho[res] > eta)[0]
if len(pos) > 0: update=True
is_NH[res[pos]] = 1
new_leaves.append(res[pos])
if update is False:
break
return np.where(is_NH == 1)[0]
""" def compute_coarse_grain_graph(self):
graph = {}
for idx in self.idx_centers: # at some scale
NH = self.find_NH_tree_search(idx, eta, cluster_label)
label_centers_nn = np.unique([cluster_label[ni] for ni in NH]) """
#####################################################
#####################################################
############ utility functions below ################
#####################################################
#####################################################
def check_cluster_stability(self, X, threshold):
"""
Given the identified cluster centers, performs a more rigorous
neighborhood search (based on some noise threshold) for points with higher densities.
This is vaguely similar to watershed cuts in image segmentation and basically
makes sure we haven't identified spurious cluster centers w.r.t. some noise threshold (false positives).
This has bad memory complexity, needs improvement if we want to run on N>10^5 data points.
"""
density_graph = self.density_graph
nn_delta = self.nn_delta
delta = self.delta
rho = self.rho
nn_list = self.nn_list
idx_centers = self.idx_centers_unmerged
cluster_label = self.cluster_label
n_false_pos = 0
idx_true_centers = []
for idx in idx_centers:
rho_center = rho[idx]
delta_rho = rho_center - threshold
if threshold < 1e-3: # just check nn_list ...
NH=nn_list[idx][1:self.search_size]
else:
NH = self.find_NH_tree_search(idx, delta_rho, cluster_label)
#print(len(NH))
label_centers_nn = np.unique(self.cluster_label[NH])#[cluster_label[ni] for ni in NH])
idx_max = idx_centers[ label_centers_nn[np.argmax(rho[idx_centers[label_centers_nn]])] ]
rho_current = rho[idx]
if ( rho_current < rho[idx_max] ) & ( idx != idx_max ) :
nn_delta[idx] = idx_max
delta[idx] = np.linalg.norm(X[idx_max]-X[idx])
density_graph[idx_max].append(idx)
n_false_pos+=1
else:
idx_true_centers.append(idx)
return np.array(idx_true_centers,dtype=np.int), n_false_pos
def assign_cluster(idx_centers, nn_delta, density_graph):
"""
Given the cluster centers and the local gradients (nn_delta) assign to every
point a cluster label
"""
n_center = idx_centers.shape[0]
n_sample = nn_delta.shape[0]
cluster_label = -1*np.ones(n_sample,dtype=np.int) # reinitialized every time.
for c, label in zip(idx_centers, range(n_center) ):
cluster_label[c] = label
assign_cluster_deep(density_graph[c], cluster_label, density_graph, label)
return cluster_label
def assign_cluster_deep(root,cluster_label,density_graph,label):
"""
Recursive function for assigning labels for a tree graph.
Stopping condition is met when the root is empty (i.e. a leaf has been reached)
"""
if not root: # then must be a leaf !
return
else:
for child in root:
cluster_label[child]=label
assign_cluster_deep(density_graph[child],cluster_label,density_graph,label)
def index_greater(array, prec=1e-8):
"""
Purpose:
Function for finding first item in an array that has a value greater than the first element in that array
If no element is found, returns None
Precision:
1e-8
Return:
int or None
"""
item=array[0]
for idx, val in np.ndenumerate(array): # slow ! : could be cythonized
if val > (item + prec):
return idx[0]
def blockPrint():
"""Blocks printing to screen"""
sys.stdout = open(os.devnull, 'w')
def enablePrint():
"""Enables printing to screen"""
sys.stdout = sys.__stdout__
| 34.485842
| 158
| 0.591357
|
'''
Created : Jan 16, 2017
Last major update : June 29, 2017
@author: Alexandre Day
Purpose:
Fast density clustering
'''
import numpy as np
import time
from numpy.random import random
import sys, os
from .density_estimation import KDE
import pickle
from collections import OrderedDict as OD
from sklearn.neighbors import NearestNeighbors
import multiprocessing
class FDC:
""" Fast Density Clustering via kernel density modelling for low-dimensional data (D <~ 8)
Parameters
----------
nh_size : int, optional (default = 'auto')
Neighborhood size. This is the scale used for identifying the initial modes in the density distribution, regardless
of the covariance. If a point has the maximum density among its nh_size neighbors, it is marked as
a potential cluster center. 'auto' means that the nh_size is scaled with number of samples. We
use nh_size = 100 for 10000 samples. The minimum neighborhood size is set to 10.
eta : float, optional (default = 0.4)
Noise threshold used to merge clusters. This is done by quenching directly to the specified noise threshold
(as opposed to progressively coarse-graining). The noise threshold determines the extended
neighborhood of cluster centers. Points that have a relative density difference of less than
"noise_threshold" and that are density-reachable, are part of the extended neighborhood.
random_state: int, optional (default = 0)
Random number for seeding random number generator. By default, the
method generates the same results. This random state is used to seed
the cross-validation (set partitions), which will in turn affect the bandwidth value
test_ratio_size: float, optional (default = 0.8)
Ratio size of the test set used when performing maximum likelihood estimation.
In order to have smooth density estimations (prevent overfitting), it is recommended to
use a large test_ratio_size (closer to 1.0) rather than a small one.
verbose: int, optional (default = 1)
Set to 0 if you don't want to see print to screen.
bandwidth: float, optional (default = None)
If you want the bandwidth for kernel density to be set automatically or want to set it yourself.
By default it is set automatically.
merge: bool, optional (default = True)
Optional merging at zero noise threshold, merges overlapping minimal clusters
atol: float, optional (default = 0.000005)
kernel density estimate precision parameter. determines the precision used for kde.
smaller values lead to slower execution but better precision
rtol: float, optional (default = 0.00005)
kernel density estimate precision parameter. determines the precision used for kde.
smaller values lead to slower execution but better precision
xtol: float, optional (default = 0.01)
precision parameter for optimizing the bandwidth using maximum likelihood on a test set
search_size: int, optional (default = 20)
when performing search over neighborhoods, size of each local neighborhood to check when
expanding. This drastically slows the coarse-graining if chosen to be too big !
kernel: str, optional (default='gaussian')
Type of Kernel to use for density estimates. Other options are {'epanechnikov'|'linear','tophat'}.
"""
def __init__(self, nh_size='auto', eta=0.5,
random_state=0, test_ratio_size=0.8, verbose=1, bandwidth=None,
merge=True,
atol=0.01,
rtol=0.0001,
xtol=0.01,
search_size = 20,
n_cluster_init = None,
kernel = 'gaussian',
n_job='auto'
):
self.test_ratio_size = test_ratio_size
self.random_state = random_state
self.verbose = verbose
self.nh_size = nh_size
self.bandwidth = bandwidth
self.eta = eta
self.merge = merge
self.atol = atol
self.rtol = rtol
self.xtol = xtol
self.cluster_label = None
self.search_size = search_size
self.n_cluster_init = n_cluster_init
self.kernel = kernel
self.nbrs= None
self.nn_dist= None
self.nn_list= None
self.density_model = None
if n_job == 'auto':
self.n_job=multiprocessing.cpu_count()
else:
if n_job > multiprocessing.cpu_count():
self.n_job=multiprocessing.cpu_count()
else:
self.n_job=n_job
def fit(self, X):
""" Performs density clustering on given data set
Parameters
----------
X : array, (n_sample, n_feature)
Data to cluster.
Returns
----------
self : fdc object
To obtain new cluster labels use self.cluster_label
"""
t = time.time()
self.X = X # shallow copy
self.n_sample = X.shape[0]
if self.n_sample < 10:
assert False, "Too few samples for computing densities !"
if self.nh_size == 'auto':  # equality, not identity, for string comparison
self.nh_size = max([int(25*np.log10(self.n_sample)), 10])
if self.search_size > self.nh_size:
self.search_size = self.nh_size
if self.verbose == 0:
blockPrint()
self.display_main_parameters()
print("[fdc] Starting clustering with n=%i samples..." % X.shape[0])
start = time.time()
print("[fdc] Fitting kernel model for density estimation ...")
self.fit_density(X)
#print("here")
print("[fdc] Finding centers ...")
self.compute_delta(X, self.rho)
print("[fdc] Found %i potential centers ..." % self.idx_centers_unmerged.shape[0])
# temporary idx for the centers :
self.idx_centers = self.idx_centers_unmerged
self.cluster_label = assign_cluster(self.idx_centers_unmerged, self.nn_delta, self.density_graph)
if self.merge: # usually by default one should perform this minimal merging ..
print("[fdc] Merging overlapping minimal clusters ...")
self.check_cluster_stability_fast(X, 0.) # given
if self.eta >= 1e-3 :
print("[fdc] Iterating up to specified noise threshold ...")
self.check_cluster_stability_fast(X, self.eta) # merging 'unstable' clusters
print("[fdc] Done in %.3f s" % (time.time()-start))
enablePrint()
return self
def save(self, name=None):
""" Saves current model to specified path 'name' """
if name is None:
fname = self.make_file_name()
else:
fname = name
fopen = open(fname,'wb')
pickle.dump(self,fopen)
fopen.close()
return fname
def load(self, name=None):
if name is None:
name = self.make_file_name()
self.__dict__.update(pickle.load(open(name,'rb')).__dict__)
return self
def fit_density(self, X):
# nearest neighbors class
self.nbrs = NearestNeighbors(n_neighbors = self.nh_size, algorithm='kd_tree').fit(X)
# get k-NN
self.nn_dist, self.nn_list = self.nbrs.kneighbors(X)
# density model class
self.density_model = KDE(bandwidth=self.bandwidth, test_ratio_size=self.test_ratio_size,
atol=self.atol, rtol=self.rtol, xtol=self.xtol, nn_dist = self.nn_dist, kernel=self.kernel)
# fit density model to data
self.density_model.fit(X)
# save bandwidth
self.bandwidth = self.density_model.bandwidth
# compute density map based on kernel density model
if (self.n_sample > 30000) & (self.n_job !=1) :
print("[fdc] Computing density with %i threads..."%self.n_job)
p = multiprocessing.Pool(self.n_job)
size_split = X.shape[0]//self.n_job
results =[]
idx_split = chunkIt(len(X), self.n_job) # find the index to split the array in approx. n_job equal parts.
for i in range(self.n_job):
results.append(p.apply_async(self.f_tmp, [X[idx_split[i][0]:idx_split[i][1]], i]))
results = [res.get() for res in results]
asort = np.argsort([results[i][0] for i in range(self.n_job)]) # reordering
#print(asort)
self.rho=np.hstack([results[a][1] for a in asort])
else:
print("[fdc] Computing density with 1 thread...")
self.rho = self.density_model.evaluate_density(X)
return self
def f_tmp(self, X_, i_):
"""evaluating density and keeping track of threading order"""
return (i_, self.density_model.evaluate_density(X_))
#@profile
def coarse_grain(self, noise_iterable):
"""Starting from an initial noise scale, progressively merges clusters.
If specified, saves the cluster assignments at every level of the coarse graining.
Parameters
-----------
noise_iterable : iterable of floats
Should be an iterable containing noise values at which to perform coarse graining. Usually
one should start from 0 and go to larger values by small increments. The whole clustering
information is stored in self.hierarchy
Return
------
self
"""
if self.verbose == 0:
blockPrint()
print("[fdc] Coarse graining until desired noise threshold ...")
noise_range = [n for n in noise_iterable]
#hierarchy = []
self.max_noise = -1
n_cluster = 0
# note to self, if no merger is done, no need to store hierarchy ... just work with noise_range dict ...
for nt in noise_range:
if self.n_cluster_init is not None:
if len(self.idx_centers) <= self.n_cluster_init:
print("[fdc.py] Reached number of specified clusters [= %i] (or close to), n_cluster = %i"%(self.n_cluster_init,len(self.idx_centers)))
break
self.check_cluster_stability_fast(self.X, eta = nt)
#hierarchy.append(OD({'idx_centers': self.idx_centers, 'cluster_labels': self.cluster_label})) # -> the only required information <-
if len(self.idx_centers) != n_cluster:
n_cluster = len(self.idx_centers)
self.max_noise = nt
#self.hierarchy = hierarchy
self.noise_range = noise_range
self.noise_threshold = noise_range[-1]
enablePrint()
return self
#@profile
def compute_delta(self, X, rho = None):
"""
Purpose:
Computes distance to nearest-neighbor with higher density
Return:
delta,nn_delta,idx_centers,density_graph
:delta: distance to n.n. with higher density (within some neighborhood cutoff)
:nn_delta: index of n.n. with ...
:idx_centers: list of points that have the largest density in their neighborhood cutoff
:density_graph: for every point, list of points are incoming (via the density gradient)
"""
if rho is None:
rho = self.rho
n_sample, n_feature = X.shape
maxdist = np.linalg.norm([np.max(X[:,i])-np.min(X[:,i]) for i in range(n_feature)])
delta = maxdist*np.ones(n_sample, dtype=np.float)
nn_delta = np.ones(n_sample, dtype=np.int)
density_graph = [[] for i in range(n_sample)] # store incoming leaves
### ----------->
nn_list = self.nn_list # restricted over neighborhood (nh_size)
### ----------->
for i in range(n_sample):
idx = index_greater(rho[nn_list[i]])
if idx:
density_graph[nn_list[i,idx]].append(i)
nn_delta[i] = nn_list[i,idx]
delta[i] = self.nn_dist[i,idx]
else:
nn_delta[i]=-1
idx_centers=np.array(range(n_sample))[delta > 0.999*maxdist]
self.delta = delta
self.nn_delta = nn_delta
self.idx_centers_unmerged = idx_centers
self.density_graph = density_graph
return self
def estimate_eta(self):
""" Based on the density distribution, computes a scale for eta
Need more experimenting, this is not quite working ...
"""
from matplotlib import pyplot as plt
idx = int(self.n_sample/10.)
idx = np.argsort(self.rho)[:-5*idx]#[2:idx:4*idx]
drho = []
for i in idx:
rho_init = self.rho[i]
nn_i = self.nn_delta[i]
while nn_i != -1:
rho_c = self.rho[nn_i]
nn_i = self.nn_delta[nn_i]
drho.append(rho_c- rho_init)
""" plt.his(drho,bins=60)
plt.show()
exit() """
eta = np.mean(drho)#+0.5*np.std(drho)
self.cout("Using std eta of %.3f"%eta)
return eta
def check_cluster_stability_fast(self, X, eta = None): # given
if self.verbose == 0:
blockPrint()
if eta is None:
eta = self.eta
while True: # iterates until the number of clusters does not change ...
self.cluster_label = assign_cluster(self.idx_centers_unmerged, self.nn_delta, self.density_graph) # first approximation of assignments
self.idx_centers, n_false_pos = check_cluster_stability(self, X, eta)
self.idx_centers_unmerged = self.idx_centers
if n_false_pos == 0:
print(" # of stable clusters with noise %.6f : %i" % (eta, self.idx_centers.shape[0]))
break
enablePrint()
""" def get_cluster_info(self, eta = None):
if eta is None:
return self.cluster_label, self.idx_centers
else:
pos = np.argmin(np.abs(np.array(self.noise_range)-eta))
#delta_ = self.noise_range[pos]
#idx_centers = self.hierarchy[pos]['idx_centers']
cluster_label = self.hierarchy[pos]['cluster_labels']
idx_center = self.hierarchy[pos]['idx_centers']
return cluster_label, idx_center """
""" def update_labels(self, idx_centers, cluster_label):
self.idx_centers = idx_centers
self.cluster_label = cluster_label """
#@profile
def find_NH_tree_search(self, idx, eta, cluster_label):
"""
Function for searching for nearest neighbors within some density threshold.
NH should be an empty set for the initial function call.
Note to myself : lots of optimization, this is pretty time/memory consuming !
Parameters
-----------
idx : int
index of the cluster centroid to start from
eta : float
maximum density you can spill over (this is "density_center - eta")
cluster_label: array of int
cluster label for every datapoint.
Returns
-----------
List of points in the neighborhood of point idx : 1D array
"""
rho = self.rho
zero_array = np.zeros(len(self.nn_list),dtype=bool)
nn_list = self.nn_list
zero_array[nn_list[idx, :self.search_size]] = True
new_leaves = zero_array
is_NH = (rho > eta) & (new_leaves)
current_label = cluster_label[idx]
# This could probably be improved, but at least it's fully vectorized and scalable (NlogN in time and N in memory)
while True:
update = False
leaves=np.copy(new_leaves)
#y_leave = cluster_label[leaves]
leaves_cluster = (leaves) & (cluster_label == current_label)
new_leaves=np.zeros(len(self.nn_list), dtype=bool)
nn_leaf = np.unique(nn_list[leaves_cluster][:self.search_size].flatten())
res = nn_leaf[is_NH[nn_leaf]==False]
pos = np.where(rho[res] > eta)[0]
if len(pos) > 0: update=True
is_NH[res[pos]] = True
new_leaves[res[pos]] = True
if update is False:
break
return np.where(is_NH)[0]
def find_NH_tree_search_v1(self, idx, eta, cluster_label):
"""
Function for searching for nearest neighbors within
some density threshold.
NH should be an empty set for the initial function call.
Note to myself : lots of optimization, this is pretty time consuming !
Returns
-----------
List of points in the neighborhood of point idx : 1D array
"""
rho = self.rho
nn_list = self.nn_list
new_leaves=nn_list[idx][:self.search_size]
is_NH = np.zeros(len(self.nn_list),dtype=np.int)
is_NH[new_leaves[rho[new_leaves] > eta]] = 1
current_label = cluster_label[idx]
# ideally here we cythonize what's below... this is highly ineficient ...
while True:
update = False
leaves=np.hstack(new_leaves)
new_leaves=[]
y_leave = cluster_label[leaves]
leaves_cluster = leaves[y_leave == current_label]
nn_leaf = nn_list[leaves_cluster]
for i in range(1, self.search_size):
res = nn_leaf[is_NH[nn_leaf[:,i]] == 0, i]
pos = np.where(rho[res] > eta)[0]
if len(pos) > 0: update=True
is_NH[res[pos]] = 1
new_leaves.append(res[pos])
if update is False:
break
return np.where(is_NH == 1)[0]
def cout(self, s):
print('[fdc] '+s)
def make_file_name(self):
t_name = "fdc_nhSize=%i_eta=%.3f_ratio=%.2f.pkl"
return t_name%(self.nh_size, self.eta, self.test_ratio_size)
""" def compute_coarse_grain_graph(self):
graph = {}
for idx in self.idx_centers: # at some scale
NH = self.find_NH_tree_search(idx, eta, cluster_label)
label_centers_nn = np.unique([cluster_label[ni] for ni in NH]) """
def display_main_parameters(self):
if self.eta != 'auto':  # equality, not identity, for string comparison
eta = "%.3f"%self.eta
else:
eta = self.eta
out = [
"[fdc] {0:<20s}{1:<4s}{2:<6d}".format("nh_size",":",self.nh_size),
"[fdc] {0:<20s}{1:<4s}{2:<6s}".format("eta",":",eta),
"[fdc] {0:<20s}{1:<4s}{2:<6s}".format("merge",":",str(self.merge)),
"[fdc] {0:<20s}{1:<4s}{2:<6d}".format("search_size",":",self.search_size),
"[fdc] {0:<20s}{1:<4s}{2:<6.3f}".format("test_size_ratio",":",self.test_ratio_size)
]
for o in out:
print(o)
def reset(self):
self.bandwidth = None
#####################################################
#####################################################
############ utility functions below ################
#####################################################
#####################################################
def check_cluster_stability(self, X, threshold):
"""
Given the identified cluster centers, performs a more rigorous
neighborhood search (based on some noise threshold) for points with higher densities.
This is vaguely similar to watershed cuts in image segmentation and basically
makes sure we haven't identified spurious cluster centers w.r.t. some noise threshold (false positives).
This has bad memory complexity, needs improvement if we want to run on N>10^5 data points.
"""
density_graph = self.density_graph
nn_delta = self.nn_delta
delta = self.delta
rho = self.rho
nn_list = self.nn_list
idx_centers = self.idx_centers_unmerged
cluster_label = self.cluster_label
n_false_pos = 0
idx_true_centers = []
for idx in idx_centers:
rho_center = rho[idx]
delta_rho = rho_center - threshold
if threshold < 1e-3: # just check nn_list ...
NH=nn_list[idx][1:self.search_size]
else:
NH = self.find_NH_tree_search(idx, delta_rho, cluster_label)
#print(len(NH))
label_centers_nn = np.unique(self.cluster_label[NH])#[cluster_label[ni] for ni in NH])
idx_max = idx_centers[ label_centers_nn[np.argmax(rho[idx_centers[label_centers_nn]])] ]
rho_current = rho[idx]
if ( rho_current < rho[idx_max] ) & ( idx != idx_max ) :
nn_delta[idx] = idx_max
delta[idx] = np.linalg.norm(X[idx_max]-X[idx])
density_graph[idx_max].append(idx)
n_false_pos+=1
else:
idx_true_centers.append(idx)
return np.array(idx_true_centers,dtype=np.int), n_false_pos
def assign_cluster(idx_centers, nn_delta, density_graph):
"""
Given the cluster centers and the local gradients (nn_delta) assign to every
point a cluster label
"""
n_center = idx_centers.shape[0]
n_sample = nn_delta.shape[0]
cluster_label = -1*np.ones(n_sample,dtype=np.int) # reinitialized every time.
for c, label in zip(idx_centers, range(n_center) ):
cluster_label[c] = label
assign_cluster_deep(density_graph[c], cluster_label, density_graph, label)
return cluster_label
def assign_cluster_deep(root,cluster_label,density_graph,label):
"""
Recursive function for assigning labels for a tree graph.
Stopping condition is met when the root is empty (i.e. a leaf has been reached)
"""
if not root: # then must be a leaf !
return
else:
for child in root:
cluster_label[child]=label
assign_cluster_deep(density_graph[child],cluster_label,density_graph,label)
def index_greater(array, prec=1e-8):
"""
Purpose:
Function for finding first item in an array that has a value greater than the first element in that array
If no element is found, returns None
Precision:
1e-8
Return:
int or None
"""
item=array[0]
for idx, val in np.ndenumerate(array): # slow ! : could be cythonized
if val > (item + prec):
return idx[0]
def blockPrint():
"""Blocks printing to screen"""
sys.stdout = open(os.devnull, 'w')
def enablePrint():
"""Enables printing to screen"""
sys.stdout = sys.__stdout__
def chunkIt(length_seq, num):
avg = length_seq / float(num)
out = []
last = 0.0
idx_list = []
while last < length_seq:
idx_list.append([int(last),int(last + avg)])
last += avg
if len(idx_list) > num:
idx_list.pop()
idx_list[-1] = [idx_list[-1][0], length_seq]
return idx_list
| 4,753
| 0
| 255
|
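The FDC class documented in the fdc/fdc.py record above exposes fit(), coarse_grain() and the cluster_label / idx_centers attributes. A minimal usage sketch follows, under the assumptions that the class is importable as fdc.FDC and that scikit-learn is available for toy data; this is illustrative and not an example taken from the repository:

# Hypothetical usage of the FDC clusterer described above.
import numpy as np
from sklearn.datasets import make_blobs
from fdc import FDC  # assumes the package exposes the class at top level

X, _ = make_blobs(n_samples=2000, centers=4, n_features=2, random_state=0)  # low-dimensional data

model = FDC(eta=0.4, merge=True)   # eta: noise threshold used when merging clusters
model.fit(X)

labels = model.cluster_label       # one label per sample
centers = model.idx_centers        # indices of the retained density peaks
print(len(np.unique(labels)), "clusters found")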
8af9843b14b841e700e5b71b89c9be14eff3deb2
| 539
|
py
|
Python
|
aztk_cli/spark/endpoints/job/get_app.py
|
Geims83/aztk
|
8f8e7b268bdbf82c3ae4ecdcd907077bd6fe69b6
|
[
"MIT"
] | 161
|
2017-10-04T08:58:27.000Z
|
2022-01-03T13:01:04.000Z
|
aztk_cli/spark/endpoints/job/get_app.py
|
Geims83/aztk
|
8f8e7b268bdbf82c3ae4ecdcd907077bd6fe69b6
|
[
"MIT"
] | 400
|
2017-09-29T21:52:08.000Z
|
2021-01-08T02:48:56.000Z
|
aztk_cli/spark/endpoints/job/get_app.py
|
isabella232/aztk
|
6e04372d19661ead6744387edab7beda16e3d928
|
[
"MIT"
] | 74
|
2017-10-13T04:41:26.000Z
|
2021-12-20T15:56:42.000Z
|
import argparse
import typing
import aztk.spark
from aztk_cli import config, utils
| 31.705882
| 104
| 0.762523
|
import argparse
import typing
import aztk.spark
from aztk_cli import config, utils
def setup_parser(parser: argparse.ArgumentParser):
parser.add_argument("--id", dest="job_id", required=True, help="The unique id of your AZTK job")
parser.add_argument("--name", dest="app_name", required=True, help="The unique id of your job name")
def execute(args: typing.NamedTuple):
spark_client = aztk.spark.Client(config.load_aztk_secrets())
utils.print_application(spark_client.job.get_application(args.job_id, args.app_name))
| 407
| 0
| 46
|
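The get_app.py sub-command above simply forwards --id and --name to spark_client.job.get_application and pretty-prints the result. The same call can be sketched outside the CLI; the job and application identifiers below are placeholders, not values from the source:

# Hypothetical direct use of the client call wired up by the CLI module above.
import aztk.spark
from aztk_cli import config, utils

spark_client = aztk.spark.Client(config.load_aztk_secrets())
application = spark_client.job.get_application("my-job-id", "my-app-name")  # placeholder ids
utils.print_application(application)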
f3302cd3e06d5ffb2be99f117180800962602ad7
| 1,021
|
py
|
Python
|
src/start_stop_timer.py
|
MarioMey/OBS-Studio-Python-Scripting-Cheatsheet-obspython-Examples-of-API
|
7f1f2a00c2731c558f6b9fd75edb697fc0719a7c
|
[
"MIT"
] | 4
|
2021-03-23T05:25:24.000Z
|
2021-12-29T16:46:01.000Z
|
src/start_stop_timer.py
|
MarioMey/OBS-Studio-Python-Scripting-Cheatsheet-obspython-Examples-of-API
|
7f1f2a00c2731c558f6b9fd75edb697fc0719a7c
|
[
"MIT"
] | null | null | null |
src/start_stop_timer.py
|
MarioMey/OBS-Studio-Python-Scripting-Cheatsheet-obspython-Examples-of-API
|
7f1f2a00c2731c558f6b9fd75edb697fc0719a7c
|
[
"MIT"
] | null | null | null |
import obspython as obs
from random import choices
FLAG = True
INTERVAL = 100
eg = Example()  # class created, obs part starts
| 22.688889
| 76
| 0.624878
|
import obspython as obs
from random import choices
FLAG = True
INTERVAL = 100
class Example:
def __init__(self):
self.lock = True
def random_numbers(self):
print(choices(range(1,10),k=3))
def ticker(self):
""" how fast to update. One callback at a time, guarded by the lock"""
if self.lock:
self.random_numbers()
if not self.lock:
obs.remove_current_callback()
eg = Example()  # class created, obs part starts
def stop_pressed(props, prop):
global FLAG
FLAG = True
eg.lock = False
def start_pressed(props, prop):
global FLAG # to keep only one timer callback
if FLAG:
obs.timer_add(eg.ticker, INTERVAL)
eg.lock = True
FLAG = False
def script_properties(): # ui
props = obs.obs_properties_create()
obs.obs_properties_add_button(props, "button", "Stop", stop_pressed)
obs.obs_properties_add_button(props, "button2", "Start", start_pressed)
return props
| 510
| 265
| 100
|
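The start_stop_timer.py record above relies on two guards: the global FLAG ensures only one obs.timer_add() callback is ever registered, and the per-instance lock lets the callback unregister itself via obs.remove_current_callback(). Below is a rough stand-alone analogue of that pattern using threading.Timer; it is purely illustrative and not part of the OBS script:

# Illustrative analogue of the FLAG/lock start-stop pattern, without obspython.
import threading
from random import choices

INTERVAL = 0.1    # seconds, mirroring the 100 ms interval above
running = False   # plays the combined role of FLAG and eg.lock

def ticker():
    if not running:
        return                                     # analogue of obs.remove_current_callback()
    print(choices(range(1, 10), k=3))
    threading.Timer(INTERVAL, ticker).start()      # re-arm the timer

def start():
    global running
    if not running:                                # keep only one timer chain alive
        running = True
        ticker()

def stop():
    global running
    running = False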
e825db0cd14e732018d8765c992065e614f82878
| 1,048
|
py
|
Python
|
checkout_sdk/disputes/disputes.py
|
riaz-bordie-cko/checkout-sdk-python
|
d9bc073306c1a98544c326be693ed722576ea895
|
[
"MIT"
] | null | null | null |
checkout_sdk/disputes/disputes.py
|
riaz-bordie-cko/checkout-sdk-python
|
d9bc073306c1a98544c326be693ed722576ea895
|
[
"MIT"
] | null | null | null |
checkout_sdk/disputes/disputes.py
|
riaz-bordie-cko/checkout-sdk-python
|
d9bc073306c1a98544c326be693ed722576ea895
|
[
"MIT"
] | null | null | null |
from datetime import datetime
| 28.324324
| 53
| 0.78626
|
from datetime import datetime
class DisputesQueryFilter:
limit: int
skip: int
from_: datetime
to: datetime
id: str
statuses: str
payment_id: str
payment_reference: str
payment_arn: str
this_channel_only: bool # Only available for CS2
entity_ids: str
sub_entity_ids: str
payment_mcc: str
class DisputeEvidenceRequest:
proof_of_delivery_or_service_file: str
proof_of_delivery_or_service_text: str
invoice_or_receipt_file: str
invoice_or_receipt_text: str
invoice_showing_distinct_transactions_file: str
invoice_showing_distinct_transactions_text: str
customer_communication_file: str
customer_communication_text: str
refund_or_cancellation_policy_file: str
refund_or_cancellation_policy_text: str
recurring_transaction_agreement_file: str
recurring_transaction_agreement_text: str
additional_evidence_file: str
additional_evidence_text: str
proof_of_delivery_or_service_date_file: str
proof_of_delivery_or_service_date_text: str
| 0
| 970
| 46
|
6f8866f4896b6cf29e177021809a4addfceb41c6
| 3,582
|
py
|
Python
|
esp32/sensor_identifikation/ntc.py
|
phofmeier/wifi_temp_sensor
|
93ebc466b6913842cac6eb9385272c79651d2479
|
[
"MIT"
] | null | null | null |
esp32/sensor_identifikation/ntc.py
|
phofmeier/wifi_temp_sensor
|
93ebc466b6913842cac6eb9385272c79651d2479
|
[
"MIT"
] | null | null | null |
esp32/sensor_identifikation/ntc.py
|
phofmeier/wifi_temp_sensor
|
93ebc466b6913842cac6eb9385272c79651d2479
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import casadi as cas
# define functions
# Check if Sensor is a NTC and find out the Resistance
R_measured = [103e3, 120e3, 70e3, 15.2e3]
T_measured_C = [25, 20, 33, 74]
R_N = 103e3
T_N = 298.15
U_ges = 3.3
T_range_C = [0, 200]
# Kelvin from Temp
T_range = [i + 273.15 for i in T_range_C]
T_measured = [i + 273.15 for i in T_measured_C]
# Fit B
B_sym = cas.SX.sym("B")
f = 0
for R, T in zip(R_measured, T_measured):
f += (R-R_ntc(R_N, B_sym, T, T_N))**2
nlp = {'x': B_sym, 'f': f}
S = cas.nlpsol('S', 'ipopt', nlp)
res = S()
B = res['x'].full()[0]
# Search for the value of R_1 that maximizes the temperature sensitivity (steepest dU/dT) of the measured voltage at a specific temperature
T_Nenn_C = 65
T_Nenn = T_Nenn_C + 273.15
R_1 = cas.SX.sym("R_1")
T_sym = cas.SX.sym("T")
U = U_meas(U_ges, R_1, R_ntc(R_N, B, T_sym, T_N))
jac = cas.Function("dudT", [R_1, T_sym], [cas.jacobian(U, T_sym)])
lbx = [0]
ubx = [cas.inf]
nlp = {'x': R_1, 'f': jac(R_1, T_Nenn)}
S = cas.nlpsol('S', 'ipopt', nlp)
res = S(lbx=lbx, ubx=ubx)
R_1 = res['x'].full()[0]
# plot
T = np.linspace(T_range[0], T_range[1], num=200)
T_C = np.linspace(T_range_C[0], T_range_C[1], num=200)
print("R_1: %e" % R_1)
print("B: %e" % B)
plt.figure()
plt.title("Fitted and Measured Resistance of the NTC")
plt.plot(T_C, R_ntc(R_N, B, T, T_N), label="R_fitted")
plt.plot(T_measured_C, R_measured, "x", label="R_measured")
plt.xlabel("Temp [Grad]")
plt.ylabel("R [Ohm]")
plt.legend()
U_range = U_meas(U_ges, R_1, R_ntc(R_N, B, T, T_N))
plt.figure()
plt.title("Voltage vs. Temperature with optimal Resistance. Vertical line shows the optimal Temperature.")
plt.plot(T_C, U_range, label="R/NTC")
plt.vlines(T_Nenn_C, U_range.min(), U_range.max())
plt.xlabel("Temp [Grad]")
plt.ylabel("U [V]")
plt.legend()
# Calibrate Sensor from ADC measurements
# measurements
adc_voltage_measured = [2.21634, 2.17343, 1.59904, 1.49781, 0.84637, 0.71773, 0.59042, 0.67618, 0.18674, 0.22936,0.18058,0.16265, 0.31080, 0.35350, 0.51420, 0.54871, 0.69035, 0.74026, 0.93252, 0.97848, 1.14037, 1.22280, 1.62269, 1.66503, 2.68944, 2.70243]
T_measured = [42, 43, 60, 65, 92, 99, 106, 101, 156, 146, 158, 161, 133, 128, 112, 109, 100, 97, 87, 85, 78, 75, 61, 59, 24, 23]
# Fit Model
R1 = 24.9e3
B_sym = cas.SX.sym("B")
Rn_sym = cas.SX.sym("Rn")
Tn_sym = cas.SX.sym("Tn")
syms = cas.vertcat(B_sym, Rn_sym, Tn_sym)
x0 = [B, 103e3, T_N]
f = 0
for T, adc_voltage in zip(T_measured, adc_voltage_measured):
f += (T-Temp(adc_voltage, R1, U_ges, B_sym, Rn_sym, Tn_sym))**2
f += (Rn_sym-103e3)**2+(Tn_sym-T_N)**2
nlp = {'x': syms, 'f': f}
S = cas.nlpsol('S', 'ipopt', nlp)
res = S(x0=x0)
B = res['x'].full()[0]
Rn = res['x'].full()[1]
Tn = res['x'].full()[2]
print("B:", B, "R1:", R1, "Rn:", Rn, "Tn:", Tn)
plt.figure()
plt.title("Fitted Model for measured Voltage vs. Temperature")
adc_range = np.linspace(min(adc_voltage_measured), max(adc_voltage_measured))
plt.plot(adc_range, Temp(adc_range, R1, U_ges, B, Rn, Tn), label="Fitted Model")
plt.plot(adc_voltage_measured, T_measured, 'x', label="Measured")
plt.xlabel("ADC voltage [V]")
plt.ylabel("Temp [Grad]")
plt.legend()
plt.show()
| 27.343511
| 255
| 0.650475
|
import numpy as np
import matplotlib.pyplot as plt
import casadi as cas
# define functions
def R_ntc(R_N, B, T, T_N):
return R_N * np.exp(B*(1/T-1/T_N))
def U_meas(U_ges, R_1, R_2):
return U_ges/(R_1+R_2) * R_2
def Temp(U_meas, R_1, U_ges, B, R_N, T_N):
R_NTC = U_meas * R_1 / (U_ges - U_meas)
T_kelvin = 1 / (np.log(R_NTC / R_N) / B + 1 / T_N)
return T_kelvin - 273.15
def Temp_adc(adc_value, R_1, U_ges, B, R_N, T_N):
return Temp(U_ges/4096 * adc_value, R_1, U_ges, B, R_N, T_N)
# Check if Sensor is a NTC and find out the Resistance
R_measured = [103e3, 120e3, 70e3, 15.2e3]
T_measured_C = [25, 20, 33, 74]
R_N = 103e3
T_N = 298.15
U_ges = 3.3
T_range_C = [0, 200]
# Kelvin from Temp
T_range = [i + 273.15 for i in T_range_C]
T_measured = [i + 273.15 for i in T_measured_C]
# Fit B
B_sym = cas.SX.sym("B")
f = 0
for R, T in zip(R_measured, T_measured):
f += (R-R_ntc(R_N, B_sym, T, T_N))**2
nlp = {'x': B_sym, 'f': f}
S = cas.nlpsol('S', 'ipopt', nlp)
res = S()
B = res['x'].full()[0]
# Search for the value of R_1 that maximizes the temperature sensitivity (steepest dU/dT) of the measured voltage at a specific temperature
T_Nenn_C = 65
T_Nenn = T_Nenn_C + 273.15
R_1 = cas.SX.sym("R_1")
T_sym = cas.SX.sym("T")
U = U_meas(U_ges, R_1, R_ntc(R_N, B, T_sym, T_N))
jac = cas.Function("dudT", [R_1, T_sym], [cas.jacobian(U, T_sym)])
lbx = [0]
ubx = [cas.inf]
nlp = {'x': R_1, 'f': jac(R_1, T_Nenn)}
S = cas.nlpsol('S', 'ipopt', nlp)
res = S(lbx=lbx, ubx=ubx)
R_1 = res['x'].full()[0]
# plot
T = np.linspace(T_range[0], T_range[1], num=200)
T_C = np.linspace(T_range_C[0], T_range_C[1], num=200)
print("R_1: %e" % R_1)
print("B: %e" % B)
plt.figure()
plt.title("Fitted and Measured Resistance of the NTC")
plt.plot(T_C, R_ntc(R_N, B, T, T_N), label="R_fitted")
plt.plot(T_measured_C, R_measured, "x", label="R_measured")
plt.xlabel("Temp [Grad]")
plt.ylabel("R [Ohm]")
plt.legend()
U_range = U_meas(U_ges, R_1, R_ntc(R_N, B, T, T_N))
plt.figure()
plt.title("Voltage vs. Temperature with optimal Resistance. Vertical line shows the optimal Temperature.")
plt.plot(T_C, U_range, label="R/NTC")
plt.vlines(T_Nenn_C, U_range.min(), U_range.max())
plt.xlabel("Temp [Grad]")
plt.ylabel("U [V]")
plt.legend()
# Calibrate Sensor from ADC measurements
# measurements
adc_voltage_measured = [2.21634, 2.17343, 1.59904, 1.49781, 0.84637, 0.71773, 0.59042, 0.67618, 0.18674, 0.22936,0.18058,0.16265, 0.31080, 0.35350, 0.51420, 0.54871, 0.69035, 0.74026, 0.93252, 0.97848, 1.14037, 1.22280, 1.62269, 1.66503, 2.68944, 2.70243]
T_measured = [42, 43, 60, 65, 92, 99, 106, 101, 156, 146, 158, 161, 133, 128, 112, 109, 100, 97, 87, 85, 78, 75, 61, 59, 24, 23]
# Fit Model
R1 = 24.9e3
B_sym = cas.SX.sym("B")
Rn_sym = cas.SX.sym("Rn")
Tn_sym = cas.SX.sym("Tn")
syms = cas.vertcat(B_sym, Rn_sym, Tn_sym)
x0 = [B, 103e3, T_N]
f = 0
for T, adc_voltage in zip(T_measured, adc_voltage_measured):
f += (T-Temp(adc_voltage, R1, U_ges, B_sym, Rn_sym, Tn_sym))**2
f += (Rn_sym-103e3)**2+(Tn_sym-T_N)**2
nlp = {'x': syms, 'f': f}
S = cas.nlpsol('S', 'ipopt', nlp)
res = S(x0=x0)
B = res['x'].full()[0]
Rn = res['x'].full()[1]
Tn = res['x'].full()[2]
print("B:", B, "R1:", R1, "Rn:", Rn, "Tn:", Tn)
plt.figure()
plt.title("Fitted Model for measured Voltage vs. Temperature")
adc_range = np.linspace(min(adc_voltage_measured), max(adc_voltage_measured))
plt.plot(adc_range, Temp(adc_range, R1, U_ges, B, Rn, Tn), label="Fitted Model")
plt.plot(adc_voltage_measured, T_measured, 'x', label="Measured")
plt.xlabel("ADC voltage [V]")
plt.ylabel("Temp [Grad]")
plt.legend()
plt.show()
| 327
| 0
| 91
|
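The ntc.py record above fits B, R1, Rn and Tn offline; at run time only the closed-form conversion in Temp()/Temp_adc() is needed. Below is a minimal stand-alone converter using the same formulas, with placeholder constants; in practice, substitute the values printed by the calibration script:

# Hypothetical firmware-side converter; the constants below are placeholders, not fitted values.
import math

B, R_1, R_N, T_N, U_GES = 3950.0, 24.9e3, 103e3, 298.15, 3.3

def temp_from_adc(adc_value, adc_max=4096):
    u_meas = U_GES / adc_max * adc_value                        # ADC counts -> measured voltage
    r_ntc = u_meas * R_1 / (U_GES - u_meas)                     # voltage divider -> NTC resistance
    t_kelvin = 1.0 / (math.log(r_ntc / R_N) / B + 1.0 / T_N)   # B-parameter (beta) equation
    return t_kelvin - 273.15                                    # Kelvin -> degrees Celsius

print("%.1f C" % temp_from_adc(2048))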
11b074486878ba17beb24bb3f56ff683fc015de8
| 2,698
|
py
|
Python
|
tools/diff_filter.py
|
caizhanjin/deepseg
|
5e91a387683ad73075b51b49da8957d8f4bb6b7f
|
[
"Apache-2.0"
] | null | null | null |
tools/diff_filter.py
|
caizhanjin/deepseg
|
5e91a387683ad73075b51b49da8957d8f4bb6b7f
|
[
"Apache-2.0"
] | null | null | null |
tools/diff_filter.py
|
caizhanjin/deepseg
|
5e91a387683ad73075b51b49da8957d8f4bb6b7f
|
[
"Apache-2.0"
] | null | null | null |
import os
import re
if __name__ == "__main__":
filters = [SingleChineseFilter()]
diff_file = "C:\\Users\\allen.luo\\Desktop\\diff_filter\\test.diff.txt"
differ = DiffFilter(diff_file)
differ.filter(filters)
| 32.506024
| 92
| 0.490734
|
import os
import re
class Filter(object):
def accept(self, line):
raise NotImplementedError()
class SingleChineseFilter(Filter):
def __init__(self):
self.pattern = re.compile("[\u4e00-\u9fa5]+")
self.key_words = ['号', '层', '幢', '与', '栋', '旁', '室', '楼']
def accept(self, line):
words = line.split(" ")
acc = False
for w in words:
if len(w) > 1:
continue
if w in self.key_words:
continue
m = self.pattern.findall(w)
if not m:
continue
acc = True
break
return acc
class DiffFilter(object):
def __init__(self, diff_file):
self.diff_file = diff_file
def filter(self, filters):
filepath = str(self.diff_file).split(os.sep)[0:-1]
skipped_file = os.path.join(os.sep.join(filepath), "skipped.txt")
selected_file = os.path.join(os.sep.join(filepath), "selected.txt")
editable = os.path.join(os.sep.join(filepath), "editable.txt")
with open(self.diff_file, mode="rt", encoding="utf8", buffering=8192) as f, \
open(skipped_file, mode="wt", encoding="utf8", buffering=8192) as skip, \
open(selected_file, mode="wt", encoding="utf8", buffering=8192) as select, \
open(editable, mode="wt", encoding="utf8", buffering=8192) as ed:
while True:
line = f.readline()
if not line:
break
# empty = f.readline()
jieba = f.readline().replace("STD_label:", "").strip("\n")
model = f.readline().replace("SEG_label:", "").strip("\n")
print(jieba)
print(model)
print()
if not jieba or not model:
continue
skip_line = True
for flt in filters:
if flt.accept(model):
skip_line = False
break
if not skip_line:
select.write("jieba: %s\n" % jieba)
select.write("model: %s\n" % model)
select.write("\n")
ed.write(model + "\n")
ed.write("\n\n")
else:
skip.write("jieba: %s\n" % jieba)
skip.write("model: %s\n" % model)
skip.write("\n")
if __name__ == "__main__":
filters = [SingleChineseFilter()]
diff_file = "C:\\Users\\allen.luo\\Desktop\\diff_filter\\test.diff.txt"
differ = DiffFilter(diff_file)
differ.filter(filters)
| 2,265
| 17
| 204
|
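DiffFilter.filter() in the record above keeps a jieba/model line pair as soon as any filter's accept() returns True, so new selection rules only need to implement that one method. A hedged sketch of an extra filter following the same interface, illustrative only and not in the repository:

# Hypothetical additional filter; plugs into DiffFilter.filter() like SingleChineseFilter.
class MinTokenFilter(Filter):
    """Accept a segmented line only if it contains at least `n` space-separated tokens."""
    def __init__(self, n=5):
        self.n = n

    def accept(self, line):
        return len(line.split(" ")) >= self.n

# differ = DiffFilter("test.diff.txt")                         # path is a placeholder
# differ.filter([SingleChineseFilter(), MinTokenFilter(8)])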
644df2770b74634974010bc59eadeb47c1623f79
| 2,967
|
py
|
Python
|
tests/onegov/agency/conftest.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/agency/conftest.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
tests/onegov/agency/conftest.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.agency.app import AgencyApp
from onegov.agency.initial_content import create_new_organisation
from onegov.user import User
from os import path
from pytest import fixture
from sqlalchemy.orm.session import close_all_sessions
from tests.shared import Client
from tests.shared.utils import create_app
from transaction import commit
from yaml import dump
@fixture(scope='function')
@fixture(scope='function')
@fixture(scope='function')
@fixture(scope='function')
@fixture(scope='function')
| 26.491071
| 76
| 0.601618
|
from onegov.agency.app import AgencyApp
from onegov.agency.initial_content import create_new_organisation
from onegov.user import User
from os import path
from pytest import fixture
from sqlalchemy.orm.session import close_all_sessions
from tests.shared import Client
from tests.shared.utils import create_app
from transaction import commit
from yaml import dump
@fixture(scope='function')
def cfg_path(postgres_dsn, session_manager, temporary_directory, redis_url):
cfg = {
'applications': [
{
'path': '/agency/*',
'application': 'onegov.agency.app.AgencyApp',
'namespace': 'agency',
'configuration': {
'dsn': postgres_dsn,
'redis_url': redis_url,
'depot_backend': 'depot.io.memory.MemoryFileStorage',
'filestorage': 'fs.osfs.OSFS',
'filestorage_options': {
'root_path': '{}/file-storage'.format(
temporary_directory
),
'create': 'true'
},
}
}
]
}
cfg_path = path.join(temporary_directory, 'onegov.yml')
with open(cfg_path, 'w') as f:
f.write(dump(cfg))
return cfg_path
def create_agency_app(request, use_elasticsearch=False):
app = create_app(
AgencyApp,
request,
use_smtp=True,
use_elasticsearch=use_elasticsearch
)
org = create_new_organisation(app, name="Govikon")
org.meta['reply_to'] = 'mails@govikon.ch'
org.meta['locales'] = 'de_CH'
session = app.session()
test_password = request.getfixturevalue('test_password')
session.add(
User(
username='admin@example.org',
password_hash=test_password,
role='admin'
)
)
session.add(
User(
username='editor@example.org',
password_hash=test_password,
role='editor'
)
)
session.add(
User(
username='member@example.org',
password_hash=test_password,
role='member'
)
)
commit()
close_all_sessions()
return app
@fixture(scope='function')
def agency_app(request):
app = create_agency_app(request, use_elasticsearch=False)
yield app
app.session_manager.dispose()
@fixture(scope='function')
def es_agency_app(request):
app = create_agency_app(request, use_elasticsearch=True)
yield app
app.session_manager.dispose()
@fixture(scope='function')
def client(agency_app):
client = Client(agency_app)
client.skip_first_form = True
client.use_intercooler = True
return client
@fixture(scope='function')
def client_with_es(es_agency_app):
client = Client(es_agency_app)
client.skip_first_form = True
client.use_intercooler = True
return client
| 2,325
| 0
| 133
|
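The conftest.py record above builds a fully configured AgencyApp, seeds admin/editor/member users, and wraps the app in the shared Client fixture. A sketch of a test consuming that fixture follows; the route and the WebTest-style response attribute are assumptions made for illustration, not taken from the repository:

# Hypothetical test using the `client` fixture defined above.
def test_homepage_is_reachable(client):
    page = client.get('/')          # '/' is an assumed route
    assert page.status_int == 200   # assumes a WebTest-style response object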
8b67b98d8ea2d3b839bcd5bef3e02df81aed5aca
| 1,142
|
py
|
Python
|
instances/apps.py
|
glzjin/webvirtcloud
|
ecaf11e02aeb57654257ed502d3da6fd8405f21b
|
[
"Apache-2.0"
] | null | null | null |
instances/apps.py
|
glzjin/webvirtcloud
|
ecaf11e02aeb57654257ed502d3da6fd8405f21b
|
[
"Apache-2.0"
] | null | null | null |
instances/apps.py
|
glzjin/webvirtcloud
|
ecaf11e02aeb57654257ed502d3da6fd8405f21b
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
from django.db.models.signals import post_migrate
def migrate_can_clone_instances(sender, **kwargs):
'''
Migrate can clone instances user attribute to permission
'''
from django.conf import settings
from django.contrib.auth.models import User, Permission
from accounts.models import UserAttributes
plan = kwargs['plan']
for migration, rolled_back in plan:
if migration.app_label == 'instances' and migration.name == '0002_permissionset' and not rolled_back:
users = User.objects.all()
permission = Permission.objects.get(codename='clone_instances')
print('\033[92mMigrating can_clone_instances user attribute to permission\033[0m')
for user in users:
if user.userattributes:
if user.userattributes.can_clone_instances:
user.user_permissions.add(permission)
break
| 35.6875
| 109
| 0.683888
|
from django.apps import AppConfig
from django.db.models.signals import post_migrate
def migrate_can_clone_instances(sender, **kwargs):
'''
Migrate can clone instances user attribute to permission
'''
from django.conf import settings
from django.contrib.auth.models import User, Permission
from accounts.models import UserAttributes
plan = kwargs['plan']
for migration, rolled_back in plan:
if migration.app_label == 'instances' and migration.name == '0002_permissionset' and not rolled_back:
users = User.objects.all()
permission = Permission.objects.get(codename='clone_instances')
print('\033[92mMigrating can_clone_instances user attribute to permission\033[0m')
for user in users:
if user.userattributes:
if user.userattributes.can_clone_instances:
user.user_permissions.add(permission)
break
class InstancesConfig(AppConfig):
name = 'instances'
verbose_name = 'instances'
def ready(self):
post_migrate.connect(migrate_can_clone_instances, sender=self)
| 66
| 93
| 23
|
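Once the post_migrate hook in the record above has run, the old can_clone_instances user attribute is expressed as the standard clone_instances permission on the instances app. A hedged sketch of how view code could then gate on it; the view itself is hypothetical, only the permission label follows the migration above:

# Hypothetical permission gate after the migration above has run.
from django.contrib.auth.decorators import permission_required

@permission_required('instances.clone_instances', raise_exception=True)
def clone_instance_view(request, instance_id):
    ...  # cloning logic is out of scope here; only the permission check is illustrated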
db9b52b59816ce2444968110f44841a8d37eb201
| 9,759
|
py
|
Python
|
tests/opc/test_rel.py
|
revvsales/python-docx-1
|
5b3ff2b828cc30f1567cb1682a8cb399143732d7
|
[
"MIT"
] | 3,031
|
2015-01-02T11:11:24.000Z
|
2022-03-30T00:57:17.000Z
|
tests/opc/test_rel.py
|
revvsales/python-docx-1
|
5b3ff2b828cc30f1567cb1682a8cb399143732d7
|
[
"MIT"
] | 934
|
2015-01-06T20:53:56.000Z
|
2022-03-28T10:08:03.000Z
|
tests/opc/test_rel.py
|
revvsales/python-docx-1
|
5b3ff2b828cc30f1567cb1682a8cb399143732d7
|
[
"MIT"
] | 901
|
2015-01-07T18:22:07.000Z
|
2022-03-31T18:38:51.000Z
|
# encoding: utf-8
"""
Unit test suite for the docx.opc.rel module
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import pytest
from docx.opc.oxml import CT_Relationships
from docx.opc.packuri import PackURI
from docx.opc.part import Part
from docx.opc.rel import _Relationship, Relationships
from ..unitutil.mock import (
call, class_mock, instance_mock, Mock, patch, PropertyMock
)
| 34.242105
| 77
| 0.630905
|
# encoding: utf-8
"""
Unit test suite for the docx.opc.rel module
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import pytest
from docx.opc.oxml import CT_Relationships
from docx.opc.packuri import PackURI
from docx.opc.part import Part
from docx.opc.rel import _Relationship, Relationships
from ..unitutil.mock import (
call, class_mock, instance_mock, Mock, patch, PropertyMock
)
class Describe_Relationship(object):
def it_remembers_construction_values(self):
# test data --------------------
rId = 'rId9'
reltype = 'reltype'
target = Mock(name='target_part')
external = False
# exercise ---------------------
rel = _Relationship(rId, reltype, target, None, external)
# verify -----------------------
assert rel.rId == rId
assert rel.reltype == reltype
assert rel.target_part == target
assert rel.is_external == external
def it_should_raise_on_target_part_access_on_external_rel(self):
rel = _Relationship(None, None, None, None, external=True)
with pytest.raises(ValueError):
rel.target_part
def it_should_have_target_ref_for_external_rel(self):
rel = _Relationship(None, None, 'target', None, external=True)
assert rel.target_ref == 'target'
def it_should_have_relative_ref_for_internal_rel(self):
"""
Internal relationships (TargetMode == 'Internal' in the XML) should
have a relative ref, e.g. '../slideLayouts/slideLayout1.xml', for
the target_ref attribute.
"""
part = Mock(name='part', partname=PackURI('/ppt/media/image1.png'))
baseURI = '/ppt/slides'
rel = _Relationship(None, None, part, baseURI) # external=False
assert rel.target_ref == '../media/image1.png'
class DescribeRelationships(object):
def it_can_add_a_relationship(self, _Relationship_):
baseURI, rId, reltype, target, external = (
'baseURI', 'rId9', 'reltype', 'target', False
)
rels = Relationships(baseURI)
rel = rels.add_relationship(reltype, target, rId, external)
_Relationship_.assert_called_once_with(
rId, reltype, target, baseURI, external
)
assert rels[rId] == rel
assert rel == _Relationship_.return_value
def it_can_add_an_external_relationship(self, add_ext_rel_fixture_):
rels, reltype, url = add_ext_rel_fixture_
rId = rels.get_or_add_ext_rel(reltype, url)
rel = rels[rId]
assert rel.is_external
assert rel.target_ref == url
assert rel.reltype == reltype
def it_can_find_a_relationship_by_rId(self):
rel = Mock(name='rel', rId='foobar')
rels = Relationships(None)
rels['foobar'] = rel
assert rels['foobar'] == rel
def it_can_find_or_add_a_relationship(
self, rels_with_matching_rel_, rels_with_missing_rel_):
rels, reltype, part, matching_rel = rels_with_matching_rel_
assert rels.get_or_add(reltype, part) == matching_rel
rels, reltype, part, new_rel = rels_with_missing_rel_
assert rels.get_or_add(reltype, part) == new_rel
def it_can_find_or_add_an_external_relationship(
self, add_matching_ext_rel_fixture_):
rels, reltype, url, rId = add_matching_ext_rel_fixture_
_rId = rels.get_or_add_ext_rel(reltype, url)
assert _rId == rId
assert len(rels) == 1
def it_can_find_a_related_part_by_rId(self, rels_with_known_target_part):
rels, rId, known_target_part = rels_with_known_target_part
part = rels.related_parts[rId]
assert part is known_target_part
def it_raises_on_related_part_not_found(self, rels):
with pytest.raises(KeyError):
rels.related_parts['rId666']
def it_can_find_a_related_part_by_reltype(
self, rels_with_target_known_by_reltype):
rels, reltype, known_target_part = rels_with_target_known_by_reltype
part = rels.part_with_reltype(reltype)
assert part is known_target_part
def it_can_compose_rels_xml(self, rels, rels_elm):
# exercise ---------------------
rels.xml
# verify -----------------------
rels_elm.assert_has_calls(
[
call.add_rel(
'rId1', 'http://rt-hyperlink', 'http://some/link', True
),
call.add_rel(
'rId2', 'http://rt-image', '../media/image1.png', False
),
call.xml()
],
any_order=True
)
def it_knows_the_next_available_rId_to_help(self, rels_with_rId_gap):
rels, expected_next_rId = rels_with_rId_gap
next_rId = rels._next_rId
assert next_rId == expected_next_rId
# fixtures ---------------------------------------------
@pytest.fixture
def add_ext_rel_fixture_(self, reltype, url):
rels = Relationships(None)
return rels, reltype, url
@pytest.fixture
def add_matching_ext_rel_fixture_(self, request, reltype, url):
rId = 'rId369'
rels = Relationships(None)
rels.add_relationship(reltype, url, rId, is_external=True)
return rels, reltype, url, rId
# fixture components -----------------------------------
@pytest.fixture
def _baseURI(self):
return '/baseURI'
@pytest.fixture
def _Relationship_(self, request):
return class_mock(request, 'docx.opc.rel._Relationship')
@pytest.fixture
def _rel_with_target_known_by_reltype(
self, _rId, reltype, _target_part, _baseURI):
rel = _Relationship(_rId, reltype, _target_part, _baseURI)
return rel, reltype, _target_part
@pytest.fixture
def rels(self):
"""
Populated Relationships instance that will exercise the rels.xml
property.
"""
rels = Relationships('/baseURI')
rels.add_relationship(
reltype='http://rt-hyperlink', target='http://some/link',
rId='rId1', is_external=True
)
part = Mock(name='part')
part.partname.relative_ref.return_value = '../media/image1.png'
rels.add_relationship(reltype='http://rt-image', target=part,
rId='rId2')
return rels
@pytest.fixture
def rels_elm(self, request):
"""
Return a rels_elm mock that will be returned from
CT_Relationships.new()
"""
# create rels_elm mock with a .xml property
rels_elm = Mock(name='rels_elm')
xml = PropertyMock(name='xml')
type(rels_elm).xml = xml
rels_elm.attach_mock(xml, 'xml')
rels_elm.reset_mock() # to clear attach_mock call
# patch CT_Relationships to return that rels_elm
patch_ = patch.object(CT_Relationships, 'new', return_value=rels_elm)
patch_.start()
request.addfinalizer(patch_.stop)
return rels_elm
@pytest.fixture
def _rel_with_known_target_part(
self, _rId, reltype, _target_part, _baseURI):
rel = _Relationship(_rId, reltype, _target_part, _baseURI)
return rel, _rId, _target_part
@pytest.fixture
def rels_with_known_target_part(self, rels, _rel_with_known_target_part):
rel, rId, target_part = _rel_with_known_target_part
rels.add_relationship(None, target_part, rId)
return rels, rId, target_part
@pytest.fixture
def rels_with_matching_rel_(self, request, rels):
matching_reltype_ = instance_mock(
request, str, name='matching_reltype_'
)
matching_part_ = instance_mock(
request, Part, name='matching_part_'
)
matching_rel_ = instance_mock(
request, _Relationship, name='matching_rel_',
reltype=matching_reltype_, target_part=matching_part_,
is_external=False
)
rels[1] = matching_rel_
return rels, matching_reltype_, matching_part_, matching_rel_
@pytest.fixture
def rels_with_missing_rel_(self, request, rels, _Relationship_):
missing_reltype_ = instance_mock(
request, str, name='missing_reltype_'
)
missing_part_ = instance_mock(
request, Part, name='missing_part_'
)
new_rel_ = instance_mock(
request, _Relationship, name='new_rel_',
reltype=missing_reltype_, target_part=missing_part_,
is_external=False
)
_Relationship_.return_value = new_rel_
return rels, missing_reltype_, missing_part_, new_rel_
@pytest.fixture
def rels_with_rId_gap(self, request):
rels = Relationships(None)
rel_with_rId1 = instance_mock(
request, _Relationship, name='rel_with_rId1', rId='rId1'
)
rel_with_rId3 = instance_mock(
request, _Relationship, name='rel_with_rId3', rId='rId3'
)
rels['rId1'] = rel_with_rId1
rels['rId3'] = rel_with_rId3
return rels, 'rId2'
@pytest.fixture
def rels_with_target_known_by_reltype(
self, rels, _rel_with_target_known_by_reltype):
rel, reltype, target_part = _rel_with_target_known_by_reltype
rels[1] = rel
return rels, reltype, target_part
@pytest.fixture
def reltype(self):
return 'http://rel/type'
@pytest.fixture
def _rId(self):
return 'rId6'
@pytest.fixture
def _target_part(self, request):
return instance_mock(request, Part)
@pytest.fixture
def url(self):
return 'https://github.com/scanny/python-docx'
| 6,314
| 2,957
| 46
|
5d363713f0c567c72f60e40b049c1d6047d88f04
| 4,873
|
py
|
Python
|
python/plugins/broker/mqtt/tests/test_params.py
|
ulen2000/sinetstream
|
efbd1688be0754c38b0ea88f0f253f91b44689be
|
[
"Apache-2.0"
] | 1
|
2020-03-24T15:29:23.000Z
|
2020-03-24T15:29:23.000Z
|
python/plugins/broker/mqtt/tests/test_params.py
|
ulen2000/sinetstream
|
efbd1688be0754c38b0ea88f0f253f91b44689be
|
[
"Apache-2.0"
] | null | null | null |
python/plugins/broker/mqtt/tests/test_params.py
|
ulen2000/sinetstream
|
efbd1688be0754c38b0ea88f0f253f91b44689be
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/local/bin/python3.6
# vim: expandtab shiftwidth=4
# Copyright (C) 2020 National Institute of Informatics
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from sinetstream import MessageReader, MessageWriter, InvalidArgumentError
from conftest import (
SERVICE, TOPIC, BROKER, WS_BROKER, WSS_BROKER, CACERT_PATH,
)
from itertools import product
pytestmark = pytest.mark.usefixtures('setup_config')
@pytest.mark.parametrize("io,config_params", product(
[MessageReader, MessageWriter],
[{'protocol': x} for x in ['MQTTv31', 'MQTTv311', 'MQTTv5']],
))
@pytest.mark.parametrize("io,config_params", [
(MessageReader, {'protocol': 'xxx'}),
(MessageWriter, {'protocol': 'xxx'}),
])
@pytest.mark.parametrize("io,config_params", [
(MessageReader, {'transport': 'tcp'}),
(MessageWriter, {'transport': 'tcp'}),
])
@pytest.mark.skipif(WS_BROKER is None, reason='MQTT_WS_BROKER is not set.')
@pytest.mark.parametrize("io,config_params,config_brokers", [
(MessageReader, {'transport': 'websockets'}, WS_BROKER),
(MessageWriter, {'transport': 'websockets'}, WS_BROKER),
])
wss_params = {
'transport': 'websockets',
'tls': {
'ca_certs': str(CACERT_PATH),
},
}
@pytest.mark.skipif(WSS_BROKER is None, reason='MQTT_WSS_BROKER is not set.')
@pytest.mark.parametrize("io,config_params,config_brokers", [
(MessageReader, wss_params, WSS_BROKER),
(MessageWriter, wss_params, WSS_BROKER),
])
@pytest.mark.parametrize("io,config_params", [
(MessageReader, {'transport': 'xxx'}),
(MessageWriter, {'transport': 'xxx'}),
])
@pytest.mark.parametrize("io,config_params", product(
[MessageReader, MessageWriter],
[{'clean_session': x} for x in [True, False]],
))
@pytest.mark.parametrize("io,config_params", product(
[MessageReader, MessageWriter],
[{'max_inflight_messages_set': {'inflight': x}} for x in [20, 40]],
))
@pytest.mark.parametrize("io,config_params", product(
[MessageReader, MessageWriter],
[{'max_queued_messages_set': {'queue_size': x}} for x in [0, 10]],
))
@pytest.mark.parametrize("io,config_params", product(
[MessageReader, MessageWriter],
[{'message_retry_set': {'retry': x}} for x in [5, 10]],
))
ws_set_options_params = {
'transport': 'websockets',
'ws_set_options': {
'path': '/mqtt',
},
}
@pytest.mark.skipif(WS_BROKER is None, reason='MQTT_WS_BROKER is not set.')
@pytest.mark.parametrize("io,config_params,config_brokers", [
(MessageReader, ws_set_options_params, WS_BROKER),
(MessageWriter, ws_set_options_params, WS_BROKER),
])
will_set_params = {
'will_set': {
'topic': TOPIC,
'payload': 'XXX',
'qos': 1,
'retain': True,
}
}
@pytest.mark.parametrize("io,config_params", [
(MessageReader, will_set_params),
(MessageWriter, will_set_params),
])
reconnect_delay_params = {
'reconnect_delay_set': {
'min_delay': 1,
'max_delay': 120,
}
}
@pytest.mark.parametrize("io,config_params", [
(MessageReader, reconnect_delay_params),
(MessageWriter, reconnect_delay_params),
])
| 26.058824
| 77
| 0.682742
|
#!/usr/local/bin/python3.6
# vim: expandtab shiftwidth=4
# Copyright (C) 2020 National Institute of Informatics
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from sinetstream import MessageReader, MessageWriter, InvalidArgumentError
from conftest import (
SERVICE, TOPIC, BROKER, WS_BROKER, WSS_BROKER, CACERT_PATH,
)
from itertools import product
pytestmark = pytest.mark.usefixtures('setup_config')
@pytest.mark.parametrize("io,config_params", product(
[MessageReader, MessageWriter],
[{'protocol': x} for x in ['MQTTv31', 'MQTTv311', 'MQTTv5']],
))
def test_protocol(io):
with io(SERVICE) as f:
pass
@pytest.mark.parametrize("io,config_params", [
(MessageReader, {'protocol': 'xxx'}),
(MessageWriter, {'protocol': 'xxx'}),
])
def test_bad_protocol(io):
with pytest.raises(InvalidArgumentError):
with io(SERVICE) as f:
pass
@pytest.mark.parametrize("io,config_params", [
(MessageReader, {'transport': 'tcp'}),
(MessageWriter, {'transport': 'tcp'}),
])
def test_transport_tcp(io):
with io(SERVICE) as f:
pass
@pytest.mark.skipif(WS_BROKER is None, reason='MQTT_WS_BROKER is not set.')
@pytest.mark.parametrize("io,config_params,config_brokers", [
(MessageReader, {'transport': 'websockets'}, WS_BROKER),
(MessageWriter, {'transport': 'websockets'}, WS_BROKER),
])
def test_transport_ws(io):
with io(SERVICE) as f:
pass
wss_params = {
'transport': 'websockets',
'tls': {
'ca_certs': str(CACERT_PATH),
},
}
@pytest.mark.skipif(WSS_BROKER is None, reason='MQTT_WSS_BROKER is not set.')
@pytest.mark.parametrize("io,config_params,config_brokers", [
(MessageReader, wss_params, WSS_BROKER),
(MessageWriter, wss_params, WSS_BROKER),
])
def test_transport_wss(io):
with io(SERVICE) as f:
pass
@pytest.mark.parametrize("io,config_params", [
(MessageReader, {'transport': 'xxx'}),
(MessageWriter, {'transport': 'xxx'}),
])
def test_transport_bad_value(io):
with pytest.raises(InvalidArgumentError):
with io(SERVICE) as f:
pass
@pytest.mark.parametrize("io,config_params", product(
[MessageReader, MessageWriter],
[{'clean_session': x} for x in [True, False]],
))
def test_clean_session(io):
with io(SERVICE) as f:
pass
@pytest.mark.parametrize("io,config_params", product(
[MessageReader, MessageWriter],
[{'max_inflight_messages_set': {'inflight': x}} for x in [20, 40]],
))
def test_max_inflight_messages(io):
with io(SERVICE) as f:
pass
@pytest.mark.parametrize("io,config_params", product(
[MessageReader, MessageWriter],
[{'max_queued_messages_set': {'queue_size': x}} for x in [0, 10]],
))
def test_max_queued_messages(io):
with io(SERVICE) as f:
pass
@pytest.mark.parametrize("io,config_params", product(
[MessageReader, MessageWriter],
[{'message_retry_set': {'retry': x}} for x in [5, 10]],
))
def test_message_retry(io):
with io(SERVICE) as f:
pass
ws_set_options_params = {
'transport': 'websockets',
'ws_set_options': {
'path': '/mqtt',
},
}
@pytest.mark.skipif(WS_BROKER is None, reason='MQTT_WS_BROKER is not set.')
@pytest.mark.parametrize("io,config_params,config_brokers", [
(MessageReader, ws_set_options_params, WS_BROKER),
(MessageWriter, ws_set_options_params, WS_BROKER),
])
def test_ws_set_options(io):
with io(SERVICE) as f:
pass
will_set_params = {
'will_set': {
'topic': TOPIC,
'payload': 'XXX',
'qos': 1,
'retain': True,
}
}
@pytest.mark.parametrize("io,config_params", [
(MessageReader, will_set_params),
(MessageWriter, will_set_params),
])
def test_will(io):
with io(SERVICE) as f:
pass
reconnect_delay_params = {
'reconnect_delay_set': {
'min_delay': 1,
'max_delay': 120,
}
}
@pytest.mark.parametrize("io,config_params", [
(MessageReader, reconnect_delay_params),
(MessageWriter, reconnect_delay_params),
])
def test_reconnect_delay(io):
with io(SERVICE) as f:
pass
| 713
| 0
| 286
|
3d601cf4bfe04df1b7cde903f2017afd1f54a428
| 407
|
py
|
Python
|
test/backup_setup.py
|
jinyiabc/china_stock_lib
|
d580b9f2a3f20ca9f87c7a4d42aeedccfa450f4f
|
[
"MIT"
] | null | null | null |
test/backup_setup.py
|
jinyiabc/china_stock_lib
|
d580b9f2a3f20ca9f87c7a4d42aeedccfa450f4f
|
[
"MIT"
] | null | null | null |
test/backup_setup.py
|
jinyiabc/china_stock_lib
|
d580b9f2a3f20ca9f87c7a4d42aeedccfa450f4f
|
[
"MIT"
] | 2
|
2021-12-30T23:53:55.000Z
|
2022-02-01T18:10:42.000Z
|
# from distutils.core import setup
# setup(name='helper',
# version='0.1.1',
# py_modules=['upload_github'],
# data_files=[('config', ['mysql.cfg'])],
# )
# from setuptools import setup
#
# setup(
# name='mypackage',
# version='0.0.1',
# packages=['mypackage'],
# install_requires=[
# 'requests',
# 'importlib; python_version == "2.6"',
# ],
# )
| 22.611111
| 47
| 0.538084
|
# from distutils.core import setup
# setup(name='helper',
# version='0.1.1',
# py_modules=['upload_github'],
# data_files=[('config', ['mysql.cfg'])],
# )
# from setuptools import setup
#
# setup(
# name='mypackage',
# version='0.0.1',
# packages=['mypackage'],
# install_requires=[
# 'requests',
# 'importlib; python_version == "2.6"',
# ],
# )
| 0
| 0
| 0
|
79225c470cde861cd7490495560ca1243733a6b2
| 4,223
|
py
|
Python
|
tests/unit_tests/test_units/test_core.py
|
radical-project/radical.dreamer
|
74bb2a9a705fc90b0dc773963f2bfd48af6e1b84
|
[
"MIT"
] | 4
|
2021-04-30T04:25:12.000Z
|
2021-12-16T19:53:37.000Z
|
tests/unit_tests/test_units/test_core.py
|
radical-project/radical.dreamer
|
74bb2a9a705fc90b0dc773963f2bfd48af6e1b84
|
[
"MIT"
] | 1
|
2021-04-20T22:08:24.000Z
|
2021-04-20T22:08:24.000Z
|
tests/unit_tests/test_units/test_core.py
|
radical-project/radical.dreamer
|
74bb2a9a705fc90b0dc773963f2bfd48af6e1b84
|
[
"MIT"
] | 1
|
2021-01-10T20:09:19.000Z
|
2021-01-10T20:09:19.000Z
|
__copyright__ = 'Copyright 2021, The RADICAL-Cybertools Team'
__license__ = 'MIT'
import glob
import radical.utils as ru
from radical.dreamer.units import Core, Task, CORE_STATE
from unittest import TestCase
TEST_CASES_PATH = 'tests/unit_tests/test_units/test_cases/core.*.json'
| 35.487395
| 76
| 0.600047
|
__copyright__ = 'Copyright 2021, The RADICAL-Cybertools Team'
__license__ = 'MIT'
import glob
import radical.utils as ru
from radical.dreamer.units import Core, Task, CORE_STATE
from unittest import TestCase
TEST_CASES_PATH = 'tests/unit_tests/test_units/test_cases/core.*.json'
class CoreTestClass(TestCase):
@classmethod
def setUpClass(cls):
cls._test_cases = []
for f in glob.glob(TEST_CASES_PATH):
cls._test_cases.extend(ru.read_json(f))
def test_init(self):
# default attributes
c = Core()
self.assertTrue(c.uid.startswith('core.'))
self.assertEqual(c.perf, 1.)
self.assertEqual(c.perf_dynamic, 1.)
self.assertFalse(c.perf_history)
self.assertIsInstance(c.perf_history, list)
self.assertEqual(c.io_rate, 0.)
self.assertEqual(c.io_time, 0.)
self.assertEqual(c.acquire_time, 0.)
self.assertEqual(c.release_time, 0.)
self.assertEqual(c.planned_compute_time, 0.)
self.assertEqual(c.planned_release_time, 0.)
self.assertEqual(c.state, CORE_STATE.Idle)
# with input data
for test_case in self._test_cases:
c = Core(**test_case['input'])
result = dict(test_case['input'])
for k, v in Core._defaults.items():
if k not in result:
result[k] = v
if result.get('uid'):
self.assertEqual(c.uid, result['uid'])
self.assertEqual(c.perf, result['perf'])
self.assertEqual(c.perf_dynamic, result['perf_dynamic'])
self.assertEqual(c.perf_history, result['perf_history'])
self.assertEqual(c.io_rate, result['io_rate'])
self.assertEqual(c.io_time, result['io_time'])
self.assertEqual(c.acquire_time, result['acquire_time'])
self.assertEqual(c.release_time, result['release_time'])
self.assertEqual(c.planned_compute_time,
result['planned_compute_time'])
self.assertEqual(c.planned_release_time,
result['planned_release_time'])
self.assertEqual(c.state, result['state'])
def test_execute(self):
for test_case in self._test_cases:
if 'task' not in test_case or 'result_execute' not in test_case:
continue
t = Task(**test_case['task'])
c = Core(**test_case['input'])
previous_release_time = c.release_time
ret_c = c.execute(task=t)
result = test_case['result_execute']
self.assertIsInstance(ret_c, Core)
self.assertEqual(ret_c.io_time, result['io_time'])
self.assertEqual(ret_c.acquire_time, previous_release_time)
self.assertEqual(ret_c.release_time, result['release_time'])
self.assertEqual(ret_c.planned_compute_time,
result['planned_compute_time'])
self.assertEqual(ret_c.planned_release_time,
result['planned_release_time'])
self.assertEqual(ret_c.planned_compute_time,
ret_c.planned_release_time
- ret_c.acquire_time
- ret_c.io_time)
self.assertTrue(ret_c.is_busy)
self.assertEqual(t.start_time, ret_c.acquire_time)
self.assertEqual(t.end_time, ret_c.release_time)
def test_release(self):
for test_case in self._test_cases:
if 'task' not in test_case:
continue
c = Core(**test_case['input'])
t = Task(**test_case['task'])
c.execute(task=t)
perf_history_len = len(c.perf_history)
self.assertEqual(c.state, CORE_STATE.Busy)
ret_c = c.release()
self.assertIsInstance(ret_c, Core)
self.assertEqual(ret_c.perf_dynamic, ret_c.perf_history[-1])
self.assertEqual(len(ret_c.perf_history), perf_history_len + 1)
self.assertFalse(ret_c.is_busy)
self.assertFalse(ret_c.io_time)
self.assertFalse(ret_c.planned_compute_time)
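# Restated for readability (hypothetical numbers, inferred only from the
# assertions above): after Core.execute(task) the tests expect
#
#     planned_compute_time == planned_release_time - acquire_time - io_time
#
# e.g. acquire_time=2.0, io_time=0.5, planned_release_time=6.0 implies
# planned_compute_time == 3.5.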
| 3,778
| 134
| 23
|
bcf1f57f33b1cf0327c51d531ce1989a4096bd93
| 24,349
|
py
|
Python
|
LivSim Processing/event.py
|
kbui1993/LivSim-Codes
|
5317fa6ea773d5967871dfb67dec1a0118ec2f5e
|
[
"MIT"
] | 2
|
2018-05-07T03:31:54.000Z
|
2019-07-02T18:30:41.000Z
|
LivSim Processing/event.py
|
kbui1993/LivSim-Codes
|
5317fa6ea773d5967871dfb67dec1a0118ec2f5e
|
[
"MIT"
] | null | null | null |
LivSim Processing/event.py
|
kbui1993/LivSim-Codes
|
5317fa6ea773d5967871dfb67dec1a0118ec2f5e
|
[
"MIT"
] | 1
|
2019-01-19T19:56:43.000Z
|
2019-01-19T19:56:43.000Z
|
import entity, allocate
import numpy as nump
import datetime
from copy import deepcopy
ndsa = 58
#######################################################################Event Processes####################################################################################
def Arrival(arrivalinfo, Sim, Stat, OPTN):
"""
This function simulates the arrival of patients. It computes the MELD score of the patient and adds him to the corresponding DSA waiting list.
Input:
@arrivalinfo: all the patient's information
@Sim: class object that contains variables relevant to the simulation
@Stat: class object that containts statistical info of the simulation
@OPTN: complete patient data
Output:
@Sim: updated Sim
@Stat: updated Stat
@OPTN: updated OPTN
"""
#Create Patient Entity
newpatient = entity.Patient(arrivalinfo[1].astype(int),arrivalinfo[3].astype(int),Sim.clock)
#Assign Patient Characteristics
newpatient.Status1 = arrivalinfo[9].astype(int)
newpatient.ABO = arrivalinfo[5].astype(int)
newpatient.HCC = arrivalinfo[8].astype(int)
newpatient.Na = arrivalinfo[10].astype(int)
newpatient.lMELD = arrivalinfo[7].astype(int)
newpatient.MELD = arrivalinfo[6].astype(int)
newpatient.Inactive = arrivalinfo[11].astype(int)
#Assign Allocation MELD based on policy
if Sim.sodium ==1: #if sodium policy is selected
#bound the sodium score
effective_na = newpatient.Na
if effective_na <125:
effective_na = 125
elif effective_na > 137:
effective_na = 137
#compute the allocation MELD score
if nump.rint(newpatient.lMELD + 1.32*(137-effective_na)-(0.033*newpatient.lMELD*(137-effective_na))) <6:
newpatient.MELD =6
elif nump.rint(newpatient.lMELD + 1.32*(137-effective_na)-(0.033*newpatient.lMELD*(137-effective_na))) > 40:
newpatient.MELD =40
else:
newpatient.MELD = nump.rint(newpatient.lMELD + 1.32*(137-effective_na)-(0.033*newpatient.lMELD*(137-effective_na)))
else: #if sodium policy not selected
#bound the MELD score
if newpatient.MELD <6:
newpatient.MELD =6
elif newpatient.MELD >40:
newpatient.MELD = 40
#Apply Status1 and HCC exceptions (if applicable)
if newpatient.Status1 ==1:
newpatient.MELD =41
elif newpatient.HCC ==1 and Sim.capanddelay ==1:
newpatient.MELD = min(newpatient.lMELD,28)
#Place Patient in DSA List
if newpatient.DSA >= 0:
OPTN[newpatient.DSA].append(newpatient)
#Update Stats
Stat.yarrivals[newpatient.DSA] = Stat.yarrivals[newpatient.DSA] + 1
Stat.numcandidates[newpatient.DSA] = Stat.numcandidates[newpatient.DSA] + 1
#return updated Sim, Stat, and OPTN
return Sim, Stat, OPTN
#Diagnostic Info
#here
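#Illustrative helper (hypothetical, not called anywhere in the simulation):
#the sodium-adjusted allocation MELD rule used in Arrival and Progression above.
def _example_allocation_meld_na(lab_meld, na):
    """Clamp Na to [125, 137], apply MELD-Na = MELD + 1.32*(137-Na)
    - 0.033*MELD*(137-Na), then clamp the rounded result to [6, 40].
    Example: lab_meld=20, na=130 -> round(20 + 9.24 - 4.62) = 25."""
    effective_na = min(max(na, 125), 137)
    score = nump.rint(lab_meld + 1.32*(137 - effective_na)
                      - 0.033*lab_meld*(137 - effective_na))
    return int(min(max(score, 6), 40))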
def Progression(proginfo, Sim, Stat, OPTN, reps):
"""
This function searches for a particular patient from the OPTN data structure and update the patient's characteristics
Input:
@proginfo: all of the information of the patient being searched up
@Sim: class object containing relevant information of the patient being searched up
@Stat: class object containing statistical information of the simulation
@OPTN: complete patient data
@reps: current replication number
Output:
@Sim: updated Sim
@Stat: updated Stat
@OPTN: updated OPTN
"""
progdsa = proginfo[9].astype(int) #obtain DSA
#test =0
#run if we have an actual DSA
if progdsa >=0:
#search for patient in the OPTN data structure
for i, patient in enumerate(OPTN[progdsa]):
#check if the id matches
if patient.id == proginfo[1].astype(int):
#Update Patient
if patient.Relist ==1:
#Ignore updates for relisted patients
break
elif proginfo[3].astype(int) == 1:
#Patient dies, remove and update stats
Stat.ydeaths[progdsa] = Stat.ydeaths[progdsa] + 1 #increment the number of deaths by one
Stat.numcandidates[progdsa] = Stat.numcandidates[progdsa] - 1 #decrement the number of candidates waiting by one
del OPTN[progdsa][i] #delete the patient object since patient died
break
elif proginfo[4].astype(int) ==1:
#Patient removed, remove and update stats
Stat.yremoved[progdsa] = Stat.yremoved[progdsa] + 1 #increment the number of removals by one
Stat.numcandidates[progdsa] = Stat.numcandidates[progdsa] - 1 #decrement the number of candidates waiting by one
#record as follows (time of removal, repetition, patient id, patient allocation MELD, patient lab MELD)
oidreport = [nump.floor(Sim.clock), reps, Sim.clock, patient.id,patient.MELD, patient.lMELD]
Sim.record_removals = nump.vstack((Sim.record_removals, oidreport)) #concatenate the row oidreport to the record of removals
del OPTN[progdsa][i] #delete patient object since patient is removed
break
else:
#Update candidate
patient.lMELD = proginfo[6].astype(int)
patient.Na = proginfo[7].astype(int)
patient.MELD = proginfo[5].astype(int)
patient.Inactive = proginfo[10].astype(int)
#set bound on MELD score
if patient.MELD <6:
patient.MELD =6
elif patient.MELD >40:
patient.MELD = 40
#Update Allocation MELD based on policy (if applicable)
if Sim.sodium ==1 and patient.Status1 != 1 and patient.HCC != 1:
#if sodium score policy is selected, then update the meld score for non-status1, non-HCC patient
#set bound on sodium score
effective_na = patient.Na
if effective_na <125:
effective_na = 125
elif effective_na > 137:
effective_na = 137
#compute MELD score
if nump.rint(patient.lMELD + 1.32*(137-effective_na)-(0.033*patient.lMELD*(137-effective_na))) <6:
patient.MELD =6
elif nump.rint(patient.lMELD + 1.32*(137-effective_na)-(0.033*patient.lMELD*(137-effective_na))) > 40:
patient.MELD =40
else:
patient.MELD = nump.rint(patient.lMELD + 1.32*(137-effective_na)-(0.033*patient.lMELD*(137-effective_na)))
elif Sim.capanddelay ==1 and patient.Status1 != 1 and patient.HCC == 1:
#if cap and delay policy is selected, update meld score for status1, HCC patient
#compute MELD score
if Sim.clock - patient.create_time <= .5:
patient.MELD = max(28,patient.MELD)
elif (Sim.clock - patient.create_time) > .5 and (Sim.clock - patient.create_time <= .75):
patient.MELD = max(29,patient.MELD)
elif (Sim.clock - patient.create_time) > .75 and (Sim.clock - patient.create_time <= 1):
patient.MELD = max(31,patient.MELD)
elif (Sim.clock - patient.create_time) > 1 and (Sim.clock - patient.create_time <= 1.25):
patient.MELD = max(33,patient.MELD)
elif (Sim.clock - patient.create_time) > 1.25 and (Sim.clock - patient.create_time <= 1.5):
patient.MELD = max(34,patient.MELD)
else:
patient.MELD = min(patient.MELD+1,40)
break
#return updated Sim, Stat, and OPTN
return Sim, Stat, OPTN
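#Summary of the cap-and-delay branch above (restates the code for readability,
#not an independent statement of policy): for HCC, non-Status-1 patients the
#allocation MELD is floored according to time since listing:
# <=0.50 yr -> 28; (0.50,0.75] -> 29; (0.75,1.00] -> 31;
# (1.00,1.25] -> 33; (1.25,1.50] -> 34; >1.50 -> current MELD + 1, capped at 40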
def OrganArrival(organinfo, Sim, Stat, OPTN, Regions, SharingPartners, Patients_Accept, Donor_Accept, DSA_Avg_Times, AcceptanceModelS1, AcceptanceModel, Relist, reps):
"""
This function simulates the organ arrival. It tries to match the organ to a patient from the corresponding DSA waitlist.
Input:
@organinfo: information on the organ
@Sim: class object containing relevant variables for simulation
@Stat: class object containing statistical information of simulation
@OPTN: complete patient data
@Regions: neighborhood map for regions, districts, or neighborhoods
@SharingPartners: neighborhood map adding sharing partners to existing geographic relationships among OPOs
@Patients_Accept: coefficients regarding patient's characteristics for acceptance model
@Donor_Accept: coefficients regarding donor's characteristics for acceptance model
@DSA_Avg_Times: data on average transport times between DSAs
@AcceptanceModelS1: coefficients regarding patient's characteristics for status-1 acceptance model
@AcceptanceModel: coefficients regarding patient's characteristics for non-status-1 acceptance model
@Relist: values regarding the probability that a transplanted patient will relist
@reps: replication number
Output:
@Sim: updated Sim
@Stat: updated Stat
@OPTN: updated OPTN
"""
#Create Organ
neworgan = entity.Organ(int(organinfo[2]))
neworgan.organid = Sim.oid
Sim.oid = Sim.oid + 1
#Assign Organ Attributes
neworgan.ABO = organinfo[4].astype(int)
#Allocate Organ
#disposition is tuple of organ status (accept/reject) , transplanting DSA, and patient id if accepted
disposition = allocate.Allocate(neworgan, OPTN, Sim, Regions, SharingPartners, Patients_Accept, Donor_Accept, DSA_Avg_Times, AcceptanceModelS1, AcceptanceModel)
if disposition[0] == 1: #organ is transplanted
#Remove transplanted patient from waiting list and update statistics
for i, patient in enumerate(OPTN[disposition[1]]):
#search for the patient in the OPTN data structure
if patient.id == disposition[2]:
#Determine if patient will relist and assign special attributes if necessary
willrelist = 0
if patient.Relist ==0:
#if patient has not been relisted
#Determine if patient will be relisted if was not already
r1 =nump.random.uniform(Relist[0],Relist[1],1)
r2 =nump.random.uniform(0,1,1)
if r2 < r1:
willrelist =1
#Determine when current graft will fail
r3 = nump.random.uniform(0,1,1)
if r3 < .4:
patient.RelistTxTime = Sim.clock + 5
elif r3 >= .4 and r3 < .6:
patient.RelistTxTime = Sim.clock + 2
elif r3 >= .6 and r3 < .8:
patient.RelistTxTime = Sim.clock + 1
else:
patient.RelistTxTime = Sim.clock
#Update relist statistics
Stat.yrelists[disposition[1]] = Stat.yrelists[disposition[1]] +1
#record the floor of current time, repetition, current time, patient id, patient meld score, and relist tx time
relistidreport = [nump.floor(Sim.clock), reps, Sim.clock, patient.id,patient.MELD,patient.RelistTxTime]
#concatenate the relistidreport to the record of relists
Sim.record_relists = nump.vstack((Sim.record_relists, relistidreport))
#Update Stats for Transplants
#Number of Transplants and Regrafts
Stat.ytransplants[disposition[1]] = Stat.ytransplants[disposition[1]] +1
Sim.record_txDSA[neworgan.DSA,disposition[1]] = Sim.record_txDSA[neworgan.DSA,disposition[1]] +1
if patient.Relist ==1:
#if patient is relisted
Stat.yregrafts[disposition[1]] = Stat.yregrafts[disposition[1]] +1 #increase number of retransplanted patients by 1
#Compute waiting Time
if patient.Relist ==0:
#if patient is not relisted
Stat.ywait[disposition[1]] = Stat.ywait[disposition[1]] + (Sim.clock - patient.create_time)
else:
#if patient is relisted
Stat.ywait[disposition[1]] = Stat.ywait[disposition[1]] + (Sim.clock - patient.RelistTxTime)
#Waiting List Sizes
if willrelist ==0:
Stat.numcandidates[disposition[1]] = Stat.numcandidates[disposition[1]] -1 #decrease the number of waiting list candidates for the DSA by 1
#1st Transplant MELD
if patient.Status1 ==0 and patient.Relist ==0:
#Tx-MELD at measure assumed to exclude re-grafts
Stat.yMELD[disposition[1]] = Stat.yMELD[disposition[1]] + patient.MELD #add the patient's MELD score to the DSA total
Stat.ymedMELD[disposition[1]].append(patient.MELD) #record the patient MELD score
#Output for Posttransplant processing for those who were not ever relisted or will be
if willrelist ==0 and patient.Relist ==0:
regtx =0
nattx = 0
if patient.DSA != neworgan.DSA and (Regions[neworgan.DSA,patient.DSA] ==1 or SharingPartners[neworgan.DSA, patient.DSA] == 1):
regtx =1 #patient had regional transplant
elif patient.DSA != neworgan.DSA:
nattx=1 #patient had national transplant
#record the floor of the current time, repetition, current time, patient id, indicator for regional transplant, indicator for national
#transplant
txidreport = [nump.floor(Sim.clock), reps, Sim.clock, patient.id,regtx,nattx]
Sim.record_txID = nump.vstack((Sim.record_txID, txidreport)) #add new record to list of transplant records
#record as follows (time of removal, repetition, patient id, patient allocation MELD, patient lab MElD)
oidreport = [nump.floor(Sim.clock), reps, Sim.clock, patient.id,Sim.oid]
#add to list of transplant records
Sim.record_doID = nump.vstack((Sim.record_doID, oidreport))
#Output for Posttransplant processing for regrafts
if patient.Relist ==1:
regtx =0 #indicator for regional transplant
nattx = 0 #indicator for national transplant
if patient.DSA != neworgan.DSA and (Regions[neworgan.DSA,patient.DSA] ==1 or SharingPartners[neworgan.DSA, patient.DSA] == 1):
regtx =1 #patient had regional transplant
elif patient.DSA != neworgan.DSA:
nattx=1 #patient had national transplant
#record the floor of the current time, repetition, current time, patient id, indicator for regional transplant, indicator for national transplant
txidreport = [nump.floor(Sim.clock), reps, Sim.clock, patient.id,regtx,nattx]
#add to list of relisted transplants
Sim.record_txIDregraft = nump.vstack((Sim.record_txIDregraft, txidreport))
#record as follows (time of removal, repetition, patient id, patient allocation MELD, patient lab MELD)
oidreport = [nump.floor(Sim.clock), reps, Sim.clock, patient.id,Sim.oid]
#add to list of retransplant records
Sim.record_doIDregraft = nump.vstack((Sim.record_doIDregraft, oidreport))
if willrelist ==1:
#if patient will relist, update relist status and MELD score
OPTN[disposition[1]][i].Relist =1
OPTN[disposition[1]][i].MELD = 32
OPTN[disposition[1]][i].lMELD = 32
else:
#remove transplanted patient if will not be relisted
del OPTN[disposition[1]][i]
break
else: #organ is discarded; update statistics (optional)
pass
#return updated Sim, Stat, and OPTN
return Sim, Stat, OPTN
def Year(Sim, Stat, reps):
"""
This function updates the statistics of the simulation per year
Input:
@Sim: class object that contains relevant variables for the simulation
@Stat: class object that contains statistical information of the simulation
@reps: current replication number
Output:
@Sim: updated Sim
@Stat: updated Stat
"""
#Annual Disparity Statistics
mr_1 = nump.zeros(shape=(ndsa,1),dtype=float)
tr_1 = nump.zeros(shape=(ndsa,1),dtype=float)
wt_1 = nump.zeros(shape=(ndsa,1),dtype=float)
meld_l = nump.zeros(shape=(ndsa,1),dtype=float)
for i in range(0,ndsa):
if Stat.ytransplants[i] > 0:
wt_1[i] = Stat.ywait[i] / Stat.ytransplants[i] #compute the total waiting time/total # of transplants per DSA
meld_l[i] = Stat.yMELD[i] / Stat.ytransplants[i] #compute the total MELD score/total # of transplants per DSA
else:
#write nan if no values available
wt_1[i] = nump.nan
meld_l[i] = nump.nan
if (Stat.yarrivals[i] + Stat.ycandidates[i]) == 0:
#write nan if no values available
mr_1[i] = nump.nan
tr_1[i] = nump.nan
else:
mr_1[i] = Stat.ydeaths[i] / (Stat.yarrivals[i] + Stat.ycandidates[i]) #compute mortality rate (number of deaths/number of waiting candidates)
tr_1[i] = Stat.ytransplants[i] / (Stat.yarrivals[i] + Stat.ycandidates[i]) #compute transplant rate (number of transplants/number of waiting candidates)
#compute the median MELD score
medianmelds = nump.zeros(shape=(ndsa,1),dtype=float)
for i in range(0,ndsa):
if Stat.ymedMELD[i] != []:
medianmelds[i] = nump.nanmedian(Stat.ymedMELD[i])
else:
medianmelds[i] = nump.nan
#Intermediate Data Outputs
#nump.savetxt("Output_check.txt", Stat.numcandidates, fmt='%1.4e', delimiter='\t', newline='\n')
#nump.savetxt("Output_check2.txt", Stat.yremoved, fmt='%1.4e', delimiter='\t', newline='\n')
#nump.savetxt("Output_check3.txt", Stat.yarrivals, fmt='%1.4e', delimiter='\t', newline='\n')
#nump.savetxt("Output_check4.txt", Stat.ydeaths, fmt='%1.4e', delimiter='\t', newline='\n')
#nump.savetxt("Output_check5.txt", Stat.ytransplants, fmt='%1.4e', delimiter='\t', newline='\n')
#nump.savetxt("Output_check6.txt", mr_1, fmt='%1.4e', delimiter='\t', newline='\n')
mr_numdeaths = [nump.sum(Stat.ydeaths),nump.floor(Sim.clock),reps] #record the total number of deaths along with its current time and current repetition
#mr_disparity = [nump.linalg.norm(mr_1,ord=1),nump.floor(Sim.clock),reps]
#tx_disparity = [nump.linalg.norm(tr_1,ord=1),nump.floor(Sim.clock),reps]
#wt_disparity = [nump.linalg.norm(wt_1,ord=1),nump.floor(Sim.clock),reps]
mr_disparity_mean = [nump.nanmean(mr_1),nump.floor(Sim.clock),reps] #record the mean mortality rate along with its current time and current repetition
mr_disparity_std = [nump.nanstd(mr_1),nump.floor(Sim.clock),reps] #record the standard deviation of mortality rate along with its current time and current repetition
meld_disparity_mean = [nump.nanmean(meld_l),nump.floor(Sim.clock),reps] #record the mean MELD score along with current time and current repetition
meld_disparity_std = [nump.nanstd(meld_l),nump.floor(Sim.clock),reps] #record the standard deviation of the MELD score along with current time and current repetition
medmeld_mean = [nump.nanmean(medianmelds),nump.floor(Sim.clock),reps] #record the mean median MELD score along with current time and current repetition
medmeld_std = [nump.nanstd(medianmelds),nump.floor(Sim.clock),reps] #record the standard deviation of the median MELD score along with current time and current repetition
#print(tx_disparity)
#add the records to the list of yearly statistics
Sim.record_deaths = nump.vstack((Sim.record_deaths, mr_numdeaths))
Sim.record_mr_disparity_mean = nump.vstack((Sim.record_mr_disparity_mean, mr_disparity_mean))
Sim.record_mr_disparity_std = nump.vstack((Sim.record_mr_disparity_std, mr_disparity_std))
Sim.record_meld_disparity_mean = nump.vstack((Sim.record_meld_disparity_mean, meld_disparity_mean))
Sim.record_meld_disparity_std = nump.vstack((Sim.record_meld_disparity_std, meld_disparity_std))
Sim.record_medMELDmean = nump.vstack((Sim.record_medMELDmean, medmeld_mean))
Sim.record_medMELDstd = nump.vstack((Sim.record_medMELDstd, medmeld_std))
Sim.record_txDSAoutput = nump.vstack((Sim.record_txDSAoutput, Sim.record_txDSA))
#create array that records the current time and repetition
recindex =nump.ndarray(shape=(1,3))
recindex[0,0] = nump.floor(Sim.clock)
recindex[0,1] = reps
#add DSA vector regarding deaths, transplants, etc. to the list of records
Sim.record_ydeaths =nump.concatenate((Sim.record_ydeaths,nump.concatenate((recindex,nump.transpose(Stat.ydeaths)),axis=1)),axis=0)
Sim.record_ytransplants = nump.concatenate((Sim.record_ytransplants,nump.concatenate((recindex,nump.transpose(Stat.ytransplants)),axis=1)),axis=0)
Sim.record_yarrivals = nump.concatenate((Sim.record_yarrivals,nump.concatenate((recindex,nump.transpose(Stat.yarrivals)),axis=1)),axis=0)
Sim.record_ycandidates = nump.concatenate((Sim.record_ycandidates,nump.concatenate((recindex,nump.transpose(Stat.ycandidates)),axis=1)),axis=0)
Sim.record_yremoved =nump.concatenate((Sim.record_yremoved,nump.concatenate((recindex,nump.transpose(Stat.yremoved)),axis=1)),axis=0)
Sim.record_ywait = nump.concatenate((Sim.record_ywait,nump.concatenate((recindex,nump.transpose(Stat.ywait)),axis=1)),axis=0)
Sim.record_yMELD = nump.concatenate((Sim.record_yMELD,nump.concatenate((recindex,nump.transpose(Stat.yMELD)),axis=1)),axis=0)
Sim.record_yrelists =nump.concatenate((Sim.record_yrelists,nump.concatenate((recindex,nump.transpose(Stat.yrelists)),axis=1)),axis=0)
Sim.record_yregrafts =nump.concatenate((Sim.record_yregrafts,nump.concatenate((recindex,nump.transpose(Stat.yregrafts)),axis=1)),axis=0)
#Reset Statistics for Following Year
Stat.yarrivals = nump.zeros(shape=(ndsa,1),dtype=int)
Stat.ydeaths = nump.zeros(shape=(ndsa,1),dtype=int)
Stat.yremoved = nump.zeros(shape=(ndsa,1),dtype=int)
Stat.ytransplants = nump.zeros(shape=(ndsa,1),dtype=int)
Stat.ycandidates = deepcopy(Stat.numcandidates)
Stat.ywait = nump.zeros(shape=(ndsa,1),dtype=float)
Stat.yMELD = nump.zeros(shape=(ndsa,1),dtype=int)
Stat.ymedMELD = [[] for i in range(0,ndsa)]
Stat.yrelists = nump.zeros(shape=(ndsa,1),dtype=int)
Stat.yregrafts = nump.zeros(shape=(ndsa,1),dtype=int)
#return updated Sim, Stat
return Sim, Stat
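#The per-DSA annual rates computed above, restated for readability (the numbers
#in the example are hypothetical, not simulation output):
# mortality rate = ydeaths / (yarrivals + ycandidates)
# transplant rate = ytransplants / (yarrivals + ycandidates)
# mean wait = ywait / ytransplants, mean Tx MELD = yMELD / ytransplants
#e.g. 12 deaths with 40 arrivals and 160 carried-over candidates gives a
#mortality rate of 12/200 = 0.06 for that DSA-year.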
def EndRep():
"""
Prints a message saying that a replication ended.
"""
print("Ending replication, time is: ", datetime.datetime.now().time())
#print(Sim.clock)
#####################################################################################################################################################################################################################
| 51.153361
| 213
| 0.611278
|
import entity, allocate
import numpy as nump
import datetime
from copy import deepcopy
ndsa = 58
#######################################################################Event Processes####################################################################################
def Arrival(arrivalinfo, Sim, Stat, OPTN):
"""
This function simulates the arrival of patients. It computes the MELD score of the patient and adds him to the corresponding DSA waiting list.
Input:
@arrivalinfo: all the patient's information
@Sim: class object that contains variables relevant to the simulation
@Stat: class object that contains statistical info of the simulation
@OPTN: complete patient data
Output:
@Sim: updated Sim
@Stat: updated Stat
@OPTN: updated OPTN
"""
#Create Patient Entity
newpatient = entity.Patient(arrivalinfo[1].astype(int),arrivalinfo[3].astype(int),Sim.clock)
#Assign Patient Characteristics
newpatient.Status1 = arrivalinfo[9].astype(int)
newpatient.ABO = arrivalinfo[5].astype(int)
newpatient.HCC = arrivalinfo[8].astype(int)
newpatient.Na = arrivalinfo[10].astype(int)
newpatient.lMELD = arrivalinfo[7].astype(int)
newpatient.MELD = arrivalinfo[6].astype(int)
newpatient.Inactive = arrivalinfo[11].astype(int)
#Assign Allocation MELD based on policy
if Sim.sodium ==1: #if sodium policy is selected
#bound the sodium score
effective_na = newpatient.Na
if effective_na <125:
effective_na = 125
elif effective_na > 137:
effective_na = 137
#compute the allocation MELD score
if nump.rint(newpatient.lMELD + 1.32*(137-effective_na)-(0.033*newpatient.lMELD*(137-effective_na))) <6:
newpatient.MELD =6
elif nump.rint(newpatient.lMELD + 1.32*(137-effective_na)-(0.033*newpatient.lMELD*(137-effective_na))) > 40:
newpatient.MELD =40
else:
newpatient.MELD = nump.rint(newpatient.lMELD + 1.32*(137-effective_na)-(0.033*newpatient.lMELD*(137-effective_na)))
else: #if sodium policy not selected
#bound the MELD score
if newpatient.MELD <6:
newpatient.MELD =6
elif newpatient.MELD >40:
newpatient.MELD = 40
#Apply Status1 and HCC exceptions (if applicable)
if newpatient.Status1 ==1:
newpatient.MELD =41
elif newpatient.HCC ==1 and Sim.capanddelay ==1:
newpatient.MELD = min(newpatient.lMELD,28)
#Place Patient in DSA List
if newpatient.DSA >= 0:
OPTN[newpatient.DSA].append(newpatient)
#Update Stats
Stat.yarrivals[newpatient.DSA] = Stat.yarrivals[newpatient.DSA] + 1
Stat.numcandidates[newpatient.DSA] = Stat.numcandidates[newpatient.DSA] + 1
#return updated Sim, Stat, and OPTN
return Sim, Stat, OPTN
#Diagnostic Info
#here
def Progression(proginfo, Sim, Stat, OPTN, reps):
"""
This function searches for a particular patient from the OPTN data structure and update the patient's characteristics
Input:
@proginfo: all of the information of the patient being searched up
@Sim: class object containing relevant information of the patient being searched up
@Stat: class object containing statistical information of the simulation
@OPTN: complete patient data
@reps: current replication number
Output:
@Sim: updated Sim
@Stat: updated Stat
@OPTN: updated OPTN
"""
progdsa = proginfo[9].astype(int) #obtain DSA
#test =0
#run if we have an actual DSA
if progdsa >=0:
#search for patient in the OPTN data structure
for i, patient in enumerate(OPTN[progdsa]):
#check if the id matches
if patient.id == proginfo[1].astype(int):
#Update Patient
if patient.Relist ==1:
#Ignore updates for relisted patients
break
elif proginfo[3].astype(int) == 1:
#Patient dies, remove and update stats
Stat.ydeaths[progdsa] = Stat.ydeaths[progdsa] + 1 #increment the number of deaths by one
Stat.numcandidates[progdsa] = Stat.numcandidates[progdsa] - 1 #decrement the number of candidates waiting by one
del OPTN[progdsa][i] #delete the patient object since patient died
break
elif proginfo[4].astype(int) ==1:
#Patient removed, remove and update stats
Stat.yremoved[progdsa] = Stat.yremoved[progdsa] + 1 #increment the number of removals by one
Stat.numcandidates[progdsa] = Stat.numcandidates[progdsa] - 1 #decrement the number of candidates waiting by one
#record as follows (time of removal, repetition, patient id, patient allocation MELD, patient lab MELD)
oidreport = [nump.floor(Sim.clock), reps, Sim.clock, patient.id,patient.MELD, patient.lMELD]
Sim.record_removals = nump.vstack((Sim.record_removals, oidreport)) #concatenate the row oidreport to the record of removals
del OPTN[progdsa][i] #delete patient object since patient is removed
break
else:
#Update candidate
patient.lMELD = proginfo[6].astype(int)
patient.Na = proginfo[7].astype(int)
patient.MELD = proginfo[5].astype(int)
patient.Inactive = proginfo[10].astype(int)
#set bound on MELD score
if patient.MELD <6:
patient.MELD =6
elif patient.MELD >40:
patient.MELD = 40
#Update Allocation MELD based on policy (if applicable)
if Sim.sodium ==1 and patient.Status1 != 1 and patient.HCC != 1:
#if sodium score policy is selected, then update the meld score for non-status1, non-HCC patient
#set bound on sodium score
effective_na = patient.Na
if effective_na <125:
effective_na = 125
elif effective_na > 137:
effective_na = 137
#compute MELD score
if nump.rint(patient.lMELD + 1.32*(137-effective_na)-(0.033*patient.lMELD*(137-effective_na))) <6:
patient.MELD =6
elif nump.rint(patient.lMELD + 1.32*(137-effective_na)-(0.033*patient.lMELD*(137-effective_na))) > 40:
patient.MELD =40
else:
patient.MELD = nump.rint(patient.lMELD + 1.32*(137-effective_na)-(0.033*patient.lMELD*(137-effective_na)))
elif Sim.capanddelay ==1 and patient.Status1 != 1 and patient.HCC == 1:
#if cap and delay policy is selected, update meld score for status1, HCC patient
#compute MELD score
if Sim.clock - patient.create_time <= .5:
patient.MELD = max(28,patient.MELD)
elif (Sim.clock - patient.create_time) > .5 and (Sim.clock - patient.create_time <= .75):
patient.MELD = max(29,patient.MELD)
elif (Sim.clock - patient.create_time) > .75 and (Sim.clock - patient.create_time <= 1):
patient.MELD = max(31,patient.MELD)
elif (Sim.clock - patient.create_time) > 1 and (Sim.clock - patient.create_time <= 1.25):
patient.MELD = max(33,patient.MELD)
elif (Sim.clock - patient.create_time) > 1.25 and (Sim.clock - patient.create_time <= 1.5):
patient.MELD = max(34,patient.MELD)
else:
patient.MELD = min(patient.MELD+1,40)
break
#return updated Sim, Stat, and OPTN
return Sim, Stat, OPTN
def OrganArrival(organinfo, Sim, Stat, OPTN, Regions, SharingPartners, Patients_Accept, Donor_Accept, DSA_Avg_Times, AcceptanceModelS1, AcceptanceModel, Relist, reps):
"""
This function simulates the organ arrival. It tries to match the organ to a patient from the corresponding DSA waitlist.
Input:
@organinfo: information on the organ
@Sim: class object containing relevant variables for simulation
@Stat: class object containing statistical information of simulation
@OPTN: complete patient data
@Regions: neighborhood map for regions, districts, or neighborhoods
@SharingPartners: neighborhood map adding sharing partners to existing geographic relationships among OPOs
@Patients_Accept: coefficients regarding patient's characteristics for acceptance model
@Donor_Accept: coefficients regarding donor's characteristics for acceptance model
@DSA_Avg_Times: data on average transport times between DSAs
@AcceptanceModelS1: coefficients regarding patient's characteristics for status-1 acceptance model
@AcceptanceModel: coefficients regarding patient's characteristics for non-status-1 acceptance model
@Relist: values regarding the probability that a transplanted patient will relist
@reps: replication number
Output:
@Sim: updated Sim
@Stat: updated Stat
@OPTN: updated OPTN
"""
#Create Organ
neworgan = entity.Organ(int(organinfo[2]))
neworgan.organid = Sim.oid
Sim.oid = Sim.oid + 1
#Assign Organ Attributes
neworgan.ABO = organinfo[4].astype(int)
#Allocate Organ
#disposition is tuple of organ status (accept/reject) , transplanting DSA, and patient id if accepted
disposition = allocate.Allocate(neworgan, OPTN, Sim, Regions, SharingPartners, Patients_Accept, Donor_Accept, DSA_Avg_Times, AcceptanceModelS1, AcceptanceModel)
if disposition[0] == 1: #organ is transplanted
#Remove transplanted patient from waiting list and update statistics
for i, patient in enumerate(OPTN[disposition[1]]):
#search for the patient in the OPTN data structure
if patient.id == disposition[2]:
#Determine if patient will relist and assign special attributes if necessary
willrelist = 0
if patient.Relist ==0:
#if patient has not been relisted
#Determine if patient will be relisted if was not already
r1 =nump.random.uniform(Relist[0],Relist[1],1)
r2 =nump.random.uniform(0,1,1)
if r2 < r1:
willrelist =1
#Determine when current graft will fail
r3 = nump.random.uniform(0,1,1)
if r3 < .4:
patient.RelistTxTime = Sim.clock + 5
elif r3 >= .4 and r3 < .6:
patient.RelistTxTime = Sim.clock + 2
elif r3 >= .6 and r3 < .8:
patient.RelistTxTime = Sim.clock + 1
else:
patient.RelistTxTime = Sim.clock
#Update relist statistics
Stat.yrelists[disposition[1]] = Stat.yrelists[disposition[1]] +1
#record the floor of current time, repetition, current time, patient id, patient meld score, and relist tx time
relistidreport = [nump.floor(Sim.clock), reps, Sim.clock, patient.id,patient.MELD,patient.RelistTxTime]
#concatenate the relistidreport to the record of relists
Sim.record_relists = nump.vstack((Sim.record_relists, relistidreport))
#Update Stats for Transplants
#Number of Transplants and Regrafts
Stat.ytransplants[disposition[1]] = Stat.ytransplants[disposition[1]] +1
Sim.record_txDSA[neworgan.DSA,disposition[1]] = Sim.record_txDSA[neworgan.DSA,disposition[1]] +1
if patient.Relist ==1:
#if patient is relisted
Stat.yregrafts[disposition[1]] = Stat.yregrafts[disposition[1]] +1 #increase number of retransplanted patients by 1
#Compute waiting Time
if patient.Relist ==0:
#if patient is not relisted
Stat.ywait[disposition[1]] = Stat.ywait[disposition[1]] + (Sim.clock - patient.create_time)
else:
#if patient is relisted
Stat.ywait[disposition[1]] = Stat.ywait[disposition[1]] + (Sim.clock - patient.RelistTxTime)
#Waiting List Sizes
if willrelist ==0:
Stat.numcandidates[disposition[1]] = Stat.numcandidates[disposition[1]] -1 #decrease the number of waiting list candidates for the DSA by 1
#1st Transplant MELD
if patient.Status1 ==0 and patient.Relist ==0:
#Tx-MELD at measure assumed to exclude re-grafts
Stat.yMELD[disposition[1]] = Stat.yMELD[disposition[1]] + patient.MELD #add the patient's MELD score to the DSA total
Stat.ymedMELD[disposition[1]].append(patient.MELD) #record the patient MELD score
#Output for Posttransplant processing for those who were not ever relisted or will be
if willrelist ==0 and patient.Relist ==0:
regtx =0
nattx = 0
if patient.DSA != neworgan.DSA and (Regions[neworgan.DSA,patient.DSA] ==1 or SharingPartners[neworgan.DSA, patient.DSA] == 1):
regtx =1 #patient had regional transplant
elif patient.DSA != neworgan.DSA:
nattx=1 #patient had national transplant
#record the floor of the current time, repetition, current time, patient id, indicator for regional transplant, indicator for national
#transplant
txidreport = [nump.floor(Sim.clock), reps, Sim.clock, patient.id,regtx,nattx]
Sim.record_txID = nump.vstack((Sim.record_txID, txidreport)) #add new record to list of transplant records
#record as follows (time of removal, repetition, patient id, patient allocation MELD, patient lab MElD)
oidreport = [nump.floor(Sim.clock), reps, Sim.clock, patient.id,Sim.oid]
#add to list of transplant records
Sim.record_doID = nump.vstack((Sim.record_doID, oidreport))
#Output for Posttransplant processing for regrafts
if patient.Relist ==1:
regtx =0 #indicator for regional transplant
nattx = 0 #indicator for national transplant
if patient.DSA != neworgan.DSA and (Regions[neworgan.DSA,patient.DSA] ==1 or SharingPartners[neworgan.DSA, patient.DSA] == 1):
regtx =1 #patient had regional transplant
elif patient.DSA != neworgan.DSA:
nattx=1 #patient had national transplant
#record the floor of the current time, repetition, current time, patient id, indicator for regional transplant, indicator for national transplant
txidreport = [nump.floor(Sim.clock), reps, Sim.clock, patient.id,regtx,nattx]
#add to list of relisted transplants
Sim.record_txIDregraft = nump.vstack((Sim.record_txIDregraft, txidreport))
#record as follows (time of removal, repetition, patient id, patient allocation MELD, patient lab MELD)
oidreport = [nump.floor(Sim.clock), reps, Sim.clock, patient.id,Sim.oid]
#add to list of retransplant records
Sim.record_doIDregraft = nump.vstack((Sim.record_doIDregraft, oidreport))
if willrelist ==1:
#if patient will relist, update relist status and MELD score
OPTN[disposition[1]][i].Relist =1
OPTN[disposition[1]][i].MELD = 32
OPTN[disposition[1]][i].lMELD = 32
else:
#remove transplanted patient if will not be relisted
del OPTN[disposition[1]][i]
break
else: #organ is discarded; update statistics (optional)
pass
#return updated Sim, Stat, and OPTN
return Sim, Stat, OPTN
def Year(Sim, Stat, reps):
"""
This function updates the statistics of the simulation per year
Input:
@Sim: class object that contains relevant variables for the simulation
@Stat: class object that contains statistical information of the simulation
@reps: current replication number
Output:
@Sim: updated Sim
@Stat: updated Stat
"""
#Annual Disparity Statistics
mr_1 = nump.zeros(shape=(ndsa,1),dtype=float)
tr_1 = nump.zeros(shape=(ndsa,1),dtype=float)
wt_1 = nump.zeros(shape=(ndsa,1),dtype=float)
meld_l = nump.zeros(shape=(ndsa,1),dtype=float)
for i in range(0,ndsa):
if Stat.ytransplants[i] > 0:
wt_1[i] = Stat.ywait[i] / Stat.ytransplants[i] #compute the total waiting time/total # of transplants per DSA
meld_l[i] = Stat.yMELD[i] / Stat.ytransplants[i] #compute the total MELD score/total # of transplants per DSA
else:
#write nan if no values available
wt_1[i] = nump.nan
meld_l[i] = nump.nan
if (Stat.yarrivals[i] + Stat.ycandidates[i]) == 0:
#write nan if no values available
mr_1[i] = nump.nan
tr_1[i] = nump.nan
else:
mr_1[i] = Stat.ydeaths[i] / (Stat.yarrivals[i] + Stat.ycandidates[i]) #compute mortality rate (number of deaths/number of waiting candidates)
tr_1[i] = Stat.ytransplants[i] / (Stat.yarrivals[i] + Stat.ycandidates[i]) #compute transplant rate (number of transplants/number of waiting candidates)
#compute the median MELD score
medianmelds = nump.zeros(shape=(ndsa,1),dtype=float)
for i in range(0,ndsa):
if Stat.ymedMELD[i] != []:
medianmelds[i] = nump.nanmedian(Stat.ymedMELD[i])
else:
medianmelds[i] = nump.nan
#Intermediate Data Outputs
#nump.savetxt("Output_check.txt", Stat.numcandidates, fmt='%1.4e', delimiter='\t', newline='\n')
#nump.savetxt("Output_check2.txt", Stat.yremoved, fmt='%1.4e', delimiter='\t', newline='\n')
#nump.savetxt("Output_check3.txt", Stat.yarrivals, fmt='%1.4e', delimiter='\t', newline='\n')
#nump.savetxt("Output_check4.txt", Stat.ydeaths, fmt='%1.4e', delimiter='\t', newline='\n')
#nump.savetxt("Output_check5.txt", Stat.ytransplants, fmt='%1.4e', delimiter='\t', newline='\n')
#nump.savetxt("Output_check6.txt", mr_1, fmt='%1.4e', delimiter='\t', newline='\n')
mr_numdeaths = [nump.sum(Stat.ydeaths),nump.floor(Sim.clock),reps] #record the total number of deaths along with its current time and current repetition
#mr_disparity = [nump.linalg.norm(mr_1,ord=1),nump.floor(Sim.clock),reps]
#tx_disparity = [nump.linalg.norm(tr_1,ord=1),nump.floor(Sim.clock),reps]
#wt_disparity = [nump.linalg.norm(wt_1,ord=1),nump.floor(Sim.clock),reps]
mr_disparity_mean = [nump.nanmean(mr_1),nump.floor(Sim.clock),reps] #record the mean mortality rate along with its current time and current repetition
mr_disparity_std = [nump.nanstd(mr_1),nump.floor(Sim.clock),reps] #record the standard deviation of mortality rate along with its current time and current repetition
meld_disparity_mean = [nump.nanmean(meld_l),nump.floor(Sim.clock),reps] #record the mean MELD score along with current time and current repetition
meld_disparity_std = [nump.nanstd(meld_l),nump.floor(Sim.clock),reps] #record the standard deviation of the MELD score along with current time and current repetition
medmeld_mean = [nump.nanmean(medianmelds),nump.floor(Sim.clock),reps] #record the mean median MELD score along with current time and current repetition
medmeld_std = [nump.nanstd(medianmelds),nump.floor(Sim.clock),reps] #record the standard deviation of the median MELD score along with current time and current repetition
#print(tx_disparity)
#add the records to the list of yearly statistics
Sim.record_deaths = nump.vstack((Sim.record_deaths, mr_numdeaths))
Sim.record_mr_disparity_mean = nump.vstack((Sim.record_mr_disparity_mean, mr_disparity_mean))
Sim.record_mr_disparity_std = nump.vstack((Sim.record_mr_disparity_std, mr_disparity_std))
Sim.record_meld_disparity_mean = nump.vstack((Sim.record_meld_disparity_mean, meld_disparity_mean))
Sim.record_meld_disparity_std = nump.vstack((Sim.record_meld_disparity_std, meld_disparity_std))
Sim.record_medMELDmean = nump.vstack((Sim.record_medMELDmean, medmeld_mean))
Sim.record_medMELDstd = nump.vstack((Sim.record_medMELDstd, medmeld_std))
Sim.record_txDSAoutput = nump.vstack((Sim.record_txDSAoutput, Sim.record_txDSA))
#create array that records the current time and repetition
recindex =nump.ndarray(shape=(1,3))
recindex[0,0] = nump.floor(Sim.clock)
recindex[0,1] = reps
#add DSA vector regarding deaths, transplants, etc. to the list of records
Sim.record_ydeaths =nump.concatenate((Sim.record_ydeaths,nump.concatenate((recindex,nump.transpose(Stat.ydeaths)),axis=1)),axis=0)
Sim.record_ytransplants = nump.concatenate((Sim.record_ytransplants,nump.concatenate((recindex,nump.transpose(Stat.ytransplants)),axis=1)),axis=0)
Sim.record_yarrivals = nump.concatenate((Sim.record_yarrivals,nump.concatenate((recindex,nump.transpose(Stat.yarrivals)),axis=1)),axis=0)
Sim.record_ycandidates = nump.concatenate((Sim.record_ycandidates,nump.concatenate((recindex,nump.transpose(Stat.ycandidates)),axis=1)),axis=0)
Sim.record_yremoved =nump.concatenate((Sim.record_yremoved,nump.concatenate((recindex,nump.transpose(Stat.yremoved)),axis=1)),axis=0)
Sim.record_ywait = nump.concatenate((Sim.record_ywait,nump.concatenate((recindex,nump.transpose(Stat.ywait)),axis=1)),axis=0)
Sim.record_yMELD = nump.concatenate((Sim.record_yMELD,nump.concatenate((recindex,nump.transpose(Stat.yMELD)),axis=1)),axis=0)
Sim.record_yrelists =nump.concatenate((Sim.record_yrelists,nump.concatenate((recindex,nump.transpose(Stat.yrelists)),axis=1)),axis=0)
Sim.record_yregrafts =nump.concatenate((Sim.record_yregrafts,nump.concatenate((recindex,nump.transpose(Stat.yregrafts)),axis=1)),axis=0)
#Reset Statistics for Following Year
Stat.yarrivals = nump.zeros(shape=(ndsa,1),dtype=int)
Stat.ydeaths = nump.zeros(shape=(ndsa,1),dtype=int)
Stat.yremoved = nump.zeros(shape=(ndsa,1),dtype=int)
Stat.ytransplants = nump.zeros(shape=(ndsa,1),dtype=int)
Stat.ycandidates = deepcopy(Stat.numcandidates)
Stat.ywait = nump.zeros(shape=(ndsa,1),dtype=float)
Stat.yMELD = nump.zeros(shape=(ndsa,1),dtype=int)
Stat.ymedMELD = [[] for i in range(0,ndsa)]
Stat.yrelists = nump.zeros(shape=(ndsa,1),dtype=int)
Stat.yregrafts = nump.zeros(shape=(ndsa,1),dtype=int)
#return updated Sim, Stat
return Sim, Stat
def EndRep():
"""
Prints a message saying that a replication ended.
"""
print("Ending replication, time is: ", datetime.datetime.now().time())
#print(Sim.clock)
#####################################################################################################################################################################################################################
| 0
| 0
| 0
|
89b6a5aacdae170197bc3ceae867aa296d074173
| 594
|
py
|
Python
|
Algorithms/Searching & Sorting/Counting Sort/counting_sort.py
|
sol4ik/interview-techdev-guide
|
2f1c755df3f34125850a2d6322edc24dc097c1fb
|
[
"MIT"
] | null | null | null |
Algorithms/Searching & Sorting/Counting Sort/counting_sort.py
|
sol4ik/interview-techdev-guide
|
2f1c755df3f34125850a2d6322edc24dc097c1fb
|
[
"MIT"
] | null | null | null |
Algorithms/Searching & Sorting/Counting Sort/counting_sort.py
|
sol4ik/interview-techdev-guide
|
2f1c755df3f34125850a2d6322edc24dc097c1fb
|
[
"MIT"
] | null | null | null |
def countSort(array):
"""
Counting Sort algorithm implementation on Python.
:param array: array that need to be sorted
:return: resulting sorted array
"""
    output = [0 for i in range(256)]
    count = [0 for i in range(256)]
    result = ["" for element in array]
    # Count the occurrences of each character.
    for i in array:
        count[ord(i)] += 1
    # Turn the counts into prefix sums: count[c] becomes one past the last
    # output slot that character chr(c) should occupy.
    for i in range(1, 256):
        count[i] += count[i - 1]
    # Place each character at its position in the output.
    for i in range(len(array)):
        output[count[ord(array[i])] - 1] = array[i]
        count[ord(array[i])] -= 1
    # Copy the sorted characters into the result list.
    for i in range(len(array)):
        result[i] = output[i]
    return result
| 29.7
| 53
| 0.574074
|
def countSort(array):
"""
Counting Sort algorithm implementation on Python.
:param array: array that need to be sorted
:return: resulting sorted array
"""
    output = [0 for i in range(256)]
    count = [0 for i in range(256)]
    result = ["" for element in array]
    # Count the occurrences of each character.
    for i in array:
        count[ord(i)] += 1
    # Turn the counts into prefix sums: count[c] becomes one past the last
    # output slot that character chr(c) should occupy.
    for i in range(1, 256):
        count[i] += count[i - 1]
    # Place each character at its position in the output.
    for i in range(len(array)):
        output[count[ord(array[i])] - 1] = array[i]
        count[ord(array[i])] -= 1
    # Copy the sorted characters into the result list.
    for i in range(len(array)):
        result[i] = output[i]
    return result
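# Added usage sketch (an assumption, not part of the original file): with the
# corrected countSort above, sorting the characters of a string looks like this.
if __name__ == "__main__":
    sample = "geeksforgeeks"
    print("".join(countSort(sample)))   # -> eeeefggkkorss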
| 0
| 0
| 0
|
159c632f19d51c0cb382b2c9ba57cbb71f415051
| 96
|
py
|
Python
|
samtranslator/public/models.py
|
eugeniosu/serverless-application-model
|
d93e15232a1921fa51667389d83aeabbf1ff72d3
|
[
"Apache-2.0"
] | 6
|
2019-03-29T02:56:59.000Z
|
2021-03-28T22:07:02.000Z
|
samtranslator/public/models.py
|
eugeniosu/serverless-application-model
|
d93e15232a1921fa51667389d83aeabbf1ff72d3
|
[
"Apache-2.0"
] | 18
|
2019-10-09T23:27:48.000Z
|
2021-06-25T15:18:24.000Z
|
samtranslator/public/models.py
|
Mattlk13/serverless-application-model
|
27b5934de46c42d47ba1484d5432310cac694b25
|
[
"Apache-2.0"
] | 15
|
2019-05-27T01:04:30.000Z
|
2021-10-01T05:54:45.000Z
|
# flake8: noqa
from samtranslator.model.function_policies import FunctionPolicies, PolicyTypes
| 24
| 79
| 0.854167
|
# flake8: noqa
from samtranslator.model.function_policies import FunctionPolicies, PolicyTypes
| 0
| 0
| 0
|
43c0f69870ce566bd19cef135e12fb3a3b95c79c
| 32,952
|
py
|
Python
|
src/shared_gui.py
|
NicholasPSnow/99-CapstoneProject-201920
|
ca3baa3b3c53ae6c5af70cd93b3af450a1da41ad
|
[
"MIT"
] | null | null | null |
src/shared_gui.py
|
NicholasPSnow/99-CapstoneProject-201920
|
ca3baa3b3c53ae6c5af70cd93b3af450a1da41ad
|
[
"MIT"
] | null | null | null |
src/shared_gui.py
|
NicholasPSnow/99-CapstoneProject-201920
|
ca3baa3b3c53ae6c5af70cd93b3af450a1da41ad
|
[
"MIT"
] | null | null | null |
"""
Capstone Project. Code to run on a LAPTOP (NOT the robot).
Constructs and returns Frame objects for the basics:
-- teleoperation
-- arm movement
-- stopping the robot program
This code is SHARED by all team members. It contains both:
-- High-level, general-purpose methods for a Snatch3r EV3 robot.
-- Lower-level code to interact with the EV3 robot library.
Author: Your professors (for the framework and lower-level code)
and Nicholas Snow, Katana College, and Zach Kelly.
Winter term, 2018-2019.
"""
import tkinter
from tkinter import ttk
import time
def get_teleoperation_frame(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame
has Entry and Button objects that control the EV3 robot's motion
by passing messages using the given MQTT Sender.
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
# Construct the widgets on the frame:
frame_label = ttk.Label(frame, text="Teleoperation")
left_speed_label = ttk.Label(frame, text="Left wheel speed (0 to 100)")
right_speed_label = ttk.Label(frame, text="Right wheel speed (0 to 100)")
left_speed_entry = ttk.Entry(frame, width=8)
left_speed_entry.insert(0, "100")
right_speed_entry = ttk.Entry(frame, width=8, justify=tkinter.RIGHT)
right_speed_entry.insert(0, "100")
forward_button = ttk.Button(frame, text="Forward")
backward_button = ttk.Button(frame, text="Backward")
left_button = ttk.Button(frame, text="Left")
right_button = ttk.Button(frame, text="Right")
stop_button = ttk.Button(frame, text="Stop")
# Grid the widgets:
frame_label.grid(row=0, column=1)
left_speed_label.grid(row=1, column=0)
right_speed_label.grid(row=1, column=2)
left_speed_entry.grid(row=2, column=0)
right_speed_entry.grid(row=2, column=2)
forward_button.grid(row=3, column=1)
left_button.grid(row=4, column=0)
stop_button.grid(row=4, column=1)
right_button.grid(row=4, column=2)
backward_button.grid(row=5, column=1)
# Set the button callbacks:
forward_button["command"] = lambda: handle_forward(
left_speed_entry, right_speed_entry, mqtt_sender)
backward_button["command"] = lambda: handle_backward(
left_speed_entry, right_speed_entry, mqtt_sender)
left_button["command"] = lambda: handle_left(
left_speed_entry, right_speed_entry, mqtt_sender)
right_button["command"] = lambda: handle_right(
left_speed_entry, right_speed_entry, mqtt_sender)
stop_button["command"] = lambda: handle_stop(mqtt_sender)
return frame
def get_arm_frame(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame
has Entry and Button objects that control the EV3 robot's Arm
by passing messages using the given MQTT Sender.
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
# Construct the widgets on the frame:
frame_label = ttk.Label(frame, text="Arm and Claw")
position_label = ttk.Label(frame, text="Desired arm position:")
position_entry = ttk.Entry(frame, width=8)
raise_arm_button = ttk.Button(frame, text="Raise arm")
lower_arm_button = ttk.Button(frame, text="Lower arm")
calibrate_arm_button = ttk.Button(frame, text="Calibrate arm")
move_arm_button = ttk.Button(frame,
text="Move arm to position (0 to 5112)")
blank_label = ttk.Label(frame, text="")
# Grid the widgets:
frame_label.grid(row=0, column=1)
position_label.grid(row=1, column=0)
position_entry.grid(row=1, column=1)
position_entry.insert(0, "0")
move_arm_button.grid(row=1, column=2)
blank_label.grid(row=2, column=1)
raise_arm_button.grid(row=3, column=0)
lower_arm_button.grid(row=3, column=1)
calibrate_arm_button.grid(row=3, column=2)
# Set the Button callbacks:
raise_arm_button["command"] = lambda: handle_raise_arm(mqtt_sender)
lower_arm_button["command"] = lambda: handle_lower_arm(mqtt_sender)
calibrate_arm_button["command"] = lambda: handle_calibrate_arm(mqtt_sender)
move_arm_button["command"] = lambda: handle_move_arm_to_position(position_entry.get(), mqtt_sender)
return frame
def get_control_frame(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame has
Button objects to exit this program and/or the robot's program (via MQTT).
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
# Construct the widgets on the frame:
frame_label = ttk.Label(frame, text="Control")
quit_robot_button = ttk.Button(frame, text="Stop the robot's program")
exit_button = ttk.Button(frame, text="Stop this and the robot's program")
# Grid the widgets:
frame_label.grid(row=0, column=1)
quit_robot_button.grid(row=1, column=0)
exit_button.grid(row=1, column=2)
# Set the Button callbacks:
quit_robot_button["command"] = lambda: handle_quit(mqtt_sender)
exit_button["command"] = lambda: handle_exit(mqtt_sender)
return frame
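# --- Added usage sketch (not part of the original framework) -----------------
# Shows one way the frame constructors above could be assembled into a window.
# The real MQTT client class (referred to as com.MqttClient in the docstrings)
# is not defined in this file, so a tiny stand-in is used here instead; nothing
# below claims to be the framework's actual API.
class _FakeMqttSender(object):
    """ Stand-in for the MQTT client: just prints the messages it would send. """
    def send_message(self, message_name, arguments=None):
        print("would send:", message_name, arguments)
def _demo_main():
    """ Builds a window with the basic frames, wired to the fake sender above. """
    root = tkinter.Tk()
    root.title("shared_gui demo (sketch)")
    _sender = _FakeMqttSender()
    get_teleoperation_frame(root, _sender)
    get_arm_frame(root, _sender)
    get_control_frame(root, _sender)
    root.mainloop()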
###############################################################################
###############################################################################
# The following specifies, for each Button,
# what should happen when the Button is pressed.
###############################################################################
###############################################################################
###############################################################################
# Handlers for Buttons in the Teleoperation frame.
###############################################################################
def handle_forward(left_entry_box, right_entry_box, mqtt_sender):
"""
Tells the robot to move using the speeds in the given entry boxes,
with the speeds used as given.
:type left_entry_box: ttk.Entry
:type right_entry_box: ttk.Entry
:type mqtt_sender: com.MqttClient
"""
print('forward', left_entry_box.get(), right_entry_box.get())
mqtt_sender.send_message('movement', [left_entry_box.get(), right_entry_box.get()])
def handle_backward(left_entry_box, right_entry_box, mqtt_sender):
"""
Tells the robot to move using the speeds in the given entry boxes,
but using the negatives of the speeds in the entry boxes.
:type left_entry_box: ttk.Entry
:type right_entry_box: ttk.Entry
:type mqtt_sender: com.MqttClient
"""
print('backward', left_entry_box.get(), right_entry_box.get())
left = -int(left_entry_box.get())
right = -int(right_entry_box.get())
mqtt_sender.send_message('movement', [str(left), str(right)])
def handle_left(left_entry_box, right_entry_box, mqtt_sender):
"""
Tells the robot to move using the speeds in the given entry boxes,
but using the negative of the speed in the left entry box.
:type left_entry_box: ttk.Entry
:type right_entry_box: ttk.Entry
:type mqtt_sender: com.MqttClient
"""
print('left', left_entry_box.get(), right_entry_box.get())
left = -int(left_entry_box.get())
right = int(right_entry_box.get())
mqtt_sender.send_message('movement', [str(left), str(right)])
def handle_right(left_entry_box, right_entry_box, mqtt_sender):
"""
Tells the robot to move using the speeds in the given entry boxes,
but using the negative of the speed in the right entry box.
:type left_entry_box: ttk.Entry
:type right_entry_box: ttk.Entry
:type mqtt_sender: com.MqttClient
"""
print('right', left_entry_box.get(), right_entry_box.get())
left = int(left_entry_box.get())
right = -int(right_entry_box.get())
mqtt_sender.send_message('movement', [str(left), str(right)])
def handle_stop(mqtt_sender):
"""
Tells the robot to stop.
:type mqtt_sender: com.MqttClient
"""
mqtt_sender.send_message('stop')
print("Stop")
###############################################################################
# Handlers for Buttons in the ArmAndClaw frame.
###############################################################################
def handle_raise_arm(mqtt_sender):
"""
Tells the robot to raise its Arm until its touch sensor is pressed.
:type mqtt_sender: com.MqttClient
"""
mqtt_sender.send_message('up')
print("Move Arm Up")
def handle_lower_arm(mqtt_sender):
"""
Tells the robot to lower its Arm until it is all the way down.
:type mqtt_sender: com.MqttClient
"""
mqtt_sender.send_message('down')
print("Move Arm Down")
def handle_calibrate_arm(mqtt_sender):
"""
Tells the robot to calibrate its Arm, that is, first to raise its Arm
until its touch sensor is pressed, then to lower its Arm until it is
    all the way down, and then to mark that position as position 0.
:type mqtt_sender: com.MqttClient
"""
mqtt_sender.send_message('calibrate')
print("Calibrate")
def handle_move_arm_to_position(position_entry, mqtt_sender):
"""
Tells the robot to move its Arm to the position in the given Entry box.
The robot must have previously calibrated its Arm.
    :type position_entry: str
:type mqtt_sender: com.MqttClient
"""
mqtt_sender.send_message('move_to_pos', [str(position_entry)])
print("Move to Position:",position_entry)
###############################################################################
# Handlers for Buttons in the Control frame.
###############################################################################
def handle_quit(mqtt_sender):
"""
Tell the robot's program to stop its loop (and hence quit).
:type mqtt_sender: com.MqttClient
"""
mqtt_sender.send_message('command', ['quit'])
print('########')
print('# Quit #')
print('########')
def handle_exit(mqtt_sender):
"""
Tell the robot's program to stop its loop (and hence quit).
Then exit this program.
:type mqtt_sender: com.MqttClient
"""
mqtt_sender.send_message('command', ['exit'])
print('########')
print('# Exit #')
print('########')
def get_Sprint_1_Drive_System_frame(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame
    has Entry and Button objects that control the EV3 robot's motion
by passing messages using the given MQTT Sender.
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
# Construct the widgets on the frame:
frame_label = ttk.Label(frame, text="Secondary Drive System")
wheel_speed_label = ttk.Label(frame, text="Wheel Speed (0 to 100)")
time_label = ttk.Label(frame, text="Movement Time (0 to INF)")
inches_label = ttk.Label(frame, text="Movement Distance (0 to INF)")
wheel_speed_entry = ttk.Entry(frame, width=8)
wheel_speed_entry.insert(0, "100")
time_entry = ttk.Entry(frame, width=8, justify=tkinter.RIGHT)
time_entry.insert(0, "10")
inches_entry = ttk.Entry(frame, width=8, justify=tkinter.RIGHT)
inches_entry.insert(0, "10")
forward_time_button = ttk.Button(frame, text="Forward for Seconds")
forward_time_inches_button = ttk.Button(frame, text="Forward for Inches(time)")
forward_inches_button = ttk.Button(frame, text="Forward for Inches(Encoder)")
# Grid the widgets:
frame_label.grid(row=0, column=1)
wheel_speed_label.grid(row=1, column=0)
time_label.grid(row=1, column=1)
inches_label.grid(row=1, column=2)
wheel_speed_entry.grid(row=2, column=0)
time_entry.grid(row=2, column=1)
inches_entry.grid(row=2, column=2)
forward_time_button.grid(row=3, column=0)
forward_time_inches_button.grid(row=3, column=1)
forward_inches_button.grid(row=3, column=2)
# Set the button callbacks:
forward_time_button["command"] = lambda: handle_forward_time_button(wheel_speed_entry.get(), time_entry.get(), mqtt_sender)
forward_time_inches_button["command"] = lambda: handle_forward_time_inches_button(wheel_speed_entry.get(), inches_entry.get(), mqtt_sender)
forward_inches_button["command"] = lambda: handle_forward_inches_button(wheel_speed_entry.get(), inches_entry.get(), mqtt_sender)
return frame
def get_Sprint_1_Beeper_System_frame(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame
    has Entry and Button objects that control the EV3 robot's sound
by passing messages using the given MQTT Sender.
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
# Construct the widgets on the frame:
frame_label = ttk.Label(frame, text="Sound System")
number_of_beeps_label = ttk.Label(frame, text="Number of Beeps")
tone_duration_label = ttk.Label(frame, text="Duration of Tone")
tone_frequency_label = ttk.Label(frame, text="Tone Frequency")
speak_text_label = ttk.Label(frame, text="Text to Speech")
number_of_beeps= ttk.Entry(frame, width=8)
number_of_beeps.insert(0, "10")
tone_duration = ttk.Entry(frame, width=8, justify=tkinter.RIGHT)
tone_duration.insert(0, "10")
tone_frequency = ttk.Entry(frame, width=8, justify=tkinter.RIGHT)
tone_frequency.insert(0, "10")
speak_text = ttk.Entry(frame, width=8, justify=tkinter.RIGHT)
speak_text.insert(0, "Type Here")
beep_button = ttk.Button(frame, text="Play Beeps")
tone_button = ttk.Button(frame, text="Play Tone")
speak_button = ttk.Button(frame, text="Read Text")
# Grid the widgets:
frame_label.grid(row=0, column=1)
number_of_beeps_label.grid(row=1, column=0)
tone_duration_label.grid(row=1, column=1)
tone_frequency_label.grid(row=1, column=2)
speak_text_label.grid(row=1, column=3)
number_of_beeps.grid(row=2, column=0)
tone_duration.grid(row=2, column=1)
tone_frequency.grid(row=2, column=2)
speak_text.grid(row=2, column=3)
beep_button.grid(row=3, column=0)
tone_button.grid(row=3, column=1)
speak_button.grid(row=3, column=3)
# Set the button callbacks:
beep_button["command"] = lambda: handle_beep_button(number_of_beeps.get(), mqtt_sender)
tone_button["command"] = lambda: handle_tone_button(tone_duration.get(), tone_frequency.get(), mqtt_sender)
speak_button["command"] = lambda: handle_speak_button(speak_text.get(), mqtt_sender)
return frame
def get_Katana_frame(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame
    has Entry and Button objects that control the EV3 robot's motion
by passing messages using the given MQTT Sender.
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
# Construct the widgets on the frame:
frame_label = ttk.Label(frame, text="Katana's System")
obtain_with_sensor_label = ttk.Label(frame, text="Get Object with Proximity")
obtain_with_sensor_button = ttk.Button(frame, text="Get")
wheel_speed_label = ttk.Label(frame, text="Move Speed")
rate_of_frequency_label = ttk.Label(frame, text="Frequency Rate (Increasing)")
rate_of_frequency = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
rate_of_frequency.insert(0, "10")
initial_frequency_label = ttk.Label(frame, text="Initial Frequency")
initial_frequency = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
initial_frequency.insert(0, "5")
obtain_with_camera_label = ttk.Label(frame, text="Get Object with Camera")
obtain_with_camera_button = ttk.Button(frame, text="Get")
wheel_speed_entry = ttk.Entry(frame, width=8)
wheel_speed_entry.insert(0, "100")
spin_speed_label = ttk.Label(frame, text="Spinning Speed")
spin_speed = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
spin_speed.insert(0, "10")
spin_direction_label = ttk.Label(frame, text="Spinning Direction")
spin_direction = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
spin_direction.insert(0, "Counter Clockwise")
# Grid the widgets:
frame_label.grid(row=0, column=2)
obtain_with_sensor_label.grid(row=1, column=1)
obtain_with_sensor_button.grid(row=2, column=1)
rate_of_frequency_label.grid(row=1,column=2)
rate_of_frequency.grid(row=2, column=2)
initial_frequency_label.grid(row=1, column=3)
initial_frequency.grid(row=2, column=3)
wheel_speed_label.grid(row=3, column=2)
wheel_speed_entry.grid(row=4, column=2)
obtain_with_camera_label.grid(row=5, column=1)
obtain_with_camera_button.grid(row=6, column=1)
spin_speed_label.grid(row=5, column=2)
spin_speed.grid(row=6, column=2)
spin_direction_label.grid(row=5, column=3)
spin_direction.grid(row=6, column=3)
# Set the button callbacks:
obtain_with_sensor_button["command"] = lambda: handle_obtain_with_sensor_button(wheel_speed_entry.get(),rate_of_frequency.get(),
initial_frequency.get(),
mqtt_sender)
obtain_with_camera_button["command"] = lambda: handle_obtain_with_camera_button(wheel_speed_entry.get(),
spin_speed.get(),
spin_direction.get(),
rate_of_frequency.get(),
initial_frequency.get(),mqtt_sender)
return frame
def get_Nick_frame(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame
    has Entry and Button objects that control the EV3 robot's motion
by passing messages using the given MQTT Sender.
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
# Construct the widgets on the frame:
frame_label = ttk.Label(frame, text="Nick's Sprint 2 System")
proximity_label = ttk.Label(frame, text="Go to and Pick up Object (Proximity)")
proximity_button = ttk.Button(frame, text="Run Proximity Grab")
camera_label = ttk.Label(frame, text="Go to and Pick up Object (Camera)")
camera_button = ttk.Button(frame, text="Run Camera Grab")
line_follower_label = ttk.Label(frame, text="Line Follower (Bang Bang Method)")
line_button = ttk.Button(frame, text="Follow Line")
rate_of_beeps_label = ttk.Label(frame, text="Beep Rate Increase")
rate_of_beeps= ttk.Entry(frame, width=8, justify=tkinter.LEFT)
rate_of_beeps.insert(0, "10")
initial_beeps_label = ttk.Label(frame, text="Initial Beep Rate")
initial_beeps = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
initial_beeps.insert(0, "5")
speed_label = ttk.Label(frame, text="Turning Speed")
speed = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
speed.insert(0, "100")
direction_label = ttk.Label(frame, text="Turning Direction, Clockwise or Counter-Clockwise")
direction = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
direction.insert(0, "Clockwise")
    starting_side_label = ttk.Label(frame, text="Starting Side, Right or Left")
starting_side = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
starting_side.insert(0, "Right")
# Grid the widgets:
frame_label.grid(row=0, column=1)
proximity_label.grid(row=1, column=0)
proximity_button.grid(row=1, column=1)
rate_of_beeps_label.grid(row=2, column=0)
rate_of_beeps.grid(row=2, column=1)
initial_beeps_label.grid(row=3, column=0)
initial_beeps.grid(row=3, column=1)
camera_label.grid(row=4, column=0)
camera_button.grid(row=4, column=1)
speed_label.grid(row=5, column=0)
speed.grid(row=5, column=1)
direction_label.grid(row=6, column=0)
direction.grid(row=6, column=1)
line_follower_label.grid(row=7, column=0)
line_button.grid(row=7, column=1)
starting_side_label.grid(row=8, column=0)
starting_side.grid(row=8, column=1)
# Set the button callbacks:
proximity_button["command"] = lambda: handle_proximity_button(rate_of_beeps.get(), initial_beeps.get(), mqtt_sender)
camera_button["command"] = lambda: handle_camera_button(speed.get(),direction.get(),rate_of_beeps.get(), initial_beeps.get(), mqtt_sender)
line_button["command"] = lambda: handle_line_button(starting_side.get(), mqtt_sender)
return frame
##COLOR FRAMING
def get_Sprint_2_Color_frame(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame
    has Entry and Button objects that control the EV3 robot's motion
by passing messages using the given MQTT Sender.
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
# Construct the widgets on the frame:
frame_label = ttk.Label(frame, text="Color Sensor")
intensity_less_label = ttk.Label(frame, text="Go Until Intensity is Less Than")
intensity_less_button = ttk.Button(frame, text="Run Less than Intensity")
intensity_greater_label = ttk.Label(frame, text="Go Until Intensity is Greater Than")
intensity_greater_button = ttk.Button(frame, text="Run Greater than Intensity")
until_color_label = ttk.Label(frame, text="Go Until Color")
until_color_button = ttk.Button(frame, text="Run Go Until Color")
until_not_color_label = ttk.Label(frame, text="Go Until Not Color")
until_not_color_button = ttk.Button(frame, text="Run Go Until Not Color")
color_label = ttk.Label(frame, text="Color")
color= ttk.Entry(frame, width=8, justify=tkinter.LEFT)
color.insert(0, "Red")
speed_label = ttk.Label(frame, text="Speed")
speed = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
speed.insert(0, "50")
intensity_label = ttk.Label(frame, text="Light Intensity")
intensity = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
intensity.insert(0, "50")
# Grid the widgets:
frame_label.grid(row=0, column=1)
color_label.grid(row=1, column=0)
color.grid(row=2, column=0)
speed_label.grid(row=1, column=1)
speed.grid(row=2, column=1)
intensity_label.grid(row=1, column=2)
intensity.grid(row=2, column=2)
intensity_less_label.grid(row=3, column=0)
intensity_less_button.grid(row=4, column=0)
intensity_greater_label.grid(row=5, column=0)
intensity_greater_button.grid(row=6, column=0)
until_color_label.grid(row=3, column=2)
until_color_button.grid(row=4, column=2)
until_not_color_label.grid(row=5, column=2)
until_not_color_button.grid(row=6, column=2)
# Set the button callbacks:
intensity_less_button["command"] = lambda: handle_intensity_less_button(speed.get(), intensity.get(), mqtt_sender)
intensity_greater_button["command"] = lambda: handle_intensity_greater_button(speed.get(),intensity.get(), mqtt_sender)
until_color_button["command"] = lambda: handle_until_color_button(speed.get(),color.get(), mqtt_sender)
until_not_color_button["command"] = lambda: handle_until_not_color_button(speed.get(),color.get(), mqtt_sender)
return frame
##PROXIMITY SENSOR
def get_Sprint_2_Proximity_frame(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame
    has Entry and Button objects that control the EV3 robot's motion
by passing messages using the given MQTT Sender.
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
# Construct the widgets on the frame:
frame_label = ttk.Label(frame, text="Proximity Sensor")
distance_less_label = ttk.Label(frame, text="Go Until Distance is Less Than")
distance_less_button = ttk.Button(frame, text="Run Less than Distance")
distance_greater_label = ttk.Label(frame, text="Go Until Distance is Greater Than")
distance_greater_button = ttk.Button(frame, text="Run Greater than Distance")
until_distance_label = ttk.Label(frame, text="Go Until Distance Within")
until_distance_button = ttk.Button(frame, text="Run Go Until Distance Within")
inches_label = ttk.Label(frame, text="Inches")
inches = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
inches.insert(0, "10")
speed_label = ttk.Label(frame, text="Speed")
speed = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
speed.insert(0, "100")
delta_label = ttk.Label(frame, text="Delta Distance")
delta = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
delta.insert(0, "50")
# Grid the widgets:
frame_label.grid(row=0, column=1)
distance_less_label.grid(row=3, column=0)
distance_less_button.grid(row=4, column=0)
distance_greater_label.grid(row=3, column=1)
distance_greater_button.grid(row=4, column=1)
until_distance_label.grid(row=3, column=2)
until_distance_button.grid(row=4, column=2)
delta_label.grid(row=1, column=0)
delta.grid(row=2, column=0)
speed_label.grid(row=1, column=1)
speed.grid(row=2, column=1)
inches_label.grid(row=1, column=2)
inches.grid(row=2, column=2)
# Set the button callbacks:
distance_greater_button["command"] = lambda: handle_distance_greater_button(speed.get(), inches.get(), mqtt_sender)
distance_less_button["command"] = lambda: handle_distance_less_button(speed.get(), inches.get(),mqtt_sender)
until_distance_button["command"] = lambda: handle_until_distance_button(speed.get(), inches.get(), delta.get(), mqtt_sender)
return frame
##Camera SENSOR
def get_Sprint_2_Camera_frame(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame
    has Entry and Button objects that control the EV3 robot's motion
by passing messages using the given MQTT Sender.
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
# Construct the widgets on the frame:
frame_label = ttk.Label(frame, text="Camera Sensor")
counter_clockwise_label = ttk.Label(frame, text="Search Counterclockwise")
counter_clockwise_button = ttk.Button(frame, text="Run CCW Search")
    clockwise_label = ttk.Label(frame, text="Search Clockwise")
clockwise_button = ttk.Button(frame, text="Run CW Search")
speed_label = ttk.Label(frame, text="Speed")
speed = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
speed.insert(0, "100")
area_label = ttk.Label(frame, text="Area Size")
area = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
area.insert(0, "10")
# Grid the widgets:
frame_label.grid(row=0, column=1)
counter_clockwise_label.grid(row=1, column=0)
counter_clockwise_button.grid(row=2, column=0)
clockwise_label.grid(row=1, column=2)
clockwise_button.grid(row=2, column=2)
area_label.grid(row=1, column=1)
area.grid(row=2, column=1)
speed_label.grid(row=3, column=1)
speed.grid(row=4, column=1)
# Set the button callbacks:
counter_clockwise_button["command"] = lambda: handle_counter_clockwise_button(speed.get(), area.get(), mqtt_sender)
clockwise_button["command"] = lambda: handle_clockwise_button(speed.get(), area.get(), mqtt_sender)
return frame
| 41.711392
| 160
| 0.689306
|
"""
Capstone Project. Code to run on a LAPTOP (NOT the robot).
Constructs and returns Frame objects for the basics:
-- teleoperation
-- arm movement
-- stopping the robot program
This code is SHARED by all team members. It contains both:
-- High-level, general-purpose methods for a Snatch3r EV3 robot.
-- Lower-level code to interact with the EV3 robot library.
Author: Your professors (for the framework and lower-level code)
and Nicholas Snow, Katana College, and Zach Kelly.
Winter term, 2018-2019.
"""
import tkinter
from tkinter import ttk
import time
def get_teleoperation_frame(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame
has Entry and Button objects that control the EV3 robot's motion
by passing messages using the given MQTT Sender.
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
# Construct the widgets on the frame:
frame_label = ttk.Label(frame, text="Teleoperation")
left_speed_label = ttk.Label(frame, text="Left wheel speed (0 to 100)")
right_speed_label = ttk.Label(frame, text="Right wheel speed (0 to 100)")
left_speed_entry = ttk.Entry(frame, width=8)
left_speed_entry.insert(0, "100")
right_speed_entry = ttk.Entry(frame, width=8, justify=tkinter.RIGHT)
right_speed_entry.insert(0, "100")
forward_button = ttk.Button(frame, text="Forward")
backward_button = ttk.Button(frame, text="Backward")
left_button = ttk.Button(frame, text="Left")
right_button = ttk.Button(frame, text="Right")
stop_button = ttk.Button(frame, text="Stop")
# Grid the widgets:
frame_label.grid(row=0, column=1)
left_speed_label.grid(row=1, column=0)
right_speed_label.grid(row=1, column=2)
left_speed_entry.grid(row=2, column=0)
right_speed_entry.grid(row=2, column=2)
forward_button.grid(row=3, column=1)
left_button.grid(row=4, column=0)
stop_button.grid(row=4, column=1)
right_button.grid(row=4, column=2)
backward_button.grid(row=5, column=1)
# Set the button callbacks:
forward_button["command"] = lambda: handle_forward(
left_speed_entry, right_speed_entry, mqtt_sender)
backward_button["command"] = lambda: handle_backward(
left_speed_entry, right_speed_entry, mqtt_sender)
left_button["command"] = lambda: handle_left(
left_speed_entry, right_speed_entry, mqtt_sender)
right_button["command"] = lambda: handle_right(
left_speed_entry, right_speed_entry, mqtt_sender)
stop_button["command"] = lambda: handle_stop(mqtt_sender)
return frame
def get_arm_frame(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame
has Entry and Button objects that control the EV3 robot's Arm
by passing messages using the given MQTT Sender.
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
# Construct the widgets on the frame:
frame_label = ttk.Label(frame, text="Arm and Claw")
position_label = ttk.Label(frame, text="Desired arm position:")
position_entry = ttk.Entry(frame, width=8)
raise_arm_button = ttk.Button(frame, text="Raise arm")
lower_arm_button = ttk.Button(frame, text="Lower arm")
calibrate_arm_button = ttk.Button(frame, text="Calibrate arm")
move_arm_button = ttk.Button(frame,
text="Move arm to position (0 to 5112)")
blank_label = ttk.Label(frame, text="")
# Grid the widgets:
frame_label.grid(row=0, column=1)
position_label.grid(row=1, column=0)
position_entry.grid(row=1, column=1)
position_entry.insert(0, "0")
move_arm_button.grid(row=1, column=2)
blank_label.grid(row=2, column=1)
raise_arm_button.grid(row=3, column=0)
lower_arm_button.grid(row=3, column=1)
calibrate_arm_button.grid(row=3, column=2)
# Set the Button callbacks:
raise_arm_button["command"] = lambda: handle_raise_arm(mqtt_sender)
lower_arm_button["command"] = lambda: handle_lower_arm(mqtt_sender)
calibrate_arm_button["command"] = lambda: handle_calibrate_arm(mqtt_sender)
move_arm_button["command"] = lambda: handle_move_arm_to_position(position_entry.get(), mqtt_sender)
return frame
def get_control_frame(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame has
Button objects to exit this program and/or the robot's program (via MQTT).
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
# Construct the widgets on the frame:
frame_label = ttk.Label(frame, text="Control")
quit_robot_button = ttk.Button(frame, text="Stop the robot's program")
exit_button = ttk.Button(frame, text="Stop this and the robot's program")
# Grid the widgets:
frame_label.grid(row=0, column=1)
quit_robot_button.grid(row=1, column=0)
exit_button.grid(row=1, column=2)
# Set the Button callbacks:
quit_robot_button["command"] = lambda: handle_quit(mqtt_sender)
exit_button["command"] = lambda: handle_exit(mqtt_sender)
return frame
###############################################################################
###############################################################################
# The following specifies, for each Button,
# what should happen when the Button is pressed.
###############################################################################
###############################################################################
###############################################################################
# Handlers for Buttons in the Teleoperation frame.
###############################################################################
def handle_forward(left_entry_box, right_entry_box, mqtt_sender):
"""
Tells the robot to move using the speeds in the given entry boxes,
with the speeds used as given.
:type left_entry_box: ttk.Entry
:type right_entry_box: ttk.Entry
:type mqtt_sender: com.MqttClient
"""
print('forward', left_entry_box.get(), right_entry_box.get())
mqtt_sender.send_message('movement', [left_entry_box.get(), right_entry_box.get()])
def handle_backward(left_entry_box, right_entry_box, mqtt_sender):
"""
Tells the robot to move using the speeds in the given entry boxes,
but using the negatives of the speeds in the entry boxes.
:type left_entry_box: ttk.Entry
:type right_entry_box: ttk.Entry
:type mqtt_sender: com.MqttClient
"""
print('backward', left_entry_box.get(), right_entry_box.get())
left = -int(left_entry_box.get())
right = -int(right_entry_box.get())
mqtt_sender.send_message('movement', [str(left), str(right)])
def handle_left(left_entry_box, right_entry_box, mqtt_sender):
"""
Tells the robot to move using the speeds in the given entry boxes,
but using the negative of the speed in the left entry box.
:type left_entry_box: ttk.Entry
:type right_entry_box: ttk.Entry
:type mqtt_sender: com.MqttClient
"""
print('left', left_entry_box.get(), right_entry_box.get())
left = -int(left_entry_box.get())
right = int(right_entry_box.get())
mqtt_sender.send_message('movement', [str(left), str(right)])
def handle_right(left_entry_box, right_entry_box, mqtt_sender):
"""
Tells the robot to move using the speeds in the given entry boxes,
but using the negative of the speed in the right entry box.
:type left_entry_box: ttk.Entry
:type right_entry_box: ttk.Entry
:type mqtt_sender: com.MqttClient
"""
print('right', left_entry_box.get(), right_entry_box.get())
left = int(left_entry_box.get())
right = -int(right_entry_box.get())
mqtt_sender.send_message('movement', [str(left), str(right)])
def handle_stop(mqtt_sender):
"""
Tells the robot to stop.
:type mqtt_sender: com.MqttClient
"""
mqtt_sender.send_message('stop')
print("Stop")
###############################################################################
# Handlers for Buttons in the ArmAndClaw frame.
###############################################################################
def handle_raise_arm(mqtt_sender):
"""
Tells the robot to raise its Arm until its touch sensor is pressed.
:type mqtt_sender: com.MqttClient
"""
mqtt_sender.send_message('up')
print("Move Arm Up")
def handle_lower_arm(mqtt_sender):
"""
Tells the robot to lower its Arm until it is all the way down.
:type mqtt_sender: com.MqttClient
"""
mqtt_sender.send_message('down')
print("Move Arm Down")
def handle_calibrate_arm(mqtt_sender):
"""
Tells the robot to calibrate its Arm, that is, first to raise its Arm
until its touch sensor is pressed, then to lower its Arm until it is
    all the way down, and then to mark that position as position 0.
:type mqtt_sender: com.MqttClient
"""
mqtt_sender.send_message('calibrate')
print("Calibrate")
def handle_move_arm_to_position(position_entry, mqtt_sender):
"""
Tells the robot to move its Arm to the position in the given Entry box.
The robot must have previously calibrated its Arm.
    :type position_entry: str
:type mqtt_sender: com.MqttClient
"""
mqtt_sender.send_message('move_to_pos', [str(position_entry)])
print("Move to Position:",position_entry)
###############################################################################
# Handlers for Buttons in the Control frame.
###############################################################################
def handle_quit(mqtt_sender):
"""
Tell the robot's program to stop its loop (and hence quit).
:type mqtt_sender: com.MqttClient
"""
mqtt_sender.send_message('command', ['quit'])
print('########')
print('# Quit #')
print('########')
def handle_exit(mqtt_sender):
"""
Tell the robot's program to stop its loop (and hence quit).
Then exit this program.
:type mqtt_sender: com.MqttClient
"""
mqtt_sender.send_message('command', ['exit'])
print('########')
print('# Exit #')
print('########')
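# --- Added sketch (an assumption; the robot-side code is not in this file) ---
# The handlers above only *send* named messages such as 'movement' or 'stop'.
# On the robot, a receiving delegate is expected to expose methods whose names
# match those messages.  The hypothetical class below only illustrates that
# shape; it is not the framework's real receiver.
class _ExampleRobotDelegate(object):
    """ Hypothetical receiver whose method names mirror the messages sent above. """
    def movement(self, left_speed, right_speed):
        print("drive wheels at", int(left_speed), int(right_speed))
    def stop(self):
        print("stop both wheels")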
def get_Sprint_1_Drive_System_frame(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame
    has Entry and Button objects that control the EV3 robot's motion
by passing messages using the given MQTT Sender.
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
# Construct the widgets on the frame:
frame_label = ttk.Label(frame, text="Secondary Drive System")
wheel_speed_label = ttk.Label(frame, text="Wheel Speed (0 to 100)")
time_label = ttk.Label(frame, text="Movement Time (0 to INF)")
inches_label = ttk.Label(frame, text="Movement Distance (0 to INF)")
wheel_speed_entry = ttk.Entry(frame, width=8)
wheel_speed_entry.insert(0, "100")
time_entry = ttk.Entry(frame, width=8, justify=tkinter.RIGHT)
time_entry.insert(0, "10")
inches_entry = ttk.Entry(frame, width=8, justify=tkinter.RIGHT)
inches_entry.insert(0, "10")
forward_time_button = ttk.Button(frame, text="Forward for Seconds")
forward_time_inches_button = ttk.Button(frame, text="Forward for Inches(time)")
forward_inches_button = ttk.Button(frame, text="Forward for Inches(Encoder)")
# Grid the widgets:
frame_label.grid(row=0, column=1)
wheel_speed_label.grid(row=1, column=0)
time_label.grid(row=1, column=1)
inches_label.grid(row=1, column=2)
wheel_speed_entry.grid(row=2, column=0)
time_entry.grid(row=2, column=1)
inches_entry.grid(row=2, column=2)
forward_time_button.grid(row=3, column=0)
forward_time_inches_button.grid(row=3, column=1)
forward_inches_button.grid(row=3, column=2)
# Set the button callbacks:
forward_time_button["command"] = lambda: handle_forward_time_button(wheel_speed_entry.get(), time_entry.get(), mqtt_sender)
forward_time_inches_button["command"] = lambda: handle_forward_time_inches_button(wheel_speed_entry.get(), inches_entry.get(), mqtt_sender)
forward_inches_button["command"] = lambda: handle_forward_inches_button(wheel_speed_entry.get(), inches_entry.get(), mqtt_sender)
return frame
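###############################################################################
# Handlers for Buttons in the Secondary Drive System frame.
###############################################################################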
def handle_forward_time_button(speed,time,mqtt_sender):
mqtt_sender.send_message('Forward_Time', [str(speed),str(time)])
print('Forward_Time',speed,time)
def handle_forward_time_inches_button(speed,inches,mqtt_sender):
mqtt_sender.send_message('Forward_Time_Inches', [str(speed), str(inches)])
print('Forward_Time_Inches', speed, inches)
def handle_forward_inches_button(speed,inches,mqtt_sender):
mqtt_sender.send_message('Forward_Inches', [str(speed), str(inches)])
print('Forward_Inches', speed, inches)
def get_Sprint_1_Beeper_System_frame(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame
    has Entry and Button objects that control the EV3 robot's sound
by passing messages using the given MQTT Sender.
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
# Construct the widgets on the frame:
frame_label = ttk.Label(frame, text="Sound System")
number_of_beeps_label = ttk.Label(frame, text="Number of Beeps")
tone_duration_label = ttk.Label(frame, text="Duration of Tone")
tone_frequency_label = ttk.Label(frame, text="Tone Frequency")
speak_text_label = ttk.Label(frame, text="Text to Speech")
number_of_beeps= ttk.Entry(frame, width=8)
number_of_beeps.insert(0, "10")
tone_duration = ttk.Entry(frame, width=8, justify=tkinter.RIGHT)
tone_duration.insert(0, "10")
tone_frequency = ttk.Entry(frame, width=8, justify=tkinter.RIGHT)
tone_frequency.insert(0, "10")
speak_text = ttk.Entry(frame, width=8, justify=tkinter.RIGHT)
speak_text.insert(0, "Type Here")
beep_button = ttk.Button(frame, text="Play Beeps")
tone_button = ttk.Button(frame, text="Play Tone")
speak_button = ttk.Button(frame, text="Read Text")
# Grid the widgets:
frame_label.grid(row=0, column=1)
number_of_beeps_label.grid(row=1, column=0)
tone_duration_label.grid(row=1, column=1)
tone_frequency_label.grid(row=1, column=2)
speak_text_label.grid(row=1, column=3)
number_of_beeps.grid(row=2, column=0)
tone_duration.grid(row=2, column=1)
tone_frequency.grid(row=2, column=2)
speak_text.grid(row=2, column=3)
beep_button.grid(row=3, column=0)
tone_button.grid(row=3, column=1)
speak_button.grid(row=3, column=3)
# Set the button callbacks:
beep_button["command"] = lambda: handle_beep_button(number_of_beeps.get(), mqtt_sender)
tone_button["command"] = lambda: handle_tone_button(tone_duration.get(), tone_frequency.get(), mqtt_sender)
speak_button["command"] = lambda: handle_speak_button(speak_text.get(), mqtt_sender)
return frame
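###############################################################################
# Handlers for Buttons in the Sound System frame.
###############################################################################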
def handle_beep_button(numberofbeeps,mqtt_sender):
mqtt_sender.send_message('beep_button', [str(numberofbeeps)])
print('beep_button',numberofbeeps)
def handle_tone_button(duration,frequency,mqtt_sender):
mqtt_sender.send_message('tone_button', [str(duration), str(frequency)])
print('tone_button', duration, frequency)
def handle_speak_button(text,mqtt_sender):
mqtt_sender.send_message('speak_button', [text])
print('speak_button', text)
def get_Katana_frame(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame
    has Entry and Button objects that control the EV3 robot's motion
by passing messages using the given MQTT Sender.
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
# Construct the widgets on the frame:
frame_label = ttk.Label(frame, text="Katana's System")
obtain_with_sensor_label = ttk.Label(frame, text="Get Object with Proximity")
obtain_with_sensor_button = ttk.Button(frame, text="Get")
wheel_speed_label = ttk.Label(frame, text="Move Speed")
rate_of_frequency_label = ttk.Label(frame, text="Frequency Rate (Increasing)")
rate_of_frequency = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
rate_of_frequency.insert(0, "10")
initial_frequency_label = ttk.Label(frame, text="Initial Frequency")
initial_frequency = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
initial_frequency.insert(0, "5")
obtain_with_camera_label = ttk.Label(frame, text="Get Object with Camera")
obtain_with_camera_button = ttk.Button(frame, text="Get")
wheel_speed_entry = ttk.Entry(frame, width=8)
wheel_speed_entry.insert(0, "100")
spin_speed_label = ttk.Label(frame, text="Spinning Speed")
spin_speed = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
spin_speed.insert(0, "10")
spin_direction_label = ttk.Label(frame, text="Spinning Direction")
spin_direction = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
spin_direction.insert(0, "Counter Clockwise")
# Grid the widgets:
frame_label.grid(row=0, column=2)
obtain_with_sensor_label.grid(row=1, column=1)
obtain_with_sensor_button.grid(row=2, column=1)
rate_of_frequency_label.grid(row=1,column=2)
rate_of_frequency.grid(row=2, column=2)
initial_frequency_label.grid(row=1, column=3)
initial_frequency.grid(row=2, column=3)
wheel_speed_label.grid(row=3, column=2)
wheel_speed_entry.grid(row=4, column=2)
obtain_with_camera_label.grid(row=5, column=1)
obtain_with_camera_button.grid(row=6, column=1)
spin_speed_label.grid(row=5, column=2)
spin_speed.grid(row=6, column=2)
spin_direction_label.grid(row=5, column=3)
spin_direction.grid(row=6, column=3)
# Set the button callbacks:
obtain_with_sensor_button["command"] = lambda: handle_obtain_with_sensor_button(wheel_speed_entry.get(),rate_of_frequency.get(),
initial_frequency.get(),
mqtt_sender)
obtain_with_camera_button["command"] = lambda: handle_obtain_with_camera_button(wheel_speed_entry.get(),
spin_speed.get(),
spin_direction.get(),
rate_of_frequency.get(),
initial_frequency.get(),mqtt_sender)
return frame
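###############################################################################
# Handlers for Buttons in the Katana's System frame.
###############################################################################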
def handle_obtain_with_sensor_button(wheel_speed,rate_of_frequency, initial_frequency, mqtt_sender):
print('handler')
mqtt_sender.send_message('obtain_with_sensor_button', [str(wheel_speed),str(rate_of_frequency),str(initial_frequency)])
print('obtain_with_sensor',wheel_speed, rate_of_frequency, initial_frequency)
def handle_obtain_with_camera_button(wheel_speed, spin_speed, spin_direction, rate_of_frequency, initial_frequency, mqtt_sender):
mqtt_sender.send_message('obtain_with_camera_button', [str(wheel_speed),str(spin_speed), str(spin_direction),str(rate_of_frequency),str(initial_frequency)])
print('obtain_with_camera', wheel_speed, spin_speed, spin_direction, rate_of_frequency, initial_frequency)
def get_Nick_frame(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame
    has Entry and Button objects that control the EV3 robot's motion
by passing messages using the given MQTT Sender.
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
# Construct the widgets on the frame:
frame_label = ttk.Label(frame, text="Nick's Sprint 2 System")
proximity_label = ttk.Label(frame, text="Go to and Pick up Object (Proximity)")
proximity_button = ttk.Button(frame, text="Run Proximity Grab")
camera_label = ttk.Label(frame, text="Go to and Pick up Object (Camera)")
camera_button = ttk.Button(frame, text="Run Camera Grab")
line_follower_label = ttk.Label(frame, text="Line Follower (Bang Bang Method)")
line_button = ttk.Button(frame, text="Follow Line")
rate_of_beeps_label = ttk.Label(frame, text="Beep Rate Increase")
rate_of_beeps= ttk.Entry(frame, width=8, justify=tkinter.LEFT)
rate_of_beeps.insert(0, "10")
initial_beeps_label = ttk.Label(frame, text="Initial Beep Rate")
initial_beeps = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
initial_beeps.insert(0, "5")
speed_label = ttk.Label(frame, text="Turning Speed")
speed = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
speed.insert(0, "100")
direction_label = ttk.Label(frame, text="Turning Direction, Clockwise or Counter-Clockwise")
direction = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
direction.insert(0, "Clockwise")
    starting_side_label = ttk.Label(frame, text="Starting Side, Right or Left")
starting_side = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
starting_side.insert(0, "Right")
# Grid the widgets:
frame_label.grid(row=0, column=1)
proximity_label.grid(row=1, column=0)
proximity_button.grid(row=1, column=1)
rate_of_beeps_label.grid(row=2, column=0)
rate_of_beeps.grid(row=2, column=1)
initial_beeps_label.grid(row=3, column=0)
initial_beeps.grid(row=3, column=1)
camera_label.grid(row=4, column=0)
camera_button.grid(row=4, column=1)
speed_label.grid(row=5, column=0)
speed.grid(row=5, column=1)
direction_label.grid(row=6, column=0)
direction.grid(row=6, column=1)
line_follower_label.grid(row=7, column=0)
line_button.grid(row=7, column=1)
starting_side_label.grid(row=8, column=0)
starting_side.grid(row=8, column=1)
# Set the button callbacks:
proximity_button["command"] = lambda: handle_proximity_button(rate_of_beeps.get(), initial_beeps.get(), mqtt_sender)
camera_button["command"] = lambda: handle_camera_button(speed.get(),direction.get(),rate_of_beeps.get(), initial_beeps.get(), mqtt_sender)
line_button["command"] = lambda: handle_line_button(starting_side.get(), mqtt_sender)
return frame
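###############################################################################
# Handlers for Buttons in the Nick's Sprint 2 System frame.
###############################################################################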
def handle_proximity_button(rate_of_beeps, initial_beeps, mqtt_sender):
mqtt_sender.send_message('m1proximity_button', [str(rate_of_beeps),str(initial_beeps)])
print('proximity',rate_of_beeps, initial_beeps)
def handle_camera_button(speed,direction,rate_of_beeps, initial_beeps, mqtt_sender):
mqtt_sender.send_message('m1camera_button', [str(speed), str(direction),str(rate_of_beeps),str(initial_beeps)])
print('camera', speed, direction, rate_of_beeps, initial_beeps)
def handle_line_button(starting_side, mqtt_sender):
mqtt_sender.send_message('m1line_button', [str(starting_side)])
print('line', starting_side)
##COLOR FRAMING
def get_Sprint_2_Color_frame(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame
    has Entry and Button objects that control the EV3 robot's motion
by passing messages using the given MQTT Sender.
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
# Construct the widgets on the frame:
frame_label = ttk.Label(frame, text="Color Sensor")
intensity_less_label = ttk.Label(frame, text="Go Until Intensity is Less Than")
intensity_less_button = ttk.Button(frame, text="Run Less than Intensity")
intensity_greater_label = ttk.Label(frame, text="Go Until Intensity is Greater Than")
intensity_greater_button = ttk.Button(frame, text="Run Greater than Intensity")
until_color_label = ttk.Label(frame, text="Go Until Color")
until_color_button = ttk.Button(frame, text="Run Go Until Color")
until_not_color_label = ttk.Label(frame, text="Go Until Not Color")
until_not_color_button = ttk.Button(frame, text="Run Go Until Not Color")
color_label = ttk.Label(frame, text="Color")
color= ttk.Entry(frame, width=8, justify=tkinter.LEFT)
color.insert(0, "Red")
speed_label = ttk.Label(frame, text="Speed")
speed = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
speed.insert(0, "50")
intensity_label = ttk.Label(frame, text="Light Intensity")
intensity = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
intensity.insert(0, "50")
# Grid the widgets:
frame_label.grid(row=0, column=1)
color_label.grid(row=1, column=0)
color.grid(row=2, column=0)
speed_label.grid(row=1, column=1)
speed.grid(row=2, column=1)
intensity_label.grid(row=1, column=2)
intensity.grid(row=2, column=2)
intensity_less_label.grid(row=3, column=0)
intensity_less_button.grid(row=4, column=0)
intensity_greater_label.grid(row=5, column=0)
intensity_greater_button.grid(row=6, column=0)
until_color_label.grid(row=3, column=2)
until_color_button.grid(row=4, column=2)
until_not_color_label.grid(row=5, column=2)
until_not_color_button.grid(row=6, column=2)
# Set the button callbacks:
intensity_less_button["command"] = lambda: handle_intensity_less_button(speed.get(), intensity.get(), mqtt_sender)
intensity_greater_button["command"] = lambda: handle_intensity_greater_button(speed.get(),intensity.get(), mqtt_sender)
until_color_button["command"] = lambda: handle_until_color_button(speed.get(),color.get(), mqtt_sender)
until_not_color_button["command"] = lambda: handle_until_not_color_button(speed.get(),color.get(), mqtt_sender)
return frame
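###############################################################################
# Handlers for Buttons in the Color Sensor frame.
###############################################################################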
def handle_intensity_less_button(speed, intensity, mqtt_sender):
mqtt_sender.send_message('intensity_less_button', [str(speed),str(intensity)])
print('intensity_less_button',speed, intensity)
def handle_intensity_greater_button(speed, intensity, mqtt_sender):
mqtt_sender.send_message('intensity_greater_button', [str(speed), str(intensity)])
print('intensity_greater_button', speed, intensity)
def handle_until_color_button(speed,color, mqtt_sender):
mqtt_sender.send_message('until_color_button', [str(speed),str(color)])
print('until_color_button', speed,color)
def handle_until_not_color_button(speed,color, mqtt_sender):
mqtt_sender.send_message('until_not_color_button', [str(speed),str(color)])
print('until_not_color_button', speed,color)
##PROXIMITY SENSOR
def get_Sprint_2_Proximity_frame(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame
    has Entry and Button objects that control the EV3 robot's motion
by passing messages using the given MQTT Sender.
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
# Construct the widgets on the frame:
frame_label = ttk.Label(frame, text="Proximity Sensor")
distance_less_label = ttk.Label(frame, text="Go Until Distance is Less Than")
distance_less_button = ttk.Button(frame, text="Run Less than Distance")
distance_greater_label = ttk.Label(frame, text="Go Until Distance is Greater Than")
distance_greater_button = ttk.Button(frame, text="Run Greater than Distance")
until_distance_label = ttk.Label(frame, text="Go Until Distance Within")
until_distance_button = ttk.Button(frame, text="Run Go Until Distance Within")
inches_label = ttk.Label(frame, text="Inches")
inches = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
inches.insert(0, "10")
speed_label = ttk.Label(frame, text="Speed")
speed = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
speed.insert(0, "100")
delta_label = ttk.Label(frame, text="Delta Distance")
delta = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
delta.insert(0, "50")
# Grid the widgets:
frame_label.grid(row=0, column=1)
distance_less_label.grid(row=3, column=0)
distance_less_button.grid(row=4, column=0)
distance_greater_label.grid(row=3, column=1)
distance_greater_button.grid(row=4, column=1)
until_distance_label.grid(row=3, column=2)
until_distance_button.grid(row=4, column=2)
delta_label.grid(row=1, column=0)
delta.grid(row=2, column=0)
speed_label.grid(row=1, column=1)
speed.grid(row=2, column=1)
inches_label.grid(row=1, column=2)
inches.grid(row=2, column=2)
# Set the button callbacks:
distance_greater_button["command"] = lambda: handle_distance_greater_button(speed.get(), inches.get(), mqtt_sender)
distance_less_button["command"] = lambda: handle_distance_less_button(speed.get(), inches.get(),mqtt_sender)
until_distance_button["command"] = lambda: handle_until_distance_button(speed.get(), inches.get(), delta.get(), mqtt_sender)
return frame
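###############################################################################
# Handlers for Buttons in the Proximity Sensor frame.
###############################################################################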
def handle_distance_greater_button(speed, inches, mqtt_sender):
mqtt_sender.send_message('distance_greater_button', [str(speed), str(inches)])
print('distance_greater_button', speed, inches)
def handle_distance_less_button(speed, inches,mqtt_sender):
mqtt_sender.send_message('distance_less_button', [str(speed), str(inches)])
print('distance_less_button', speed, inches)
def handle_until_distance_button(speed, inches, delta, mqtt_sender):
mqtt_sender.send_message('until_distance_button', [str(speed), str(inches),str(delta)])
print('until_distance_button', speed, inches, delta)
##Camera SENSOR
def get_Sprint_2_Camera_frame(window, mqtt_sender):
"""
Constructs and returns a frame on the given window, where the frame
    has Entry and Button objects that control the EV3 robot's motion
by passing messages using the given MQTT Sender.
:type window: ttk.Frame | ttk.Toplevel
:type mqtt_sender: com.MqttClient
"""
# Construct the frame to return:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame.grid()
# Construct the widgets on the frame:
frame_label = ttk.Label(frame, text="Camera Sensor")
counter_clockwise_label = ttk.Label(frame, text="Search Counterclockwise")
counter_clockwise_button = ttk.Button(frame, text="Run CCW Search")
    clockwise_label = ttk.Label(frame, text="Search Clockwise")
clockwise_button = ttk.Button(frame, text="Run CW Search")
speed_label = ttk.Label(frame, text="Speed")
speed = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
speed.insert(0, "100")
area_label = ttk.Label(frame, text="Area Size")
area = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
area.insert(0, "10")
# Grid the widgets:
frame_label.grid(row=0, column=1)
counter_clockwise_label.grid(row=1, column=0)
counter_clockwise_button.grid(row=2, column=0)
clockwise_label.grid(row=1, column=2)
clockwise_button.grid(row=2, column=2)
area_label.grid(row=1, column=1)
area.grid(row=2, column=1)
speed_label.grid(row=3, column=1)
speed.grid(row=4, column=1)
# Set the button callbacks:
counter_clockwise_button["command"] = lambda: handle_counter_clockwise_button(speed.get(), area.get(), mqtt_sender)
clockwise_button["command"] = lambda: handle_clockwise_button(speed.get(), area.get(), mqtt_sender)
return frame
def handle_counter_clockwise_button(speed, area, mqtt_sender):
mqtt_sender.send_message('camera_counter_clockwise_button', [str(speed), str(area)])
print('camera_counter_clockwise_button', speed, area)
def handle_clockwise_button(speed, area, mqtt_sender):
mqtt_sender.send_message('camera_clockwise_button', [str(speed), str(area)])
print('camera_clockwise_button', speed, area)
| 3,703
| 0
| 452
|
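All of the frame builders in the file above follow one pattern: construct ttk widgets, grid them, then bind each button to a lambda that reads the Entry values at click time and forwards them through mqtt_sender.send_message(topic, [string args]). A minimal sketch of that pattern, using a hypothetical 'go' topic but the same send_message interface shown above:
import tkinter
from tkinter import ttk

def get_minimal_sender_frame(window, mqtt_sender):
    """One Entry plus one Button that forwards the entered speed over MQTT."""
    frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
    frame.grid()
    speed = ttk.Entry(frame, width=8, justify=tkinter.LEFT)
    speed.insert(0, "100")
    go_button = ttk.Button(frame, text="Go")
    speed.grid(row=0, column=0)
    go_button.grid(row=0, column=1)
    # The lambda defers reading the Entry until the button is actually clicked.
    go_button["command"] = lambda: mqtt_sender.send_message("go", [str(speed.get())])
    return frame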
1825f4b23a74762f04636ac05fc56fa9252aa0dc
| 1,765
|
py
|
Python
|
segmentation_models/encoders/__init__.py
|
jmerkow/segmentation_models.pytorch
|
d33fb5ea4a66da1ed0006eaca4dbfa88aa986925
|
[
"MIT"
] | null | null | null |
segmentation_models/encoders/__init__.py
|
jmerkow/segmentation_models.pytorch
|
d33fb5ea4a66da1ed0006eaca4dbfa88aa986925
|
[
"MIT"
] | null | null | null |
segmentation_models/encoders/__init__.py
|
jmerkow/segmentation_models.pytorch
|
d33fb5ea4a66da1ed0006eaca4dbfa88aa986925
|
[
"MIT"
] | 2
|
2019-07-25T16:52:29.000Z
|
2019-08-19T17:44:46.000Z
|
import functools
import torch.utils.model_zoo as model_zoo
from ._preprocessing import preprocess_input
from .densenet import densenet_encoders
from .dpn import dpn_encoders
from .efficientnet import efficientnet_encoders
from .inceptionresnetv2 import inception_encoders
from .resnet import resnet_encoders
from .senet import senet_encoders
from .vgg import vgg_encoders
from .xception import xception_encoders
encoders = {}
encoders.update(resnet_encoders)
encoders.update(dpn_encoders)
encoders.update(vgg_encoders)
encoders.update(senet_encoders)
encoders.update(densenet_encoders)
encoders.update(inception_encoders)
encoders.update(xception_encoders)
encoders.update(efficientnet_encoders)
| 32.685185
| 116
| 0.782436
|
import functools
import torch.utils.model_zoo as model_zoo
from ._preprocessing import preprocess_input
from .densenet import densenet_encoders
from .dpn import dpn_encoders
from .efficientnet import efficientnet_encoders
from .inceptionresnetv2 import inception_encoders
from .resnet import resnet_encoders
from .senet import senet_encoders
from .vgg import vgg_encoders
from .xception import xception_encoders
encoders = {}
encoders.update(resnet_encoders)
encoders.update(dpn_encoders)
encoders.update(vgg_encoders)
encoders.update(senet_encoders)
encoders.update(densenet_encoders)
encoders.update(inception_encoders)
encoders.update(xception_encoders)
encoders.update(efficientnet_encoders)
def get_encoder(name, encoder_weights=None, model_dir=None):
Encoder = encoders[name]['encoder']
encoder = Encoder(**encoders[name]['params'])
encoder.out_shapes = encoders[name]['out_shapes']
if encoder_weights is not None:
settings = encoders[name]['pretrained_settings'][encoder_weights]
encoder.load_state_dict(model_zoo.load_url(settings['url'], model_dir=model_dir))
return encoder
def get_encoder_names():
return list(encoders.keys())
def get_preprocessing_fn(encoder_name, pretrained='imagenet'):
settings = encoders[encoder_name]['pretrained_settings']
if pretrained not in settings.keys():
        raise ValueError('Available pretrained options {}'.format(settings.keys()))
input_space = settings[pretrained].get('input_space')
input_range = settings[pretrained].get('input_range')
mean = settings[pretrained].get('mean')
std = settings[pretrained].get('std')
return functools.partial(preprocess_input, mean=mean, std=std, input_space=input_space, input_range=input_range)
| 994
| 0
| 69
|
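get_encoder and get_preprocessing_fn above are the usual entry points into this registry. A minimal usage sketch, assuming 'resnet34' is one of the keys registered by resnet_encoders and that preprocess_input takes the image array as its first argument:
import numpy as np
from segmentation_models.encoders import get_encoder, get_preprocessing_fn

# Build the encoder without downloading weights (pass encoder_weights='imagenet' to load them).
encoder = get_encoder('resnet34', encoder_weights=None)
print(encoder.out_shapes)  # channel counts of the feature maps a decoder would consume

# Normalize a dummy image the same way the pretrained weights expect.
preprocess = get_preprocessing_fn('resnet34', pretrained='imagenet')
x = preprocess(np.random.rand(224, 224, 3).astype('float32'))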
eebfc13ced21d7e77e7aebf9405251558b6a72e0
| 284
|
py
|
Python
|
exercicios/vestibular.py
|
IgoPereiraBarros/maratona-data-science-brasil
|
cc07476579134a2764f00d229d415657555dcdd1
|
[
"MIT"
] | null | null | null |
exercicios/vestibular.py
|
IgoPereiraBarros/maratona-data-science-brasil
|
cc07476579134a2764f00d229d415657555dcdd1
|
[
"MIT"
] | null | null | null |
exercicios/vestibular.py
|
IgoPereiraBarros/maratona-data-science-brasil
|
cc07476579134a2764f00d229d415657555dcdd1
|
[
"MIT"
] | null | null | null |
N = int(input())
gabarito_prova = input().split()
gabarito_aluno = input().split()
list_aluno = []
list_prova = []
count = 0
# compare each student answer with the answer key at the same position
for i in range(N):
    if gabarito_aluno[i] == gabarito_prova[i]:
        count += 1
print(count)
| 17.75
| 50
| 0.637324
|
N = int(input())
gabarito_prova = input().split()
gabarito_aluno = input().split()
list_aluno = []
list_prova = []
count = 0
# compare each student answer with the answer key at the same position
for i in range(N):
    if gabarito_aluno[i] == gabarito_prova[i]:
        count += 1
print(count)
| 0
| 0
| 0
|
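With the positional comparison above, the same score can also be computed in one line with zip; a small self-contained equivalent:
gabarito_prova = ['A', 'B', 'C', 'D']  # answer key
gabarito_aluno = ['A', 'C', 'C', 'D']  # student's answers
# Count the positions where the student's answer matches the key.
count = sum(1 for prova, aluno in zip(gabarito_prova, gabarito_aluno) if prova == aluno)
print(count)  # 3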
616305a77276e725923fc24a8acb3ff30c156348
| 194
|
py
|
Python
|
apps/contact/urls.py
|
Kpaubert/onlineweb4
|
9ac79f163bc3a816db57ffa8477ea88770d97807
|
[
"MIT"
] | 32
|
2017-02-22T13:38:38.000Z
|
2022-03-31T23:29:54.000Z
|
apps/contact/urls.py
|
Kpaubert/onlineweb4
|
9ac79f163bc3a816db57ffa8477ea88770d97807
|
[
"MIT"
] | 694
|
2017-02-15T23:09:52.000Z
|
2022-03-31T23:16:07.000Z
|
apps/contact/urls.py
|
Kpaubert/onlineweb4
|
9ac79f163bc3a816db57ffa8477ea88770d97807
|
[
"MIT"
] | 35
|
2017-09-02T21:13:09.000Z
|
2022-02-21T11:30:30.000Z
|
from django.conf.urls import url
from apps.contact import views
urlpatterns = [
url(r"^$", views.index, name="contact_index"),
url(r"^submit/", views.contact_submit, name="submit"),
]
| 21.555556
| 58
| 0.690722
|
from django.conf.urls import url
from apps.contact import views
urlpatterns = [
url(r"^$", views.index, name="contact_index"),
url(r"^submit/", views.contact_submit, name="submit"),
]
| 0
| 0
| 0
|
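django.conf.urls.url is the older regex-based routing API; on Django 2.0+ the same two routes are normally written with django.urls.path / re_path. A hedged equivalent, assuming the same views module:
from django.urls import path, re_path
from apps.contact import views

urlpatterns = [
    path("", views.index, name="contact_index"),
    # re_path keeps the original unanchored r"^submit/" behaviour.
    re_path(r"^submit/", views.contact_submit, name="submit"),
]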
60b9f934da6221160ed9c0661e0ab963f52e38f1
| 2,993
|
py
|
Python
|
src/plugins/nonebot_plugin_picsearcher/iqdb.py
|
ltyec/Kiba
|
1c7db0939151aaa46ef865638b8b347ceebb71a1
|
[
"MIT"
] | null | null | null |
src/plugins/nonebot_plugin_picsearcher/iqdb.py
|
ltyec/Kiba
|
1c7db0939151aaa46ef865638b8b347ceebb71a1
|
[
"MIT"
] | null | null | null |
src/plugins/nonebot_plugin_picsearcher/iqdb.py
|
ltyec/Kiba
|
1c7db0939151aaa46ef865638b8b347ceebb71a1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import asyncio
from typing import List, Tuple
import io
from urllib.parse import urljoin
from lxml.html import fromstring
import aiohttp
from nonebot.adapters.cqhttp import MessageSegment
from .formdata import FormData
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'zh-CN,zh;q=0.9', 'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Content-Type': 'multipart/form-data; boundary=----WebKitFormBoundaryuwjSiBcpPag4k159',
'Cookie': 'Hm_lvt_765ecde8c11b85f1ac5f168fa6e6821f=1602471368; Hm_lpvt_765ecde8c11b85f1ac5f168fa6e6821f=1602472300',
'Host': 'iqdb.org', 'Origin': 'http://iqdb.org', 'Referer': 'http://iqdb.org/', 'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'}
async def get_pic_from_url(url: str):
"""
    Return a list of result tuples
:param url:
:return:
"""
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
content = io.BytesIO(await resp.read())
data = FormData(boundary="----WebKitFormBoundaryuwjSiBcpPag4k159")
data.add_field(name="MAX_FILE_SIZE", value="")
for i in range(1, 7):
data.add_field(name="service[]", value=str(i))
data.add_field(name="service[]", value="11")
data.add_field(name="service[]", value="13")
data.add_field(name="file", value=content, content_type="application/octet-stream", filename="0.jpg")
data.add_field(name="url", value="")
async with session.post("http://iqdb.org/", data=data, headers=headers) as res:
html = await res.text()
return [i for i in parse_html(html)]
pass
async def get_des(url: str):
"""
    Return detailed descriptions (CQ code escaped)
:param url:
:return:
"""
image_data: List[Tuple] = await get_pic_from_url(url)
if not image_data:
msg: str = "找不到高相似度的"
yield msg
return
for pic in image_data:
msg = MessageSegment.image(file=pic[0]) + f"\n{pic[1]}\n"
for i in pic[2]:
msg = msg + f"{i}\n"
yield msg
| 37.4125
| 141
| 0.625459
|
# -*- coding: utf-8 -*-
import asyncio
from typing import List, Tuple
import io
from urllib.parse import urljoin
from lxml.html import fromstring
import aiohttp
from nonebot.adapters.cqhttp import MessageSegment
from .formdata import FormData
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'zh-CN,zh;q=0.9', 'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Content-Type': 'multipart/form-data; boundary=----WebKitFormBoundaryuwjSiBcpPag4k159',
'Cookie': 'Hm_lvt_765ecde8c11b85f1ac5f168fa6e6821f=1602471368; Hm_lpvt_765ecde8c11b85f1ac5f168fa6e6821f=1602472300',
'Host': 'iqdb.org', 'Origin': 'http://iqdb.org', 'Referer': 'http://iqdb.org/', 'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'}
def parse_html(html: str):
selector = fromstring(html)
for tag in selector.xpath('//div[@id="pages"]/div[position()>1]/table'):
        # the first table is the best match
if pic_url := tag.xpath('./tr[2]/td/a/img/@src'):
            pic_url = urljoin("http://iqdb.org/", pic_url[0])  # thumbnail URL
else:
pic_url = "没有最相似的"
        similarity = tag.xpath('./tr[last()]/td/text()')[0]  # similarity score
        href: List[str] = tag.xpath('./tr/td/a/@href')  # first set of links
        href.extend(tag.xpath('./tr/td/span/a/@href'))  # second set, may be empty
href = list(map(lambda x: "https:" + x if not x.startswith("https") else x, href))
yield pic_url, similarity, href
pass
async def get_pic_from_url(url: str):
"""
    Return a list of result tuples
:param url:
:return:
"""
async with aiohttp.ClientSession() as session:
async with session.get(url) as resp:
content = io.BytesIO(await resp.read())
data = FormData(boundary="----WebKitFormBoundaryuwjSiBcpPag4k159")
data.add_field(name="MAX_FILE_SIZE", value="")
for i in range(1, 7):
data.add_field(name="service[]", value=str(i))
data.add_field(name="service[]", value="11")
data.add_field(name="service[]", value="13")
data.add_field(name="file", value=content, content_type="application/octet-stream", filename="0.jpg")
data.add_field(name="url", value="")
async with session.post("http://iqdb.org/", data=data, headers=headers) as res:
html = await res.text()
return [i for i in parse_html(html)]
pass
async def get_des(url: str):
"""
    Return detailed descriptions (CQ code escaped)
:param url:
:return:
"""
image_data: List[Tuple] = await get_pic_from_url(url)
if not image_data:
msg: str = "找不到高相似度的"
yield msg
return
for pic in image_data:
msg = MessageSegment.image(file=pic[0]) + f"\n{pic[1]}\n"
for i in pic[2]:
msg = msg + f"{i}\n"
yield msg
| 704
| 0
| 23
|
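The .formdata import above is a project-local helper that pins a fixed multipart boundary; for comparison, aiohttp ships its own FormData with the same add_field interface, so a plain upload (without the fixed boundary used above) could be sketched as:
import asyncio
import aiohttp

async def upload(path: str) -> str:
    """POST a local image to iqdb.org using aiohttp's own multipart builder."""
    with open(path, "rb") as f:
        data = aiohttp.FormData()
        data.add_field("file", f, filename="0.jpg",
                       content_type="application/octet-stream")
        async with aiohttp.ClientSession() as session:
            async with session.post("http://iqdb.org/", data=data) as resp:
                return await resp.text()

# asyncio.run(upload("0.jpg"))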
4cecb8040d2df98220d60255b7529141552cd38a
| 2,703
|
py
|
Python
|
explorer/api/inner_api.py
|
AthenaExplorer/xm_s_explorer_v2
|
203f7b5d129552f5b7c977c4247d2060956f8add
|
[
"MIT"
] | null | null | null |
explorer/api/inner_api.py
|
AthenaExplorer/xm_s_explorer_v2
|
203f7b5d129552f5b7c977c4247d2060956f8add
|
[
"MIT"
] | null | null | null |
explorer/api/inner_api.py
|
AthenaExplorer/xm_s_explorer_v2
|
203f7b5d129552f5b7c977c4247d2060956f8add
|
[
"MIT"
] | 1
|
2022-03-02T19:20:52.000Z
|
2022-03-02T19:20:52.000Z
|
import datetime
from flask import request
from explorer.services.message import MessageService
from explorer.services.miner import MinerService
from explorer.services.wallets import WalletsService
from explorer.services.blocks import BlocksService
from base.utils.fil import datetime_to_height
from base.response import response_json
def get_message_list_by_height():
"""
    Get the list of Send messages at the given height
:return:
"""
height = request.form.get('height')
result = MessageService.get_message_list_by_height(height)
return response_json(result)
def get_miner_info_by_miner_no():
"""
    Get miner info for the given miner_no
:return:
"""
miner_no = request.form.get('miner_no')
date_str = request.form.get('date_str')
result = MinerService.get_miner_info_by_miner_no(miner_no, date_str)
return response_json(result)
def get_wallet_address_change():
"""
    Get the balance change for the given wallet address
:return:
"""
wallet_address = request.form.get('wallet_address')
balance_value = request.form.get('balance_value')
height = int(request.form.get('height'))
result = WalletsService.get_wallet_address_change(wallet_address, balance_value, height)
return response_json(result)
def get_is_all_wallet():
"""
    Check whether the address is a wallet
:return:
"""
address = request.form.get("address")
if not address:
return response_json(False)
result = WalletsService.get_is_all_wallet(address)
if result:
return response_json(result["value"])
return response_json(False)
def get_miner_day_list():
"""
    Daily miner data for the storage provider
:return:
"""
miner_no = request.form.get("miner_no")
date = request.form.get("date")
data = MinerService.get_miner_day_list(miner_no, date)
return response_json(data)
def get_init_value():
"""
    Get initial values of the given fields for a miner
:return:
"""
miner_no = request.form.get("miner_no")
fields = request.form.get("fields")
end_time = request.form.get("end_time")
data = MinerService.get_init_value(miner_no, fields, end_time)
return response_json(data)
def get_block_count():
"""
    Check whether there are new blocks after the given time
:return:
"""
date = request.form.get('date')
height = datetime_to_height(date)
count = BlocksService.get_tipset_block_count(start_height=height)
return response_json({"count": count})
def get_miner_increment():
"""
    Get the increment of the given key for a miner on the given date
:return:
"""
miner_no = request.form.get('miner_no')
date = request.form.get('date')
key = request.form.get('key')
if not date:
date = str(datetime.date.today() - datetime.timedelta(days=1))
result = MinerService.get_miner_increment(miner_no, date, key)
return response_json(result)
| 25.027778
| 92
| 0.699593
|
import datetime
from flask import request
from explorer.services.message import MessageService
from explorer.services.miner import MinerService
from explorer.services.wallets import WalletsService
from explorer.services.blocks import BlocksService
from base.utils.fil import datetime_to_height
from base.response import response_json
def get_message_list_by_height():
"""
    Get the list of Send messages at the given height
:return:
"""
height = request.form.get('height')
result = MessageService.get_message_list_by_height(height)
return response_json(result)
def get_miner_info_by_miner_no():
"""
    Get miner info for the given miner_no
:return:
"""
miner_no = request.form.get('miner_no')
date_str = request.form.get('date_str')
result = MinerService.get_miner_info_by_miner_no(miner_no, date_str)
return response_json(result)
def get_wallet_address_change():
"""
    Get the balance change for the given wallet address
:return:
"""
wallet_address = request.form.get('wallet_address')
balance_value = request.form.get('balance_value')
height = int(request.form.get('height'))
result = WalletsService.get_wallet_address_change(wallet_address, balance_value, height)
return response_json(result)
def get_is_all_wallet():
"""
    Check whether the address is a wallet
:return:
"""
address = request.form.get("address")
if not address:
return response_json(False)
result = WalletsService.get_is_all_wallet(address)
if result:
return response_json(result["value"])
return response_json(False)
def get_miner_day_list():
"""
    Daily miner data for the storage provider
:return:
"""
miner_no = request.form.get("miner_no")
date = request.form.get("date")
data = MinerService.get_miner_day_list(miner_no, date)
return response_json(data)
def get_init_value():
"""
    Get initial values of the given fields for a miner
:return:
"""
miner_no = request.form.get("miner_no")
fields = request.form.get("fields")
end_time = request.form.get("end_time")
data = MinerService.get_init_value(miner_no, fields, end_time)
return response_json(data)
def get_block_count():
"""
    Check whether there are new blocks after the given time
:return:
"""
date = request.form.get('date')
height = datetime_to_height(date)
count = BlocksService.get_tipset_block_count(start_height=height)
return response_json({"count": count})
def get_miner_increment():
"""
    Get the increment of the given key for a miner on the given date
:return:
"""
miner_no = request.form.get('miner_no')
date = request.form.get('date')
key = request.form.get('key')
if not date:
date = str(datetime.date.today() - datetime.timedelta(days=1))
result = MinerService.get_miner_increment(miner_no, date, key)
return response_json(result)
| 0
| 0
| 0
|
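The module above only defines the view functions; the route registration is not part of this file. One hedged way such handlers are typically wired up in Flask, with hypothetical URL rules:
from flask import Flask
from explorer.api import inner_api

app = Flask(__name__)

# Hypothetical paths; the real project may use a blueprint or different rules.
app.add_url_rule("/inner/message_list_by_height", "get_message_list_by_height",
                 inner_api.get_message_list_by_height, methods=["POST"])
app.add_url_rule("/inner/miner_day_list", "get_miner_day_list",
                 inner_api.get_miner_day_list, methods=["POST"])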
d2832ef568cd65320be37ac0297b38c2de6deae9
| 3,743
|
py
|
Python
|
starter_code/student_utils.py
|
ykhiari/Patient-Selection-for-Diabetes-Drug-Testing
|
ce8e698bff4cbf5a9319607404edada539c5c099
|
[
"MIT"
] | null | null | null |
starter_code/student_utils.py
|
ykhiari/Patient-Selection-for-Diabetes-Drug-Testing
|
ce8e698bff4cbf5a9319607404edada539c5c099
|
[
"MIT"
] | null | null | null |
starter_code/student_utils.py
|
ykhiari/Patient-Selection-for-Diabetes-Drug-Testing
|
ce8e698bff4cbf5a9319607404edada539c5c099
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import os
import tensorflow as tf
from functools import partial
####### STUDENTS FILL THIS OUT ######
#Question 3
def reduce_dimension_ndc(df, ndc_code_df):
'''
df: pandas dataframe, input dataset
ndc_df: pandas dataframe, drug code dataset used for mapping in generic names
return:
df: pandas dataframe, output dataframe with joined generic drug name
'''
mapping = dict(ndc_code_df[['NDC_Code', 'Non-proprietary Name']].values)
mapping['nan'] = np.nan
df['generic_drug_name'] = df['ndc_code'].astype(str).apply(lambda x : mapping[x])
return df
#Question 4
def select_first_encounter(df):
'''
df: pandas dataframe, dataframe with all encounters
return:
- first_encounter_df: pandas dataframe, dataframe with only the first encounter for a given patient
'''
df.sort_values(by = 'encounter_id')
first_encounters = df.groupby('patient_nbr')['encounter_id'].first().values
first_encounter_df = df[df['encounter_id'].isin(first_encounters)]
return first_encounter_df
#Question 6
#Question 7
def create_tf_categorical_feature_cols(categorical_col_list,
vocab_dir='./diabetes_vocab/'):
'''
categorical_col_list: list, categorical field list that will be transformed with TF feature column
vocab_dir: string, the path where the vocabulary text files are located
return:
output_tf_list: list of TF feature columns
'''
output_tf_list = []
for c in categorical_col_list:
vocab_file_path = os.path.join(vocab_dir, c + "_vocab.txt")
vocab = tf.feature_column.categorical_column_with_vocabulary_file(key=c,
vocabulary_file = vocab_file_path,
num_oov_buckets=1)
tf_categorical_feature_column = tf.feature_column.indicator_column(vocab)
output_tf_list.append(tf_categorical_feature_column)
return output_tf_list
#Question 8
def normalize_numeric_with_zscore(col, mean, std):
'''
This function can be used in conjunction with the tf feature column for normalization
'''
return (col - mean)/std
def create_tf_numeric_feature(col, MEAN, STD, default_value=0):
'''
col: string, input numerical column name
MEAN: the mean for the column in the training data
STD: the standard deviation for the column in the training data
default_value: the value that will be used for imputing the field
return:
tf_numeric_feature: tf feature column representation of the input field
'''
normalizer_fn = lambda col, m, s : (col - m) / s
normalizer = partial(normalizer_fn, m = MEAN, s = STD)
tf_numeric_feature = tf.feature_column.numeric_column(col, normalizer_fn = normalizer, dtype = tf.float64,
default_value = default_value)
return tf_numeric_feature
#Question 9
def get_mean_std_from_preds(diabetes_yhat):
'''
diabetes_yhat: TF Probability prediction object
'''
m = diabetes_yhat.mean()
s = diabetes_yhat.stddev()
return m, s
# Question 10
def get_student_binary_prediction(df, col):
'''
df: pandas dataframe prediction output dataframe
col: str, probability mean prediction field
return:
student_binary_prediction: pandas dataframe converting input to flattened numpy array and binary labels
'''
student_binary_prediction = df[col].apply(lambda x : 1 if x >= 5 else 0)
return student_binary_prediction
| 36.339806
| 111
| 0.678333
|
import pandas as pd
import numpy as np
import os
import tensorflow as tf
from functools import partial
####### STUDENTS FILL THIS OUT ######
#Question 3
def reduce_dimension_ndc(df, ndc_code_df):
'''
df: pandas dataframe, input dataset
ndc_df: pandas dataframe, drug code dataset used for mapping in generic names
return:
df: pandas dataframe, output dataframe with joined generic drug name
'''
mapping = dict(ndc_code_df[['NDC_Code', 'Non-proprietary Name']].values)
mapping['nan'] = np.nan
df['generic_drug_name'] = df['ndc_code'].astype(str).apply(lambda x : mapping[x])
return df
#Question 4
def select_first_encounter(df):
'''
df: pandas dataframe, dataframe with all encounters
return:
- first_encounter_df: pandas dataframe, dataframe with only the first encounter for a given patient
'''
df.sort_values(by = 'encounter_id')
first_encounters = df.groupby('patient_nbr')['encounter_id'].first().values
first_encounter_df = df[df['encounter_id'].isin(first_encounters)]
return first_encounter_df
#Question 6
def patient_dataset_splitter(df, PREDICTOR_FIELD, patient_key='patient_nbr'):
pass
#Question 7
def create_tf_categorical_feature_cols(categorical_col_list,
vocab_dir='./diabetes_vocab/'):
'''
categorical_col_list: list, categorical field list that will be transformed with TF feature column
vocab_dir: string, the path where the vocabulary text files are located
return:
output_tf_list: list of TF feature columns
'''
output_tf_list = []
for c in categorical_col_list:
vocab_file_path = os.path.join(vocab_dir, c + "_vocab.txt")
vocab = tf.feature_column.categorical_column_with_vocabulary_file(key=c,
vocabulary_file = vocab_file_path,
num_oov_buckets=1)
tf_categorical_feature_column = tf.feature_column.indicator_column(vocab)
output_tf_list.append(tf_categorical_feature_column)
return output_tf_list
#Question 8
def normalize_numeric_with_zscore(col, mean, std):
'''
This function can be used in conjunction with the tf feature column for normalization
'''
return (col - mean)/std
def create_tf_numeric_feature(col, MEAN, STD, default_value=0):
'''
col: string, input numerical column name
MEAN: the mean for the column in the training data
STD: the standard deviation for the column in the training data
default_value: the value that will be used for imputing the field
return:
tf_numeric_feature: tf feature column representation of the input field
'''
normalizer_fn = lambda col, m, s : (col - m) / s
normalizer = partial(normalizer_fn, m = MEAN, s = STD)
tf_numeric_feature = tf.feature_column.numeric_column(col, normalizer_fn = normalizer, dtype = tf.float64,
default_value = default_value)
return tf_numeric_feature
#Question 9
def get_mean_std_from_preds(diabetes_yhat):
'''
diabetes_yhat: TF Probability prediction object
'''
m = diabetes_yhat.mean()
s = diabetes_yhat.stddev()
return m, s
# Question 10
def get_student_binary_prediction(df, col):
'''
df: pandas dataframe prediction output dataframe
col: str, probability mean prediction field
return:
student_binary_prediction: pandas dataframe converting input to flattened numpy array and binary labels
'''
student_binary_prediction = df[col].apply(lambda x : 1 if x >= 5 else 0)
return student_binary_prediction
| 65
| 0
| 22
|
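create_tf_numeric_feature above uses functools.partial to freeze the training-set mean and standard deviation into the normalizer before handing it to the TF feature column. The same idea in isolation, with made-up statistics:
from functools import partial
import numpy as np

def z_score(col, m, s):
    """Standardize a column given a precomputed mean and standard deviation."""
    return (col - m) / s

# Hypothetical statistics computed from the training split only.
normalizer = partial(z_score, m=40.0, s=12.5)
print(normalizer(np.array([27.5, 40.0, 65.0])))  # -> [-1.  0.  2.]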
455bb28e06d1af0c76eed9c4216798bb512e32a8
| 1,115
|
py
|
Python
|
snake.py
|
junio-firmino/snake_game
|
fd78695406a55d057c2db54a126f46d552e77865
|
[
"MIT"
] | null | null | null |
snake.py
|
junio-firmino/snake_game
|
fd78695406a55d057c2db54a126f46d552e77865
|
[
"MIT"
] | null | null | null |
snake.py
|
junio-firmino/snake_game
|
fd78695406a55d057c2db54a126f46d552e77865
|
[
"MIT"
] | null | null | null |
from turtle import Screen
import time
from food import Food
from snake_shape import Snake
from snake_scoreboard import Scoreboard
screen = Screen()
screen.setup(width=600, height=600)
screen.bgcolor('black')
screen.title('My snake game for desktop.')
screen.tracer(0)
snake = Snake()
food = Food()
scoreboard = Scoreboard()
screen.listen()
screen.onkey(snake.up, "Up")
screen.onkey(snake.down, "Down")
screen.onkey(snake.left, "Left")
screen.onkey(snake.right, "Right")
game_is_on = True
while game_is_on:
screen.update()
time.sleep(0.1)
snake.move()
if snake.segment[0].distance(food) < 15:
food.refresh()
snake.extend()
scoreboard.increase_score()
if snake.segment[0].xcor() > 280 or snake.segment[0].xcor() < -280 or snake.segment[0].ycor() > 255 or snake.segment[0].ycor() < -280:
scoreboard.game_over()
game_is_on = False
for segment in snake.segment[1:]:
if snake.segment[0].distance(segment) < 10:
game_is_on = False
scoreboard.game_over()
screen.exitonclick()
| 25.340909
| 139
| 0.650224
|
from turtle import Screen
import time
from food import Food
from snake_shape import Snake
from snake_scoreboard import Scoreboard
screen = Screen()
screen.setup(width=600, height=600)
screen.bgcolor('black')
screen.title('My snake game for desktop.')
screen.tracer(0)
snake = Snake()
food = Food()
scoreboard = Scoreboard()
screen.listen()
screen.onkey(snake.up, "Up")
screen.onkey(snake.down, "Down")
screen.onkey(snake.left, "Left")
screen.onkey(snake.right, "Right")
game_is_on = True
while game_is_on:
screen.update()
time.sleep(0.1)
snake.move()
if snake.segment[0].distance(food) < 15:
food.refresh()
snake.extend()
scoreboard.increase_score()
if snake.segment[0].xcor() > 280 or snake.segment[0].xcor() < -280 or snake.segment[0].ycor() > 255 or snake.segment[0].ycor() < -280:
scoreboard.game_over()
game_is_on = False
for segment in snake.segment[1:]:
if snake.segment[0].distance(segment) < 10:
game_is_on = False
scoreboard.game_over()
screen.exitonclick()
| 0
| 0
| 0
|
ce7497caf65ff408828c9a4485a76cb9d8da0b6f
| 720
|
py
|
Python
|
tests/test_callouts.py
|
michalporeba/callouts
|
44d3e8bdbb1bc012a1a354f438f48b1d9a372c66
|
[
"MIT"
] | null | null | null |
tests/test_callouts.py
|
michalporeba/callouts
|
44d3e8bdbb1bc012a1a354f438f48b1d9a372c66
|
[
"MIT"
] | null | null | null |
tests/test_callouts.py
|
michalporeba/callouts
|
44d3e8bdbb1bc012a1a354f438f48b1d9a372c66
|
[
"MIT"
] | null | null | null |
from .sample.aplugin import APlugin
from .sample.apluginimpl import *
from .sample.bplugin import BPlugin
from .sample.bpluginimpl import *
| 27.692308
| 64
| 0.726389
|
from .sample.aplugin import APlugin
from .sample.apluginimpl import *
from .sample.bplugin import BPlugin
from .sample.bpluginimpl import *
def test_query_first_in_a():
assert 'a second' == APlugin.get_name()
def test_query_first_in_b():
assert 'b first' == BPlugin.get_name()
def test_return_default_value_if_not_implemented():
assert 'default value' == APlugin.get_not_once_implemented()
def test_return_all_in_a():
assert [1,2,3] == APlugin.get_id()
def test_return_all_in_b():
assert [None, None] == BPlugin.get_id()
def test_return_many_in_a():
assert [10, 11, 12, 'and more'] == APlugin.get_many()
def test_parameter_passing():
assert [9, 24] == BPlugin.do_your_maths(2,3,4)
| 415
| 0
| 165
|
8db4e0a2b4356561beec71bf632f2ecd064965c3
| 5,060
|
py
|
Python
|
tests/test_nelson_siegel_svensson.py
|
luphord/nelson_siegel_svensson
|
e2437a9bf924d6cd54181de018ed8af8214a6055
|
[
"MIT"
] | 55
|
2019-02-13T21:23:43.000Z
|
2022-03-19T13:15:35.000Z
|
tests/test_nelson_siegel_svensson.py
|
musacan1300/nelson_siegel_svensson
|
b5c652f5f6d134457571467055fa12cd7df57213
|
[
"MIT"
] | 10
|
2019-04-06T12:46:33.000Z
|
2022-03-30T13:00:27.000Z
|
tests/test_nelson_siegel_svensson.py
|
musacan1300/nelson_siegel_svensson
|
b5c652f5f6d134457571467055fa12cd7df57213
|
[
"MIT"
] | 33
|
2019-05-20T22:42:22.000Z
|
2022-02-03T12:20:26.000Z
|
# -*- coding: utf-8 -*-
import unittest
import os
import json
from dataclasses import asdict
import numpy as np
import click
from click.testing import CliRunner
from nelson_siegel_svensson import cli, NelsonSiegelCurve, \
NelsonSiegelSvenssonCurve
class TestNelson_siegel_svensson(unittest.TestCase):
'''Tests for `nelson_siegel_svensson` CLI.'''
def test_command_line_interface(self):
'''Test the CLI.'''
result = self.runner.invoke(cli.cli_main)
self.assertEqual(0, result.exit_code)
help_result = self.runner.invoke(cli.cli_main, ['--help'])
self.assertEqual(0, help_result.exit_code)
self.assertIn('--help Show this message and exit.',
help_result.output)
def test_cli_evaluate(self):
'''Test evaluate CLI.'''
param = ['evaluate', '-c',
'{"beta0": 0.017, "beta1": -0.023, "beta2": 0.24, "tau": 2}',
'-t', '[1,2,3]']
result = self.runner.invoke(cli.cli_main, param)
self.assertEqual(0, result.exit_code)
self.assertIn('0.0758359', result.output)
def test_cli_calibrate(self):
'''Test calibrate CLI.'''
param = ['calibrate', '-t', json.dumps(self.t),
'-y', json.dumps(self.y)]
result = self.runner.invoke(cli.cli_main, param)
self.assertEqual(0, result.exit_code)
self.assertIn('0.0451', result.output)
first_output = result.output
result = self.runner.invoke(cli.cli_main,
param + ['--nelson-siegel-svensson'])
self.assertEqual(0, result.exit_code)
self.assertEqual(first_output, result.output)
result = self.runner.invoke(cli.cli_main,
param + ['--nelson-siegel'])
self.assertEqual(0, result.exit_code)
self.assertIn('0.0425', result.output)
result = self.runner.invoke(cli.cli_main,
param + ['--nelson-siegel',
'--initial-tau1', '1.234'])
self.assertEqual(0, result.exit_code)
self.assertIn('0.04179', result.output)
def test_cli_plot(self):
'''Test plot CLI.'''
fname = 'output.png'
param = ['plot', '-o', fname, '-c',
'{"beta0": 0.017, "beta1": -0.023, "beta2": 0.24, "tau": 2}']
with self.runner.isolated_filesystem():
result = self.runner.invoke(cli.cli_main, param)
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.exists(fname), fname + ' missing')
result = self.runner.invoke(cli.cli_main,
param + ['-f', '10', '-t', '20'])
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.exists(fname), fname + ' missing')
def test_curve_parameters(self):
'''Test curve parameter.'''
param = cli.Curve()
self.assertRaises(click.BadParameter, param.convert,
value='', param=None, ctx=None)
self.assertRaises(click.BadParameter, param.convert,
value='{}', param=None, ctx=None)
missing_tau = '{"beta0": 0.017, "beta1": -0.023, "beta2": 0.24}'
self.assertRaises(click.BadParameter, param.convert,
value=missing_tau, param=None, ctx=None)
self.assertEqual(self.y1,
param.convert(json.dumps(asdict(self.y1)),
None, None))
self.assertEqual(self.y2,
param.convert(json.dumps(asdict(self.y2)),
None, None))
def test_float_array_parameters(self):
'''Test float array parameter.'''
param = cli.FloatArray()
self.assertRaises(click.BadParameter, param.convert,
value='', param=None, ctx=None)
self.assertRaises(click.BadParameter, param.convert,
value='{"a": 1}', param=None, ctx=None)
self.assertRaises(click.BadParameter, param.convert,
value='["a"]', param=None, ctx=None)
self.assertEqual(np.array([1.0]),
param.convert('[1.0]', None, None))
self.assertEqual(np.array([1.0]),
param.convert('[1]', None, None))
self.assertEqual([],
param.convert('[]', None, None).tolist())
self.assertTrue((np.array([1.0, 2.0, 3.0]) ==
param.convert('[1, 2,3.0]', None, None)).all())
| 43.62069
| 79
| 0.538933
|
# -*- coding: utf-8 -*-
import unittest
import os
import json
from dataclasses import asdict
import numpy as np
import click
from click.testing import CliRunner
from nelson_siegel_svensson import cli, NelsonSiegelCurve, \
NelsonSiegelSvenssonCurve
class TestNelson_siegel_svensson(unittest.TestCase):
'''Tests for `nelson_siegel_svensson` CLI.'''
def setUp(self):
self.y1 = NelsonSiegelCurve(0.017, -0.023, 0.24, 2.2)
self.y2 = NelsonSiegelSvenssonCurve(0.017, -0.023, 0.24, 0.1, 2.2, 3.1)
self.t = [0.0, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0,
10.0, 15.0, 20.0, 25.0, 30.0]
self.y = [0.01, 0.011, 0.013, 0.016, 0.019, 0.021,
0.026, 0.03, 0.035, 0.037, 0.038, 0.04]
self.runner = CliRunner()
def test_command_line_interface(self):
'''Test the CLI.'''
result = self.runner.invoke(cli.cli_main)
self.assertEqual(0, result.exit_code)
help_result = self.runner.invoke(cli.cli_main, ['--help'])
self.assertEqual(0, help_result.exit_code)
self.assertIn('--help Show this message and exit.',
help_result.output)
def test_cli_evaluate(self):
'''Test evaluate CLI.'''
param = ['evaluate', '-c',
'{"beta0": 0.017, "beta1": -0.023, "beta2": 0.24, "tau": 2}',
'-t', '[1,2,3]']
result = self.runner.invoke(cli.cli_main, param)
self.assertEqual(0, result.exit_code)
self.assertIn('0.0758359', result.output)
def test_cli_calibrate(self):
'''Test calibrate CLI.'''
param = ['calibrate', '-t', json.dumps(self.t),
'-y', json.dumps(self.y)]
result = self.runner.invoke(cli.cli_main, param)
self.assertEqual(0, result.exit_code)
self.assertIn('0.0451', result.output)
first_output = result.output
result = self.runner.invoke(cli.cli_main,
param + ['--nelson-siegel-svensson'])
self.assertEqual(0, result.exit_code)
self.assertEqual(first_output, result.output)
result = self.runner.invoke(cli.cli_main,
param + ['--nelson-siegel'])
self.assertEqual(0, result.exit_code)
self.assertIn('0.0425', result.output)
result = self.runner.invoke(cli.cli_main,
param + ['--nelson-siegel',
'--initial-tau1', '1.234'])
self.assertEqual(0, result.exit_code)
self.assertIn('0.04179', result.output)
def test_cli_plot(self):
'''Test plot CLI.'''
fname = 'output.png'
param = ['plot', '-o', fname, '-c',
'{"beta0": 0.017, "beta1": -0.023, "beta2": 0.24, "tau": 2}']
with self.runner.isolated_filesystem():
result = self.runner.invoke(cli.cli_main, param)
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.exists(fname), fname + ' missing')
result = self.runner.invoke(cli.cli_main,
param + ['-f', '10', '-t', '20'])
self.assertEqual(0, result.exit_code)
self.assertTrue(os.path.exists(fname), fname + ' missing')
def test_curve_parameters(self):
'''Test curve parameter.'''
param = cli.Curve()
self.assertRaises(click.BadParameter, param.convert,
value='', param=None, ctx=None)
self.assertRaises(click.BadParameter, param.convert,
value='{}', param=None, ctx=None)
missing_tau = '{"beta0": 0.017, "beta1": -0.023, "beta2": 0.24}'
self.assertRaises(click.BadParameter, param.convert,
value=missing_tau, param=None, ctx=None)
self.assertEqual(self.y1,
param.convert(json.dumps(asdict(self.y1)),
None, None))
self.assertEqual(self.y2,
param.convert(json.dumps(asdict(self.y2)),
None, None))
def test_float_array_parameters(self):
'''Test float array parameter.'''
param = cli.FloatArray()
self.assertRaises(click.BadParameter, param.convert,
value='', param=None, ctx=None)
self.assertRaises(click.BadParameter, param.convert,
value='{"a": 1}', param=None, ctx=None)
self.assertRaises(click.BadParameter, param.convert,
value='["a"]', param=None, ctx=None)
self.assertEqual(np.array([1.0]),
param.convert('[1.0]', None, None))
self.assertEqual(np.array([1.0]),
param.convert('[1]', None, None))
self.assertEqual([],
param.convert('[]', None, None).tolist())
self.assertTrue((np.array([1.0, 2.0, 3.0]) ==
param.convert('[1, 2,3.0]', None, None)).all())
| 389
| 0
| 27
|
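For reference, the four-parameter curve the tests build as NelsonSiegelCurve(beta0, beta1, beta2, tau) follows the standard Nelson-Siegel form y(t) = beta0 + beta1*(1-exp(-t/tau))/(t/tau) + beta2*((1-exp(-t/tau))/(t/tau) - exp(-t/tau)). A small numpy sketch of a direct evaluation, independent of the package's own implementation; at t=3 it reproduces the 0.0758359 value the evaluate test asserts above:
import numpy as np

def nelson_siegel(t, beta0, beta1, beta2, tau):
    """Evaluate the Nelson-Siegel yield curve at maturities t > 0 (in years)."""
    x = np.asarray(t, dtype=float) / tau
    loading = (1.0 - np.exp(-x)) / x
    return beta0 + beta1 * loading + beta2 * (loading - np.exp(-x))

print(nelson_siegel([1.0, 2.0, 3.0], 0.017, -0.023, 0.24, 2.0))
# ~[0.0423, 0.0659, 0.0758] -- the last value matches 0.0758359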
05b83b0f032b96f78decc11085191dfb8bd3a7f2
| 2,114
|
py
|
Python
|
Data Management Tools/reproject_shp.py
|
MBoustani/Geothon
|
07a499d4ac0bb767677cd59b301022ad2ab16136
|
[
"Apache-2.0"
] | 60
|
2015-01-12T08:36:19.000Z
|
2021-12-20T11:06:25.000Z
|
Data Management Tools/reproject_shp.py
|
MBoustani/Geothon
|
07a499d4ac0bb767677cd59b301022ad2ab16136
|
[
"Apache-2.0"
] | null | null | null |
Data Management Tools/reproject_shp.py
|
MBoustani/Geothon
|
07a499d4ac0bb767677cd59b301022ad2ab16136
|
[
"Apache-2.0"
] | 20
|
2015-02-20T03:05:17.000Z
|
2021-12-27T16:18:45.000Z
|
#!/usr/bin/env python
'''
Project: Geothon (https://github.com/MBoustani/Geothon)
File: Vector/reproject_shp.py
Description: This code reprojects Shapefile.
Author: Maziyar Boustani (github.com/MBoustani)
'''
import os
try:
import ogr
except ImportError:
from osgeo import ogr
try:
import osr
except ImportError:
from osgeo import osr
#an example input shapefile file.
in_shp_file = '../static_files/shapefile/rivers_lake_centerlines/ne_50m_rivers_lake_centerlines.shp'
#set the driver to ESRI Shapefile
driver = ogr.GetDriverByName('ESRI Shapefile')
#open input shapefile
in_shp_datasource = driver.Open(in_shp_file)
#get input shapefile layer
in_layer = in_shp_datasource.GetLayerByIndex(0)
#get input shapefile geometry
in_geom_type = in_layer.GetGeomType()
#get input shapefile spatial reference
source = in_layer.GetSpatialRef()
#create spatial reference for output shapefile
target = osr.SpatialReference()
#in this case NAD83(HARN) / California zone 4
target.ImportFromEPSG(2873)
#create a trasnform from source to target
transform = osr.CoordinateTransformation(source, target)
#output shapefile name
out_shp = 'reprojected.shp'
#output shapefile layer name
out_layer_name = 'shp_layer'
#create output shapefile data_source(file)
if os.path.exists(out_shp):
driver.DeleteDataSource(out_shp)
data_source = driver.CreateDataSource(out_shp)
#define output shapefile layer
out_layer = data_source.CreateLayer(out_layer_name, target, in_geom_type)
#get input shapefile layer definition
in_layer_defn = in_layer.GetLayerDefn()
num_field_col = in_layer_defn.GetFieldCount()
for each in range(num_field_col):
field = in_layer_defn.GetFieldDefn(each)
out_layer.CreateField(field)
#get input shapefile number of features
in_num_feature = in_layer.GetFeatureCount()
for feature in range(in_num_feature):
in_feature = in_layer.GetFeature(feature)
in_geom = in_feature.GetGeometryRef()
in_geom.Transform(transform)
feature = ogr.Feature(out_layer.GetLayerDefn())
feature.SetGeometry(in_geom)
out_layer.CreateFeature(feature)
| 27.102564
| 100
| 0.784295
|
#!/usr/bin/env python
'''
Project: Geothon (https://github.com/MBoustani/Geothon)
File: Vector/reproject_shp.py
Description: This code reprojects Shapefile.
Author: Maziyar Boustani (github.com/MBoustani)
'''
import os
try:
import ogr
except ImportError:
from osgeo import ogr
try:
import osr
except ImportError:
from osgeo import osr
#an example input shapefile file.
in_shp_file = '../static_files/shapefile/rivers_lake_centerlines/ne_50m_rivers_lake_centerlines.shp'
#set the driver to ESRI Shapefile
driver = ogr.GetDriverByName('ESRI Shapefile')
#open input shapefile
in_shp_datasource = driver.Open(in_shp_file)
#get input shapefile layer
in_layer = in_shp_datasource.GetLayerByIndex(0)
#get input shapefile geometry
in_geom_type = in_layer.GetGeomType()
#get input shapefile spatial reference
source = in_layer.GetSpatialRef()
#create spatial reference for output shapefile
target = osr.SpatialReference()
#in this case NAD83(HARN) / California zone 4
target.ImportFromEPSG(2873)
#create a trasnform from source to target
transform = osr.CoordinateTransformation(source, target)
#output shapefile name
out_shp = 'reprojected.shp'
#output shapefile layer name
out_layer_name = 'shp_layer'
#create output shapefile data_source(file)
if os.path.exists(out_shp):
driver.DeleteDataSource(out_shp)
data_source = driver.CreateDataSource(out_shp)
#define output shapefile layer
out_layer = data_source.CreateLayer(out_layer_name, target, in_geom_type)
#get input shapefile layer definition
in_layer_defn = in_layer.GetLayerDefn()
num_field_col = in_layer_defn.GetFieldCount()
for each in range(num_field_col):
field = in_layer_defn.GetFieldDefn(each)
out_layer.CreateField(field)
#get input shapefile number of features
in_num_feature = in_layer.GetFeatureCount()
for feature in range(in_num_feature):
in_feature = in_layer.GetFeature(feature)
in_geom = in_feature.GetGeometryRef()
in_geom.Transform(transform)
feature = ogr.Feature(out_layer.GetLayerDefn())
feature.SetGeometry(in_geom)
out_layer.CreateFeature(feature)
| 0
| 0
| 0
|
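One detail worth noting for scripts like the one above: the OGR Python bindings only guarantee the new Shapefile is flushed to disk once the output DataSource is released, so such a script usually ends by dropping the handles:
# Release OGR handles so the new Shapefile is flushed to disk before the script exits.
out_layer = None
data_source = None
in_shp_datasource = None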
7f9859507d041ae77a30f1cf80437265e112aec8
| 23,500
|
py
|
Python
|
environmentalSoundClassification/audioProcessingUtil.py
|
amogh3892/Environmental-sound-recognition-using-combination-of-spectrogram-and-acoustic-features
|
448efaa2e1954e0f74602dc5c5aba95ba69ecfcd
|
[
"Apache-2.0"
] | 21
|
2017-06-21T01:28:04.000Z
|
2022-03-24T03:23:01.000Z
|
environmentalSoundClassification/audioProcessingUtil.py
|
amogh3892/Environmental-sound-recognition-using-combination-of-spectrogram-and-acoustic-features
|
448efaa2e1954e0f74602dc5c5aba95ba69ecfcd
|
[
"Apache-2.0"
] | 1
|
2018-03-20T20:02:45.000Z
|
2018-03-20T20:02:45.000Z
|
environmentalSoundClassification/audioProcessingUtil.py
|
amogh3892/Environmental-sound-recognition-using-combination-of-spectrogram-and-acoustic-features
|
448efaa2e1954e0f74602dc5c5aba95ba69ecfcd
|
[
"Apache-2.0"
] | 3
|
2019-06-25T17:41:11.000Z
|
2021-05-06T01:04:26.000Z
|
import numpy as np
import librosa
from scipy import interpolate
import pywt
from matplotlib.image import imsave
from scipy.signal import butter, lfilter, freqz
from matplotlib import pyplot as plt
from imageProcessingUtil import ImageProcessing
import SimpleITK as sitk
| 30.961792
| 180
| 0.567787
|
import numpy as np
import librosa
from scipy import interpolate
import pywt
from matplotlib.image import imsave
from scipy.signal import butter, lfilter, freqz
from matplotlib import pyplot as plt
from imageProcessingUtil import ImageProcessing
import SimpleITK as sitk
class AudioProcessing(object):
def __init__(self):
pass
@staticmethod
def read(absFilePath,sr=None):
"""
Reading audio
:param absFilePath: Absolute File Path
:param sr: Sampling rate of audio to be read (If None, original sampling rate is considered)
:return: audio samples,
"""
data,fs = librosa.load(absFilePath,sr=sr)
return data,fs
@staticmethod
def writeAsWav(data,sr,filename):
"""
Write .wav files
:param data: audio data
:param sr: sampling rate
:param filename: filename to be saved
:return: None
"""
if filename is None or sr is None or data is None :
return "Please provid arguements as writeAsWav(data,sr,filename)"
if "wav" not in filename:
return "Only wav files!"
filename_split = filename.rsplit(".",1)
filename = filename_split[0]
filetype = filename_split[1].lower()
data = AudioProcessing.rescaleAmplitude(data)
librosa.output.write_wav("{}.{}".format(filename,filetype),data,sr)
@staticmethod
def generateSineWave(amp,f,phi,fs):
"""
Generating a simple sine wave
:param amp: Amplitude
:param f: Frequency
:param phi: Phase
:param fs: Frequency sampling rate
:return: Sine wave signal
"""
# considering 5 time periodics
t = np.arange(0,10.0/f,1.0/fs)
x = amp*np.cos(2*np.pi*f*t + phi)
return(t,x)
@staticmethod
def convert_to_mono(x):
"""
Convert multi channel sounds to mono channel
:param x: audio data
:return: mono channel (audio data)
"""
if x.ndim > 1:
return librosa.to_mono(x)
return x
@staticmethod
def DFT(data,N,fs,start_time = 0.0):
"""
calculating N point DFT
:param data: audio data
:param N: N point DFT
:param fs: sampling frequency
:return:
"""
data = AudioProcessing.convert_to_mono(data)
size = data.size
new_data = np.zeros(N)
if size < N:
diff = N - size
new_data[:size] = data
else:
new_data = data[start_time*fs:start_time*fs+N]
hanning = np.hanning(N)
new_data = new_data*hanning
print("Calculating DFT for {} ms window with start time {} sec".format(N*1000/float(fs),start_time))
nv = np.arange(N)
kv = np.arange(N)
nv = np.arange(-N/2.0,N/2.0)
kv = np.arange(-N/2.0,N/2.0)
X = np.array([])
# Calculating the DFT of the cropped signal
for k in kv:
s = np.exp(1j*2*np.pi*k/N*nv)
X = np.append(X,sum(new_data*np.conjugate(s)))
X = np.abs(X)
frequency_axis = kv*fs/N
return (frequency_axis,X)
@staticmethod
def resampleAudio(data,fs,new_fs):
"""
Resampling audio to a different sampling rate
:param data: audio data
:param fs: old sampling rate
:param new_fs: new sampling rate
:return: resampled audio
"""
print("Resampling from {} to {} hz".format(fs,new_fs))
fs = float(fs)
new_fs = float(new_fs)
data = AudioProcessing.convert_to_mono(data)
size = data.size
old_time_axis = np.arange(size)/fs
total_time = old_time_axis[-1]
total_samples = round(total_time*new_fs)
new_time_axis = np.arange(total_samples)/new_fs
f = interpolate.interp1d(old_time_axis,data)
new_data = f(new_time_axis)
return new_data
@staticmethod
def rescaleAmplitude(data,scale_range = (-1,1)):
"""
rescaling an array to a particlar range
:param data: Any array
:param scale_range: The range to which rescaling has to be done
:return: rescaled array
"""
mini = np.min(data)
maxi = np.max(data)
new_min = scale_range[0]
new_max = scale_range[1]
new_data = ((new_max - new_min)*(data - mini)/(maxi - mini)) + new_min
return new_data
@staticmethod
def get_entropy(X):
"""
:param X: Input array
:return: Entropy of the input array
"""
probs = [np.mean(X == c) for c in set(X)]
return np.sum(-p * np.log2(p) for p in probs)
@staticmethod
def denoise_by_wavelets(audio,wavelet = 'dmey',threshold = 9):
"""
Audio denoising by using wavelet packet decomposition
Steps 1) Wavelet Packet decomposition 2) Thresholding 3) Reconstruction of wavelet packet decomposition.
:param audio:
:param wavelet:
:param threshold: Threshold used to remove noise (Actual threshold = threshold*std of
lowest level detail coefficients of the tree of wavelet packet decomposition)
:return: Denoised audio
"""
wp = pywt.WaveletPacket(data=audio, wavelet=wavelet, mode='symmetric')
new_wp = pywt.WaveletPacket(data=None, wavelet=wavelet, mode='symmetric')
ld = wp['d'].data
threshold = threshold*np.std(ld)
print("Denoising using wavelets for {} levels ... This may take a while".format(wp.maxlevel))
for i in range(wp.maxlevel):
paths = [node.path for node in wp.get_level(i+1, 'natural')]
for path in paths:
new_wp[path] = pywt.threshold(wp[path].data,threshold)
new_wp.reconstruct(update=True)
return new_wp.data
@staticmethod
def get_stft(data,n_fft,win_length,hop_length):
"""
Compute Short Time Fourier Transform of the audio
:param data: audio data
:param n_fft: FFT length
:param win_length: Time frame or the window length
:param hop_length: Hop length between the time frames. (Determines overlapping between frames)
:return: STFT of the audio signal
"""
stft = librosa.stft(y = data,n_fft=n_fft,hop_length=hop_length,win_length=win_length)
return stft
@staticmethod
def get_energy(data,frame_length,hop_length):
"""
Compute the Root mean square energy of the signal
:param data: audio data
:param frame_length: window or frame legth
:param hop_length: overlapping factor
:return: Energy of the audio signal.
"""
energy = librosa.feature.rmse(y=data,n_fft=frame_length,hop_length=hop_length)
energy = energy[0,:]
return energy
@staticmethod
def get_spectrogram(data,n_fft = 512,win_length = 480,hop_length = 120,range = (0,255),pixel_type = np.uint8,log_amplitude = True):
"""
return spectorgram in log scale recaled to given range
:param log_amplitude: if True, returns spectrogram in logamplitude, or returns linear amplitude.
:return: Spectrogram image
"""
# calculating stft for window length = 480 and overlap = 360 samples
stft = AudioProcessing.get_stft(data,n_fft,win_length,hop_length)
db = np.absolute(stft)
if log_amplitude:
db = librosa.logamplitude(db)
# converting to log amplitude and rescaling it between the given range
db = AudioProcessing.rescaleAmplitude(db,range)
db = db.astype(pixel_type)
return db
@staticmethod
def get_spectrogram_label(data,n_fft = 512,win_length = 480,hop_length = 120,
range = (0,255),pixel_type = np.uint8,log_amplitude = True,
initial_labels = [25,50,75,100,125,150,175,200,225,250], no_labels = 2 ):
"""
Performs preprocessing and clustering on the spectrogram to retrieve the most prominent parts as labels.
:param data: audio data
:param n_fft: FFT length
:param win_length: Window length
:param hop_length: Hop length (overlapping factor)
:param range: range of the intensity values of spectrogram
:param pixel_type: Pixel type for intensity values of spectrogram
:param log_amplitude: Whether to consider log amplitude of spectrogram or not
:param initial_labels: Initial Labels for clustering the spectrogram using Kmeans
:param no_labels: Maximum number of labels to be retained.
:return: Labels extracted from spectrogram.
"""
# obtaining the spectrogram of the audio
spectrogram = AudioProcessing.get_spectrogram(data,n_fft=n_fft,win_length=win_length,hop_length=hop_length,range=range,pixel_type=pixel_type,log_amplitude = log_amplitude)
# converting to sitk image
db_sitk = sitk.GetImageFromArray(spectrogram)
db_sitk = sitk.GetImageFromArray(ImageProcessing.median_image_filter(db_sitk,radius=(3,3,3)))
# kmeans clustering the image acoording to the intial labels
labels = sitk.ScalarImageKmeans(db_sitk,initial_labels,True)
# considering only last n labels given byu no_labels
lables_arr = sitk.GetArrayFromImage(labels)
max_label = np.max(lables_arr)
lables_arr[lables_arr < (max_label-(no_labels - 1))] = 0
lables_arr[lables_arr >= (max_label-(no_labels - 1))] = 1
labels = sitk.GetImageFromArray(lables_arr)
# performing binary closing and dilating with certain parameters
closed = sitk.BinaryMorphologicalClosing(labels,1,sitk.sitkBall)
dilated = sitk.BinaryDilate(closed,3,sitk.sitkBall)
# filling holes
holesfilled = sitk.BinaryFillhole(dilated,fullyConnected=True)
# getting the connected components and relabelling it according to size
connected = sitk.ConnectedComponent(holesfilled,fullyConnected=True)
relabelled = sitk.RelabelComponent(connected,minimumObjectSize=200)
relabelled_arr = sitk.GetArrayFromImage(relabelled)
# returning the spectrogram and the label
return relabelled_arr
@staticmethod
def segmentAudioByEnergyApproximation(data,fs,threshold = 5 ,short_energy_time = 64,max_segments = 5):
"""
Segmenting the audio based on approximation using signal energy. Modelling the noise
by considering certain amount of low energy level frames.
:param data:
:param fs:
:param threshold:
:param short_energy_time:
:param max_segments:
:return:
"""
total_samples = 0.2*fs
min_energy_samples = np.sort(abs(data))[:int(total_samples)]
min_energy_samples = np.array(min_energy_samples)
mean = np.mean(abs(min_energy_samples))
std = np.std(abs(min_energy_samples))
if std == 0.0:
std = 0.01
# Approximating a frame with the maximum value of the frame to eliminate the high frequency content
approximate = np.copy(abs(data))
i = 0
hop_size = 2048
while(i < data.size):
if(i+hop_size < data.size):
# approximate my maximum
approximate[i:i+hop_size] = np.max(approximate[i:i+hop_size])
else:
approximate[i:] = np.max(approximate[i:])
i = i+hop_size
check_array = (abs(approximate) - mean)/float(std)
if 0:
import pdb
pdb.set_trace()
plt.plot(check_array)
plt.show()
if np.min(check_array )> threshold:
threshold = np.min(check_array) + 3
ind_p = np.where(check_array > threshold)
ind_n = np.where(check_array <= threshold)
check_array[ind_p] = 1
check_array[ind_n] = 0
diff = np.ediff1d(check_array)
ones = np.where(diff == 1)[0]
minus_ones = np.where(diff == -1)[0]
if ones.size == 0:
ones = np.array([0])
if minus_ones.size == 0:
minus_ones = np.array([check_array.size - 1])
if ones[0] >= minus_ones[0]:
ones = np.append(0,ones)
if ones[-1] >= minus_ones[-1]:
minus_ones = np.append(minus_ones,[check_array.size - 1])
segments = []
if 0:
import pdb
pdb.set_trace()
for i in range(ones.size):
if(minus_ones[i] - ones[i] >= 6144):
# print(minus_ones[i] - ones[i],i)
segments.append((ones[i],minus_ones[i],minus_ones[i]-ones[i]))
def seg_size(x):
return (x[2])
segments = sorted(segments,key=seg_size,reverse=True)
if len(segments) > max_segments :
segments =segments[:5]
return segments
@staticmethod
def segmentAudioBySpectrograms(data,spec_label,win_len,hop_len,max_segments = 5):
"""
Segmentation audio by using labels generated by spectrogram.
First compute spectrogram labels using get_spectrogram_label method and
:param data: audio data to be segmented
:param spec_label: Spectrogram labels
:param win_len: Window length
:param hop_len: Hop Length
:param max_segments: Maximum number of segments to be retained
:return: Segments by removing unwanted part of the signal.
"""
shape = spec_label.shape
time_range = shape[1]
check_array = np.zeros(data.size)
for i in range(time_range):
col_value = np.sum(spec_label[:,i])
if col_value > 0 :
check_array[i*hop_len : (i*hop_len + win_len)] = 1
diff = np.ediff1d(check_array)
ones = np.where(diff == 1)[0]
minus_ones = np.where(diff == -1)[0]
if ones.size == 0:
ones = np.array([0])
if minus_ones.size == 0:
minus_ones = np.array([check_array.size - 1])
if ones[0] >= minus_ones[0]:
ones = np.append(0,ones)
if ones[-1] >= minus_ones[-1]:
minus_ones = np.append(minus_ones,[check_array.size - 1])
segments = []
for i in range(ones.size):
# print(minus_ones[i] - ones[i],i)
segments.append((ones[i],minus_ones[i],minus_ones[i]-ones[i]))
def seg_size(x):
return (x[2])
segments = sorted(segments,key=seg_size,reverse=True)
if len(segments) > max_segments :
segments =segments[:max_segments]
if 0:
ch = np.zeros(data.size)
ch[segments[0][0]:segments[0][1]] = 1
import matplotlib.pyplot as plt
plt.plot(data)
plt.plot(ch)
plt.show()
return segments
@staticmethod
def butter_lowpass_filter(data, cutoff, fs, order=5):
"""
Low pass filter using butterworth coefficients
"""
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b,a = butter(order, normal_cutoff, btype='low', analog=False)
y = lfilter(b, a, data)
return y
@staticmethod
def butter_highpass_filter(data, cutoff, fs, order=5):
"""
High pass filter using butterworth coefficients
"""
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b,a = butter(order, normal_cutoff, btype='high', analog=False)
y = lfilter(b, a, data)
return y
@staticmethod
def meanImage(image_arr,radius):
"""
Blur image with MeanImageFilter
:param image_arr: Image array
:param radius: radius of the kernel
:return: Mean Image
"""
meanImageFilter = sitk.MeanImageFilter()
meanImageFilter.SetRadius(radius)
return sitk.GetArrayFromImage(meanImageFilter.Execute(sitk.GetImageFromArray(image_arr)))
@staticmethod
def segmentationByIterativeTimeDomain(data,fs):
data_copy = np.copy(data)
energy = AudioProcessing.get_energy(data_copy,frame_length=64,hop_length=64)
pre_threshold = None
annotation = np.ones(energy.size)
while 1:
check_indices = np.where(annotation == 1)
db = 10*np.log10(energy[check_indices])
# db[np.isneginf(db)] = 0
# nonzero = db[np.nonzero(db)]
min_energy_sample = sorted(db)[0]
print(min_energy_sample)
threshold = 0.5*(10**((min_energy_sample)/10.0))
if pre_threshold is not None:
print(pre_threshold - threshold)
pre_threshold = threshold
data_copy[abs(data_copy) < threshold] = 0
plt.plot(data)
plt.plot(data_copy)
plt.show()
import pdb
pdb.set_trace()
@staticmethod
def get_hilbert_transform(data):
from scipy.signal import hilbert
return hilbert(data)
@staticmethod
def get_audio_features(y,sr,n_fft,hop_length,n_mfcc):
"""
Compute acoustic features of the audio
:param y: audio data
:param sr: Sampling rate
:param n_fft: FFT length
:param hop_length: Hop length
:param n_mfcc: Number of MFCC coefficients.
:return: Audio feature matrix
"""
features = None
#MFCCS
mfccs = librosa.feature.mfcc(y=y, sr=sr, n_mfcc = n_mfcc , n_fft = n_fft, hop_length = hop_length)
features = mfccs
#Delta mfccs
delta_mfccs = librosa.feature.delta(mfccs)
features = np.concatenate((features,delta_mfccs))
#rmse
rmse = librosa.feature.rmse(y=y , n_fft = n_fft , hop_length = hop_length)
features = np.concatenate((features,rmse))
#spectral centroid
spectral_centroid = librosa.feature.spectral_centroid(y=y, sr=sr, n_fft = n_fft, hop_length = hop_length )
features = np.concatenate((features,spectral_centroid))
#spectral bandwidth
spectral_bandwidth = librosa.feature.spectral_bandwidth(y=y, sr=sr, n_fft = n_fft, hop_length = hop_length)
features = np.concatenate((features,spectral_bandwidth))
#spectral contrast
spectral_contrast = librosa.feature.spectral_contrast(y=y, sr=sr, n_fft = n_fft, hop_length = hop_length)
features = np.concatenate((features,spectral_contrast))
#spectral rolloff
spectral_rolloff = librosa.feature.spectral_rolloff(y=y, sr=sr, n_fft = n_fft, hop_length = hop_length)
features = np.concatenate((features,spectral_rolloff))
#zero crossing rate
zero_crossing_rate = librosa.feature.zero_crossing_rate(y=y, frame_length = n_fft, hop_length = hop_length)
features = np.concatenate((features,zero_crossing_rate))
return np.transpose(features)
@staticmethod
def levinson_1d(r, order):
try:
nonzero = np.nonzero(r)[0][0]
except:
import pdb
pdb.set_trace()
r = r[nonzero:]
r = np.atleast_1d(r)
if r.ndim > 1:
raise ValueError("Only rank 1 are supported for now.")
n = r.size
if order > n - 1:
raise ValueError("Order should be <= size-1")
elif n < 1:
raise ValueError("Cannot operate on empty array !")
if not np.isreal(r[0]):
raise ValueError("First item of input must be real.")
elif not np.isfinite(1/r[0]):
raise ValueError("First item should be != 0")
# Estimated coefficients
a = np.empty(order+1, r.dtype)
# temporary array
t = np.empty(order+1, r.dtype)
# Reflection coefficients
k = np.empty(order, r.dtype)
a[0] = 1.
e = r[0]
for i in range(1, order+1):
acc = r[i]
for j in range(1, i):
acc += a[j] * r[i-j]
k[i-1] = -acc / e
a[i] = k[i-1]
for j in range(order):
t[j] = a[j]
for j in range(1, i):
a[j] += k[i-1] * np.conj(t[i-j])
e *= 1 - k[i-1] * np.conj(k[i-1])
return a, e, k
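# Illustrative check (assumed autocorrelation values): the coefficients from
# levinson_1d solve the Toeplitz / Yule-Walker normal equations, so a[1:]
# should agree with a direct linear solve up to numerical error.
import numpy as np
from scipy.linalg import toeplitz
r_demo = np.array([1.0, 0.5, 0.25, 0.125])
a_demo, e_demo, k_demo = AudioProcessing.levinson_1d(r_demo, order=2)
direct = np.linalg.solve(toeplitz(r_demo[:2]), -r_demo[1:3])   # equals a_demo[1:]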
@staticmethod
def get_lpc_coefficients_feature_vector(y,order,n_fft,hop_length):
window = np.hanning(n_fft)
i = 0
lpc_coefficients = []
while i <= y.shape[0]:
window_end = i + n_fft
audio_end = y.shape[0]
if audio_end - i < n_fft:
d = y[i:]
d_len = len(d)
diff = n_fft - d_len
d = list(d)
for j in range(diff):
d.append(0)
d = np.array(d)
d = d*window
else:
d = y[i:window_end]
d = np.array(d)
d = d*window
if not np.all(d == 0):
# NOTE: levinson_1d expects an autocorrelation sequence; LPC analysis would
# typically pass the autocorrelation of the windowed frame rather than the raw frame.
a,e,k = AudioProcessing.levinson_1d(d,order)
a = a[1:]
if not np.any(np.isnan(a)) and not np.any(np.isnan(k)):
lpcs = []
lpcs.extend(a)
lpcs.extend(k)
lpc_coefficients.append(lpcs)
i = i + hop_length
lpc_coefficients = np.array(lpc_coefficients)
return lpc_coefficients
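# Illustrative sketch (assumed parameters): per-frame coefficients for a
# signal; each row holds `order` prediction coefficients followed by `order`
# reflection coefficients, matching get_lpc_column_names(order) below.
import numpy as np
y_lpc = np.random.randn(16000)                   # assumed 1 s of noise at 16 kHz
lpcs_demo = AudioProcessing.get_lpc_coefficients_feature_vector(y_lpc, order=10, n_fft=1024, hop_length=512)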
@staticmethod
def get_lpc_column_names(order):
a = []
k = []
for i in range(order):
a.append("LPC_A_{}".format(i+1))
k.append("LPC_K_{}".format(i+1))
lpc_columns = []
lpc_columns.extend(a)
lpc_columns.extend(k)
return lpc_columns
@staticmethod
def get_audio_feature_columns(n_mfcc,append = None):
cols = []
mfccs = []
delta_mfccs = []
constrasts = []
for i in range(n_mfcc):
mfccs.append('MFCC_{}'.format(i+1))
delta_mfccs.append('DELTA_MFCC_{}'.format(i+1))
for i in range(7):
constrasts.append('SpectralContrast_{}'.format(i+1))
cols.extend(mfccs)
cols.extend(delta_mfccs)
cols.extend(['RMSE','SpectralCentroid','SpectralBandwidth'])
cols.extend(constrasts)
cols.extend(['SpectralRollOff','ZeroCrossingRate'])
new_cols = []
if append is not None:
for col in cols:
new_cols.append("Audio_" + col + append)
return new_cols
return cols
| 4,622
| 18,571
| 24
|
13e23dc0a6662a0f1c60b6e0b4542b80784914e5
| 275
|
py
|
Python
|
proj02/proj02_01.py
|
scienceman44/SAVY
|
5a9781a9d08288c40f883602dc3b6bab4d6c63c1
|
[
"MIT"
] | null | null | null |
proj02/proj02_01.py
|
scienceman44/SAVY
|
5a9781a9d08288c40f883602dc3b6bab4d6c63c1
|
[
"MIT"
] | null | null | null |
proj02/proj02_01.py
|
scienceman44/SAVY
|
5a9781a9d08288c40f883602dc3b6bab4d6c63c1
|
[
"MIT"
] | null | null | null |
# Name:
# Date:
result = 0
loop_control = True
while loop_control == True:
b = int(raw_input('Enter a number to add, or 0 to indicate you are finished:'))
if b == 0:
loop_control = False
result = result + b
print 'your result is:'
print result
| 18.333333
| 83
| 0.621818
|
# Name:
# Date:
result = 0
loop_control = True
while loop_control == True:
b = int(raw_input('Enter a number to add, or 0 to indicate you are finished:'))
if b == 0:
loop_control = False
result = result + b
print 'your result is:'
print result
| 0
| 0
| 0
|
04b3ca56fafcb17962d19c797e31c3700c6e41b8
| 9,753
|
py
|
Python
|
generate_qumulo_cloudformation_template_test.py
|
Qumulo/Cloud-Deployment-Samples
|
a1ed1850b80ec14ea7520c5829421209efce8382
|
[
"MIT"
] | 4
|
2019-12-25T22:09:37.000Z
|
2022-02-07T19:46:03.000Z
|
generate_qumulo_cloudformation_template_test.py
|
Qumulo/Cloud-Deployment-Samples
|
a1ed1850b80ec14ea7520c5829421209efce8382
|
[
"MIT"
] | 1
|
2020-07-10T22:24:53.000Z
|
2020-07-10T22:24:53.000Z
|
generate_qumulo_cloudformation_template_test.py
|
Qumulo/cloud-samples
|
a1ed1850b80ec14ea7520c5829421209efce8382
|
[
"MIT"
] | 3
|
2020-07-10T22:05:14.000Z
|
2022-02-07T19:46:04.000Z
|
import os
import json
import unittest
from troposphere import ec2, Template
from generate_qumulo_cloudformation_template import *
if __name__ == '__main__':
unittest.main()
| 34.708185
| 86
| 0.58177
|
import os
import json
import unittest
from troposphere import ec2, Template
from generate_qumulo_cloudformation_template import *
class ChassisSpecTest(unittest.TestCase):
def test_init(self) -> None:
spec = ChassisSpec(
volume_count=20,
pairing_ratio=4,
working_spec={'VolumeSize': 1},
backing_spec={'VolumeSize': 5},
)
self.assertEqual(spec.working_volume_count, 4)
self.assertEqual(spec.backing_volume_count, 16)
def test_init_no_backing_spec(self) -> None:
spec = ChassisSpec(
volume_count=5,
pairing_ratio=0,
working_spec={'VolumeSize': 1},
backing_spec=None,
)
self.assertEqual(spec.working_volume_count, 5)
self.assertEqual(spec.backing_volume_count, 0)
def test_init_too_many_volumes(self) -> None:
with self.assertRaisesRegex(AssertionError, 'Too many volumes specified'):
ChassisSpec(
volume_count=26,
pairing_ratio=0,
working_spec={'VolumeSize': 1},
backing_spec={'VolumeSize': 5},
)
def test_init_bad_pairing_ratio(self) -> None:
with self.assertRaisesRegex(AssertionError, 'Not all volumes can be used'):
ChassisSpec(
volume_count=10,
pairing_ratio=3,
working_spec={'VolumeSize': 1},
backing_spec={'VolumeSize': 3},
)
def test_init_need_backing_spec(self) -> None:
with self.assertRaisesRegex(AssertionError, 'Backing volumes require'):
ChassisSpec(
volume_count=10,
pairing_ratio=1,
working_spec={'VolumeSize': 1},
backing_spec=None,
)
def test_from_json(self) -> None:
json_spec = {
'slot_count': 12,
'pairing_ratio': 2,
'working_spec': {'VolumeSize': 1},
'backing_spec': {'VolumeSize': 5}
}
spec = ChassisSpec.from_json(json_spec)
self.assertEqual(spec.working_volume_count, 4)
self.assertEqual(spec.backing_volume_count, 8)
def test_get_block_device_mappings(self) -> None:
spec = ChassisSpec(
volume_count=2,
pairing_ratio=1,
working_spec={'VolumeSize': 1},
backing_spec={'VolumeSize': 5},
)
mappings = spec.get_block_device_mappings()
self.assertEqual(len(mappings), 3)
devices = [mapping.to_dict()['DeviceName'] for mapping in mappings]
self.assertEqual(devices, ['/dev/sda1', '/dev/xvdb', '/dev/xvdc'])
def test_get_slot_specs(self) -> None:
spec = ChassisSpec(
volume_count=2,
pairing_ratio=1,
working_spec={'VolumeSize': 1},
backing_spec={'VolumeSize': 5},
)
slot_specs = spec.get_slot_specs()
expected_specs = [
{
'drive_bay': '/dev/xvdb',
'disk_role': 'working',
'disk_size': 1073741824,
},
{
'drive_bay': '/dev/xvdc',
'disk_role': 'backing',
'disk_size': 5368709120,
}
]
self.assertEqual(slot_specs['slot_specs'], expected_specs)
class TemplateTest(unittest.TestCase):
def test_add_conditions(self) -> None:
template = Template()
add_conditions(template)
self.assertEqual(
list(template.conditions.keys()),
['HasEncryptionKey', 'HasIamInstanceProfile', 'HasInstanceRecoveryTopic']
)
def test_add_params_with_ingress_cidr_param(self) -> None:
template = Template()
add_params(template, True)
expected_parameters = [
'ClusterName', 'KeyName', 'InstanceType', 'VpcId', 'SubnetId', 'SgCidr',
'VolumesEncryptionKey', 'IamInstanceProfile', 'InstanceRecoveryTopic'
]
self.assertEqual(list(template.parameters.keys()), expected_parameters)
def test_add_params_without_ingress_cidr_param(self) -> None:
template = Template()
add_params(template, False)
expected_parameters = [
'ClusterName', 'KeyName', 'InstanceType', 'VpcId', 'SubnetId',
'VolumesEncryptionKey', 'IamInstanceProfile', 'InstanceRecoveryTopic'
]
self.assertEqual(list(template.parameters.keys()), expected_parameters)
def test_add_ami_map(self) -> None:
template = Template()
add_ami_map(template, 'ami-1234')
expected_mapping = {
'us-east-1': {'AMI': 'ami-1234'},
'us-east-2': {'AMI': 'ami-1234'},
'us-west-1': {'AMI': 'ami-1234'},
'us-west-2': {'AMI': 'ami-1234'},
'ca-central-1': {'AMI': 'ami-1234'},
'eu-central-1': {'AMI': 'ami-1234'},
'eu-west-1': {'AMI': 'ami-1234'},
'eu-west-2': {'AMI': 'ami-1234'},
'eu-west-3': {'AMI': 'ami-1234'},
}
self.assertEqual(template.mappings['RegionMap'], expected_mapping)
def test_add_security_group(self) -> None:
template = Template()
add_security_group(template)
self.assertEqual(
list(template.resources.keys()),
['QumuloSecurityGroup', 'QumuloSecurityGroupNodeRule']
)
class GenerateUserDataTest(unittest.TestCase):
def test_generate_node1_user_data(self) -> None:
instances = [ec2.Instance('t1'), ec2.Instance('t2')]
spec = ChassisSpec(
volume_count=2,
pairing_ratio=1,
working_spec={'VolumeSize': 1},
backing_spec={'VolumeSize': 5},
)
user_data = generate_node1_user_data(
instances, spec, get_ip_ref=lambda x: x, cluster_name_ref='nameref'
)
self.assertIn('t2', user_data)
self.assertIn('nameref', user_data)
self.assertIn('"spec_info": ', user_data)
self.assertIn(' "slot_specs": [', user_data)
def test_generate_other_nodes_user_data(self) -> None:
spec = ChassisSpec(
volume_count=2,
pairing_ratio=1,
working_spec={'VolumeSize': 1},
backing_spec={'VolumeSize': 5},
)
user_data = generate_other_nodes_user_data(spec)
self.assertIn('"spec_info": ', user_data)
self.assertIn(' "slot_specs": [', user_data)
class AddNodesTest(unittest.TestCase):
def setUp(self) -> None:
self.spec = ChassisSpec(
volume_count=2,
pairing_ratio=1,
working_spec={'VolumeSize': 1},
backing_spec={'VolumeSize': 5},
)
self.expected_resources = [
'testEni1',
'testEni2',
'testNode1',
'testNode2',
'CWRecoveryAlarmtestNode1',
'CWRecoveryAlarmtestNode2'
]
self.expected_outputs = [
'ClusterInstanceIDs',
'ClusterPrivateIPs',
'SecurityGroup',
'TemporaryPassword',
'LinkToManagement',
'QumuloKnowledgeBase'
]
def test_nodes_no_secondary_ips(self) -> None:
template = Template()
add_nodes(template, 2, 'test', self.spec, 0, 'sg-9')
self.assertEqual(list(template.resources.keys()), self.expected_resources)
self.assertEqual(list(template.outputs.keys()), self.expected_outputs)
def test_nodes_has_secondary_ips(self) -> None:
template = Template()
add_nodes(template, 2, 'test', self.spec, 1, 'sg-9')
self.assertEqual(list(template.resources.keys()), self.expected_resources)
self.expected_outputs.insert(2, 'ClusterSecondaryPrivateIPs')
self.assertEqual(list(template.outputs.keys()), self.expected_outputs)
class GenerateQumuloCloudformationTemplateTest(unittest.TestCase):
def setUp(self) -> None:
self.file_path = os.path.join(os.getcwd(), 'config_file.json')
def tearDown(self) -> None:
if os.path.exists(self.file_path):
os.remove(self.file_path)
def test_generate_qcft_with_override(self) -> None:
config = {
'slot_count': 12,
'pairing_ratio': 2,
'working_spec': {'VolumeSize': 1},
'backing_spec': {'VolumeSize': 5}
}
json_config = json.dumps(config, indent = 4)
with open(self.file_path, 'w+') as config_file:
config_file.write(json_config)
template = generate_qcft(2, self.file_path, 'st1', 'ami-123')
self.assertIsNotNone(template)
def test_generate_qcft_no_override(self) -> None:
config = {
'slot_count': 12,
'pairing_ratio': 2,
'working_spec': {'VolumeSize': 1},
'backing_spec': {'VolumeSize': 5}
}
json_config = json.dumps(config, indent = 4)
with open(self.file_path, 'w+') as config_file:
config_file.write(json_config)
template = generate_qcft(2, self.file_path, None, 'ami-123')
self.assertIsNotNone(template)
def test_generate_qcft_bad_override(self) -> None:
config = {
'slot_count': 12,
'pairing_ratio': 2,
'working_spec': {'VolumeSize': 1},
}
json_config = json.dumps(config, indent = 4)
with open(self.file_path, 'w+') as config_file:
config_file.write(json_config)
with self.assertRaisesRegex(NoBackingVolumesException, 'The backing volumes'):
generate_qcft(2, self.file_path, 'st1', 'ami-123')
if __name__ == '__main__':
unittest.main()
| 8,703
| 124
| 739
|
86d20c2cd0960aaa6593f882e80973ad808bffa1
| 23,886
|
py
|
Python
|
sdk/python/pulumi_azure_native/notificationhubs/v20160301/notification_hub.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/notificationhubs/v20160301/notification_hub.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/notificationhubs/v20160301/notification_hub.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['NotificationHubArgs', 'NotificationHub']
@pulumi.input_type
| 46.111969
| 590
| 0.668174
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['NotificationHubArgs', 'NotificationHub']
@pulumi.input_type
class NotificationHubArgs:
def __init__(__self__, *,
namespace_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
adm_credential: Optional[pulumi.Input['AdmCredentialArgs']] = None,
apns_credential: Optional[pulumi.Input['ApnsCredentialArgs']] = None,
authorization_rules: Optional[pulumi.Input[Sequence[pulumi.Input['SharedAccessAuthorizationRulePropertiesArgs']]]] = None,
baidu_credential: Optional[pulumi.Input['BaiduCredentialArgs']] = None,
gcm_credential: Optional[pulumi.Input['GcmCredentialArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
mpns_credential: Optional[pulumi.Input['MpnsCredentialArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
notification_hub_name: Optional[pulumi.Input[str]] = None,
registration_ttl: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input['SkuArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
wns_credential: Optional[pulumi.Input['WnsCredentialArgs']] = None):
"""
The set of arguments for constructing a NotificationHub resource.
:param pulumi.Input[str] namespace_name: The namespace name.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input['AdmCredentialArgs'] adm_credential: The AdmCredential of the created NotificationHub
:param pulumi.Input['ApnsCredentialArgs'] apns_credential: The ApnsCredential of the created NotificationHub
:param pulumi.Input[Sequence[pulumi.Input['SharedAccessAuthorizationRulePropertiesArgs']]] authorization_rules: The AuthorizationRules of the created NotificationHub
:param pulumi.Input['BaiduCredentialArgs'] baidu_credential: The BaiduCredential of the created NotificationHub
:param pulumi.Input['GcmCredentialArgs'] gcm_credential: The GcmCredential of the created NotificationHub
:param pulumi.Input[str] location: Resource location
:param pulumi.Input['MpnsCredentialArgs'] mpns_credential: The MpnsCredential of the created NotificationHub
:param pulumi.Input[str] name: The NotificationHub name.
:param pulumi.Input[str] notification_hub_name: The notification hub name.
:param pulumi.Input[str] registration_ttl: The RegistrationTtl of the created NotificationHub
:param pulumi.Input['SkuArgs'] sku: The sku of the created namespace
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
:param pulumi.Input['WnsCredentialArgs'] wns_credential: The WnsCredential of the created NotificationHub
"""
pulumi.set(__self__, "namespace_name", namespace_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if adm_credential is not None:
pulumi.set(__self__, "adm_credential", adm_credential)
if apns_credential is not None:
pulumi.set(__self__, "apns_credential", apns_credential)
if authorization_rules is not None:
pulumi.set(__self__, "authorization_rules", authorization_rules)
if baidu_credential is not None:
pulumi.set(__self__, "baidu_credential", baidu_credential)
if gcm_credential is not None:
pulumi.set(__self__, "gcm_credential", gcm_credential)
if location is not None:
pulumi.set(__self__, "location", location)
if mpns_credential is not None:
pulumi.set(__self__, "mpns_credential", mpns_credential)
if name is not None:
pulumi.set(__self__, "name", name)
if notification_hub_name is not None:
pulumi.set(__self__, "notification_hub_name", notification_hub_name)
if registration_ttl is not None:
pulumi.set(__self__, "registration_ttl", registration_ttl)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if wns_credential is not None:
pulumi.set(__self__, "wns_credential", wns_credential)
@property
@pulumi.getter(name="namespaceName")
def namespace_name(self) -> pulumi.Input[str]:
"""
The namespace name.
"""
return pulumi.get(self, "namespace_name")
@namespace_name.setter
def namespace_name(self, value: pulumi.Input[str]):
pulumi.set(self, "namespace_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="admCredential")
def adm_credential(self) -> Optional[pulumi.Input['AdmCredentialArgs']]:
"""
The AdmCredential of the created NotificationHub
"""
return pulumi.get(self, "adm_credential")
@adm_credential.setter
def adm_credential(self, value: Optional[pulumi.Input['AdmCredentialArgs']]):
pulumi.set(self, "adm_credential", value)
@property
@pulumi.getter(name="apnsCredential")
def apns_credential(self) -> Optional[pulumi.Input['ApnsCredentialArgs']]:
"""
The ApnsCredential of the created NotificationHub
"""
return pulumi.get(self, "apns_credential")
@apns_credential.setter
def apns_credential(self, value: Optional[pulumi.Input['ApnsCredentialArgs']]):
pulumi.set(self, "apns_credential", value)
@property
@pulumi.getter(name="authorizationRules")
def authorization_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SharedAccessAuthorizationRulePropertiesArgs']]]]:
"""
The AuthorizationRules of the created NotificationHub
"""
return pulumi.get(self, "authorization_rules")
@authorization_rules.setter
def authorization_rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['SharedAccessAuthorizationRulePropertiesArgs']]]]):
pulumi.set(self, "authorization_rules", value)
@property
@pulumi.getter(name="baiduCredential")
def baidu_credential(self) -> Optional[pulumi.Input['BaiduCredentialArgs']]:
"""
The BaiduCredential of the created NotificationHub
"""
return pulumi.get(self, "baidu_credential")
@baidu_credential.setter
def baidu_credential(self, value: Optional[pulumi.Input['BaiduCredentialArgs']]):
pulumi.set(self, "baidu_credential", value)
@property
@pulumi.getter(name="gcmCredential")
def gcm_credential(self) -> Optional[pulumi.Input['GcmCredentialArgs']]:
"""
The GcmCredential of the created NotificationHub
"""
return pulumi.get(self, "gcm_credential")
@gcm_credential.setter
def gcm_credential(self, value: Optional[pulumi.Input['GcmCredentialArgs']]):
pulumi.set(self, "gcm_credential", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="mpnsCredential")
def mpns_credential(self) -> Optional[pulumi.Input['MpnsCredentialArgs']]:
"""
The MpnsCredential of the created NotificationHub
"""
return pulumi.get(self, "mpns_credential")
@mpns_credential.setter
def mpns_credential(self, value: Optional[pulumi.Input['MpnsCredentialArgs']]):
pulumi.set(self, "mpns_credential", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The NotificationHub name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="notificationHubName")
def notification_hub_name(self) -> Optional[pulumi.Input[str]]:
"""
The notification hub name.
"""
return pulumi.get(self, "notification_hub_name")
@notification_hub_name.setter
def notification_hub_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notification_hub_name", value)
@property
@pulumi.getter(name="registrationTtl")
def registration_ttl(self) -> Optional[pulumi.Input[str]]:
"""
The RegistrationTtl of the created NotificationHub
"""
return pulumi.get(self, "registration_ttl")
@registration_ttl.setter
def registration_ttl(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "registration_ttl", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['SkuArgs']]:
"""
The sku of the created namespace
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input['SkuArgs']]):
pulumi.set(self, "sku", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="wnsCredential")
def wns_credential(self) -> Optional[pulumi.Input['WnsCredentialArgs']]:
"""
The WnsCredential of the created NotificationHub
"""
return pulumi.get(self, "wns_credential")
@wns_credential.setter
def wns_credential(self, value: Optional[pulumi.Input['WnsCredentialArgs']]):
pulumi.set(self, "wns_credential", value)
class NotificationHub(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
adm_credential: Optional[pulumi.Input[pulumi.InputType['AdmCredentialArgs']]] = None,
apns_credential: Optional[pulumi.Input[pulumi.InputType['ApnsCredentialArgs']]] = None,
authorization_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SharedAccessAuthorizationRulePropertiesArgs']]]]] = None,
baidu_credential: Optional[pulumi.Input[pulumi.InputType['BaiduCredentialArgs']]] = None,
gcm_credential: Optional[pulumi.Input[pulumi.InputType['GcmCredentialArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
mpns_credential: Optional[pulumi.Input[pulumi.InputType['MpnsCredentialArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
notification_hub_name: Optional[pulumi.Input[str]] = None,
registration_ttl: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
wns_credential: Optional[pulumi.Input[pulumi.InputType['WnsCredentialArgs']]] = None,
__props__=None):
"""
Description of a NotificationHub Resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['AdmCredentialArgs']] adm_credential: The AdmCredential of the created NotificationHub
:param pulumi.Input[pulumi.InputType['ApnsCredentialArgs']] apns_credential: The ApnsCredential of the created NotificationHub
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SharedAccessAuthorizationRulePropertiesArgs']]]] authorization_rules: The AuthorizationRules of the created NotificationHub
:param pulumi.Input[pulumi.InputType['BaiduCredentialArgs']] baidu_credential: The BaiduCredential of the created NotificationHub
:param pulumi.Input[pulumi.InputType['GcmCredentialArgs']] gcm_credential: The GcmCredential of the created NotificationHub
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[pulumi.InputType['MpnsCredentialArgs']] mpns_credential: The MpnsCredential of the created NotificationHub
:param pulumi.Input[str] name: The NotificationHub name.
:param pulumi.Input[str] namespace_name: The namespace name.
:param pulumi.Input[str] notification_hub_name: The notification hub name.
:param pulumi.Input[str] registration_ttl: The RegistrationTtl of the created NotificationHub
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[pulumi.InputType['SkuArgs']] sku: The sku of the created namespace
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
:param pulumi.Input[pulumi.InputType['WnsCredentialArgs']] wns_credential: The WnsCredential of the created NotificationHub
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: NotificationHubArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Description of a NotificationHub Resource.
:param str resource_name: The name of the resource.
:param NotificationHubArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
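# Illustrative usage sketch (the resource and Azure names below are assumed
# placeholders, not part of this generated file):
#
#   import pulumi
#   from pulumi_azure_native.notificationhubs.v20160301 import NotificationHub
#
#   hub = NotificationHub(
#       "exampleHub",
#       resource_group_name="example-rg",
#       namespace_name="example-namespace",
#       location="westus",
#       notification_hub_name="example-hub",
#   )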
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(NotificationHubArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
adm_credential: Optional[pulumi.Input[pulumi.InputType['AdmCredentialArgs']]] = None,
apns_credential: Optional[pulumi.Input[pulumi.InputType['ApnsCredentialArgs']]] = None,
authorization_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SharedAccessAuthorizationRulePropertiesArgs']]]]] = None,
baidu_credential: Optional[pulumi.Input[pulumi.InputType['BaiduCredentialArgs']]] = None,
gcm_credential: Optional[pulumi.Input[pulumi.InputType['GcmCredentialArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
mpns_credential: Optional[pulumi.Input[pulumi.InputType['MpnsCredentialArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
notification_hub_name: Optional[pulumi.Input[str]] = None,
registration_ttl: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['SkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
wns_credential: Optional[pulumi.Input[pulumi.InputType['WnsCredentialArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = NotificationHubArgs.__new__(NotificationHubArgs)
__props__.__dict__["adm_credential"] = adm_credential
__props__.__dict__["apns_credential"] = apns_credential
__props__.__dict__["authorization_rules"] = authorization_rules
__props__.__dict__["baidu_credential"] = baidu_credential
__props__.__dict__["gcm_credential"] = gcm_credential
__props__.__dict__["location"] = location
__props__.__dict__["mpns_credential"] = mpns_credential
__props__.__dict__["name"] = name
if namespace_name is None and not opts.urn:
raise TypeError("Missing required property 'namespace_name'")
__props__.__dict__["namespace_name"] = namespace_name
__props__.__dict__["notification_hub_name"] = notification_hub_name
__props__.__dict__["registration_ttl"] = registration_ttl
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["sku"] = sku
__props__.__dict__["tags"] = tags
__props__.__dict__["wns_credential"] = wns_credential
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:notificationhubs/v20160301:NotificationHub"), pulumi.Alias(type_="azure-native:notificationhubs:NotificationHub"), pulumi.Alias(type_="azure-nextgen:notificationhubs:NotificationHub"), pulumi.Alias(type_="azure-native:notificationhubs/v20140901:NotificationHub"), pulumi.Alias(type_="azure-nextgen:notificationhubs/v20140901:NotificationHub"), pulumi.Alias(type_="azure-native:notificationhubs/v20170401:NotificationHub"), pulumi.Alias(type_="azure-nextgen:notificationhubs/v20170401:NotificationHub")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(NotificationHub, __self__).__init__(
'azure-native:notificationhubs/v20160301:NotificationHub',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'NotificationHub':
"""
Get an existing NotificationHub resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = NotificationHubArgs.__new__(NotificationHubArgs)
__props__.__dict__["adm_credential"] = None
__props__.__dict__["apns_credential"] = None
__props__.__dict__["authorization_rules"] = None
__props__.__dict__["baidu_credential"] = None
__props__.__dict__["gcm_credential"] = None
__props__.__dict__["location"] = None
__props__.__dict__["mpns_credential"] = None
__props__.__dict__["name"] = None
__props__.__dict__["registration_ttl"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["wns_credential"] = None
return NotificationHub(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="admCredential")
def adm_credential(self) -> pulumi.Output[Optional['outputs.AdmCredentialResponse']]:
"""
The AdmCredential of the created NotificationHub
"""
return pulumi.get(self, "adm_credential")
@property
@pulumi.getter(name="apnsCredential")
def apns_credential(self) -> pulumi.Output[Optional['outputs.ApnsCredentialResponse']]:
"""
The ApnsCredential of the created NotificationHub
"""
return pulumi.get(self, "apns_credential")
@property
@pulumi.getter(name="authorizationRules")
def authorization_rules(self) -> pulumi.Output[Optional[Sequence['outputs.SharedAccessAuthorizationRulePropertiesResponse']]]:
"""
The AuthorizationRules of the created NotificationHub
"""
return pulumi.get(self, "authorization_rules")
@property
@pulumi.getter(name="baiduCredential")
def baidu_credential(self) -> pulumi.Output[Optional['outputs.BaiduCredentialResponse']]:
"""
The BaiduCredential of the created NotificationHub
"""
return pulumi.get(self, "baidu_credential")
@property
@pulumi.getter(name="gcmCredential")
def gcm_credential(self) -> pulumi.Output[Optional['outputs.GcmCredentialResponse']]:
"""
The GcmCredential of the created NotificationHub
"""
return pulumi.get(self, "gcm_credential")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="mpnsCredential")
def mpns_credential(self) -> pulumi.Output[Optional['outputs.MpnsCredentialResponse']]:
"""
The MpnsCredential of the created NotificationHub
"""
return pulumi.get(self, "mpns_credential")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="registrationTtl")
def registration_ttl(self) -> pulumi.Output[Optional[str]]:
"""
The RegistrationTtl of the created NotificationHub
"""
return pulumi.get(self, "registration_ttl")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.SkuResponse']]:
"""
The sku of the created namespace
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="wnsCredential")
def wns_credential(self) -> pulumi.Output[Optional['outputs.WnsCredentialResponse']]:
"""
The WnsCredential of the created NotificationHub
"""
return pulumi.get(self, "wns_credential")
| 6,082
| 17,299
| 45
|
ff0414b799e8f6504673a326a43404552c0e74fc
| 12,480
|
py
|
Python
|
sts_auth/stsauth.py
|
cshamrick/stsauth
|
f30cc37ed1d5e18cb47a250bf4c67a5d6332478e
|
[
"MIT"
] | 20
|
2018-05-21T21:21:47.000Z
|
2022-02-09T04:11:06.000Z
|
sts_auth/stsauth.py
|
cshamrick/stsauth
|
f30cc37ed1d5e18cb47a250bf4c67a5d6332478e
|
[
"MIT"
] | 15
|
2018-07-04T02:57:14.000Z
|
2021-08-09T18:03:12.000Z
|
sts_auth/stsauth.py
|
cshamrick/stsauth
|
f30cc37ed1d5e18cb47a250bf4c67a5d6332478e
|
[
"MIT"
] | 6
|
2018-09-04T05:07:09.000Z
|
2021-06-30T19:49:21.000Z
|
import os
import re
import sys
from datetime import datetime
from typing import Optional, Mapping
from urllib.parse import urlparse, urlunparse
import boto3 # type: ignore[import]
import click # type: ignore[import]
import requests
from requests_ntlm import HttpNtlmAuth # type: ignore[import]
from bs4 import BeautifulSoup # type: ignore[import]
from botocore.exceptions import ProfileNotFound, ClientError # type: ignore[import]
from sts_auth import utils
from sts_auth.okta import Okta
from sts_auth.config import Config
from sts_auth.utils import logger
class STSAuth(object):
"""Initializes an STS Authenticator.
:param username: Username to authenticate with (required).
:param password: Password to authenticate with (required).
:param credentialsfile: A path to an AWS Credentials file (required).
See https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/setup-credentials.html
for more details.
:param idpentryurl: URL to the IDP Entrypoint.
:param profile: Name of an AWS Profile to automatically fetch credentials for.
:param okta_org: Name of the Okta organization, ex: `my-company`.
:param domain: Domain which your username resides if required.
:param region: Region for AWS to authenticate in.
:param output: Output format, one of: `json`, `text`, `table`.
"""
def fetch_aws_account_names(self, response: requests.Response) -> Optional[requests.Response]:
"""Posts ADFS form to get account list response"""
hiddenform = response.soup.find("form", {"name": "hiddenform"}) # type: ignore[attr-defined]
headers = {
"Referer": response.url,
"Content-Type": "application/x-www-form-urlencoded",
}
selectors = ",".join("{}[name]".format(i) for i in ("input", "button", "textarea", "select"))
data = [(tag.get("name"), tag.get("value")) for tag in hiddenform.select(selectors)]
url = hiddenform.attrs.get("action")
try:
adfs_response = self.session.post(url, data=data, headers=headers, timeout=5)
except requests.exceptions.ConnectionError as e:
msg_fmt = "Could not fetch account aliases from {} due to an exception. Using cached values!\n {}"
click.secho(msg_fmt.format(url, str(e)), fg="red")
return None
adfs_response.soup = BeautifulSoup(adfs_response.text, "lxml") # type: ignore[attr-defined]
return adfs_response
def fetch_aws_sts_token(
role_arn: str,
principal_arn: str,
assertion: str,
duration_seconds: Optional[int] = 3600,
aws_profile: Optional[str] = None,
) -> Mapping[str, str]:
"""Use the assertion to get an AWS STS token using `assume_role_with_saml`"""
sts = sts_client(aws_profile)
token = sts.assume_role_with_saml(
RoleArn=role_arn,
PrincipalArn=principal_arn,
SAMLAssertion=assertion,
DurationSeconds=duration_seconds,
)
return token
def fetch_aws_sts_token_assume_role(
role_arn: str,
role_session_name: str,
aws_profile: str,
duration_seconds: Optional[int] = 3600,
) -> Mapping[str, str]:
"""Use the assertion to get an AWS STS token using `assume_role_with_saml`"""
sts = sts_client(aws_profile)
try:
token = sts.assume_role(
RoleArn=role_arn,
RoleSessionName=role_session_name,
DurationSeconds=duration_seconds,
)
except ClientError as e:
click.secho(str(e), fg="red")
sys.exit(1)
return token
def sts_client(aws_profile: Optional[str]) -> boto3.Session.client:
"""Generate a boto3 sts client."""
try:
session = boto3.Session(profile_name=aws_profile)
sts = session.client("sts")
except ProfileNotFound as e:
click.secho(str(e), fg="red")
sys.exit(1)
except Exception as e:
# TODO: Proper exception and message
raise e
return sts
| 43.034483
| 110
| 0.634936
|
import os
import re
import sys
from datetime import datetime
from typing import Optional, Mapping
from urllib.parse import urlparse, urlunparse
import boto3 # type: ignore[import]
import click # type: ignore[import]
import requests
from requests_ntlm import HttpNtlmAuth # type: ignore[import]
from bs4 import BeautifulSoup # type: ignore[import]
from botocore.exceptions import ProfileNotFound, ClientError # type: ignore[import]
from sts_auth import utils
from sts_auth.okta import Okta
from sts_auth.config import Config
from sts_auth.utils import logger
class STSAuth(object):
"""Initializes an STS Authenticator.
:param username: Username to authenticate with (required).
:param password: Password to authenticate with (required).
:param credentialsfile: A path to an AWS Credentials file (required).
See https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/setup-credentials.html
for more details.
:param idpentryurl: URL to the IDP Entrypoint.
:param profile: Name of an AWS Profile to automatically fetch credentials for.
:param okta_org: Name of the Okta organization, ex: `my-company`.
:param domain: Domain which your username resides if required.
:param region: Region for AWS to authenticate in.
:param output: Output format, one of: `json`, `text`, `table`.
"""
def __init__(
self,
username: str,
password: str,
credentialsfile: str,
idpentryurl: Optional[str] = None,
profile: Optional[str] = None,
okta_org: Optional[str] = None,
okta_shared_secret: Optional[str] = None,
domain: Optional[str] = None,
region: Optional[str] = None,
output: Optional[str] = None,
vip_access_security_code: Optional[str] = None,
force: Optional[bool] = False,
):
self.credentialsfile = os.path.expanduser(credentialsfile)
self.vip_access_security_code = vip_access_security_code
self.config = Config(
self.credentialsfile,
username=username, # type: ignore[arg-type]
password=password, # type: ignore[arg-type]
domain=domain, # type: ignore[arg-type]
idpentryurl=idpentryurl, # type: ignore[arg-type]
region=region, # type: ignore[arg-type]
output=output, # type: ignore[arg-type]
okta_org=okta_org, # type: ignore[arg-type]
okta_shared_secret=okta_shared_secret, # type: ignore[arg-type]
profile=profile, # type: ignore[arg-type]
)
self.config.load()
self.profile = self.config.profile
self.session = requests.Session()
self.session.headers.update({"content-type": "application/json"})
self.session.auth = HttpNtlmAuth(self.config.domain_user, self.config.password)
def get_saml_response(self, response: Optional[requests.Response] = None) -> requests.Response:
if not response:
logger.debug("No response provided. Fetching IDP Entry URL...")
response = self.session.get(self.config.idpentryurl)
response.soup = BeautifulSoup(response.text, "lxml") # type: ignore[attr-defined]
assertion_pattern = re.compile(r"name=\"SAMLResponse\" value=\"(.*)\"\s*/><noscript>")
assertion = re.search(assertion_pattern, response.text)
if assertion:
# If there is already an assertion in the response body,
# we can attach the parsed assertion to the response object and
# return the whole response for use later.
# return account_map, assertion.group(1)
response.assertion = assertion.group(1) # type: ignore[attr-defined]
return response
logger.debug("No SAML assertion found in response. Attempting to log in...")
login_form = response.soup.find(id="loginForm") # type: ignore[attr-defined]
okta_login = response.soup.find(id="okta-login-container") # type: ignore[attr-defined]
if okta_login:
state_token = utils.get_state_token_from_response(response.text)
if state_token is None:
click.secho("No State Token found in response. Exiting...", fg="red")
sys.exit(1)
okta_client = Okta(
session=self.session,
state_token=state_token,
okta_org=self.config.okta_org,
okta_shared_secret=self.config.okta_shared_secret,
)
okta_response = okta_client.handle_okta_verification(response)
return self.get_saml_response(response=okta_response)
if login_form:
# If there is no assertion, it is possible the user is attempting
# to authenticate from outside the network, so we check for a login
# form in their response.
form_response = self.authenticate_to_adfs_portal(response)
return self.get_saml_response(response=form_response)
else:
msg = "Response did not contain a valid SAML assertion, a valid login form, or request MFA."
click.secho(msg, fg="red")
sys.exit(1)
def generate_payload_from_login_page(self, response: requests.Response) -> Mapping[str, str]:
login_page = BeautifulSoup(response.text, "html.parser")
payload = {}
for input_tag in login_page.find_all(re.compile("(INPUT|input)")):
name = input_tag.get("name", "")
value = input_tag.get("value", "")
logger.debug("Adding value for {!r} to Login Form payload.".format(name))
if "user" in name.lower():
payload[name] = self.config.domain_user
elif "email" in name.lower():
payload[name] = self.config.domain_user
elif "pass" in name.lower():
payload[name] = self.config.password
elif "security_code" in name.lower():
payload[name] = self.vip_access_security_code # type: ignore[assignment]
else:
payload[name] = value
return payload
def build_idp_auth_url(self, response: requests.Response) -> str:
idp_auth_form_submit_url = response.url
login_page = BeautifulSoup(response.text, "html.parser")
for form in login_page.find_all(re.compile("(FORM|form)")):
action = form.get("action")
if action:
parsed_action = urlparse(action)
parsed_idp_url = urlparse(self.config.idpentryurl)
# Fallback to the IDP Entry URL from the config file if the
# form action does not contain a fully defined URL.
# i.e. action='/path/to/something' vs action='http://test.com/path/to/something'
scheme = parsed_action.scheme if parsed_action.scheme else parsed_idp_url.scheme
netloc = parsed_action.netloc if parsed_action.netloc else parsed_idp_url.netloc
url_parts = (
scheme,
netloc,
parsed_action.path,
None,
parsed_action.query,
None,
)
idp_auth_form_submit_url = urlunparse(url_parts)
return idp_auth_form_submit_url
def authenticate_to_adfs_portal(self, response: requests.Response) -> requests.Response:
payload = self.generate_payload_from_login_page(response)
idp_auth_form_submit_url = self.build_idp_auth_url(response)
logger.debug("Posting login data to URL: {}".format(idp_auth_form_submit_url))
login_response = self.session.post(idp_auth_form_submit_url, data=payload, verify=True)
login_response_page = BeautifulSoup(login_response.text, "html.parser")
# Checks for errorText id on page to indicate any errors
login_error_message = login_response_page.find(id="errorText")
# Checks for specific text in a paragraph element to indicate any errors
vip_login_error_message = login_response_page.find(
lambda tag: tag.name == "p" and "Authentication failed" in tag.text
)
if (login_error_message and len(login_error_message.string) > 0) or (
vip_login_error_message and len(vip_login_error_message) > 0
):
msg = "Login page returned the following message. Please resolve this issue before continuing:"
click.secho(msg, fg="red")
error_msg = login_error_message if login_error_message else vip_login_error_message
click.secho(error_msg.string, fg="red")
sys.exit(1)
return login_response
def fetch_aws_account_names(self, response: requests.Response) -> Optional[requests.Response]:
"""Posts ADFS form to get account list response"""
hiddenform = response.soup.find("form", {"name": "hiddenform"}) # type: ignore[attr-defined]
headers = {
"Referer": response.url,
"Content-Type": "application/x-www-form-urlencoded",
}
selectors = ",".join("{}[name]".format(i) for i in ("input", "button", "textarea", "select"))
data = [(tag.get("name"), tag.get("value")) for tag in hiddenform.select(selectors)]
url = hiddenform.attrs.get("action")
try:
adfs_response = self.session.post(url, data=data, headers=headers, timeout=5)
except requests.exceptions.ConnectionError as e:
msg_fmt = "Could not fetch account aliases from {} due to an exception. Using cached values!\n {}"
click.secho(msg_fmt.format(url, str(e)), fg="red")
return None
adfs_response.soup = BeautifulSoup(adfs_response.text, "lxml") # type: ignore[attr-defined]
return adfs_response
def generate_login_url(self, token: Mapping[str, Mapping[str, str]]) -> str:
federation_base_url = "https://signin.aws.amazon.com/federation"
request_params = {
"Action": "getSigninToken",
"SessionDuration": "43200",
"Session": str(
{
"sessionId": token["Credentials"]["AccessKeyId"],
"sessionKey": token["Credentials"]["SecretAccessKey"],
"sessionToken": token["Credentials"]["SessionToken"],
}
),
}
r = self.session.get(federation_base_url, params=request_params)
signin_token = r.json()
login_params = {
"Action": "login",
"Destination": "https://console.aws.amazon.com/",
"SigninToken": signin_token["SigninToken"],
}
request_parameters = requests.compat.urlencode(login_params) # type: ignore[attr-defined]
request_url = "{base_url}?{request_parameters}".format(
base_url=federation_base_url, request_parameters=request_parameters
)
return request_url
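# Illustrative sketch (all values are assumed placeholders): build an
# authenticator, fetch the SAML assertion, then exchange it for temporary STS
# credentials with fetch_aws_sts_token defined below. Not part of the module.
def _example_login():
    auth = STSAuth(
        username="user",
        password="secret",
        credentialsfile="~/.aws/credentials",
        idpentryurl="https://adfs.example.com/adfs/ls/IdpInitiatedSignOn.aspx",
    )
    saml_response = auth.get_saml_response()
    return fetch_aws_sts_token(
        role_arn="arn:aws:iam::123456789012:role/example-role",
        principal_arn="arn:aws:iam::123456789012:saml-provider/example-idp",
        assertion=saml_response.assertion,
    )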
def fetch_aws_sts_token(
role_arn: str,
principal_arn: str,
assertion: str,
duration_seconds: Optional[int] = 3600,
aws_profile: Optional[str] = None,
) -> Mapping[str, str]:
"""Use the assertion to get an AWS STS token using `assume_role_with_saml`"""
sts = sts_client(aws_profile)
token = sts.assume_role_with_saml(
RoleArn=role_arn,
PrincipalArn=principal_arn,
SAMLAssertion=assertion,
DurationSeconds=duration_seconds,
)
return token
def fetch_aws_sts_token_assume_role(
role_arn: str,
role_session_name: str,
aws_profile: str,
duration_seconds: Optional[int] = 3600,
) -> Mapping[str, str]:
"""Use the assertion to get an AWS STS token using `assume_role_with_saml`"""
sts = sts_client(aws_profile)
try:
token = sts.assume_role(
RoleArn=role_arn,
RoleSessionName=role_session_name,
DurationSeconds=duration_seconds,
)
except ClientError as e:
click.secho(str(e), fg="red")
sys.exit(1)
return token
def sts_client(aws_profile: Optional[str]) -> boto3.Session.client:
"""Generate a boto3 sts client."""
try:
session = boto3.Session(profile_name=aws_profile)
sts = session.client("sts")
except ProfileNotFound as e:
click.secho(str(e), fg="red")
sys.exit(1)
except Exception as e:
# TODO: Proper exception and message
raise e
return sts
| 8,351
| 0
| 162
|
f8b6c498aceb5c6cd20a8bd5c69808fe4c7a6870
| 561
|
py
|
Python
|
src/unittest/python/rds_utils_tests.py
|
ImmobilienScout24/rds_log_dog
|
f8df42b78f24856358db15b20675f0a155a02f19
|
[
"MIT"
] | 1
|
2018-01-11T18:36:28.000Z
|
2018-01-11T18:36:28.000Z
|
src/unittest/python/rds_utils_tests.py
|
ImmobilienScout24/rds_log_dog
|
f8df42b78f24856358db15b20675f0a155a02f19
|
[
"MIT"
] | 2
|
2017-01-05T15:45:47.000Z
|
2018-04-16T11:12:01.000Z
|
src/unittest/python/rds_utils_tests.py
|
ImmobilienScout24/rds_log_dog
|
f8df42b78f24856358db15b20675f0a155a02f19
|
[
"MIT"
] | 5
|
2017-01-05T15:39:02.000Z
|
2018-04-16T10:37:44.000Z
|
from __future__ import print_function, absolute_import, division
import unittest2 as unittest
from mock import patch
from rds_log_dog.rds_utils import get_size
| 33
| 65
| 0.741533
|
from __future__ import print_function, absolute_import, division
import unittest2 as unittest
from mock import patch
from rds_log_dog.rds_utils import get_size
class Test(unittest.TestCase):
@patch('rds_log_dog.rds_utils.describe_logfiles_of_instance')
def test_get_size(self, describe_logfiles_of_instance):
describe_logfiles_of_instance.return_value = [
dict(LogFileName='foo', LastWritten='bar', Size=42),
dict(LogFileName='foo2', LastWritten='bar', Size=23)]
self.assertEqual(42, get_size('foo', 'foo'))
| 273
| 102
| 23
|
e7671c5cc516f190fa1376f587270c007db5334e
| 2,747
|
py
|
Python
|
cytopy/tests/test_project.py
|
JANHMS/CytoPy
|
8537d707fa25645b55b4ec1e25fff9f19847fb1b
|
[
"MIT"
] | 41
|
2020-04-08T11:01:28.000Z
|
2022-03-11T17:17:18.000Z
|
cytopy/tests/test_project.py
|
JANHMS/CytoPy
|
8537d707fa25645b55b4ec1e25fff9f19847fb1b
|
[
"MIT"
] | 27
|
2020-04-07T14:59:24.000Z
|
2022-03-01T20:43:34.000Z
|
cytopy/tests/test_project.py
|
JANHMS/CytoPy
|
8537d707fa25645b55b4ec1e25fff9f19847fb1b
|
[
"MIT"
] | 8
|
2020-04-28T15:16:24.000Z
|
2022-03-02T19:02:14.000Z
|
from cytopy.data.project import Project
from cytopy.data.errors import *
from cytopy.tests import assets
import pytest
import os
@pytest.fixture()
| 34.772152
| 104
| 0.697124
|
from cytopy.data.project import Project
from cytopy.data.errors import *
from cytopy.tests import assets
import pytest
import os
@pytest.fixture()
def create_project():
os.mkdir(f"{os.getcwd()}/test_data2")
p = Project(project_id="test", data_directory=f"{os.getcwd()}/test_data2")
p.save()
yield p
p.delete()
def test_create_project_warn_directory():
x = f"{os.getcwd()}/test_data2"
with pytest.warns(UserWarning) as warn_:
Project(project_id="test", data_directory=x)
warning = f"Could not locate data directory at path {x}, all further operations " \
f"will likely resolve in errors as single cell data will not be attainable. Update the " \
f"data directory before continuing using the 'update_data_directory' method."
assert str(warn_.list[0].message) == warning
def test_add_experiment(create_project):
p = create_project
p.add_experiment(experiment_id="test1",
panel_definition=f"{assets.__path__._path[0]}/test_panel.xlsx")
assert len(p.experiments) == 1
p.add_experiment(experiment_id="test2",
panel_definition=f"{assets.__path__._path[0]}/test_panel.xlsx")
assert len(p.experiments) == 2
def test_add_experiment_duplicate_err(create_project):
p = create_project
p.add_experiment(experiment_id="test1",
panel_definition=f"{assets.__path__._path[0]}/test_panel.xlsx")
with pytest.raises(DuplicateExperimentError) as err:
p.add_experiment(experiment_id="test1",
panel_definition=f"{assets.__path__._path[0]}/test_panel.xlsx")
assert str(err.value) == 'Experiment with id test1 already exists!'
def test_list_experiments(create_project):
p = create_project
p.add_experiment(experiment_id="test1",
panel_definition=f"{assets.__path__._path[0]}/test_panel.xlsx")
assert list(p.list_experiments()) == ["test1"]
def test_load_experiment(create_project):
p = create_project
p.add_experiment(experiment_id="test1",
panel_definition=f"{assets.__path__._path[0]}/test_panel.xlsx")
e = p.get_experiment(experiment_id="test1")
assert e.experiment_id == "test1"
def test_add_subject(create_project):
p = create_project
p.add_subject(subject_id="test_subject")
assert len(p.subjects) == 1
def test_list_subjects(create_project):
p = create_project
p.add_subject(subject_id="test_subject")
assert list(p.list_subjects()) == ["test_subject"]
def test_get_subject(create_project):
p = create_project
p.add_subject(subject_id="test_subject")
s = p.get_subject(subject_id="test_subject")
assert s.subject_id == "test_subject"
| 2,384
| 0
| 206
|