hexsha (string) | size (int64) | ext (string) | lang (string) | max_stars_repo_path (string) | max_stars_repo_name (string) | max_stars_repo_head_hexsha (string) | max_stars_repo_licenses (list) | max_stars_count (int64, nullable) | max_stars_repo_stars_event_min_datetime (string, nullable) | max_stars_repo_stars_event_max_datetime (string, nullable) | max_issues_repo_path (string) | max_issues_repo_name (string) | max_issues_repo_head_hexsha (string) | max_issues_repo_licenses (list) | max_issues_count (int64, nullable) | max_issues_repo_issues_event_min_datetime (string, nullable) | max_issues_repo_issues_event_max_datetime (string, nullable) | max_forks_repo_path (string) | max_forks_repo_name (string) | max_forks_repo_head_hexsha (string) | max_forks_repo_licenses (list) | max_forks_count (int64, nullable) | max_forks_repo_forks_event_min_datetime (string, nullable) | max_forks_repo_forks_event_max_datetime (string, nullable) | content (string) | avg_line_length (float64) | max_line_length (int64) | alphanum_fraction (float64) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
19cbf796ad7b5641c3d047fa842ac8998f9be4ac | 2,940 | py | Python | .deprecated/gmanage/io.py | kablekompany/gmanage | deb712a9785f54fc9b442732ac1dab23fb4e391e | [
"MIT"
] | null | null | null | .deprecated/gmanage/io.py | kablekompany/gmanage | deb712a9785f54fc9b442732ac1dab23fb4e391e | [
"MIT"
] | null | null | null | .deprecated/gmanage/io.py | kablekompany/gmanage | deb712a9785f54fc9b442732ac1dab23fb4e391e | [
"MIT"
] | null | null | null | """
Copyright 2020 KableKompany
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
def read(filepath: str, *, create_new: bool = False, default_new: dict = None) -> dict:
    """Reads a JSON file from ::filepath::
    :returns dict"""
    try:
        with open(filepath) as opened:
            return json.load(opened)
    except FileNotFoundError:
        if not create_new:
            raise
        data = default_new or {}
        try:
            write(filepath, data)
        except Exception as fail:
            raise OSError("Failed to create new file.") from fail
        return data
def write(fp: str, data: dict, *, indent: int = 2, rollback: bool = False) -> dict:
    """
    Writes data to a JSON file. Pretty simple actually
    :param fp: the filepath to write to
    :param data: the dict to write
    :param indent: the indent used to pretty-print. 0 to disable.
    :param rollback: whether to keep a backup and roll back to it if writing the new data fails.
    :return: new data [dict]
    """
    if rollback:
        try:
            safe = read(fp)
        except Exception as e:
            raise IOError("Tried to create a rollback point but failed.") from e
    else:
        safe = None
    with open(fp, "w+") as opened:
        try:
            json.dump(data, opened, indent=indent)
            return data
        except Exception as init:
            if safe is not None:
                try:
                    # The failed dump may have left a partial file; rewind and
                    # restore the previous contents through the same handle.
                    opened.seek(0)
                    opened.truncate()
                    json.dump(safe, opened, indent=indent)
                except Exception as safe_fail:
                    raise IOError("Tried to roll back to the previous state, but failed.") from safe_fail
                raise UserWarning("Write rolled back to previous state due to error writing new data!") from init
            raise IOError("Failed to write new data to file.") from init
| 41.408451 | 117 | 0.652381 |
432a1a72c23beff092724859bcb28568bfd0b420 | 313 | py | Python | utils/function.py | thebriss/PyMongoDiscordBot | 998a593aac9d93d4c98c95ce4394043f5e63143b | [
"Apache-2.0"
] | 1 | 2021-05-17T11:04:08.000Z | 2021-05-17T11:04:08.000Z | utils/function.py | thebriss/PyMongoDiscordBot | 998a593aac9d93d4c98c95ce4394043f5e63143b | [
"Apache-2.0"
] | null | null | null | utils/function.py | thebriss/PyMongoDiscordBot | 998a593aac9d93d4c98c95ce4394043f5e63143b | [
"Apache-2.0"
] | null | null | null | import datetime
from simple_chalk import chalk
import os
async def send(type: str, message: str):
print(chalk.yellow(f"[{datetime.datetime.now().strftime('%c')}] ") + chalk.red(f"{type.upper().replace('İ', 'I')} | ") +chalk.green(f"{message}"))
def cls():
os.system('cls' if os.name=='nt' else 'clear') | 31.3 | 150 | 0.651757 |
ff18aa0f1d76ef13c354f8a49b5f61f4c4bd3261 | 1,129 | py | Python | projects/04_loading_saving_data/04_loading_saving_data.py | NJannasch/TechLabs-FlaskIntro | 24fadedd42ba2bfddbc40b0f939a47dfcf85867f | [
"MIT"
] | null | null | null | projects/04_loading_saving_data/04_loading_saving_data.py | NJannasch/TechLabs-FlaskIntro | 24fadedd42ba2bfddbc40b0f939a47dfcf85867f | [
"MIT"
] | null | null | null | projects/04_loading_saving_data/04_loading_saving_data.py | NJannasch/TechLabs-FlaskIntro | 24fadedd42ba2bfddbc40b0f939a47dfcf85867f | [
"MIT"
] | null | null | null | from flask import Flask, jsonify, request
import typing
from database import load_data, save_data
app = Flask(__name__)
my_persons: typing.List = []
@app.route('/persons', methods=['GET'])
def persons():
""" Get all persons """
return jsonify(my_persons)
@app.route('/persons', methods=['POST'])
def create_person():
""" Create a new person """
# Check if all fields are available
if request.json.get('name') and \
request.json.get('location'):
my_persons.append(request.json)
else:
# Data is missing in the request
return jsonify({"error": "Data incomplete"}), 400
save_data('database.json', my_persons)
return jsonify(my_persons), 201
@app.route('/persons/<int:number>', methods=['GET'])
def person_number(number: int):
""" Get the first person """
person: typing.Dict = {}
if len(my_persons) > number:
person = my_persons[number]
return jsonify(person)
if __name__ == '__main__':
# Load dataset before starting the server
my_persons = load_data('database.json')
app.run(host='0.0.0.0', port=8080, debug=True)
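
# Minimal client-side sketch (assumption: the server above is running locally on
# port 8080; the `requests` package is an extra dependency, not used by the app):
#
#   import requests
#   requests.post("http://localhost:8080/persons",
#                 json={"name": "Ada", "location": "London"})
#   print(requests.get("http://localhost:8080/persons").json())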
| 24.543478 | 57 | 0.651904 |
0b09cf71d825ec0deedaedd16caa65b236dbf3d8 | 12,032 | py | Python | tensorflow_federated/python/tensorflow_libs/graph_merge.py | zhihansh/federated-oss | 38cfcb05702ff7297db76d3ccb5f5afef53ca09b | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/tensorflow_libs/graph_merge.py | zhihansh/federated-oss | 38cfcb05702ff7297db76d3ccb5f5afef53ca09b | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/tensorflow_libs/graph_merge.py | zhihansh/federated-oss | 38cfcb05702ff7297db76d3ccb5f5afef53ca09b | [
"Apache-2.0"
] | null | null | null | # Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library of TensorFlow graph-merging functions and associated helpers."""
import collections
import uuid
import tensorflow as tf
from tensorflow_federated.python.tensorflow_libs import graph_spec
# TODO(b/168706001): the last usages of the uuid based implementation is in the
# TFExecutor, which could potentially be moved to
# `uniquify_shared_names_with_suffix`, or this could be updated to delegate to
# that using a uuid suffix.
def uniquify_shared_names(graph_def):
"""Appends unique identifier to any shared names present in `graph`."""
# TODO(b/117428091): Upgrade our TF serialization mechanisms in order to
# unblock using more modern TF compositional constructs, and avoid direct
# proto manipulation as is happening here.
for x in graph_def.node:
shared_name = x.attr.get('shared_name')
if shared_name is not None:
uid = str(uuid.uuid1())[:8].encode('utf-8')
shared_name.s += uid
return graph_def
def uniquify_shared_names_with_suffix(graph_def: tf.compat.v1.GraphDef,
suffix: str) -> tf.compat.v1.GraphDef:
"""Appends unique identifier to any shared names present in `graph`."""
# TODO(b/117428091): Upgrade our TF serialization mechanisms in order to
# unblock using more modern TF compositional constructs, and avoid direct
# proto manipulation as is happening here.
num_empty_shared_names = 0
for x in graph_def.node:
shared_name = x.attr.get('shared_name')
if shared_name is not None:
if not shared_name.s:
# Encountered an empty string shared name, avoid creating a shared name
# that starts with an underscore (not allowed by TF).
shared_name.s = f'empty_{num_empty_shared_names}'.encode('utf-8')
num_empty_shared_names += 1
shared_name.s += b'_' + suffix.encode('utf-8')
return graph_def
def _concat_graphs(graph_def_list, graph_names_list):
"""Imports graphs from `graph_def_list` under the names `graph_names_list`.
`concat_graphs` is important here to isolate the necessary logic to keep
barriers between variables defined in separate graphs but with conflicting
names. In particular, the `shared_names` field can cause these imported
variables to be wired up incorrectly.
Args:
graph_def_list: Python iterable of `tf.compat.v1.GraphDef` objects.
graph_names_list: Parallel Python iterable containing the names under which
we wish to import the `tf.compat.v1.GraphDef`s in `graph_def_list`.
Returns:
An instance of `tf.Graph`, representing the computations in
`graph_def_list` side-by-side in a single new `tf.Graph`. The names of ops
and tensors in this new graph will be the same as those in the old graphs,
but prepended by the appropriate name in `graph_names_list`.
"""
merged_graph = tf.Graph()
for k in range(len(graph_names_list)):
# The GraphDef we are about to import must have its shared name attributes
# set to unique values to avoid variables being wired together incorrectly.
graph_def_to_merge = uniquify_shared_names_with_suffix(
graph_def_list[k], graph_names_list[k])
with merged_graph.as_default():
tf.import_graph_def(graph_def_to_merge, name=graph_names_list[k])
return merged_graph
def concatenate_inputs_and_outputs(arg_list):
"""Concatenates computations in `arg_list` side-by-side into one `tf.Graph`.
`concatenate_inputs_and_outputs` is used to combine multiple computations into
one in the case where none is intended to consume outputs of any other, and we
simply wish to concatenate the inputs and outputs side-by-side into a single
`tf.Graph`.
Args:
arg_list: Python iterable of `graph_spec.GraphSpec` instances, containing
the computations we wish to concatenate side-by-side.
Returns:
A 4-tuple:
merged_graph: An instance of `tf.Graph` representing the concatenated
computations.
init_op_name: A string representing the op in `merged_graph` that runs
any initializers passed in with `arg_list`.
in_name_maps: A Python `list` of `dict`s, representing how names from
`arg_list` map to names in `merged_graph`. That is, for the
`graph_spec.GraphSpec` `x` in index `i` of `arg_list`, the `i`th
element of `in_name_maps` is a dict containing keys the elements of
`x.in_names`, and values the new names of these elements in
`merged_graph`.
out_name_maps: Similar to `in_name_maps`.
"""
if not isinstance(arg_list, collections.abc.Iterable):
raise TypeError('Please pass an iterable to '
'`concatenate_inputs_and_outputs`.')
(graph_def_list, init_op_names_list, in_names_list, out_names_list,
graph_names_list) = _parse_graph_spec_list(arg_list)
merged_graph = _concat_graphs(graph_def_list, graph_names_list)
init_op_name = _get_merged_init_op_name(merged_graph, graph_names_list,
init_op_names_list)
in_name_maps = []
out_name_maps = []
for k in range(len(arg_list)):
in_name_maps.append(
{x: '{}/{}'.format(graph_names_list[k], x) for x in in_names_list[k]})
out_name_maps.append(
{x: '{}/{}'.format(graph_names_list[k], x) for x in out_names_list[k]})
return merged_graph, init_op_name, in_name_maps, out_name_maps
def _get_merged_init_op_name(merged_graph, graph_names_list,
init_op_names_list):
"""Groups init ops and returns name of group."""
merged_init_op_list = []
proposed_name = 'merged_init'
for graph_name, init_op_name in zip(graph_names_list, init_op_names_list):
if init_op_name is None:
continue
else:
merged_init_op_list.append(
merged_graph.get_operation_by_name('{}/{}'.format(
graph_name, init_op_name)))
with merged_graph.as_default():
init_op = tf.group(merged_init_op_list, name=proposed_name)
return init_op.name
def _parse_graph_spec_list(arg_list):
"""Flattens list of `graph_spec.GraphSpec` instances."""
if not all(isinstance(x, graph_spec.GraphSpec) for x in arg_list):
raise TypeError('Please pass an iterable of `graph_spec.GraphSpec`s.')
graph_defs = [x.graph_def for x in arg_list]
init_op_names = [x.init_op for x in arg_list]
in_names = [x.in_names for x in arg_list]
out_names = [x.out_names for x in arg_list]
graph_names = [f'graph_{k}' for k in range(len(arg_list))]
return (graph_defs, init_op_names, in_names, out_names, graph_names)
def compose_graph_specs(graph_spec_list):
"""Composes `graph_spec.GraphSpec` list in order, wiring output of k to input of k+1.
  Notice that due to the semantics of composition (e.g., compose(f1, f2)
  represents first calling f2 on the argument x, then calling f1 on the
  result), we will reverse `graph_spec_list` before wiring inputs and outputs
  together, since `tf.import_graph_def` works in the opposite way; that is, we
  must have tensors to map as inputs to the graph we are importing.
We enforce the invariant that each element of `graph_spec_list` must declare
exactly as many inputs as the next element declares outputs. This removes
any possibility of ambiguity in identifying inputs and outputs of the
resulting composed graph.
Args:
graph_spec_list: Python list or tuple of instances of
`graph_spec.GraphSpec`. Notice not all iterables can work here, since
composition is inherently a noncommutative operation.
Returns:
A four-tuple:
composed_graph: An instance of `tf.Graph` representing outputs of the
elements of `graph_spec_list` wired to the inputs of the next
element. That is, represents the dataflow graphs composed as functions.
init_op_name: A string representing the op in `composed_graph` that runs
any initializers passed in with `arg_list`.
in_name_map: A `dict` mapping the input names of the first element of
`graph_spec_list` to their new names in `composed_graph`.
out_name_map: A `dict` mapping the output names of the last element of
`graph_spec_list` to their new names in `composed_graph`.
Raises:
TypeError: If we are not passed a list or tuple of `graph_spec.GraphSpec`s.
    ValueError: If the `graph_spec.GraphSpec`s passed in do not respect the
      requirement that the number of outputs of element k must match the
      number of inputs to element k+1.
"""
if not isinstance(graph_spec_list, (list, tuple)):
raise TypeError('Please pass a list or tuple to ' '`compose_graph_specs`.')
graph_spec_list = list(reversed(graph_spec_list))
(graph_def_list, init_op_names_list, in_names_list, out_names_list,
graph_names_list) = _parse_graph_spec_list(graph_spec_list)
for out_names, in_names in zip(in_names_list[1:], out_names_list[:-1]):
if len(out_names) != len(in_names):
raise ValueError(
'Attempted to compose graphs with a mismatched number of elements in '
'and out; attempted to pass {} in to {}'.format(out_names, in_names))
def _compose_graphs(graph_def_list, in_names, out_names, graph_names_list):
"""Imports graphs in `graph_def_list` wiring inputs and outputs as declared.
Args:
graph_def_list: Python iterable of `tf.compat.v1.GraphDef` objects.
in_names: Parallel Python iterable whose kth element specifies the input
names in element k from `graph_def_list`.
out_names: Parallel Python iterable whose kth element specifies the output
names in element k from `graph_def_list`.
graph_names_list: Parallel Python iterable containing the names under
which we wish to import the elements from `graph_def_list` into the
newly created `tf.Graph`.
Returns:
An instance of `tf.Graph` containing the composed logic.
"""
with tf.Graph().as_default() as composed_graph:
output_elements = tf.import_graph_def(
uniquify_shared_names_with_suffix(graph_def_list[0],
graph_names_list[0]),
return_elements=out_names[0],
name=graph_names_list[0])
for k in range(1, len(graph_names_list)):
# The GraphDef we are about to import must have its shared name
# attributes set to unique values to avoid variables being wired together
# incorrectly.
graph_def_to_merge = uniquify_shared_names_with_suffix(
graph_def_list[k], graph_names_list[k])
input_map = dict(zip(in_names[k], output_elements))
with composed_graph.as_default():
output_elements = tf.import_graph_def(
graph_def_to_merge,
input_map=input_map,
return_elements=out_names[k],
name=graph_names_list[k])
output_map = dict(zip(out_names[-1], [x.name for x in output_elements]))
return composed_graph, output_map
composed_graph, out_name_map = _compose_graphs(graph_def_list, in_names_list,
out_names_list,
graph_names_list)
in_name_map = {
x: '{}/{}'.format(graph_names_list[0], x) for x in in_names_list[0]
}
merged_init_op_name = _get_merged_init_op_name(composed_graph,
graph_names_list,
init_op_names_list)
return composed_graph, merged_init_op_name, in_name_map, out_name_map
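

# Minimal usage sketch (assumption: run directly under TF2 with the v1
# compatibility APIs, as used elsewhere in this module):
if __name__ == '__main__':
  g = tf.Graph()
  with g.as_default():
    tf.compat.v1.get_variable('v', shape=[2])
  renamed = uniquify_shared_names_with_suffix(g.as_graph_def(), 'copy_0')
  print([node.attr['shared_name'].s
         for node in renamed.node if 'shared_name' in node.attr])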
| 45.06367 | 87 | 0.714927 |
aae4ccdd62b964dfe6d8b6fa0856d9926af945e7 | 4,974 | py | Python | tests/unit/adapters/netbox_api/models/test_netbox_vlan_pre29.py | itdependsnetworks/network-importer | 6d4e8835af0ca28df2fca8d42792156882f30691 | [
"Apache-2.0"
] | null | null | null | tests/unit/adapters/netbox_api/models/test_netbox_vlan_pre29.py | itdependsnetworks/network-importer | 6d4e8835af0ca28df2fca8d42792156882f30691 | [
"Apache-2.0"
] | null | null | null | tests/unit/adapters/netbox_api/models/test_netbox_vlan_pre29.py | itdependsnetworks/network-importer | 6d4e8835af0ca28df2fca8d42792156882f30691 | [
"Apache-2.0"
] | null | null | null | """
(c) 2020 Network To Code
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import yaml
import pytest
import pynetbox
from diffsync.exceptions import ObjectNotFound
from network_importer.adapters.netbox_api.models import NetboxVlanPre29, NetboxDevice
ROOT = os.path.abspath(os.path.dirname(__file__))
FIXTURE_28 = "../fixtures/netbox_28"
def test_vlan_create_from_pynetbox(netbox_api_base):
api = pynetbox.api(url="http://mock", token="1234567890")
data = yaml.safe_load(open(f"{ROOT}/{FIXTURE_28}/vlan_101_no_tag.json"))
pnb = pynetbox.core.response.Record(values=data, api=api, endpoint=1)
item = NetboxVlanPre29.create_from_pynetbox(diffsync=netbox_api_base, obj=pnb, site_name="nyc")
assert isinstance(item, NetboxVlanPre29) is True
assert item.remote_id == 1
assert item.vid == 101
assert item.associated_devices == []
def test_vlan_create_from_pynetbox_with_tags(netbox_api_base):
api = pynetbox.api(url="http://mock", token="1234567890")
data = yaml.safe_load(open(f"{ROOT}/{FIXTURE_28}/vlan_101_tags_01.json"))
pnb = pynetbox.core.response.Record(values=data, api=api, endpoint=1)
netbox_api_base.add(NetboxDevice(name="devA", site_name="nyc", remote_id=30))
item = NetboxVlanPre29.create_from_pynetbox(diffsync=netbox_api_base, obj=pnb, site_name="nyc")
assert isinstance(item, NetboxVlanPre29) is True
assert item.remote_id == 1
assert item.vid == 101
assert item.associated_devices == ["devA"]
# Try again with one additional device in the inventory
netbox_api_base.add(NetboxDevice(name="devB", site_name="nyc", remote_id=31))
item = NetboxVlanPre29.create_from_pynetbox(diffsync=netbox_api_base, obj=pnb, site_name="nyc")
assert isinstance(item, NetboxVlanPre29) is True
assert item.remote_id == 1
assert item.vid == 101
assert item.associated_devices == ["devA", "devB"]
def test_translate_attrs_for_netbox_no_attrs(netbox_api_base):
vlan = NetboxVlanPre29(vid=100, site_name="HQ", remote_id=30)
netbox_api_base.add(vlan)
params = vlan.translate_attrs_for_netbox({})
assert "name" in params
assert params["name"] == "vlan-100"
assert "site" in params
assert params["site"] == 10
assert "tags" not in params
def test_translate_attrs_for_netbox_with_attrs(netbox_api_base):
vlan = NetboxVlanPre29(vid=100, site_name="HQ", remote_id=30)
netbox_api_base.add(vlan)
params = vlan.translate_attrs_for_netbox({"name": "VOICE", "associated_devices": ["dev1", "dev2"]})
assert "name" in params
assert params["name"] == "VOICE"
assert "site" in params
assert params["site"] == 10
assert "tags" in params
assert params["tags"] == ["device=dev1", "device=dev2"]
def test_translate_attrs_for_netbox_missing_site(netbox_api_base):
vlan = NetboxVlanPre29(vid=100, site_name="NOTPRESENT", remote_id=30)
netbox_api_base.add(vlan)
with pytest.raises(ObjectNotFound):
vlan.translate_attrs_for_netbox({})
assert True
def test_update_clean_tags_no_incoming_tags(netbox_api_base):
vlan = NetboxVlanPre29(vid=100, site_name="HQ", remote_id=30)
netbox_api_base.add(vlan)
api = pynetbox.api(url="http://mock", token="1234567890")
data = yaml.safe_load(open(f"{ROOT}/{FIXTURE_28}/vlan_101_tags_01.json"))
pnb = pynetbox.core.response.Record(values=data, api=api, endpoint=1)
params = vlan.translate_attrs_for_netbox({"name": "VOICE"})
clean_params = vlan.update_clean_tags(nb_params=params, obj=pnb)
assert "tags" not in clean_params
def test_update_clean_tags_with_incoming_tags(netbox_api_base):
vlan = NetboxVlanPre29(vid=100, site_name="HQ", remote_id=30)
netbox_api_base.add(vlan)
netbox_api_base.add(NetboxDevice(name="dev1", site_name="HQ", remote_id=32, device_tag_id=12))
netbox_api_base.add(NetboxDevice(name="dev2", site_name="HQ", remote_id=33, device_tag_id=13))
api = pynetbox.api(url="http://mock", token="1234567890")
data = yaml.safe_load(open(f"{ROOT}/{FIXTURE_28}/vlan_101_tags_01.json"))
pnb = pynetbox.core.response.Record(values=data, api=api, endpoint=1)
params = vlan.translate_attrs_for_netbox({"name": "VOICE", "associated_devices": ["dev1", "dev2"]})
clean_params = vlan.update_clean_tags(nb_params=params, obj=pnb)
assert "tags" in clean_params
assert sorted(clean_params["tags"]) == ["device=dev1", "device=dev2", "device=devA", "device=devB", "notadevice"]
| 36.043478 | 117 | 0.735223 |
ed3520b697a4a2ac285e91c3e448cbfbf49cabbc | 2,387 | py | Python | assignments/assignment1/gradient_check.py | lordofprograms/dlcourse_ai | 1b645c25566a6af9e7eefe45b8d3849283af51d1 | [
"MIT"
] | null | null | null | assignments/assignment1/gradient_check.py | lordofprograms/dlcourse_ai | 1b645c25566a6af9e7eefe45b8d3849283af51d1 | [
"MIT"
] | null | null | null | assignments/assignment1/gradient_check.py | lordofprograms/dlcourse_ai | 1b645c25566a6af9e7eefe45b8d3849283af51d1 | [
"MIT"
] | null | null | null | import numpy as np
def numeric_grad_array(f, x, h):
"""
calculating numerical differentiation 2-point formula: (f(x+h) - f(x-h))/2h
source: https://en.wikipedia.org/wiki/Numerical_differentiation
Arguments:
f: function that receives x and computes value and gradient
x: np array, initial point where gradient is checked
h: small change in x to compute numerical gradient
Return:
numpy.nd.array of numerical gradient
"""
dx = np.zeros_like(x)
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
x_plus_h, x_minus_h = x.copy(), x.copy()
x_plus_h[ix] += h
x_minus_h[ix] -= h
dx[ix] = (f(x_plus_h)[0] - f(x_minus_h)[0]) / (2 * h)
it.iternext()
return dx
def check_gradient(f, x, delta=1e-5, tol=1e-4):
'''
Checks the implementation of analytical gradient by comparing
it to numerical gradient using two-point formula
Arguments:
f: function that receives x and computes value and gradient
x: np array, initial point where gradient is checked
delta: step to compute numerical gradient
tol: tolerance for comparing numerical and analytical gradient
Return:
bool indicating whether gradients match or not
'''
assert isinstance(x, np.ndarray)
    assert x.dtype == np.float64  # np.float is deprecated/removed in modern NumPy
orig_x = x.copy()
fx, analytic_grad = f(x)
assert np.all(np.isclose(orig_x, x, tol)), "Functions shouldn't modify input variables"
assert analytic_grad.shape == x.shape
analytic_grad = analytic_grad.copy()
numeric_grad = numeric_grad_array(f, x, h=delta)
# We will go through every dimension of x and compute numeric
# derivative for it
it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
ix = it.multi_index
analytic_grad_at_ix = analytic_grad[ix]
numeric_grad_at_ix = numeric_grad[ix]
if not np.isclose(numeric_grad_at_ix, analytic_grad_at_ix, tol):
print("Gradients are different at %s. Analytic: %2.5f, Numeric: %2.5f" % (ix, analytic_grad_at_ix,
numeric_grad_at_ix))
return False
it.iternext()
print("Gradient check passed!")
return True
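

# Minimal usage sketch (assumption: run as a script; `f` must return a
# (value, gradient) tuple, as `check_gradient` expects):
if __name__ == "__main__":
    def square_sum(x):
        return np.sum(x ** 2), 2 * x

    check_gradient(square_sum, np.array([1.0, -2.0, 3.0]))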
| 33.152778 | 110 | 0.636783 |
978bf530cd99ec6af74a49cb96ff98023d7a15cb | 20,722 | py | Python | tensorflow/contrib/boosted_trees/python/kernel_tests/stats_accumulator_ops_test.py | tianyapiaozi/tensorflow | fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a | [
"Apache-2.0"
] | 71 | 2017-05-25T16:02:15.000Z | 2021-06-09T16:08:08.000Z | tensorflow/contrib/boosted_trees/python/kernel_tests/stats_accumulator_ops_test.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 133 | 2017-04-26T16:49:49.000Z | 2019-10-15T11:39:26.000Z | tensorflow/contrib/boosted_trees/python/kernel_tests/stats_accumulator_ops_test.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 31 | 2018-09-11T02:17:17.000Z | 2021-12-15T10:33:35.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for checking stats accumulator related ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.boosted_trees.python.ops import stats_accumulator_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class StatsAccumulatorScalarTest(test_util.TensorFlowTestCase):
"""Tests for scalar gradients and hessians accumulator."""
  def testSimpleAccumulator(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
with ops.control_dependencies([accumulator._create_op]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[[2, 0], [3, 0]],
gradients=[0.1, 0.3],
hessians=[0.2, 0.4])
op2 = accumulator.add(0, [1], [[2, 0]], [0.1], [0.2])
with ops.control_dependencies([op1, op2]):
num_updates, partition, bucket_ids, grads, hessians = accumulator.flush(
stamp_token=0, next_stamp_token=1)
num_updates, partition, bucket_ids, grads, hessians = sess.run(
[num_updates, partition, bucket_ids, grads, hessians])
result = _AccumulatorResultToDict(partition, bucket_ids, grads, hessians)
self.assertEqual(num_updates, 2)
self.assertEqual(len(result), 2)
      # Key is partition, bucket, dimension
self.assertAllClose(result[(1, 2, 0)], [0.2, 0.4])
self.assertAllClose(result[(2, 3, 0)], [0.3, 0.4])
  def testMultidimensionalAccumulator(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
with ops.control_dependencies([accumulator._create_op]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2, 1],
feature_ids=[[2, 2], [3, 0], [2, 2]],
gradients=[0.1, 0.3, 0.8],
hessians=[0.2, 0.4, -9])
op2 = accumulator.add(0, [2, 1], [[3, 1], [2, 2]], [0.1, 1], [0.2, -1])
with ops.control_dependencies([op1, op2]):
num_updates, partition, bucket_ids, grads, hessians = accumulator.flush(
stamp_token=0, next_stamp_token=1)
num_updates, partition, bucket_ids, grads, hessians = sess.run(
[num_updates, partition, bucket_ids, grads, hessians])
result = _AccumulatorResultToDict(partition, bucket_ids, grads, hessians)
self.assertEqual(num_updates, 2)
self.assertEqual(len(result), 3)
      # Key is partition, bucket, dimension.
self.assertAllClose(result[(1, 2, 2)], [1.9, -9.8])
self.assertAllClose(result[(2, 3, 0)], [0.3, 0.4])
self.assertAllClose(result[(2, 3, 1)], [0.1, 0.2])
def testDropStaleUpdate(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
with ops.control_dependencies([accumulator._create_op]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[[2, 0], [3, 0]],
gradients=[0.1, 0.3],
hessians=[0.2, 0.4])
op2 = accumulator.add(
stamp_token=-1,
partition_ids=[1],
feature_ids=[[2, 0]],
gradients=[0.1],
hessians=[0.2])
with ops.control_dependencies([op1, op2]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=0, next_stamp_token=1)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(num_updates, 1)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2, 0)], [0.1, 0.2])
self.assertAllClose(result[(2, 3, 0)], [0.3, 0.4])
def testSerialize(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
with ops.control_dependencies([accumulator._create_op]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[[2, 0], [3, 0]],
gradients=[0.1, 0.3],
hessians=[0.2, 0.4])
with ops.control_dependencies([op1]):
(stamp_token, num_updates, partition_1, feature_1, grads_1,
hessians_1) = accumulator.serialize()
# Make sure that the accumulator hasn't changed during serialization.
with ops.control_dependencies([stamp_token]):
num_updates_2, partition_2, feature_2, grads_2, hessians_2 = (
accumulator.flush(stamp_token=0, next_stamp_token=1))
(stamp_token, num_updates, partition_1, feature_1, grads_1, hessians_1,
num_updates_2, partition_2, feature_2, grads_2, hessians_2) = sess.run(
[
stamp_token, num_updates, partition_1, feature_1, grads_1,
hessians_1, num_updates_2, partition_2, feature_2, grads_2,
hessians_2
])
result_1 = _AccumulatorResultToDict(partition_1, feature_1, grads_1,
hessians_1)
result_2 = _AccumulatorResultToDict(partition_2, feature_2, grads_2,
hessians_2)
self.assertEqual(num_updates, 1)
self.assertEqual(num_updates_2, 1)
self.assertEqual(len(result_1), 2)
self.assertAllClose(result_1[(1, 2, 0)], [0.1, 0.2])
self.assertAllClose(result_1[(2, 3, 0)], [0.3, 0.4])
self.assertAllEqual(result_1, result_2)
self.assertEqual(0, stamp_token)
def testDeserialize(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
with ops.control_dependencies([accumulator._create_op]):
# These will be deleted due to deserialize call.
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[[2, 0], [3, 1]],
gradients=[0.1, 0.3],
hessians=[0.2, 0.4])
with ops.control_dependencies([op1]):
deserialize = (
accumulator.deserialize(
stamp_token=2,
num_updates=3,
partition_ids=[3, 4],
feature_ids=[[5, 0], [6, 2]],
gradients=[0.4, 0.5],
hessians=[0.6, 0.7]))
with ops.control_dependencies([deserialize]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=2, next_stamp_token=3)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads,
hessians)
self.assertEqual(num_updates, 3)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(3, 5, 0)], [0.4, 0.6])
self.assertAllClose(result[(4, 6, 2)], [0.5, 0.7])
def testMakeSummary(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
partition, feature, grads, hessians = accumulator._make_summary(
partition_ids=[1, 2, 1],
feature_ids=[[2, 0], [3, 1], [2, 0]],
gradients=[0.1, 0.3, 0.1],
hessians=[0.2, 0.4, 0.2])
partition, feature, grads, hessians = sess.run(
[partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2, 0)], [0.2, 0.4])
self.assertAllClose(result[(2, 3, 1)], [0.3, 0.4])
class StatsAccumulatorTensorTest(test_util.TensorFlowTestCase):
"""Tests for tensor gradients and hessians accumulator."""
  def testSimpleAccumulator(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
with ops.control_dependencies([accumulator._create_op]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[[2, 0], [3, 0]],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]], [[0.05, 0.06], [0.07,
0.08]]])
op2 = accumulator.add(
stamp_token=0,
partition_ids=[1],
feature_ids=[[2, 0]],
gradients=[[0.10, 0.11]],
hessians=[[[0.011, 0.022], [0.033, 0.044]]])
with ops.control_dependencies([op1, op2]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=0, next_stamp_token=1)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(num_updates, 2)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2, 0)][0], [0.20, 0.21])
self.assertAllClose(result[(1, 2, 0)][1],
[[0.021, 0.042], [0.063, 0.084]])
self.assertAllClose(result[(2, 3, 0)][0], [0.2, 0.2])
self.assertAllClose(result[(2, 3, 0)][1], [[0.05, 0.06], [0.07, 0.08]])
  def testMultidimensionalAccumulator(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
with ops.control_dependencies([accumulator._create_op]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[[2, 4], [3, 1]],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]], [[0.05, 0.06], [0.07,
0.08]]])
op2 = accumulator.add(
stamp_token=0,
partition_ids=[1],
feature_ids=[[2, 4]],
gradients=[[0.10, 0.11]],
hessians=[[[0.011, 0.022], [0.033, 0.044]]])
with ops.control_dependencies([op1, op2]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=0, next_stamp_token=1)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(num_updates, 2)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2, 4)][0], [0.20, 0.21])
self.assertAllClose(result[(1, 2, 4)][1],
[[0.021, 0.042], [0.063, 0.084]])
self.assertAllClose(result[(2, 3, 1)][0], [0.2, 0.2])
self.assertAllClose(result[(2, 3, 1)][1], [[0.05, 0.06], [0.07, 0.08]])
def testDropStaleUpdate(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
with ops.control_dependencies([accumulator._create_op]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[[2, 5], [3, 0]],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]], [[0.05, 0.06], [0.07,
0.08]]])
op2 = accumulator.add(
stamp_token=-1,
partition_ids=[1],
feature_ids=[[2, 5]],
gradients=[[0.10, 0.11]],
hessians=[[[0.011, 0.022], [0.033, 0.044]]])
with ops.control_dependencies([op1, op2]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=0, next_stamp_token=1)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(num_updates, 1)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2, 5)][0], [0.1, 0.1])
self.assertAllClose(result[(1, 2, 5)][1], [[0.01, 0.02], [0.03, 0.04]])
self.assertAllClose(result[(2, 3, 0)][0], [0.2, 0.2])
self.assertAllClose(result[(2, 3, 0)][1], [[0.05, 0.06], [0.07, 0.08]])
def testSerialize(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
with ops.control_dependencies([accumulator._create_op]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[[2, 0], [3, 0]],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]], [[0.05, 0.06], [0.07,
0.08]]])
with ops.control_dependencies([op1]):
(stamp_token, num_updates_1, partition_1, feature_1, grads_1,
hessians_1) = accumulator.serialize()
# Make sure that the accumulator hasn't changed during serialization.
with ops.control_dependencies([stamp_token]):
num_updates_2, partition_2, feature_2, grads_2, hessians_2 = (
accumulator.flush(stamp_token=0, next_stamp_token=1))
(stamp_token, num_updates_1, partition_1, feature_1, grads_1,
hessians_1, num_updates_2, partition_2, feature_2, grads_2,
hessians_2) = sess.run([
stamp_token, num_updates_1, partition_1, feature_1, grads_1,
hessians_1, num_updates_2, partition_2, feature_2, grads_2,
hessians_2
])
result_1 = _AccumulatorResultToDict(partition_1, feature_1, grads_1,
hessians_1)
result_2 = _AccumulatorResultToDict(partition_2, feature_2, grads_2,
hessians_2)
self.assertEqual(num_updates_1, 1)
self.assertEqual(num_updates_2, 1)
self.assertEqual(len(result_1), 2)
self.assertAllClose(result_1[(1, 2, 0)][0], [0.1, 0.1])
self.assertAllClose(result_1[(1, 2, 0)][1], [[0.01, 0.02], [0.03, 0.04]])
self.assertAllClose(result_1[(2, 3, 0)][0], [0.2, 0.2])
self.assertAllClose(result_1[(2, 3, 0)][1], [[0.05, 0.06], [0.07, 0.08]])
self.assertAllEqual(result_1[1, 2, 0][0], result_2[1, 2, 0][0])
self.assertAllEqual(result_1[1, 2, 0][1], result_2[1, 2, 0][1])
self.assertAllEqual(result_1[2, 3, 0][0], result_2[2, 3, 0][0])
self.assertAllEqual(result_1[2, 3, 0][1], result_2[2, 3, 0][1])
def testDeserialize(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
with ops.control_dependencies([accumulator._create_op]):
# These will be deleted due to deserialize call.
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[[2, 0], [3, 0]],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]], [[0.05, 0.06], [0.07,
0.08]]])
with ops.control_dependencies([op1]):
deserialize = accumulator.deserialize(
stamp_token=2,
num_updates=3,
partition_ids=[3, 4],
feature_ids=[[4, 0], [5, 0]],
# Two values for gradients,
gradients=[[0.3, 0.3], [0.5, 0.5]],
# A 2x2 matrix for each hessian.
hessians=[[[0.03, 0.04], [0.05, 0.06]], [[0.07, 0.08], [0.09,
0.10]]])
with ops.control_dependencies([deserialize]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=2, next_stamp_token=3)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads,
hessians)
self.assertEqual(num_updates, 3)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(3, 4, 0)][0], [0.3, 0.3])
self.assertAllClose(result[(3, 4, 0)][1], [[0.03, 0.04], [0.05, 0.06]])
self.assertAllClose(result[(4, 5, 0)][0], [0.5, 0.5])
self.assertAllClose(result[(4, 5, 0)][1], [[0.07, 0.08], [0.09, 0.10]])
def testMakeSummary(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
partition, feature, grads, hessians = accumulator._make_summary(
partition_ids=[1, 2, 1],
feature_ids=[[2, 0], [3, 2], [2, 0]],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2], [0.10, 0.11]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]], [[0.05, 0.06], [0.07, 0.08]],
[[0.011, 0.022], [0.033, 0.044]]])
partition, feature, grads, hessians = sess.run(
[partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2, 0)][0], [0.20, 0.21])
self.assertAllClose(result[(1, 2, 0)][1],
[[0.021, 0.042], [0.063, 0.084]])
self.assertAllClose(result[(2, 3, 2)][0], [0.2, 0.2])
self.assertAllClose(result[(2, 3, 2)][1], [[0.05, 0.06], [0.07, 0.08]])
def _AccumulatorResultToDict(partition, feature, grads, hessians):
"""Converts the inputs to a dictionary since the ordering changes."""
return {(partition[i], feature[i, 0], feature[i, 1]): (grads[i], hessians[i])
for i in range(len(partition))}
if __name__ == "__main__":
googletest.main()
| 44.950108 | 80 | 0.589518 |
2a8680b89a86be35b0f93ce640455cdef018a2d8 | 2,166 | py | Python | podman/networks/__init__.py | jmguzik/podman-py | 132eb096ac962c759eacfbd88829a0cfbdd56ab9 | [
"Apache-2.0"
] | null | null | null | podman/networks/__init__.py | jmguzik/podman-py | 132eb096ac962c759eacfbd88829a0cfbdd56ab9 | [
"Apache-2.0"
] | null | null | null | podman/networks/__init__.py | jmguzik/podman-py | 132eb096ac962c759eacfbd88829a0cfbdd56ab9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""network provides the network operations for a Podman service"""
import json
import podman.errors as errors
def create(api, name, network):
"""create a network"""
if not isinstance(network, str):
data = json.dumps(network)
else:
data = network
path = '/networks/create?name={}'.format(api.quote(name))
response = api.post(path, params=data, headers={'content-type': 'application/json'})
return json.loads(str(response.read(), 'utf-8'))
def inspect(api, name):
"""inspect a network"""
try:
response = api.get('/networks/{}/json'.format(api.quote(name)))
return json.loads(str(response.read(), 'utf-8'))
except errors.NotFoundError as e:
api.raise_not_found(e, e.response, errors.NetworkNotFound)
def list_networks(api, filters=None):
"""list network useing filter"""
filters_param = {}
if filters:
filters_param = {'filter': filters}
response = api.get('/networks/json', filters_param)
return json.loads(str(response.read(), 'utf-8'))
def remove(api, name, force=None):
"""Remove named/identified image from Podman storage."""
params = {}
path = '/networks/{}'.format(api.quote(name))
if force is not None:
params = {'force': force}
try:
response = api.delete(path, params)
return json.loads(str(response.read(), 'utf-8'))
except errors.NotFoundError as e:
api.raise_not_found(e, e.response, errors.NetworkNotFound)
__all__ = [
"create",
"inspect",
"list_networks",
"remove",
]
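
# Minimal usage sketch (assumption: `api` is the ApiConnection-style client this
# pre-1.0 podman-py expects, exposing get/post/delete/quote as used above):
#
#   import podman
#   with podman.ApiConnection("unix:///run/podman/podman.sock") as api:
#       create(api, "mynet", {"Driver": "bridge"})
#       print(inspect(api, "mynet"))
#       remove(api, "mynet", force=True)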
| 31.391304 | 88 | 0.664358 |
7410e166e565de57a0c51f1e8e050e9695c04ca0 | 2,898 | py | Python | submarine-sdk/pysubmarine/tests/tracking/test_tracking.py | akizminet/submarine | aa6e865f27167a26050d8daa293e0b4f41a144b6 | [
"Apache-2.0"
] | 544 | 2019-10-29T02:35:31.000Z | 2022-03-31T21:22:44.000Z | submarine-sdk/pysubmarine/tests/tracking/test_tracking.py | akizminet/submarine | aa6e865f27167a26050d8daa293e0b4f41a144b6 | [
"Apache-2.0"
] | 545 | 2019-10-29T03:21:38.000Z | 2022-03-30T05:21:15.000Z | submarine-sdk/pysubmarine/tests/tracking/test_tracking.py | akizminet/submarine | aa6e865f27167a26050d8daa293e0b4f41a144b6 | [
"Apache-2.0"
] | 220 | 2019-10-29T05:14:03.000Z | 2022-03-28T07:29:30.000Z | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datetime import datetime
from os import environ
import pytest
import submarine
from submarine.store.database import models
from submarine.store.database.models import SqlExperiment, SqlMetric, SqlParam
from submarine.store.sqlalchemy_store import SqlAlchemyStore
JOB_ID = "application_123456789"
@pytest.mark.e2e
class TestTracking(unittest.TestCase):
def setUp(self):
environ["JOB_ID"] = JOB_ID
submarine.set_db_uri(
"mysql+pymysql://submarine_test:password_test@localhost:3306/submarine_test"
)
self.db_uri = submarine.get_db_uri()
self.store = SqlAlchemyStore(self.db_uri)
# TODO: use submarine.tracking.fluent to support experiment create
with self.store.ManagedSessionMaker() as session:
instance = SqlExperiment(
id=JOB_ID,
experiment_spec='{"value": 1}',
create_by="test",
create_time=datetime.now(),
update_by=None,
update_time=None,
)
session.add(instance)
session.commit()
def tearDown(self):
submarine.set_db_uri(None)
models.Base.metadata.drop_all(self.store.engine)
def test_log_param(self):
submarine.log_param("name_1", "a")
# Validate params
with self.store.ManagedSessionMaker() as session:
params = session.query(SqlParam).options().filter(SqlParam.id == JOB_ID).all()
assert params[0].key == "name_1"
assert params[0].value == "a"
assert params[0].id == JOB_ID
def test_log_metric(self):
submarine.log_metric("name_1", 5)
submarine.log_metric("name_1", 6)
# Validate params
with self.store.ManagedSessionMaker() as session:
metrics = session.query(SqlMetric).options().filter(SqlMetric.id == JOB_ID).all()
assert len(metrics) == 2
assert metrics[0].key == "name_1"
assert metrics[0].value == 5
assert metrics[0].id == JOB_ID
assert metrics[1].value == 6
| 38.131579 | 93 | 0.668737 |
2493379c3dfaaa1e06a3fce186802640ea21f8a2 | 416 | py | Python | tests/tests/models.py | NineteenPeriod/django-bulk-update-or-create | 7729e5c5375f9227044ec476d64b3d24610315ca | [
"MIT"
] | 80 | 2020-07-14T22:39:10.000Z | 2022-03-08T20:44:09.000Z | tests/tests/models.py | NineteenPeriod/django-bulk-update-or-create | 7729e5c5375f9227044ec476d64b3d24610315ca | [
"MIT"
] | 26 | 2020-07-15T15:11:02.000Z | 2022-03-30T06:30:47.000Z | tests/tests/models.py | NineteenPeriod/django-bulk-update-or-create | 7729e5c5375f9227044ec476d64b3d24610315ca | [
"MIT"
] | 15 | 2020-08-07T09:23:02.000Z | 2022-03-04T08:41:03.000Z | from django.db import models
from bulk_update_or_create import BulkUpdateOrCreateQuerySet
class RandomData(models.Model):
objects = BulkUpdateOrCreateQuerySet.as_manager()
uuid = models.IntegerField(unique=True)
value = models.IntegerField(default=0)
data = models.CharField(max_length=200, null=True, blank=True)
def __str__(self):
return f'{self.uuid} - {self.data} - {self.value}'
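

# Minimal usage sketch (assumption: `bulk_update_or_create(objs, update_fields,
# match_field)` is the queryset API provided by the manager above; it can only
# run inside a configured Django project):
#
#   RandomData.objects.bulk_update_or_create(
#       [RandomData(uuid=1, value=10), RandomData(uuid=2, value=20)],
#       ["value"],
#       match_field="uuid",
#   )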
| 29.714286 | 66 | 0.740385 |
f5593946875e010a6720d7dee5c4a61c9ae3472f | 1,930 | py | Python | data/external/repositories/126714/kaggle-avazu-master/script/gbdt_dense.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories/126714/kaggle-avazu-master/script/gbdt_dense.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories/126714/kaggle-avazu-master/script/gbdt_dense.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | #!/usr/bin/env python
# generate dense feature for gbdt
from datetime import datetime
import marshal
id_stat = marshal.load(open("../id_stat"))
# load name data
def load_name_sample(input,isTest):
f = open(input)
y = []
x = []
line = f.readline()
index = 3
if isTest == True:
index = 2
cnt = 0
isValid = False
while True:
line = f.readline().strip()
if not line :
break
fields = line.split(',')
if isTest == False:
label = int(fields[1])
if label == 0:
label = -1
y.append(label)
if isValid==False:
if int(fields[2][4:6]) > 28:
isValid = True
else:
y.append(-1)
cur_x = []
for i in xrange(index,len(fields)):
if i == len(fields)-19:
cur_x.append(id_stat["j_"+fields[i]])
#continue
elif i == len(fields)-20:
#cur_x.append(gbdt_id["i_"+fields[i]])
continue
elif i == len(fields)-7:
cur_x.append(id_stat["v_"+fields[i]])
elif i > len(fields)-7:
cur_x.append(int(fields[i]))
cur_str_x = [str(x) for x in cur_x]
if isTest == True:
print >> gbdt_test,str(y[cnt])+" "+" ".join(cur_str_x)
else:
print >> gbdt_train,str(y[cnt])+" "+" ".join(cur_str_x)
cnt = cnt + 1
if cnt % 1000000 == 0:
print cnt
starttime = datetime.now()
d = {}
gbdt_train = open("../train_dense","w")
gbdt_test = open("../test_dense","w")
load_name_sample('../train_c',False)
load_name_sample('../test_c',True)
gbdt_train.close()
gbdt_test.close()
#learner = field_fm(k,l,t,alpha,beta,max_feature,field_cnt)
endtime = datetime.now()
print (endtime-starttime).seconds | 26.081081 | 68 | 0.504145 |
7023ab17cb8976658e548e9b3f003116fc18d379 | 3,157 | py | Python | visual/visual_temp_compare.py | AdamPaslawski/OurGan_TransformerNLP | 1d9a82793ae2018ba73fd11b0fb6b09b126fba07 | [
"MIT"
] | 13 | 2021-08-15T12:37:37.000Z | 2022-02-22T13:22:28.000Z | visual/visual_temp_compare.py | srijankr/petgen | 28705aff5c6f4ed168ffd208dcb6e3b1e4ed104a | [
"MIT"
] | null | null | null | visual/visual_temp_compare.py | srijankr/petgen | 28705aff5c6f4ed168ffd208dcb6e3b1e4ed104a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
title_dict = {
'NLL_oracle': 'NLL_oracle',
'NLL_gen': 'NLL_gen',
'NLL_div': 'NLL_div',
'nll_oracle': 'nll_oracle',
'nll_div': 'nll_div',
'temp': 'temp',
}
color_list = ['#e74c3c', '#f1c40f', '#1abc9c', '#9b59b6']
def plt_data(data, title, c_id):
pre_x = np.arange(0, 150, 10)
adv_x = np.arange(150, 2000, 40)
x = np.concatenate((pre_x, adv_x))
plt.plot(x, data, color=color_list[c_id], label=title)
# plt.xticks(np.arange(0, 2000, 500))
def get_log_data(filename):
with open(filename, 'r') as fin:
all_lines = fin.read().strip().split('\n')
data_dict = {'NLL_oracle': [], 'NLL_gen': [], 'NLL_div': [], 'temp': []}
for line in all_lines:
items = line.split()
try:
for key in data_dict.keys():
if '>>>' not in items and key in items:
target = items[items.index(key) + 2]
if ',' in target:
target = target[:-1]
data_dict[key].append(float(target))
        except Exception:  # stop at the first malformed log line
break
return data_dict
if __name__ == '__main__':
# log_file_root = '../log/'
log_file_root = 'savefig/figure_log/'
log_file_list = ['catgan_temp1_final', 'catgan_temp5_final', 'relgan_temp1_final', 'relgan_temp5_final']
legend_text = [r'CatGAN ($\tau_{\rm{tar}}$=1)', r'CatGAN ($\tau_{\rm{tar}}$=5)', r'RelGAN ($\tau_{\rm{tar}}$=1)',
r'RelGAN ($\tau_{\rm{tar}}$=5)']
data_name_list = ['NLL_oracle', 'NLL_div']
if_save = True
plt.clf()
plt.figure(figsize=(8, 3.5))
for cur_id, data_name in enumerate(data_name_list):
assert data_name in title_dict.keys(), 'Error data name'
        plt.subplot(1, 2, cur_id + 1)  # 1x2 grid; same as subplot(12*10 + cur_id + 1)
if cur_id == 0:
# plt.title(r"$\rm{NLL}_{\rm{oracle}}$")
plt.ylabel(r"$\rm{NLL}_{\rm{oracle}}$", fontsize=12)
plt.plot([150, 150], [8.3, 9.4], 'k--')
else:
# plt.title(r"$\rm{NLL}_{\rm{div}}$")
plt.ylabel(r"$\rm{NLL}_{\rm{div}}$", fontsize=12)
plt.plot([150, 150], [3.3, 5], 'k--')
plt.xlabel("training iterations", fontsize=12)
color_id = 0
all_data_list = []
for idx, item in enumerate(log_file_list):
log_file = log_file_root + item + '.txt'
# save log file
all_data = get_log_data(log_file)
if 'catgan' in log_file or 'relgan' in log_file:
temp = all_data[title_dict[data_name]]
last = list(np.array(temp)[range(15, 108, 2)])
res = temp[:15] + last
plt_data(res, legend_text[idx], color_id)
else:
plt_data(all_data[title_dict[data_name]], legend_text[idx], color_id)
color_id += 1
plt.legend()
plt.tight_layout()
if if_save:
plt.savefig('savefig/temp_figure.pdf')
plt.show()
| 33.585106 | 118 | 0.51853 |
7d8e75e2d06cbde45047e1f85c1bd6899cb0342e | 3,007 | py | Python | src/test_files.py | bdbaddog/scons-gh-migrate | c76589c83ec00650a2d07dce79fc6dc5ca6465fb | [
"MIT"
] | 1 | 2015-11-04T22:22:10.000Z | 2015-11-04T22:22:10.000Z | src/test_files.py | bdbaddog/scons-gh-migrate | c76589c83ec00650a2d07dce79fc6dc5ca6465fb | [
"MIT"
] | null | null | null | src/test_files.py | bdbaddog/scons-gh-migrate | c76589c83ec00650a2d07dce79fc6dc5ca6465fb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that we have certain important files in our distribution
packages.
Note that this is a packaging test, not a functional test, so the
name of this script doesn't end in *Tests.py.
"""
import os
import os.path
import re
import TestSCons
test = TestSCons.TestSCons()
try:
cwd = os.environ['SCONS_CWD']
except KeyError:
cwd = os.getcwd()
def build_path(*args):
return os.path.join(cwd, 'build', *args)
build_scons_tar_gz = build_path('unpack-tar-gz', 'scons-'+test.scons_version)
build_scons_zip = build_path('unpack-zip', 'scons-'+test.scons_version)
build_local_tar_gz = build_path('test-local-tar-gz')
build_local_zip = build_path('test-local-zip')
scons_files = [
'CHANGES.txt',
'LICENSE.txt',
'README.txt',
'RELEASE.txt',
]
local_files = [
'scons-LICENSE',
'scons-README',
]
# Map each directory to check (dictionary keys) to the list of
# files that must exist inside it.
check = {
build_scons_tar_gz : scons_files,
build_scons_zip : scons_files,
build_local_tar_gz : local_files,
build_local_zip : local_files,
}
missing = []
no_result = []
for directory, check_list in check.items():
if os.path.exists(directory):
for c in check_list:
f = os.path.join(directory, c)
if not os.path.isfile(f):
missing.append(f)
else:
no_result.append(directory)
if missing:
print "Missing the following files:\n"
print "\t" + "\n\t".join(missing)
test.fail_test(1)
if no_result:
print "Cannot check files, the following have apparently not been built:"
print "\t" + "\n\t".join(no_result)
test.no_result(1)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 28.367925 | 78 | 0.715663 |
7885acd08e0a01995f09071e39749097e29e2800 | 1,260 | py | Python | Lib/site-packages/sphinx/builders/htmlhelp.py | nemarugommula/ecommerce | 60185e79655fbaf0fcad9e877a886fe9eb3c4451 | [
"bzip2-1.0.6"
] | 3 | 2020-01-04T16:46:59.000Z | 2020-10-09T03:04:31.000Z | Lib/site-packages/sphinx/builders/htmlhelp.py | nemarugommula/ecommerce | 60185e79655fbaf0fcad9e877a886fe9eb3c4451 | [
"bzip2-1.0.6"
] | 10 | 2021-06-16T20:48:32.000Z | 2021-10-04T18:22:02.000Z | Lib/site-packages/sphinx/builders/htmlhelp.py | nemarugommula/ecommerce | 60185e79655fbaf0fcad9e877a886fe9eb3c4451 | [
"bzip2-1.0.6"
] | 2 | 2019-11-02T08:03:09.000Z | 2020-06-29T14:52:15.000Z | """
sphinx.builders.htmlhelp
~~~~~~~~~~~~~~~~~~~~~~~~
Build HTML help support files.
Parts adapted from Python's Doc/tools/prechm.py.
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import warnings
from typing import Any, Dict
from sphinxcontrib.htmlhelp import (
chm_locales, chm_htmlescape, HTMLHelpBuilder, default_htmlhelp_basename
)
from sphinx.application import Sphinx
from sphinx.deprecation import RemovedInSphinx40Warning, deprecated_alias
deprecated_alias('sphinx.builders.htmlhelp',
{
'chm_locales': chm_locales,
'chm_htmlescape': chm_htmlescape,
'HTMLHelpBuilder': HTMLHelpBuilder,
'default_htmlhelp_basename': default_htmlhelp_basename,
},
RemovedInSphinx40Warning)
def setup(app: Sphinx) -> Dict[str, Any]:
warnings.warn('sphinx.builders.htmlhelp has been moved to sphinxcontrib-htmlhelp.',
RemovedInSphinx40Warning)
app.setup_extension('sphinxcontrib.htmlhelp')
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| 29.302326 | 87 | 0.647619 |
b78136579ae102afc707780890c1d45e0f975a14 | 5,466 | py | Python | src/utils/livelossplot/generic_plot.py | yohann84L/faster_rcnn_test_case | c960790629462a94c1934c8efc59e494d392410c | [
"MIT"
] | null | null | null | src/utils/livelossplot/generic_plot.py | yohann84L/faster_rcnn_test_case | c960790629462a94c1934c8efc59e494d392410c | [
"MIT"
] | null | null | null | src/utils/livelossplot/generic_plot.py | yohann84L/faster_rcnn_test_case | c960790629462a94c1934c8efc59e494d392410c | [
"MIT"
] | null | null | null | from __future__ import division
import math
from .core import draw_plot, print_extrema, not_inline_warning, MATPLOTLIB_TARGET, NEPTUNE_TARGET
from collections import OrderedDict
def _is_unset(metric):
return metric is None or math.isnan(metric) or math.isinf(metric)
class PlotLosses():
def __init__(self,
figsize=None,
cell_size=(6, 4),
dynamic_x_axis=False,
max_cols=2,
max_epoch=None,
metric2title={},
series_fmt={'training': '{}', 'validation': 'val_{}'},
validation_fmt="val_{}",
plot_extrema=True,
skip_first=2,
extra_plots=[],
fig_path=None,
tensorboard_dir=None,
target=MATPLOTLIB_TARGET):
self.figsize = figsize
self.cell_size = cell_size
self.dynamic_x_axis = dynamic_x_axis
self.max_cols = max_cols
self.max_epoch = max_epoch
self.metric2title = metric2title
self.series_fmt = series_fmt
if validation_fmt is not None:
# backward compatibility
self.series_fmt['validation'] = validation_fmt
self.logs = None
self.base_metrics = None
self.metrics_extrema = None
self.plot_extrema = plot_extrema
self.skip_first = skip_first
self.target = target
self._validate_target()
if target == MATPLOTLIB_TARGET:
not_inline_warning()
self.fig_path = fig_path
if tensorboard_dir:
from .tensorboard import TensorboardLogger
self.tensorboard_logger = TensorboardLogger(tensorboard_dir)
else:
self.tensorboard_logger = None
self.set_max_epoch(max_epoch)
self.extra_plots = extra_plots
self.global_step = 0
def set_max_epoch(self, max_epoch):
self.max_epoch = max_epoch if not self.dynamic_x_axis else None
def set_metrics(self, metrics):
self.base_metrics = metrics
if self.plot_extrema:
self.metrics_extrema = {
ftm.format(metric): {
'min': float('inf'),
'max': -float('inf'),
}
for metric in metrics
for ftm in list(self.series_fmt.values())
}
if self.figsize is None:
self.figsize = (
self.max_cols * self.cell_size[0],
((len(self.base_metrics) + 1) // self.max_cols + 1) * self.cell_size[1]
)
self.logs = []
def _update_extrema(self, log):
for metric, value in log.items():
if metric != "_i":
try:
extrema = self.metrics_extrema[metric]
except KeyError:
self.metrics_extrema[metric] = {
'min': float('inf'),
'max': -float('inf'),
}
extrema = self.metrics_extrema[metric]
if _is_unset(extrema['min']) or value < extrema['min']:
extrema['min'] = float(value)
if _is_unset(extrema['max']) or value > extrema['max']:
extrema['max'] = float(value)
def _get_metric(self, log_metric):
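        # try the longest format prefixes first so 'val_{}' matches before the bare '{}'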
for format_string in reversed(sorted(list(self.series_fmt.values()), key=len)):
if log_metric.startswith(format_string.replace('{}', '')):
return log_metric[len(format_string.replace('{}', '')):]
def update(self, log, step=1):
self.global_step += step
if self.logs is None:
self.set_metrics(list(OrderedDict.fromkeys([self._get_metric(log_metric) for log_metric in log.keys()])))
log["_i"] = self.global_step
self.logs.append(log)
if self.tensorboard_logger:
self.tensorboard_logger.log_logs(log, self.global_step)
if self.plot_extrema:
self._update_extrema(log)
def draw(self):
if self.target == MATPLOTLIB_TARGET:
draw_plot(self.logs, self.base_metrics,
figsize=self.figsize,
max_epoch=self.max_epoch,
max_cols=self.max_cols,
series_fmt=self.series_fmt,
metric2title=self.metric2title,
skip_first=self.skip_first,
extra_plots=self.extra_plots,
fig_path=self.fig_path)
if self.metrics_extrema:
print_extrema(self.logs,
self.base_metrics,
self.metrics_extrema,
series_fmt=self.series_fmt,
metric2title=self.metric2title)
if self.target == NEPTUNE_TARGET:
from .neptune_integration import neptune_send_plot
neptune_send_plot(self.logs)
    def close(self):
        if self.tensorboard_logger:
            self.tensorboard_logger.close()
def _validate_target(self):
assert isinstance(self.target, str), \
'target must be str, got "{}" instead.'.format(type(self.target))
if self.target != MATPLOTLIB_TARGET and self.target != NEPTUNE_TARGET:
raise ValueError(
'Target must be "{}" or "{}", got "{}" instead.'.format(MATPLOTLIB_TARGET, NEPTUNE_TARGET, self.target)) | 37.958333 | 120 | 0.553055 |
812198805c3ebfcaa612c54b249dfc2196cbc87e | 513 | py | Python | care/facility/migrations/0117_patientsample_icmr_category.py | gigincg/care | 07be6a7982b5c46a854e3435a52662f32800c8ae | [
"MIT"
] | 189 | 2020-03-17T17:18:58.000Z | 2022-02-22T09:49:45.000Z | care/facility/migrations/0117_patientsample_icmr_category.py | gigincg/care | 07be6a7982b5c46a854e3435a52662f32800c8ae | [
"MIT"
] | 598 | 2020-03-19T21:22:09.000Z | 2022-03-30T05:08:37.000Z | care/facility/migrations/0117_patientsample_icmr_category.py | gigincg/care | 07be6a7982b5c46a854e3435a52662f32800c8ae | [
"MIT"
] | 159 | 2020-03-19T18:45:56.000Z | 2022-03-17T13:23:12.000Z | # Generated by Django 2.2.11 on 2020-06-15 15:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('facility', '0116_facility_pincode'),
]
operations = [
migrations.AddField(
model_name='patientsample',
name='icmr_category',
field=models.IntegerField(choices=[(0, 'Cat 0'), (10, 'Cat 1'), (20, 'Cat 2'), (30, 'Cat 3'), (40, 'Cat 4'), (50, 'Cat 5a'), (60, 'Cat 5b')], default=0),
),
]
| 27 | 165 | 0.569201 |
c138e4cbdd70019b43ea127c277a11fa7ca3b5d5 | 10,141 | py | Python | emu101asm/assembler.py | en0/emu101 | c0c28fc459bb839bd54a593b36b64e7d547f692d | [
"MIT"
] | null | null | null | emu101asm/assembler.py | en0/emu101 | c0c28fc459bb839bd54a593b36b64e7d547f692d | [
"MIT"
] | null | null | null | emu101asm/assembler.py | en0/emu101 | c0c28fc459bb839bd54a593b36b64e7d547f692d | [
"MIT"
] | null | null | null | import re
from itertools import count
from io import FileIO
from enum import IntFlag
from typing import NamedTuple
_re_label = re.compile(r"^(\w+):(.*)")
_re_op = re.compile(r"^(?:(?P<dst>[a-z0-9]+)(?:,(?P<dst_b>[a-z0-9]+))?)=(?P<src>[a-z0-9+@! ]+)(?:\?(?:(?P<cond>[a-z]+)(?:,(?P<cond_src>[a-z0-9]+))?))?")
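# bit 15 of the instruction word selects the memory direction: 0 = read, 1 = write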
io_map = {
"r": 0b0000000000000000,
"w": 0b1000000000000000,
}
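# ALU operation selector, occupying bits 8-12 of the instruction word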
compute_map = {
"sub d0": 0b0000000000000,
"sub d1": 0b0000100000000,
"sub d2": 0b0001000000000,
"d0": 0b0001100000000,
"add d0": 0b0010000000000,
"add d1": 0b0010100000000,
"add d2": 0b0011000000000,
"d1": 0b0011100000000,
"and d0": 0b0100000000000,
"and d1": 0b0100100000000,
"and d2": 0b0101000000000,
"d2": 0b0101100000000,
"or d0": 0b0110000000000,
"or d1": 0b0110100000000,
"or d2": 0b0111000000000,
"shl": 0b0111100000000,
"xor d0": 0b1000000000000,
"xor d1": 0b1000100000000,
"xor d2": 0b1001000000000,
"ip": 0b1001100000000,
"inc d0": 0b1010000000000,
"inc d1": 0b1010100000000,
"inc d2": 0b1011000000000,
"sp": 0b1011100000000,
"dec d0": 0b1100000000000,
"dec d1": 0b1100100000000,
"dec d2": 0b1101000000000,
"dp": 0b1101100000000,
"not d0": 0b1110000000000,
"not d1": 0b1110100000000,
"not d2": 0b1111000000000,
"shr": 0b1111100000000,
}
address_map = {
"dp": 0b000000000000000,
"sp": 0b010000000000000,
"dp+d0": 0b100000000000000,
"sp+d0": 0b110000000000000,
"default": 0b000000000000000 ,
}
condition_map = {
"gt": 0b100,
"ge": 0b110,
"eq": 0b010,
"le": 0b011,
"lt": 0b001,
"ne": 0b101,
"z": 0b010,
"nz": 0b101,
"true": 0b111,
"false": 0b000,
"default": 0b111,
}
dest_map = {
"d0": 0b000000,
"d1": 0b001000,
"d2": 0b010000,
"ip": 0b100000,
"sp": 0b101000,
"dp": 0b110000,
"default": 0b111000,
}
source_map = {
"alu": 0b01000000,
"data": 0b10000000,
"!": 0b11000000,
"@": 0b11000000,
"default": 0b00000000,
}
class AssemblyBytes(NamedTuple):
bytecode: bytes
opcode: str
line_no: int
has_imm: bool
class CompileError(RuntimeError):
def __init__(self, line_no: int, label: str, symbol: str, info: str):
self.line_no = line_no
self.label = label
self.symbol = symbol
self.info = info
class DecodingError(RuntimeError):
def __init__(self, info):
self.info = info
class Assembler:
def __init__(self, in_fp: FileIO, out_fp: FileIO, prog_offset=0x0200, ram_offset=0xf000):
self._ram = count(prog_offset)
self._prog = ram_offset
self._offset = ram_offset
self._fp = in_fp
self._out = out_fp
self._refs = {}
self._assembled_bytes = []
self._pending_refs = {}
def _iter(self):
last_label = None
last_label_i = 0
for line_no, line in enumerate(self._fp):
line = line.rstrip("\n").strip(" ")
if line.startswith("#"):
continue
elif line == "":
continue
lb_match = _re_label.match(line)
if lb_match:
lb, op, *_ = lb_match.groups()
last_label = lb
last_label_i = 0
yield line_no, lb.lower(), op.strip(" ").lower()
elif last_label:
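                # unlabeled lines after a label get derived names like "label+1",
                # so every instruction address remains referenceable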
last_label_i += 1
yield line_no, f"{last_label}+{last_label_i}", line.lower()
else:
yield line_no, None, line.lower()
def _get_ref(self, label):
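        # a label that is never defined in the source is allocated the next
        # free address from the counter, effectively reserving a variable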
if label not in self._refs:
self._add_label(label, next(self._ram))
return self._refs[label]
def _add_label(self, label, offset=None):
if offset is None:
self._refs[label] = len(self._assembled_bytes)+self._prog
else:
self._refs[label] = offset
def _decode_cond(self, cond, cond_src, **kwargs):
if cond is None:
return condition_map["default"]
if cond in condition_map:
code = condition_map[cond]
else:
raise DecodingError("Unknown Conditional")
if cond_src is not None and cond_src in compute_map:
code |= compute_map[cond_src]
elif cond_src is not None:
raise DecodingError("Unknown Source or Computation in Conditional")
return code
def _decode_src(self, src, **kwargs):
code, imm = 0 | source_map["default"], None
if src.startswith("!0x"):
imm = int(src[3:], 16)
code = source_map["!"]
elif src.startswith("!0b"):
imm = int(src[3:], 2)
code = source_map["!"]
elif src.startswith("!"):
imm = int(src[1:], 10)
code = source_map["!"]
elif src.startswith("@"):
imm = src[1:]
code = source_map["@"]
elif src == "data":
code = source_map["data"]
elif src == "stack":
code = source_map["data"]
elif src in compute_map:
code = compute_map[src] | source_map["alu"]
else:
raise DecodingError("Unknown Source or Computation")
return code, imm
def _decode_dst(self, dst, dst_b, **kwargs):
def get_dest(d):
try:
return dest_map[d]
except KeyError as ex:
raise DecodingError("Unknown Destination")
code = dest_map["default"]
if dst_b is None and dst == 'data':
code = io_map["w"] | dest_map["default"] | address_map["dp"]
elif dst_b is None and dst == 'stack':
code = io_map["w"] | dest_map["default"] | address_map["sp"]
elif dst_b is None:
code = get_dest(dst)
elif dst == dst_b:
raise DecodingError("Duplicate Destination Error")
elif dst_b == 'data':
code = io_map["w"] | get_dest(dst) | address_map["dp"]
elif dst_b == 'stack':
code = io_map["w"] | get_dest(dst) | address_map["sp"]
elif dst == 'data':
code = io_map["w"] | get_dest(dst_b) | address_map["dp"]
elif dst == 'stack':
code = io_map["w"] | get_dest(dst_b) | address_map["sp"]
else:
raise DecodingError("Unknown Destination Error")
return code
def _append_bytes(self, code: int, imm, opcode, lineno):
self._assembled_bytes.append(AssemblyBytes(
bytecode=code.to_bytes(2, "big"),
opcode=opcode,
line_no=lineno,
has_imm=imm is not None
))
if isinstance(imm, int):
self._assembled_bytes.append(AssemblyBytes(
bytecode=imm.to_bytes(2, "big"),
opcode=opcode,
line_no=lineno,
has_imm=False
))
elif isinstance(imm, str):
self._pending_refs[len(self._assembled_bytes)] = imm
self._assembled_bytes.append(AssemblyBytes(
bytecode=None,
opcode=opcode,
line_no=lineno,
has_imm=False
))
def _set_bytes(self, val: int, offset: int):
ab = self._assembled_bytes[offset]
self._assembled_bytes[offset] = AssemblyBytes(
bytecode=val.to_bytes(2, 'big'),
opcode=ab.opcode,
line_no=ab.line_no,
has_imm=ab.has_imm,
)
def assemble(self):
try:
for line_no, label, op in self._iter():
try:
self._add_label(label)
code = 0b0000000000000000
imm = None
op_match = _re_op.match(op)
if op == "hlt":
code = 0b1111111111111111
elif op == "nop" or op == "noop":
code = 0b0000000000000000
elif op == "brk":
code = 0b0101010101010101
elif op_match:
c = op_match.groupdict()
code |= self._decode_dst(**c)
src_code, imm = self._decode_src(**c)
code |= src_code
code |= self._decode_cond(**c)
else:
raise CompileError(line_no, label, op, "Syntax Error")
except DecodingError as ex:
raise CompileError(line_no, label, op, ex.info) from ex
except CompileError as ex:
raise
except Exception as ex:
raise CompileError(line_no, label, op, "Unknown Error") from ex
else:
self._append_bytes(code, imm, op, line_no)
except CompileError as ex:
print(f"{ex.info}\nLine: {ex.line_no}, Symbol: {ex.symbol}")
return
self._fulfill_pending_refs()
self._write_assembly()
def _fulfill_pending_refs(self):
for offset, label in self._pending_refs.items():
addr = self._get_ref(label)
self._set_bytes(addr, offset)
def _write_assembly(self):
assembly_bytes = enumerate(iter(self._assembled_bytes))
try:
while True:
i, ab = next(assembly_bytes)
self._out.write(ab.bytecode)
if ab.has_imm:
_, imm = next(assembly_bytes)
self._out.write(imm.bytecode)
print("{:04x}: {:016b} {:04x} {}".format(
i + self._prog,
int.from_bytes(ab.bytecode, "big"),
int.from_bytes(imm.bytecode, "big"),
ab.opcode,
))
else:
print("{:04x}: {:016b} ---- {}".format(
i + self._prog,
int.from_bytes(ab.bytecode, "big"),
ab.opcode,
))
except StopIteration:
pass
| 31.203077 | 151 | 0.523124 |
058f85dbd0a1d47529223499b1f298c9094ffbd8 | 5,529 | py | Python | st2tests/integration/mistral/test_wiring.py | saucetray/st2 | 8f507d6c8d9483c8371e386fe2b7998596856fd7 | [
"Apache-2.0"
] | 2 | 2021-08-04T01:04:06.000Z | 2021-08-04T01:04:08.000Z | st2tests/integration/mistral/test_wiring.py | saucetray/st2 | 8f507d6c8d9483c8371e386fe2b7998596856fd7 | [
"Apache-2.0"
] | 1 | 2022-03-31T03:53:22.000Z | 2022-03-31T03:53:22.000Z | st2tests/integration/mistral/test_wiring.py | saucetray/st2 | 8f507d6c8d9483c8371e386fe2b7998596856fd7 | [
"Apache-2.0"
] | 1 | 2019-10-11T14:42:28.000Z | 2019-10-11T14:42:28.000Z | # Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import shutil
import tempfile
import eventlet
from integration.mistral import base
from six.moves import range
from st2common.constants import action as action_constants
class WiringTest(base.TestWorkflowExecution):
temp_dir_path = None
def setUp(self):
super(WiringTest, self).setUp()
        # Create a temporary path used by the tests (a file here; some tests may replace it with a directory)
_, self.temp_dir_path = tempfile.mkstemp()
os.chmod(self.temp_dir_path, 0o755) # nosec
def tearDown(self):
if self.temp_dir_path and os.path.exists(self.temp_dir_path):
if os.path.isdir(self.temp_dir_path):
shutil.rmtree(self.temp_dir_path)
else:
os.remove(self.temp_dir_path)
def test_basic_workflow(self):
ex = self._execute_workflow('examples.mistral-basic', {'cmd': 'date'})
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
self.assertIn('stdout', ex.result)
self.assertEqual(len(ex.result.get('tasks', [])), 1)
def test_basic_reverse_workflow(self):
ex = self._execute_workflow('examples.mistral-reverse-basic', {'cmd': 'date'})
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
self.assertIn('stdout', ex.result)
self.assertEqual(len(ex.result.get('tasks', [])), 1)
def test_reverse_workflow_with_requires(self):
params = {'question': 'life universe everything'}
ex = self._execute_workflow('examples.mistral-reverse-requires', params)
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
self.assertIn('answer', ex.result)
self.assertEqual(len(ex.result.get('tasks', [])), 5)
def test_basic_workbook(self):
ex = self._execute_workflow('examples.mistral-workbook-basic', {'cmd': 'date'})
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
self.assertIn('stdout', ex.result)
self.assertEqual(len(ex.result.get('tasks', [])), 1)
def test_complex_workbook_with_yaql(self):
params = {'vm_name': 'demo1'}
ex = self._execute_workflow('examples.mistral-workbook-complex', params)
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
self.assertIn('vm_id', ex.result)
self.assertEqual(len(ex.result.get('tasks', [])), 8)
def test_complex_workbook_with_jinja(self):
params = {'vm_name': 'demo2'}
ex = self._execute_workflow('examples.mistral-jinja-workbook-complex', params)
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
self.assertIn('vm_id', ex.result)
self.assertEqual(len(ex.result.get('tasks', [])), 8)
def test_complex_workbook_subflow_actions(self):
params = {'subject': 'st2', 'adjective': 'cool'}
ex = self._execute_workflow('examples.mistral-workbook-subflows', params)
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
self.assertIn('tagline', ex.result)
self.assertEqual(ex.result['tagline'], 'st2 is cool!')
self.assertEqual(len(ex.result.get('tasks', [])), 2)
def test_with_items(self):
params = {'cmd': 'date', 'count': 8}
ex = self._execute_workflow('examples.mistral-repeat', params)
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
self.assertEqual(len(ex.result['result']), params['count'])
self.assertEqual(len(ex.result.get('tasks', [])), 1)
def test_concurrent_load(self):
wf_name = 'examples.mistral-workbook-complex'
wf_params = {'vm_name': 'demo1'}
exs = [self._execute_workflow(wf_name, wf_params) for i in range(3)]
eventlet.sleep(20)
for ex in exs:
e = self._wait_for_completion(ex)
self.assertEqual(e.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
self.assertIn('vm_id', e.result)
self.assertEqual(len(e.result.get('tasks', [])), 8)
def test_execution_failure(self):
ex = self._execute_workflow('examples.mistral-basic', {'cmd': 'foo'})
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_FAILED)
def test_invoke_from_action_chain(self):
ex = self._execute_workflow('examples.invoke-mistral-with-jinja', {'cmd': 'date'})
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
| 42.530769 | 90 | 0.687828 |
c04700abe7e0f1c03dc65060f3481c69759df459 | 15,095 | py | Python | main.py | A-Katopodis/Neural-Network-Implemenation | 4e28f695fba57c63aa9e3d5b7ac6036a341d97d5 | [
"Apache-2.0"
] | null | null | null | main.py | A-Katopodis/Neural-Network-Implemenation | 4e28f695fba57c63aa9e3d5b7ac6036a341d97d5 | [
"Apache-2.0"
] | null | null | null | main.py | A-Katopodis/Neural-Network-Implemenation | 4e28f695fba57c63aa9e3d5b7ac6036a341d97d5 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import time
import _pickle as cPickle
# Function used for loading the CIFAR10 dataset
def unpickle(file):
    with open(file, 'rb') as fo:
        # the CIFAR-10 batches are Python 2 pickles; latin1 decoding keeps the keys as str
        batch = cPickle.load(fo, encoding='latin1')
    return batch
# Compute the softmax function of the output
def softmax(y):
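    # subtract each row's max before exponentiating, for numerical stability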
max_of_rows = np.max(y, 1)
m = np.array([max_of_rows, ] * y.shape[1]).T
y = y - m
y = np.exp(y)
return y / (np.array([np.sum(y, 1), ] * y.shape[1])).T
# Returns the outputs of the hidden level
def get_z(X, w1):
a = X.dot(w1.T)
z = activationFunction(a)
    # Z is N x M here (w1 has M rows), so prepend a column of ones for the bias
z = np.hstack((np.ones((z.shape[0], 1)), z))
return z
# Returns the cost function and the gradients for w1,w2
def compute_gradients_cost(T, X, w1, w2, lamda):
Z = get_z(X,w1)
# The result of Z*w2
z_w2 = Z.dot(w2.T)
Y = softmax(z_w2)
# Compute the cost function to check convergence
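    # (log-likelihood via the log-sum-exp trick, minus an L2 penalty on both weight matrices)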
max_error = np.max(z_w2, axis=1)
Ew = np.sum(T * z_w2) - np.sum(max_error) - \
np.sum(np.log(np.sum(np.exp(z_w2 - np.array([max_error, ] * z_w2.shape[1]).T), 1))) - \
(0.5 * lamda) * (np.sum(np.square(w1)) + np.sum(np.square(w2)))
# Calculate gradient for w2
grad_w2 = (T-Y).T.dot(Z) - lamda * w2
    # We drop the bias column since z0 does not depend on w1
w2_temp = np.copy(w2[:, 1:])
# This is the result of the derivative of the activation function
der = activationFunctionDerivative(X.dot(w1.T))
temp = (T-Y).dot(w2_temp) * der
# Calculate gradient for w1
grad_w1 = temp.T.dot(X) - lamda*w1
return Ew, grad_w1, grad_w2
def train_neural_network(T, X, lamda, w1_init, w2_init, options):
"""inputs :
t: N x 1 binary output data vector indicating the two classes
X: N x (D+1) input data vector with ones already added in the first column
lamda: the positive regularizarion parameter
winit: D+1 dimensional vector of the initial values of the parameters
options: options(1) is the maximum number of iterations
options(2) is the tolerance
options(3) is the learning rate eta
outputs :
w: the trained D+1 dimensional vector of the parameters"""
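    # illustrative call, matching the MNIST defaults chosen in the prompts below:
    #   w1, w2, costs = train_neural_network(y_train, X_train, 0.01, w1, w2, [400, 100, 0.05])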
w1 = np.copy(w1_init)
w2 = np.copy(w2_init)
    # Number of epochs to train for
    _iter = options[0]
    # Minibatch size
    mb_size = options[1]
    n = X.shape[0]
    # Learning rate
    eta = options[2]
    # Since we apply gradients per minibatch, eta
    # needs to be scaled by the batch size rather than by the whole dataset
    eta = eta / mb_size
    # We save every cost we compute across all epochs in order to plot it
    costs = []
    # _iter is the number of epochs the algorithm runs for
for i in range(_iter):
        # Shuffle the arrays in the same order.
        # If we don't shuffle them together, an X_train row will no longer match its original T row
        pairs = list(zip(X, T))
        np.random.shuffle(pairs)
        a, b = zip(*pairs)
temp_X = np.asarray(a)
temp_T = np.asarray(b)
for e in range(0, n, mb_size):
# Get the new elements for gradient ascent
x_b = temp_X[e: e+mb_size, :]
t_b = temp_T[e: e+mb_size, :]
Ew, grad_w1, grad_w2 = compute_gradients_cost(t_b, x_b, w1, w2, lamda)
# Save the cost
costs.append(Ew)
# Update parameters based on gradient ascend
w1 += eta * grad_w1
w2 += eta * grad_w2
return w1, w2, costs
# Run the w1, w2 we calculated on the test data
def run_test_final(w1, w2, x_test):
Z = get_z(x_test, w1)
z_w2 = Z.dot(w2.T)
ytest = softmax(z_w2)
# Hard classification decisions
ttest = np.argmax(ytest, 1)
return ttest
# Return the result of the activation function
def activationFunction(a):
if activation_option == 0:
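        # numerically stable softplus: log(1 + exp(a))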
return np.maximum(a, 0) + np.log(1 + np.exp(-np.abs(a)))
elif activation_option == 1:
return np.tanh(a)
else:
return np.cos(a)
# Return the result of the derivative of the activation function
def activationFunctionDerivative(a):
if activation_option == 0:
return np.exp(np.minimum(0,a))/(1+np.exp(-np.abs(a)))
elif activation_option == 1:
return 1 - np.tanh(a)**2
else:
return -(np.sin(a))
def load_data_mnist(data='mnist'):
"""
Loads the MNIST dataset. Reads the training files and creates matrices.
    :return: train_data: the matrix with the training data
            test_data: the matrix with the data that will be used for testing
            train_truth: the matrix consisting of one-hot
            vectors on each row (ground truth for training)
            test_truth: the matrix consisting of one-hot
            vectors on each row (ground truth for testing)
"""
train_files = [data+'/train%d.txt' % (i,) for i in range(10)]
test_files = [data+'/test%d.txt' % (i,) for i in range(10)]
tmp = []
for i in train_files:
with open(i, 'r') as fp:
tmp += fp.readlines()
# load train data in N*D array (60000x784 for MNIST)
# divided by 255 to achieve normalization
train_data = np.array([[j for j in i.split(" ")] for i in tmp], dtype='int') / 255
print ("Train data array size: ", train_data.shape)
tmp = []
for i in test_files:
with open(i, 'r') as fp:
tmp += fp.readlines()
# load test data in N*D array (10000x784 for MNIST)
# divided by 255 to achieve normalization
test_data = np.array([[j for j in i.split(" ")] for i in tmp], dtype='int') / 255
print ("Test data array size: ", test_data.shape)
tmp = []
for i, _file in enumerate(train_files):
with open(_file, 'r') as fp:
for line in fp:
tmp.append([1 if j == i else 0 for j in range(0, 10)])
train_truth = np.array(tmp, dtype='int')
del tmp[:]
for i, _file in enumerate(test_files):
with open(_file, 'r') as fp:
for _ in fp:
tmp.append([1 if j == i else 0 for j in range(0, 10)])
test_truth = np.array(tmp, dtype='int')
print ("Train truth array size: ", train_truth.shape)
print ("Test truth array size: ", test_truth.shape)
return train_data, test_data, train_truth, test_truth
def load_data_cifar10(data='cifar'):
train_files = [data+'/data_batch_%d' % (i,) for i in range(1,6)]
test_file = data+'/test_batch'
train_data = []
    dictionaries = []
    train_truth = np.zeros((50000,10))
    k = 0
    # We read in every training data batch
    for i in train_files:
        dictionaries.append(unpickle(i))
    for batch in dictionaries:
# For each input we append it
for img in batch['data']:
train_data.append(img)
for label in batch['labels']:
            # for image k, mark the class it belongs to
train_truth[k][label] = 1
k += 1
train_data = np.asarray(train_data)
# We normalize the data. All values will be in [0,1]
train_data = train_data/255
    # We do the same for the single test batch
temp_dict = unpickle(test_file)
test_data = []
test_truth = np.zeros((10000,10))
k = 0
for img in temp_dict['data']:
test_data.append(img)
for label in temp_dict['labels']:
        # for image k, mark the class it belongs to
test_truth[k][label] = 1
k += 1
test_data = np.asarray(test_data)
# Normalize the test as well
test_data = test_data/255
return train_data, test_data, train_truth, test_truth
# Check the w1,w2 derivatives
def gradient_check(w1_init,w2_init, X, t, lamda):
w1 = np.random.rand(*w1_init.shape)
w2 = np.random.rand(*w2_init.shape)
epsilon = 1e-6
_list = np.random.randint(X.shape[0], size=5)
x_sample = np.array(X[_list, :])
t_sample = np.array(t[_list, :])
Ew, gradw1, gradw2 = compute_gradients_cost(t_sample,x_sample,w1,w2, lamda)
numericalGrad = np.zeros(gradw1.shape)
# Compute all numerical gradient estimates and store them in
# the matrix numericalGrad
print (gradw1.shape , gradw2.shape , w1.shape, w2.shape)
for k in range(numericalGrad.shape[0]):
for d in range(numericalGrad.shape[1]):
# Calculate W1 gradient
w_tmp = np.copy(w1)
w_tmp[k, d] += epsilon
e_plus, _, _ = compute_gradients_cost(t_sample, x_sample, w_tmp, w2, lamda)
w_tmp = np.copy(w1)
w_tmp[k, d] -= epsilon
e_minus, _, _ = compute_gradients_cost(t_sample, x_sample, w_tmp, w2, lamda)
numericalGrad[k,d] = (e_plus - e_minus) / (2 * epsilon)
# Absolute norm
print ("The difference estimate for gradient of w1 is : ", np.max(np.abs(gradw1 - numericalGrad)))
numericalGrad = np.zeros(gradw2.shape)
# Compute all numerical gradient estimates and store them in
# the matrix numericalGrad
for k in range(numericalGrad.shape[0]):
for d in range(numericalGrad.shape[1]):
# Calculate W1 gradient
w_tmp = np.copy(w2)
w_tmp[k, d] += epsilon
e_plus, _, _ = compute_gradients_cost(t_sample, x_sample,w1 ,w_tmp , lamda)
w_tmp = np.copy(w2)
w_tmp[k, d] -= epsilon
e_minus, _, _ = compute_gradients_cost(t_sample, x_sample, w1, w_tmp, lamda)
numericalGrad[k, d] = (e_plus - e_minus) / (2 * epsilon)
# Absolute norm
print ("The difference estimate for gradient of w2 is : ", np.max(np.abs(gradw2 - numericalGrad)))
def start(options, dataset):
    # The center of our distribution. Zero is ideal for our normalized data
    center = 0
    # The spread of the distribution
    # should always scale with the dimensionality of the data
    s = 1 / np.sqrt(D + 1)
# Initialize the weights
w_2 = np.zeros((K, M + 1))
# We use this in order for our activation function to be more effective
w_1 = np.random.normal(center,s,(M,D+1))
# We add the bias
w_1[:, 1] = 1
w_2[:, 1] = 1
    # Optionally run the gradient check on both weight matrices
    if gradcheck == 1:
        gradient_check(w_1, w_2, X_train, y_train, lamda)
    # Used to measure the time needed to train the model
start_time = time.clock()
# Start training the neural network
w1_final, w2_final, costs = train_neural_network(y_train, X_train, lamda, w_1, w_2, options)
# We compare the results against the real ones
ttest = run_test_final(w1_final, w2_final, X_test)
error_count = np.not_equal(np.argmax(y_test, 1), ttest).sum()
print (error_count / y_test.shape[0] * 100)
# We save the output to a file
file = open(dataset+".txt", "a")
file.write("\n"+str(activation_option) + "\t" + str(M) +"\t"+str(options[1])+"\t"+str(options[2])+"\t"+str(options[0]))
file.write("\t"+str(error_count / y_test.shape[0] * 100)+"\t"+str(time.clock() - start_time))
file.close()
# We plot the result
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("M =" + str(M)+" Minibatch="+str(options[1]))
# We save the plot as an image
plt.savefig(dataset+'_Af_' + str(activation_option) + 'eta_' + str(options[2]) + 'M_' + str(M)+'mb_'+str(options[1])+'eta_'+str(options[0])+'.png', bbox_inches='tight')
plt.clf()
## CODE WE USED FOR RUNNING OUR EXPERIMENTS
##----------------------------------------------------------#
# Experiment driver -- we were tinkering with the values here
# to produce the results in the report
# X_train, X_test, y_train, y_test = load_data_mnist()
#
# N, D = X_train.shape
#
# # The number classes
# K = y_train.shape[1]
#
# # Adds a row of 1 in the beginning
# X_train = np.hstack((np.ones((X_train.shape[0], 1)), X_train))
# X_test = np.hstack((np.ones((X_test.shape[0], 1)), X_test))
# print "Train truth array size (with ones): ", X_train.shape
# print "Test truth array size (with ones): ", X_test.shape
# print "MNIST: "
# # Which activation function to use
# activation_options = [0, 1, 2]
# # lamda
# lamda = 0.01
# # learning rate
# # iteration
# iter_options = [400]
#
# mb_options = [100, 200]
# eta = 0.05
# # For all activation functions
# M_options = [100, 200, 300]
#
# for act in activation_options:
# activation_option = act
# for M in M_options:
# for mb in mb_options:
# for iter in iter_options:
# start([iter, mb, eta], "mnist")
#
# X_train, X_test, y_train, y_test = load_data_cifar10()
#
# N, D = X_train.shape
#
# # The number classes
# K = y_train.shape[1]
#
# # Adds a row of 1 in the beginning
# X_train = np.hstack((np.ones((X_train.shape[0], 1)), X_train))
# X_test = np.hstack((np.ones((X_test.shape[0], 1)), X_test))
# print "Train truth array size (with ones): ", X_train.shape
# print "Test truth array size (with ones): ", X_test.shape
#
# # Which activation function to use
# activation_options = [2]
# # lamda
# lamda = 0.01
# # learning rate
# # iteration
# iter_options = [400]
#
# mb_options = [100, 200]
# eta = 0.006
# # For all activation functions
# M_options = [100, 200, 300]
#
# for act in activation_options:
# activation_option = act
# for M in M_options:
# for mb in mb_options:
# for iter in iter_options:
# start([iter, mb, eta], "cifar")
# Initialize all the parameters
lamda = 0.01
eta = 0
iter = 0
M = 0
mb = 0
activation_option = -1
i = int(input('Choose a dataset: \n\t1 for MNIST \n\t2 for CIFAR-10\n>'))
if i > 2 or i < 1:
print ("Invalid input!")
exit()
print ("Loading data....")
# We set the values that gave the optimal error score in each dataset, based on our experiments
if i == 2:
X_train, X_test, y_train, y_test = load_data_cifar10()
eta = 0.005
iter = 200
M = 300
mb = 100
dataset = "cifar"
else:
X_train, X_test, y_train, y_test = load_data_mnist()
eta = 0.05
iter = 400
M = 300
mb = 100
dataset = "mnist"
i = int(input('Choose an activation option: \n\t1: log \n\t2: tanh\n\t3: cos\n>'))
activation_option = i-1
if i != 1 and i != 2 and i != 3:
print ("Invalid Input!")
exit()
# The optimal values here are the ones that gave us the minimum error in each dataset
i = int(input('Do you want to set other variables? Press 1 for Yes (Optimal values are default): \n>'))
if i == 1:
eta = float(input('Give the eta(float):\n>'))
lamda = float(input('Give the lamda(float):\n>'))
iter = int(input('Give the number of epoch(int):\n>'))
M = int(input('Give the number of neurons(int):\n>'))
mb = int(input('Give the size of the minibatch(int):\n>'))
N, D = X_train.shape
# The number classes
K = y_train.shape[1]
gradcheck = -1
gradcheck = int(input('Perform Gradient Check? Press 1 for Yes:\n>'))
# Adds a row of 1 in the beginning
X_train = np.hstack((np.ones((X_train.shape[0], 1)), X_train))
X_test = np.hstack((np.ones((X_test.shape[0], 1)), X_test))
start([iter, mb, eta], dataset) | 32.532328 | 172 | 0.619278 |
fd8cbb5a5e5e52bb58741eff31dcb673914e42e4 | 1,584 | py | Python | ch9/login4.py | chunhua2017/pythonprogrammingdemo | 64e4ac2b33c54cde4671291a6203e94cd96de4ba | [
"MIT"
] | 4 | 2020-05-18T05:25:44.000Z | 2021-07-30T01:02:39.000Z | ch9/login4.py | chunhua2017/pythonprogrammingdemo | 64e4ac2b33c54cde4671291a6203e94cd96de4ba | [
"MIT"
] | null | null | null | ch9/login4.py | chunhua2017/pythonprogrammingdemo | 64e4ac2b33c54cde4671291a6203e94cd96de4ba | [
"MIT"
] | 2 | 2021-09-15T05:41:05.000Z | 2022-01-25T05:44:43.000Z | import tkinter as tk #导入tkinter模块
window = tk.Tk() #创建主窗口对象
screen_width = window.winfo_screenwidth() #获取屏幕宽度
screen_height = window.winfo_screenheight() #获取屏幕高度
x = (screen_width - 200) / 2 #计算窗口坐标x
y = (screen_height - 100) / 2 #计算窗口坐标y
window.geometry('260x100+%d+%d' % (x, y)) #设置窗口大小与位置
window.title('login') #设置窗口标题
#读取图片文件
logo = tk.PhotoImage(file="python_logo.png")
icon_login = tk.PhotoImage(file="login.png")
icon_cancel = tk.PhotoImage(file="cancel.png")
#在左侧放置一个标签,显示logo图片
tk.Label(window, justify=tk.LEFT, image=logo).grid(row=0, column=0, rowspan=2)
#放置两个标签,一个为“账户”,一个为“密码”,字体:微软雅黑,12号
tk.Label(window, text='账户', font=('微软雅黑', 12)).grid(row=0, column=1)
tk.Label(window, text='密码', font=('微软雅黑', 12)).grid(row=1, column=1)
#在标签的右侧放置输入控件
var_usr_name = tk.StringVar()
var_usr_pwd = tk.StringVar()
tk.Entry(window, width=15, textvariable=var_usr_name).grid(row=0, column=2)
tk.Entry(window, width=15, textvariable=var_usr_pwd,show='*').grid(row=1, column=2)
#定义"登录"按钮的回调函数
def login():
user = var_usr_name.get()
psw = var_usr_pwd.get()
if user and psw :
print("Username:%s\nPassword:%s" % (user, psw))
else:
print("Please Enter Username and Password!")
#放置两个按钮,一个“登录”,一个“取消”
tk.Button(window,compound=tk.LEFT,image=icon_login,text='登录',font=('Microsoft YaHei', 11),anchor=tk.E,padx=10, width=60, command=login).grid(row=2, column=1)
tk.Button(window,compound=tk.LEFT,image=icon_cancel,text='取消',font=('Microsoft YaHei', 11),anchor=tk.E,padx = 10,width=60,command=window.quit).grid(row=2, column=2)
window.mainloop() #进入Tk事件循环
| 42.810811 | 164 | 0.710227 |
a54e0970ccc6cc1edaf7d940f920aab9ade2306b | 1,248 | py | Python | djcopybook/fixedwidth/tests/fields/test_integer_field.py | imtapps/django-copybook | 33967a017956d58ae1d57b6c7b7c5eb8ed5f18ee | [
"BSD-2-Clause"
] | 3 | 2017-01-23T19:30:18.000Z | 2019-07-08T06:42:50.000Z | djcopybook/fixedwidth/tests/fields/test_integer_field.py | imtapps/django-copybook | 33967a017956d58ae1d57b6c7b7c5eb8ed5f18ee | [
"BSD-2-Clause"
] | 7 | 2015-05-19T14:26:39.000Z | 2022-02-09T19:21:44.000Z | djcopybook/fixedwidth/tests/fields/test_integer_field.py | imtapps/django-copybook | 33967a017956d58ae1d57b6c7b7c5eb8ed5f18ee | [
"BSD-2-Clause"
] | 3 | 2016-09-15T20:48:00.000Z | 2017-10-24T19:33:29.000Z | import unittest
from djcopybook.fixedwidth import fields
class IntegerFieldTests(unittest.TestCase):
def test_to_record_returns_string_of_value_padded_to_length(self):
field = fields.IntegerField(length=5)
self.assertEqual("00012", field.to_record(12))
def test_to_record_returns_padded_zeros_when_value_is_None(self):
field = fields.IntegerField(length=5)
self.assertEqual("00000", field.to_record(None))
def test_to_python_turns_value_to_int(self):
field = fields.IntegerField(length=5)
python_val = field.to_python("10")
self.assertEqual(10, python_val)
self.assertIsInstance(python_val, int)
def test_to_python_returns_none_when_value_is_none(self):
field = fields.IntegerField(length=5)
self.assertEqual(None, field.to_python(None))
def test_to_python_returns_none_when_value_is_empty_string(self):
field = fields.IntegerField(length=5)
self.assertEqual(None, field.to_python(" "))
def test_to_python_raises_value_error_when_value_given_has_characters(self):
field = fields.IntegerField(length=5)
with self.assertRaises(ValueError):
self.assertEqual(None, field.to_python("0001A"))
| 36.705882 | 80 | 0.733173 |
a64e699b57fb99b7390800bd99a4269c940dcba9 | 1,188 | py | Python | setup.py | zachary-hawk/NC_CRYST | 21874e50054a753351df0499ec5fd0ec63df3531 | [
"MIT"
] | null | null | null | setup.py | zachary-hawk/NC_CRYST | 21874e50054a753351df0499ec5fd0ec63df3531 | [
"MIT"
] | null | null | null | setup.py | zachary-hawk/NC_CRYST | 21874e50054a753351df0499ec5fd0ec63df3531 | [
"MIT"
] | null | null | null | from setuptools import find_packages
import pathlib
from numpy.distutils.core import setup, Extension
HERE=pathlib.Path(__file__).parent
README=(HERE / "README.md").read_text()
ext1 = Extension(name='nc_fort',
sources=['nc_cryst/nc_fort.f90'],
f2py_options=['--quiet'],
)
setup(name="nc_cryst-CASTEP",
version="0.1.33",
#package_dir={"": "nc_cryst"},
packages=["nc_cryst"],#find_packages(where="nc_cryst"),
ext_modules=[ext1],
description="Crystal visulaliser",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/zachary-hawk/NC_CRYST.git",
author="Zachary Hawkhead",
author_email="zachary.hawkhead@durham.ac.uk",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
include_package_data=True,
install_requires=["numpy","matplotlib","scipy","ase","pyvista","vtk","argparse"],
entry_points={"console_scripts":["nc_cryst=nc_cryst.__main__:main",]
}
)
| 32.108108 | 87 | 0.628788 |
ab42dcd277450e99a226d08cfc1bb66a8f6f5f2a | 299 | py | Python | strudra/__init__.py | domenukk/strudra | 3c618779f9a9cf646d145e3ab703a0b4e7dbb767 | [
"MIT"
] | 28 | 2021-03-17T01:14:46.000Z | 2022-01-21T02:55:07.000Z | strudra/__init__.py | domenukk/strudra | 3c618779f9a9cf646d145e3ab703a0b4e7dbb767 | [
"MIT"
] | null | null | null | strudra/__init__.py | domenukk/strudra | 3c618779f9a9cf646d145e3ab703a0b4e7dbb767 | [
"MIT"
] | null | null | null | from .strudra import (
get_fmt,
Member,
data_from_file,
Strudra,
data_from_ghidra,
struds_from_data,
serialize_struds,
ghidra_struct_to_strud,
define_struct,
add_struct,
parse_to_strud,
parse_struct_members,
target_is_big_endian,
gh_bridge,
)
| 17.588235 | 27 | 0.692308 |
56bfa69a860ca47ef3be9d41bcd89b9bfef6523b | 5,135 | py | Python | examples/camera.py | conductiveIT/pymunk-1 | 61de8b2e652503356ac14a2d648cc11aa6a8070f | [
"MIT"
] | 670 | 2015-01-01T19:10:15.000Z | 2022-03-29T23:05:47.000Z | examples/camera.py | conductiveIT/pymunk-1 | 61de8b2e652503356ac14a2d648cc11aa6a8070f | [
"MIT"
] | 122 | 2015-01-02T19:06:19.000Z | 2022-03-20T19:44:25.000Z | examples/camera.py | conductiveIT/pymunk-1 | 61de8b2e652503356ac14a2d648cc11aa6a8070f | [
"MIT"
] | 222 | 2015-01-28T03:34:52.000Z | 2022-03-27T06:44:52.000Z | """Basic showcase on how the transform property on SpaceDebugDrawOptions can
be used as a camera to allow panning. Use arrows to move the camera.
"""
__docformat__ = "reStructuredText"
import random
import sys
import pygame
import pymunk
import pymunk.pygame_util
from pymunk.vec2d import Vec2d
random.seed(0)
def main():
pygame.init()
screen = pygame.display.set_mode((600, 600))
clock = pygame.time.Clock()
running = True
font = pygame.font.Font(None, 16)
text = font.render(
"Use Arrows (up, down, left, right) to move the camera, "
"a and z to zoom in / out and s and x to rotate.",
True,
pygame.Color("black"),
)
### Physics stuff
space = pymunk.Space()
space.gravity = Vec2d(0.0, 900.0)
draw_options = pymunk.pygame_util.DrawOptions(screen)
## Balls
balls = []
body = pymunk.Body()
body.position = pymunk.Vec2d(407, 354)
s1 = pymunk.Segment(body, Vec2d(-300, -30), Vec2d(0, 0), 1.0)
s2 = pymunk.Segment(body, Vec2d(0, 0), Vec2d(0, -100), 1.0)
s1.density = 0.1
s2.density = 0.1
s1.friction = 1
s2.friction = 1
space.add(body, s1, s2)
c1 = pymunk.constraints.DampedSpring(
space.static_body,
body,
(427, 200),
(0, -100),
Vec2d(407, 254).get_distance((427, 200)),
2000,
100,
)
c2 = pymunk.constraints.DampedSpring(
space.static_body,
body,
(87, 200),
(-300, -30),
Vec2d(107, 324).get_distance((87, 200)),
2000,
100,
)
space.add(c1, c2)
# extra to show how constraints are drawn when very small / large
body = pymunk.Body(1, 100)
body.position = 450, 305
c3 = pymunk.constraints.DampedSpring(
space.static_body, body, (450, 300), (0, 0), 5, 1000, 100
)
space.add(body, c3)
body = pymunk.Body(1, 100)
body.position = 500, 2025
c3 = pymunk.constraints.DampedSpring(
space.static_body, body, (500, 25), (0, 0), 2000, 1000, 100
)
space.add(body, c3)
ticks_to_next_ball = 10
translation = pymunk.Transform()
scaling = 1
rotation = 0
while running:
for event in pygame.event.get():
if (
event.type == pygame.QUIT
or event.type == pygame.KEYDOWN
and event.key == pygame.K_ESCAPE
):
running = False
elif event.type == pygame.KEYDOWN and event.key == pygame.K_p:
pygame.image.save(screen, "camera.png")
keys = pygame.key.get_pressed()
left = int(keys[pygame.K_LEFT])
up = int(keys[pygame.K_UP])
down = int(keys[pygame.K_DOWN])
right = int(keys[pygame.K_RIGHT])
zoom_in = int(keys[pygame.K_a])
zoom_out = int(keys[pygame.K_z])
rotate_left = int(keys[pygame.K_s])
rotate_right = int(keys[pygame.K_x])
translate_speed = 10
translation = translation.translated(
translate_speed * left - translate_speed * right,
translate_speed * up - translate_speed * down,
)
zoom_speed = 0.1
scaling *= 1 + (zoom_speed * zoom_in - zoom_speed * zoom_out)
rotation_speed = 0.1
rotation += rotation_speed * rotate_left - rotation_speed * rotate_right
# to zoom with center of screen as origin we need to offset with
# center of screen, scale, and then offset back
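        # (Transform factors compose with @ and apply right-to-left to each point)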
draw_options.transform = (
pymunk.Transform.translation(300, 300)
@ pymunk.Transform.scaling(scaling)
@ translation
@ pymunk.Transform.rotation(rotation)
@ pymunk.Transform.translation(-300, -300)
)
ticks_to_next_ball -= 1
if ticks_to_next_ball <= 0:
ticks_to_next_ball = 100
mass = 10
radius = 25
inertia = pymunk.moment_for_circle(mass, 0, radius, (0, 0))
body = pymunk.Body(mass, inertia)
x = random.randint(115, 350)
body.position = x, 100
if random.random() > 0.5:
shape = pymunk.Circle(body, radius)
else:
shape = pymunk.Poly.create_box(
body, size=(radius * 2, radius * 2), radius=2
)
shape.friction = 1
space.add(body, shape)
balls.append(shape)
### Clear screen
screen.fill(pygame.Color("white"))
### Draw stuff
space.debug_draw(draw_options)
balls_to_remove = []
for ball in balls:
if ball.body.position.y > 500:
balls_to_remove.append(ball)
for ball in balls_to_remove:
space.remove(ball, ball.body)
balls.remove(ball)
screen.blit(text, (5, 5))
### Update physics
dt = 1.0 / 60.0
space.step(dt)
### Flip screen
pygame.display.flip()
clock.tick(50)
pygame.display.set_caption("fps: " + str(clock.get_fps()))
if __name__ == "__main__":
sys.exit(main())
| 28.060109 | 80 | 0.565141 |
5abaa68bbcec9c6fdc51249395e01ef7cbfc5a1c | 830 | py | Python | tests/codec.py | axsguard/sstp-server | 393986654cd429ce55f9e5a242d60610a4fed88b | [
"MIT"
] | 223 | 2015-01-17T17:35:46.000Z | 2022-03-20T10:39:35.000Z | tests/codec.py | axsguard/sstp-server | 393986654cd429ce55f9e5a242d60610a4fed88b | [
"MIT"
] | 34 | 2015-04-02T09:03:08.000Z | 2022-03-13T06:57:30.000Z | tests/codec.py | axsguard/sstp-server | 393986654cd429ce55f9e5a242d60610a4fed88b | [
"MIT"
] | 99 | 2015-04-14T14:03:02.000Z | 2022-03-26T19:27:39.000Z | #!/usr/bin/env python3
import os
import timeit
from sstpd.codec import escape, PppDecoder
decoder = PppDecoder()
def get_escaped():
    frames = [os.urandom(1500) for i in range(2)]
    return b''.join([escape(f) for f in frames])
def prof_unescape():
    return timeit.timeit('decoder.unescape(data)',
                         setup='data = get_escaped()',
                         globals=globals())
def codec_test():
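    # round-trip check: escaping and then unescaping one frame must reproduce it exactly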
frame = os.urandom(1500)
escaped = escape(frame)
print("escaped: %d bytes " % len(escaped))
unescaped = PppDecoder().unescape(escaped)
assert len(unescaped) == 1
print("unescaped: %d bytes" % len(unescaped[0]))
assert unescaped[0] == frame
def main():
codec_test()
print('Test unescape...')
print('\t%f' % prof_unescape())
if __name__ == '__main__':
main()
| 24.411765 | 55 | 0.618072 |
a4bfe13b5c7b24c2032ccdda824c6e64691d72f0 | 3,541 | py | Python | tensorflow_privacy/privacy/estimators/test_utils.py | SoaringChicken/tensorflow-privacy | 70ab071e2332694efaa7ad88240832f5b778d55c | [
"Apache-2.0"
] | null | null | null | tensorflow_privacy/privacy/estimators/test_utils.py | SoaringChicken/tensorflow-privacy | 70ab071e2332694efaa7ad88240832f5b778d55c | [
"Apache-2.0"
] | null | null | null | tensorflow_privacy/privacy/estimators/test_utils.py | SoaringChicken/tensorflow-privacy | 70ab071e2332694efaa7ad88240832f5b778d55c | [
"Apache-2.0"
] | null | null | null | # Copyright 2020, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper functions for unit tests for DP-enabled Estimators."""
import numpy as np
import tensorflow as tf
def make_input_data(size, classes):
"""Create raw input data for testing."""
feature_a = np.random.normal(4, 1, (size))
feature_b = np.random.normal(5, 0.7, (size))
feature_c = np.random.normal(6, 2, (size))
noise = np.random.normal(0, 30, (size))
features = {
'feature_a': feature_a,
'feature_b': feature_b,
'feature_c': feature_c,
}
if classes == 2:
labels = np.array(
np.power(feature_a, 3) + np.power(feature_b, 2) +
np.power(feature_c, 1) + noise > 125).astype(int)
else:
def label_fn(x):
if x < 110.0:
return 0
elif x < 140.0:
return 1
else:
return 2
labels = list(
map(
label_fn,
np.power(feature_a, 3) + np.power(feature_b, 2) +
np.power(feature_c, 1) + noise))
return features, labels
def make_multilabel_input_data(size):
"""Create raw input data for testing."""
feature_a = np.random.normal(4, 1, (size))
feature_b = np.random.normal(5, 0.7, (size))
feature_c = np.random.normal(6, 2, (size))
noise_a = np.random.normal(0, 1, (size))
noise_b = np.random.normal(0, 1, (size))
noise_c = np.random.normal(0, 1, (size))
features = {
'feature_a': feature_a,
'feature_b': feature_b,
'feature_c': feature_c,
}
def label_fn(a, b, c):
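    # threshold each noisy feature at its mean (4, 5, 6) to produce three binary labels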
return [int(a > 4), int(b > 5), int(c > 6)]
labels = list(
map(label_fn, feature_a + noise_a, feature_b + noise_b,
feature_c + noise_c))
return features, labels
def make_input_fn(features, labels, training, batch_size=16):
"""Returns an input function suitable for an estimator."""
def input_fn():
"""An input function for training or evaluating."""
# Convert the inputs to a Dataset.
dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
# Shuffle if in training mode.
if training:
dataset = dataset.shuffle(1000)
return dataset.batch(batch_size)
return input_fn
def make_model_fn(head, optimizer, feature_columns):
"""Constructs and returns a model_fn using supplied head."""
def model_fn(features, labels, mode, params, config=None): # pylint: disable=unused-argument
feature_layer = tf.keras.layers.DenseFeatures(feature_columns)
inputs = feature_layer(features)
hidden_layer = tf.keras.layers.Dense(units=3, activation='relu')
hidden_layer_values = hidden_layer(inputs)
logits_layer = tf.keras.layers.Dense(
units=head.logits_dimension, activation=None)
logits = logits_layer(hidden_layer_values)
return head.create_estimator_spec(
features=features,
labels=labels,
mode=mode,
logits=logits,
trainable_variables=hidden_layer.trainable_weights +
logits_layer.trainable_weights,
optimizer=optimizer)
return model_fn
| 30.264957 | 95 | 0.670997 |
fb173fbadea610e46271e78970482d81245d28b4 | 629 | py | Python | crawler/validators/__init__.py | wanlitengfei/IPProxy | 7a69ca184e8f0c5f154b2a394431e95d4689ef87 | [
"MIT"
] | 1 | 2018-03-20T09:14:33.000Z | 2018-03-20T09:14:33.000Z | crawler/validators/__init__.py | wanlitengfei/IPProxy | 7a69ca184e8f0c5f154b2a394431e95d4689ef87 | [
"MIT"
] | null | null | null | crawler/validators/__init__.py | wanlitengfei/IPProxy | 7a69ca184e8f0c5f154b2a394431e95d4689ef87 | [
"MIT"
] | 1 | 2018-09-08T08:06:54.000Z | 2018-09-08T08:06:54.000Z | """
All the spiders here are used to validate IP resources.
These are all the validator websites:
https://httpbin.org/ip
http://httpbin.org/ip
https://weibo.cn/
If you want to add your own validators, you must add all the queues
in config/settings.py, register the tasks in config/rules.py, and add
the task key to HttpBinInitValidator's https_tasks or http_tasks.
"""
from .httpbin import (
HttpBinInitValidator, HttpValidator,
HttpsValidator)
from .zhihu import ZhiHuValidator
from .weibo import WeiBoValidator
all_validators = [
HttpBinInitValidator, HttpValidator,
HttpsValidator, WeiBoValidator,
ZhiHuValidator
] | 26.208333 | 67 | 0.779014 |
34f640021fa58987cccf4060f433c5836c88d6ee | 1,306 | py | Python | kano_blog/app.py | healeycodes/full-stack-test-for-kano | ab644569b57694d65059cd76fedbfa467d79dee5 | [
"MIT"
] | 2 | 2019-05-21T00:17:00.000Z | 2019-05-21T01:01:13.000Z | kano_blog/app.py | healeycodes/full-stack-test-for-kano | ab644569b57694d65059cd76fedbfa467d79dee5 | [
"MIT"
] | null | null | null | kano_blog/app.py | healeycodes/full-stack-test-for-kano | ab644569b57694d65059cd76fedbfa467d79dee5 | [
"MIT"
] | null | null | null | import sqlite3
import sys
from pathlib import Path
from flask import g, Flask, flash, render_template, request
# create Flask instance
app = Flask(__name__)
app.secret_key = 'secret'
app.config['DATABASE'] = 'db.sqlite'
# sqlite database will stored be in package dir for MVP
db_path = str(Path(app.root_path) / app.config['DATABASE'])
# get an sqlite connection to db
def get_db():
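    # cache the connection on flask.g so each request reuses a single handle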
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(db_path)
return db
# when the current request finishes, close any db connections
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
# the only route for our MVP - a blog post
@app.route('/', methods=('GET', 'POST'))
def post():
if request.method == 'POST':
email = request.form['email']
name = request.form['name']
message = request.form['message']
get_db().execute(
'INSERT INTO comment (email, name, message) VALUES (?, ?, ?)',
(email, name, message)
)
get_db().commit()
flash('Thank you!')
return render_template('post.html')
# if ran as main, run Flask development server
if __name__ == '__main__':
app.run(debug=True)
| 23.745455 | 74 | 0.644717 |
7558d632ab7033108ae7f45253c1f57394927f46 | 870 | py | Python | isi_sdk_7_2/test/test_worm_settings.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_7_2/test/test_worm_settings.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_7_2/test/test_worm_settings.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 2
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_7_2
from isi_sdk_7_2.models.worm_settings import WormSettings # noqa: E501
from isi_sdk_7_2.rest import ApiException
class TestWormSettings(unittest.TestCase):
"""WormSettings unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testWormSettings(self):
"""Test WormSettings"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_7_2.models.worm_settings.WormSettings() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 21.219512 | 79 | 0.694253 |
21c787ac92d245b20423b8b9f0b30e1152fae9c9 | 18,584 | py | Python | tests/test_networking.py | standy66/anyio | 39daeb8903f4a53b3dfdba63234a3bad24bc2b5d | [
"MIT"
] | null | null | null | tests/test_networking.py | standy66/anyio | 39daeb8903f4a53b3dfdba63234a3bad24bc2b5d | [
"MIT"
] | null | null | null | tests/test_networking.py | standy66/anyio | 39daeb8903f4a53b3dfdba63234a3bad24bc2b5d | [
"MIT"
] | null | null | null | import socket
import ssl
import sys
from pathlib import Path
import pytest
from anyio import (
create_task_group, connect_tcp, create_udp_socket, connect_unix, create_unix_server,
create_tcp_server)
from anyio.exceptions import IncompleteRead, DelimiterNotFound, ClosedResourceError
class TestTCPStream:
@pytest.mark.anyio
async def test_receive_some(self):
async def server():
async with await stream_server.accept() as stream:
assert stream._socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) != 0
command = await stream.receive_some(100)
await stream.send_all(command[::-1])
async with create_task_group() as tg:
async with await create_tcp_server(interface='localhost') as stream_server:
await tg.spawn(server)
async with await connect_tcp('localhost', stream_server.port) as client:
assert client._socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY) != 0
await client.send_all(b'blah')
response = await client.receive_some(100)
assert response == b'halb'
@pytest.mark.anyio
async def test_receive_some_from_cache(self):
async def server():
async with await stream_server.accept() as stream:
await stream.receive_until(b'a', 10)
request = await stream.receive_some(1)
await stream.send_all(request + b'\n')
received = None
async with create_task_group() as tg:
async with await create_tcp_server(interface='localhost') as stream_server:
await tg.spawn(server)
async with await connect_tcp('localhost', stream_server.port) as client:
await client.send_all(b'abc')
received = await client.receive_until(b'\n', 3)
assert received == b'b'
@pytest.mark.parametrize('method_name, params', [
('receive_until', [b'\n', 100]),
('receive_exactly', [5])
], ids=['read_until', 'read_exactly'])
@pytest.mark.anyio
async def test_read_partial(self, method_name, params):
async def server():
async with await stream_server.accept() as stream:
method = getattr(stream, method_name)
line1 = await method(*params)
line2 = await method(*params)
await stream.send_all(line1.strip() + line2.strip())
async with create_task_group() as tg:
async with await create_tcp_server(interface='localhost') as stream_server:
await tg.spawn(server)
async with await connect_tcp('localhost', stream_server.port) as client:
await client.send_all(b'bla')
await client.send_all(b'h\nb')
await client.send_all(b'leh\n')
response = await client.receive_some(100)
assert response == b'blahbleh'
@pytest.mark.anyio
async def test_send_large_buffer(self):
async def server():
async with await stream_server.accept() as stream:
await stream.send_all(buffer)
buffer = b'\xff' * 1024 * 1024 # should exceed the maximum kernel send buffer size
async with create_task_group() as tg:
async with await create_tcp_server(interface='localhost') as stream_server:
await tg.spawn(server)
async with await connect_tcp('localhost', stream_server.port) as client:
response = await client.receive_exactly(len(buffer))
with pytest.raises(IncompleteRead):
await client.receive_exactly(1)
assert response == buffer
@pytest.mark.parametrize('method_name, params', [
('receive_until', [b'\n', 100]),
('receive_exactly', [5])
], ids=['read_until', 'read_exactly'])
@pytest.mark.anyio
async def test_incomplete_read(self, method_name, params):
async def server():
async with await stream_server.accept() as stream:
await stream.send_all(b'bla')
async with create_task_group() as tg:
async with await create_tcp_server(interface='localhost') as stream_server:
await tg.spawn(server)
async with await connect_tcp('localhost', stream_server.port) as client:
method = getattr(client, method_name)
with pytest.raises(IncompleteRead):
await method(*params)
@pytest.mark.anyio
async def test_delimiter_not_found(self):
async def server():
async with await stream_server.accept() as stream:
await stream.send_all(b'blah\n')
async with create_task_group() as tg:
async with await create_tcp_server(interface='localhost') as stream_server:
await tg.spawn(server)
async with await connect_tcp('localhost', stream_server.port) as client:
with pytest.raises(DelimiterNotFound) as exc:
await client.receive_until(b'\n', 3)
assert exc.match(' first 3 bytes$')
@pytest.mark.anyio
async def test_receive_chunks(self):
async def server():
async with await stream_server.accept() as stream:
async for chunk in stream.receive_chunks(2):
chunks.append(chunk)
chunks = []
async with await create_tcp_server(interface='localhost') as stream_server:
async with create_task_group() as tg:
await tg.spawn(server)
async with await connect_tcp('localhost', stream_server.port) as client:
await client.send_all(b'blah')
assert chunks == [b'bl', b'ah']
@pytest.mark.anyio
async def test_buffer(self):
async def server():
async with await stream_server.accept() as stream:
chunks.append(await stream.receive_until(b'\n', 10))
chunks.append(await stream.receive_exactly(4))
chunks.append(await stream.receive_exactly(2))
chunks = []
async with await create_tcp_server(interface='localhost') as stream_server:
async with create_task_group() as tg:
await tg.spawn(server)
async with await connect_tcp('localhost', stream_server.port) as client:
await client.send_all(b'blah\nfoobar')
assert chunks == [b'blah', b'foob', b'ar']
@pytest.mark.anyio
async def test_receive_delimited_chunks(self):
async def server():
async with await stream_server.accept() as stream:
async for chunk in stream.receive_delimited_chunks(b'\r\n', 8):
chunks.append(chunk)
chunks = []
async with await create_tcp_server(interface='localhost') as stream_server:
async with create_task_group() as tg:
await tg.spawn(server)
async with await connect_tcp('localhost', stream_server.port) as client:
for chunk in (b'bl', b'ah', b'\r', b'\nfoo', b'bar\r\n'):
await client.send_all(chunk)
assert chunks == [b'blah', b'foobar']
@pytest.mark.anyio
async def test_accept_connections(self):
async def handle_client(stream):
async with stream:
line = await stream.receive_until(b'\n', 10)
lines.add(line)
if len(lines) == 2:
await stream_server.close()
async def server():
async for stream in stream_server.accept_connections():
await tg.spawn(handle_client, stream)
lines = set()
async with await create_tcp_server(interface='localhost') as stream_server:
async with create_task_group() as tg:
await tg.spawn(server)
async with await connect_tcp('localhost', stream_server.port) as client:
await client.send_all(b'client1\n')
async with await connect_tcp('localhost', stream_server.port) as client:
await client.send_all(b'client2\n')
assert lines == {b'client1', b'client2'}
class TestUNIXStream:
@pytest.mark.skipif(sys.platform == 'win32',
reason='UNIX sockets are not available on Windows')
    @pytest.mark.parametrize('as_path', [False, True])
@pytest.mark.anyio
async def test_connect_unix(self, tmpdir_factory, as_path):
async def server():
async with await stream_server.accept() as stream:
command = await stream.receive_some(100)
await stream.send_all(command[::-1])
async with create_task_group() as tg:
path = str(tmpdir_factory.mktemp('unix').join('socket'))
if as_path:
path = Path(path)
async with await create_unix_server(path) as stream_server:
await tg.spawn(server)
async with await connect_unix(path) as client:
await client.send_all(b'blah')
response = await client.receive_some(100)
assert response == b'halb'
class TestTLSStream:
@pytest.fixture
def server_context(self):
server_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
server_context.load_cert_chain(certfile=str(Path(__file__).with_name('cert.pem')),
keyfile=str(Path(__file__).with_name('key.pem')))
return server_context
@pytest.fixture
def client_context(self):
client_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
client_context.load_verify_locations(cafile=str(Path(__file__).with_name('cert.pem')))
return client_context
@pytest.mark.anyio
async def test_handshake_on_connect(self, server_context, client_context):
async def server():
nonlocal server_binding
async with await stream_server.accept() as stream:
assert stream.server_side
assert stream.server_hostname is None
assert stream.tls_version.startswith('TLSv')
assert stream.cipher in stream.shared_ciphers
server_binding = stream.get_channel_binding()
command = await stream.receive_some(100)
await stream.send_all(command[::-1])
server_binding = None
async with create_task_group() as tg:
async with await create_tcp_server(
interface='localhost', ssl_context=server_context) as stream_server:
await tg.spawn(server)
async with await connect_tcp(
'localhost', stream_server.port, ssl_context=client_context,
autostart_tls=True) as client:
assert not client.server_side
assert client.server_hostname == 'localhost'
assert client.tls_version.startswith('TLSv')
assert client.cipher in client.shared_ciphers
client_binding = client.get_channel_binding()
await client.send_all(b'blah')
response = await client.receive_some(100)
assert response == b'halb'
assert client_binding == server_binding
assert isinstance(client_binding, bytes)
@pytest.mark.skipif(not ssl.HAS_ALPN, reason='ALPN support not available')
@pytest.mark.anyio
async def test_alpn_negotiation(self, server_context, client_context):
async def server():
async with await stream_server.accept() as stream:
assert stream.alpn_protocol == 'dummy2'
client_context.set_alpn_protocols(['dummy1', 'dummy2'])
server_context.set_alpn_protocols(['dummy2', 'dummy3'])
async with await create_tcp_server(interface='localhost',
ssl_context=server_context) as stream_server:
async with create_task_group() as tg:
await tg.spawn(server)
async with await connect_tcp(
'localhost', stream_server.port, ssl_context=client_context,
autostart_tls=True) as client:
assert client.alpn_protocol == 'dummy2'
@pytest.mark.anyio
async def test_manual_handshake(self, server_context, client_context):
async def server():
async with await stream_server.accept() as stream:
assert stream.tls_version is None
while True:
command = await stream.receive_exactly(5)
if command == b'START':
await stream.start_tls()
assert stream.tls_version.startswith('TLSv')
elif command == b'CLOSE':
break
async with await create_tcp_server(interface='localhost', ssl_context=server_context,
autostart_tls=False) as stream_server:
async with create_task_group() as tg:
await tg.spawn(server)
async with await connect_tcp('localhost', stream_server.port,
ssl_context=client_context) as client:
assert client.tls_version is None
await client.send_all(b'START') # arbitrary string
await client.start_tls()
assert client.tls_version.startswith('TLSv')
await client.send_all(b'CLOSE') # arbitrary string
@pytest.mark.anyio
async def test_buffer(self, server_context, client_context):
async def server():
async with await stream_server.accept() as stream:
chunks.append(await stream.receive_until(b'\n', 10))
chunks.append(await stream.receive_exactly(4))
chunks.append(await stream.receive_exactly(2))
chunks = []
async with await create_tcp_server(interface='localhost',
ssl_context=server_context) as stream_server:
async with create_task_group() as tg:
await tg.spawn(server)
async with await connect_tcp(
'localhost', stream_server.port, ssl_context=client_context,
autostart_tls=True) as client:
await client.send_all(b'blah\nfoobar')
assert chunks == [b'blah', b'foob', b'ar']
@pytest.mark.parametrize('server_compatible, client_compatible, exc_class', [
(True, True, IncompleteRead),
(True, False, ssl.SSLEOFError),
(False, True, IncompleteRead),
(False, False, IncompleteRead)
], ids=['both_standard', 'server_standard', 'client_standard', 'neither_standard'])
@pytest.mark.anyio
async def test_ragged_eofs(self, server_context, client_context, server_compatible,
client_compatible, exc_class):
async def server():
async with await stream_server.accept() as stream:
chunks.append(await stream.receive_exactly(2))
with pytest.raises(exc_class):
await stream.receive_exactly(2)
chunks = []
async with await create_tcp_server(
interface='localhost', ssl_context=server_context,
tls_standard_compatible=server_compatible) as stream_server:
async with create_task_group() as tg:
await tg.spawn(server)
async with await connect_tcp(
'localhost', stream_server.port, ssl_context=client_context,
autostart_tls=True, tls_standard_compatible=client_compatible) as client:
await client.send_all(b'bl')
assert chunks == [b'bl']
class TestUDPSocket:
@pytest.mark.anyio
async def test_udp(self):
async with await create_udp_socket(port=5000, interface='localhost',
target_port=5000, target_host='localhost') as socket:
await socket.send(b'blah')
request, addr = await socket.receive(100)
assert request == b'blah'
assert addr == ('127.0.0.1', 5000)
await socket.send(b'halb')
response, addr = await socket.receive(100)
assert response == b'halb'
assert addr == ('127.0.0.1', 5000)
@pytest.mark.anyio
async def test_udp_noconnect(self):
async with await create_udp_socket(interface='localhost') as socket:
await socket.send(b'blah', 'localhost', socket.port)
request, addr = await socket.receive(100)
assert request == b'blah'
assert addr == ('127.0.0.1', socket.port)
await socket.send(b'halb', 'localhost', socket.port)
response, addr = await socket.receive(100)
assert response == b'halb'
assert addr == ('127.0.0.1', socket.port)
@pytest.mark.anyio
async def test_udp_close_socket_from_other_task(self):
async with create_task_group() as tg:
async with await create_udp_socket(interface='127.0.0.1') as udp:
await tg.spawn(udp.close)
with pytest.raises(ClosedResourceError):
await udp.receive(100)
@pytest.mark.anyio
async def test_udp_receive_packets(self):
async def serve():
async for packet, addr in server.receive_packets(10000):
await server.send(packet[::-1], *addr)
async with await create_udp_socket(interface='127.0.0.1') as server:
async with await create_udp_socket(target_host='127.0.0.1',
target_port=server.port) as client:
async with create_task_group() as tg:
await tg.spawn(serve)
await client.send(b'FOOBAR')
assert await client.receive(100) == (b'RABOOF', ('127.0.0.1', server.port))
await client.send(b'123456')
assert await client.receive(100) == (b'654321', ('127.0.0.1', server.port))
await tg.cancel_scope.cancel()
| 43.830189 | 97 | 0.595512 |
26a882f66bd8e4a72d641a6c1038214ea0e1b89f | 2,615 | py | Python | astro/dags/example-dag.py | zkan/hello-astronomer | 9da23a254d4f1bc02bce511e3c5243e0093c2080 | ["Apache-2.0"] | 9 | 2021-12-10T18:15:09.000Z | 2022-03-27T15:50:05.000Z | astro/dags/example-dag.py | zkan/hello-astronomer | 9da23a254d4f1bc02bce511e3c5243e0093c2080 | ["Apache-2.0"] | 1 | 2022-02-28T01:17:20.000Z | 2022-02-28T02:17:43.000Z | astro/dags/example-dag.py | zkan/hello-astronomer | 9da23a254d4f1bc02bce511e3c5243e0093c2080 | ["Apache-2.0"] | 2 | 2021-11-02T19:08:56.000Z | 2021-11-03T14:19:59.000Z |
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.version import version
from datetime import datetime, timedelta
def my_custom_function(ts, **kwargs):
    """
    This can be any Python code you want; it is called from the PythonOperator.
    The code is not executed until the task is run by the Airflow scheduler.
    """
print(f"I am task number {kwargs['task_number']}. This DAG Run execution date is {ts} and the current time is {datetime.now()}")
print('Here is the full DAG Run context. It is available because provide_context=True')
print(kwargs)
# Default settings applied to all tasks
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5)
}
# Using a DAG context manager, you don't have to specify the dag property of each task
with DAG('example_dag',
start_date=datetime(2019, 1, 1),
max_active_runs=3,
schedule_interval=timedelta(minutes=30), # https://airflow.apache.org/docs/stable/scheduler.html#dag-runs
default_args=default_args,
# catchup=False # enable if you don't want historical dag runs to run
) as dag:
t0 = DummyOperator(
task_id='start'
)
t1 = DummyOperator(
task_id='group_bash_tasks'
)
t2 = BashOperator(
task_id='bash_print_date1',
bash_command='sleep $[ ( $RANDOM % 30 ) + 1 ]s && date')
t3 = BashOperator(
task_id='bash_print_date2',
bash_command='sleep $[ ( $RANDOM % 30 ) + 1 ]s && date')
# generate tasks with a loop. task_id must be unique
for task in range(5):
if version.startswith('2'):
tn = PythonOperator(
task_id=f'python_print_date_{task}',
python_callable=my_custom_function, # make sure you don't include the () of the function
op_kwargs={'task_number': task},
)
else:
tn = PythonOperator(
task_id=f'python_print_date_{task}',
python_callable=my_custom_function, # make sure you don't include the () of the function
op_kwargs={'task_number': task},
provide_context=True,
)
t0 >> tn # indented inside for loop so each task is added downstream of t0
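    # resulting dependency graph (sketch of the wiring in this DAG): t0 fans
    # out to each python_print_date_N task and to t1; t1 fans out to both
    # bash tasks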
t0 >> t1
    t1 >> [t2, t3] # lists can be used to specify multiple tasks
| 36.319444 | 132 | 0.648566 |
3a9ec05d3374e0d4a2576e68eeae7bb41d564c6d | 7,293 | py | Python | tljh/configurer.py | budgester/the-littlest-jupyterhub | 94f747fd5d6acd6cb818c97c766eb8869128eabe | ["BSD-3-Clause"] | 1 | 2020-06-28T12:21:32.000Z | 2020-06-28T12:21:32.000Z | tljh/configurer.py | budgester/the-littlest-jupyterhub | 94f747fd5d6acd6cb818c97c766eb8869128eabe | ["BSD-3-Clause"] | null | null | null | tljh/configurer.py | budgester/the-littlest-jupyterhub | 94f747fd5d6acd6cb818c97c766eb8869128eabe | ["BSD-3-Clause"] | null | null | null |
"""
Parse YAML config file & update JupyterHub config.
Config should never append or mutate, only set. Functions here could
be called many times per lifetime of a jupyterhub.
Traitlets that modify the startup of JupyterHub should not be here.
FIXME: A strong feeling that JSON Schema should be involved somehow.
"""
import os
import sys
from .config import CONFIG_FILE, STATE_DIR
from .yaml import yaml
# Default configuration for tljh
# User provided config is merged into this
default = {
'auth': {
'type': 'firstuseauthenticator.FirstUseAuthenticator',
'FirstUseAuthenticator': {
'create_users': False
}
},
'users': {
'allowed': [],
'banned': [],
'admin': [],
'extra_user_groups': {}
},
'limits': {
'memory': None,
'cpu': None,
},
'http': {
'port': 80,
},
'https': {
'enabled': False,
'port': 443,
'tls': {
'cert': '',
'key': '',
},
'letsencrypt': {
'email': '',
'domains': [],
},
},
'traefik_api': {
'ip': "127.0.0.1",
'port': 8099,
'username': 'api_admin',
'password': '',
},
'user_environment': {
'default_app': 'classic',
},
'services': {
'cull': {
'enabled': True,
'timeout': 600,
'every': 60,
'concurrency': 5,
'users': False,
'max_age': 0
}
}
}
def load_config(config_file=CONFIG_FILE):
"""Load the current config as a dictionary
merges overrides from config.yaml with default config
"""
if os.path.exists(config_file):
with open(config_file) as f:
config_overrides = yaml.load(f)
else:
config_overrides = {}
secrets = load_secrets()
config = _merge_dictionaries(dict(default), secrets)
config = _merge_dictionaries(config, config_overrides)
return config
def apply_config(config_overrides, c):
"""
Merge config_overrides with config defaults & apply to JupyterHub config c
"""
tljh_config = _merge_dictionaries(dict(default), config_overrides)
update_auth(c, tljh_config)
update_userlists(c, tljh_config)
update_usergroups(c, tljh_config)
update_limits(c, tljh_config)
update_user_environment(c, tljh_config)
update_user_account_config(c, tljh_config)
update_traefik_api(c, tljh_config)
update_services(c, tljh_config)
def set_if_not_none(parent, key, value):
"""
Set attribute 'key' on parent if value is not None
"""
if value is not None:
setattr(parent, key, value)
def load_traefik_api_credentials():
"""Load traefik api secret from a file"""
proxy_secret_path = os.path.join(STATE_DIR, 'traefik-api.secret')
if not os.path.exists(proxy_secret_path):
return {}
with open(proxy_secret_path,'r') as f:
password = f.read()
return {
'traefik_api': {
'password': password,
}
}
def load_secrets():
"""Load any secret values stored on disk
Returns dict to be merged into config during load
"""
config = {}
config = _merge_dictionaries(config, load_traefik_api_credentials())
return config
def update_auth(c, config):
"""
Set auth related configuration from YAML config file
Use auth.type to determine authenticator to use. All parameters
in the config under auth.{auth.type} will be passed straight to the
authenticators themselves.
"""
auth = config.get('auth')
# FIXME: Make sure this is something importable.
# FIXME: SECURITY: Class must inherit from Authenticator, to prevent us being
# used to set arbitrary properties on arbitrary types of objects!
authenticator_class = auth['type']
# When specifying fully qualified name, use classname as key for config
authenticator_configname = authenticator_class.split('.')[-1]
c.JupyterHub.authenticator_class = authenticator_class
# Use just class name when setting config. If authenticator is dummyauthenticator.DummyAuthenticator,
# its config will be set under c.DummyAuthenticator
authenticator_parent = getattr(c, authenticator_class.split('.')[-1])
for k, v in auth.get(authenticator_configname, {}).items():
set_if_not_none(authenticator_parent, k, v)
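# For reference, a config.yaml fragment that update_auth understands (the
# authenticator and password shown here are hypothetical examples):
#
#   auth:
#     type: dummyauthenticator.DummyAuthenticator
#     DummyAuthenticator:
#       password: some-shared-password
#
# which sets c.JupyterHub.authenticator_class and c.DummyAuthenticator.password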
def update_userlists(c, config):
"""
Set user whitelists & admin lists
"""
users = config['users']
c.Authenticator.whitelist = set(users['allowed'])
c.Authenticator.blacklist = set(users['banned'])
c.Authenticator.admin_users = set(users['admin'])
def update_usergroups(c, config):
"""
Set user groups
"""
users = config['users']
c.UserCreatingSpawner.user_groups = users['extra_user_groups']
def update_limits(c, config):
"""
Set user server limits
"""
limits = config['limits']
c.Spawner.mem_limit = limits['memory']
c.Spawner.cpu_limit = limits['cpu']
def update_user_environment(c, config):
"""
Set user environment configuration
"""
user_env = config['user_environment']
# Set default application users are launched into
if user_env['default_app'] == 'jupyterlab':
c.Spawner.default_url = '/lab'
elif user_env['default_app'] == 'nteract':
c.Spawner.default_url = '/nteract'
def update_user_account_config(c, config):
c.SystemdSpawner.username_template = 'jupyter-{USERNAME}'
def update_traefik_api(c, config):
"""
Set traefik api endpoint credentials
"""
c.TraefikTomlProxy.traefik_api_username = config['traefik_api']['username']
c.TraefikTomlProxy.traefik_api_password = config['traefik_api']['password']
def set_cull_idle_service(config):
"""
Set Idle Culler service
"""
cull_cmd = [
sys.executable, '-m', 'tljh.cull_idle_servers'
]
cull_config = config['services']['cull']
cull_cmd += ['--timeout=%d' % cull_config['timeout']]
cull_cmd += ['--cull-every=%d' % cull_config['every']]
cull_cmd += ['--concurrency=%d' % cull_config['concurrency']]
cull_cmd += ['--max-age=%d' % cull_config['max_age']]
if cull_config['users']:
cull_cmd += ['--cull-users']
cull_service = {
'name': 'cull-idle',
'admin': True,
'command': cull_cmd,
}
return cull_service
def update_services(c, config):
c.JupyterHub.services = []
if config['services']['cull']['enabled']:
c.JupyterHub.services.append(set_cull_idle_service(config))
def _merge_dictionaries(a, b, path=None, update=True):
"""
Merge two dictionaries recursively.
From https://stackoverflow.com/a/7205107
"""
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
_merge_dictionaries(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass # same leaf value
elif update:
a[key] = b[key]
else:
raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
else:
a[key] = b[key]
return a
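# Merge semantics sketch (illustration only, not part of the original module):
#   _merge_dictionaries({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3})
#   -> {'a': {'x': 1, 'y': 2}, 'b': 3}  (nested dicts merge, scalars override)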
| 27.111524 | 105 | 0.620321 |
5622a03b4679c2edb231d908587ff4b0ae17ca11 | 10,163 | py | Python | test/test_vxlan_gpe.py | adwait1-G/vpp | 5f9f3c8de8d5d9f83a437661a98e5cc2453705f9 | ["Apache-2.0"] | null | null | null | test/test_vxlan_gpe.py | adwait1-G/vpp | 5f9f3c8de8d5d9f83a437661a98e5cc2453705f9 | ["Apache-2.0"] | null | null | null | test/test_vxlan_gpe.py | adwait1-G/vpp | 5f9f3c8de8d5d9f83a437661a98e5cc2453705f9 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
import socket
from util import ip4n_range, ip4_range
import unittest
from framework import VppTestCase, VppTestRunner, running_extended_tests
from template_bd import BridgeDomain
from scapy.layers.l2 import Ether, Raw
from scapy.layers.inet import IP, UDP
from scapy.layers.vxlan import VXLAN
from scapy.utils import atol
from vpp_ip_route import VppIpRoute, VppRoutePath
from vpp_ip import INVALID_INDEX
@unittest.skipUnless(running_extended_tests, "part of extended tests")
class TestVxlanGpe(BridgeDomain, VppTestCase):
""" VXLAN-GPE Test Case """
def __init__(self, *args):
BridgeDomain.__init__(self)
VppTestCase.__init__(self, *args)
def encapsulate(self, pkt, vni):
"""
Encapsulate the original payload frame by adding VXLAN-GPE header
with its UDP, IP and Ethernet fields
"""
return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
UDP(sport=self.dport, dport=self.dport, chksum=0) /
VXLAN(vni=vni, flags=self.flags) /
pkt)
def ip_range(self, start, end):
""" range of remote ip's """
return ip4_range(self.pg0.remote_ip4, start, end)
def encap_mcast(self, pkt, src_ip, src_mac, vni):
"""
Encapsulate the original payload frame by adding VXLAN-GPE header
with its UDP, IP and Ethernet fields
"""
return (Ether(src=src_mac, dst=self.mcast_mac) /
IP(src=src_ip, dst=self.mcast_ip4) /
UDP(sport=self.dport, dport=self.dport, chksum=0) /
VXLAN(vni=vni, flags=self.flags) /
pkt)
def decapsulate(self, pkt):
"""
Decapsulate the original payload frame by removing VXLAN-GPE header
"""
        # verify that the I and P flags (0x0c) are set
self.assertEqual(pkt[VXLAN].flags, 0x0c)
return pkt[VXLAN].payload
# Method for checking VXLAN-GPE encapsulation.
#
def check_encapsulation(self, pkt, vni, local_only=False, mcast_pkt=False):
# Verify source MAC is VPP_MAC and destination MAC is MY_MAC resolved
# by VPP using ARP.
self.assertEqual(pkt[Ether].src, self.pg0.local_mac)
if not local_only:
if not mcast_pkt:
self.assertEqual(pkt[Ether].dst, self.pg0.remote_mac)
else:
self.assertEqual(pkt[Ether].dst, type(self).mcast_mac)
# Verify VXLAN-GPE tunnel src IP is VPP_IP and dst IP is MY_IP.
self.assertEqual(pkt[IP].src, self.pg0.local_ip4)
if not local_only:
if not mcast_pkt:
self.assertEqual(pkt[IP].dst, self.pg0.remote_ip4)
else:
self.assertEqual(pkt[IP].dst, type(self).mcast_ip4)
# Verify UDP destination port is VXLAN-GPE 4790, source UDP port
# could be arbitrary.
self.assertEqual(pkt[UDP].dport, type(self).dport)
# Verify VNI
self.assertEqual(pkt[VXLAN].vni, vni)
@classmethod
def create_vxlan_gpe_flood_test_bd(cls, vni, n_ucast_tunnels):
# Create 10 ucast vxlan tunnels under bd
ip_range_start = 10
ip_range_end = ip_range_start + n_ucast_tunnels
next_hop_address = cls.pg0.remote_ip4
for dest_ip4 in ip4_range(next_hop_address, ip_range_start,
ip_range_end):
# add host route so dest_ip4n will not be resolved
rip = VppIpRoute(cls, dest_ip4, 32,
[VppRoutePath(next_hop_address,
INVALID_INDEX)],
register=False)
rip.add_vpp_config()
dest_ip4n = socket.inet_pton(socket.AF_INET, dest_ip4)
r = cls.vapi.vxlan_gpe_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=dest_ip4n,
vni=vni)
cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
bd_id=vni)
@classmethod
def add_del_shared_mcast_dst_load(cls, is_add):
"""
add or del tunnels sharing the same mcast dst
to test vxlan_gpe ref_count mechanism
"""
n_shared_dst_tunnels = 20
vni_start = 1000
vni_end = vni_start + n_shared_dst_tunnels
for vni in range(vni_start, vni_end):
r = cls.vapi.vxlan_gpe_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=cls.mcast_ip4n,
mcast_sw_if_index=1,
vni=vni,
is_add=is_add)
if r.sw_if_index == 0xffffffff:
raise ValueError("bad sw_if_index: ~0")
@classmethod
def add_shared_mcast_dst_load(cls):
cls.add_del_shared_mcast_dst_load(is_add=1)
@classmethod
def del_shared_mcast_dst_load(cls):
cls.add_del_shared_mcast_dst_load(is_add=0)
@classmethod
def add_del_mcast_tunnels_load(cls, is_add):
"""
add or del tunnels to test vxlan_gpe stability
"""
n_distinct_dst_tunnels = 20
ip_range_start = 10
ip_range_end = ip_range_start + n_distinct_dst_tunnels
for dest_ip4n in ip4n_range(cls.mcast_ip4n, ip_range_start,
ip_range_end):
vni = bytearray(dest_ip4n)[3]
cls.vapi.vxlan_gpe_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=dest_ip4n,
mcast_sw_if_index=1,
vni=vni,
is_add=is_add)
@classmethod
def add_mcast_tunnels_load(cls):
cls.add_del_mcast_tunnels_load(is_add=1)
@classmethod
def del_mcast_tunnels_load(cls):
cls.add_del_mcast_tunnels_load(is_add=0)
# Class method to start the VXLAN-GPE test case.
# Overrides setUpClass method in VppTestCase class.
# Python try..except statement is used to ensure that the tear down of
# the class will be executed even if exception is raised.
# @param cls The class pointer.
@classmethod
def setUpClass(cls):
super(TestVxlanGpe, cls).setUpClass()
try:
cls.dport = 4790
cls.flags = 0x0c
# Create 2 pg interfaces.
cls.create_pg_interfaces(range(4))
for pg in cls.pg_interfaces:
pg.admin_up()
# Configure IPv4 addresses on VPP pg0.
cls.pg0.config_ip4()
# Resolve MAC address for VPP's IP address on pg0.
cls.pg0.resolve_arp()
# Our Multicast address
cls.mcast_ip4 = '239.1.1.1'
cls.mcast_ip4n = socket.inet_pton(socket.AF_INET, cls.mcast_ip4)
iplong = atol(cls.mcast_ip4)
cls.mcast_mac = "01:00:5e:%02x:%02x:%02x" % (
(iplong >> 16) & 0x7F, (iplong >> 8) & 0xFF, iplong & 0xFF)
# Create VXLAN-GPE VTEP on VPP pg0, and put vxlan_gpe_tunnel0
# and pg1 into BD.
cls.single_tunnel_bd = 11
r = cls.vapi.vxlan_gpe_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=cls.pg0.remote_ip4n,
vni=cls.single_tunnel_bd)
cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
bd_id=cls.single_tunnel_bd)
cls.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=cls.pg1.sw_if_index, bd_id=cls.single_tunnel_bd)
# Setup vni 2 to test multicast flooding
cls.n_ucast_tunnels = 10
cls.mcast_flood_bd = 12
cls.create_vxlan_gpe_flood_test_bd(cls.mcast_flood_bd,
cls.n_ucast_tunnels)
r = cls.vapi.vxlan_gpe_add_del_tunnel(
src_addr=cls.pg0.local_ip4n,
dst_addr=cls.mcast_ip4n,
mcast_sw_if_index=1,
vni=cls.mcast_flood_bd)
cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
bd_id=cls.mcast_flood_bd)
cls.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=cls.pg2.sw_if_index, bd_id=cls.mcast_flood_bd)
# Add and delete mcast tunnels to check stability
cls.add_shared_mcast_dst_load()
cls.add_mcast_tunnels_load()
cls.del_shared_mcast_dst_load()
cls.del_mcast_tunnels_load()
# Setup vni 3 to test unicast flooding
cls.ucast_flood_bd = 13
cls.create_vxlan_gpe_flood_test_bd(cls.ucast_flood_bd,
cls.n_ucast_tunnels)
cls.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=cls.pg3.sw_if_index, bd_id=cls.ucast_flood_bd)
except Exception:
super(TestVxlanGpe, cls).tearDownClass()
raise
@classmethod
def tearDownClass(cls):
super(TestVxlanGpe, cls).tearDownClass()
@unittest.skip("test disabled for vxlan-gpe")
def test_mcast_flood(self):
""" inherited from BridgeDomain """
pass
@unittest.skip("test disabled for vxlan-gpe")
def test_mcast_rcv(self):
""" inherited from BridgeDomain """
pass
# Method to define VPP actions before tear down of the test case.
# Overrides tearDown method in VppTestCase class.
# @param self The object pointer.
def tearDown(self):
super(TestVxlanGpe, self).tearDown()
def show_commands_at_teardown(self):
self.logger.info(self.vapi.cli("show bridge-domain 11 detail"))
self.logger.info(self.vapi.cli("show bridge-domain 12 detail"))
self.logger.info(self.vapi.cli("show bridge-domain 13 detail"))
self.logger.info(self.vapi.cli("show int"))
self.logger.info(self.vapi.cli("show vxlan-gpe"))
self.logger.info(self.vapi.cli("show trace"))
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
| 38.496212 | 79 | 0.607104 |
c19f7b034b1641c9ccd88634f12fcdc3013bce09 | 15,336 | py | Python | projects/DensePose/densepose/data/datasets/coco.py | mmabrouk/detectron2 | 158e395acdb8ca6ed6d488b43475f9ef9d200405 | [
"Apache-2.0"
] | 21,274 | 2019-10-10T17:50:46.000Z | 2022-03-31T17:58:45.000Z | projects/DensePose/densepose/data/datasets/coco.py | mmabrouk/detectron2 | 158e395acdb8ca6ed6d488b43475f9ef9d200405 | [
"Apache-2.0"
] | 3,253 | 2019-10-10T20:39:47.000Z | 2022-03-31T22:27:53.000Z | projects/DensePose/densepose/data/datasets/coco.py | mmabrouk/detectron2 | 158e395acdb8ca6ed6d488b43475f9ef9d200405 | [
"Apache-2.0"
] | 6,288 | 2019-10-10T18:00:27.000Z | 2022-03-31T21:22:58.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
import contextlib
import io
import logging
import os
from collections import defaultdict
from dataclasses import dataclass
from typing import Any, Dict, Iterable, List, Optional
from fvcore.common.timer import Timer
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode
from detectron2.utils.file_io import PathManager
from ..utils import maybe_prepend_base_path
DENSEPOSE_MASK_KEY = "dp_masks"
DENSEPOSE_IUV_KEYS_WITHOUT_MASK = ["dp_x", "dp_y", "dp_I", "dp_U", "dp_V"]
DENSEPOSE_CSE_KEYS_WITHOUT_MASK = ["dp_x", "dp_y", "dp_vertex", "ref_model"]
DENSEPOSE_ALL_POSSIBLE_KEYS = set(
DENSEPOSE_IUV_KEYS_WITHOUT_MASK + DENSEPOSE_CSE_KEYS_WITHOUT_MASK + [DENSEPOSE_MASK_KEY]
)
DENSEPOSE_METADATA_URL_PREFIX = "https://dl.fbaipublicfiles.com/densepose/data/"
@dataclass
class CocoDatasetInfo:
name: str
images_root: str
annotations_fpath: str
DATASETS = [
CocoDatasetInfo(
name="densepose_coco_2014_train",
images_root="coco/train2014",
annotations_fpath="coco/annotations/densepose_train2014.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_minival",
images_root="coco/val2014",
annotations_fpath="coco/annotations/densepose_minival2014.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_minival_100",
images_root="coco/val2014",
annotations_fpath="coco/annotations/densepose_minival2014_100.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_valminusminival",
images_root="coco/val2014",
annotations_fpath="coco/annotations/densepose_valminusminival2014.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_train_cse",
images_root="coco/train2014",
annotations_fpath="coco_cse/densepose_train2014_cse.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_minival_cse",
images_root="coco/val2014",
annotations_fpath="coco_cse/densepose_minival2014_cse.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_minival_100_cse",
images_root="coco/val2014",
annotations_fpath="coco_cse/densepose_minival2014_100_cse.json",
),
CocoDatasetInfo(
name="densepose_coco_2014_valminusminival_cse",
images_root="coco/val2014",
annotations_fpath="coco_cse/densepose_valminusminival2014_cse.json",
),
CocoDatasetInfo(
name="densepose_chimps",
images_root="densepose_chimps/images",
annotations_fpath="densepose_chimps/densepose_chimps_densepose.json",
),
CocoDatasetInfo(
name="densepose_chimps_cse_train",
images_root="densepose_chimps/images",
annotations_fpath="densepose_chimps/densepose_chimps_cse_train.json",
),
CocoDatasetInfo(
name="densepose_chimps_cse_val",
images_root="densepose_chimps/images",
annotations_fpath="densepose_chimps/densepose_chimps_cse_val.json",
),
CocoDatasetInfo(
name="posetrack2017_train",
images_root="posetrack2017/posetrack_data_2017",
annotations_fpath="posetrack2017/densepose_posetrack_train2017.json",
),
CocoDatasetInfo(
name="posetrack2017_val",
images_root="posetrack2017/posetrack_data_2017",
annotations_fpath="posetrack2017/densepose_posetrack_val2017.json",
),
CocoDatasetInfo(
name="lvis_v05_train",
images_root="coco/train2017",
annotations_fpath="lvis/lvis_v0.5_plus_dp_train.json",
),
CocoDatasetInfo(
name="lvis_v05_val",
images_root="coco/val2017",
annotations_fpath="lvis/lvis_v0.5_plus_dp_val.json",
),
]
BASE_DATASETS = [
CocoDatasetInfo(
name="base_coco_2017_train",
images_root="coco/train2017",
annotations_fpath="coco/annotations/instances_train2017.json",
),
CocoDatasetInfo(
name="base_coco_2017_val",
images_root="coco/val2017",
annotations_fpath="coco/annotations/instances_val2017.json",
),
CocoDatasetInfo(
name="base_coco_2017_val_100",
images_root="coco/val2017",
annotations_fpath="coco/annotations/instances_val2017_100.json",
),
]
def get_metadata(base_path: Optional[str]) -> Dict[str, Any]:
"""
Returns metadata associated with COCO DensePose datasets
Args:
base_path: Optional[str]
Base path used to load metadata from
Returns:
Dict[str, Any]
Metadata in the form of a dictionary
"""
meta = {
"densepose_transform_src": maybe_prepend_base_path(base_path, "UV_symmetry_transforms.mat"),
"densepose_smpl_subdiv": maybe_prepend_base_path(base_path, "SMPL_subdiv.mat"),
"densepose_smpl_subdiv_transform": maybe_prepend_base_path(
base_path,
"SMPL_SUBDIV_TRANSFORM.mat",
),
}
return meta
def _load_coco_annotations(json_file: str):
"""
Load COCO annotations from a JSON file
Args:
json_file: str
Path to the file to load annotations from
Returns:
Instance of `pycocotools.coco.COCO` that provides access to annotations
data
"""
from pycocotools.coco import COCO
logger = logging.getLogger(__name__)
timer = Timer()
with contextlib.redirect_stdout(io.StringIO()):
coco_api = COCO(json_file)
if timer.seconds() > 1:
logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
return coco_api
def _add_categories_metadata(dataset_name: str, categories: List[Dict[str, Any]]):
meta = MetadataCatalog.get(dataset_name)
meta.categories = {c["id"]: c["name"] for c in categories}
logger = logging.getLogger(__name__)
logger.info("Dataset {} categories: {}".format(dataset_name, meta.categories))
def _verify_annotations_have_unique_ids(json_file: str, anns: List[List[Dict[str, Any]]]):
if "minival" in json_file:
# Skip validation on COCO2014 valminusminival and minival annotations
# The ratio of buggy annotations there is tiny and does not affect accuracy
# Therefore we explicitly white-list them
return
ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format(
json_file
)
def _maybe_add_bbox(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
if "bbox" not in ann_dict:
return
obj["bbox"] = ann_dict["bbox"]
obj["bbox_mode"] = BoxMode.XYWH_ABS
def _maybe_add_segm(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
if "segmentation" not in ann_dict:
return
segm = ann_dict["segmentation"]
if not isinstance(segm, dict):
# filter out invalid polygons (< 3 points)
segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
if len(segm) == 0:
return
obj["segmentation"] = segm
def _maybe_add_keypoints(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
if "keypoints" not in ann_dict:
return
keypts = ann_dict["keypoints"] # list[int]
for idx, v in enumerate(keypts):
if idx % 3 != 2:
# COCO's segmentation coordinates are floating points in [0, H or W],
# but keypoint coordinates are integers in [0, H-1 or W-1]
# Therefore we assume the coordinates are "pixel indices" and
# add 0.5 to convert to floating point coordinates.
keypts[idx] = v + 0.5
obj["keypoints"] = keypts
def _maybe_add_densepose(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
for key in DENSEPOSE_ALL_POSSIBLE_KEYS:
if key in ann_dict:
obj[key] = ann_dict[key]
def _combine_images_with_annotations(
dataset_name: str,
image_root: str,
img_datas: Iterable[Dict[str, Any]],
ann_datas: Iterable[Iterable[Dict[str, Any]]],
):
ann_keys = ["iscrowd", "category_id"]
dataset_dicts = []
contains_video_frame_info = False
for img_dict, ann_dicts in zip(img_datas, ann_datas):
record = {}
record["file_name"] = os.path.join(image_root, img_dict["file_name"])
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
record["image_id"] = img_dict["id"]
record["dataset"] = dataset_name
if "frame_id" in img_dict:
record["frame_id"] = img_dict["frame_id"]
record["video_id"] = img_dict.get("vid_id", None)
contains_video_frame_info = True
objs = []
for ann_dict in ann_dicts:
assert ann_dict["image_id"] == record["image_id"]
assert ann_dict.get("ignore", 0) == 0
obj = {key: ann_dict[key] for key in ann_keys if key in ann_dict}
_maybe_add_bbox(obj, ann_dict)
_maybe_add_segm(obj, ann_dict)
_maybe_add_keypoints(obj, ann_dict)
_maybe_add_densepose(obj, ann_dict)
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
if contains_video_frame_info:
create_video_frame_mapping(dataset_name, dataset_dicts)
return dataset_dicts
def get_contiguous_id_to_category_id_map(metadata):
cat_id_2_cont_id = metadata.thing_dataset_id_to_contiguous_id
cont_id_2_cat_id = {}
for cat_id, cont_id in cat_id_2_cont_id.items():
if cont_id in cont_id_2_cat_id:
continue
cont_id_2_cat_id[cont_id] = cat_id
return cont_id_2_cat_id
def maybe_filter_categories_cocoapi(dataset_name, coco_api):
meta = MetadataCatalog.get(dataset_name)
cont_id_2_cat_id = get_contiguous_id_to_category_id_map(meta)
cat_id_2_cont_id = meta.thing_dataset_id_to_contiguous_id
# filter categories
cats = []
for cat in coco_api.dataset["categories"]:
cat_id = cat["id"]
if cat_id not in cat_id_2_cont_id:
continue
cont_id = cat_id_2_cont_id[cat_id]
if (cont_id in cont_id_2_cat_id) and (cont_id_2_cat_id[cont_id] == cat_id):
cats.append(cat)
coco_api.dataset["categories"] = cats
# filter annotations, if multiple categories are mapped to a single
# contiguous ID, use only one category ID and map all annotations to that category ID
anns = []
for ann in coco_api.dataset["annotations"]:
cat_id = ann["category_id"]
if cat_id not in cat_id_2_cont_id:
continue
cont_id = cat_id_2_cont_id[cat_id]
ann["category_id"] = cont_id_2_cat_id[cont_id]
anns.append(ann)
coco_api.dataset["annotations"] = anns
# recreate index
coco_api.createIndex()
def maybe_filter_and_map_categories_cocoapi(dataset_name, coco_api):
meta = MetadataCatalog.get(dataset_name)
category_id_map = meta.thing_dataset_id_to_contiguous_id
# map categories
cats = []
for cat in coco_api.dataset["categories"]:
cat_id = cat["id"]
if cat_id not in category_id_map:
continue
cat["id"] = category_id_map[cat_id]
cats.append(cat)
coco_api.dataset["categories"] = cats
# map annotation categories
anns = []
for ann in coco_api.dataset["annotations"]:
cat_id = ann["category_id"]
if cat_id not in category_id_map:
continue
ann["category_id"] = category_id_map[cat_id]
anns.append(ann)
coco_api.dataset["annotations"] = anns
# recreate index
coco_api.createIndex()
def create_video_frame_mapping(dataset_name, dataset_dicts):
mapping = defaultdict(dict)
for d in dataset_dicts:
video_id = d.get("video_id")
if video_id is None:
continue
mapping[video_id].update({d["frame_id"]: d["file_name"]})
MetadataCatalog.get(dataset_name).set(video_frame_mapping=mapping)
def load_coco_json(annotations_json_file: str, image_root: str, dataset_name: str):
"""
Loads a JSON file with annotations in COCO instances format.
Replaces `detectron2.data.datasets.coco.load_coco_json` to handle metadata
in a more flexible way. Postpones category mapping to a later stage to be
able to combine several datasets with different (but coherent) sets of
categories.
Args:
annotations_json_file: str
Path to the JSON file with annotations in COCO instances format.
image_root: str
directory that contains all the images
dataset_name: str
the name that identifies a dataset, e.g. "densepose_coco_2014_train"
    """
coco_api = _load_coco_annotations(PathManager.get_local_path(annotations_json_file))
_add_categories_metadata(dataset_name, coco_api.loadCats(coco_api.getCatIds()))
# sort indices for reproducible results
img_ids = sorted(coco_api.imgs.keys())
# imgs is a list of dicts, each looks something like:
# {'license': 4,
# 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
# 'file_name': 'COCO_val2014_000000001268.jpg',
# 'height': 427,
# 'width': 640,
# 'date_captured': '2013-11-17 05:57:24',
# 'id': 1268}
imgs = coco_api.loadImgs(img_ids)
logger = logging.getLogger(__name__)
logger.info("Loaded {} images in COCO format from {}".format(len(imgs), annotations_json_file))
# anns is a list[list[dict]], where each dict is an annotation
# record for an object. The inner list enumerates the objects in an image
# and the outer list enumerates over images.
anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
_verify_annotations_have_unique_ids(annotations_json_file, anns)
dataset_records = _combine_images_with_annotations(dataset_name, image_root, imgs, anns)
return dataset_records
def register_dataset(dataset_data: CocoDatasetInfo, datasets_root: Optional[str] = None):
"""
Registers provided COCO DensePose dataset
Args:
dataset_data: CocoDatasetInfo
Dataset data
datasets_root: Optional[str]
Datasets root folder (default: None)
"""
annotations_fpath = maybe_prepend_base_path(datasets_root, dataset_data.annotations_fpath)
images_root = maybe_prepend_base_path(datasets_root, dataset_data.images_root)
def load_annotations():
return load_coco_json(
annotations_json_file=annotations_fpath,
image_root=images_root,
dataset_name=dataset_data.name,
)
DatasetCatalog.register(dataset_data.name, load_annotations)
MetadataCatalog.get(dataset_data.name).set(
json_file=annotations_fpath,
image_root=images_root,
**get_metadata(DENSEPOSE_METADATA_URL_PREFIX)
)
def register_datasets(
datasets_data: Iterable[CocoDatasetInfo], datasets_root: Optional[str] = None
):
"""
Registers provided COCO DensePose datasets
Args:
datasets_data: Iterable[CocoDatasetInfo]
An iterable of dataset datas
datasets_root: Optional[str]
Datasets root folder (default: None)
"""
for dataset_data in datasets_data:
register_dataset(dataset_data, datasets_root)
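# Typical wiring, as a sketch (the real registration entry point lives
# elsewhere in the package; "datasets" as the root folder is an assumption):
#   register_datasets(DATASETS + BASE_DATASETS, datasets_root="datasets")
#   dicts = DatasetCatalog.get("densepose_coco_2014_train")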
| 35.418014 | 100 | 0.68838 |
8f9cb1a645d6b0a22cd77accba2a6d1d35d02140 | 15,078 | py | Python | goeset/calcs.py | jbellino-usgs/Florida-GOES-ET | 237b1604ea99851f8032e9ad39bd571629c76a34 | ["CC0-1.0"] | null | null | null | goeset/calcs.py | jbellino-usgs/Florida-GOES-ET | 237b1604ea99851f8032e9ad39bd571629c76a34 | ["CC0-1.0"] | null | null | null | goeset/calcs.py | jbellino-usgs/Florida-GOES-ET | 237b1604ea99851f8032e9ad39bd571629c76a34 | ["CC0-1.0"] | null | null | null |
import numpy as np
# Fixed variables for ET calculations
P_atm = 101.3
gsc = 0.082
stefan_MJ = 4.903e-9
stefan_W = 5.67e-8
e_surface = 0.97
# Locally calibrated Sellers' parameters
a1 = 0.575
a2 = 0.054
alpha = 1.26
def avg_arrays(arrays):
"""
Take the average of multiple arrays.
arrays : list of numpy ndarrays
"""
return np.ma.sum(arrays, axis=0) / len(arrays)
def calc_saturation_vapor_pressure(t):
"""
Calculate the saturation vapor pressure at air temperature T.
Equation 11 of Allen (1998). Temperature is in Celsius.
Parameters
----------
t : numpy ndarray
Air temperature, in degrees Celsius
"""
return 0.6108 * np.exp((17.27 * t) / (t + 237.3))
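# The expression above in LaTeX (eq. 11 of Allen, 1998; T in deg C, e_s in kPa):
#   e_s(T) = 0.6108 \exp\left( \frac{17.27 \, T}{T + 237.3} \right)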
def calc_actual_vapor_pressure(es_tmin, es_tmax, hmin, hmax):
"""
Calculate actual vapor pressure from saturation vapor pressure
and relative humidity. Equation 17 of Allen (1998).
Parameters
----------
es_tmin : numpy ndarray
Saturation vapor pressure at minimum air temperature.
es_tmax : numpy ndarray
Saturation vapor pressure at maximum air temperature.
hmin : numpy ndarray
Minimum relative humidity, in percent
hmax : numpy ndarray
Maximum relative humidity, in percent
"""
return ((es_tmax * hmin / 100.) + (es_tmin * hmax / 100.)) / 2.
def calc_saturation_vapor_pressure_curve_delta(t):
"""
Calculate the slope of the saturation vapor pressure curve at air temperature T.
Equation 13 of Allen (1998).
Parameters
----------
t : numpy ndarray
Air temperature, in degrees Celsius
"""
return 4098 * (0.6108 * np.exp((17.27 * t) / (t + 237.3))) / ((t + 237.3) ** 2.)
def calc_inverse_earth_sun_dist(j, ndays):
"""
Calculate the inverse Earth-sun distance for Julian day(s) J.
Equation 23 of Allen (1998).
Parameters
----------
j : numpy ndarray
Julian day
ndays : int
Number of days in the year
"""
return 1. + 0.033 * np.cos(2. * np.pi * (j - .5) / ndays)
def calc_declination(j):
"""
Calculate the declination of the sun for Julian day(s) J. Note the original Fortran code used [2pi/366] * J.
Equation 24 of Allen (1998).
Parameters
----------
j : numpy ndarray
Julian day
"""
return 0.409 * np.sin(2. * np.pi * (j - .5) / 365. - 1.39)
def calc_rnl(tmin, tmax, ea, fcd):
"""
Equation 39 of Allen (1998). Net outgoing longwave radiation.
Parameters
----------
tmin : numpy ndarray of floats
Minimum daily air temperature, in degrees Celsius
tmax : numpy ndarray of floats
Maximum daily air temperature, in degrees Celsius
ea : numpy ndarray
Actual vapor pressure
*fcd : numpy ndarray
Total solar radiation divided by clear-sky solar radiation
* Not in equation 39 of Allen (1998), logic was present in Fortran script provided by W.B. Shoemaker
"""
return stefan_MJ * (((tmax + 273.15) ** 4. + (tmin + 273.15) ** 4.) / 2.) * (0.34 - 0.14 * np.sqrt(ea)) * fcd
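# The expression above in LaTeX (eq. 39 of Allen, 1998, with the extra fcd
# factor; temperatures in Kelvin):
#   R_{nl} = \sigma \, \frac{T_{max,K}^4 + T_{min,K}^4}{2}
#            \left( 0.34 - 0.14 \sqrt{e_a} \right) f_{cd}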
def calc_ws(rad_lat, declination):
"""
Solar time angle at mid day.
Parameters
----------
rad_lat : numpy ndarray
Latitude in radians.
declination : numpy ndarray
Solar declination.
"""
xx = 1.0 - (((np.tan(rad_lat)) ** 2.) * ((np.tan(declination)) ** 2.))
ws = np.pi / 2. - np.arctan((-np.tan(rad_lat)) * (np.tan(declination)) / (xx ** 0.5))
return ws
def calc_ra(dr, ws, rad_lat, declination):
"""
Compute extraterrestrial radiation
Variation on equation 28 of Allen (1998)
Note: Allen uses 12 hours, but Jacobs paper uses 24.
Parameters
----------
dr : numpy ndarray
Inverse relative Earth-sun distance.
ws : numpy ndarray
Solar time angle at mid day.
rad_lat : numpy ndarray
Latitude in radians.
declination : numpy ndarray
Solar declination.
"""
ra = (24. * 60. / np.pi) * gsc * dr * (ws * (np.sin(rad_lat)) *
(np.sin(declination)) + (np.cos(rad_lat)) * (np.cos(declination)) * (
np.sin(ws)))
return ra
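# The expression above in LaTeX (daily extraterrestrial radiation; phi is the
# latitude, delta the declination, omega_s the sunset hour angle):
#   R_a = \frac{24 \cdot 60}{\pi} \, G_{sc} \, d_r
#         \left[ \omega_s \sin\varphi \sin\delta + \cos\varphi \cos\delta \sin\omega_s \right]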
def calc_rns(rs, albedo):
"""
Total incoming shortwave radiation.
Parameters
----------
rs : numpy ndarray
Total incoming shortwave solar radiation, in MegaJoules per square meter per day
albedo : numpy ndarray
Shortwave blue-sky albedo, unitless
"""
return (1. - albedo) * rs
def calc_rlu(t):
"""
Upwelling longwave radiation.
Parameters
----------
t : numpy ndarray
Air temperature, in degrees Celsius
"""
return e_surface * stefan_W * (t + 273.15) ** 4.
def calc_rso(ra, frac=0.75):
"""
Clear-sky solar radiation.
Parameters
----------
ra : numpy ndarray
Extraterrestrial solar radiation.
frac : float <= 1
Fraction of extraterrestrial solar radiation that
reaches earth on clear-sky days.
"""
s = 'Please enter a fractional value less than or equal to 1.0 and ' \
'greater than or equal to 0.0.'
assert 0 <= frac <= 1, s
return ra * frac
def calc_fcd(rso):
"""
# IF((RS(ss)/Rso) .GT. 1.0)THEN
# fcd = 1.0
# ELSE IF((RS(ss)/Rso) .LT. 0.3)THEN
# fcd = 0.05
# ELSE
# fcd = 1.35 * (RS(ss)/Rso) - 0.35
# ENDIF
Parameters
----------
    rso : numpy ndarray
        Ratio of measured to clear-sky solar radiation (rs / rso), per the
        embedded Fortran logic above.
"""
fcd = rso.copy()
fcd[rso > 1] = 1.
fcd[rso < 0.3] = 0.05
fcd[((rso >= 0.3) & (rso <= 1))] = (1.35 * fcd[((rso >= 0.3) & (rso <= 1))]) - .35
return fcd
def calc_rldc(ea, t):
"""
Clear sky downwelling longwave radiation.
Parameters
----------
ea : numpy ndarray
Actual vapor pressure
t : numpy ndarray
Air temperature, in degrees Celsius
"""
return (a1 + a2 * np.sqrt(10. * ea)) * stefan_W * (t + 273.15) ** 4.
def calc_clf(rs, rso):
"""
Crawford and Duchon (1999) cloud fraction.
Parameters
----------
rs : numpy ndarray
Total incoming shortwave solar radiation, in MegaJoules per square meter per day
rso : numpy ndarray
Clear-sky solar radiation.
"""
clf = 1.0 - (rs / rso)
# From Crawford and Duchon (1999):
# Calculated values of clf less than zero were adjusted back to
# zero so as to be physically realistic.
clf[clf < 0] = 0
return clf
def calc_rld(rldc, clf, t):
"""
Cloudy sky downwelling longwave radiation.
Parameters
----------
rldc : numpy ndarray of floats
Clear sky downwelling longwave radiation.
clf : numpy ndarray of floats
Crawford and Duchon (1999) cloud fraction.
t : numpy ndarray of floats
Air temperature, in degrees Celsius.
"""
s = 'Please enter a cloud-fraction value less than or equal to 1.0 and ' \
'greater than or equal to 0.0.'
assert np.all((0 <= clf) & (clf <= 1)), s
return rldc * (1. - clf) + clf * stefan_W * (t + 273.15) ** 4.
def calc_ern(rns, rld, rlu):
"""
Energy Balance (ERn)
Conversion from W/m2 to MJ/m2
Parameters
----------
rns : numpy ndarray of floats
Net shortwave radiation.
rld : numpy ndarray of floats
Cloudy sky downwelling longwave radiation.
rlu : numpy ndarray of floats
Upwelling longwave radiation.
"""
return rns + e_surface * rld * 0.0864 - rlu * 0.0864
def calc_eto(dates, tmin, tmax, hmin, hmax, ws2m, rs, lat, albedo=0.23):
"""
Allen, R.G., Pereira, L.S., Paes, D., and Smith, M., 1998, Crop
evapotranspiration - Guidelines for computing crop water requirements -
FAO Irrigation and drainage paper 56.
Parameters
----------
dates : numpy ndarray of datetime.date objects
Dates for which potential evapotranspiration are to be computed
tmin : numpy ndarray of floats
Daily minimum air temperature, in degrees Celsius
tmax : numpy ndarray of floats
Daily maximum air temperature, in degrees Celsius
hmin : numpy ndarray of floats
Daily minimum percent relative humidity
hmax : numpy ndarray of floats
Daily maximum percent relative humidity
    ws2m : numpy ndarray of floats
        Daily average wind speed at 2-meter height, in meters per second
    rs : numpy ndarray of floats
        Total incoming shortwave solar radiation, in MegaJoules per square meter per day
lat : numpy ndarray of floats
Latitude, in degrees
albedo : float
Albedo, unitless (Albedo for RET is 0.23.)
"""
# Number of days in the year.
ndays = len(dates)
# Julian days
jday = np.array(list(range(1, ndays + 1)))
# Atmospheric pressure is 101.3. Can adjust to temperature, if desired.
# Use optional equations embedded in DO loop.
# P_atm = 101.3*((293.0-(0.0065*elevation))/293.0)**5.26
# P_atm = 101.3*((Temp/293.0)**5.26
gamma = 0.665 * 10.0 ** (-3.0) * P_atm
# Compute average temperature and humidity
tavg = avg_arrays([tmax, tmin])
# Compute saturation vapor pressure (eq. 12, Allen [1998])
# Not equivalent to saturation vapor pressure of average temperature.
es_tmin = calc_saturation_vapor_pressure(tmin)
es_tmax = calc_saturation_vapor_pressure(tmax)
es = avg_arrays([es_tmin, es_tmax])
# Calculate actual vapor pressure
ea = calc_actual_vapor_pressure(es_tmin, es_tmax, hmin, hmax)
# Calculate the slope of the saturation vapor pressure curve
delta = calc_saturation_vapor_pressure_curve_delta(tavg)
# Latitude in radians.
rad_lat = np.array([(np.pi / 180.) * lat] * ndays)
# Calculate inverse relative Earth-sun distance
dr = np.ones_like(rad_lat)
for j in jday:
dr[j - 1] *= calc_inverse_earth_sun_dist(j, ndays)
# Calculate solar declination
declination = np.ones_like(rad_lat)
for j in jday:
declination[j - 1] *= calc_declination(j)
# Calculate solar time angle at mid day
ws = calc_ws(rad_lat, declination)
# Compute extraterrestrial radiation
ra = calc_ra(dr, ws, rad_lat, declination)
# Clear-sky solar radiation where 75 percent of extraterrestrial
# radiation reaches earth on clear-sky days.
rso = calc_rso(ra, frac=0.75)
# Net shortwave radiation.
rns = calc_rns(rs, albedo)
# Compute total solar radiation divided by clear-sky solar radiation.
rs_rso = rs / rso
# Create new array "fcd", equal to total solar radiation
# divided by clear-sky solar radiation.
fcd = rs_rso.copy()
# IF((RS(ss)/Rso) .GT. 1.0)THEN
# fcd = 1.0
# ELSE IF((RS(ss)/Rso) .LT. 0.3)THEN
# fcd = 0.05
# ELSE
# fcd = 1.35 * (RS(ss)/Rso) - 0.35
# ENDIF
fcd[rs_rso > 1] = 1.
fcd[rs_rso < 0.3] = 0.05
fcd[((rs_rso >= 0.3) & (rs_rso <= 1))] = (1.35 * fcd[((rs_rso >= 0.3) & (rs_rso <= 1))]) - .35
# Calculate net outgoing longwave radiation
rnl = calc_rnl(tmin, tmax, ea, fcd)
# Equation 40 of Allen(1998). Net radiation as the difference between
# the incoming net shortwave radiation and the outgoing net longwave
# radiation
rn = rns - rnl
# Equation 6 of Allen(1998). Reference evapotranspiration based on Food and Agriculural
# Organization of the United Nations, Penman-Monteith equation. Temperature converted
# to Kelvin (note Allen used 273.16).
# Assume soil heat flux density is zero (term in Rn-0.0).
g = 0.
eto = (0.408 * delta * (rn - g) + gamma * (900.0 * ws2m * (es - ea)) / (tavg + 273.15)) / (
delta + gamma * (1. + 0.34 * ws2m))
return eto
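# The returned expression in LaTeX (eq. 6 of Allen, 1998: FAO-56 Penman-Monteith
# with G = 0 and the Kelvin conversion as coded above):
#   ET_o = \frac{0.408 \, \Delta (R_n - G) + \gamma \, \frac{900}{T + 273.15} \, u_2 (e_s - e_a)}
#          {\Delta + \gamma (1 + 0.34 \, u_2)}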
def calc_pet(dates, tmin, tmax, hmin, hmax, rs, lat, albedo):
"""
Priestly-Taylor (1972) potential evapotranspiration.
Parameters
----------
dates : numpy ndarray of datetime.date objects
Dates for which potential evapotranspiration are to be computed
tmin : numpy ndarray of floats
Daily minimum air temperature, in degrees Celsius
tmax : numpy ndarray of floats
Daily maximum air temperature, in degrees Celsius
hmin : numpy ndarray of floats
Daily minimum percent relative humidity
hmax : numpy ndarray of floats
Daily maximum percent relative humidity
rs : numpy ndarray of floats
Total incoming shortwave solar radiation, in MegaJoules per square meter per day
lat : numpy ndarray of floats
Latitude, in degrees
albedo : numpy ndarray of floats
Shortwave blue-sky albedo, unitless
"""
# Number of days in the year.
ndays = len(dates)
# Julian days
jday = np.array(list(range(1, ndays + 1)))
# Compute average temperature and humidity
tavg = avg_arrays([tmax, tmin])
havg = avg_arrays([hmax, hmin])
    # Atmospheric pressure is 101.3. Can adjust to temperature, if desired.
# Use optional equations embedded in DO loop.
# P_atm = 101.3*((293.0-(0.0065*elevation))/293.0)**5.26
# P_atm = 101.3*((Temp/293.0)**5.26
    P_atm = 101.3  # kPa; fixed sea-level value per the note above
    gamma = 0.665 * 10.0 ** (-3.0) * P_atm
# Calculate the slope of the saturation vapor pressure curve
delta = calc_saturation_vapor_pressure_curve_delta(tavg)
# Net shortwave radiation
rns = calc_rns(rs, albedo)
# Compute saturation vapor pressure
es_tmax = calc_saturation_vapor_pressure(tmax)
es_tmin = calc_saturation_vapor_pressure(tmin)
# Calculate actual vapor pressure
ea = calc_actual_vapor_pressure(es_tmin, es_tmax, hmin, hmax)
# Calculate clear sky downwelling longwave radiation
rldc = calc_rldc(ea, tavg)
# Latitude in radians.
rad_lat = np.array([(np.pi / 180.) * lat] * ndays)
# Calculate inverse relative Earth-sun distance
dr = np.ones_like(rad_lat)
for j in jday:
dr[j - 1] *= calc_inverse_earth_sun_dist(j, ndays)
# Calculate solar declination
declination = np.ones_like(rad_lat)
for j in jday:
declination[j - 1] *= calc_declination(j)
# Calculate solar time angle at mid day
ws = calc_ws(rad_lat, declination)
# Compute extraterrestrial radiation
ra = calc_ra(dr, ws, rad_lat, declination)
# Clear-sky solar radiation where 75 percent of extraterrestrial
# radiation reaches earth on clear-sky days.
rso = calc_rso(ra, frac=0.75)
# Crawford and Duchon (1999) cloud fraction.
clf = calc_clf(rs, rso)
# Cloudy sky downwelling longwave radiation.
rld = calc_rld(rldc, clf, tavg)
# Upwelling longwave radiation
rlu = calc_rlu(tavg)
# Energy Balance
ern = calc_ern(rns, rld, rlu)
    # Convert from W/m2 to mm/d by dividing by the density and latent heat of
    # vaporization of water: ((2.501 - 0.002361*T) * 1000) * 1000
# Assume soil heat flux density is zero (term in Rn-0.0).
    g = 0.
    # Priestley-Taylor coefficient; 1.26 is the standard empirical value.
    alpha = 1.26
    return (alpha * (delta / (delta + gamma)) * (ern - g)) / ((2.501 - 0.002361 * tavg) * 1000.) * 1000.
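if __name__ == "__main__":
    # Illustrative smoke test (not part of the original module; the forcing
    # values are arbitrary but unit-consistent with the docstring above).
    import datetime
    _n = 5
    _dates = np.array([datetime.date(2000, 1, d) for d in range(1, _n + 1)])
    _tmin = np.full(_n, 10.0)    # deg C
    _tmax = np.full(_n, 25.0)    # deg C
    _hmin = np.full(_n, 30.0)    # percent
    _hmax = np.full(_n, 80.0)    # percent
    _rs = np.full(_n, 20.0)      # MJ m^-2 d^-1
    _albedo = np.full(_n, 0.23)
    print(calc_pet(_dates, _tmin, _tmax, _hmin, _hmax, _rs, 35.0, _albedo))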
| 28.611006 | 113 | 0.622297 |
2e8b9545df53d9ac531cd05bcffab332f5e86577 | 6,317 | py | Python | inquirer2/prompts/expand.py | allemand-instable/PyInquirer2 | 6a2d00120a5a4c798cd373127175904778ac5b97 | [
"MIT"
] | null | null | null | inquirer2/prompts/expand.py | allemand-instable/PyInquirer2 | 6a2d00120a5a4c798cd373127175904778ac5b97 | [
"MIT"
] | null | null | null | inquirer2/prompts/expand.py | allemand-instable/PyInquirer2 | 6a2d00120a5a4c798cd373127175904778ac5b97 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
`expand` type question
"""
from prompt_toolkit.application import Application
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.layout.containers import Window
from prompt_toolkit.filters import IsDone
from prompt_toolkit.layout import Layout
from prompt_toolkit.layout.controls import FormattedTextControl
from prompt_toolkit.layout.containers import ConditionalContainer, HSplit
from prompt_toolkit.layout.dimension import LayoutDimension as D
from . import PromptParameterException
from ..separator import Separator
from .common import default_style
from .common import if_mousedown
# custom control based on FormattedTextControl
class InquirerControl(FormattedTextControl):
def __init__(self, choices, default=None, **kwargs):
self.pointer_index = 0
self.answered = False
self._init_choices(choices, default)
self._help_active = False # help is activated via 'h' key
super().__init__(self._get_choice_tokens, **kwargs)
def _init_choices(self, choices, default=None):
# helper to convert from question format to internal format
self.choices = [] # list (key, name, value)
for i, c in enumerate(choices):
if isinstance(c, Separator):
self.choices.append(c)
else:
if isinstance(c, str):
self.choices.append((None, c, c))
else:
key = c.get('key')
name = c.get('name')
value = c.get('value', name)
self.choices.append([key, name, value])
# append the help choice
self.choices.append(['h', 'Help, list all options', '__HELP__'])
# set the default
for i, choice in enumerate(self.choices):
if isinstance(choice, list):
key = choice[0]
default = default or "h"
if default == key:
self.pointer_index = i
choice[0] = key.upper() # default key is in uppercase
@property
def choice_count(self):
return len(self.choices)
def _get_choice_tokens(self):
tokens = []
def _append(index, line):
if isinstance(line, Separator):
tokens.append(('class:separator', ' %s\n' % line))
else:
key = line[0]
line = line[1]
pointed_at = (index == self.pointer_index)
@if_mousedown
def select_item(mouse_event):
# bind option with this index to mouse event
self.pointer_index = index
if pointed_at:
tokens.append(
('class:focus', ' %s) %s' % (key, line), select_item))
else:
tokens.append(('', ' %s) %s' % (key, line), select_item))
tokens.append(('', '\n'))
if self._help_active:
# prepare the select choices
for i, choice in enumerate(self.choices):
_append(i, choice)
tokens.append(
('', ' Answer: %s' % self.choices[self.pointer_index][0]))
else:
tokens.append(('class:pointer', '>> '))
tokens.append(('', self.choices[self.pointer_index][1]))
return tokens
def get_selected_value(self):
# get value not label
return self.choices[self.pointer_index][2]
def question(message, **kwargs):
# TODO extract common parts for list, checkbox, rawlist, expand
# TODO up, down navigation
    if 'choices' not in kwargs:
raise PromptParameterException('choices')
choices = kwargs.pop('choices', None)
default = kwargs.pop('default', None)
qmark = kwargs.pop('qmark', '[?]')
# TODO style defaults on detail level
style = kwargs.pop('style', default_style)
ic = InquirerControl(choices, default)
def get_prompt_tokens():
tokens = []
tokens.append(('class:questionmark', qmark))
tokens.append(('class:question', ' %s ' % message))
if not ic.answered:
tokens.append(('class:instruction', ' (%s)' % ''.join(
[k[0] for k in ic.choices if not isinstance(k, Separator)])))
else:
tokens.append(('class:answer', ' %s' % ic.get_selected_value()))
return tokens
#@Condition
#def is_help_active():
# return ic._help_active
# assemble layout
layout = HSplit([
Window(
height=D.exact(1),
content=FormattedTextControl(get_prompt_tokens),
always_hide_cursor=True,
),
ConditionalContainer(
Window(ic, always_hide_cursor=True),
#filter=is_help_active & ~IsDone() # ~ bitwise inverse
filter=~IsDone() # ~ bitwise inverse
)
])
# key bindings
kb = KeyBindings()
@kb.add('c-q', eager=True)
@kb.add('c-c', eager=True)
def _(event):
event.app.exit(result=None)
#raise KeyboardInterrupt()
# add key bindings for choices
for i, c in enumerate(ic.choices):
if not isinstance(c, Separator):
def _reg_binding(i, keys):
# trick out late evaluation with a "function factory":
# http://stackoverflow.com/questions/3431676/creating-functions-in-a-loop
@kb.add(keys, eager=True)
def select_choice(event):
ic.pointer_index = i
if c[0] not in ['h', 'H']:
_reg_binding(i, c[0])
if c[0].isupper():
_reg_binding(i, c[0].lower())
@kb.add('H', eager=True)
@kb.add('h', eager=True)
def help_choice(event):
ic._help_active = not ic._help_active
@kb.add('enter', eager=True)
def set_answer(event):
selected_value = ic.get_selected_value()
if selected_value == '__HELP__':
ic._help_active = True
else:
ic.answered = True
event.app.exit(result=selected_value)
return Application(layout=Layout(layout),
key_bindings=kb,
mouse_support=True,
style=style)
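# Illustrative usage sketch (assumption: run from an interactive terminal;
# the choices below are hypothetical):
#
#     app = question(
#         'Overwrite the file?',
#         choices=[
#             {'key': 'y', 'name': 'Overwrite', 'value': 'overwrite'},
#             {'key': 'a', 'name': 'Overwrite all', 'value': 'overwrite_all'},
#             {'key': 'x', 'name': 'Abort', 'value': 'abort'},
#         ])
#     answer = app.run()  # returns the selected choice's 'value'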
| 33.42328 | 89 | 0.568308 |
bb7d2bc2951a0bb8139cccbcea254ad4f1bed70a | 130 | py | Python | pyforfluids/models/__init__.py | fedebenelli/gerg2008 | 72027143bec4e3af28b5ef0edf92d9d9b2c2c398 | [
"MIT"
] | 9 | 2021-09-06T23:23:48.000Z | 2022-03-08T20:51:27.000Z | pyforfluids/models/__init__.py | fedebenelli/PyForFluids | 72027143bec4e3af28b5ef0edf92d9d9b2c2c398 | [
"MIT"
] | 4 | 2021-09-08T15:06:49.000Z | 2021-11-10T13:02:05.000Z | pyforfluids/models/__init__.py | fedebenelli/gerg2008 | 72027143bec4e3af28b5ef0edf92d9d9b2c2c398 | [
"MIT"
] | null | null | null | """Models.
Modules that contains the multiple models to estimate fluid's properties.
"""
from .gerg2008 import GERG2008 # noqa
| 18.571429 | 73 | 0.753846 |
0a14f2a20432e21e4bbd1a9ff157e500b389e4e1 | 1,043 | py | Python | codes/session_8/libEg2.py | Code-by-practice/python-syntax-reference | 63aaab597d8186c2a25b557a6a6504fe9e3e2311 | [
"MIT"
] | 1 | 2018-07-29T06:39:32.000Z | 2018-07-29T06:39:32.000Z | codes/session_8/libEg2.py | Code-by-practice/python-syntax-reference | 63aaab597d8186c2a25b557a6a6504fe9e3e2311 | [
"MIT"
] | null | null | null | codes/session_8/libEg2.py | Code-by-practice/python-syntax-reference | 63aaab597d8186c2a25b557a6a6504fe9e3e2311 | [
"MIT"
] | null | null | null | # How to Run? Open Terminal> python3 libEg2.py
# Read the code comments and output on the terminal
# Objective: Know about standard built-in library
# Dates and Times
from datetime import date
# Get today's date
now = date.today()
print("Today's date: ", now)
# Set date
customDate = date(2000, 12, 2)
print("Custom date: ", customDate)
print('\n')
# Data compression
import zlib
print('Data Compression Try 1: \n')
dataStr1 = b'Hello world! my name is Ashwin' # prefix 'b' means bytes-like object
print('Original: ', len(dataStr1) )
comDataStr1 = zlib.compress(dataStr1)
print('Compress: ', len(comDataStr1) )
deComDataStr1 = zlib.decompress(comDataStr1)
print('Decompress: ', len(deComDataStr1) )
print('\n')
print('Data Compression Try 2: \n')
dataStr2 = b'Ashwin Ashwin Ashwin Ashwin' # prefix 'b' means bytes-like object
print('Original: ', len(dataStr2) )
comDataStr2 = zlib.compress(dataStr2)
print('Compress: ', len(comDataStr2) )
deComDataStr2 = zlib.decompress(comDataStr2)
print('Decompress: ', len(deComDataStr2) )
| 22.673913 | 81 | 0.724832 |
8eca0bb2afba74c012bc075be1a583f55932d83c | 1,446 | py | Python | cryomem/cmtools/lib/plot_svjj.py | bebaek/cryomem | 088fba2568d10451adda51a068c15c8c2a73d9ce | [
"MIT"
] | 1 | 2018-09-16T12:29:04.000Z | 2018-09-16T12:29:04.000Z | cryomem/cmtools/lib/plot_svjj.py | bebaek/cryomem | 088fba2568d10451adda51a068c15c8c2a73d9ce | [
"MIT"
] | null | null | null | cryomem/cmtools/lib/plot_svjj.py | bebaek/cryomem | 088fba2568d10451adda51a068c15c8c2a73d9ce | [
"MIT"
] | null | null | null | # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
"""
Run analyze_data.py
BB, 2014
"""
#import analyze_jjivarray as an
#from IPython.lib import deepreload; deepreload.reload(an)
#from imp import reload; reload(an)
import numpy as np
import matplotlib.pyplot as plt
from plothyst import plothyst
# display units
unit_i = 1e-6 # uA
unit_v = 1e-6 # uV
unit_r = 1 # Ohm
unit_i1 = 1e-3 # mA; control I
unit_v1 = 1e-3 # mV; control V
unit_h = 10 # mT
def setplotparams():
plt.rcParams['font.size'] = 12
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['legend.fontsize'] = 12
plt.rcParams['legend.frameon'] = False
def plot_svjj(filenames, **kwargs):
    whichplot = kwargs.get('whichplot', 'hic')
    if whichplot == 'hic': # H vs Ic
        ix = 1; iy = 3
        for fn in filenames:
            # NOTE: the loop body was truncated in the source; it is
            # reconstructed here (an assumption) to mirror the 'hic'
            # handling in app() below.
            data = np.loadtxt(fn, skiprows=1, usecols=(ix, iy))
            plothyst(data[:, 0], data[:, 1])
        plt.show()
def app(args):
    if len(args) < 3:
        print('')
        print('Usage: python plot_svjj.py <function> <filename>\n')
        print('Examples:')
        print('python plot_svjj.py iv mydata.txt')
        print('python plot_svjj.py hic mydata.txt')
sys.exit(0)
func = args[1]
fn = args[2]
if func == 'iv':
pass
elif func == 'hic':
        data = np.loadtxt(fn, skiprows=1, usecols=(2, 4))
        plothyst(data[:, 0], data[:, 1])
plt.show()
if __name__ == '__main__':
import sys
print(sys.version)
app(sys.argv)
| 24.508475 | 73 | 0.621024 |
52da9dec68493e8484b9e06c806befc3e73ed1b2 | 2,629 | py | Python | penv/tune.py | lucifer2288/penv | e9745e5ca7025ee575fe30da6a849b17c14dee9a | [
"MIT"
] | 20 | 2021-04-13T01:57:39.000Z | 2022-02-10T10:27:05.000Z | penv/tune.py | lucifer2288/penv | e9745e5ca7025ee575fe30da6a849b17c14dee9a | [
"MIT"
] | 1 | 2021-06-04T04:51:57.000Z | 2021-06-04T04:52:20.000Z | penv/tune.py | lucifer2288/penv | e9745e5ca7025ee575fe30da6a849b17c14dee9a | [
"MIT"
] | 9 | 2021-04-12T21:34:48.000Z | 2022-03-18T18:37:05.000Z |
import json
import os
import click
import ray
from ray import tune
from ray.tune.schedulers import PopulationBasedTraining
@click.command()
@click.option("--num-samples", default=4, type=int)
@click.option("--num-workers", default=8, type=int)
def main(num_samples: int, num_workers: int):
pbt = PopulationBasedTraining(
time_attr="time_total_s",
perturbation_interval=50,
resample_probability=0.25,
hyperparam_mutations={
"lambda": tune.uniform(0.9, 1.0),
"clip_param": tune.uniform(0.01, 0.5),
"lr": [1e-2, 1e-3, 5e-4, 1e-4, 5e-5, 1e-5],
"num_sgd_iter": tune.randint(1, 30),
"sgd_minibatch_size": tune.randint(128, 16384),
"train_batch_size": tune.randint(2000, 160000),
}
)
analysis = tune.run(
"PPO",
name="pbt_portfolio_reallocation",
scheduler=pbt,
num_samples=num_samples,
metric="episode_reward_min",
mode="max",
config={
"env": "TradingEnv",
"env_config":{
"total_steps": 1000,
"num_assets": 4,
"commission": 1e-3,
"time_cost": 0,
"window_size": tune.randint(5, 50),
"min_periods": 150
},
"kl_coeff": 1.0,
"num_workers": num_workers,
"num_gpus": 0,
"observation_filter": tune.choice(["NoFilter", "MeanStdFilter"]),
"framework": "torch",
"model": {
"custom_model": "reallocate",
"custom_model_config": {
"num_assets": 4
},
"custom_action_dist": "dirichlet",
},
"num_sgd_iter": 10,
"sgd_minibatch_size": 128,
"lambda": tune.uniform(0.9, 1.0),
"clip_param": tune.uniform(0.1, 0.5),
"lr": tune.loguniform(1e-2, 1e-5),
"train_batch_size": tune.randint(1000, 20000)
},
stop={
"episode_reward_min": 20,
"training_iteration": 100
},
checkpoint_at_end=True,
local_dir="./results"
)
checkpoints = analysis.get_trial_checkpoints_paths(
trial=analysis.get_best_trial(metric="episode_reward_min", mode="max"),
metric="episode_reward_mean"
)
params = {
"config": analysis.best_config,
"checkpoints": checkpoints
}
json.dump(params, open("data/tuned_params.json", "w"), indent=4)
if __name__ == "__main__":
ray.init()
main()
ray.shutdown()
| 28.576087 | 79 | 0.536706 |
b482e4392c3f0cf93e6ae943d0f4b9da8f9c7c80 | 3,573 | py | Python | DiffNet/networks/wgan_multi_output.py | adityabalu/DiffNet | a21e024ad9948fa76fe73796e216a0a6601f2c7c | [
"MIT"
] | 1 | 2021-12-02T06:42:38.000Z | 2021-12-02T06:42:38.000Z | DiffNet/networks/wgan_multi_output.py | adityabalu/DiffNet | a21e024ad9948fa76fe73796e216a0a6601f2c7c | [
"MIT"
] | null | null | null | DiffNet/networks/wgan_multi_output.py | adityabalu/DiffNet | a21e024ad9948fa76fe73796e216a0a6601f2c7c | [
"MIT"
] | 2 | 2021-12-01T20:53:24.000Z | 2021-12-02T06:42:39.000Z | from torch import nn
from torch.autograd import grad
import torch
import numpy as np
from torch.nn.modules.utils import _pair
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find("BatchNorm3d") != -1:
torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
torch.nn.init.constant_(m.bias.data, 0.0)
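# Illustrative usage sketch (not part of the original file): the initializer
# above is written for nn.Module.apply, e.g.
#
#     gen = GoodGenerator()
#     gen.apply(weights_init_normal)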
##############################
# U-NET
##############################
class UNetDown(nn.Module):
def __init__(self, in_size, out_size, normalize=True, dropout=0.0):
super(UNetDown, self).__init__()
layers = [nn.Conv2d(in_size, out_size, 4, 2, 1, bias=False)]
if normalize:
layers.append(nn.InstanceNorm2d(out_size))
layers.append(nn.LeakyReLU(0.2))
if dropout:
layers.append(nn.Dropout(dropout))
self.model = nn.Sequential(*layers)
def forward(self, x):
return self.model(x)
class UNetUp(nn.Module):
def __init__(self, in_size, out_size, dropout=0.0):
super(UNetUp, self).__init__()
layers = [
nn.ConvTranspose2d(in_size, out_size, 4, 2, 1, bias=False),
nn.InstanceNorm2d(out_size),
nn.ReLU(inplace=True),
]
if dropout:
layers.append(nn.Dropout(dropout))
self.model = nn.Sequential(*layers)
def forward(self, x, skip_input):
x = self.model(x)
x = torch.cat((x, skip_input), 1)
return x
class GoodGenerator(nn.Module):
def __init__(self, in_channels=1, out_channels=1, num_outputs=3):
super(GoodGenerator, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.num_outputs = num_outputs
self.down1 = UNetDown(in_channels, 32)
self.down2 = UNetDown(32, 64)
self.down3 = UNetDown(64, 128)
self.down4 = UNetDown(128, 256, dropout=0.5)
self.down5 = UNetDown(256, 256, dropout=0.5)
# self.up3 = []
# self.up4 = []
# self.up5 = []
# self.up6 = []
# self.final = []
self.up3 = nn.ModuleList()
self.up4 = nn.ModuleList()
self.up5 = nn.ModuleList()
self.up6 = nn.ModuleList()
self.final = nn.ModuleList()
for _ in range(self.num_outputs):
self.up3.append(UNetUp(256, 256, dropout=0.5))
self.up4.append(UNetUp(512, 128, dropout=0.5))
self.up5.append(UNetUp(256, 64))
self.up6.append(UNetUp(128, 32))
self.final.append(nn.Sequential(
nn.Upsample(scale_factor=2),
nn.ZeroPad2d((1, 0, 1, 0)),
nn.Conv2d(64, out_channels, 4, padding=1),
nn.Sigmoid(),
))
def forward(self, x):
# U-Net generator with skip connections from encoder to decoder
d1 = self.down1(x)
d2 = self.down2(d1)
d3 = self.down3(d2)
d4 = self.down4(d3)
d5 = self.down5(d4)
# d6 = self.down6(d5)
# d7 = self.down7(d6)
# u1 = self.up1(d7, d6)
# u2 = self.up2(d6, d5)
outs = []
print("self.num_outputs = ", self.num_outputs)
for idx in range(self.num_outputs):
u3 = self.up3[idx](d5, d4)
u4 = self.up4[idx](u3, d3)
u5 = self.up5[idx](u4, d2)
u6 = self.up6[idx](u5, d1)
outs.append(self.final[idx](u6))
return outs | 31.619469 | 71 | 0.550238 |
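if __name__ == "__main__":
    # Smoke test (assumption, not part of the original module). The five
    # stride-2 downsamplings require the input side length to be divisible by
    # 32; 64x64 keeps the bottleneck at 2x2 so InstanceNorm2d still has more
    # than one spatial element per channel in training mode.
    net = GoodGenerator(in_channels=1, out_channels=1, num_outputs=3)
    outputs = net(torch.randn(2, 1, 64, 64))
    print([o.shape for o in outputs])  # three tensors of shape (2, 1, 64, 64)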
46adaf3870b79c7e1238174442d600db0bcd8547 | 18,274 | py | Python | Python-3.5.5/build/lib.linux-x86_64-3.5/_sysconfigdata.py | it315/PSPNet-Keras-tensorflow | 876448d9c44a8ca475cf0f60f69eb3c72651be87 | [
"MIT"
] | null | null | null | Python-3.5.5/build/lib.linux-x86_64-3.5/_sysconfigdata.py | it315/PSPNet-Keras-tensorflow | 876448d9c44a8ca475cf0f60f69eb3c72651be87 | [
"MIT"
] | null | null | null | Python-3.5.5/build/lib.linux-x86_64-3.5/_sysconfigdata.py | it315/PSPNet-Keras-tensorflow | 876448d9c44a8ca475cf0f60f69eb3c72651be87 | [
"MIT"
] | null | null | null | # system configuration generated and used by the sysconfig module
build_time_vars = {'ABIFLAGS': 'm',
'AC_APPLE_UNIVERSAL_BUILD': 0,
'AIX_GENUINE_CPLUSPLUS': 0,
'AR': 'ar',
'ARFLAGS': 'rc',
'BASECFLAGS': '-Wno-unused-result -Wsign-compare',
'BASECPPFLAGS': '',
'BASEMODLIBS': '',
'BINDIR': '/usr/local/bin',
'BINLIBDEST': '/usr/local/lib/python3.5',
'BLDLIBRARY': 'libpython3.5m.a',
'BLDSHARED': 'gcc -pthread -shared',
'BUILDEXE': '',
'BUILDPYTHON': 'python',
'BUILD_GNU_TYPE': 'x86_64-pc-linux-gnu',
'BYTESTR_DEPS': '\\',
'CC': 'gcc -pthread',
'CCSHARED': '-fPIC',
'CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall '
'-Wstrict-prototypes',
'CFLAGSFORSHARED': '',
'CFLAGS_ALIASING': '',
'CFLAGS_NODIST': '',
'CONFIGFILES': 'configure configure.ac acconfig.h pyconfig.h.in '
'Makefile.pre.in',
'CONFIGURE_CFLAGS': '',
'CONFIGURE_CFLAGS_NODIST': '-Werror=declaration-after-statement',
'CONFIGURE_CPPFLAGS': '',
'CONFIGURE_LDFLAGS': '',
'CONFIG_ARGS': "'--enable-optimizations'",
'CONFINCLUDEDIR': '/usr/local/include',
'CONFINCLUDEPY': '/usr/local/include/python3.5m',
'COREPYTHONPATH': ':plat-linux',
'COVERAGE_INFO': '/home/ripsuser1/PSPNet-Keras-tensorflow/Python-3.5.5/coverage.info',
'COVERAGE_REPORT': '/home/ripsuser1/PSPNet-Keras-tensorflow/Python-3.5.5/lcov-report',
'COVERAGE_REPORT_OPTIONS': '--no-branch-coverage --title "CPython lcov '
'report"',
'CPPFLAGS': '-I. -I./Include',
'CXX': 'g++ -pthread',
'DESTDIRS': '/usr/local /usr/local/lib /usr/local/lib/python3.5 '
'/usr/local/lib/python3.5/lib-dynload',
'DESTLIB': '/usr/local/lib/python3.5',
'DESTPATH': '',
'DESTSHARED': '/usr/local/lib/python3.5/lib-dynload',
'DIRMODE': 755,
'DIST': 'README ChangeLog configure configure.ac acconfig.h pyconfig.h.in '
'Makefile.pre.in Include Lib Misc Ext-dummy',
'DISTDIRS': 'Include Lib Misc Ext-dummy',
'DISTFILES': 'README ChangeLog configure configure.ac acconfig.h '
'pyconfig.h.in Makefile.pre.in',
'DLINCLDIR': '.',
'DLLLIBRARY': '',
'DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754': 0,
'DOUBLE_IS_BIG_ENDIAN_IEEE754': 0,
'DOUBLE_IS_LITTLE_ENDIAN_IEEE754': 1,
'DYNLOADFILE': 'dynload_shlib.o',
'ENABLE_IPV6': 1,
'ENSUREPIP': 'upgrade',
'EXE': '',
'EXEMODE': 755,
'EXTRAMACHDEPPATH': '',
'EXTRATESTOPTS': '',
'EXTRA_CFLAGS': '',
'EXT_SUFFIX': '.cpython-35m-x86_64-linux-gnu.so',
'FILEMODE': 644,
'FLOCK_NEEDS_LIBBSD': 0,
'GETPGRP_HAVE_ARG': 0,
'GETTIMEOFDAY_NO_TZ': 0,
'GITBRANCH': '',
'GITTAG': '',
'GITVERSION': '',
'GNULD': 'yes',
'HAVE_ACCEPT4': 1,
'HAVE_ACOSH': 1,
'HAVE_ADDRINFO': 1,
'HAVE_ALARM': 1,
'HAVE_ALIGNED_REQUIRED': 0,
'HAVE_ALLOCA_H': 1,
'HAVE_ALTZONE': 0,
'HAVE_ASINH': 1,
'HAVE_ASM_TYPES_H': 1,
'HAVE_ATANH': 1,
'HAVE_BIND_TEXTDOMAIN_CODESET': 1,
'HAVE_BLUETOOTH_BLUETOOTH_H': 0,
'HAVE_BLUETOOTH_H': 0,
'HAVE_BROKEN_MBSTOWCS': 0,
'HAVE_BROKEN_NICE': 0,
'HAVE_BROKEN_PIPE_BUF': 0,
'HAVE_BROKEN_POLL': 0,
'HAVE_BROKEN_POSIX_SEMAPHORES': 0,
'HAVE_BROKEN_PTHREAD_SIGMASK': 0,
'HAVE_BROKEN_SEM_GETVALUE': 0,
'HAVE_BROKEN_UNSETENV': 0,
'HAVE_BUILTIN_ATOMIC': 1,
'HAVE_C99_BOOL': 1,
'HAVE_CHFLAGS': 0,
'HAVE_CHOWN': 1,
'HAVE_CHROOT': 1,
'HAVE_CLOCK': 1,
'HAVE_CLOCK_GETRES': 1,
'HAVE_CLOCK_GETTIME': 1,
'HAVE_COMPUTED_GOTOS': 1,
'HAVE_CONFSTR': 1,
'HAVE_CONIO_H': 0,
'HAVE_COPYSIGN': 1,
'HAVE_CTERMID': 1,
'HAVE_CTERMID_R': 0,
'HAVE_CURSES_H': 1,
'HAVE_CURSES_IS_TERM_RESIZED': 1,
'HAVE_CURSES_RESIZETERM': 1,
'HAVE_CURSES_RESIZE_TERM': 1,
'HAVE_DECL_ISFINITE': 1,
'HAVE_DECL_ISINF': 1,
'HAVE_DECL_ISNAN': 1,
'HAVE_DECL_TZNAME': 0,
'HAVE_DEVICE_MACROS': 1,
'HAVE_DEV_PTC': 0,
'HAVE_DEV_PTMX': 1,
'HAVE_DIRECT_H': 0,
'HAVE_DIRENT_D_TYPE': 1,
'HAVE_DIRENT_H': 1,
'HAVE_DIRFD': 1,
'HAVE_DLFCN_H': 1,
'HAVE_DLOPEN': 1,
'HAVE_DUP2': 1,
'HAVE_DUP3': 1,
'HAVE_DYNAMIC_LOADING': 1,
'HAVE_ENDIAN_H': 1,
'HAVE_EPOLL': 1,
'HAVE_EPOLL_CREATE1': 1,
'HAVE_ERF': 1,
'HAVE_ERFC': 1,
'HAVE_ERRNO_H': 1,
'HAVE_EXECV': 1,
'HAVE_EXPM1': 1,
'HAVE_FACCESSAT': 1,
'HAVE_FCHDIR': 1,
'HAVE_FCHMOD': 1,
'HAVE_FCHMODAT': 1,
'HAVE_FCHOWN': 1,
'HAVE_FCHOWNAT': 1,
'HAVE_FCNTL_H': 1,
'HAVE_FDATASYNC': 1,
'HAVE_FDOPENDIR': 1,
'HAVE_FEXECVE': 1,
'HAVE_FINITE': 1,
'HAVE_FLOCK': 1,
'HAVE_FORK': 1,
'HAVE_FORKPTY': 1,
'HAVE_FPATHCONF': 1,
'HAVE_FSEEK64': 0,
'HAVE_FSEEKO': 1,
'HAVE_FSTATAT': 1,
'HAVE_FSTATVFS': 1,
'HAVE_FSYNC': 1,
'HAVE_FTELL64': 0,
'HAVE_FTELLO': 1,
'HAVE_FTIME': 1,
'HAVE_FTRUNCATE': 1,
'HAVE_FUTIMENS': 1,
'HAVE_FUTIMES': 1,
'HAVE_FUTIMESAT': 1,
'HAVE_GAI_STRERROR': 1,
'HAVE_GAMMA': 1,
'HAVE_GCC_ASM_FOR_MC68881': 0,
'HAVE_GCC_ASM_FOR_X64': 1,
'HAVE_GCC_ASM_FOR_X87': 1,
'HAVE_GCC_UINT128_T': 1,
'HAVE_GETADDRINFO': 1,
'HAVE_GETC_UNLOCKED': 1,
'HAVE_GETENTROPY': 1,
'HAVE_GETGROUPLIST': 1,
'HAVE_GETGROUPS': 1,
'HAVE_GETHOSTBYNAME': 0,
'HAVE_GETHOSTBYNAME_R': 1,
'HAVE_GETHOSTBYNAME_R_3_ARG': 0,
'HAVE_GETHOSTBYNAME_R_5_ARG': 0,
'HAVE_GETHOSTBYNAME_R_6_ARG': 1,
'HAVE_GETITIMER': 1,
'HAVE_GETLOADAVG': 1,
'HAVE_GETLOGIN': 1,
'HAVE_GETNAMEINFO': 1,
'HAVE_GETPAGESIZE': 1,
'HAVE_GETPEERNAME': 1,
'HAVE_GETPGID': 1,
'HAVE_GETPGRP': 1,
'HAVE_GETPID': 1,
'HAVE_GETPRIORITY': 1,
'HAVE_GETPWENT': 1,
'HAVE_GETRANDOM': 1,
'HAVE_GETRANDOM_SYSCALL': 1,
'HAVE_GETRESGID': 1,
'HAVE_GETRESUID': 1,
'HAVE_GETSID': 1,
'HAVE_GETSPENT': 1,
'HAVE_GETSPNAM': 1,
'HAVE_GETTIMEOFDAY': 1,
'HAVE_GETWD': 1,
'HAVE_GLIBC_MEMMOVE_BUG': 0,
'HAVE_GRP_H': 1,
'HAVE_HSTRERROR': 1,
'HAVE_HTOLE64': 1,
'HAVE_HYPOT': 1,
'HAVE_IEEEFP_H': 0,
'HAVE_IF_NAMEINDEX': 1,
'HAVE_INET_ATON': 1,
'HAVE_INET_PTON': 1,
'HAVE_INITGROUPS': 1,
'HAVE_INT32_T': 1,
'HAVE_INT64_T': 1,
'HAVE_INTTYPES_H': 1,
'HAVE_IO_H': 0,
'HAVE_IPA_PURE_CONST_BUG': 0,
'HAVE_KILL': 1,
'HAVE_KILLPG': 1,
'HAVE_KQUEUE': 0,
'HAVE_LANGINFO_H': 1,
'HAVE_LARGEFILE_SUPPORT': 0,
'HAVE_LCHFLAGS': 0,
'HAVE_LCHMOD': 0,
'HAVE_LCHOWN': 1,
'HAVE_LGAMMA': 1,
'HAVE_LIBDL': 1,
'HAVE_LIBDLD': 0,
'HAVE_LIBIEEE': 0,
'HAVE_LIBINTL_H': 1,
'HAVE_LIBREADLINE': 1,
'HAVE_LIBRESOLV': 0,
'HAVE_LIBSENDFILE': 0,
'HAVE_LIBUTIL_H': 0,
'HAVE_LINK': 1,
'HAVE_LINKAT': 1,
'HAVE_LINUX_CAN_BCM_H': 1,
'HAVE_LINUX_CAN_H': 1,
'HAVE_LINUX_CAN_RAW_FD_FRAMES': 1,
'HAVE_LINUX_CAN_RAW_H': 1,
'HAVE_LINUX_NETLINK_H': 1,
'HAVE_LINUX_RANDOM_H': 1,
'HAVE_LINUX_TIPC_H': 1,
'HAVE_LOCKF': 1,
'HAVE_LOG1P': 1,
'HAVE_LOG2': 1,
'HAVE_LONG_DOUBLE': 1,
'HAVE_LONG_LONG': 1,
'HAVE_LSTAT': 1,
'HAVE_LUTIMES': 1,
'HAVE_MAKEDEV': 1,
'HAVE_MBRTOWC': 1,
'HAVE_MEMMOVE': 1,
'HAVE_MEMORY_H': 1,
'HAVE_MEMRCHR': 1,
'HAVE_MKDIRAT': 1,
'HAVE_MKFIFO': 1,
'HAVE_MKFIFOAT': 1,
'HAVE_MKNOD': 1,
'HAVE_MKNODAT': 1,
'HAVE_MKTIME': 1,
'HAVE_MMAP': 1,
'HAVE_MREMAP': 1,
'HAVE_NCURSES_H': 1,
'HAVE_NDIR_H': 0,
'HAVE_NETPACKET_PACKET_H': 1,
'HAVE_NET_IF_H': 1,
'HAVE_NICE': 1,
'HAVE_OPENAT': 1,
'HAVE_OPENPTY': 1,
'HAVE_OSX105_SDK': 0,
'HAVE_PATHCONF': 1,
'HAVE_PAUSE': 1,
'HAVE_PIPE2': 1,
'HAVE_PLOCK': 0,
'HAVE_POLL': 1,
'HAVE_POLL_H': 1,
'HAVE_POSIX_FADVISE': 1,
'HAVE_POSIX_FALLOCATE': 1,
'HAVE_PREAD': 1,
'HAVE_PRLIMIT': 1,
'HAVE_PROCESS_H': 0,
'HAVE_PROTOTYPES': 1,
'HAVE_PTHREAD_ATFORK': 1,
'HAVE_PTHREAD_DESTRUCTOR': 0,
'HAVE_PTHREAD_H': 1,
'HAVE_PTHREAD_INIT': 0,
'HAVE_PTHREAD_KILL': 1,
'HAVE_PTHREAD_SIGMASK': 1,
'HAVE_PTY_H': 1,
'HAVE_PUTENV': 1,
'HAVE_PWRITE': 1,
'HAVE_READLINK': 1,
'HAVE_READLINKAT': 1,
'HAVE_READV': 1,
'HAVE_REALPATH': 1,
'HAVE_RENAMEAT': 1,
'HAVE_RL_APPEND_HISTORY': 1,
'HAVE_RL_CALLBACK': 1,
'HAVE_RL_CATCH_SIGNAL': 1,
'HAVE_RL_COMPLETION_APPEND_CHARACTER': 1,
'HAVE_RL_COMPLETION_DISPLAY_MATCHES_HOOK': 1,
'HAVE_RL_COMPLETION_MATCHES': 1,
'HAVE_RL_COMPLETION_SUPPRESS_APPEND': 1,
'HAVE_RL_PRE_INPUT_HOOK': 1,
'HAVE_RL_RESIZE_TERMINAL': 1,
'HAVE_ROUND': 1,
'HAVE_SCHED_GET_PRIORITY_MAX': 1,
'HAVE_SCHED_H': 1,
'HAVE_SCHED_RR_GET_INTERVAL': 1,
'HAVE_SCHED_SETAFFINITY': 1,
'HAVE_SCHED_SETPARAM': 1,
'HAVE_SCHED_SETSCHEDULER': 1,
'HAVE_SELECT': 1,
'HAVE_SEM_GETVALUE': 1,
'HAVE_SEM_OPEN': 1,
'HAVE_SEM_TIMEDWAIT': 1,
'HAVE_SEM_UNLINK': 1,
'HAVE_SENDFILE': 1,
'HAVE_SETEGID': 1,
'HAVE_SETEUID': 1,
'HAVE_SETGID': 1,
'HAVE_SETGROUPS': 1,
'HAVE_SETHOSTNAME': 1,
'HAVE_SETITIMER': 1,
'HAVE_SETLOCALE': 1,
'HAVE_SETPGID': 1,
'HAVE_SETPGRP': 1,
'HAVE_SETPRIORITY': 1,
'HAVE_SETREGID': 1,
'HAVE_SETRESGID': 1,
'HAVE_SETRESUID': 1,
'HAVE_SETREUID': 1,
'HAVE_SETSID': 1,
'HAVE_SETUID': 1,
'HAVE_SETVBUF': 1,
'HAVE_SHADOW_H': 1,
'HAVE_SIGACTION': 1,
'HAVE_SIGALTSTACK': 1,
'HAVE_SIGINTERRUPT': 1,
'HAVE_SIGNAL_H': 1,
'HAVE_SIGPENDING': 1,
'HAVE_SIGRELSE': 1,
'HAVE_SIGTIMEDWAIT': 1,
'HAVE_SIGWAIT': 1,
'HAVE_SIGWAITINFO': 1,
'HAVE_SNPRINTF': 1,
'HAVE_SOCKADDR_SA_LEN': 0,
'HAVE_SOCKADDR_STORAGE': 1,
'HAVE_SOCKETPAIR': 1,
'HAVE_SPAWN_H': 1,
'HAVE_SSIZE_T': 1,
'HAVE_STATVFS': 1,
'HAVE_STAT_TV_NSEC': 1,
'HAVE_STAT_TV_NSEC2': 0,
'HAVE_STDARG_PROTOTYPES': 1,
'HAVE_STDINT_H': 1,
'HAVE_STDLIB_H': 1,
'HAVE_STD_ATOMIC': 1,
'HAVE_STRDUP': 1,
'HAVE_STRFTIME': 1,
'HAVE_STRINGS_H': 1,
'HAVE_STRING_H': 1,
'HAVE_STRLCPY': 0,
'HAVE_STROPTS_H': 1,
'HAVE_STRUCT_STAT_ST_BIRTHTIME': 0,
'HAVE_STRUCT_STAT_ST_BLKSIZE': 1,
'HAVE_STRUCT_STAT_ST_BLOCKS': 1,
'HAVE_STRUCT_STAT_ST_FLAGS': 0,
'HAVE_STRUCT_STAT_ST_GEN': 0,
'HAVE_STRUCT_STAT_ST_RDEV': 1,
'HAVE_STRUCT_TM_TM_ZONE': 1,
'HAVE_SYMLINK': 1,
'HAVE_SYMLINKAT': 1,
'HAVE_SYNC': 1,
'HAVE_SYSCONF': 1,
'HAVE_SYSEXITS_H': 1,
'HAVE_SYS_AUDIOIO_H': 0,
'HAVE_SYS_BSDTTY_H': 0,
'HAVE_SYS_DEVPOLL_H': 0,
'HAVE_SYS_DIR_H': 0,
'HAVE_SYS_ENDIAN_H': 0,
'HAVE_SYS_EPOLL_H': 1,
'HAVE_SYS_EVENT_H': 0,
'HAVE_SYS_FILE_H': 1,
'HAVE_SYS_IOCTL_H': 1,
'HAVE_SYS_KERN_CONTROL_H': 0,
'HAVE_SYS_LOADAVG_H': 0,
'HAVE_SYS_LOCK_H': 0,
'HAVE_SYS_MKDEV_H': 0,
'HAVE_SYS_MODEM_H': 0,
'HAVE_SYS_NDIR_H': 0,
'HAVE_SYS_PARAM_H': 1,
'HAVE_SYS_POLL_H': 1,
'HAVE_SYS_RANDOM_H': 1,
'HAVE_SYS_RESOURCE_H': 1,
'HAVE_SYS_SELECT_H': 1,
'HAVE_SYS_SENDFILE_H': 1,
'HAVE_SYS_SOCKET_H': 1,
'HAVE_SYS_STATVFS_H': 1,
'HAVE_SYS_STAT_H': 1,
'HAVE_SYS_SYSCALL_H': 1,
'HAVE_SYS_SYS_DOMAIN_H': 0,
'HAVE_SYS_TERMIO_H': 0,
'HAVE_SYS_TIMES_H': 1,
'HAVE_SYS_TIME_H': 1,
'HAVE_SYS_TYPES_H': 1,
'HAVE_SYS_UIO_H': 1,
'HAVE_SYS_UN_H': 1,
'HAVE_SYS_UTSNAME_H': 1,
'HAVE_SYS_WAIT_H': 1,
'HAVE_SYS_XATTR_H': 1,
'HAVE_TCGETPGRP': 1,
'HAVE_TCSETPGRP': 1,
'HAVE_TEMPNAM': 1,
'HAVE_TERMIOS_H': 1,
'HAVE_TERM_H': 1,
'HAVE_TGAMMA': 1,
'HAVE_TIMEGM': 1,
'HAVE_TIMES': 1,
'HAVE_TMPFILE': 1,
'HAVE_TMPNAM': 1,
'HAVE_TMPNAM_R': 1,
'HAVE_TM_ZONE': 1,
'HAVE_TRUNCATE': 1,
'HAVE_TZNAME': 0,
'HAVE_UCS4_TCL': 0,
'HAVE_UINT32_T': 1,
'HAVE_UINT64_T': 1,
'HAVE_UINTPTR_T': 1,
'HAVE_UNAME': 1,
'HAVE_UNISTD_H': 1,
'HAVE_UNLINKAT': 1,
'HAVE_UNSETENV': 1,
'HAVE_USABLE_WCHAR_T': 0,
'HAVE_UTIL_H': 0,
'HAVE_UTIMENSAT': 1,
'HAVE_UTIMES': 1,
'HAVE_UTIME_H': 1,
'HAVE_WAIT3': 1,
'HAVE_WAIT4': 1,
'HAVE_WAITID': 1,
'HAVE_WAITPID': 1,
'HAVE_WCHAR_H': 1,
'HAVE_WCSCOLL': 1,
'HAVE_WCSFTIME': 1,
'HAVE_WCSXFRM': 1,
'HAVE_WMEMCMP': 1,
'HAVE_WORKING_TZSET': 1,
'HAVE_WRITEV': 1,
'HAVE_ZLIB_COPY': 1,
'HAVE__GETPTY': 0,
'HOST_GNU_TYPE': 'x86_64-pc-linux-gnu',
'INCLDIRSTOMAKE': '/usr/local/include /usr/local/include '
'/usr/local/include/python3.5m '
'/usr/local/include/python3.5m',
'INCLUDEDIR': '/usr/local/include',
'INCLUDEPY': '/usr/local/include/python3.5m',
'INSTALL': '/usr/bin/install -c',
'INSTALL_DATA': '/usr/bin/install -c -m 644',
'INSTALL_PROGRAM': '/usr/bin/install -c',
'INSTALL_SCRIPT': '/usr/bin/install -c',
'INSTALL_SHARED': '/usr/bin/install -c -m 555',
'INSTSONAME': 'libpython3.5m.a',
'IO_H': 'Modules/_io/_iomodule.h',
'IO_OBJS': '\\',
'LDCXXSHARED': 'g++ -pthread -shared',
'LDFLAGS': '',
'LDLAST': '',
'LDLIBRARY': 'libpython3.5m.a',
'LDLIBRARYDIR': '',
'LDSHARED': 'gcc -pthread -shared',
'LDVERSION': '3.5m',
'LIBC': '',
'LIBDEST': '/usr/local/lib/python3.5',
'LIBDIR': '/usr/local/lib',
'LIBFFI_INCLUDEDIR': '',
'LIBM': '-lm',
'LIBOBJDIR': 'Python/',
'LIBOBJS': '',
'LIBPC': '/usr/local/lib/pkgconfig',
'LIBPL': '/usr/local/lib/python3.5/config-3.5m',
'LIBRARY': 'libpython3.5m.a',
'LIBRARY_OBJS': '\\',
'LIBRARY_OBJS_OMIT_FROZEN': '\\',
'LIBS': '-lpthread -ldl -lutil',
'LIBSUBDIRS': 'tkinter tkinter/test tkinter/test/test_tkinter \\',
'LINKCC': 'gcc -pthread',
'LINKFORSHARED': '-Xlinker -export-dynamic',
'LIPO_32BIT_FLAGS': '',
'LLVM_PROF_ERR': 'no',
'LLVM_PROF_FILE': '',
'LLVM_PROF_MERGER': 'true',
'LN': 'ln',
'LOCALMODLIBS': '',
'LOG1P_DROPS_ZERO_SIGN': 0,
'MACHDEP': 'linux',
'MACHDEPPATH': ':plat-linux',
'MACHDEPS': 'plat-linux',
'MACHDEP_OBJS': '',
'MACHDESTLIB': '/usr/local/lib/python3.5',
'MACOSX_DEPLOYMENT_TARGET': '',
'MAINCC': 'gcc -pthread',
'MAJOR_IN_MKDEV': 0,
'MAJOR_IN_SYSMACROS': 0,
'MAKESETUP': './Modules/makesetup',
'MANDIR': '/usr/local/share/man',
'MKDIR_P': '/bin/mkdir -p',
'MODLIBS': '',
'MODNAMES': '_thread _signal posix errno pwd _sre _codecs _weakref _functools '
'_operator _collections itertools atexit _stat time _locale _io '
'zipimport faulthandler _tracemalloc _symtable xxsubtype',
'MODOBJS': 'Modules/_threadmodule.o Modules/signalmodule.o '
'Modules/posixmodule.o Modules/errnomodule.o '
'Modules/pwdmodule.o Modules/_sre.o Modules/_codecsmodule.o '
'Modules/_weakref.o Modules/_functoolsmodule.o '
'Modules/_operator.o Modules/_collectionsmodule.o '
'Modules/itertoolsmodule.o Modules/atexitmodule.o '
'Modules/_stat.o Modules/timemodule.o Modules/_localemodule.o '
'Modules/_iomodule.o Modules/iobase.o Modules/fileio.o '
'Modules/bytesio.o Modules/bufferedio.o Modules/textio.o '
'Modules/stringio.o Modules/zipimport.o Modules/faulthandler.o '
'Modules/_tracemalloc.o Modules/hashtable.o '
'Modules/symtablemodule.o Modules/xxsubtype.o',
'MODULE_OBJS': '\\',
'MULTIARCH': 'x86_64-linux-gnu',
'MVWDELCH_IS_EXPRESSION': 1,
'NO_AS_NEEDED': '-Wl,--no-as-needed',
'OBJECT_OBJS': '\\',
'OPT': '-DNDEBUG -g -fwrapv -O3 -Wall -Wstrict-prototypes',
'OTHER_LIBTOOL_OPT': '',
'PACKAGE_BUGREPORT': 0,
'PACKAGE_NAME': 0,
'PACKAGE_STRING': 0,
'PACKAGE_TARNAME': 0,
'PACKAGE_URL': 0,
'PACKAGE_VERSION': 0,
'PARSER_HEADERS': '\\',
'PARSER_OBJS': '\\ Parser/myreadline.o Parser/parsetok.o Parser/tokenizer.o',
'PGEN': 'Parser/pgen',
'PGENOBJS': '\\ \\',
'PGENSRCS': '\\ \\',
'PGOBJS': '\\',
'PGO_PROF_GEN_FLAG': '-fprofile-generate',
'PGO_PROF_USE_FLAG': '-fprofile-use -fprofile-correction',
'PGSRCS': '\\',
'PLATDIR': 'plat-linux',
'POBJS': '\\',
'POSIX_SEMAPHORES_NOT_ENABLED': 0,
'PROFILE_TASK': '-m test.regrtest --pgo -x test_asyncore test_gdb '
'test_multiprocessing_fork test_multiprocessing_forkserver '
'test_multiprocessing_main_handling '
'test_multiprocessing_spawn test_subprocess',
'PSRCS': '\\',
'PTHREAD_SYSTEM_SCHED_SUPPORTED': 1,
'PURIFY': '',
'PY3LIBRARY': '',
'PYLONG_BITS_IN_DIGIT': 0,
'PYTHON': 'python',
'PYTHONFRAMEWORK': '',
'PYTHONFRAMEWORKDIR': 'no-framework',
'PYTHONFRAMEWORKINSTALLDIR': '',
'PYTHONFRAMEWORKPREFIX': '',
'PYTHONPATH': ':plat-linux',
'PYTHON_FOR_BUILD': './python -E',
'PYTHON_FOR_REGEN': 'python3',
'PYTHON_HEADERS': '\\',
'PYTHON_OBJS': '\\',
'PY_CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall '
'-Wstrict-prototypes',
'PY_CFLAGS_NODIST': '-Werror=declaration-after-statement',
'PY_CORE_CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O3 '
'-Wall -Wstrict-prototypes '
'-Werror=declaration-after-statement -I. -I./Include '
'-DPy_BUILD_CORE',
'PY_CPPFLAGS': '-I. -I./Include',
'PY_FORMAT_LONG_LONG': '"ll"',
'PY_FORMAT_SIZE_T': '"z"',
'PY_LDFLAGS': '',
'Py_DEBUG': 0,
'Py_ENABLE_SHARED': 0,
'Py_HASH_ALGORITHM': 0,
'QUICKTESTOPTS': '-x test_subprocess test_io test_lib2to3 \\',
'RANLIB': 'ranlib',
'READELF': 'readelf',
'RESSRCDIR': 'Mac/Resources/framework',
'RETSIGTYPE': 'void',
'RUNSHARED': '',
'SCRIPTDIR': '/usr/local/lib',
'SETPGRP_HAVE_ARG': 0,
'SGI_ABI': '',
'SHELL': '/bin/sh',
'SHLIBS': '-lpthread -ldl -lutil',
'SHLIB_SUFFIX': '.so',
'SIGNAL_OBJS': '',
'SIGNED_RIGHT_SHIFT_ZERO_FILLS': 0,
'SITEPATH': '',
'SIZEOF_DOUBLE': 8,
'SIZEOF_FLOAT': 4,
'SIZEOF_FPOS_T': 16,
'SIZEOF_INT': 4,
'SIZEOF_LONG': 8,
'SIZEOF_LONG_DOUBLE': 16,
'SIZEOF_LONG_LONG': 8,
'SIZEOF_OFF_T': 8,
'SIZEOF_PID_T': 4,
'SIZEOF_PTHREAD_T': 8,
'SIZEOF_SHORT': 2,
'SIZEOF_SIZE_T': 8,
'SIZEOF_TIME_T': 8,
'SIZEOF_UINTPTR_T': 8,
'SIZEOF_VOID_P': 8,
'SIZEOF_WCHAR_T': 4,
'SIZEOF__BOOL': 1,
'SOABI': 'cpython-35m-x86_64-linux-gnu',
'SRCDIRS': 'Parser Grammar Objects Python Modules Mac Programs',
'SRC_GDB_HOOKS': './Tools/gdb/libpython.py',
'STDC_HEADERS': 1,
'STRICT_SYSV_CURSES': "/* Don't use ncurses extensions */",
'STRIPFLAG': '-s',
'SUBDIRS': '',
'SUBDIRSTOO': 'Include Lib Misc',
'SYSLIBS': '-lm',
'SYS_SELECT_WITH_SYS_TIME': 1,
'TANH_PRESERVES_ZERO_SIGN': 1,
'TCLTK_INCLUDES': '',
'TCLTK_LIBS': '',
'TESTOPTS': '',
'TESTPATH': '',
'TESTPYTHON': './python',
'TESTPYTHONOPTS': '',
'TESTRUNNER': './python ./Tools/scripts/run_tests.py',
'TESTTIMEOUT': 3600,
'THREADOBJ': 'Python/thread.o',
'TIMEMODULE_LIB': 0,
'TIME_WITH_SYS_TIME': 1,
'TM_IN_SYS_TIME': 0,
'UNICODE_DEPS': '\\',
'UNIVERSALSDK': '',
'USE_COMPUTED_GOTOS': 0,
'USE_INLINE': 1,
'VA_LIST_IS_ARRAY': 1,
'VERSION': '3.5',
'WANT_SIGFPE_HANDLER': 0,
'WINDOW_HAS_FLAGS': 1,
'WITH_DOC_STRINGS': 1,
'WITH_DYLD': 0,
'WITH_LIBINTL': 0,
'WITH_NEXT_FRAMEWORK': 0,
'WITH_PYMALLOC': 1,
'WITH_THREAD': 1,
'WITH_TSC': 0,
'WITH_VALGRIND': 0,
'X87_DOUBLE_ROUNDING': 0,
'XMLLIBSUBDIRS': 'xml xml/dom xml/etree xml/parsers xml/sax',
'abs_builddir': '/home/ripsuser1/PSPNet-Keras-tensorflow/Python-3.5.5',
'abs_srcdir': '/home/ripsuser1/PSPNet-Keras-tensorflow/Python-3.5.5',
'datarootdir': '/usr/local/share',
'exec_prefix': '/usr/local',
'prefix': '/usr/local',
'srcdir': '.'}
| 27.729894 | 87 | 0.671282 |
3afb968e532749cf03c88b5ad4aed440ae256ad4 | 21,123 | py | Python | pyredner/load_mitsuba.py | swordigo1995/redner | 195696deb4269447b7e4648d6285ab34da2ed24f | [
"MIT"
] | null | null | null | pyredner/load_mitsuba.py | swordigo1995/redner | 195696deb4269447b7e4648d6285ab34da2ed24f | [
"MIT"
] | null | null | null | pyredner/load_mitsuba.py | swordigo1995/redner | 195696deb4269447b7e4648d6285ab34da2ed24f | [
"MIT"
] | null | null | null | import torch
import xml.etree.ElementTree as etree
import numpy as np
import redner
import os
import pyredner
import pyredner.transform as transform
# from .load_obj import load_obj, load_obj_fast
def parse_transform(node):
ret = torch.eye(4)
for child in node:
if child.tag == 'matrix':
value = torch.from_numpy(\
np.reshape(\
# support both ',' and ' ' seperator
np.fromstring(child.attrib['value'], dtype=np.float32, sep=',' if ',' in child.attrib['value'] else ' '),
(4, 4)))
ret = value @ ret
elif child.tag == 'translate':
x = float(child.attrib['x'])
y = float(child.attrib['y'])
z = float(child.attrib['z'])
value = transform.gen_translate_matrix(torch.tensor([x, y, z]))
ret = value @ ret
elif child.tag == 'scale':
# single scale value
if 'value' in child.attrib:
x = y = z = float(child.attrib['value'])
else:
x = float(child.attrib['x'])
y = float(child.attrib['y'])
z = float(child.attrib['z'])
value = transform.gen_scale_matrix(torch.tensor([x, y, z]))
ret = value @ ret
return ret
def parse_vector(str):
v = np.fromstring(str, dtype=np.float32, sep=',')
if v.shape[0] != 3:
v = np.fromstring(str, dtype=np.float32, sep=' ')
assert(v.ndim == 1)
return torch.from_numpy(v)
def parse_camera(node):
fov = torch.tensor([45.0])
position = None
look_at = None
up = None
clip_near = 1e-2
resolution = [256, 256]
for child in node:
if 'name' in child.attrib:
if child.attrib['name'] == 'fov':
fov = torch.tensor([float(child.attrib['value'])])
elif child.attrib['name'] == 'toWorld':
has_lookat = False
for grandchild in child:
if grandchild.tag.lower() == 'lookat':
has_lookat = True
position = parse_vector(grandchild.attrib['origin'])
look_at = parse_vector(grandchild.attrib['target'])
up = parse_vector(grandchild.attrib['up'])
if not has_lookat:
print('Unsupported Mitsuba scene format: please use a look at transform')
assert(False)
if child.tag == 'film':
for grandchild in child:
if 'name' in grandchild.attrib:
if grandchild.attrib['name'] == 'width':
resolution[1] = int(grandchild.attrib['value'])
elif grandchild.attrib['name'] == 'height':
resolution[0] = int(grandchild.attrib['value'])
return pyredner.Camera(position = position,
look_at = look_at,
up = up,
fov = fov,
clip_near = clip_near,
resolution = resolution)
def parse_material(node, two_sided = False):
def parse_material_bitmap(node, scale = None):
reflectance_texture = None
uv_scale = torch.tensor([1.0, 1.0])
for grandchild in node:
if grandchild.attrib['name'] == 'filename':
reflectance_texture = pyredner.imread(grandchild.attrib['value'])
if scale:
reflectance_texture = reflectance_texture * scale
elif grandchild.attrib['name'] == 'uscale':
uv_scale[0] = float(grandchild.attrib['value'])
elif grandchild.attrib['name'] == 'vscale':
uv_scale[1] = float(grandchild.attrib['value'])
assert reflectance_texture is not None
return reflectance_texture, uv_scale
# support mitsuba pulgin 'scale' for texture
def parse_texture(node):
if node.attrib['type'] == 'scale':
scale_value = None
for grandchild in node:
if grandchild.attrib['name'] == 'scale' and grandchild.tag == 'float':
scale_value = float(grandchild.attrib['value'])
elif grandchild.attrib['type'] == 'bitmap' and grandchild.tag == 'texture':
assert scale_value is not None # avoid 'scale' element is declared below the 'bitmap'
return parse_material_bitmap(grandchild, scale_value)
else:
raise NotImplementedError('Unsupported scale param type {}'.format(grandchild.child['type']))
elif node.attrib['type'] == 'bitmap':
return parse_material_bitmap(node)
else:
raise NotImplementedError('Unsupported Texture type {}'.format(node.attrib['type']))
node_id = None
if 'id' in node.attrib:
node_id = node.attrib['id']
if node.attrib['type'] == 'diffuse':
diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5])
diffuse_uv_scale = torch.tensor([1.0, 1.0])
specular_reflectance = torch.tensor([0.0, 0.0, 0.0])
specular_uv_scale = torch.tensor([1.0, 1.0])
roughness = torch.tensor([1.0])
for child in node:
if child.attrib['name'] == 'reflectance':
if child.tag == 'texture':
diffuse_reflectance, diffuse_uv_scale = parse_texture(child)
elif child.tag == 'rgb' or child.tag == 'spectrum':
diffuse_reflectance = parse_vector(child.attrib['value'])
elif child.attrib['name'] == 'specular':
if child.tag == 'texture':
specular_reflectance, specular_uv_scale = parse_texture(child)
elif child.tag == 'rgb' or child.tag == 'spectrum':
specular_reflectance = parse_vector(child.attrib['value'])
elif child.attrib['name'] == 'roughness':
roughness = torch.tensor([float(child.attrib['value'])])
if pyredner.get_use_gpu():
# Copy to GPU
diffuse_reflectance = diffuse_reflectance.cuda()
specular_reflectance = specular_reflectance.cuda()
roughness = roughness.cuda()
return (node_id, pyredner.Material(\
diffuse_reflectance = pyredner.Texture(diffuse_reflectance, diffuse_uv_scale),
specular_reflectance = pyredner.Texture(specular_reflectance, specular_uv_scale),
roughness = pyredner.Texture(roughness),
two_sided = two_sided))
elif node.attrib['type'] == 'roughplastic':
diffuse_reflectance = torch.tensor([0.5, 0.5, 0.5])
diffuse_uv_scale = torch.tensor([1.0, 1.0])
specular_reflectance = torch.tensor([0.0, 0.0, 0.0])
specular_uv_scale = torch.tensor([1.0, 1.0])
roughness = torch.tensor([1.0])
roughness_uv_scale = torch.tensor([1.0, 1.0])
for child in node:
if child.attrib['name'] == 'diffuseReflectance':
if child.tag == 'texture':
diffuse_reflectance, diffuse_uv_scale = parse_texture(child)
elif child.tag == 'rgb' or child.tag == 'spectrum':
diffuse_reflectance = parse_vector(child.attrib['value'])
elif child.attrib['name'] == 'specularReflectance':
if child.tag == 'texture':
specular_reflectance, specular_uv_scale = parse_texture(child)
elif child.tag == 'rgb' or child.tag == 'spectrum':
specular_reflectance = parse_vector(child.attrib['value'])
elif child.attrib['name'] == 'alpha':
# Add 'alpha texture' support
if child.tag == 'texture':
#TODO KJL
roughness, roughness_uv_scale = parse_texture(child) #? not sure to do square here
elif child.tag == 'float':
alpha = float(child.attrib['value'])
roughness = torch.tensor([alpha * alpha])
if pyredner.get_use_gpu():
# Copy to GPU
diffuse_reflectance = diffuse_reflectance.cuda()
specular_reflectance = specular_reflectance.cuda()
roughness = roughness.cuda()
return (node_id, pyredner.Material(\
diffuse_reflectance = pyredner.Texture(diffuse_reflectance, diffuse_uv_scale),
specular_reflectance = pyredner.Texture(specular_reflectance, specular_uv_scale),
roughness = pyredner.Texture(roughness, roughness_uv_scale),
two_sided = two_sided))
elif node.attrib['type'] == 'twosided':
ret = parse_material(node[0], True)
return (node_id, ret[1])
# Simply bypass mask's opacity
elif node.attrib['type'] == 'mask': #TODO add opacity!!!
ret = parse_material(node[0])
return (node_id, ret[1])
else:
print('Unsupported material type:', node.attrib['type'])
assert(False)
def parse_shape(node, material_dict, shape_id, shape_group_dict = None):
if node.attrib['type'] == 'obj' or node.attrib['type'] == 'serialized':
print(node.attrib['id'])
to_world = torch.eye(4)
serialized_shape_id = 0
mat_id = -1
light_intensity = None
filename = ''
mat_name2id = {}
for child in node:
if 'name' in child.attrib:
if child.attrib['name'] == 'filename':
filename = child.attrib['value']
elif child.attrib['name'] == 'toWorld':
to_world = parse_transform(child)
elif child.attrib['name'] == 'shapeIndex':
serialized_shape_id = int(child.attrib['value'])
if child.tag == 'ref':
mat_id = material_dict[child.attrib['id']]
if 'name' in child.attrib.keys() and child.attrib['name'] != 'bsdf':
mat_name2id[child.attrib['name']] = child.attrib['id']
elif child.tag == 'bsdf':
#TODO hack! use default diffuse if countering internal declaration bsdf
mat_id = 0
# node_id, material = parse_material(child)
# if node_id is not None:
# material_dict[node_id] = len(materials)
# materials.append(material)
elif child.tag == 'emitter':
for grandchild in child:
if grandchild.attrib['name'] == 'radiance':
light_intensity = parse_vector(grandchild.attrib['value'])
if light_intensity.shape[0] == 1:
light_intensity = torch.tensor(\
[light_intensity[0],
light_intensity[0],
light_intensity[0]])
if node.attrib['type'] == 'obj':
_, mesh_list, _ = pyredner.load_obj.load_obj_fast(filename, is_load_mtl=False)
shape_list = []
for mesh in mesh_list:
mat_name = mesh[0]
vertices = mesh[1].vertices.cpu()
indices = mesh[1].indices.cpu()
uvs = mesh[1].uvs
normals = mesh[1].normals
if uvs is not None:
uvs = uvs.cpu()
if normals is not None:
normals = normals.cpu()
# Transform the vertices and normals
vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)), dim=1)
vertices = vertices @ torch.transpose(to_world, 0, 1)
vertices = vertices / vertices[:, 3:4]
vertices = vertices[:, 0:3].contiguous()
if normals is not None:
normals = normals @ (torch.inverse(torch.transpose(to_world, 0, 1))[:3, :3])
normals = normals.contiguous()
assert (vertices is not None)
assert (indices is not None)
lgt = None
if light_intensity is not None:
lgt = pyredner.AreaLight(shape_id, light_intensity)
if pyredner.get_use_gpu():
# Copy to GPU
vertices = vertices.cuda()
indices = indices.cuda()
if uvs is not None:
uvs = uvs.cuda()
if normals is not None:
normals = normals.cuda()
# Assign material
if mat_name != '' and mat_name is not None: # skip no material mesh
mat_id = material_dict[mat_name2id[mat_name]]
shape_list.append(pyredner.Shape(vertices, indices, uvs, normals, mat_id))
return shape_list, lgt
else:
assert(node.attrib['type'] == 'serialized')
mitsuba_tri_mesh = redner.load_serialized(filename, serialized_shape_id)
vertices = torch.from_numpy(mitsuba_tri_mesh.vertices)
indices = torch.from_numpy(mitsuba_tri_mesh.indices)
uvs = torch.from_numpy(mitsuba_tri_mesh.uvs)
normals = torch.from_numpy(mitsuba_tri_mesh.normals)
if uvs.shape[0] == 0:
uvs = None
if normals.shape[0] == 0:
normals = None
# Transform the vertices and normals
vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)), dim = 1)
vertices = vertices @ torch.transpose(to_world, 0, 1)
vertices = vertices / vertices[:, 3:4]
vertices = vertices[:, 0:3].contiguous()
if normals is not None:
normals = normals @ (torch.inverse(torch.transpose(to_world, 0, 1))[:3, :3])
normals = normals.contiguous()
assert(vertices is not None)
assert(indices is not None)
lgt = None
if light_intensity is not None:
lgt = pyredner.AreaLight(shape_id, light_intensity)
if pyredner.get_use_gpu():
# Copy to GPU
vertices = vertices.cuda()
indices = indices.cuda()
if uvs is not None:
uvs = uvs.cuda()
if normals is not None:
normals = normals.cuda()
return pyredner.Shape(vertices, indices, uvs, normals, mat_id), lgt
elif node.attrib['type'] == 'rectangle':
indices = torch.tensor([[0, 2, 1], [1, 2, 3]],
dtype = torch.int32)
vertices = torch.tensor([[-1.0, -1.0, 0.0],
[-1.0, 1.0, 0.0],
[ 1.0, -1.0, 0.0],
[ 1.0, 1.0, 0.0]])
uvs = None
normals = None
to_world = torch.eye(4)
mat_id = -1
light_intensity = None
for child in node:
if 'name' in child.attrib:
if child.attrib['name'] == 'toWorld':
to_world = parse_transform(child)
if child.tag == 'ref':
mat_id = material_dict[child.attrib['id']]
elif child.tag == 'emitter':
for grandchild in child:
if grandchild.attrib['name'] == 'radiance':
light_intensity = parse_vector(grandchild.attrib['value'])
if light_intensity.shape[0] == 1:
light_intensity = torch.tensor(\
[light_intensity[0],
light_intensity[0],
light_intensity[0]])
# Transform the vertices
# Transform the vertices and normals
vertices = torch.cat((vertices, torch.ones(vertices.shape[0], 1)), dim = 1)
vertices = vertices @ torch.transpose(to_world, 0, 1)
vertices = vertices / vertices[:, 3:4]
vertices = vertices[:, 0:3].contiguous()
if normals is not None:
normals = normals @ (torch.inverse(torch.transpose(to_world, 0, 1))[:3, :3])
normals = normals.contiguous()
assert(vertices is not None)
assert(indices is not None)
lgt = None
if light_intensity is not None:
lgt = pyredner.AreaLight(shape_id, light_intensity)
if pyredner.get_use_gpu():
# Copy to GPU
vertices = vertices.cuda()
indices = indices.cuda()
if uvs is not None:
uvs = uvs.cuda()
if normals is not None:
normals = normals.cuda()
return pyredner.Shape(vertices, indices, uvs, normals, mat_id), lgt
# Add instance support
# TODO (simply transform & create a new shape now)
elif node.attrib['type'] == 'instance':
shape = None
for child in node:
if 'name' in child.attrib:
if child.attrib['name'] == 'toWorld':
to_world = parse_transform(child)
if pyredner.get_use_gpu():
to_world = to_world.cuda()
if child.tag == 'ref':
shape_ = shape_group_dict[child.attrib['id']]
shape_list = []
for shape in list(shape_):
# transform instance
vertices = shape.vertices
normals = shape.normals
vector1 = torch.ones(vertices.shape[0], 1)
vertices = torch.cat((vertices, vector1.cuda() if pyredner.get_use_gpu() else vector1), dim = 1)
vertices = vertices @ torch.transpose(to_world, 0, 1)
vertices = vertices / vertices[:, 3:4]
vertices = vertices[:, 0:3].contiguous()
if normals is not None:
normals = normals @ (torch.inverse(torch.transpose(to_world, 0, 1))[:3, :3])
normals = normals.contiguous()
# assert(vertices is not None)
# assert(indices is not None)
# lgt = None
# if light_intensity is not None:
# lgt = pyredner.AreaLight(shape_id, light_intensity)
shape_list.append(pyredner.Shape(vertices, shape.indices, shape.uvs, normals, shape.material_id))
return shape_list, None
else:
print('Shape type {} is not supported!'.format(node.attrib['type']))
assert(False)
def parse_scene(node):
cam = None
resolution = None
materials = []
material_dict = {}
shapes = []
lights = []
shape_group_dict = {}
envmap = None
for child in node:
if child.tag == 'sensor':
cam = parse_camera(child)
elif child.tag == 'bsdf':
node_id, material = parse_material(child)
if node_id is not None:
material_dict[node_id] = len(materials)
materials.append(material)
# shapegroup for instancing
elif child.tag == 'shape' and child.attrib['type'] == 'shapegroup':
for child_s in child:
if child_s.tag == 'shape':
shape_group_dict[child.attrib['id']] = parse_shape(child_s, material_dict, None)[0]
elif child.tag == 'shape':
shape, light = parse_shape(child, material_dict, len(shapes), shape_group_dict if child.attrib['type'] == 'instance' else None)
if isinstance(shape, list):
shapes = shapes + shape
else:
shapes.append(shape)
if light is not None:
lights.append(light)
# Add envmap loading support
elif child.tag == 'emitter' and child.attrib['type'] == 'envmap':
# read envmap params from xml
scale = 1.0
envmap_filename = None
to_world = torch.eye(4)
for child_s in child:
if child_s.attrib['name'] == 'scale':
assert child_s.tag == 'float'
scale = float(child_s.attrib['value'])
if child_s.attrib['name'] == 'filename':
assert child_s.tag == 'string'
envmap_filename = child_s.attrib['value']
if child_s.attrib['name'] == 'toWorld':
to_world = parse_transform(child_s)
# load envmap
envmap = scale * pyredner.imread(envmap_filename)
if pyredner.get_use_gpu():
envmap = envmap.cuda()
envmap = pyredner.EnvironmentMap(envmap, env_to_world=to_world)
return pyredner.Scene(cam, shapes, materials, lights, envmap)
def load_mitsuba(filename):
"""
Load from a Mitsuba scene file as PyTorch tensors.
"""
tree = etree.parse(filename)
root = tree.getroot()
cwd = os.getcwd()
os.chdir(os.path.dirname(filename))
ret = parse_scene(root)
os.chdir(cwd)
return ret
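# Illustrative usage sketch (the scene path is hypothetical):
#
#     scene = load_mitsuba('scenes/cbox/cbox.xml')
#     # `scene` is a pyredner.Scene holding the parsed camera, shapes,
#     # materials, area lights and (optionally) an environment map.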
| 45.62203 | 139 | 0.533163 |
364d4d916fff7f5905dc6d35d7fe4f3b14fac491 | 3,315 | py | Python | main/models/VAEh4.py | MarcSerraPeralta/rec-flows | d05c3eca944f2228cffa575698ee5b010e83f167 | [
"MIT"
] | null | null | null | main/models/VAEh4.py | MarcSerraPeralta/rec-flows | d05c3eca944f2228cffa575698ee5b010e83f167 | [
"MIT"
] | null | null | null | main/models/VAEh4.py | MarcSerraPeralta/rec-flows | d05c3eca944f2228cffa575698ee5b010e83f167 | [
"MIT"
] | null | null | null | import torch
class model(torch.nn.Module):
def __init__(self, **kwargs):
"""
emdim : int
Embedding matrix = "10^6" x emdim
            (in practice, the "10^6" is 181497)
Scheme
------
ENCODER Linear
|-------> mu (dim[2])
INP Embed+ReLU H1 Linear+ReLU H2 |
"10^6" ------------> dim[0] ------------> dim[1] -|
| Linear
|-------> logvar (dim[2])
LATENT SPACE
        z = mu + exp(0.5*logvar) * N(0, I)  # check whether (mu, std) or (mu, var) is passed to torch's random function
DECODER
        z     Linear+ReLU    H3     Linear+ReLU   H4     Linear    OUTPUT
        dim[2] -------------> dim[1] --------------> dim[0] ----------> "10^6"
        The layer numbering does not match the one in multiple_layers.
"""
super(model, self).__init__()
# PARAMS
params = {}
for key in ['Nsongs', 'dim', 'reduction_emb', 'Nmeta_classes']:
params[key] = kwargs[key]
for k, v in params.items():
setattr(self, k, v)
# STRUCTURE
self.emb = torch.nn.Embedding(self.Nsongs+1, self.dim[0], padding_idx=0) #extra index for padding
self.w1 = torch.nn.Linear(self.dim[0], self.dim[1])
self.mu = torch.nn.Linear(self.dim[1], self.dim[2])
self.mu.bias.data.fill_(0)
self.logvar = torch.nn.Linear(self.dim[1], self.dim[2])
self.logvar.bias.data.fill_(0)
self.w3 = torch.nn.Linear(self.dim[2], self.dim[1])
self.w4 = torch.nn.Linear(self.dim[1], self.dim[0])
self.inv = torch.nn.Linear(self.dim[0], self.Nsongs)
self.relu = torch.nn.ReLU()
self.inv.bias.data.copy_(torch.load("results/metadata/bias_inicialization"))
# TUNING
self.z_tag = torch.nn.Parameter(torch.rand(self.Nmeta_classes, self.dim[-1]))
# ATTENTION
if self.reduction_emb == "attention":
self.attention_l = torch.nn.Linear(self.dim[0],1)
self.attention_a = torch.nn.Tanh()
return
def encoder(self, x):
if self.reduction_emb == "sum":
h = self.relu(self.emb(x+1).sum(1)) #x+1 for padding_idx = 0 (-1+1)
if self.reduction_emb == "mean":
h = self.emb(x+1).sum(1)
Nitems = (x != 0).sum(1).float().to(h.device)
h = h / Nitems.view(Nitems.shape[0],1)
h = self.relu(h)
if self.reduction_emb == "attention":
h = self.emb(x+1)
att = self.attention_a(self.attention_l(h))
att = torch.softmax(att, dim=1)
h = (att*h).sum(dim=1)
h = self.relu(h)
h = self.relu(self.w1(h))
mu = self.mu(h)
logvar = self.logvar(h)
return mu, logvar
def reparametrize(self, mu, logvar):
std = torch.exp(0.5*logvar)
z = mu + std*torch.randn_like(std) #std inside randn_like only determines the size of the tensor of N(0,I)
return z
def decoder(self, z):
h = self.relu(self.w3(z))
h = self.relu(self.w4(h))
xhat = self.inv(h)
return xhat
def forward(self, x, tag=None): #x = batch = matrix (tensor)
if tag is None:
with torch.no_grad(): self.emb.weight[0] = 0 #padding_idx elements always 0
mu, logvar = self.encoder(x)
z = self.reparametrize(mu, logvar)
xhat = self.decoder(z)
else:
with torch.no_grad(): self.emb.weight[0] = 0 #padding_idx elements always 0
mu, logvar = self.encoder(x)
z = self.reparametrize(mu, logvar)
z = z + self.z_tag[tag]
xhat = self.decoder(z)
return xhat, [mu, logvar]
def latent(self, x):
mu, logvar = self.encoder(x)
z = self.reparametrize(mu, logvar)
return z | 30.412844 | 108 | 0.611161 |
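# Illustrative usage sketch (assumption: hyperparameters are hypothetical and
# the bias-initialization file loaded in __init__ must exist on disk):
#
#     net = model(Nsongs=181497, dim=[256, 128, 64],
#                 reduction_emb="mean", Nmeta_classes=10)
#     x = torch.randint(-1, 181497, (8, 20))  # -1 acts as the padding index
#     xhat, (mu, logvar) = net(x)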
8a6f774a359c699a851fe529263ed9a310e68545 | 13 | py | Python | example_snippets/multimenus_snippets/Snippets/SciPy/Special functions/Bessel Functions/Faster versions of common Bessel Functions/y1 Bessel function of the second kind of order 1, $Y_1(x)$.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | null | null | null | example_snippets/multimenus_snippets/Snippets/SciPy/Special functions/Bessel Functions/Faster versions of common Bessel Functions/y1 Bessel function of the second kind of order 1, $Y_1(x)$.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | null | null | null | example_snippets/multimenus_snippets/Snippets/SciPy/Special functions/Bessel Functions/Faster versions of common Bessel Functions/y1 Bessel function of the second kind of order 1, $Y_1(x)$.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | 1 | 2021-02-04T04:51:48.000Z | 2021-02-04T04:51:48.000Z | special.y1(x) | 13 | 13 | 0.769231 |
6d5302b5f0af2419ea256075cb56208953588cd1 | 417 | py | Python | workshops/templatetags/state.py | tracykteal/amy | cb19e318d36b880b1c3be2104efff42ef776118a | [
"MIT"
] | null | null | null | workshops/templatetags/state.py | tracykteal/amy | cb19e318d36b880b1c3be2104efff42ef776118a | [
"MIT"
] | 1 | 2019-12-13T11:22:47.000Z | 2019-12-13T11:22:47.000Z | workshops/templatetags/state.py | tracykteal/amy | cb19e318d36b880b1c3be2104efff42ef776118a | [
"MIT"
] | null | null | null | from django import template
from django.utils.safestring import mark_safe
from workshops.models import TrainingRequest
register = template.Library()
@register.simple_tag
def state_label(req):
assert hasattr(req, 'state')
switch = {
'p': 'badge badge-warning',
'a': 'badge badge-success',
'd': 'badge badge-danger',
}
result = switch[req.state]
return mark_safe(result)
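# Illustrative template usage (assumption: the tag library is named "state"):
#
#     {% load state %}
#     <span class="{% state_label req %}">{{ req.state }}</span>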
| 21.947368 | 45 | 0.678657 |
5cb1b7c3dc3957f18fd66840a1355e0e26f9fbe2 | 111,696 | py | Python | tests/unit/common/db/test_migrations.py | DeanHwd/rally | d284aa0746c54f1c375470e76dd206d19877a7fd | [
"Apache-2.0"
] | null | null | null | tests/unit/common/db/test_migrations.py | DeanHwd/rally | d284aa0746c54f1c375470e76dd206d19877a7fd | [
"Apache-2.0"
] | null | null | null | tests/unit/common/db/test_migrations.py | DeanHwd/rally | d284aa0746c54f1c375470e76dd206d19877a7fd | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for DB migration."""
import copy
import datetime as dt
import iso8601
import json
import pickle
import pprint
import uuid
import alembic
import jsonschema
import mock
from oslo_db.sqlalchemy import test_migrations
from oslo_db.sqlalchemy import utils as db_utils
import six
import sqlalchemy as sa
import rally
from rally.common import db
from rally.common.db import models
from rally import consts
from tests.unit.common.db import test_migrations_base
from tests.unit import test as rtest
class MigrationTestCase(rtest.DBTestCase,
test_migrations.ModelsMigrationsSync):
"""Test for checking of equality models state and migrations.
For the opportunistic testing you need to set up a db named
'openstack_citest' with user 'openstack_citest' and password
'openstack_citest' on localhost.
The test will then use that db and user/password combo to run the tests.
For PostgreSQL on Ubuntu this can be done with the following commands::
sudo -u postgres psql
postgres=# create user openstack_citest with createdb login password
'openstack_citest';
postgres=# create database openstack_citest with owner
openstack_citest;
For MySQL on Ubuntu this can be done with the following commands::
mysql -u root
>create database openstack_citest;
>grant all privileges on openstack_citest.* to
openstack_citest@localhost identified by 'openstack_citest';
Output is a list that contains information about differences between db and
models. Output example::
[('add_table',
Table('bat', MetaData(bind=None),
Column('info', String(), table=<bat>), schema=None)),
('remove_table',
Table(u'bar', MetaData(bind=None),
Column(u'data', VARCHAR(), table=<bar>), schema=None)),
('add_column',
None,
'foo',
Column('data', Integer(), table=<foo>)),
('remove_column',
None,
'foo',
Column(u'old_data', VARCHAR(), table=None)),
[('modify_nullable',
None,
'foo',
u'x',
{'existing_server_default': None,
'existing_type': INTEGER()},
True,
False)]]
* ``remove_*`` means that there is extra table/column/constraint in db;
* ``add_*`` means that it is missing in db;
    * ``modify_*`` means that a column in db has the wrong
      type/nullable/server_default set. Element contains information:
- what should be modified,
- schema,
- table,
- column,
- existing correct column parameters,
- right value,
- wrong value.
"""
def setUp(self):
# we change DB metadata in tests so we reload
        # models to refresh the metadata to its original state
six.moves.reload_module(rally.common.db.models)
super(MigrationTestCase, self).setUp()
self.alembic_config = db.schema._alembic_config()
self.engine = db.get_engine()
# remove everything from DB and stamp it as 'base'
# so that migration (i.e. upgrade up to 'head')
# will actually take place
db.schema.schema_cleanup()
db.schema.schema_stamp("base")
def db_sync(self, engine):
db.schema.schema_upgrade()
def get_engine(self):
return self.engine
def get_metadata(self):
return models.BASE.metadata
def include_object(self, object_, name, type_, reflected, compare_to):
if type_ == "table" and name == "alembic_version":
return False
return super(MigrationTestCase, self).include_object(
object_, name, type_, reflected, compare_to)
def _create_fake_model(self, table_name):
type(
"FakeModel",
(models.BASE, models.RallyBase),
{"__tablename__": table_name,
"id": sa.Column(sa.Integer, primary_key=True,
autoincrement=True)}
)
def _get_metadata_diff(self):
with self.get_engine().connect() as conn:
opts = {
"include_object": self.include_object,
"compare_type": self.compare_type,
"compare_server_default": self.compare_server_default,
}
mc = alembic.migration.MigrationContext.configure(conn, opts=opts)
            # compare schemas and fail with the diff if it's not empty
diff = self.filter_metadata_diff(
alembic.autogenerate.compare_metadata(mc, self.get_metadata()))
return diff
@mock.patch("rally.common.db.schema.schema_stamp")
def test_models_sync(self, mock_schema_stamp):
# drop all tables after a test run
self.addCleanup(db.schema.schema_cleanup)
# run migration scripts
self.db_sync(self.get_engine())
diff = self._get_metadata_diff()
if diff:
msg = pprint.pformat(diff, indent=2, width=20)
self.fail(
"Models and migration scripts aren't in sync:\n%s" % msg)
@mock.patch("rally.common.db.schema.schema_stamp")
def test_models_sync_negative__missing_table_in_script(
self, mock_schema_stamp):
# drop all tables after a test run
self.addCleanup(db.schema.schema_cleanup)
self._create_fake_model("fake_model")
# run migration scripts
self.db_sync(self.get_engine())
diff = self._get_metadata_diff()
self.assertEqual(1, len(diff))
action, object = diff[0]
self.assertEqual("add_table", action)
self.assertIsInstance(object, sa.Table)
self.assertEqual("fake_model", object.name)
@mock.patch("rally.common.db.schema.schema_stamp")
def test_models_sync_negative__missing_model_in_metadata(
self, mock_schema_stamp):
# drop all tables after a test run
self.addCleanup(db.schema.schema_cleanup)
table = self.get_metadata().tables["tags"]
self.get_metadata().remove(table)
# run migration scripts
self.db_sync(self.get_engine())
diff = self._get_metadata_diff()
self.assertEqual(1, len(diff))
action, object = diff[0]
self.assertEqual("remove_table", action)
self.assertIsInstance(object, sa.Table)
self.assertEqual("tags", object.name)
class MigrationWalkTestCase(rtest.DBTestCase,
test_migrations_base.BaseWalkMigrationMixin):
"""Test case covers upgrade method in migrations."""
def setUp(self):
super(MigrationWalkTestCase, self).setUp()
self.engine = db.get_engine()
def assertColumnExists(self, engine, table, column):
t = db_utils.get_table(engine, table)
self.assertIn(column, t.c)
def assertColumnsExists(self, engine, table, columns):
for column in columns:
self.assertColumnExists(engine, table, column)
def assertColumnCount(self, engine, table, columns):
t = db_utils.get_table(engine, table)
self.assertEqual(len(columns), len(t.columns))
def assertColumnNotExists(self, engine, table, column):
t = db_utils.get_table(engine, table)
self.assertNotIn(column, t.c)
def assertIndexExists(self, engine, table, index):
t = db_utils.get_table(engine, table)
index_names = [idx.name for idx in t.indexes]
self.assertIn(index, index_names)
def assertColumnType(self, engine, table, column, sqltype):
t = db_utils.get_table(engine, table)
col = getattr(t.c, column)
self.assertIsInstance(col.type, sqltype)
def assertIndexMembers(self, engine, table, index, members):
self.assertIndexExists(engine, table, index)
t = db_utils.get_table(engine, table)
index_columns = None
for idx in t.indexes:
if idx.name == index:
index_columns = idx.columns.keys()
break
self.assertEqual(sorted(members), sorted(index_columns))
def test_walk_versions(self):
self.walk_versions(self.engine)
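    # 3177d36ea270: the separate "admin" and "users" columns of the
    # deployments table are replaced by a single "credentials" column.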
def _check_3177d36ea270(self, engine, data):
self.assertEqual(
"3177d36ea270", db.schema.schema_revision(engine=engine))
self.assertColumnExists(engine, "deployments", "credentials")
self.assertColumnNotExists(engine, "deployments", "admin")
self.assertColumnNotExists(engine, "deployments", "users")
def _pre_upgrade_54e844ebfbc3(self, engine):
self._54e844ebfbc3_deployments = {
# right config which should not be changed after migration
"should-not-be-changed-1": {
"admin": {"username": "admin",
"password": "passwd",
"project_name": "admin"},
"auth_url": "http://example.com:5000/v3",
"region_name": "RegionOne",
"type": "ExistingCloud"},
# right config which should not be changed after migration
"should-not-be-changed-2": {
"admin": {"username": "admin",
"password": "passwd",
"tenant_name": "admin"},
"users": [{"username": "admin",
"password": "passwd",
"tenant_name": "admin"}],
"auth_url": "http://example.com:5000/v2.0",
"region_name": "RegionOne",
"type": "ExistingCloud"},
# not ExistingCloud config which should not be changed
"should-not-be-changed-3": {
"url": "example.com",
"type": "Something"},
# normal config created with "fromenv" feature
"from-env": {
"admin": {"username": "admin",
"password": "passwd",
"tenant_name": "admin",
"project_domain_name": "",
"user_domain_name": ""},
"auth_url": "http://example.com:5000/v2.0",
"region_name": "RegionOne",
"type": "ExistingCloud"},
# public endpoint + keystone v3 config with tenant_name
"ksv3_public": {
"admin": {"username": "admin",
"password": "passwd",
"tenant_name": "admin",
"user_domain_name": "bla",
"project_domain_name": "foo"},
"auth_url": "http://example.com:5000/v3",
"region_name": "RegionOne",
"type": "ExistingCloud",
"endpoint_type": "public"},
# internal endpoint + existing_users
"existing_internal": {
"admin": {"username": "admin",
"password": "passwd",
"tenant_name": "admin"},
"users": [{"username": "admin",
"password": "passwd",
"tenant_name": "admin",
"project_domain_name": "",
"user_domain_name": ""}],
"auth_url": "http://example.com:5000/v2.0",
"region_name": "RegionOne",
"type": "ExistingCloud",
"endpoint_type": "internal"},
}
deployment_table = db_utils.get_table(engine, "deployments")
deployment_status = consts.DeployStatus.DEPLOY_FINISHED
with engine.connect() as conn:
for deployment in self._54e844ebfbc3_deployments:
conf = json.dumps(self._54e844ebfbc3_deployments[deployment])
conn.execute(
deployment_table.insert(),
[{"uuid": deployment, "name": deployment,
"config": conf,
"enum_deployments_status": deployment_status,
"credentials": six.b(json.dumps([])),
"users": six.b(json.dumps([]))
}])
_OLD_DEPLOYMENT_SCHEMA = {
"type": "object",
"description": "Deprecated schema (openstack only)",
"properties": {
"type": {"type": "string"},
"auth_url": {"type": "string"},
"region_name": {"type": "string"},
"endpoint": {"type": ["string", "null"]},
"endpoint_type": {"enum": [consts.EndpointType.ADMIN,
consts.EndpointType.INTERNAL,
consts.EndpointType.PUBLIC,
None]},
"https_insecure": {"type": "boolean"},
"https_cacert": {"type": "string"},
"profiler_hmac_key": {"type": ["string", "null"]},
"profiler_conn_str": {"type": ["string", "null"]},
"admin": {"$ref": "#/definitions/user"},
"users": {"type": "array",
"items": {"$ref": "#/definitions/user"},
"minItems": 1},
"extra": {"type": "object", "additionalProperties": True}
},
"anyOf": [
{"description": "The case when the admin is specified and the "
"users can be created via 'users' context or "
"'existing_users' will be used.",
"required": ["type", "auth_url", "admin"]},
{"description": "The case when the only existing users are "
"specified.",
"required": ["type", "auth_url", "users"]}
],
"additionalProperties": False,
"definitions": {
"user": {
"type": "object",
"oneOf": [
{
"description": "Keystone V2.0",
"properties": {
"username": {"type": "string"},
"password": {"type": "string"},
"tenant_name": {"type": "string"},
},
"required": ["username", "password", "tenant_name"],
"additionalProperties": False
},
{
"description": "Keystone V3.0",
"properties": {
"username": {"type": "string"},
"password": {"type": "string"},
"domain_name": {"type": "string"},
"user_domain_name": {"type": "string"},
"project_name": {"type": "string"},
"project_domain_name": {"type": "string"},
},
"required": ["username", "password", "project_name"],
"additionalProperties": False
}
],
}
}
}
def _check_54e844ebfbc3(self, engine, data):
self.assertEqual("54e844ebfbc3",
db.schema.schema_revision(engine=engine))
original_deployments = self._54e844ebfbc3_deployments
deployment_table = db_utils.get_table(engine, "deployments")
with engine.connect() as conn:
deployments_found = conn.execute(
deployment_table.select()).fetchall()
for deployment in deployments_found:
# check deployment
self.assertIn(deployment.uuid, original_deployments)
self.assertIn(deployment.name, original_deployments)
config = json.loads(deployment.config)
if config != original_deployments[deployment.uuid]:
if deployment.uuid.startswith("should-not-be-changed"):
self.fail("Config of deployment '%s' is changes, but "
"should not." % deployment.uuid)
endpoint_type = (original_deployments[
deployment.uuid].get("endpoint_type"))
if endpoint_type in (None, "public"):
self.assertNotIn("endpoint_type", config)
else:
self.assertIn("endpoint_type", config)
self.assertEqual(endpoint_type,
config["endpoint_type"])
jsonschema.validate(config, self._OLD_DEPLOYMENT_SCHEMA)
else:
if not deployment.uuid.startswith("should-not-be-changed"):
self.fail("Config of deployment '%s' is not changes, "
"but should." % deployment.uuid)
# this deployment created at _pre_upgrade step is not needed
# anymore and we can remove it
conn.execute(
deployment_table.delete().where(
deployment_table.c.uuid == deployment.uuid)
)
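    # 08e1515a576c: legacy task verification_log entries (plain strings
    # or [etype, msg, trace] lists) are converted into dicts with
    # "etype", "msg" and, where available, "trace" keys.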
def _pre_upgrade_08e1515a576c(self, engine):
self._08e1515a576c_logs = [
{"pre": "No such file name",
"post": {"etype": IOError.__name__, "msg": "No such file name"}},
{"pre": "Task config is invalid: bla",
"post": {"etype": "InvalidTaskException",
"msg": "Task config is invalid: bla"}},
{"pre": "Failed to load task foo",
"post": {"etype": "FailedToLoadTask",
"msg": "Failed to load task foo"}},
{"pre": ["SomeCls", "msg", json.dumps(
["File some1.py, line ...\n",
"File some2.py, line ...\n"])],
"post": {"etype": "SomeCls",
"msg": "msg",
"trace": "Traceback (most recent call last):\n"
"File some1.py, line ...\n"
"File some2.py, line ...\nSomeCls: msg"}},
]
deployment_table = db_utils.get_table(engine, "deployments")
task_table = db_utils.get_table(engine, "tasks")
self._08e1515a576c_deployment_uuid = "08e1515a576c-uuuu-uuuu-iiii-dddd"
with engine.connect() as conn:
conn.execute(
deployment_table.insert(),
[{"uuid": self._08e1515a576c_deployment_uuid,
"name": self._08e1515a576c_deployment_uuid,
"config": six.b("{}"),
"enum_deployments_status":
consts.DeployStatus.DEPLOY_FINISHED,
"credentials": six.b(json.dumps([])),
"users": six.b(json.dumps([]))
}])
for i in range(0, len(self._08e1515a576c_logs)):
log = json.dumps(self._08e1515a576c_logs[i]["pre"])
conn.execute(
task_table.insert(),
[{"uuid": i,
"verification_log": log,
"status": "failed",
"enum_tasks_status": "failed",
"deployment_uuid": self._08e1515a576c_deployment_uuid
}])
def _check_08e1515a576c(self, engine, data):
self.assertEqual("08e1515a576c",
db.schema.schema_revision(engine=engine))
tasks = self._08e1515a576c_logs
deployment_table = db_utils.get_table(engine, "deployments")
task_table = db_utils.get_table(engine, "tasks")
with engine.connect() as conn:
tasks_found = conn.execute(task_table.select()).fetchall()
for task in tasks_found:
actual_log = json.loads(task.verification_log)
self.assertIsInstance(actual_log, dict)
expected = tasks[int(task.uuid)]["post"]
for key in expected:
self.assertEqual(expected[key], actual_log[key])
conn.execute(
task_table.delete().where(task_table.c.uuid == task.uuid))
deployment_uuid = self._08e1515a576c_deployment_uuid
conn.execute(
deployment_table.delete().where(
deployment_table.c.uuid == deployment_uuid))
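    # e654a0648db0: the monolithic "task_results" table is split into
    # subtasks, workloads and workloaddata; task tags move to the "tags"
    # table and aggregate columns (pass_sla, durations) are computed.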
def _pre_upgrade_e654a0648db0(self, engine):
deployment_table = db_utils.get_table(engine, "deployments")
task_table = db_utils.get_table(engine, "tasks")
taskresult_table = db_utils.get_table(engine, "task_results")
self._e654a0648db0_task_uuid = str(uuid.uuid4())
self._e654a0648db0_deployment_uuid = str(uuid.uuid4())
with engine.connect() as conn:
conn.execute(
deployment_table.insert(),
[{
"uuid": self._e654a0648db0_deployment_uuid,
"name": self._e654a0648db0_deployment_uuid,
"config": "{}",
"enum_deployments_status": consts.DeployStatus.DEPLOY_INIT,
"credentials": six.b(json.dumps([])),
"users": six.b(json.dumps([]))
}]
)
conn.execute(
task_table.insert(),
[{
"uuid": self._e654a0648db0_task_uuid,
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
"status": consts.TaskStatus.FINISHED,
"verification_log": json.dumps({}),
"tag": "test_tag",
"deployment_uuid": self._e654a0648db0_deployment_uuid
}]
)
conn.execute(
taskresult_table.insert(), [
{
"task_uuid": self._e654a0648db0_task_uuid,
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
"key": json.dumps({
"name": "test_scenario",
"pos": 0,
"kw": {
"args": {"a": "A"},
"runner": {"type": "theRunner"},
"context": {"c": "C"},
"sla": {"s": "S"}
}
}),
"data": json.dumps({
"raw": [
{"error": "e", "duration": 3},
{"duration": 1},
{"duration": 8},
],
"load_duration": 42,
"full_duration": 142,
"sla": [{"success": True}, {"success": False}]
})
}
]
)
def _check_e654a0648db0(self, engine, data):
self.assertEqual(
"e654a0648db0", db.schema.schema_revision(engine=engine))
task_table = db_utils.get_table(engine, "tasks")
subtask_table = db_utils.get_table(engine, "subtasks")
workload_table = db_utils.get_table(engine, "workloads")
workloaddata_table = db_utils.get_table(engine, "workloaddata")
tag_table = db_utils.get_table(engine, "tags")
deployment_table = db_utils.get_table(engine, "deployments")
with engine.connect() as conn:
# Check task
tasks_found = conn.execute(
task_table.select().
where(task_table.c.uuid == self._e654a0648db0_task_uuid)
).fetchall()
self.assertEqual(1, len(tasks_found))
task_found = tasks_found[0]
self.assertEqual(self._e654a0648db0_task_uuid, task_found.uuid)
self.assertEqual(self._e654a0648db0_deployment_uuid,
task_found.deployment_uuid)
self.assertEqual(consts.TaskStatus.FINISHED, task_found.status)
            # NOTE(ikhudoshyn): pass_sla is True only if all workloads passed SLA
self.assertFalse(task_found.pass_sla)
# NOTE(ikhudoshyn): sum of all full_durations of all workloads
self.assertEqual(142, task_found.task_duration)
# NOTE(ikhudoshyn): we have no info on validation duration in old
# schema
self.assertEqual(0, task_found.validation_duration)
self.assertEqual({}, json.loads(task_found.validation_result))
# Check subtask
subtasks_found = conn.execute(
subtask_table.select().
where(subtask_table.c.task_uuid ==
self._e654a0648db0_task_uuid)
).fetchall()
self.assertEqual(1, len(subtasks_found))
subtask_found = subtasks_found[0]
self.assertEqual(self._e654a0648db0_task_uuid,
subtask_found.task_uuid)
            # NOTE(ikhudoshyn): pass_sla is True only if all workloads passed SLA
self.assertFalse(subtask_found.pass_sla)
# NOTE(ikhudoshyn): sum of all full_durations of all workloads
self.assertEqual(142, subtask_found.duration)
self._e654a0648db0_subtask_uuid = subtask_found.uuid
# Check tag
tags_found = conn.execute(
tag_table.select().
where(tag_table.c.uuid == self._e654a0648db0_task_uuid)
).fetchall()
self.assertEqual(1, len(tags_found))
self.assertEqual("test_tag", tags_found[0].tag)
self.assertEqual(consts.TagType.TASK, tags_found[0].type)
# Check workload
workloads_found = conn.execute(
workload_table.select().
where(workload_table.c.task_uuid ==
self._e654a0648db0_task_uuid)
).fetchall()
self.assertEqual(1, len(workloads_found))
workload_found = workloads_found[0]
self.assertEqual(self._e654a0648db0_task_uuid,
workload_found.task_uuid)
self.assertEqual(self._e654a0648db0_subtask_uuid,
workload_found.subtask_uuid)
self.assertEqual("test_scenario", workload_found.name)
self.assertEqual(0, workload_found.position)
self.assertEqual("theRunner", workload_found.runner_type)
self.assertEqual(json.dumps({"type": "theRunner"}),
workload_found.runner)
self.assertEqual(json.dumps({"s": "S"}), workload_found.sla)
self.assertEqual(json.dumps({"a": "A"}), workload_found.args)
self.assertEqual(json.dumps({"c": "C"}), workload_found.context)
self.assertEqual(json.dumps({
"sla": [{"success": True},
{"success": False}]
}), workload_found.sla_results)
self.assertEqual(json.dumps({}), workload_found.context_execution)
self.assertEqual(42, workload_found.load_duration)
self.assertEqual(142, workload_found.full_duration)
self.assertEqual(1, workload_found.min_duration)
self.assertEqual(8, workload_found.max_duration)
self.assertEqual(3, workload_found.total_iteration_count)
self.assertEqual(1, workload_found.failed_iteration_count)
self.assertFalse(workload_found.pass_sla)
self._e654a0648db0_workload_uuid = workload_found.uuid
# Check workloadData
workloaddata_found = conn.execute(
workloaddata_table.select().
where(workloaddata_table.c.task_uuid ==
self._e654a0648db0_task_uuid)
).fetchall()
self.assertEqual(1, len(workloaddata_found))
wloaddata_found = workloaddata_found[0]
self.assertEqual(self._e654a0648db0_task_uuid,
wloaddata_found.task_uuid)
self.assertEqual(self._e654a0648db0_workload_uuid,
wloaddata_found.workload_uuid)
self.assertEqual(0, wloaddata_found.chunk_order)
self.assertEqual(0, wloaddata_found.chunk_size)
self.assertEqual(0, wloaddata_found.compressed_chunk_size)
self.assertEqual(3, wloaddata_found.iteration_count)
self.assertEqual(1, wloaddata_found.failed_iteration_count)
self.assertEqual(
json.dumps(
{
"raw": [
{"error": "e", "duration": 3},
{"duration": 1},
{"duration": 8},
]
}
), wloaddata_found.chunk_data
)
# Delete all stuff created at _pre_upgrade step
conn.execute(
tag_table.delete().
where(tag_table.c.uuid == self._e654a0648db0_task_uuid)
)
conn.execute(
workloaddata_table.delete().
where(workloaddata_table.c.task_uuid ==
self._e654a0648db0_task_uuid)
)
conn.execute(
workload_table.delete().
where(workload_table.c.task_uuid ==
self._e654a0648db0_task_uuid)
)
conn.execute(
subtask_table.delete().
where(subtask_table.c.task_uuid ==
self._e654a0648db0_task_uuid)
)
conn.execute(
task_table.delete().
where(task_table.c.uuid == self._e654a0648db0_task_uuid)
)
conn.execute(
deployment_table.delete().
where(deployment_table.c.uuid ==
self._e654a0648db0_deployment_uuid)
)
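    # 6ad4f426f005: an empty "hooks" list is added to the data of
    # existing task results.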
def _pre_upgrade_6ad4f426f005(self, engine):
deployment_table = db_utils.get_table(engine, "deployments")
task_table = db_utils.get_table(engine, "tasks")
task_result_table = db_utils.get_table(engine, "task_results")
with engine.connect() as conn:
# create deployment
conf = {
"admin": {"username": "admin",
"password": "passwd",
"project_name": "admin"},
"auth_url": "http://example.com:5000/v3",
"region_name": "RegionOne",
"type": "ExistingCloud"
}
deployment_status = consts.DeployStatus.DEPLOY_FINISHED
conn.execute(
deployment_table.insert(),
[{
"uuid": "my_deployment",
"name": "my_deployment",
"config": json.dumps(conf),
"enum_deployments_status": deployment_status,
"credentials": six.b(json.dumps([])),
"users": six.b(json.dumps([]))
}])
# create task
conn.execute(
task_table.insert(),
[{
"uuid": "my_task",
"deployment_uuid": "my_deployment",
"status": consts.TaskStatus.INIT,
}])
# create task result with empty data
conn.execute(
task_result_table.insert(),
[{
"task_uuid": "my_task",
"key": json.dumps({}),
"data": json.dumps({}),
}]
)
def _check_6ad4f426f005(self, engine, data):
self.assertEqual("6ad4f426f005",
db.schema.schema_revision(engine=engine))
deployment_table = db_utils.get_table(engine, "deployments")
task_table = db_utils.get_table(engine, "tasks")
task_result_table = db_utils.get_table(engine, "task_results")
with engine.connect() as conn:
task_results = conn.execute(task_result_table.select()).fetchall()
self.assertEqual(1, len(task_results))
task_result = task_results[0]
# check that "hooks" field added
self.assertEqual({"hooks": []}, json.loads(task_result.data))
# Remove task result
conn.execute(
task_result_table.delete().where(
task_result_table.c.id == task_result.id)
)
# Remove task
conn.execute(
task_table.delete().where(task_table.c.uuid == "my_task"))
# Remove deployment
conn.execute(
deployment_table.delete().where(
deployment_table.c.uuid == "my_deployment")
)
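    # 32fada9b2fde: the deprecated "admin_domain_name" field is removed
    # from ExistingCloud deployment configs.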
def _pre_upgrade_32fada9b2fde(self, engine):
self._32fada9b2fde_deployments = {
# right config which should not be changed after migration
"should-not-be-changed-1": {
"admin": {"username": "admin",
"password": "passwd",
"project_name": "admin"},
"auth_url": "http://example.com:5000/v3",
"region_name": "RegionOne",
"type": "ExistingCloud"},
# right config which should not be changed after migration
"should-not-be-changed-2": {
"admin": {"username": "admin",
"password": "passwd",
"tenant_name": "admin"},
"users": [{"username": "admin",
"password": "passwd",
"tenant_name": "admin"}],
"auth_url": "http://example.com:5000/v2.0",
"region_name": "RegionOne",
"type": "ExistingCloud"},
# not ExistingCloud config which should not be changed
"should-not-be-changed-3": {
"url": "example.com",
"type": "Something"},
# with `admin_domain_name` field
"with_admin_domain_name": {
"admin": {"username": "admin",
"password": "passwd",
"project_name": "admin",
"admin_domain_name": "admin"},
"auth_url": "http://example.com:5000/v3",
"region_name": "RegionOne",
"type": "ExistingCloud"},
}
deployment_table = db_utils.get_table(engine, "deployments")
deployment_status = consts.DeployStatus.DEPLOY_FINISHED
with engine.connect() as conn:
for deployment in self._32fada9b2fde_deployments:
conf = json.dumps(
self._32fada9b2fde_deployments[deployment])
conn.execute(
deployment_table.insert(),
[{"uuid": deployment, "name": deployment,
"config": conf,
"enum_deployments_status": deployment_status,
"credentials": six.b(json.dumps([])),
"users": six.b(json.dumps([]))
}])
def _check_32fada9b2fde(self, engine, data):
self.assertEqual("32fada9b2fde",
db.schema.schema_revision(engine=engine))
original_deployments = self._32fada9b2fde_deployments
deployment_table = db_utils.get_table(engine, "deployments")
with engine.connect() as conn:
deployments_found = conn.execute(
deployment_table.select()).fetchall()
for deployment in deployments_found:
# check deployment
self.assertIn(deployment.uuid, original_deployments)
self.assertIn(deployment.name, original_deployments)
config = json.loads(deployment.config)
if config != original_deployments[deployment.uuid]:
if deployment.uuid.startswith("should-not-be-changed"):
self.fail("Config of deployment '%s' is changes, but "
"should not." % deployment.uuid)
if "admin_domain_name" in deployment.config:
self.fail("Config of deployment '%s' should not "
"contain `admin_domain_name` field." %
deployment.uuid)
endpoint_type = (original_deployments[
deployment.uuid].get("endpoint_type"))
if endpoint_type in (None, "public"):
self.assertNotIn("endpoint_type", config)
else:
self.assertIn("endpoint_type", config)
self.assertEqual(endpoint_type,
config["endpoint_type"])
jsonschema.validate(config, self._OLD_DEPLOYMENT_SCHEMA)
else:
if not deployment.uuid.startswith("should-not-be-changed"):
self.fail("Config of deployment '%s' is not changes, "
"but should." % deployment.uuid)
# this deployment created at _pre_upgrade step is not needed
# anymore and we can remove it
conn.execute(
deployment_table.delete().where(
deployment_table.c.uuid == deployment.uuid)
)
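    # 484cd9413e66: verification totals move into dedicated columns
    # (tests_count, tests_duration, skipped, success, unexpected_success,
    # expected_failures) and per-test statuses are normalized
    # ("OK" -> "success", "FAIL" -> "fail" with a "traceback" key).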
def _pre_upgrade_484cd9413e66(self, engine):
self._484cd9413e66_deployment_uuid = "484cd9413e66-deploy"
self._484cd9413e66_verifications = [
{"total": {"time": 1.0,
"failures": 2,
"skipped": 3,
"success": 4,
"errors": 0,
"tests": 2
},
"test_cases": {"test1": {"status": "OK"},
"test2": {"status": "FAIL",
"failure": {"log": "trace"}}},
"set_name": "full"},
{"total": {"time": 2.0,
"failures": 3,
"skipped": 4,
"success": 5,
"unexpected_success": 6,
"expected_failures": 7,
"tests": 2
},
"test_cases": {"test1": {"status": "success"},
"test2": {"status": "failed", ""
"traceback": "trace"}},
"set_name": "smoke"}
]
deployment_table = db_utils.get_table(engine, "deployments")
verifications_table = db_utils.get_table(engine, "verifications")
vresults_table = db_utils.get_table(engine,
"verification_results")
deployment_status = consts.DeployStatus.DEPLOY_FINISHED
vstatus = consts.TaskStatus.FINISHED
with engine.connect() as conn:
conn.execute(
deployment_table.insert(),
[{"uuid": self._484cd9413e66_deployment_uuid,
"name": self._484cd9413e66_deployment_uuid,
"config": six.b(json.dumps([])),
"enum_deployments_status": deployment_status,
"credentials": six.b(json.dumps([])),
"users": six.b(json.dumps([]))
}])
for i in range(len(self._484cd9413e66_verifications)):
verification = self._484cd9413e66_verifications[i]
vuuid = "uuid-%s" % i
conn.execute(
verifications_table.insert(),
[{"uuid": vuuid,
"deployment_uuid":
self._484cd9413e66_deployment_uuid,
"status": vstatus,
"set_name": verification["set_name"],
"tests": verification["total"]["tests"],
"failures": verification["total"]["failures"],
"time": verification["total"]["time"],
"errors": 0,
}])
data = copy.deepcopy(verification)
data["total"]["test_cases"] = data["test_cases"]
data = data["total"]
conn.execute(
vresults_table.insert(),
[{"uuid": vuuid,
"verification_uuid": vuuid,
"data": json.dumps(data)
}])
def _check_484cd9413e66(self, engine, data):
self.assertEqual("484cd9413e66",
db.schema.schema_revision(engine=engine))
verifications_table = db_utils.get_table(engine, "verifications")
with engine.connect() as conn:
verifications = conn.execute(
verifications_table.select()).fetchall()
for i in range(len(verifications)):
verification_orig = self._484cd9413e66_verifications[i]
verification = verifications[i]
total = {"time": verification.tests_duration,
"failures": verification.failures,
"skipped": verification.skipped,
"success": verification.success,
"tests": verification.tests_count}
results = verification_orig["test_cases"]
old_format = "errors" in verification_orig["total"]
if old_format:
total["errors"] = 0
for test_name in results:
status = results[test_name]["status"]
if status == "OK":
status = "success"
elif status == "FAIL":
status = "fail"
results[test_name]["traceback"] = results[
test_name]["failure"].pop("log")
results[test_name].pop("failure")
results[test_name]["status"] = status
else:
                    uxsuccess = verification.unexpected_success
                    total["unexpected_success"] = uxsuccess
total["expected_failures"] = verification.expected_failures
self.assertEqual(verification_orig["total"], total)
self.assertEqual(results, json.loads(verification.tests))
self.assertEqual(
{"pattern": "set=%s" % verification_orig["set_name"]},
json.loads(verification.run_args))
self.assertEqual(
verification_orig["total"].get("unexpected_success", 0),
verification.unexpected_success)
self.assertEqual(
verification_orig["total"].get("expected_failures", 0),
verification.expected_failures)
conn.execute(
verifications_table.delete().where(
verifications_table.c.uuid == verification.uuid)
)
deployment_table = db_utils.get_table(engine, "deployments")
conn.execute(
deployment_table.delete().where(
deployment_table.c.uuid ==
self._484cd9413e66_deployment_uuid)
)
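    # 37fdbb373e8d: the per-test "time" key in verification results is
    # renamed to "duration".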
def _pre_upgrade_37fdbb373e8d(self, engine):
self._37fdbb373e8d_deployment_uuid = "37fdbb373e8d-deployment"
self._37fdbb373e8d_verifier_uuid = "37fdbb373e8d-verifier"
self._37fdbb373e8d_verifications_tests = [
{
"test_1[smoke, negative]": {
"name": "test_1",
"time": 2.32,
"status": "success",
"tags": ["smoke", "negative"]
},
"test_2[smoke, negative]": {
"name": "test_2",
"time": 4.32,
"status": "success",
"tags": ["smoke", "negative"]
}
},
{
"test_3[smoke, negative]": {
"name": "test_3",
"time": 6.32,
"status": "success",
"tags": ["smoke", "negative"]
},
"test_4[smoke, negative]": {
"name": "test_4",
"time": 8.32,
"status": "success",
"tags": ["smoke", "negative"]
}
}
]
deployment_table = db_utils.get_table(engine, "deployments")
verifiers_table = db_utils.get_table(engine, "verifiers")
verifications_table = db_utils.get_table(engine, "verifications")
deployment_status = consts.DeployStatus.DEPLOY_FINISHED
with engine.connect() as conn:
conn.execute(
deployment_table.insert(),
[{"uuid": self._37fdbb373e8d_deployment_uuid,
"name": self._37fdbb373e8d_deployment_uuid,
"config": six.b(json.dumps([])),
"enum_deployments_status": deployment_status,
"credentials": six.b(json.dumps([])),
"users": six.b(json.dumps([]))
}])
conn.execute(
verifiers_table.insert(),
[{"uuid": self._37fdbb373e8d_verifier_uuid,
"name": self._37fdbb373e8d_verifier_uuid,
"type": "some-type",
"status": consts.VerifierStatus.INSTALLED
}])
for i in range(len(self._37fdbb373e8d_verifications_tests)):
tests = self._37fdbb373e8d_verifications_tests[i]
conn.execute(
verifications_table.insert(),
[{"uuid": "verification-uuid-%s" % i,
"deployment_uuid":
self._37fdbb373e8d_deployment_uuid,
"verifier_uuid": self._37fdbb373e8d_verifier_uuid,
"status": consts.VerificationStatus.FINISHED,
"tests": json.dumps(tests)
}])
def _check_37fdbb373e8d(self, engine, data):
self.assertEqual("37fdbb373e8d",
db.schema.schema_revision(engine=engine))
verifications_table = db_utils.get_table(engine, "verifications")
with engine.connect() as conn:
verifications = conn.execute(
verifications_table.select()).fetchall()
self.assertEqual(len(verifications),
len(self._37fdbb373e8d_verifications_tests))
for i in range(len(verifications)):
v = verifications[i]
updated_tests = json.loads(v.tests)
expected_tests = self._37fdbb373e8d_verifications_tests[i]
for test in expected_tests.values():
duration = test.pop("time")
test["duration"] = duration
self.assertEqual(expected_tests, updated_tests)
conn.execute(
verifications_table.delete().where(
verifications_table.c.uuid == v.uuid)
)
deployment_table = db_utils.get_table(engine, "deployments")
conn.execute(
deployment_table.delete().where(
deployment_table.c.uuid ==
self._37fdbb373e8d_deployment_uuid)
)
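    # a6f364988fc2: reworks the type column of the tags table (the
    # fixtures fill both "enum_tag_types" and "type"); uuid/type/tag
    # values must survive the migration unchanged.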
def _pre_upgrade_a6f364988fc2(self, engine):
self._a6f364988fc2_tags = [
{
"uuid": "uuid-1",
"type": "task",
"tag": "tag-1"
},
{
"uuid": "uuid-2",
"type": "subtask",
"tag": "tag-2"
},
{
"uuid": "uuid-3",
"type": "task",
"tag": "tag-3"
}
]
tags_table = db_utils.get_table(engine, "tags")
with engine.connect() as conn:
for t in self._a6f364988fc2_tags:
conn.execute(
tags_table.insert(),
[{
"uuid": t["uuid"],
"enum_tag_types": t["type"],
"type": t["type"],
"tag": t["tag"]
}])
def _check_a6f364988fc2(self, engine, data):
self.assertEqual("a6f364988fc2",
db.schema.schema_revision(engine=engine))
tags_table = db_utils.get_table(engine, "tags")
with engine.connect() as conn:
tags = conn.execute(tags_table.select()).fetchall()
self.assertEqual(len(tags), len(self._a6f364988fc2_tags))
for i in range(len(tags)):
for k in ("uuid", "type", "tag"):
self.assertEqual(self._a6f364988fc2_tags[i][k], tags[i][k])
conn.execute(
tags_table.delete().where(
tags_table.c.uuid == tags[i].uuid))
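    # f33f4610dcda: verification statuses are remapped -- finished runs
    # with failures or unexpected successes become "failed", and the old
    # "failed" status becomes "crashed".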
def _pre_upgrade_f33f4610dcda(self, engine):
self._f33f4610dcda_deployment_uuid = "f33f4610dcda-deployment"
self._f33f4610dcda_verifier_uuid = "f33f4610dcda-verifier"
self._f33f4610dcda_verifications = [
{"status": "init", "failures": 0, "unexpected_success": 0},
{"status": "running", "failures": 0, "unexpected_success": 0},
{"status": "finished", "failures": 0, "unexpected_success": 0},
{"status": "finished", "failures": 1, "unexpected_success": 0,
"new_status": "failed"},
{"status": "finished", "failures": 1, "unexpected_success": 1,
"new_status": "failed"},
{"status": "finished", "failures": 0, "unexpected_success": 1,
"new_status": "failed"},
{"status": "failed", "failures": 0, "unexpected_success": 0,
"new_status": "crashed"},
]
deployment_table = db_utils.get_table(engine, "deployments")
verifiers_table = db_utils.get_table(engine, "verifiers")
verifications_table = db_utils.get_table(engine, "verifications")
deployment_status = consts.DeployStatus.DEPLOY_FINISHED
with engine.connect() as conn:
conn.execute(
deployment_table.insert(),
[{"uuid": self._f33f4610dcda_deployment_uuid,
"name": self._f33f4610dcda_deployment_uuid,
"config": six.b(json.dumps([])),
"enum_deployments_status": deployment_status,
"credentials": six.b(json.dumps([])),
"users": six.b(json.dumps([]))
}])
conn.execute(
verifiers_table.insert(),
[{"uuid": self._f33f4610dcda_verifier_uuid,
"name": self._f33f4610dcda_verifier_uuid,
"type": "some-type",
"status": consts.VerifierStatus.INSTALLED
}])
for i in range(len(self._f33f4610dcda_verifications)):
v = self._f33f4610dcda_verifications[i]
conn.execute(
verifications_table.insert(),
[{"uuid": "verification-uuid-%s" % i,
"deployment_uuid": self._f33f4610dcda_deployment_uuid,
"verifier_uuid": self._f33f4610dcda_verifier_uuid,
"status": v["status"],
"failures": v["failures"],
"unexpected_success": v["unexpected_success"]
}])
def _check_f33f4610dcda(self, engine, data):
self.assertEqual("f33f4610dcda",
db.schema.schema_revision(engine=engine))
verifications_table = db_utils.get_table(engine, "verifications")
with engine.connect() as conn:
verifications = conn.execute(
verifications_table.select()).fetchall()
self.assertEqual(len(verifications),
len(self._f33f4610dcda_verifications))
for i in range(len(verifications)):
if "new_status" in self._f33f4610dcda_verifications[i]:
self.assertEqual(
self._f33f4610dcda_verifications[i]["new_status"],
verifications[i].status)
conn.execute(
verifications_table.delete().where(
verifications_table.c.uuid == verifications[i].uuid)
)
deployment_table = db_utils.get_table(engine, "deployments")
conn.execute(
deployment_table.delete().where(
deployment_table.c.uuid ==
self._f33f4610dcda_deployment_uuid)
)
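    # 4ef544102ba7: task statuses are remapped -- "failed" becomes
    # "crashed" and "verifying" becomes "validating"; finished tasks
    # stay untouched.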
def _pre_upgrade_4ef544102ba7(self, engine):
self._4ef544102ba7_deployment_uuid = "4ef544102ba7-deploy"
self.tasks = {
"should-not-be-changed-1": {
"uuid": "should-not-be-changed-1",
"deployment_uuid": self._4ef544102ba7_deployment_uuid,
"validation_result": {
"etype": "SomeCls",
"msg": "msg",
"trace": "Traceback (most recent call last):\n"
"File some1.py, line ...\n"
"File some2.py, line ...\nSomeCls: msg"},
"status": "finished"},
"should-be-changed-1": {
"uuid": "should-be-changed-1",
"deployment_uuid": self._4ef544102ba7_deployment_uuid,
"validation_result": {},
"status": "failed"},
"should-be-changed-2": {
"uuid": "should-be-changed-2",
"deployment_uuid": self._4ef544102ba7_deployment_uuid,
"validation_result": {},
"status": "verifying"},
}
deployment_table = db_utils.get_table(engine, "deployments")
with engine.connect() as conn:
conn.execute(
deployment_table.insert(),
[{"uuid": self._4ef544102ba7_deployment_uuid,
"name": self._4ef544102ba7_deployment_uuid,
"config": six.b(json.dumps([])),
"enum_deployments_status":
consts.DeployStatus.DEPLOY_FINISHED,
"credentials": six.b(json.dumps([])),
"users": six.b(json.dumps([]))
}])
task_table = db_utils.get_table(engine, "tasks")
with engine.connect() as conn:
for task in self.tasks:
conn.execute(
task_table.insert(),
[{
"deployment_uuid": self.tasks[task][
"deployment_uuid"],
"status": self.tasks[task]["status"],
"validation_result": json.dumps(
self.tasks[task]["validation_result"]),
"uuid": self.tasks[task]["uuid"]
}])
subtask_table = db_utils.get_table(engine, "subtasks")
with engine.connect() as conn:
for task in self.tasks:
conn.execute(
subtask_table.insert(),
[{
"task_uuid": self.tasks[task]["uuid"],
"status": consts.SubtaskStatus.RUNNING,
"context": json.dumps({}),
"sla": json.dumps({}),
"run_in_parallel": False,
"uuid": "subtask_" + self.tasks[task]["uuid"]
}])
def _check_4ef544102ba7(self, engine, data):
self.assertEqual("4ef544102ba7",
db.schema.schema_revision(engine=engine))
org_tasks = self.tasks
task_table = db_utils.get_table(engine, "tasks")
subtask_table = db_utils.get_table(engine, "subtasks")
with engine.connect() as conn:
subtasks_found = conn.execute(
subtask_table.select()).fetchall()
for subtask in subtasks_found:
conn.execute(
subtask_table.delete().where(
subtask_table.c.id == subtask.id)
)
with engine.connect() as conn:
tasks_found = conn.execute(
task_table.select()).fetchall()
self.assertEqual(3, len(tasks_found))
for task in tasks_found:
self.assertIn("uuid", task)
self.assertIn("status", task)
if task.status != org_tasks[task.uuid]["status"]:
if task.uuid.startswith("should-not-be-changed"):
self.fail("Config of deployment '%s' is changes, but "
"should not." % task.uuid)
if task.status != "crashed" and task.uuid == (
"should-be-changed-1"):
self.fail("Task '%s' status should be changed to "
"crashed." % task.uuid)
if task.status != "validating" and task.uuid == (
"should-be-changed-2"):
self.fail("Task '%s' status should be changed to "
"validating." % task.uuid)
else:
if not task.uuid.startswith("should-not-be-changed"):
self.fail("Config of deployment '%s' is not changes, "
"but should." % task.uuid)
conn.execute(
task_table.delete().where(
task_table.c.id == task.id)
)
deployment_table = db_utils.get_table(engine, "deployments")
conn.execute(
deployment_table.delete().where(
deployment_table.c.uuid ==
self._4ef544102ba7_deployment_uuid)
)
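    # 92aaaa2a6bb3: pickled credentials stored as a list of
    # [platform, cred] pairs are converted to a JSON dict keyed by
    # platform, e.g.
    #   [["openstack", {"foo": "bar"}]] -> {"openstack": [{"foo": "bar"}]}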
def _pre_upgrade_92aaaa2a6bb3(self, engine):
self._92aaaa2a6bb3_deployments = [
("1-cred", [["openstack", {"foo": "bar"}]]),
("2-cred", [["openstack", {"foo": "bar1"}],
["openstack", {"foo": "bar2"}]]),
("multi-cred", [["spam", {"foo": "bar1"}],
["eggs", {"foo": "bar2"}]]),
]
deployment_table = db_utils.get_table(engine, "deployments")
deployment_status = consts.DeployStatus.DEPLOY_FINISHED
with engine.connect() as conn:
for deployment, creds in self._92aaaa2a6bb3_deployments:
conn.execute(
deployment_table.insert(),
[{"uuid": deployment, "name": deployment,
"config": json.dumps({}),
"enum_deployments_status": deployment_status,
"credentials": pickle.dumps(creds),
}])
def _check_92aaaa2a6bb3(self, engine, data):
expected_credentials = [
("1-cred", {"openstack": [{"foo": "bar"}]}),
("2-cred", {"openstack": [{"foo": "bar1"},
{"foo": "bar2"}]}),
("multi-cred", {"spam": [{"foo": "bar1"}],
"eggs": [{"foo": "bar2"}]}),
]
deployment_table = db_utils.get_table(engine, "deployments")
with engine.connect() as conn:
for deployment, expected_creds in expected_credentials:
dep_obj = conn.execute(
deployment_table.select().where(
deployment_table.c.uuid == deployment)).fetchone()
self.assertEqual(
expected_creds, json.loads(dep_obj.credentials))
conn.execute(
deployment_table.delete().where(
deployment_table.c.uuid == deployment))
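    # 35fe16d4ab1c: the aggregate pass_sla and duration columns on tasks
    # and subtasks are back-filled from their workloads.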
def _pre_upgrade_35fe16d4ab1c(self, engine):
deployment_table = db_utils.get_table(engine, "deployments")
task_table = db_utils.get_table(engine, "tasks")
subtask_table = db_utils.get_table(engine, "subtasks")
workload_table = db_utils.get_table(engine, "workloads")
deployment_uuid = str(uuid.uuid4())
self._35fe16d4ab1c_task_uuid = str(uuid.uuid4())
self._35fe16d4ab1c_subtasks = {
str(uuid.uuid4()): [
{"uuid": str(uuid.uuid4()),
"pass_sla": False,
"load_duration": 1},
{"uuid": str(uuid.uuid4()),
"pass_sla": False,
"load_duration": 2.6}
],
str(uuid.uuid4()): [
{"uuid": str(uuid.uuid4()),
"pass_sla": True,
"load_duration": 3},
{"uuid": str(uuid.uuid4()),
"pass_sla": False,
"load_duration": 7}
]
}
with engine.connect() as conn:
conn.execute(
deployment_table.insert(),
[{
"uuid": deployment_uuid,
"name": str(uuid.uuid4()),
"config": "{}",
"enum_deployments_status": consts.DeployStatus.DEPLOY_INIT,
"credentials": six.b(json.dumps([])),
"users": six.b(json.dumps([]))
}]
)
conn.execute(
task_table.insert(),
[{
"uuid": self._35fe16d4ab1c_task_uuid,
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
"status": consts.TaskStatus.FINISHED,
"validation_result": six.b(json.dumps({})),
"deployment_uuid": deployment_uuid
}]
)
for subtask_id, workloads in self._35fe16d4ab1c_subtasks.items():
conn.execute(
subtask_table.insert(),
[{
"uuid": subtask_id,
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
"task_uuid": self._35fe16d4ab1c_task_uuid,
"context": six.b(json.dumps([])),
"sla": six.b(json.dumps([])),
"run_in_parallel": False
}]
)
for workload in workloads:
conn.execute(
workload_table.insert(),
[{
"uuid": workload["uuid"],
"name": "foo",
"task_uuid": self._35fe16d4ab1c_task_uuid,
"subtask_uuid": subtask_id,
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
"position": 0,
"runner": "",
"runner_type": "",
"context": "",
"context_execution": "",
"statistics": "",
"hooks": "",
"sla": "",
"sla_results": "",
"args": "",
"load_duration": workload["load_duration"],
"pass_sla": workload["pass_sla"]
}]
)
def _check_35fe16d4ab1c(self, engine, data):
deployment_table = db_utils.get_table(engine, "deployments")
task_table = db_utils.get_table(engine, "tasks")
subtask_table = db_utils.get_table(engine, "subtasks")
workload_table = db_utils.get_table(engine, "workloads")
with engine.connect() as conn:
task_id = self._35fe16d4ab1c_task_uuid
task_obj = conn.execute(
task_table.select().where(
task_table.c.uuid == task_id)).fetchone()
self.assertFalse(task_obj.pass_sla)
            subtask_duration = {
                k: sum(w["load_duration"] for w in v)
                for k, v in self._35fe16d4ab1c_subtasks.items()}
self.assertEqual(sum(subtask_duration.values()),
task_obj.task_duration)
for subtask_id, workloads in self._35fe16d4ab1c_subtasks.items():
subtask_obj = conn.execute(
subtask_table.select().where(
subtask_table.c.uuid == subtask_id)).fetchone()
self.assertFalse(subtask_obj.pass_sla)
self.assertEqual(sum([w["load_duration"] for w in workloads]),
subtask_obj.duration)
conn.execute(
workload_table.delete().where(
workload_table.c.subtask_uuid == subtask_id))
conn.execute(
subtask_table.delete().where(
subtask_table.c.uuid == subtask_id))
conn.execute(
task_table.delete().where(
task_table.c.uuid == task_obj.uuid))
conn.execute(
deployment_table.delete().where(
deployment_table.c.uuid == task_obj.deployment_uuid))
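    # 7948b83229f6: non-positive min/max durations become NULL for
    # workloads that have no stored workload data; workloads with data
    # keep their zero values.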
def _pre_upgrade_7948b83229f6(self, engine):
deployment_table = db_utils.get_table(engine, "deployments")
task_table = db_utils.get_table(engine, "tasks")
subtask_table = db_utils.get_table(engine, "subtasks")
workload_table = db_utils.get_table(engine, "workloads")
wdata_table = db_utils.get_table(engine, "workloaddata")
self._7948b83229f6_deployment_uuid = str(uuid.uuid4())
self._7948b83229f6_task_uuid = str(uuid.uuid4())
subtask_uuid = str(uuid.uuid4())
self._7948b83229f6_workloads = {
str(uuid.uuid4()): {"preprocessed": 1, "expected": 1},
str(uuid.uuid4()): {"preprocessed": 0, "expected": None},
str(uuid.uuid4()): {"preprocessed": 0, "expected": 0,
"wdata": True},
str(uuid.uuid4()): {"preprocessed": -1, "expected": None}}
with engine.connect() as conn:
conn.execute(
deployment_table.insert(),
[{
"uuid": self._7948b83229f6_deployment_uuid,
"name": str(uuid.uuid4()),
"config": "{}",
"enum_deployments_status": consts.DeployStatus.DEPLOY_INIT,
"credentials": six.b(json.dumps([])),
"users": six.b(json.dumps([]))
}]
)
conn.execute(
task_table.insert(),
[{
"uuid": self._7948b83229f6_task_uuid,
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
"status": consts.TaskStatus.FINISHED,
"validation_result": six.b(json.dumps({})),
"deployment_uuid": self._7948b83229f6_deployment_uuid
}]
)
conn.execute(
subtask_table.insert(),
[{
"uuid": subtask_uuid,
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
"task_uuid": self._7948b83229f6_task_uuid,
"context": six.b(json.dumps([])),
"sla": six.b(json.dumps([])),
"run_in_parallel": False
}]
)
for w_uuid, workload in self._7948b83229f6_workloads.items():
conn.execute(
workload_table.insert(),
[{
"uuid": w_uuid,
"name": "foo",
"task_uuid": self._7948b83229f6_task_uuid,
"subtask_uuid": subtask_uuid,
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
"position": 0,
"runner": "",
"runner_type": "",
"context": "",
"context_execution": "",
"statistics": "",
"hooks": "",
"sla": "",
"sla_results": "",
"args": "",
"load_duration": 0,
"pass_sla": True,
"min_duration": workload["preprocessed"],
"max_duration": workload["preprocessed"]
}]
)
if workload.get("wdata", False):
conn.execute(
wdata_table.insert(),
[{
"uuid": str(uuid.uuid4()),
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
"started_at": dt.datetime.utcnow(),
"finished_at": dt.datetime.utcnow(),
"task_uuid": self._7948b83229f6_task_uuid,
"workload_uuid": w_uuid,
"chunk_order": 0,
"iteration_count": 0,
"failed_iteration_count": 0,
"chunk_size": 0,
"compressed_chunk_size": 0,
"chunk_data": six.b(json.dumps([]))
}]
)
def _check_7948b83229f6(self, engine, data):
deployment_table = db_utils.get_table(engine, "deployments")
task_table = db_utils.get_table(engine, "tasks")
subtask_table = db_utils.get_table(engine, "subtasks")
workload_table = db_utils.get_table(engine, "workloads")
wdata_table = db_utils.get_table(engine, "workloaddata")
subtask_uuid = None
with engine.connect() as conn:
task_uuid = self._7948b83229f6_task_uuid
for workload in conn.execute(workload_table.select().where(
workload_table.c.task_uuid == task_uuid)).fetchall():
if subtask_uuid is None:
subtask_uuid = workload.subtask_uuid
if workload.uuid not in self._7948b83229f6_workloads:
self.fail("Unknown workload found for 7948b83229f6 "
"migration.")
original = self._7948b83229f6_workloads[workload.uuid]
self.assertEqual(original["expected"],
workload.min_duration)
self.assertEqual(original["expected"],
workload.max_duration)
if original.get("wdata", False):
conn.execute(
wdata_table.delete().where(
wdata_table.c.workload_uuid == workload.uuid))
conn.execute(
workload_table.delete().where(
workload_table.c.uuid == workload.uuid))
conn.execute(
subtask_table.delete().where(
subtask_table.c.uuid == subtask_uuid))
conn.execute(
task_table.delete().where(task_table.c.uuid == task_uuid))
deployment_uuid = self._7948b83229f6_deployment_uuid
conn.execute(
deployment_table.delete().where(
deployment_table.c.uuid == deployment_uuid))
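    # 046a38742e89: the "type" key is dropped from stored runner configs
    # (it already lives in the runner_type column) and hook configs are
    # reshaped into two-element "action" and "trigger" pairs.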
def _pre_upgrade_046a38742e89(self, engine):
deployment_table = db_utils.get_table(engine, "deployments")
task_table = db_utils.get_table(engine, "tasks")
subtask_table = db_utils.get_table(engine, "subtasks")
workload_table = db_utils.get_table(engine, "workloads")
self._046a38742e89_deployment_uuid = str(uuid.uuid4())
self._046a38742e89_task_uuid = str(uuid.uuid4())
subtask_uuid = str(uuid.uuid4())
workloads = [
{
"runner": {"type": "constant",
"times": 1000}},
{
"runner": {"type": "rps",
"rps": 300},
"hooks": [
{
"config": {"args": {"arg1": "v1"},
"description": "descr",
"name": "foo",
"trigger": {"name": "bar",
"args": {"arg2": "v2"}}}}
]
}
]
with engine.connect() as conn:
conn.execute(
deployment_table.insert(),
[{
"uuid": self._046a38742e89_deployment_uuid,
"name": str(uuid.uuid4()),
"config": "{}",
"enum_deployments_status": consts.DeployStatus.DEPLOY_INIT,
"credentials": six.b(json.dumps([])),
"users": six.b(json.dumps([]))
}]
)
conn.execute(
task_table.insert(),
[{
"uuid": self._046a38742e89_task_uuid,
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
"status": consts.TaskStatus.FINISHED,
"validation_result": six.b(json.dumps({})),
"deployment_uuid": self._046a38742e89_deployment_uuid
}]
)
conn.execute(
subtask_table.insert(),
[{
"uuid": subtask_uuid,
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
"task_uuid": self._046a38742e89_task_uuid,
"context": six.b(json.dumps([])),
"sla": six.b(json.dumps([])),
"run_in_parallel": False
}]
)
for workload in workloads:
conn.execute(
workload_table.insert(),
[{
"uuid": str(uuid.uuid4()),
"name": "foo",
"task_uuid": self._046a38742e89_task_uuid,
"subtask_uuid": subtask_uuid,
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
"position": 0,
"runner": json.dumps(workload["runner"]),
"runner_type": "",
"context": "",
"context_execution": "",
"statistics": "",
"hooks": json.dumps(workload.get("hooks", "")),
"sla": "",
"sla_results": "",
"args": "",
"load_duration": 0,
"pass_sla": True,
"min_duration": 0,
"max_duration": 1
}]
)
def _check_046a38742e89(self, engine, data):
deployment_table = db_utils.get_table(engine, "deployments")
task_table = db_utils.get_table(engine, "tasks")
subtask_table = db_utils.get_table(engine, "subtasks")
workload_table = db_utils.get_table(engine, "workloads")
subtask_uuid = None
with engine.connect() as conn:
task_uuid = self._046a38742e89_task_uuid
for workload in conn.execute(workload_table.select().where(
workload_table.c.task_uuid == task_uuid)).fetchall():
if subtask_uuid is None:
subtask_uuid = workload.subtask_uuid
runner = json.loads(workload.runner)
self.assertNotIn("type", runner)
hooks = json.loads(workload.hooks)
if hooks:
for hook in hooks:
hook_cfg = hook["config"]
self.assertEqual(2, len(hook_cfg["action"]))
self.assertEqual(2, len(hook_cfg["trigger"]))
conn.execute(
workload_table.delete().where(
workload_table.c.uuid == workload.uuid))
conn.execute(
subtask_table.delete().where(
subtask_table.c.uuid == subtask_uuid))
conn.execute(
task_table.delete().where(task_table.c.uuid == task_uuid))
deployment_uuid = self._046a38742e89_deployment_uuid
conn.execute(
deployment_table.delete().where(
deployment_table.c.uuid == deployment_uuid))
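    # 4394bdc32cfd: stored workload data chunks are rewritten -- the
    # deprecated "scenario_output" becomes "output", dict-style
    # atomic_actions become lists of timestamped records (with the last
    # action flagged "failed" on errored iterations), and workload
    # start_time and statistics are populated.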
def _pre_upgrade_4394bdc32cfd(self, engine):
deployment_table = db_utils.get_table(engine, "deployments")
task_table = db_utils.get_table(engine, "tasks")
subtask_table = db_utils.get_table(engine, "subtasks")
workload_table = db_utils.get_table(engine, "workloads")
wdata_table = db_utils.get_table(engine, "workloaddata")
self._4394bdc32cfd_deployment_uuid = str(uuid.uuid4())
task_uuid = str(uuid.uuid4())
self._4394bdc32cfd_subtask = str(uuid.uuid4())
self._4394bdc32cfd_workloads = [
{"uuid": str(uuid.uuid4()),
"start_time": 0.0,
"data": [{"timestamp": 0,
# deprecated output
"scenario_output": {"data": {1: 2}},
"duration": 3,
"idle_duration": 0,
"error": ["Something", "Foo"],
# old format of atomics
"atomic_actions": {"foo": 3}}],
"statistics": {"durations": {
"atomics": [{"children": [],
"count_per_iteration": 1,
"data": {"90%ile": 3.0,
"95%ile": 3.0,
"avg": 3.0,
"iteration_count": 1,
"max": 3.0,
"median": 3.0,
"min": 3.0,
"success": "0.0%"},
"display_name": "foo",
"name": "foo"}],
"total": {
"display_name": "total",
"name": "total",
"count_per_iteration": 1,
"data": {"90%ile": 3.0,
"95%ile": 3.0,
"avg": 3.0,
"iteration_count": 1,
"max": 3.0,
"median": 3.0,
"min": 3.0,
"success": "0.0%"},
"children": [
{"display_name": "duration",
"name": "duration",
"children": [],
"count_per_iteration": 1,
"data": {"90%ile": 3.0,
"95%ile": 3.0,
"avg": 3.0,
"iteration_count": 1,
"max": 3.0,
"median": 3.0,
"min": 3.0,
"success": "0.0%"}},
{"display_name": "idle_duration",
"name": "idle_duration",
"children": [],
"count_per_iteration": 1,
"data": {"90%ile": 0.0,
"95%ile": 0.0,
"avg": 0.0,
"iteration_count": 1,
"max": 0.0,
"median": 0.0,
"min": 0.0,
"success": "0.0%"}}]}}}},
{"uuid": str(uuid.uuid4()),
"start_time": 1.0,
"data": [{"timestamp": 1, "output": {},
"duration": 5,
"idle_duration": 0,
"error": None,
"atomic_actions": [
{"name": "foo", "started_at": 2,
"finished_at": 3, "children": []},
{"name": "foo", "started_at": 3,
"finished_at": 5, "children": []}]},
{"timestamp": 6,
"output": {},
"duration": 4,
"idle_duration": 0,
"error": None,
"atomic_actions": [
{"name": "foo", "started_at": 6,
"finished_at": 9, "children": []},
{"name": "foo", "started_at": 9,
"finished_at": 10, "children": []}]}],
"statistics": {"durations": {
"atomics": [{"display_name": "foo (x2)",
"name": "foo",
"children": [],
"count_per_iteration": 2,
"data": {"90%ile": 3.9,
"95%ile": 3.95,
"avg": 3.5,
"iteration_count": 2,
"max": 4.0,
"median": 3.5,
"min": 3.0,
"success": "100.0%"}}],
"total": {
"display_name": "total",
"name": "total",
"count_per_iteration": 1,
"data": {"90%ile": 4.9,
"95%ile": 4.95,
"avg": 4.5,
"iteration_count": 2,
"max": 5.0,
"median": 4.5,
"min": 4.0,
"success": "100.0%"},
"children": [
{
"display_name": "duration",
"name": "duration",
"children": [],
"count_per_iteration": 1,
"data": {"90%ile": 4.9,
"95%ile": 4.95,
"avg": 4.5,
"iteration_count": 2,
"max": 5.0,
"median": 4.5,
"min": 4.0,
"success": "100.0%"}},
{
"display_name": "idle_duration",
"name": "idle_duration",
"children": [],
"count_per_iteration": 1,
"data": {"90%ile": 0.0,
"95%ile": 0.0,
"avg": 0.0,
"iteration_count": 2,
"max": 0.0,
"median": 0.0,
"min": 0.0,
"success": "100.0%"}
}
]
}
}}}
]
with engine.connect() as conn:
conn.execute(
deployment_table.insert(),
[{
"uuid": self._4394bdc32cfd_deployment_uuid,
"name": str(uuid.uuid4()),
"config": "{}",
"enum_deployments_status": consts.DeployStatus.DEPLOY_INIT,
"credentials": six.b(json.dumps([])),
"users": six.b(json.dumps([]))
}]
)
conn.execute(
task_table.insert(),
[{
"uuid": task_uuid,
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
"status": consts.TaskStatus.FINISHED,
"validation_result": six.b(json.dumps({})),
"deployment_uuid": self._4394bdc32cfd_deployment_uuid
}]
)
conn.execute(
subtask_table.insert(),
[{
"uuid": self._4394bdc32cfd_subtask,
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
"task_uuid": task_uuid,
"context": six.b(json.dumps([])),
"sla": six.b(json.dumps([])),
"run_in_parallel": False
}]
)
for workload in self._4394bdc32cfd_workloads:
conn.execute(
workload_table.insert(),
[{
"uuid": workload["uuid"],
"name": "foo",
"task_uuid": task_uuid,
"subtask_uuid": self._4394bdc32cfd_subtask,
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
"position": 0,
"runner": "",
"runner_type": "",
"context": "",
"context_execution": "",
"statistics": "",
"hooks": "",
"sla": "",
"sla_results": "",
"args": "",
"load_duration": 0,
"pass_sla": True
}]
)
conn.execute(
wdata_table.insert(),
[{
"uuid": str(uuid.uuid4()),
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
"started_at": dt.datetime.utcnow(),
"finished_at": dt.datetime.utcnow(),
"task_uuid": task_uuid,
"workload_uuid": workload["uuid"],
"chunk_order": 0,
"iteration_count": 0,
"failed_iteration_count": 0,
"chunk_size": 0,
"compressed_chunk_size": 0,
"chunk_data": json.dumps({"raw": workload["data"]})
}]
)
def _check_4394bdc32cfd(self, engine, data):
deployment_table = db_utils.get_table(engine, "deployments")
task_table = db_utils.get_table(engine, "tasks")
subtask_table = db_utils.get_table(engine, "subtasks")
workload_table = db_utils.get_table(engine, "workloads")
wdata_table = db_utils.get_table(engine, "workloaddata")
task_uuid = None
with engine.connect() as conn:
subtask_id = self._4394bdc32cfd_subtask
for workload in conn.execute(workload_table.select().where(
workload_table.c.subtask_uuid == subtask_id)).fetchall():
if task_uuid is None:
task_uuid = workload.task_uuid
original = [w for w in self._4394bdc32cfd_workloads
if w["uuid"] == workload.uuid][0]
if workload.start_time is None:
start_time = None
else:
start_time = workload.start_time / 1000000.0
self.assertEqual(original["start_time"], start_time)
self.assertEqual(original["statistics"],
json.loads(workload.statistics))
wuuid = workload.uuid
for wdata in conn.execute(wdata_table.select().where(
wdata_table.c.workload_uuid == wuuid)).fetchall():
                    for itr in json.loads(wdata.chunk_data)["raw"]:
                        self.assertNotIn("scenario_output", itr)
                        self.assertIn("output", itr)
                        self.assertIsInstance(itr["atomic_actions"], list)
                        if itr["error"]:
                            last_atomic = itr["atomic_actions"][-1]
                            self.assertTrue(last_atomic.get("failed", False))
conn.execute(
wdata_table.delete().where(
wdata_table.c.workload_uuid == workload.uuid))
conn.execute(
workload_table.delete().where(
workload_table.c.uuid == workload.uuid))
conn.execute(
subtask_table.delete().where(
subtask_table.c.uuid == subtask_id))
conn.execute(
task_table.delete().where(task_table.c.uuid == task_uuid))
deployment_uuid = self._4394bdc32cfd_deployment_uuid
conn.execute(
deployment_table.delete().where(
deployment_table.c.uuid == deployment_uuid))
def _pre_upgrade_dc46687661df(self, engine):
deployment_table = db_utils.get_table(engine, "deployments")
task_table = db_utils.get_table(engine, "tasks")
subtask_table = db_utils.get_table(engine, "subtasks")
workload_table = db_utils.get_table(engine, "workloads")
self._dc46687661df_deployment_uuid = str(uuid.uuid4())
self._dc46687661df_task_uuid = str(uuid.uuid4())
subtask_uuid = str(uuid.uuid4())
start_time = 1483221602
created_at = dt.datetime.fromtimestamp(start_time - 2)
created_at = created_at.replace(tzinfo=iso8601.iso8601.UTC)
start_time = float(start_time) * 1000000.0
self._dc46687661df_workloads = {
str(uuid.uuid4()): {
"start_time": None,
"created_at": dt.datetime.utcnow(),
"context": {"users": {"tenants": 3}},
"full_duration": 5,
"load_duration": 3
},
str(uuid.uuid4()): {
"start_time": start_time,
"created_at": created_at,
"context": {"users": {"tenants": 3},
"volumes": {"foo": "bar"}},
"full_duration": 10,
"load_duration": 3
},
str(uuid.uuid4()): {
"start_time": start_time,
"created_at": created_at,
"context": {"foobar": {"foo": "bar"}},
"with_fake_context": True,
"full_duration": 10,
"load_duration": 3
}
}
with engine.connect() as conn:
conn.execute(
deployment_table.insert(),
[{
"uuid": self._046a38742e89_deployment_uuid,
"name": str(uuid.uuid4()),
"config": "{}",
"enum_deployments_status": consts.DeployStatus.DEPLOY_INIT,
"credentials": six.b(json.dumps([])),
"users": six.b(json.dumps([]))
}]
)
conn.execute(
task_table.insert(),
[{
"uuid": self._dc46687661df_task_uuid,
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
"status": consts.TaskStatus.FINISHED,
"validation_result": six.b(json.dumps({})),
"deployment_uuid": self._046a38742e89_deployment_uuid
}]
)
conn.execute(
subtask_table.insert(),
[{
"uuid": subtask_uuid,
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
"task_uuid": self._dc46687661df_task_uuid,
"context": six.b(json.dumps([])),
"sla": six.b(json.dumps([])),
"run_in_parallel": False
}]
)
conn.execute(
workload_table.insert(),
[{
"uuid": w_uuid,
"name": "foo",
"task_uuid": self._dc46687661df_task_uuid,
"subtask_uuid": subtask_uuid,
"created_at": w["created_at"],
"updated_at": dt.datetime.utcnow(),
"position": 0,
"runner": six.b(json.dumps([])),
"runner_type": "",
"context": json.dumps(w["context"]),
"context_execution": "foo",
"statistics": "",
"hooks": six.b(json.dumps([])),
"sla": "",
"sla_results": "",
"args": "",
"load_duration": w["load_duration"],
"full_duration": w["full_duration"],
"start_time": w["start_time"],
"pass_sla": True,
"min_duration": 0,
"max_duration": 1
} for w_uuid, w in self._dc46687661df_workloads.items()]
)
def _check_dc46687661df(self, engine, data):
deployment_table = db_utils.get_table(engine, "deployments")
task_table = db_utils.get_table(engine, "tasks")
subtask_table = db_utils.get_table(engine, "subtasks")
workload_table = db_utils.get_table(engine, "workloads")
with engine.connect() as conn:
task_uuid = self._dc46687661df_task_uuid
for w_uuid, w in self._dc46687661df_workloads.items():
workload = conn.execute(workload_table.select().where(
workload_table.c.uuid == w_uuid)).first()
self.assertNotIn("context", workload)
self.assertNotIn("context_execution", workload)
self.assertEqual(w["context"],
json.loads(workload["contexts"]))
if w["start_time"] is None:
self.assertEqual("[]", workload["contexts_results"])
elif w.get("with_fake_context", False):
self.assertEqual([{
"plugin_cfg": {"description": mock.ANY},
"plugin_name": "AllExecutedContexts",
"setup": {"started_at": 1483221600.0,
"finished_at": 1483221601.99,
"atomic_actions": [],
"error": None},
"cleanup": {"started_at": 1483221605.01,
"finished_at": 1483221609.9,
"atomic_actions": [],
"error": None}}],
json.loads(workload["contexts_results"]))
else:
self.assertEqual([{
"plugin_name": "AllExecutedContexts",
"plugin_cfg": {
"description": mock.ANY,
"order_of_execution": {
"note": mock.ANY,
"order": ["users@openstack.setup",
"volumes@openstack.setup",
"volumes@openstack.cleanup",
"users@openstack.cleanup"]}},
"setup": {"started_at": 1483221600.0,
"finished_at": 1483221601.99,
"atomic_actions": [],
"error": None},
"cleanup": {"started_at": 1483221605.01,
"finished_at": 1483221609.9,
"atomic_actions": [],
"error": None}}],
json.loads(workload["contexts_results"]))
conn.execute(
workload_table.delete().where(
workload_table.c.uuid == workload.uuid))
subtask = conn.execute(subtask_table.select().where(
subtask_table.c.task_uuid == task_uuid)).first()
self.assertNotIn("context", subtask)
self.assertNotIn("context_execution", subtask)
self.assertEqual("{}", subtask["contexts"])
self.assertEqual("[]", subtask["contexts_results"])
conn.execute(
subtask_table.delete().where(
subtask_table.c.uuid == subtask["uuid"]))
conn.execute(
task_table.delete().where(task_table.c.uuid == task_uuid))
deployment_uuid = self._046a38742e89_deployment_uuid
conn.execute(
deployment_table.delete().where(
deployment_table.c.uuid == deployment_uuid))
def _pre_upgrade_dc0fe6de6786(self, engine):
deployment_table = db_utils.get_table(engine, "deployments")
self._dc0fe6de6786_deployments = [
# empty config
(str(uuid.uuid4()), {"type": "ExistingCloud", "creds": {}}),
# new config with openstack
(str(uuid.uuid4()), {
"type": "ExistingCloud",
"creds": {
"openstack": {
"auth_url": "http://example.net:5000/v2.0/",
"region_name": "RegionOne",
"endpoint_type": "public",
"admin": {
"username": "admin",
"password": "myadminpass",
"tenant_name": "demo"
},
"https_insecure": False,
"https_cacert": ""
}
}
}),
# old config with openstack
(str(uuid.uuid4()), {
"type": "ExistingCloud",
"auth_url": "http://example.net:5000/v2.0/",
"region_name": "RegionOne",
"endpoint_type": "public",
"admin": {
"username": "admin",
"password": "myadminpass",
"tenant_name": "demo"
},
"https_insecure": False,
"https_cacert": "",
"extra": {}
}),
# some custom unknown thing
(str(uuid.uuid4()), {"some_special_deployment": "foo"})
]
with engine.connect() as conn:
conn.execute(
deployment_table.insert(),
[{
"uuid": d_uuid,
"name": str(uuid.uuid4()),
"config": (
json.dumps(d_cfg) if d_cfg
else six.b(json.dumps(d_cfg))),
"enum_deployments_status": consts.DeployStatus.DEPLOY_INIT,
"credentials": six.b(json.dumps([]))
} for d_uuid, d_cfg in self._dc0fe6de6786_deployments]
)
def _check_dc0fe6de6786(self, engine, data):
deployment_table = db_utils.get_table(engine, "deployments")
with engine.connect() as conn:
for d_uuid, d_cfg in self._dc0fe6de6786_deployments:
deployment = conn.execute(deployment_table.select().where(
deployment_table.c.uuid == d_uuid)).first()
config = json.loads(deployment.config)
if "type" in config:
self.assertEqual(config["type"],
"ExistingCloud")
self.assertIn("creds", config)
if "creds" not in d_cfg:
new_cfg = {"type": d_cfg.pop("type"),
"extra": d_cfg.pop("extra"),
"creds": {"openstack": d_cfg}}
self.assertEqual(new_cfg, config)
else:
self.assertEqual(d_cfg, config)
conn.execute(
deployment_table.delete().where(
deployment_table.c.uuid == d_uuid))
def _pre_upgrade_bc908ac9a1fc(self, engine):
deployment_table = db_utils.get_table(engine, "deployments")
task_table = db_utils.get_table(engine, "tasks")
verifier_table = db_utils.get_table(engine, "verifiers")
verification_table = db_utils.get_table(engine, "verifications")
self._bc908ac9a1fc_deployments = [
# empty config
(str(uuid.uuid4()), {"type": "ExistingCloud", "creds": {}}),
# OpenStack default config
(str(uuid.uuid4()), {
"type": "ExistingCloud",
"creds": {
"openstack": {
"auth_url": "http://example.net:5000/v2.0/",
"region_name": "RegionOne",
"endpoint_type": "public",
"admin": {
"username": "admin",
"password": "myadminpass",
"tenant_name": "demo"
},
"https_insecure": False,
"https_cacert": ""
}
}
}),
# some custom unknown thing
(str(uuid.uuid4()), {"some_special_deployment": "foo"})
]
self._bc908ac9a1fc_task_uuid = str(uuid.uuid4())
self._bc908ac9a1fc_verifier_uuid = str(uuid.uuid4())
self._bc908ac9a1fc_verification_uuid = str(uuid.uuid4())
with engine.connect() as conn:
conn.execute(
deployment_table.insert(),
[{
"uuid": d_uuid,
"name": str(uuid.uuid4()),
"config": (
json.dumps(d_cfg) if d_cfg
else six.b(json.dumps(d_cfg))),
"enum_deployments_status": consts.DeployStatus.DEPLOY_INIT,
"credentials": six.b(json.dumps([]))
} for d_uuid, d_cfg in self._bc908ac9a1fc_deployments]
)
conn.execute(
task_table.insert(),
[{
"uuid": self._bc908ac9a1fc_task_uuid,
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
"status": consts.TaskStatus.FINISHED,
"validation_result": six.b(json.dumps({})),
"deployment_uuid": self._bc908ac9a1fc_deployments[0][0]
}]
)
conn.execute(
verifier_table.insert(),
[{
"uuid": self._bc908ac9a1fc_verifier_uuid,
"name": str(uuid.uuid4()),
"type": str(uuid.uuid4()),
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
"status": consts.VerifierStatus.INIT
}]
)
conn.execute(
verification_table.insert(),
[{
"uuid": self._bc908ac9a1fc_verification_uuid,
"deployment_uuid": self._bc908ac9a1fc_deployments[0][0],
"verifier_uuid": self._bc908ac9a1fc_verifier_uuid,
"status": consts.VerificationStatus.INIT,
"created_at": dt.datetime.utcnow(),
"updated_at": dt.datetime.utcnow(),
}]
)
def _check_bc908ac9a1fc(self, engine, data):
env_table = db_utils.get_table(engine, "envs")
platform_table = db_utils.get_table(engine, "platforms")
task_table = db_utils.get_table(engine, "tasks")
verifier_table = db_utils.get_table(engine, "verifiers")
verification_table = db_utils.get_table(engine, "verifications")
with engine.connect() as conn:
task = conn.execute(task_table.select().where(
task_table.c.uuid == self._bc908ac9a1fc_task_uuid)).first()
self.assertNotIn("deployment_uuid", task)
self.assertIn("env_uuid", task)
self.assertEqual(self._bc908ac9a1fc_deployments[0][0],
task["env_uuid"])
conn.execute(
task_table.delete().where(
task_table.c.uuid == self._bc908ac9a1fc_task_uuid))
v_id = self._bc908ac9a1fc_verification_uuid
verification = conn.execute(verification_table.select().where(
verification_table.c.uuid == v_id)).first()
self.assertNotIn("deployment_uuid", verification)
self.assertIn("env_uuid", verification)
self.assertEqual(self._bc908ac9a1fc_deployments[0][0],
verification["env_uuid"])
conn.execute(
verification_table.delete().where(
verification_table.c.uuid == v_id))
conn.execute(
verifier_table.delete().where(
verifier_table.c.uuid == self._bc908ac9a1fc_verifier_uuid))
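            # Each old deployment should now be an "env" row; OpenStack
            # configs additionally get one READY "existing@openstack"
            # platform row, while empty or custom configs get none.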
for d_uuid, d_cfg in self._bc908ac9a1fc_deployments:
env = conn.execute(env_table.select().where(
env_table.c.uuid == d_uuid)).first()
if d_cfg.get("creds", {}):
# openstack deployment
env_spec = json.loads(env["spec"])
self.assertEqual({"existing@openstack"},
set(env_spec.keys()))
self.assertEqual(
d_cfg["creds"]["openstack"],
env_spec["existing@openstack"])
platforms = conn.execute(platform_table.select().where(
platform_table.c.env_uuid == d_uuid)).fetchall()
self.assertEqual(1, len(platforms))
self.assertEqual("READY", platforms[0].status)
self.assertEqual("existing@openstack",
platforms[0].plugin_name)
self.assertEqual(env_spec["existing@openstack"],
json.loads(platforms[0].plugin_spec))
self.assertEqual("openstack",
platforms[0].platform_name)
self.assertEqual(
{"admin": {
"username": "admin",
"tenant_name": "demo",
"password": "myadminpass",
"region_name": "RegionOne",
"https_insecure": False,
"https_cacert": "",
"endpoint_type": "public",
"auth_url": "http://example.net:5000/v2.0/"},
"users": []},
json.loads(platforms[0].platform_data))
conn.execute(
platform_table.delete().where(
platform_table.c.env_uuid == d_uuid))
else:
if "creds" in d_cfg:
# empty deployment
self.assertEqual({}, json.loads(env["spec"]))
else:
                    # custom (non-OpenStack) config is preserved as-is
self.assertEqual(d_cfg, json.loads(env["spec"]))
platforms = conn.execute(platform_table.select().where(
platform_table.c.env_uuid == d_uuid)).fetchall()
self.assertEqual(0, len(platforms))
conn.execute(
env_table.delete().where(
env_table.c.uuid == d_uuid))
| 42.910488 | 79 | 0.470384 |
3134191fe5c1d8b09a4568170249ec1adfdbacc4 | 393 | py | Python | usertracker/migrations/0003_monitor_activated.py | nathanielCherian/django-usertracker | 399e70dee9c6b56fe241b92ac5affe644989c756 | [
"MIT"
] | null | null | null | usertracker/migrations/0003_monitor_activated.py | nathanielCherian/django-usertracker | 399e70dee9c6b56fe241b92ac5affe644989c756 | [
"MIT"
] | null | null | null | usertracker/migrations/0003_monitor_activated.py | nathanielCherian/django-usertracker | 399e70dee9c6b56fe241b92ac5affe644989c756 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.4 on 2020-12-24 19:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('usertracker', '0002_monitor_redirect'),
]
operations = [
migrations.AddField(
model_name='monitor',
name='activated',
field=models.BooleanField(default=False),
),
]
| 20.684211 | 53 | 0.605598 |
a30c9cd0bae160aa5824a173b30fef01a21b47a6 | 1,544 | py | Python | 1_Practice_Python/DB_Handler.py | CyberThulhu22/Python-Projects | 448f7b934e0a316cf87be36c7b294f81b039a008 | [
"MIT"
] | null | null | null | 1_Practice_Python/DB_Handler.py | CyberThulhu22/Python-Projects | 448f7b934e0a316cf87be36c7b294f81b039a008 | [
"MIT"
] | null | null | null | 1_Practice_Python/DB_Handler.py | CyberThulhu22/Python-Projects | 448f7b934e0a316cf87be36c7b294f81b039a008 | [
"MIT"
] | 1 | 2022-01-05T04:19:44.000Z | 2022-01-05T04:19:44.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
NAME: DB_Handler.py
VERSION: 1.0
AUTHOR: Jesse Leverett (CyberThulhu)
STATUS: Building Initial code framework
DESCRIPTION: Simple SQLite helper with a text menu for viewing and updating rows in the QuizTest table.
TO-DO:
COPYRIGHT © 2021 Jesse Leverett
"""
import sqlite3
from sqlite3 import Error
def sql_connection(db_file):
try:
        conn = sqlite3.connect(db_file)
        print("Connected")
        return conn
except Error as e:
print(e)
return None
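# Example usage (a sketch; "quiz.db" is a hypothetical path):
#   conn = sql_connection("quiz.db")
#   if conn is not None:
#       show_all_db(conn)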
def show_all_db(conn):
c = conn.cursor()
c.execute('SELECT * FROM QuizTest')
data = c.fetchall()
for row in data:
print(row)
def update_db(conn):
c = conn.cursor()
    # Quote the literal and drop the invalid "ID = *" filter so the
    # statement is valid SQLite; it updates every row.
    c.execute("UPDATE QuizTest SET Answer = 'asdf'")
    conn.commit()
    print("Total number of rows updated:", conn.total_changes)
def main():
database = "/Users/theenawman/Documents/GitHub/WiredUpQuiz/Test_JGN/main.db"
conn = sql_connection(database)
with conn:
print("Printing all table data")
show_all_db(conn)
def menu():
print (30 * '-')
print (" M A I N - M E N U")
print (30 * '-')
print ("1. Show All DB Entrys")
print ("2. Delete Entry")
print ("3. Update Entry")
print ("4. Exit")
print (30 * '-')
    selection = input("Please Select A Choice:")
    selection = int(selection)
if selection == 1:
main()
elif selection == 2:
print("b")
elif selection == 3:
print("c")
elif selection == 4:
print("Exit")
else:
print("Unknown Command")
if __name__ == '__main__':
menu()
| 21.150685 | 80 | 0.601684 |
513c169e5879653af161b4170baf2e97965f2167 | 344 | py | Python | colossalai/nn/optimizer/utils.py | RichardoLuo/ColossalAI | 797a9dc5a9e801d7499b8667c3ef039a38aa15ba | [
"Apache-2.0"
] | 1,630 | 2021-10-30T01:00:27.000Z | 2022-03-31T23:02:41.000Z | colossalai/nn/optimizer/utils.py | RichardoLuo/ColossalAI | 797a9dc5a9e801d7499b8667c3ef039a38aa15ba | [
"Apache-2.0"
] | 166 | 2021-10-30T01:03:01.000Z | 2022-03-31T14:19:07.000Z | colossalai/nn/optimizer/utils.py | RichardoLuo/ColossalAI | 797a9dc5a9e801d7499b8667c3ef039a38aa15ba | [
"Apache-2.0"
] | 253 | 2021-10-30T06:10:29.000Z | 2022-03-31T13:30:06.000Z | class CpuAdamCounter(object):
"""Used to record the total number of CPU Adam.
We must use it to avoid hybrid cpu adam and cpu adam using the same id.
"""
def __init__(self):
self.number = 0
def __call__(self):
self.number += 1
return self.number - 1
CPU_ADAM_CNT = CpuAdamCounter()
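# Example: CPU_ADAM_CNT() -> 0, CPU_ADAM_CNT() -> 1, CPU_ADAM_CNT() -> 2;
# each CPU Adam instance takes the next unique id.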
| 22.933333 | 76 | 0.610465 |
d0097454edddc5266bd21d2d1cbf601fefe8614e | 19,980 | py | Python | topoflow/gui_old/ZZ_Old_Stuff/Input_Dialog_LAST.py | mintproject/topoflow36 | 1dd25ce1b37cef129c8ee74b30af851658a7a8d9 | [
"MIT"
] | null | null | null | topoflow/gui_old/ZZ_Old_Stuff/Input_Dialog_LAST.py | mintproject/topoflow36 | 1dd25ce1b37cef129c8ee74b30af851658a7a8d9 | [
"MIT"
] | null | null | null | topoflow/gui_old/ZZ_Old_Stuff/Input_Dialog_LAST.py | mintproject/topoflow36 | 1dd25ce1b37cef129c8ee74b30af851658a7a8d9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# August 8 & 11, 2008
# February 10, 2009
# April 21, 2009
# S.D. Peckham
## This needs to set TF home directory, etc. #################
import wx
import wx.html
import xml.dom.minidom
import time
import webbrowser ## standard Python module
# Check these out later.
# import urllib
# import xmllib (deprecated: use xml.sax instead)
# import htmllib
#-------------------------------------------------------------
class TF_Input_Dialog(wx.Frame):
#-----------------------------------------------------
# Notes: This class is for creating TopoFlow input
# dialogs, which all use a similar template.
# Initial settings, labels, etc. are read
# from the XML file provided
# Default is wx.ALIGN_LEFT for labels & text
#-----------------------------------------------------
#---------------------------
# Create top-level dialog
#---------------------------
def __init__(self, parent=None, id=-1, \
xml_file="xml/snowmelt_degree_day.xml", \
title="Snowmelt: Degree-Day Input Dialog"):
#-------------------------------------------
# Initialize a wxPython frame, add a panel
#-------------------------------------------
wx.Frame.__init__(self, parent, id, title)
panel = wx.Panel(self, -1)
sizer = wx.BoxSizer(wx.VERTICAL)
panel.SetBackgroundColour('Light Blue')
self.panel = panel
self.main_sizer = sizer
# self.panel.SetBackgroundColour('White')
# self.panel.SetForegroundColour('White')
#------------------------------------------------
# Saving parent allows collected values to be
# stored in parent frame before this one closes.
#------------------------------------------------
self.parent = parent
self.title = title
self.vgap = 10
self.hgap = 6
self.type_code = {'Scalar':0, 'Time series':1, \
'Grid':2, 'Grid sequence':3}
self.type_name = {0:'Scalar', 1:'Time series', \
2:'Grid', 3:'Grid sequence'}
#--------------------------------------------
# Set XML file to read info from, including
# the name of the HTML help file
#--------------------------------------------
self.xml_file = xml_file
self.read_var_info()
#-----------------------------------------
# Create objects to appear in the dialog
#-----------------------------------------
var_box = self.variable_box() # (returns a sizer)
pad_row1 = wx.StaticLine(panel)
time_box = self.timestep_box()
pad_row2 = wx.StaticLine(panel)
button_bar = self.button_bar()
pad_row3 = wx.StaticLine(panel)
self.var_box = var_box ########## EXPERIMENT
#--------------------------------
# Add objects to the main sizer
#--------------------------------
vpad = 0
sizer.Add(var_box, 0, wx.ALL, self.vgap)
sizer.Add(pad_row1, 0, wx.ALL, self.vgap)
sizer.Add(time_box, 0, wx.ALL, vpad)
sizer.Add(pad_row2, 0, wx.ALL, self.vgap)
sizer.Add(button_bar, 0, wx.ALL, vpad)
sizer.Add(pad_row3, 0, wx.ALL, self.vgap)
panel.SetSizer(sizer)
sizer.Fit(self)
## sizer.SetSizeHints(self) # (not needed)
#--------------------------------------------
# Doesn't work for some reason (see p. 328)
#--------------------------------------------
# self.SetSizer(sizer)
# self.Fit()
# __init__()
#----------------------------------------------------------------
def get_xml_tag_data(self, var, tag_name):
#-------------------------------------------
# Get data string for an XML variable tag
#-------------------------------------------
vstr = var.getElementsByTagName(tag_name)[0].firstChild.data
return vstr.strip()
# get_xml_tag_data()
#----------------------------------------------------------------
def read_var_info(self):
#--------------------------------------------------
# Read descriptions of input variables from file
#--------------------------------------------------
self.var_names = []
self.var_units = []
self.var_values = []
self.var_types = []
self.var_typelist = []
#---------------------------------------------
# Read variable info from an XML file
#---------------------------------------------
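        #------------------------------------------------------
        # Expected XML layout (a sketch inferred from the tag
        # names read below; not a full schema):
        #
        #   <variable>
        #     <symbol>..</symbol>  <units>..</units>
        #     <value>..</value>    <type>Scalar</type>
        #     <typelist>Scalar,Time series,Grid,Grid sequence</typelist>
        #   </variable>
        #   <timestep> <label/> <value/> <units/> </timestep>
        #   <help_file>..</help_file>
        #------------------------------------------------------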
        # Read the entire file into a string (and close the file promptly)
        with open(self.xml_file, 'r') as file_obj:
            doc_string = file_obj.read()
dom = xml.dom.minidom.parseString( doc_string )
variables = dom.firstChild.getElementsByTagName("variable")
# print "variables =", variables
for var in variables:
symbol = self.get_xml_tag_data(var, "symbol")
units = self.get_xml_tag_data(var, "units")
value = self.get_xml_tag_data(var, "value")
vtype = self.get_xml_tag_data(var, "type")
typelist = self.get_xml_tag_data(var, "typelist")
self.var_names.append( symbol )
self.var_values.append( value )
self.var_types.append( vtype )
typelist = typelist.split(",")
self.var_typelist.append( typelist )
self.var_units.append( units )
self.n_vars = len(self.var_names)
## This doesn't work
## symbol = variables.getElementsByTagName("symbol")[0].firstChild.data
## for symbol in symbols:
## print "symbol =", symbol
#-----------------------------
# Read timestep information
#-----------------------------
timesteps = dom.firstChild.getElementsByTagName("timestep")
if (len(timesteps) > 0):
var = timesteps[0]
self.timestep_label = self.get_xml_tag_data(var, "label")
self.timestep_value = self.get_xml_tag_data(var, "value")
self.timestep_units = self.get_xml_tag_data(var, "units")
#-------------------------------
# Read name of HTML help file
#-------------------------------
help_files = dom.firstChild.getElementsByTagName("help_file")
if (len(help_files) > 0):
self.help_file = help_files[0].firstChild.data.strip()
# print "help file =", self.help_file
# read_var_info()
#----------------------------------------------------------------
def variable_box(self):
#-------------------------------------------
# Create sizer box for all input var info
# (This provides a frame and title.)
#-------------------------------------------
vbox = wx.StaticBox(self.panel, -1, "Input variables:")
sizer = wx.StaticBoxSizer(vbox, wx.VERTICAL)
#---------------------------------------------
# Create another sizer box for rows of info
#---------------------------------------------
# Use "vgap=0" for most compact dialog
#---------------------------------------------
header = ["Variable:", "Type:", "Scalar or Grid Filename:",
"Units:"]
nh = len(header)
fg_sizer = wx.FlexGridSizer(cols=nh, hgap=self.hgap, vgap=0)
#fg_sizer = wx.FlexGridSizer(cols=nh, hgap=self.hgap, vgap=self.vgap)
#----------------------------------------------
# Specify which columns can expand on resize
#----------------------------------------------
# fg_sizer.AddGrowableCol(1)
self.text_boxes = []
#-------------------------
# Add the column headers
#-------------------------
        for col in range(nh):
            L1 = wx.StaticText(self.panel, -1, header[col])
            fg_sizer.Add(L1, 0, wx.ALL, self.hgap)
#------------------------------------------------
# Create a row in the dialog for each variable
#------------------------------------------------
for row in range(self.n_vars):
vstr = self.var_names[row] + ": "
label = wx.StaticText(self.panel, -1, vstr)
#----------------------------------------------------------
row_ID = (5000 + row) #####
### row_ID = 'tf_row_' + str(row) # (ID can't be string.)
dlist = wx.Choice(self.panel, row_ID, choices=self.var_typelist[row])
dlist.Select(self.type_code[self.var_types[row]])
self.Bind(wx.EVT_CHOICE, self.on_Type_Choice, dlist)
#----------------------------------------------------------
text = wx.TextCtrl(self.panel, -1, self.var_values[row],
size=(160,-1))
self.text_boxes.append( text ) #######
#----------------------------------------------------------
ustr = wx.StaticText(self.panel, -1, self.var_units[row])
#----------------------------------------------------------
fg_sizer.Add(label, 0, wx.ALL, self.hgap)
fg_sizer.Add(dlist, 0, wx.ALL, self.hgap)
fg_sizer.Add(text, 0, wx.ALL, self.hgap)
fg_sizer.Add(ustr, 0, wx.ALL, self.hgap)
#---------------------------------
# Add fg_sizer to the main sizer
#---------------------------------
sizer.Add(fg_sizer, 0, wx.ALL, self.vgap) ######
return sizer
#-----------------------------------------
# Leave this to "main" to avoid trouble
#-----------------------------------------
# self.panel.SetSizer(sizer)
# sizer.Fit(self)
# sizer.SetSizeHints(self)
# variable_box()
#----------------------------------------------------------------
def timestep_box(self):
#---------------------------------------------
# Create sizer box for the process timestep
#---------------------------------------------
L1 = wx.StaticText(self.panel, -1, self.timestep_label)
text = wx.TextCtrl(self.panel, -1, self.timestep_value)
L2 = wx.StaticText(self.panel, -1, self.timestep_units)
#------------------------------------------------------------
box = wx.BoxSizer(wx.HORIZONTAL)
box.Add((24,24), 1)
box.Add(L1)
box.Add((self.hgap, self.hgap), 1)
box.Add(text)
box.Add((self.hgap, self.hgap), 1)
box.Add(L2)
box.Add((self.vgap, self.vgap), 1)
# box.AddMany([L1, text, L2]) #, 1, wx.EXPAND)
# self.SetSizer(box) ## (Bad to do this here.)
return box
# timestep_box()
#----------------------------------------------------------------
def button_bar(self):
#----------------------------
# Create bottom button bar
#----------------------------
start_btn = wx.Button(self.panel, -1, "Start")
help_btn = wx.Button(self.panel, -1, "Help")
cancel_btn = wx.Button(self.panel, -1, "Cancel")
#-------------------------------------------------
box = wx.BoxSizer(wx.HORIZONTAL)
box.Add((20,20), 1)
box.Add(start_btn)
box.Add((10,10), 1)
box.Add(help_btn)
box.Add((10,10), 1)
box.Add(cancel_btn)
box.Add((10,10), 1)
# box.AddMany([start_btn, help_btn, cancel_btn])
#-----------------------------------------------------
self.Bind(wx.EVT_BUTTON, self.on_Start, start_btn)
self.Bind(wx.EVT_BUTTON, self.on_Help, help_btn)
self.Bind(wx.EVT_BUTTON, self.on_Cancel, cancel_btn)
# self.SetSizer(box) ## (Bad to do this here.)
return box
# button_bar()
#----------------------------------------------------------------
def on_Type_Choice(self, event):
#-----------------------------------------
# Event handler for the Type droplists.
#-----------------------------------------
print('droplist index =', event.GetSelection())
print('droplist string =', event.GetString())
# print 'event.Id =', event.Id
# print 'event.GetId() =', event.GetId()
#---------------------------------------
# Need to know the row of the droplist
#---------------------------------------
row = (event.Id - 5000)
# Idea: row_ID = 'tf_row_' + str(row)
# row = int(event.Id[7:]) # (ID can't be string ?)
print('row =', row)
index = event.GetSelection()
self.var_types[ row ] = self.type_name[ index ]
print(' ')
# on_Type_Choice()
#----------------------------------------------------------------
def on_Start(self, event):
################################################
# Make sure that timestep gets saved also.
################################################
#-----------------------------------------
# Event handler for the Start button.
# Read values from text boxes and save
# them somewhere with droplist settings
#--------------------------------------------
# Can use validator with TextCtrl to check
# that values entered are valid (p. 282)
#--------------------------------------------
k = 0
for box in self.text_boxes:
val_string = box.GetValue()
if (len(val_string) == 0):
wx.MessageBox("Each text box must contain a number or a filename.", "Sorry,")
box.SetBackgroundColour("pink")
time.sleep(0.5)
box.SetFocus()
box.Refresh()
return
else:
#-----------------------------------------------
# Save values from GUI (numerical or string)
# into the state of the dialog's parent frame.
# (use var_types, var_units, var_values)
#-----------------------------------------------
self.var_values[k] = val_string # (update self)
print(' ')
print('Saving values into parent...')
value = val_string
name = self.var_names[k]
tcode = self.type_code[ self.var_types[k] ]
field_str = 'self.parent.' + name
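                #--------------------------------------------------
                # Example (for a variable named "c0"): a Scalar
                # entry "2.7" sets self.parent.c0 = 2.7, c0_type
                # = 0 and c0_file = ''; any other type sets c0 =
                # None and stores the string in c0_file instead.
                #--------------------------------------------------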
print("value =", value)
exec(field_str + "_type = " + str(tcode))
if (tcode == 0):
exec(field_str + '= ' + value) # (scalar)
exec(field_str + "_file = ''")
else:
exec(field_str + '= None')
exec(field_str + '_file = "' + value + '"')
k += 1
#-----------------------------------------
# At this point, values in self are lost
# but values in parent TLB are retained.
#-----------------------------------------
self.Destroy()
# on_Start()
#----------------------------------------------------------------
def on_Help(self, event):
#------------------------------------
# EXPERIMENT (4/22/09) This works.
#------------------------------------
## self.main_sizer.Hide(self.var_box)
## self.main_sizer.Layout()
## return
#------------------------------------------
# Event handler for the Help button.
# Open the default browser (e.g. Safari)
#------------------------------------------
result = webbrowser.open('file://' + self.help_file)
# For testing only
## print 'For testing: Some values saved in parent.'
## print 'self.c0 =', self.c0
## print 'self.c0_type =', self.c0_type
## print 'self.c0_file =', self.c0_file
# on_Help()
#----------------------------------------------------------------
def on_Help2(self, event):
#-----------------------------------------------
# Event handler for the Help button.
# Alternate method that uses HTML_Help_Window
# class, defined below.
#-----------------------------------------------
app = wx.PySimpleApp()
frame = HTML_Help_Window(parent=None,
title="HTML Help System",
html_file=self.help_file)
frame.Show()
app.MainLoop()
# on_Help2()
#----------------------------------------------------------------
def on_Cancel(self, event):
#----------------------------------------
# Event handler for the Cancel button.
#----------------------------------------
self.Destroy()
# on_Cancel()
#----------------------------------------------------------------
#-----------------------------------------
# Class for displaying HTML help
# (now using webbrowser module instead).
#-----------------------------------------
##class HTML_Help_Window(wx.Frame):
## def __init__(self, parent, title, html_file):
## wx.Frame.__init__(self, parent, -1, title,
## size=(700,800), pos=(600,50))
## html = wx.html.HtmlWindow(self)
## if "gtk2" in wx.PlatformInfo:
## html.SetStandardFonts()
##
## html.LoadPage(html_file)
#-------------------------------------------------------------
def Get_Input_Vars():
#------------------------------------------------------
# Note: This is for testing. It creates two dialogs
# that are identical, on top of each other.
# Change values in the top one (the child) and
# then click on its Start button. The child
# dialog closes. Now click on the Help button
# of the remaining (parent) to print out some
# of the stored values. Later on, the parent
# will be the TopoFlow main wizard.
#------------------------------------------------------
# Note: You need to comment out the last few lines
# in this file before using this function.
#------------------------------------------------------
#----------------------------------
# Open a TopoFlow input dialog as
# the "main program window"
#----------------------------------
app = wx.PySimpleApp()
frame1 = TF_Input_Dialog(xml_file="xml/snowmelt_degree_day.xml")
frame1.Show()
#--------------------------------------------------
# Open a 2nd dialog that has frame1 as its parent
# and which will store its values into its parent
# before it is destroyed.
#--------------------------------------------------
frame2 = TF_Input_Dialog(parent=frame1, \
xml_file="xml/snowmelt_degree_day.xml")
frame2.Show()
#-----------------------
# Start the event loop
#-----------------------
app.MainLoop()
# Get_Input_Vars()
#-------------------------------------------------------------
#---------------------------------------
# Support two different usage options
#---------------------------------------
if (__name__ == '__main__'):
app = wx.PySimpleApp()
# frame = TF_Input_Dialog(parent=None, id=-1)
frame = TF_Input_Dialog(parent=None, id=-1, \
xml_file="xml/infil_richards_1D.xml", \
title="Infiltration: Richards 1D Input Dialog")
frame.Show()
app.MainLoop()
| 40.04008 | 93 | 0.405055 |
44cce5b9c6abdc67e1c0179a8f6653139047bdb2 | 2,055 | py | Python | submissions/day12.py | ShreyGupta19/advent-of-code | ba994238ec97d067e4c2d2e60ca10586affbc1cf | [
"MIT"
] | null | null | null | submissions/day12.py | ShreyGupta19/advent-of-code | ba994238ec97d067e4c2d2e60ca10586affbc1cf | [
"MIT"
] | null | null | null | submissions/day12.py | ShreyGupta19/advent-of-code | ba994238ec97d067e4c2d2e60ca10586affbc1cf | [
"MIT"
] | null | null | null | from advent import AdventProblem
def preprocess(line):
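  # Split an instruction like "F10" into the pair ('F', 10).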
return line[0], int(line[1:])
def step_in_dir(pos, heading, steps):
  new_pos = list(pos)
  if heading == 'N':
    new_pos[1] += steps
  if heading == 'S':
    new_pos[1] -= steps
  if heading == 'E':
    new_pos[0] += steps
  if heading == 'W':
    new_pos[0] -= steps
return new_pos
def part_1(instrs):
dirs = ['E', 'N', 'W', 'S']
reversed_dirs = list(reversed(dirs))
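  # dirs is ordered counter-clockwise, so an 'L' turn of 90*k degrees
  # advances k places in dirs (and 'R' does the same in reversed_dirs);
  # e.g. facing 'E', an L90 turn yields 'N'.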
curr_dir = 'E'
pos = (0, 0)
for instr, steps in instrs:
if instr in dirs:
pos = step_in_dir(pos, instr, steps)
elif instr == 'F':
pos = step_in_dir(pos, curr_dir, steps)
elif instr == 'L':
curr_dir = dirs[(steps // 90 + dirs.index(curr_dir)) % len(dirs)]
elif instr == 'R':
curr_dir = reversed_dirs[(steps // 90 + reversed_dirs.index(curr_dir)) % len(reversed_dirs)]
return abs(pos[1]) + abs(pos[0])
def move_left(waypoint_pos, degs):
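  # Rotate the waypoint about the ship by degs counter-clockwise:
  # 90 degrees maps (x, y) -> (-y, x), e.g. (10, 4) -> (-4, 10).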
if degs == 90:
waypoint_pos = (-waypoint_pos[1], waypoint_pos[0])
if degs == 180:
waypoint_pos = (-waypoint_pos[0], -waypoint_pos[1])
if degs == 270:
waypoint_pos = (waypoint_pos[1], -waypoint_pos[0])
return waypoint_pos
def part_2(instrs):
dirs = ['E', 'N', 'W', 'S']
reversed_dirs = list(reversed(dirs))
pos = [0, 0]
waypoint_pos = [10, 1]
for instr, steps in instrs:
if instr in dirs:
waypoint_pos = step_in_dir(waypoint_pos, instr, steps)
elif instr == 'F':
pos[0] += waypoint_pos[0] * steps
pos[1] += waypoint_pos[1] * steps
elif instr == 'L':
waypoint_pos = move_left(waypoint_pos, steps)
elif instr == 'R':
waypoint_pos = move_left(waypoint_pos, 360 - steps)
return abs(pos[1]) + abs(pos[0])
if __name__ == '__main__':
part1 = AdventProblem(12, 1, preprocess)
part1.add_solution(part_1)
part1.run()
part2 = AdventProblem(12, 2, preprocess)
part2.add_solution(part_2)
part2.run()
| 28.541667 | 104 | 0.56691 |
bfa72ef5f070b7ac4c86b59ee8af991c8c82e879 | 26,270 | py | Python | vendor/chromium/mojo/public/tools/bindings/pylib/mojom/generate/translate.py | mkljczk/fivem | 187b2e5f922297bcbde5cfb1db70815223c53680 | [
"MIT"
] | 6 | 2021-03-29T05:26:18.000Z | 2021-07-13T12:53:03.000Z | vendor/chromium/mojo/public/tools/bindings/pylib/mojom/generate/translate.py | big-rip/fivem-1 | c08af22110802e77816dfdde29df1662f8dea563 | [
"MIT"
] | 7 | 2021-08-31T22:30:30.000Z | 2022-03-24T06:50:38.000Z | vendor/chromium/mojo/public/tools/bindings/pylib/mojom/generate/translate.py | big-rip/fivem-1 | c08af22110802e77816dfdde29df1662f8dea563 | [
"MIT"
] | 3 | 2021-02-15T16:38:50.000Z | 2021-12-09T08:54:34.000Z | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Convert parse tree to AST.
This module converts the parse tree to the AST we use for code generation. The
main entry point is OrderedModule, which gets passed the parser
representation of a mojom file. When called it's assumed that all imports have
already been parsed and converted to ASTs before.
"""
import itertools
import os
import re
import mojom.generate.module as mojom
from mojom.parse import ast
def _DuplicateName(values):
"""Returns the 'mojom_name' of the first entry in |values| whose 'mojom_name'
has already been encountered. If there are no duplicates, returns None."""
names = set()
for value in values:
if value.mojom_name in names:
return value.mojom_name
names.add(value.mojom_name)
return None
def _ElemsOfType(elems, elem_type, scope):
"""Find all elements of the given type.
Args:
elems: {Sequence[Any]} Sequence of elems.
elem_type: {Type[C]} Extract all elems of this type.
scope: {str} The name of the surrounding scope (e.g. struct
definition). Used in error messages.
Returns:
{List[C]} All elems of matching type.
"""
assert isinstance(elem_type, type)
result = [elem for elem in elems if isinstance(elem, elem_type)]
duplicate_name = _DuplicateName(result)
if duplicate_name:
raise Exception('Names in mojom must be unique within a scope. The name '
'"%s" is used more than once within the scope "%s".' %
(duplicate_name, scope))
return result
def _MapKind(kind):
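  # Translates a parser type name into the compact "spec" notation used in
  # this module, e.g. 'uint8' -> 'u8' and 'string?' -> '?s'; composite
  # names are handled by the suffix/prefix checks below ('uint8[]' -> 'a:u8').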
map_to_kind = {'bool': 'b',
'int8': 'i8',
'int16': 'i16',
'int32': 'i32',
'int64': 'i64',
'uint8': 'u8',
'uint16': 'u16',
'uint32': 'u32',
'uint64': 'u64',
'float': 'f',
'double': 'd',
'string': 's',
'handle': 'h',
'handle<data_pipe_consumer>': 'h:d:c',
'handle<data_pipe_producer>': 'h:d:p',
'handle<message_pipe>': 'h:m',
'handle<shared_buffer>': 'h:s'}
if kind.endswith('?'):
base_kind = _MapKind(kind[0:-1])
# NOTE: This doesn't rule out enum types. Those will be detected later, when
# cross-reference is established.
reference_kinds = ('m', 's', 'h', 'a', 'r', 'x', 'asso', 'rmt', 'rcv',
'rma', 'rca')
if re.split('[^a-z]', base_kind, 1)[0] not in reference_kinds:
raise Exception(
'A type (spec "%s") cannot be made nullable' % base_kind)
return '?' + base_kind
if kind.endswith('}'):
lbracket = kind.rfind('{')
value = kind[0:lbracket]
return 'm[' + _MapKind(kind[lbracket+1:-1]) + '][' + _MapKind(value) + ']'
if kind.endswith(']'):
lbracket = kind.rfind('[')
typename = kind[0:lbracket]
return 'a' + kind[lbracket+1:-1] + ':' + _MapKind(typename)
if kind.endswith('&'):
return 'r:' + _MapKind(kind[0:-1])
if kind.startswith('asso<'):
assert kind.endswith('>')
return 'asso:' + _MapKind(kind[5:-1])
if kind.startswith('rmt<'):
assert kind.endswith('>')
return 'rmt:' + _MapKind(kind[4:-1])
if kind.startswith('rcv<'):
assert kind.endswith('>')
return 'rcv:' + _MapKind(kind[4:-1])
if kind.startswith('rma<'):
assert kind.endswith('>')
return 'rma:' + _MapKind(kind[4:-1])
if kind.startswith('rca<'):
assert kind.endswith('>')
return 'rca:' + _MapKind(kind[4:-1])
if kind in map_to_kind:
return map_to_kind[kind]
return 'x:' + kind
def _AttributeListToDict(attribute_list):
if attribute_list is None:
return None
assert isinstance(attribute_list, ast.AttributeList)
# TODO(vtl): Check for duplicate keys here.
return dict([(attribute.key, attribute.value)
for attribute in attribute_list])
builtin_values = frozenset([
"double.INFINITY",
"double.NEGATIVE_INFINITY",
"double.NAN",
"float.INFINITY",
"float.NEGATIVE_INFINITY",
"float.NAN"])
def _IsBuiltinValue(value):
return value in builtin_values
def _LookupKind(kinds, spec, scope):
"""Tries to find which Kind a spec refers to, given the scope in which its
referenced. Starts checking from the narrowest scope to most general. For
example, given a struct field like
Foo.Bar x;
Foo.Bar could refer to the type 'Bar' in the 'Foo' namespace, or an inner
type 'Bar' in the struct 'Foo' in the current namespace.
|scope| is a tuple that looks like (namespace, struct/interface), referring
to the location where the type is referenced."""
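  # e.g. with scope ('ns', 'Struct'), spec 'x:Bar' is probed as
  # 'x:ns.Struct.Bar', then 'x:ns.Bar', then 'x:Bar'.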
if spec.startswith('x:'):
mojom_name = spec[2:]
for i in range(len(scope), -1, -1):
test_spec = 'x:'
if i > 0:
test_spec += '.'.join(scope[:i]) + '.'
test_spec += mojom_name
kind = kinds.get(test_spec)
if kind:
return kind
return kinds.get(spec)
def _LookupValue(values, mojom_name, scope, kind):
"""Like LookupKind, but for constant values."""
# If the type is an enum, the value can be specified as a qualified name, in
# which case the form EnumName.ENUM_VALUE must be used. We use the presence
# of a '.' in the requested name to identify this. Otherwise, we prepend the
# enum name.
if isinstance(kind, mojom.Enum) and '.' not in mojom_name:
mojom_name = '%s.%s' % (kind.spec.split(':', 1)[1], mojom_name)
for i in reversed(range(len(scope) + 1)):
test_spec = '.'.join(scope[:i])
if test_spec:
test_spec += '.'
test_spec += mojom_name
value = values.get(test_spec)
if value:
return value
return values.get(mojom_name)
def _FixupExpression(module, value, scope, kind):
"""Translates an IDENTIFIER into a built-in value or structured NamedValue
object."""
if isinstance(value, tuple) and value[0] == 'IDENTIFIER':
# Allow user defined values to shadow builtins.
result = _LookupValue(module.values, value[1], scope, kind)
if result:
if isinstance(result, tuple):
raise Exception('Unable to resolve expression: %r' % value[1])
return result
if _IsBuiltinValue(value[1]):
return mojom.BuiltinValue(value[1])
return value
def _Kind(kinds, spec, scope):
"""Convert a type name into a mojom.Kind object.
As a side-effect this function adds the result to 'kinds'.
Args:
kinds: {Dict[str, mojom.Kind]} All known kinds up to this point, indexed by
their names.
spec: {str} A name uniquely identifying a type.
scope: {Tuple[str, str]} A tuple that looks like (namespace,
struct/interface), referring to the location where the type is
referenced.
Returns:
{mojom.Kind} The type corresponding to 'spec'.
"""
kind = _LookupKind(kinds, spec, scope)
if kind:
return kind
if spec.startswith('?'):
kind = _Kind(kinds, spec[1:], scope).MakeNullableKind()
elif spec.startswith('a:'):
kind = mojom.Array(_Kind(kinds, spec[2:], scope))
elif spec.startswith('asso:'):
inner_kind = _Kind(kinds, spec[5:], scope)
if isinstance(inner_kind, mojom.InterfaceRequest):
kind = mojom.AssociatedInterfaceRequest(inner_kind)
else:
kind = mojom.AssociatedInterface(inner_kind)
elif spec.startswith('a'):
colon = spec.find(':')
length = int(spec[1:colon])
kind = mojom.Array(_Kind(kinds, spec[colon+1:], scope), length)
elif spec.startswith('r:'):
kind = mojom.InterfaceRequest(_Kind(kinds, spec[2:], scope))
elif spec.startswith('rmt:'):
kind = mojom.PendingRemote(_Kind(kinds, spec[4:], scope))
elif spec.startswith('rcv:'):
kind = mojom.PendingReceiver(_Kind(kinds, spec[4:], scope))
elif spec.startswith('rma:'):
kind = mojom.PendingAssociatedRemote(_Kind(kinds, spec[4:], scope))
elif spec.startswith('rca:'):
kind = mojom.PendingAssociatedReceiver(_Kind(kinds, spec[4:], scope))
elif spec.startswith('m['):
# Isolate the two types from their brackets.
# It is not allowed to use map as key, so there shouldn't be nested ']'s
# inside the key type spec.
key_end = spec.find(']')
assert key_end != -1 and key_end < len(spec) - 1
assert spec[key_end+1] == '[' and spec[-1] == ']'
first_kind = spec[2:key_end]
second_kind = spec[key_end+2:-1]
kind = mojom.Map(_Kind(kinds, first_kind, scope),
_Kind(kinds, second_kind, scope))
else:
kind = mojom.Kind(spec)
kinds[spec] = kind
return kind
def _Import(module, import_module):
# Copy the struct kinds from our imports into the current module.
importable_kinds = (mojom.Struct, mojom.Union, mojom.Enum, mojom.Interface)
for kind in import_module.kinds.values():
if (isinstance(kind, importable_kinds) and
kind.module.path == import_module.path):
module.kinds[kind.spec] = kind
# Ditto for values.
for value in import_module.values.values():
if value.module.path == import_module.path:
module.values[value.GetSpec()] = value
return import_module
def _Struct(module, parsed_struct):
"""
Args:
module: {mojom.Module} Module currently being constructed.
parsed_struct: {ast.Struct} Parsed struct.
Returns:
{mojom.Struct} AST struct.
"""
struct = mojom.Struct(module=module)
struct.mojom_name = parsed_struct.mojom_name
struct.native_only = parsed_struct.body is None
struct.spec = 'x:' + module.mojom_namespace + '.' + struct.mojom_name
module.kinds[struct.spec] = struct
if struct.native_only:
struct.enums = []
struct.constants = []
struct.fields_data = []
else:
struct.enums = map(
lambda enum: _Enum(module, enum, struct),
_ElemsOfType(parsed_struct.body, ast.Enum, parsed_struct.mojom_name))
struct.constants = map(
lambda constant: _Constant(module, constant, struct),
_ElemsOfType(parsed_struct.body, ast.Const, parsed_struct.mojom_name))
# Stash fields parsed_struct here temporarily.
struct.fields_data = _ElemsOfType(
parsed_struct.body, ast.StructField, parsed_struct.mojom_name)
struct.attributes = _AttributeListToDict(parsed_struct.attribute_list)
# Enforce that a [Native] attribute is set to make native-only struct
# declarations more explicit.
if struct.native_only:
if not struct.attributes or not struct.attributes.get('Native', False):
raise Exception("Native-only struct declarations must include a " +
"Native attribute.")
if struct.attributes and struct.attributes.get('CustomSerializer', False):
struct.custom_serializer = True
return struct
def _Union(module, parsed_union):
"""
Args:
module: {mojom.Module} Module currently being constructed.
parsed_union: {ast.Union} Parsed union.
Returns:
{mojom.Union} AST union.
"""
union = mojom.Union(module=module)
union.mojom_name = parsed_union.mojom_name
union.spec = 'x:' + module.mojom_namespace + '.' + union.mojom_name
module.kinds[union.spec] = union
# Stash fields parsed_union here temporarily.
union.fields_data = _ElemsOfType(
parsed_union.body, ast.UnionField, parsed_union.mojom_name)
union.attributes = _AttributeListToDict(parsed_union.attribute_list)
return union
def _StructField(module, parsed_field, struct):
"""
Args:
module: {mojom.Module} Module currently being constructed.
parsed_field: {ast.StructField} Parsed struct field.
struct: {mojom.Struct} Struct this field belongs to.
Returns:
{mojom.StructField} AST struct field.
"""
field = mojom.StructField()
field.mojom_name = parsed_field.mojom_name
field.kind = _Kind(
module.kinds, _MapKind(parsed_field.typename),
(module.mojom_namespace, struct.mojom_name))
field.ordinal = parsed_field.ordinal.value if parsed_field.ordinal else None
field.default = _FixupExpression(
module, parsed_field.default_value,
(module.mojom_namespace, struct.mojom_name), field.kind)
field.attributes = _AttributeListToDict(parsed_field.attribute_list)
return field
def _UnionField(module, parsed_field, union):
"""
Args:
module: {mojom.Module} Module currently being constructed.
parsed_field: {ast.UnionField} Parsed union field.
union: {mojom.Union} Union this fields belong to.
Returns:
{mojom.UnionField} AST union.
"""
field = mojom.UnionField()
field.mojom_name = parsed_field.mojom_name
field.kind = _Kind(
module.kinds, _MapKind(parsed_field.typename),
(module.mojom_namespace, union.mojom_name))
field.ordinal = parsed_field.ordinal.value if parsed_field.ordinal else None
field.default = _FixupExpression(
module, None, (module.mojom_namespace, union.mojom_name), field.kind)
field.attributes = _AttributeListToDict(parsed_field.attribute_list)
return field
def _Parameter(module, parsed_param, interface):
"""
Args:
module: {mojom.Module} Module currently being constructed.
parsed_param: {ast.Parameter} Parsed parameter.
union: {mojom.Interface} Interface this parameter belongs to.
Returns:
{mojom.Parameter} AST parameter.
"""
parameter = mojom.Parameter()
parameter.mojom_name = parsed_param.mojom_name
parameter.kind = _Kind(
module.kinds, _MapKind(parsed_param.typename),
(module.mojom_namespace, interface.mojom_name))
parameter.ordinal = (
parsed_param.ordinal.value if parsed_param.ordinal else None)
parameter.default = None # TODO(tibell): We never have these. Remove field?
parameter.attributes = _AttributeListToDict(parsed_param.attribute_list)
return parameter
def _Method(module, parsed_method, interface):
"""
Args:
module: {mojom.Module} Module currently being constructed.
parsed_method: {ast.Method} Parsed method.
interface: {mojom.Interface} Interface this method belongs to.
Returns:
{mojom.Method} AST method.
"""
method = mojom.Method(
interface, parsed_method.mojom_name,
ordinal=parsed_method.ordinal.value if parsed_method.ordinal else None)
method.parameters = map(
lambda parameter: _Parameter(module, parameter, interface),
parsed_method.parameter_list)
if parsed_method.response_parameter_list is not None:
method.response_parameters = map(
lambda parameter: _Parameter(module, parameter, interface),
parsed_method.response_parameter_list)
method.attributes = _AttributeListToDict(parsed_method.attribute_list)
# Enforce that only methods with response can have a [Sync] attribute.
if method.sync and method.response_parameters is None:
raise Exception("Only methods with response can include a [Sync] "
"attribute. If no response parameters are needed, you "
"could use an empty response parameter list, i.e., "
"\"=> ()\".")
return method
def _Interface(module, parsed_iface):
"""
Args:
module: {mojom.Module} Module currently being constructed.
parsed_iface: {ast.Interface} Parsed interface.
Returns:
{mojom.Interface} AST interface.
"""
interface = mojom.Interface(module=module)
interface.mojom_name = parsed_iface.mojom_name
interface.spec = 'x:' + module.mojom_namespace + '.' + interface.mojom_name
module.kinds[interface.spec] = interface
interface.enums = map(
lambda enum: _Enum(module, enum, interface),
_ElemsOfType(parsed_iface.body, ast.Enum, parsed_iface.mojom_name))
interface.constants = map(
lambda constant: _Constant(module, constant, interface),
_ElemsOfType(parsed_iface.body, ast.Const, parsed_iface.mojom_name))
# Stash methods parsed_iface here temporarily.
interface.methods_data = _ElemsOfType(
parsed_iface.body, ast.Method, parsed_iface.mojom_name)
interface.attributes = _AttributeListToDict(parsed_iface.attribute_list)
return interface
def _EnumField(module, enum, parsed_field, parent_kind):
"""
Args:
module: {mojom.Module} Module currently being constructed.
enum: {mojom.Enum} Enum this field belongs to.
parsed_field: {ast.EnumValue} Parsed enum value.
parent_kind: {mojom.Kind} The enclosing type.
Returns:
{mojom.EnumField} AST enum field.
"""
field = mojom.EnumField()
field.mojom_name = parsed_field.mojom_name
# TODO(mpcomplete): FixupExpression should be done in the second pass,
# so constants and enums can refer to each other.
# TODO(mpcomplete): But then, what if constants are initialized to an enum? Or
# vice versa?
if parent_kind:
field.value = _FixupExpression(
module, parsed_field.value,
(module.mojom_namespace, parent_kind.mojom_name), enum)
else:
field.value = _FixupExpression(
module, parsed_field.value, (module.mojom_namespace, ), enum)
field.attributes = _AttributeListToDict(parsed_field.attribute_list)
value = mojom.EnumValue(module, enum, field)
module.values[value.GetSpec()] = value
return field
def _ResolveNumericEnumValues(enum_fields):
"""
Given a reference to a list of mojom.EnumField, resolves and assigns their
values to EnumField.numeric_value.
Returns:
A tuple of the lowest and highest assigned enumerator value or None, None
if no enumerator values were assigned.
"""
# map of <mojom_name> -> integral value
resolved_enum_values = {}
prev_value = -1
min_value = None
max_value = None
for field in enum_fields:
# This enum value is +1 the previous enum value (e.g: BEGIN).
if field.value is None:
prev_value += 1
# Integral value (e.g: BEGIN = -0x1).
elif type(field.value) is str:
prev_value = int(field.value, 0)
# Reference to a previous enum value (e.g: INIT = BEGIN).
elif type(field.value) is mojom.EnumValue:
prev_value = resolved_enum_values[field.value.mojom_name]
else:
raise Exception("Unresolved enum value.")
resolved_enum_values[field.mojom_name] = prev_value
field.numeric_value = prev_value
if min_value is None or prev_value < min_value:
min_value = prev_value
if max_value is None or prev_value > max_value:
max_value = prev_value
return min_value, max_value
def _Enum(module, parsed_enum, parent_kind):
"""
Args:
module: {mojom.Module} Module currently being constructed.
parsed_enum: {ast.Enum} Parsed enum.
Returns:
{mojom.Enum} AST enum.
"""
enum = mojom.Enum(module=module)
enum.mojom_name = parsed_enum.mojom_name
enum.native_only = parsed_enum.enum_value_list is None
mojom_name = enum.mojom_name
if parent_kind:
mojom_name = parent_kind.mojom_name + '.' + mojom_name
enum.spec = 'x:%s.%s' % (module.mojom_namespace, mojom_name)
enum.parent_kind = parent_kind
enum.attributes = _AttributeListToDict(parsed_enum.attribute_list)
if not enum.native_only:
enum.fields = map(
lambda field: _EnumField(module, enum, field, parent_kind),
parsed_enum.enum_value_list)
enum.min_value, enum.max_value = _ResolveNumericEnumValues(enum.fields)
module.kinds[enum.spec] = enum
# Enforce that a [Native] attribute is set to make native-only enum
# declarations more explicit.
if enum.native_only:
if not enum.attributes or not enum.attributes.get('Native', False):
raise Exception("Native-only enum declarations must include a " +
"Native attribute.")
return enum
def _Constant(module, parsed_const, parent_kind):
"""
Args:
module: {mojom.Module} Module currently being constructed.
parsed_const: {ast.Const} Parsed constant.
Returns:
{mojom.Constant} AST constant.
"""
constant = mojom.Constant()
constant.mojom_name = parsed_const.mojom_name
if parent_kind:
scope = (module.mojom_namespace, parent_kind.mojom_name)
else:
scope = (module.mojom_namespace, )
# TODO(mpcomplete): maybe we should only support POD kinds.
constant.kind = _Kind(module.kinds, _MapKind(parsed_const.typename), scope)
constant.parent_kind = parent_kind
constant.value = _FixupExpression(module, parsed_const.value, scope, None)
value = mojom.ConstantValue(module, parent_kind, constant)
module.values[value.GetSpec()] = value
return constant
def _CollectReferencedKinds(module, all_defined_kinds):
"""
Takes a {mojom.Module} object and a list of all defined kinds within that
module, and enumerates the complete dict of user-defined mojom types
(as {mojom.Kind} objects) referenced by the module's own defined kinds (i.e.
as types of struct or union or interface parameters. The returned dict is
keyed by kind spec.
"""
def extract_referenced_user_kinds(kind):
if mojom.IsArrayKind(kind):
return extract_referenced_user_kinds(kind.kind)
if mojom.IsMapKind(kind):
return (extract_referenced_user_kinds(kind.key_kind) +
extract_referenced_user_kinds(kind.value_kind))
if mojom.IsInterfaceRequestKind(kind) or mojom.IsAssociatedKind(kind):
return [kind.kind]
if mojom.IsStructKind(kind):
return [kind]
if (mojom.IsInterfaceKind(kind) or mojom.IsEnumKind(kind) or
mojom.IsUnionKind(kind)):
return [kind]
return []
def sanitize_kind(kind):
"""Removes nullability from a kind"""
if kind.spec.startswith('?'):
return _Kind(module.kinds, kind.spec[1:],
(module.mojom_namespace, ''))
return kind
referenced_user_kinds = {}
for defined_kind in all_defined_kinds:
if mojom.IsStructKind(defined_kind) or mojom.IsUnionKind(defined_kind):
for field in defined_kind.fields:
for referenced_kind in extract_referenced_user_kinds(field.kind):
sanitized_kind = sanitize_kind(referenced_kind)
referenced_user_kinds[sanitized_kind.spec] = sanitized_kind
# Also scan for references in parameter lists
for interface in module.interfaces:
for method in interface.methods:
for param in itertools.chain(method.parameters or [],
method.response_parameters or []):
if (mojom.IsStructKind(param.kind) or mojom.IsUnionKind(param.kind) or
mojom.IsEnumKind(param.kind) or
mojom.IsAnyInterfaceKind(param.kind)):
for referenced_kind in extract_referenced_user_kinds(param.kind):
sanitized_kind = sanitize_kind(referenced_kind)
referenced_user_kinds[sanitized_kind.spec] = sanitized_kind
return referenced_user_kinds
def _Module(tree, path, imports):
"""
Args:
tree: {ast.Mojom} The parse tree.
path: {str} The path to the mojom file.
imports: {Dict[str, mojom.Module]} Mapping from filenames, as they appear in
the import list, to already processed modules. Used to process imports.
Returns:
{mojom.Module} An AST for the mojom.
"""
module = mojom.Module(path=path)
module.kinds = {}
for kind in mojom.PRIMITIVES:
module.kinds[kind.spec] = kind
module.values = {}
module.mojom_namespace = tree.module.mojom_namespace[1] if tree.module else ''
  # Imports must come first, because they add to module.kinds, which is used
  # by the others.
module.imports = [
_Import(module, imports[imp.import_filename])
for imp in tree.import_list]
if tree.module and tree.module.attribute_list:
assert isinstance(tree.module.attribute_list, ast.AttributeList)
# TODO(vtl): Check for duplicate keys here.
module.attributes = dict((attribute.key, attribute.value)
for attribute in tree.module.attribute_list)
filename = os.path.basename(path)
# First pass collects kinds.
module.enums = list(
map(lambda enum: _Enum(module, enum, None),
_ElemsOfType(tree.definition_list, ast.Enum, filename)))
module.structs = list(
map(lambda struct: _Struct(module, struct),
_ElemsOfType(tree.definition_list, ast.Struct, filename)))
module.unions = list(
map(lambda union: _Union(module, union),
_ElemsOfType(tree.definition_list, ast.Union, filename)))
module.interfaces = list(
map(lambda interface: _Interface(module, interface),
_ElemsOfType(tree.definition_list, ast.Interface, filename)))
  module.constants = list(
      map(lambda constant: _Constant(module, constant, None),
          _ElemsOfType(tree.definition_list, ast.Const, filename)))
# Second pass expands fields and methods. This allows fields and parameters
# to refer to kinds defined anywhere in the mojom.
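  # (Without the two-pass split, a struct field typed with an enum declared
  # later in the same file would fail to resolve during kind collection.)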
all_defined_kinds = {}
for struct in module.structs:
struct.fields = list(
map(lambda field: _StructField(module, field, struct),
struct.fields_data))
del struct.fields_data
all_defined_kinds[struct.spec] = struct
for enum in struct.enums:
all_defined_kinds[enum.spec] = enum
for union in module.unions:
    union.fields = list(
        map(lambda field: _UnionField(module, field, union),
            union.fields_data))
del union.fields_data
all_defined_kinds[union.spec] = union
for interface in module.interfaces:
    interface.methods = list(
        map(lambda method: _Method(module, method, interface),
            interface.methods_data))
del interface.methods_data
all_defined_kinds[interface.spec] = interface
for enum in interface.enums:
all_defined_kinds[enum.spec] = enum
for enum in module.enums:
all_defined_kinds[enum.spec] = enum
all_referenced_kinds = _CollectReferencedKinds(module,
all_defined_kinds.values())
imported_kind_specs = set(all_referenced_kinds.keys()).difference(
set(all_defined_kinds.keys()))
module.imported_kinds = dict((spec, all_referenced_kinds[spec])
for spec in imported_kind_specs)
return module
def OrderedModule(tree, path, imports):
"""Convert parse tree to AST module.
Args:
tree: {ast.Mojom} The parse tree.
path: {str} The path to the mojom file.
imports: {Dict[str, mojom.Module]} Mapping from filenames, as they appear in
the import list, to already processed modules. Used to process imports.
Returns:
{mojom.Module} An AST for the mojom.
"""
module = _Module(tree, path, imports)
return module
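# Typical entry point for a mojom file with no imports (path is hypothetical):
#   module = OrderedModule(parse_tree, 'foo/bar.mojom', imports={})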
| 36.435506 | 80 | 0.689075 |
f83109ab8688ead969bc0aafc56c294f62fd5f15 | 180 | py | Python | users/models.py | Don-Joel/MyDash | 8c556c451752c860426a061c230f524e77afcb6f | [
"MIT"
] | null | null | null | users/models.py | Don-Joel/MyDash | 8c556c451752c860426a061c230f524e77afcb6f | [
"MIT"
] | null | null | null | users/models.py | Don-Joel/MyDash | 8c556c451752c860426a061c230f524e77afcb6f | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
from django.contrib.auth.models import AbstractUser
class CustomUser(AbstractUser):
pass | 20 | 51 | 0.8 |
c44e6fc699a1b80a2ebe73b976e1a7a390406e99 | 1,714 | py | Python | tests/test_binding.py | massongit/natto-py | de8023abb99133e11ddd889fc915692ff07d4db8 | [
"BSD-2-Clause"
] | 1 | 2017-11-26T22:15:42.000Z | 2017-11-26T22:15:42.000Z | tests/test_binding.py | massongit/natto-py | de8023abb99133e11ddd889fc915692ff07d4db8 | [
"BSD-2-Clause"
] | null | null | null | tests/test_binding.py | massongit/natto-py | de8023abb99133e11ddd889fc915692ff07d4db8 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
'''Test for natto.binding.'''
import natto.binding as binding
import unittest
class TestBinding(unittest.TestCase):
'''Tests the functions in the natto.binding module.'''
def test_ffi_libmecab(self):
'''Test FFI binding to mecab library.'''
ffi = binding._ffi_libmecab()
self.assertIsNotNone(ffi)
'''
Copyright (c) 2020, Brooke M. Fujita.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above
copyright notice, this list of conditions and the
following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the
following disclaimer in the documentation and/or other
materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
| 41.804878 | 80 | 0.753792 |
a9e5cda03d6b4be39101291bc5a8b126abcf4a39 | 2,716 | py | Python | qa/rpc-tests/test_framework/coverage.py | mirzaei-ce/core-iranbit | 7b1c880dc0eeb84408a84a356c2b731c5b5a0b8c | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/coverage.py | mirzaei-ce/core-iranbit | 7b1c880dc0eeb84408a84a356c2b731c5b5a0b8c | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/coverage.py | mirzaei-ce/core-iranbit | 7b1c880dc0eeb84408a84a356c2b731c5b5a0b8c | [
"MIT"
] | null | null | null | """
This module contains utilities for doing coverage analysis on the RPC
interface.
It provides a way to track which RPC commands are exercised during
testing.
"""
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper(object):
"""
    An object that wraps AuthServiceProxy to record each RPC call made
    through it.
"""
def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
"""
Kwargs:
auth_service_proxy_instance (AuthServiceProxy): the instance
being wrapped.
coverage_logfile (str): if specified, write each service_name
out to a file when called.
"""
self.auth_service_proxy_instance = auth_service_proxy_instance
self.coverage_logfile = coverage_logfile
def __getattr__(self, *args, **kwargs):
return_val = self.auth_service_proxy_instance.__getattr__(
*args, **kwargs)
return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
def __call__(self, *args, **kwargs):
"""
Delegates to AuthServiceProxy, then writes the particular RPC method
called to a file.
"""
return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
rpc_method = self.auth_service_proxy_instance._service_name
if self.coverage_logfile:
with open(self.coverage_logfile, 'a+') as f:
f.write("%s\n" % rpc_method)
return return_val
@property
def url(self):
return self.auth_service_proxy_instance.url
def get_filename(dirname, n_node):
"""
Get a filename unique to the test process ID and node.
This file will contain a list of RPC commands covered.
"""
pid = str(os.getpid())
return os.path.join(
dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
def write_all_rpc_commands(dirname, node):
"""
Write out a list of all RPC functions available in `iranbit-cli` for
coverage comparison. This will only happen once per coverage
directory.
Args:
dirname (str): temporary test dir
node (AuthServiceProxy): client
Returns:
bool. if the RPC interface file was written.
"""
filename = os.path.join(dirname, REFERENCE_FILENAME)
if os.path.isfile(filename):
return False
help_output = node.help().split('\n')
commands = set()
for line in help_output:
line = line.strip()
# Ignore blanks and headers
if line and not line.startswith('='):
commands.add("%s\n" % line.split()[0])
with open(filename, 'w') as f:
f.writelines(list(commands))
return True
| 26.627451 | 79 | 0.650957 |
287f1b0765bf6b9e43fcf6fd6e3478ceb6556b48 | 191 | py | Python | pi-codes/modules/capture.py | hammer1234567/Build-4-India | 1f589f5158165f871b53886831c9f6c9aaa5d112 | [
"MIT"
] | null | null | null | pi-codes/modules/capture.py | hammer1234567/Build-4-India | 1f589f5158165f871b53886831c9f6c9aaa5d112 | [
"MIT"
] | null | null | null | pi-codes/modules/capture.py | hammer1234567/Build-4-India | 1f589f5158165f871b53886831c9f6c9aaa5d112 | [
"MIT"
] | null | null | null | from picamera import PiCamera
from time import sleep
def Capture():
camera = PiCamera()
camera.start_preview()
sleep(2)
camera.capture('image.jpg')
    camera.stop_preview()
    camera.close()  # release the camera so Capture() can be called again
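# Example usage (assumes a Raspberry Pi camera module is attached and enabled):
#   Capture()  # saves image.jpg in the current working directory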
| 19.1 | 31 | 0.696335 |
76aaf91130f45808835037687bc464ca3daff77c | 6,925 | py | Python | Barbican/Server/barbican-stable-mitaka/barbican/barbican/tests/cmd/test_barbican_manage.py | cloud-security-research/sgx-kms | 2f07bf36721dafe0e4a000ca3f41529dc8d2004a | [
"Apache-2.0"
] | 36 | 2017-11-08T23:36:35.000Z | 2022-03-20T16:01:55.000Z | Barbican/Server/barbican-stable-mitaka/barbican/barbican/tests/cmd/test_barbican_manage.py | Calctopia-OpenSource/sgx-kms | 2f07bf36721dafe0e4a000ca3f41529dc8d2004a | [
"Apache-2.0"
] | 2 | 2017-11-17T03:19:17.000Z | 2018-07-04T03:37:36.000Z | Barbican/Server/barbican-stable-mitaka/barbican/barbican/tests/cmd/test_barbican_manage.py | Calctopia-OpenSource/sgx-kms | 2f07bf36721dafe0e4a000ca3f41529dc8d2004a | [
"Apache-2.0"
] | 12 | 2017-11-13T03:20:40.000Z | 2021-07-16T03:26:41.000Z | # Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fixtures
import mock
from barbican.cmd import barbican_manage as manager
from barbican.tests import utils
class TestBarbicanManageBase(utils.BaseTestCase):
def setUp(self):
super(TestBarbicanManageBase, self).setUp()
def clear_conf():
manager.CONF.reset()
manager.CONF.unregister_opt(manager.category_opt)
clear_conf()
self.addCleanup(clear_conf)
self.useFixture(fixtures.MonkeyPatch(
'oslo_log.log.setup', lambda barbican_test, version='test': None))
manager.CONF.set_override('sql_connection', 'mockdburl')
def _main_test_helper(self, argv, func_name=None, *exp_args, **exp_kwargs):
self.useFixture(fixtures.MonkeyPatch('sys.argv', argv))
manager.main()
func_name.assert_called_once_with(*exp_args, **exp_kwargs)
class TestBarbicanManage(TestBarbicanManageBase):
"""Test barbican-manage functionality."""
@mock.patch('barbican.model.migration.commands.generate')
def test_db_revision(self, mock_generate):
self._main_test_helper(
['barbican.cmd.barbican_manage', 'db', 'revision', '--db-url',
'mockdb', '--message', 'mockmsg'], mock_generate,
autogenerate=False, message='mockmsg', sql_url='mockdb')
@mock.patch('barbican.model.migration.commands.generate')
def test_db_revision_autogenerate(self, mock_generate):
self._main_test_helper(
['barbican.cmd.barbican_manage', 'db', 'revision', '--db-url',
'mockdb', '--message', 'mockmsg', '--autogenerate'],
mock_generate, autogenerate=True, message='mockmsg',
sql_url='mockdb')
@mock.patch('barbican.model.migration.commands.generate')
def test_db_revision_no_dburl(self, mock_generate):
self._main_test_helper(
['barbican.cmd.barbican_manage', 'db', 'revision', '--message',
'mockmsg'], mock_generate, autogenerate=False, message='mockmsg',
sql_url='mockdburl')
@mock.patch('barbican.model.migration.commands.upgrade')
def test_db_upgrade(self, mock_upgrade):
self._main_test_helper(
['barbican.cmd.barbican_manage', 'db', 'upgrade', '--db-url',
'mockdb'], mock_upgrade, to_version='head', sql_url='mockdb')
@mock.patch('barbican.model.migration.commands.upgrade')
def test_db_upgrade_no_dburl(self, mock_upgrade):
self._main_test_helper(
['barbican.cmd.barbican_manage', 'db', 'upgrade'], mock_upgrade,
to_version='head', sql_url='mockdburl')
@mock.patch('barbican.model.migration.commands.history')
def test_db_history(self, mock_history):
self._main_test_helper(
['barbican.cmd.barbican_manage', 'db', 'history', '--db-url',
'mockdb'], mock_history, False, sql_url='mockdb')
@mock.patch('barbican.model.migration.commands.history')
def test_db_history_no_dburl(self, mock_history):
self._main_test_helper(
['barbican.cmd.barbican_manage', 'db', 'history'], mock_history,
False, sql_url='mockdburl')
@mock.patch('barbican.model.clean.clean_command')
def test_db_clean_no_args(self, mock_clean_command):
manager.CONF.set_override('log_file', 'mock_log_file')
self._main_test_helper(
['barbican.cmd.barbican_manage', 'db', 'clean'],
func_name=mock_clean_command,
sql_url='mockdburl',
min_num_days=90,
do_clean_unassociated_projects=False,
do_soft_delete_expired_secrets=False,
verbose=False,
log_file='mock_log_file')
manager.CONF.clear_override('log_file')
@mock.patch('barbican.model.clean.clean_command')
def test_db_clean_with_args(self, mock_clean_command):
manager.CONF.set_override('log_file', 'mock_log_file')
self._main_test_helper(
['barbican.cmd.barbican_manage', 'db', 'clean',
'--db-url', 'somewhere', '--min-days', '180',
'--clean-unassociated-projects', '--soft-delete-expired-secrets',
'--verbose', '--log-file', '/tmp/whatevs'],
func_name=mock_clean_command,
sql_url='somewhere',
min_num_days=180,
do_clean_unassociated_projects=True,
do_soft_delete_expired_secrets=True,
verbose=True,
log_file='/tmp/whatevs')
manager.CONF.clear_override('log_file')
@mock.patch('barbican.model.migration.commands.current')
def test_db_current(self, mock_current):
self._main_test_helper(
['barbican.cmd.barbican_manage', 'db', 'current', '--db-url',
'mockdb'], mock_current, False, sql_url='mockdb')
@mock.patch('barbican.model.migration.commands.current')
def test_db_current_no_dburl(self, mock_current):
self._main_test_helper(
['barbican.cmd.barbican_manage', 'db', 'current'], mock_current,
False, sql_url='mockdburl')
@mock.patch('barbican.plugin.crypto.pkcs11.PKCS11')
def test_hsm_gen_mkek(self, mock_pkcs11):
mock_pkcs11.return_value.get_session.return_value = long(1)
mock_pkcs11.return_value.get_key_handle.return_value = None
mock_pkcs11.return_value.generate_key.return_value = long(0)
mock_genkey = mock_pkcs11.return_value.generate_key
self._main_test_helper(
['barbican.cmd.barbican_manage', 'hsm', 'gen_mkek',
'--library-path', 'mocklib', '--passphrase', 'mockpassewd',
'--label', 'mocklabel'], mock_genkey,
32, 1, 'mocklabel', encrypt=True, wrap=True, master_key=True)
@mock.patch('barbican.plugin.crypto.pkcs11.PKCS11')
def test_hsm_gen_hmac(self, mock_pkcs11):
mock_pkcs11.return_value.get_session.return_value = long(1)
mock_pkcs11.return_value.get_key_handle.return_value = None
mock_pkcs11.return_value.generate_key.return_value = long(0)
mock_genkey = mock_pkcs11.return_value.generate_key
self._main_test_helper(
['barbican.cmd.barbican_manage', 'hsm', 'gen_hmac',
'--library-path', 'mocklib', '--passphrase', 'mockpassewd',
'--label', 'mocklabel'], mock_genkey,
32, 1, 'mocklabel', sign=True, master_key=True)
| 44.10828 | 79 | 0.665415 |
9b0f0ae120622b9414361819d638f88b1479b3ba | 499 | py | Python | corrector.py | NazcaLines/spelling-corrector | ae315a3988e94ee46f60ff4ac7d2ee7609ebc24b | [
"MIT"
] | null | null | null | corrector.py | NazcaLines/spelling-corrector | ae315a3988e94ee46f60ff4ac7d2ee7609ebc24b | [
"MIT"
] | null | null | null | corrector.py | NazcaLines/spelling-corrector | ae315a3988e94ee46f60ff4ac7d2ee7609ebc24b | [
"MIT"
] | null | null | null | import sys
from cliff.app import App
from cliff.commandmanager import CommandManager
class Corrector(App):
def __init__(self):
super(Corrector, self).__init__(
description='spelling-corrector',
version='0.1',
command_manager=CommandManager('corrector'),
deferred_help=True
)
def main(argv=sys.argv[1:]):
corrector = Corrector()
return corrector.run(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
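# Example invocation, assuming a command named 'correct' has been registered
# under the 'corrector' entry-point namespace (hypothetical command name):
#   $ python corrector.py correct "teh quick brown fox"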
| 19.96 | 56 | 0.639279 |
090e21fd45e37cecf8e5637bf54f0b7c93517daf | 14,640 | py | Python | tensorflow_datasets/core/proto/dataset_info_generated_pb2.py | aweers/datasets | a3cc147c7c9bc3497dc82f8b9e3af22d332c921b | [
"Apache-2.0"
] | 1 | 2019-03-02T22:54:29.000Z | 2019-03-02T22:54:29.000Z | tensorflow_datasets/core/proto/dataset_info_generated_pb2.py | rsepassi/datasets | 299f482da52aebe910e91053dbb06a36355f4cde | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/core/proto/dataset_info_generated_pb2.py | rsepassi/datasets | 299f482da52aebe910e91053dbb06a36355f4cde | [
"Apache-2.0"
] | 1 | 2020-01-01T04:48:04.000Z | 2020-01-01T04:48:04.000Z | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: dataset_info.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow_metadata.proto.v0 import statistics_pb2 as tensorflow__metadata_dot_proto_dot_v0_dot_statistics__pb2
from tensorflow_metadata.proto.v0 import schema_pb2 as tensorflow__metadata_dot_proto_dot_v0_dot_schema__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='dataset_info.proto',
package='tensorflow_datasets',
syntax='proto3',
serialized_options=_b('\370\001\001'),
serialized_pb=_b('\n\x12\x64\x61taset_info.proto\x12\x13tensorflow_datasets\x1a-tensorflow_metadata/proto/v0/statistics.proto\x1a)tensorflow_metadata/proto/v0/schema.proto\"\x1f\n\x0f\x44\x61tasetLocation\x12\x0c\n\x04urls\x18\x01 \x03(\t\"s\n\tSplitInfo\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\nnum_shards\x18\x02 \x01(\x03\x12\x44\n\nstatistics\x18\x03 \x01(\x0b\x32\x30.tensorflow.metadata.v0.DatasetFeatureStatistics\"/\n\x0eSupervisedKeys\x12\r\n\x05input\x18\x01 \x01(\t\x12\x0e\n\x06output\x18\x02 \x01(\t\"\xcf\x03\n\x0b\x44\x61tasetInfo\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x0f\n\x07version\x18\t \x01(\t\x12\x10\n\x08\x63itation\x18\x03 \x01(\t\x12\x15\n\rsize_in_bytes\x18\x04 \x01(\x03\x12\x36\n\x08location\x18\x05 \x01(\x0b\x32$.tensorflow_datasets.DatasetLocation\x12S\n\x12\x64ownload_checksums\x18\n \x03(\x0b\x32\x37.tensorflow_datasets.DatasetInfo.DownloadChecksumsEntry\x12.\n\x06schema\x18\x06 \x01(\x0b\x32\x1e.tensorflow.metadata.v0.Schema\x12.\n\x06splits\x18\x07 \x03(\x0b\x32\x1e.tensorflow_datasets.SplitInfo\x12<\n\x0fsupervised_keys\x18\x08 \x01(\x0b\x32#.tensorflow_datasets.SupervisedKeys\x1a\x38\n\x16\x44ownloadChecksumsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x03\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow__metadata_dot_proto_dot_v0_dot_statistics__pb2.DESCRIPTOR,tensorflow__metadata_dot_proto_dot_v0_dot_schema__pb2.DESCRIPTOR,])
_DATASETLOCATION = _descriptor.Descriptor(
name='DatasetLocation',
full_name='tensorflow_datasets.DatasetLocation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='urls', full_name='tensorflow_datasets.DatasetLocation.urls', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=133,
serialized_end=164,
)
_SPLITINFO = _descriptor.Descriptor(
name='SplitInfo',
full_name='tensorflow_datasets.SplitInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tensorflow_datasets.SplitInfo.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_shards', full_name='tensorflow_datasets.SplitInfo.num_shards', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='statistics', full_name='tensorflow_datasets.SplitInfo.statistics', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=166,
serialized_end=281,
)
_SUPERVISEDKEYS = _descriptor.Descriptor(
name='SupervisedKeys',
full_name='tensorflow_datasets.SupervisedKeys',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input', full_name='tensorflow_datasets.SupervisedKeys.input', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output', full_name='tensorflow_datasets.SupervisedKeys.output', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=283,
serialized_end=330,
)
_DATASETINFO_DOWNLOADCHECKSUMSENTRY = _descriptor.Descriptor(
name='DownloadChecksumsEntry',
full_name='tensorflow_datasets.DatasetInfo.DownloadChecksumsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow_datasets.DatasetInfo.DownloadChecksumsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow_datasets.DatasetInfo.DownloadChecksumsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=740,
serialized_end=796,
)
_DATASETINFO = _descriptor.Descriptor(
name='DatasetInfo',
full_name='tensorflow_datasets.DatasetInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='tensorflow_datasets.DatasetInfo.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='tensorflow_datasets.DatasetInfo.description', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='tensorflow_datasets.DatasetInfo.version', index=2,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='citation', full_name='tensorflow_datasets.DatasetInfo.citation', index=3,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='size_in_bytes', full_name='tensorflow_datasets.DatasetInfo.size_in_bytes', index=4,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='location', full_name='tensorflow_datasets.DatasetInfo.location', index=5,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='download_checksums', full_name='tensorflow_datasets.DatasetInfo.download_checksums', index=6,
number=10, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='schema', full_name='tensorflow_datasets.DatasetInfo.schema', index=7,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='splits', full_name='tensorflow_datasets.DatasetInfo.splits', index=8,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='supervised_keys', full_name='tensorflow_datasets.DatasetInfo.supervised_keys', index=9,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DATASETINFO_DOWNLOADCHECKSUMSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=333,
serialized_end=796,
)
_SPLITINFO.fields_by_name['statistics'].message_type = tensorflow__metadata_dot_proto_dot_v0_dot_statistics__pb2._DATASETFEATURESTATISTICS
_DATASETINFO_DOWNLOADCHECKSUMSENTRY.containing_type = _DATASETINFO
_DATASETINFO.fields_by_name['location'].message_type = _DATASETLOCATION
_DATASETINFO.fields_by_name['download_checksums'].message_type = _DATASETINFO_DOWNLOADCHECKSUMSENTRY
_DATASETINFO.fields_by_name['schema'].message_type = tensorflow__metadata_dot_proto_dot_v0_dot_schema__pb2._SCHEMA
_DATASETINFO.fields_by_name['splits'].message_type = _SPLITINFO
_DATASETINFO.fields_by_name['supervised_keys'].message_type = _SUPERVISEDKEYS
DESCRIPTOR.message_types_by_name['DatasetLocation'] = _DATASETLOCATION
DESCRIPTOR.message_types_by_name['SplitInfo'] = _SPLITINFO
DESCRIPTOR.message_types_by_name['SupervisedKeys'] = _SUPERVISEDKEYS
DESCRIPTOR.message_types_by_name['DatasetInfo'] = _DATASETINFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DatasetLocation = _reflection.GeneratedProtocolMessageType('DatasetLocation', (_message.Message,), dict(
DESCRIPTOR = _DATASETLOCATION,
__module__ = 'dataset_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow_datasets.DatasetLocation)
))
_sym_db.RegisterMessage(DatasetLocation)
SplitInfo = _reflection.GeneratedProtocolMessageType('SplitInfo', (_message.Message,), dict(
DESCRIPTOR = _SPLITINFO,
__module__ = 'dataset_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow_datasets.SplitInfo)
))
_sym_db.RegisterMessage(SplitInfo)
SupervisedKeys = _reflection.GeneratedProtocolMessageType('SupervisedKeys', (_message.Message,), dict(
DESCRIPTOR = _SUPERVISEDKEYS,
__module__ = 'dataset_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow_datasets.SupervisedKeys)
))
_sym_db.RegisterMessage(SupervisedKeys)
DatasetInfo = _reflection.GeneratedProtocolMessageType('DatasetInfo', (_message.Message,), dict(
DownloadChecksumsEntry = _reflection.GeneratedProtocolMessageType('DownloadChecksumsEntry', (_message.Message,), dict(
DESCRIPTOR = _DATASETINFO_DOWNLOADCHECKSUMSENTRY,
__module__ = 'dataset_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow_datasets.DatasetInfo.DownloadChecksumsEntry)
))
,
DESCRIPTOR = _DATASETINFO,
__module__ = 'dataset_info_pb2'
# @@protoc_insertion_point(class_scope:tensorflow_datasets.DatasetInfo)
))
_sym_db.RegisterMessage(DatasetInfo)
_sym_db.RegisterMessage(DatasetInfo.DownloadChecksumsEntry)
DESCRIPTOR._options = None
_DATASETINFO_DOWNLOADCHECKSUMSENTRY._options = None
# @@protoc_insertion_point(module_scope) | 42.682216 | 1,327 | 0.7625 |
1f427f2021afd28ae3bcf08d8afe3aec066191ba | 1,898 | py | Python | deeplearning1/nbs/test_chapter1.py | manhcuogntin4/Fast-AI-course | 0f8acd501f00491c8f99705581f6706e5dcf3e97 | [
"Apache-2.0"
] | null | null | null | deeplearning1/nbs/test_chapter1.py | manhcuogntin4/Fast-AI-course | 0f8acd501f00491c8f99705581f6706e5dcf3e97 | [
"Apache-2.0"
] | null | null | null | deeplearning1/nbs/test_chapter1.py | manhcuogntin4/Fast-AI-course | 0f8acd501f00491c8f99705581f6706e5dcf3e97 | [
"Apache-2.0"
] | null | null | null | from __future__ import division,print_function
import os, json
from glob import glob
import numpy as np
np.set_printoptions(precision=4, linewidth=100)
from matplotlib import pyplot as plt
from numpy.random import random, permutation
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
import keras
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.models import Sequential, Model
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers import Input
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD, RMSprop
from keras.preprocessing import image
from keras.models import load_model
#from keras import backend as K
#K.set_image_dim_ordering('th')
path = "../../../data/sample/"
import utils; reload(utils)
from utils import plots
# As large as you can, but no larger than 64 is recommended.
# If you have an older or cheaper GPU, you'll run out of memory, so will have to decrease this.
batch_size=4
# Import our class, and instantiate
import vgg16; reload(vgg16)
from vgg16 import Vgg16
model_path = path + "ft1.h5"
vgg = Vgg16()
#print (path + "ft1.h5")
import h5py
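# The commented-out snippets below are common workarounds kept for reference:
# the first strips the 'optimizer_weights' group, which some Keras versions
# choke on when loading weights saved by a different version; the second
# loads weights layer by layer from an older-format HDF5 file.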
# with h5py.File(model_path, 'a') as f:
# if 'optimizer_weights' in f.keys():
# del f['optimizer_weights']
# f = h5py.File(model_path)
# for k in range(f.attrs['nb_layers']):
# if k >= len(model.layers):
# # we don't look at the last (fully-connected) layers in the savefile
# break
# g = f['layer_{}'.format(k)]
# weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
# model.layers[k].set_weights(weights)
# f.close()
hdf5_file = h5py.File(model_path, mode='r')
print(list(hdf5_file))
vgg.model.load_weights(model_path)
val_batches, probs = vgg.test(path + 'valid', batch_size=batch_size)
| 26.361111 | 95 | 0.73393 |
c25008cc2925467f627441bfffbe33879cdb4641 | 24,310 | py | Python | pysnmp/BLUECOAT-SG-PROXY-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/BLUECOAT-SG-PROXY-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/BLUECOAT-SG-PROXY-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module BLUECOAT-SG-PROXY-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BLUECOAT-SG-PROXY-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:22:43 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion")
blueCoatMgmt, = mibBuilder.importSymbols("BLUECOAT-MIB", "blueCoatMgmt")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Bits, ModuleIdentity, TimeTicks, Unsigned32, ObjectIdentity, IpAddress, Integer32, Counter64, Gauge32, NotificationType, Counter32, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Bits", "ModuleIdentity", "TimeTicks", "Unsigned32", "ObjectIdentity", "IpAddress", "Integer32", "Counter64", "Gauge32", "NotificationType", "Counter32", "iso")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
bluecoatSGProxyMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 3417, 2, 11))
bluecoatSGProxyMIB.setRevisions(('2011-11-01 03:00', '2007-11-05 03:00', '2007-08-28 03:00',))
if mibBuilder.loadTexts: bluecoatSGProxyMIB.setLastUpdated('201111010300Z')
if mibBuilder.loadTexts: bluecoatSGProxyMIB.setOrganization('Blue Coat Systems, Inc.')
sgProxyConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 11, 1))
sgProxySystem = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2))
sgProxyHttp = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3))
sgProxyAdmin = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyAdmin.setStatus('current')
sgProxySoftware = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxySoftware.setStatus('current')
sgProxyVersion = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyVersion.setStatus('current')
sgProxySerialNumber = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxySerialNumber.setStatus('current')
sgProxyCpu = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 1))
sgProxyCache = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 2))
sgProxyMemory = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 3))
sgProxyCpuCoreTable = MibTable((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 4), )
if mibBuilder.loadTexts: sgProxyCpuCoreTable.setStatus('current')
sgProxyCpuUpTime = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 1, 1), Counter64()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyCpuUpTime.setStatus('deprecated')
sgProxyCpuBusyTime = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 1, 2), Counter64()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyCpuBusyTime.setStatus('deprecated')
sgProxyCpuIdleTime = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 1, 3), Counter64()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyCpuIdleTime.setStatus('deprecated')
sgProxyCpuUpTimeSinceLastAccess = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 1, 4), Counter64()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyCpuUpTimeSinceLastAccess.setStatus('deprecated')
sgProxyCpuBusyTimeSinceLastAccess = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 1, 5), Counter64()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyCpuBusyTimeSinceLastAccess.setStatus('deprecated')
sgProxyCpuIdleTimeSinceLastAccess = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 1, 6), Counter64()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyCpuIdleTimeSinceLastAccess.setStatus('deprecated')
sgProxyCpuBusyPerCent = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 1, 7), Gauge32()).setUnits('Percentage').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyCpuBusyPerCent.setStatus('deprecated')
sgProxyCpuIdlePerCent = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 1, 8), Gauge32()).setUnits('Percentage').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyCpuIdlePerCent.setStatus('deprecated')
sgProxyStorage = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 2, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyStorage.setStatus('current')
sgProxyNumObjects = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 2, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyNumObjects.setStatus('current')
sgProxyMemAvailable = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 3, 1), Counter64()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyMemAvailable.setStatus('current')
sgProxyMemCacheUsage = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 3, 2), Counter64()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyMemCacheUsage.setStatus('current')
sgProxyMemSysUsage = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 3, 3), Counter64()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyMemSysUsage.setStatus('current')
sgProxyMemoryPressure = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 3, 4), Gauge32()).setUnits('Percentage').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyMemoryPressure.setStatus('current')
sgProxyCpuCoreTableEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 4, 1), ).setIndexNames((0, "BLUECOAT-SG-PROXY-MIB", "sgProxyCpuCoreIndex"))
if mibBuilder.loadTexts: sgProxyCpuCoreTableEntry.setStatus('current')
sgProxyCpuCoreIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 32)))
if mibBuilder.loadTexts: sgProxyCpuCoreIndex.setStatus('current')
sgProxyCpuCoreUpTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 4, 1, 2), Counter64()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyCpuCoreUpTime.setStatus('current')
sgProxyCpuCoreBusyTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 4, 1, 3), Counter64()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyCpuCoreBusyTime.setStatus('current')
sgProxyCpuCoreIdleTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 4, 1, 4), Counter64()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyCpuCoreIdleTime.setStatus('current')
sgProxyCpuCoreUpTimeSinceLastAccess = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 4, 1, 5), Counter64()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyCpuCoreUpTimeSinceLastAccess.setStatus('current')
sgProxyCpuCoreBusyTimeSinceLastAccess = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 4, 1, 6), Counter64()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyCpuCoreBusyTimeSinceLastAccess.setStatus('current')
sgProxyCpuCoreIdleTimeSinceLastAccess = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 4, 1, 7), Counter64()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyCpuCoreIdleTimeSinceLastAccess.setStatus('current')
sgProxyCpuCoreBusyPerCent = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 4, 1, 8), Gauge32()).setUnits('Percentage').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyCpuCoreBusyPerCent.setStatus('current')
sgProxyCpuCoreIdlePerCent = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 11, 2, 4, 1, 9), Gauge32()).setUnits('Percentage').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyCpuCoreIdlePerCent.setStatus('current')
sgProxyHttpPerf = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1))
sgProxyHttpResponse = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2))
sgProxyHttpMedian = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 3))
sgProxyHttpClient = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 1))
sgProxyHttpServer = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 2))
sgProxyHttpConnections = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 3))
sgProxyHttpClientRequests = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 1, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpClientRequests.setStatus('current')
sgProxyHttpClientHits = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpClientHits.setStatus('current')
sgProxyHttpClientPartialHits = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpClientPartialHits.setStatus('current')
sgProxyHttpClientMisses = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpClientMisses.setStatus('current')
sgProxyHttpClientErrors = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpClientErrors.setStatus('current')
sgProxyHttpClientRequestRate = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 1, 6), Gauge32()).setUnits('Requests Per Second').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpClientRequestRate.setStatus('current')
sgProxyHttpClientHitRate = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 1, 7), Gauge32()).setUnits('Percentage').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpClientHitRate.setStatus('current')
sgProxyHttpClientByteHitRate = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 1, 8), Gauge32()).setUnits('Percentage').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpClientByteHitRate.setStatus('current')
sgProxyHttpClientInBytes = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 1, 9), Counter64()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpClientInBytes.setStatus('current')
sgProxyHttpClientOutBytes = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 1, 10), Counter64()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpClientOutBytes.setStatus('current')
sgProxyHttpServerRequests = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 2, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpServerRequests.setStatus('current')
sgProxyHttpServerErrors = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 2, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpServerErrors.setStatus('current')
sgProxyHttpServerInBytes = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 2, 3), Counter64()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpServerInBytes.setStatus('current')
sgProxyHttpServerOutBytes = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 2, 4), Counter64()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpServerOutBytes.setStatus('current')
sgProxyHttpClientConnections = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 3, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpClientConnections.setStatus('current')
sgProxyHttpClientConnectionsActive = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 3, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpClientConnectionsActive.setStatus('current')
sgProxyHttpClientConnectionsIdle = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 3, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpClientConnectionsIdle.setStatus('current')
sgProxyHttpServerConnections = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 3, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpServerConnections.setStatus('current')
sgProxyHttpServerConnectionsActive = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 3, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpServerConnectionsActive.setStatus('current')
sgProxyHttpServerConnectionsIdle = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 1, 3, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpServerConnectionsIdle.setStatus('current')
sgProxyHttpResponseTime = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 1))
sgProxyHttpResponseFirstByte = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 2))
sgProxyHttpResponseByteRate = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 3))
sgProxyHttpResponseSize = MibIdentifier((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 4))
sgProxyHttpServiceTimeAll = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 1, 1), Gauge32()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpServiceTimeAll.setStatus('current')
sgProxyHttpServiceTimeHit = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 1, 2), Gauge32()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpServiceTimeHit.setStatus('current')
sgProxyHttpServiceTimePartialHit = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 1, 3), Gauge32()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpServiceTimePartialHit.setStatus('current')
sgProxyHttpServiceTimeMiss = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 1, 4), Gauge32()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpServiceTimeMiss.setStatus('current')
sgProxyHttpTotalFetchTimeAll = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 1, 5), Counter64()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpTotalFetchTimeAll.setStatus('current')
sgProxyHttpTotalFetchTimeHit = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 1, 6), Counter64()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpTotalFetchTimeHit.setStatus('current')
sgProxyHttpTotalFetchTimePartialHit = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 1, 7), Counter64()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpTotalFetchTimePartialHit.setStatus('current')
sgProxyHttpTotalFetchTimeMiss = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 1, 8), Counter64()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpTotalFetchTimeMiss.setStatus('current')
sgProxyHttpFirstByteAll = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 2, 1), Gauge32()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpFirstByteAll.setStatus('current')
sgProxyHttpFirstByteHit = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 2, 2), Gauge32()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpFirstByteHit.setStatus('current')
sgProxyHttpFirstBytePartialHit = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 2, 3), Gauge32()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpFirstBytePartialHit.setStatus('current')
sgProxyHttpFirstByteMiss = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 2, 4), Gauge32()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpFirstByteMiss.setStatus('current')
sgProxyHttpByteRateAll = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 3, 1), Gauge32()).setUnits('Bytes Per Second').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpByteRateAll.setStatus('current')
sgProxyHttpByteRateHit = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 3, 2), Gauge32()).setUnits('Bytes Per Second').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpByteRateHit.setStatus('current')
sgProxyHttpByteRatePartialHit = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 3, 3), Gauge32()).setUnits('Bytes Per Second').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpByteRatePartialHit.setStatus('current')
sgProxyHttpByteRateMiss = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 3, 4), Gauge32()).setUnits('Bytes Per Second').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpByteRateMiss.setStatus('current')
sgProxyHttpResponseSizeAll = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 4, 1), Gauge32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpResponseSizeAll.setStatus('current')
sgProxyHttpResponseSizeHit = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 4, 2), Gauge32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpResponseSizeHit.setStatus('current')
sgProxyHttpResponseSizePartialHit = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 4, 3), Gauge32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpResponseSizePartialHit.setStatus('current')
sgProxyHttpResponseSizeMiss = MibScalar((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 2, 4, 4), Gauge32()).setUnits('Bytes').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpResponseSizeMiss.setStatus('current')
sgProxyHttpMedianServiceTable = MibTable((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 3, 1), )
if mibBuilder.loadTexts: sgProxyHttpMedianServiceTable.setStatus('current')
sgProxyHttpMedianServiceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 3, 1, 1), ).setIndexNames((0, "BLUECOAT-SG-PROXY-MIB", "sgProxyHttpMedianServiceTime"))
if mibBuilder.loadTexts: sgProxyHttpMedianServiceEntry.setStatus('current')
sgProxyHttpMedianServiceTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 5, 60))).clone(namedValues=NamedValues(("one", 1), ("five", 5), ("sixty", 60)))).setUnits('Minutes')
if mibBuilder.loadTexts: sgProxyHttpMedianServiceTime.setStatus('current')
sgProxyHttpMedianServiceTimeAll = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 3, 1, 1, 2), Gauge32()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpMedianServiceTimeAll.setStatus('current')
sgProxyHttpMedianServiceTimeHit = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 3, 1, 1, 3), Gauge32()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpMedianServiceTimeHit.setStatus('current')
sgProxyHttpMedianServiceTimePartialHit = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 3, 1, 1, 4), Gauge32()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpMedianServiceTimePartialHit.setStatus('current')
sgProxyHttpMedianServiceTimeMiss = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 3, 1, 1, 5), Gauge32()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyHttpMedianServiceTimeMiss.setStatus('current')
sgProxyDnsMedianServiceTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3417, 2, 11, 3, 3, 1, 1, 6), Gauge32()).setUnits('Milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: sgProxyDnsMedianServiceTime.setStatus('current')
mibBuilder.exportSymbols("BLUECOAT-SG-PROXY-MIB", sgProxyHttpResponseSize=sgProxyHttpResponseSize, sgProxyHttpClientMisses=sgProxyHttpClientMisses, sgProxyHttpFirstByteHit=sgProxyHttpFirstByteHit, sgProxyHttpMedianServiceTime=sgProxyHttpMedianServiceTime, sgProxyCpuCoreBusyTime=sgProxyCpuCoreBusyTime, sgProxyHttpClientErrors=sgProxyHttpClientErrors, sgProxyDnsMedianServiceTime=sgProxyDnsMedianServiceTime, sgProxyHttpClientConnectionsActive=sgProxyHttpClientConnectionsActive, sgProxyHttpByteRateHit=sgProxyHttpByteRateHit, sgProxyHttpMedianServiceTimePartialHit=sgProxyHttpMedianServiceTimePartialHit, sgProxyCpuBusyPerCent=sgProxyCpuBusyPerCent, sgProxyHttpClient=sgProxyHttpClient, sgProxyHttpServiceTimeMiss=sgProxyHttpServiceTimeMiss, sgProxyHttpServiceTimePartialHit=sgProxyHttpServiceTimePartialHit, sgProxyHttpServiceTimeHit=sgProxyHttpServiceTimeHit, sgProxyCpuCoreBusyTimeSinceLastAccess=sgProxyCpuCoreBusyTimeSinceLastAccess, sgProxyCpuCoreTableEntry=sgProxyCpuCoreTableEntry, sgProxyHttpResponseTime=sgProxyHttpResponseTime, sgProxyHttpResponseFirstByte=sgProxyHttpResponseFirstByte, sgProxyHttpResponseSizePartialHit=sgProxyHttpResponseSizePartialHit, sgProxyHttpFirstByteMiss=sgProxyHttpFirstByteMiss, sgProxyHttpClientHitRate=sgProxyHttpClientHitRate, sgProxyHttpClientByteHitRate=sgProxyHttpClientByteHitRate, sgProxyHttpConnections=sgProxyHttpConnections, sgProxyHttpFirstBytePartialHit=sgProxyHttpFirstBytePartialHit, sgProxyStorage=sgProxyStorage, sgProxyMemSysUsage=sgProxyMemSysUsage, sgProxyMemAvailable=sgProxyMemAvailable, sgProxyHttpMedianServiceTimeHit=sgProxyHttpMedianServiceTimeHit, sgProxyMemory=sgProxyMemory, sgProxyCpuCoreIndex=sgProxyCpuCoreIndex, sgProxyHttpServer=sgProxyHttpServer, sgProxyHttpMedianServiceTimeAll=sgProxyHttpMedianServiceTimeAll, sgProxyCpuUpTimeSinceLastAccess=sgProxyCpuUpTimeSinceLastAccess, sgProxyCpuCoreIdlePerCent=sgProxyCpuCoreIdlePerCent, sgProxyHttpClientOutBytes=sgProxyHttpClientOutBytes, sgProxyHttpClientRequests=sgProxyHttpClientRequests, sgProxyHttpServiceTimeAll=sgProxyHttpServiceTimeAll, sgProxyHttpResponse=sgProxyHttpResponse, sgProxyHttpFirstByteAll=sgProxyHttpFirstByteAll, sgProxyHttpServerOutBytes=sgProxyHttpServerOutBytes, sgProxyHttpTotalFetchTimeAll=sgProxyHttpTotalFetchTimeAll, sgProxyHttpClientConnections=sgProxyHttpClientConnections, sgProxyCache=sgProxyCache, sgProxyConfig=sgProxyConfig, sgProxyHttpMedian=sgProxyHttpMedian, sgProxyCpuCoreUpTimeSinceLastAccess=sgProxyCpuCoreUpTimeSinceLastAccess, sgProxyHttpByteRateMiss=sgProxyHttpByteRateMiss, sgProxyHttpServerConnections=sgProxyHttpServerConnections, sgProxyAdmin=sgProxyAdmin, sgProxyHttpClientRequestRate=sgProxyHttpClientRequestRate, sgProxyCpuIdlePerCent=sgProxyCpuIdlePerCent, sgProxyHttpClientPartialHits=sgProxyHttpClientPartialHits, PYSNMP_MODULE_ID=bluecoatSGProxyMIB, sgProxyHttpClientHits=sgProxyHttpClientHits, sgProxyCpuCoreIdleTime=sgProxyCpuCoreIdleTime, sgProxyHttpServerRequests=sgProxyHttpServerRequests, sgProxyCpu=sgProxyCpu, sgProxyHttpByteRateAll=sgProxyHttpByteRateAll, sgProxyCpuIdleTime=sgProxyCpuIdleTime, sgProxyMemCacheUsage=sgProxyMemCacheUsage, sgProxyHttpServerErrors=sgProxyHttpServerErrors, sgProxyHttpTotalFetchTimeMiss=sgProxyHttpTotalFetchTimeMiss, sgProxyHttpServerConnectionsIdle=sgProxyHttpServerConnectionsIdle, sgProxyHttpMedianServiceTimeMiss=sgProxyHttpMedianServiceTimeMiss, sgProxyCpuBusyTimeSinceLastAccess=sgProxyCpuBusyTimeSinceLastAccess, sgProxySerialNumber=sgProxySerialNumber, sgProxyHttp=sgProxyHttp, 
sgProxyHttpByteRatePartialHit=sgProxyHttpByteRatePartialHit, sgProxyCpuCoreBusyPerCent=sgProxyCpuCoreBusyPerCent, sgProxyCpuCoreIdleTimeSinceLastAccess=sgProxyCpuCoreIdleTimeSinceLastAccess, sgProxyHttpResponseSizeAll=sgProxyHttpResponseSizeAll, sgProxyHttpClientConnectionsIdle=sgProxyHttpClientConnectionsIdle, sgProxyHttpResponseSizeMiss=sgProxyHttpResponseSizeMiss, sgProxyCpuUpTime=sgProxyCpuUpTime, sgProxyCpuCoreUpTime=sgProxyCpuCoreUpTime, sgProxyHttpMedianServiceTable=sgProxyHttpMedianServiceTable, sgProxyHttpServerInBytes=sgProxyHttpServerInBytes, sgProxyHttpClientInBytes=sgProxyHttpClientInBytes, sgProxyCpuBusyTime=sgProxyCpuBusyTime, sgProxyHttpResponseSizeHit=sgProxyHttpResponseSizeHit, sgProxySoftware=sgProxySoftware, sgProxyHttpPerf=sgProxyHttpPerf, sgProxyHttpResponseByteRate=sgProxyHttpResponseByteRate, bluecoatSGProxyMIB=bluecoatSGProxyMIB, sgProxyCpuCoreTable=sgProxyCpuCoreTable, sgProxyHttpServerConnectionsActive=sgProxyHttpServerConnectionsActive, sgProxySystem=sgProxySystem, sgProxyMemoryPressure=sgProxyMemoryPressure, sgProxyCpuIdleTimeSinceLastAccess=sgProxyCpuIdleTimeSinceLastAccess, sgProxyHttpMedianServiceEntry=sgProxyHttpMedianServiceEntry, sgProxyVersion=sgProxyVersion, sgProxyNumObjects=sgProxyNumObjects, sgProxyHttpTotalFetchTimePartialHit=sgProxyHttpTotalFetchTimePartialHit, sgProxyHttpTotalFetchTimeHit=sgProxyHttpTotalFetchTimeHit)
| 127.947368 | 4,884 | 0.779186 |
ed7745d2ff777d218802fbba9727aa73bf19a393 | 7,281 | py | Python | test/functional/mining_prioritisetransaction.py | bitcoinNickel/bitcoinnickel | 7980682cf13048d0177b370d272717ada4c66910 | [
"MIT"
] | 1 | 2022-02-06T18:40:22.000Z | 2022-02-06T18:40:22.000Z | test/functional/mining_prioritisetransaction.py | bitcoinNickel/bitcoinnickel | 7980682cf13048d0177b370d272717ada4c66910 | [
"MIT"
] | null | null | null | test/functional/mining_prioritisetransaction.py | bitcoinNickel/bitcoinnickel | 7980682cf13048d0177b370d272717ada4c66910 | [
"MIT"
] | 1 | 2022-01-02T16:36:33.000Z | 2022-01-02T16:36:33.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the prioritisetransaction mining RPC."""
import time
from test_framework.messages import COIN, MAX_BLOCK_BASE_SIZE
from test_framework.test_framework import BitcoinNickelTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, create_confirmed_utxos, create_lots_of_big_transactions, gen_return_txouts
class PrioritiseTransactionTest(BitcoinNickelTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [["-printpriority=1"], ["-printpriority=1"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Test `prioritisetransaction` required parameters
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction)
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '')
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0)
# Test `prioritisetransaction` invalid extra parameters
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0, 0, 0)
# Test `prioritisetransaction` invalid `txid`
assert_raises_rpc_error(-1, "txid must be hexadecimal string", self.nodes[0].prioritisetransaction, txid='foo', fee_delta=0)
# Test `prioritisetransaction` invalid `dummy`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-1, "JSON value is not a number as expected", self.nodes[0].prioritisetransaction, txid, 'foo', 0)
assert_raises_rpc_error(-8, "Priority is no longer supported, dummy argument to prioritisetransaction must be 0.", self.nodes[0].prioritisetransaction, txid, 1, 0)
# Test `prioritisetransaction` invalid `fee_delta`
assert_raises_rpc_error(-1, "JSON value is not an integer as expected", self.nodes[0].prioritisetransaction, txid=txid, fee_delta='foo')
self.txouts = gen_return_txouts()
self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
utxo_count = 90
utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count)
base_fee = self.relayfee*100 # our transactions are smaller than 100kb
txids = []
# Create 3 batches of transactions at 3 different fee rate levels
range_size = utxo_count // 3
for i in range(3):
txids.append([])
start_range = i * range_size
end_range = start_range + range_size
txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee)
# Make sure that the size of each group of transactions exceeds
# MAX_BLOCK_BASE_SIZE -- otherwise the test needs to be revised to create
# more transactions.
mempool = self.nodes[0].getrawmempool(True)
sizes = [0, 0, 0]
for i in range(3):
for j in txids[i]:
assert(j in mempool)
sizes[i] += mempool[j]['size']
assert(sizes[i] > MAX_BLOCK_BASE_SIZE) # Fail => raise utxo_count
# add a fee delta to something in the cheapest bucket and make sure it gets mined
# also check that a different entry in the cheapest bucket is NOT mined
self.nodes[0].prioritisetransaction(txid=txids[0][0], fee_delta=int(3*base_fee*COIN))
self.nodes[0].generate(1)
mempool = self.nodes[0].getrawmempool()
self.log.info("Assert that prioritised transaction was mined")
assert(txids[0][0] not in mempool)
assert(txids[0][1] in mempool)
high_fee_tx = None
for x in txids[2]:
if x not in mempool:
high_fee_tx = x
# Something high-fee should have been mined!
assert(high_fee_tx != None)
# Add a prioritisation before a tx is in the mempool (de-prioritising a
# high-fee transaction so that it's now low fee).
self.nodes[0].prioritisetransaction(txid=high_fee_tx, fee_delta=-int(2*base_fee*COIN))
# Add everything back to mempool
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Check to make sure our high fee rate tx is back in the mempool
mempool = self.nodes[0].getrawmempool()
assert(high_fee_tx in mempool)
# Now verify the modified-high feerate transaction isn't mined before
# the other high fee transactions. Keep mining until our mempool has
# decreased by all the high fee size that we calculated above.
while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]):
self.nodes[0].generate(1)
# High fee transaction should not have been mined, but other high fee rate
# transactions should have been.
mempool = self.nodes[0].getrawmempool()
self.log.info("Assert that de-prioritised transaction is still in mempool")
assert(high_fee_tx in mempool)
for x in txids[2]:
if (x != high_fee_tx):
assert(x not in mempool)
# Create a free transaction. Should be rejected.
utxo_list = self.nodes[0].listunspent()
assert(len(utxo_list) > 0)
utxo = utxo_list[0]
inputs = []
outputs = {}
inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[0].getnewaddress()] = utxo["amount"]
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
tx_hex = self.nodes[0].signrawtransactionwithwallet(raw_tx)["hex"]
tx_id = self.nodes[0].decoderawtransaction(tx_hex)["txid"]
# This will raise an exception due to min relay fee not being met
assert_raises_rpc_error(-26, "min relay fee not met", self.nodes[0].sendrawtransaction, tx_hex)
assert(tx_id not in self.nodes[0].getrawmempool())
# This is a less than 1000-byte transaction, so just set the fee
# to be the minimum for a 1000-byte transaction and check that it is
# accepted.
self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=int(self.relayfee*COIN))
self.log.info("Assert that prioritised free transaction is accepted to mempool")
assert_equal(self.nodes[0].sendrawtransaction(tx_hex), tx_id)
assert(tx_id in self.nodes[0].getrawmempool())
# Test that calling prioritisetransaction is sufficient to trigger
# getblocktemplate to (eventually) return a new block.
mock_time = int(time.time())
self.nodes[0].setmocktime(mock_time)
template = self.nodes[0].getblocktemplate()
self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=-int(self.relayfee*COIN))
self.nodes[0].setmocktime(mock_time+10)
new_template = self.nodes[0].getblocktemplate()
assert(template != new_template)
if __name__ == '__main__':
PrioritiseTransactionTest().main()
| 47.588235 | 171 | 0.678478 |
fb6fffc3bcda133808c31cbe47332497317bb31d | 20,346 | py | Python | internals/processes.py | kashish-d/chromium-dashboard | a9960a0de16c169c3c2518d6786510b4dad92e68 | [
"Apache-2.0"
] | null | null | null | internals/processes.py | kashish-d/chromium-dashboard | a9960a0de16c169c3c2518d6786510b4dad92e68 | [
"Apache-2.0"
] | null | null | null | internals/processes.py | kashish-d/chromium-dashboard | a9960a0de16c169c3c2518d6786510b4dad92e68 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from internals import approval_defs
from internals import models
Process = collections.namedtuple(
'Process',
'name, description, applicability, stages')
# Note: A new feature always starts with intent_stage == INTENT_NONE
# regardless of process. intent_stage is set to the first stage of
# a specific process when the user clicks a "Start" button and submits
# a form that sets intent_stage.
ProcessStage = collections.namedtuple(
'ProcessStage',
'name, description, progress_items, actions, approvals, '
'incoming_stage, outgoing_stage')
ProgressItem = collections.namedtuple(
'ProgressItem',
'name, field')
Action = collections.namedtuple(
'Action',
'name, url, prerequisites')
def process_to_dict(process):
"""Return nested dicts for the nested namedtuples of a process."""
# These lines are sort of like a deep version of _asdict().
stages = [stage._asdict() for stage in process.stages]
for stage in stages:
stage['progress_items'] = [pi._asdict() for pi in stage['progress_items']]
stage['actions'] = [act._asdict() for act in stage['actions']]
stage['approvals'] = [appr._asdict() for appr in stage['approvals']]
process_dict = {
'name': process.name,
'description': process.description,
'applicability': process.applicability,
'stages': stages,
}
return process_dict
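# Illustrative usage (an assumption about calling code, not shown in this
# file): the returned structure is plain dicts and lists, so it can be
# serialized directly, e.g. json.dumps(process_to_dict(BLINK_LAUNCH_PROCESS)).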
# This page generates a preview of an email that can be sent
# to a mailing list to announce an intent.
# {feature_id} and {outgoing_stage} are filled in by JS code.
# The param "intent" adds clauses the template to include details
# needed for an intent email. The param "launch" causes those
# details to be omitted and a link to create a launch bug shown instead.
INTENT_EMAIL_URL = ('/admin/features/launch/{feature_id}'
'/{outgoing_stage}'
'?intent=1')
LAUNCH_BUG_TEMPLATE_URL = '/admin/features/launch/{feature_id}?launch=1'
# TODO(jrobbins): Creation of the launch bug has been a TODO for 5 years.
PI_INITIAL_PUBLIC_PROPOSAL = ProgressItem(
'Initial public proposal', 'initial_public_proposal_url')
PI_MOTIVATION = ProgressItem('Motivation', 'motivation')
PI_EXPLAINER = ProgressItem('Explainer', 'explainer_links')
PI_SPEC_LINK = ProgressItem('Spec link', 'spec_link')
PI_SPEC_MENTOR = ProgressItem('Spec mentor', 'spec_mentors')
PI_DRAFT_API_SPEC = ProgressItem('Draft API spec', None)
PI_I2P_EMAIL = ProgressItem(
'Intent to Prototype email', 'intent_to_implement_url')
PI_SAMPLES = ProgressItem('Samples', 'sample_links')
PI_DRAFT_API_OVERVIEW = ProgressItem('Draft API overview (may be on MDN)', None)
PI_REQUEST_SIGNALS = ProgressItem('Request signals', 'safari_views')
PI_SEC_REVIEW = ProgressItem('Security review issues addressed', None)
PI_PRI_REVIEW = ProgressItem('Privacy review issues addressed', None)
# TODO(jrobbins): needs detector.
PI_EXTERNAL_REVIEWS = ProgressItem('External reviews', None)
PI_R4DT_EMAIL = ProgressItem('Ready for Trial email', 'ready_for_trial_url')
PI_TAG_REQUESTED = ProgressItem('TAG review requested', 'tag_review')
PI_VENDOR_SIGNALS = ProgressItem('Vendor signals', 'safari_views')
PI_WEB_DEV_SIGNALS = ProgressItem('Web developer signals', 'web_dev_views')
PI_DOC_LINKS = ProgressItem('Doc links', 'doc_links')
# TODO(jrobbins): needs detector.
PI_DOC_SIGNOFF = ProgressItem('Documentation signoff', None)
PI_EST_TARGET_MILESTONE = ProgressItem(
'Estimated target milestone', 'shipped_milestone')
# TODO(jrobbins): needs detector.
PI_OT_REQUEST = ProgressItem('OT request', None)
# TODO(jrobbins): needs detector.
PI_OT_AVAILABLE = ProgressItem('OT available', None)
# TODO(jrobbins): needs detector.
PI_OT_RESULTS = ProgressItem('OT results', None)
PI_I2E_EMAIL = ProgressItem(
'Intent to Experiment email', 'intent_to_experiment_url')
PI_I2E_LGTMS = ProgressItem('One LGTM on Intent to Experiment', 'i2e_lgtms')
PI_MIGRATE_INCUBATION = ProgressItem('Request to migrate incubation', None)
PI_TAG_ADDRESSED = ProgressItem(
'TAG review issues addressed', 'tag_review_status')
PI_UPDATED_VENDOR_SIGNALS = ProgressItem(
'Updated vendor signals', 'safari_views')
PI_UPDATED_TARGET_MILESTONE = ProgressItem(
'Updated target milestone', 'shipped_milestone')
PI_I2S_EMAIL = ProgressItem('Intent to Ship email', 'intent_to_ship_url')
PI_I2S_LGTMS = ProgressItem('Three LGTMs on Intent to Ship', 'i2s_lgtms')
# TODO(jrobbins): needs detector.
PI_FINAL_VENDOR_SIGNALS = ProgressItem('Finalized vendor signals', 'safari_views')
# TODO(jrobbins): needs detector.
PI_FINAL_TARGET_MILESTONE = ProgressItem(
'Finalized target milestone', 'shipped_milestone')
PI_CODE_IN_CHROMIUM = ProgressItem('Code in Chromium', None)
PI_PSA_EMAIL = ProgressItem('Web facing PSA email', None)
# TODO(jrobbins): needs detector.
PI_DT_REQUEST = ProgressItem('DT request', None)
# TODO(jrobbins): needs detector.
PI_DT_AVAILABLE = ProgressItem('DT available', None)
# TODO(jrobbins): needs detector.
PI_REMOVAL_OF_DT = ProgressItem('Removal of DT', None)
PI_DT_EMAIL = ProgressItem(
'Request for Deprecation Trial email', 'intent_to_experiment_url')
PI_DT_LGTMS = ProgressItem(
'One LGTM on Request for Deprecation Trial', 'i2e_lgtms')
# TODO(jrobbins): needs detector.
PI_EXISTING_FEATURE = ProgressItem('Link to existing feature', None)
PI_CODE_REMOVED = ProgressItem('Code removed', None)
BLINK_PROCESS_STAGES = [
ProcessStage(
'Start incubating',
'Create an initial WebStatus feature entry and kick off standards '
'incubation (WICG) to share ideas.',
[PI_INITIAL_PUBLIC_PROPOSAL,
PI_MOTIVATION,
PI_EXPLAINER,
],
[],
[],
models.INTENT_NONE, models.INTENT_INCUBATE),
ProcessStage(
'Start prototyping',
'Share an explainer doc and API. '
'Start prototyping code in a public repo.',
[PI_SPEC_LINK,
PI_SPEC_MENTOR,
PI_DRAFT_API_SPEC,
PI_I2P_EMAIL,
],
[Action('Draft Intent to Prototype email', INTENT_EMAIL_URL,
[PI_INITIAL_PUBLIC_PROPOSAL.name, PI_MOTIVATION.name,
PI_EXPLAINER.name, PI_SPEC_LINK.name])],
[approval_defs.PrototypeApproval],
models.INTENT_INCUBATE, models.INTENT_IMPLEMENT),
ProcessStage(
'Dev trials and iterate on design',
      'Publicize availability for developers to try. '
'Provide sample code. '
'Request feedback from browser vendors.',
[PI_SAMPLES,
PI_DRAFT_API_OVERVIEW,
PI_REQUEST_SIGNALS,
PI_SEC_REVIEW,
PI_PRI_REVIEW,
PI_EXTERNAL_REVIEWS,
PI_R4DT_EMAIL,
],
[Action('Draft Ready for Trial email', INTENT_EMAIL_URL,
[PI_INITIAL_PUBLIC_PROPOSAL.name, PI_MOTIVATION.name,
PI_EXPLAINER.name, PI_SPEC_LINK.name])],
[],
models.INTENT_IMPLEMENT, models.INTENT_EXPERIMENT),
ProcessStage(
'Evaluate readiness to ship',
'Work through a TAG review and gather vendor signals.',
[PI_TAG_REQUESTED,
PI_VENDOR_SIGNALS,
PI_WEB_DEV_SIGNALS,
PI_DOC_LINKS,
PI_DOC_SIGNOFF,
PI_EST_TARGET_MILESTONE,
],
[],
[],
models.INTENT_EXPERIMENT, models.INTENT_IMPLEMENT_SHIP),
ProcessStage(
'Origin Trial',
'(Optional) Set up and run an origin trial. '
'Act on feedback from partners and web developers.',
[PI_OT_REQUEST,
PI_OT_AVAILABLE,
PI_OT_RESULTS,
PI_I2E_EMAIL,
PI_I2E_LGTMS,
],
[Action('Draft Intent to Experiment email', INTENT_EMAIL_URL,
[PI_INITIAL_PUBLIC_PROPOSAL.name, PI_MOTIVATION.name,
PI_EXPLAINER.name, PI_SPEC_LINK.name,
PI_EST_TARGET_MILESTONE.name])],
[approval_defs.ExperimentApproval],
models.INTENT_IMPLEMENT_SHIP, models.INTENT_EXTEND_TRIAL),
ProcessStage(
'Prepare to ship',
'Lock in shipping milestone. Finalize docs and announcements. '
'Further standardization.',
[PI_MIGRATE_INCUBATION,
PI_TAG_ADDRESSED,
PI_UPDATED_VENDOR_SIGNALS,
PI_UPDATED_TARGET_MILESTONE,
PI_I2S_EMAIL,
PI_I2S_LGTMS,
],
[Action('Draft Intent to Ship email', INTENT_EMAIL_URL,
[PI_INITIAL_PUBLIC_PROPOSAL.name, PI_MOTIVATION.name,
PI_EXPLAINER.name, PI_SPEC_LINK.name,
PI_TAG_ADDRESSED.name, PI_UPDATED_VENDOR_SIGNALS.name,
PI_UPDATED_TARGET_MILESTONE.name])],
[approval_defs.ShipApproval],
models.INTENT_IMPLEMENT_SHIP, models.INTENT_SHIP),
ProcessStage(
'Ship',
'Update milestones and other information when the feature '
'actually ships.',
[PI_FINAL_VENDOR_SIGNALS,
PI_FINAL_TARGET_MILESTONE,
],
[],
[],
models.INTENT_SHIP, models.INTENT_SHIPPED),
]
BLINK_LAUNCH_PROCESS = Process(
'New feature incubation',
'Description of blink launch process', # Not used yet.
'When to use it', # Not used yet.
BLINK_PROCESS_STAGES)
BLINK_FAST_TRACK_STAGES = [
ProcessStage(
'Start prototyping',
'Write up use cases and scenarios, start coding as a '
'runtime enabled feature.',
[PI_SPEC_LINK,
PI_CODE_IN_CHROMIUM,
],
[Action('Draft Intent to Prototype email', INTENT_EMAIL_URL,
[PI_SPEC_LINK.name])],
[approval_defs.PrototypeApproval],
models.INTENT_NONE, models.INTENT_IMPLEMENT),
ProcessStage(
'Dev trials and iterate on implementation',
      'Publicize availability for developers to try. '
'Provide sample code. '
'Act on feedback from partners and web developers.',
[PI_SAMPLES,
PI_DRAFT_API_OVERVIEW,
PI_R4DT_EMAIL,
PI_VENDOR_SIGNALS,
PI_EST_TARGET_MILESTONE,
],
[Action('Draft Ready for Trial email', INTENT_EMAIL_URL,
[PI_SPEC_LINK.name, PI_EST_TARGET_MILESTONE.name])],
[],
models.INTENT_IMPLEMENT, models.INTENT_EXPERIMENT),
ProcessStage(
'Origin Trial',
'(Optional) Set up and run an origin trial. '
'Act on feedback from partners and web developers.',
[PI_OT_REQUEST,
PI_OT_AVAILABLE,
PI_OT_RESULTS,
PI_I2E_EMAIL,
PI_I2E_LGTMS,
],
[Action('Draft Intent to Experiment email', INTENT_EMAIL_URL,
[PI_SPEC_LINK.name, PI_EST_TARGET_MILESTONE.name])],
[approval_defs.ExperimentApproval],
models.INTENT_EXPERIMENT, models.INTENT_EXTEND_TRIAL),
ProcessStage(
'Prepare to ship',
'Lock in shipping milestone. Finalize docs and announcements. '
'Further standardization.',
[PI_DOC_SIGNOFF,
PI_UPDATED_TARGET_MILESTONE,
PI_I2S_EMAIL,
PI_I2S_LGTMS,
],
[Action('Draft Intent to Ship email', INTENT_EMAIL_URL,
[PI_SPEC_LINK.name, PI_UPDATED_TARGET_MILESTONE.name])],
[approval_defs.ShipApproval],
models.INTENT_EXPERIMENT, models.INTENT_SHIP),
ProcessStage(
'Ship',
'Update milestones and other information when the feature '
'actually ships.',
[PI_FINAL_VENDOR_SIGNALS,
PI_FINAL_TARGET_MILESTONE,
],
[],
[],
models.INTENT_SHIP, models.INTENT_SHIPPED),
]
BLINK_FAST_TRACK_PROCESS = Process(
'Existing feature implementation',
'Description of blink fast track process', # Not used yet.
'When to use it', # Not used yet.
BLINK_FAST_TRACK_STAGES)
PSA_ONLY_STAGES = [
ProcessStage(
'Implement',
'Check code into Chromium under a flag.',
[PI_SPEC_LINK,
PI_CODE_IN_CHROMIUM,
],
[],
[],
models.INTENT_NONE, models.INTENT_IMPLEMENT),
ProcessStage(
'Dev trials and iterate on implementation',
      '(Optional) Publicize availability for developers to try. '
'Act on feedback from partners and web developers.',
[PI_R4DT_EMAIL,
PI_VENDOR_SIGNALS,
PI_EST_TARGET_MILESTONE,
],
[Action('Draft Ready for Trial email', INTENT_EMAIL_URL,
[PI_SPEC_LINK.name, PI_EST_TARGET_MILESTONE.name])],
[],
models.INTENT_IMPLEMENT, models.INTENT_EXPERIMENT),
ProcessStage(
'Prepare to ship',
'Lock in shipping milestone.',
[PI_PSA_EMAIL,
PI_UPDATED_TARGET_MILESTONE,
PI_I2S_EMAIL,
],
[Action('Draft Intent to Ship email', INTENT_EMAIL_URL,
[PI_SPEC_LINK.name, PI_UPDATED_TARGET_MILESTONE.name])],
[approval_defs.ShipApproval],
models.INTENT_EXPERIMENT, models.INTENT_SHIP),
ProcessStage(
'Ship',
'Update milestones and other information when the feature '
'actually ships.',
[PI_FINAL_VENDOR_SIGNALS,
PI_FINAL_TARGET_MILESTONE,
],
[],
[],
models.INTENT_SHIP, models.INTENT_SHIPPED),
]
PSA_ONLY_PROCESS = Process(
'Web developer facing change to existing code',
'Description of PSA process', # Not used yet.
'When to use it', # Not used yet.
PSA_ONLY_STAGES)
DEPRECATION_STAGES = [
ProcessStage(
'Write up motivation',
'Create an initial WebStatus feature entry to deprecate '
'an existing feature, including motivation and impact. '
'Then, move existing Chromium code under a flag.',
[PI_EXISTING_FEATURE,
PI_MOTIVATION,
],
[Action('Draft Intent to Deprecate and Remove email', INTENT_EMAIL_URL,
[PI_MOTIVATION.name])],
[approval_defs.PrototypeApproval],
models.INTENT_NONE, models.INTENT_IMPLEMENT),
# TODO(cwilso): Work out additional steps for flag defaulting to disabled.
ProcessStage(
'Dev trial of deprecation',
'Publicize deprecation and address risks. ',
[PI_R4DT_EMAIL,
PI_VENDOR_SIGNALS,
PI_EST_TARGET_MILESTONE,
],
[Action('Draft Ready for Trial email', INTENT_EMAIL_URL,
[PI_MOTIVATION.name, PI_VENDOR_SIGNALS.name,
PI_EST_TARGET_MILESTONE.name])],
[],
models.INTENT_IMPLEMENT, models.INTENT_EXPERIMENT),
ProcessStage(
'Prepare for Deprecation Trial',
'(Optional) Set up and run a deprecation trial. ',
[PI_DT_REQUEST,
PI_DT_AVAILABLE,
PI_REMOVAL_OF_DT,
PI_DT_EMAIL,
PI_DT_LGTMS,
],
[Action('Draft Request for Deprecation Trial email', INTENT_EMAIL_URL,
[PI_MOTIVATION.name, PI_VENDOR_SIGNALS.name,
PI_EST_TARGET_MILESTONE.name])],
# TODO(jrobbins): Intent to extend deprecation.
[approval_defs.ExperimentApproval],
models.INTENT_EXPERIMENT, models.INTENT_EXTEND_TRIAL),
ProcessStage(
'Prepare to ship',
'Lock in shipping milestone. '
'Finalize docs and announcements before disabling feature by default.',
[PI_UPDATED_TARGET_MILESTONE,
PI_I2S_EMAIL,
PI_I2S_LGTMS,
],
[Action('Draft Intent to Ship email', INTENT_EMAIL_URL,
[PI_MOTIVATION.name, PI_VENDOR_SIGNALS.name,
PI_UPDATED_TARGET_MILESTONE.name])],
[approval_defs.ShipApproval],
models.INTENT_EXPERIMENT, models.INTENT_SHIP),
ProcessStage(
'Remove code',
'Once the feature is no longer available, remove the code.',
[PI_CODE_REMOVED,
],
[Action('Generate an Intent to Extend Deprecation Trial',
INTENT_EMAIL_URL,
[PI_MOTIVATION.name, PI_VENDOR_SIGNALS.name,
PI_UPDATED_TARGET_MILESTONE.name]),
],
[],
models.INTENT_SHIP, models.INTENT_REMOVED),
]
DEPRECATION_PROCESS = Process(
'Feature deprecation',
'Description of deprecation process', # Not used yet.
'When to use it', # Not used yet.
DEPRECATION_STAGES)
ALL_PROCESSES = {
models.FEATURE_TYPE_INCUBATE_ID: BLINK_LAUNCH_PROCESS,
models.FEATURE_TYPE_EXISTING_ID: BLINK_FAST_TRACK_PROCESS,
models.FEATURE_TYPE_CODE_CHANGE_ID: PSA_ONLY_PROCESS,
models.FEATURE_TYPE_DEPRECATION_ID: DEPRECATION_PROCESS,
}
INTENT_EMAIL_SECTIONS = {
models.INTENT_NONE: [],
models.INTENT_INCUBATE: [],
models.INTENT_IMPLEMENT: ['motivation'],
models.INTENT_EXPERIMENT: ['i2p_thread', 'experiment'],
models.INTENT_IMPLEMENT_SHIP: [
'need_api_owners_lgtms', 'motivation', 'tracking_bug', 'sample_links'],
models.INTENT_EXTEND_TRIAL: [
'i2p_thread', 'experiment', 'extension_reason'],
models.INTENT_SHIP: [
'need_api_owners_lgtms', 'i2p_thread', 'tracking_bug', 'sample_links'],
models.INTENT_REMOVED: [],
models.INTENT_SHIPPED: [],
models.INTENT_PARKED: [],
}
def initial_tag_review_status(feature_type):
"""Incubating a new feature requires a TAG review, other types do not."""
if feature_type == models.FEATURE_TYPE_INCUBATE_ID:
return models.REVIEW_PENDING
return models.REVIEW_NA
def review_is_done(status):
return status in (models.REVIEW_ISSUES_ADDRESSED, models.REVIEW_NA)
# These functions return a true value when the checkmark should be shown.
# If they return a string, and it starts with "http:" or "https:", it will
# be used as a link URL.
PROGRESS_DETECTORS = {
'Initial public proposal':
lambda f: f.initial_public_proposal_url,
'Explainer':
lambda f: f.explainer_links and f.explainer_links[0],
'Security review issues addressed':
lambda f: review_is_done(f.security_review_status),
'Privacy review issues addressed':
lambda f: review_is_done(f.privacy_review_status),
'Intent to Prototype email':
lambda f: f.intent_to_implement_url,
'Intent to Ship email':
lambda f: f.intent_to_ship_url,
'Ready for Trial email':
lambda f: f.ready_for_trial_url,
'Intent to Experiment email':
lambda f: f.intent_to_experiment_url,
'One LGTM on Intent to Experiment':
lambda f: f.i2e_lgtms,
'One LGTM on Request for Deprecation Trial':
lambda f: f.i2e_lgtms,
'Three LGTMs on Intent to Ship':
lambda f: f.i2s_lgtms and len(f.i2s_lgtms) >= 3,
'Samples':
lambda f: f.sample_links and f.sample_links[0],
'Doc links':
lambda f: f.doc_links and f.doc_links[0],
'Spec link':
lambda f: f.spec_link,
'Draft API spec':
lambda f: f.spec_link,
'API spec':
lambda f: f.api_spec,
'Spec mentor':
lambda f: f.spec_mentors,
'TAG review requested':
lambda f: f.tag_review,
'TAG review issues addressed':
lambda f: review_is_done(f.tag_review_status),
'Web developer signals':
lambda f: bool(f.web_dev_views and
f.web_dev_views != models.DEV_NO_SIGNALS),
'Vendor signals':
lambda f: bool(
f.ff_views != models.NO_PUBLIC_SIGNALS or
f.safari_views != models.NO_PUBLIC_SIGNALS or
f.ie_views != models.NO_PUBLIC_SIGNALS), # IE Deprecated
'Updated vendor signals':
lambda f: bool(
f.ff_views != models.NO_PUBLIC_SIGNALS or
f.safari_views != models.NO_PUBLIC_SIGNALS or
f.ie_views != models.NO_PUBLIC_SIGNALS), # IE Deprecated
'Final vendor signals':
lambda f: bool(
f.ff_views != models.NO_PUBLIC_SIGNALS or
f.safari_views != models.NO_PUBLIC_SIGNALS or
f.ie_views != models.NO_PUBLIC_SIGNALS), # IE Deprecated
'Estimated target milestone':
lambda f: bool(f.shipped_milestone),
'Final target milestone':
lambda f: bool(f.shipped_milestone),
'Code in Chromium':
lambda f: f.impl_status_chrome in (
models.IN_DEVELOPMENT, models.BEHIND_A_FLAG, models.ENABLED_BY_DEFAULT,
models.ORIGIN_TRIAL, models.INTERVENTION),
'Motivation':
lambda f: bool(f.motivation),
'Code removed':
lambda f: f.impl_status_chrome == models.REMOVED,
}
| 32.869144 | 82 | 0.690553 |
794fb7414b331c197d49a8a69b78efe486131ddc | 679 | py | Python | interpretImage.py | BlanchonMarc/RandomImageGenerator | fd684c8f27d0c7eeec66cd2521d482a8405dd097 | [
"MIT"
] | null | null | null | interpretImage.py | BlanchonMarc/RandomImageGenerator | fd684c8f27d0c7eeec66cd2521d482a8405dd097 | [
"MIT"
] | null | null | null | interpretImage.py | BlanchonMarc/RandomImageGenerator | fd684c8f27d0c7eeec66cd2521d482a8405dd097 | [
"MIT"
] | null | null | null | import numpy as np
import glob
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import matplotlib.colors as color
import math
def ParamToInten(AoP, DoP, Inten, angle):
return ((Inten/2.0) * (1 + DoP*np.cos(math.radians(2*AoP) - 2*math.radians(angle))))
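# Worked check of the formula above (illustrative): with AoP = 90 degrees,
# DoP = 1, Inten = 1 and the analyser at angle = 0, the cosine term is
# cos(pi) = -1, so ParamToInten(90, 1.0, 1.0, 0) evaluates to 0.0, i.e. a
# fully polarised signal is extinguished by a crossed analyser.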
if __name__ == "__main__":
imagedir = "output/image/"
listimage = glob.glob(f"{imagedir}*.tiff")
for pth in listimage:
img = color.rgb_to_hsv(mpimg.imread(pth))
#array = np.zeros_like(img)
AoP = img[:, :, 0] * 360.0
DoP = img[:, :, 1] * 100.0
Inten = img[:, :, 2] / 255.0
print(np.amax(AoP))
# plt.imshow(img)
# plt.show()
| 24.25 | 88 | 0.600884 |
99e4a730889e2ac3b6f61c042d34495f5004f202 | 13 | py | Python | py2app_tests/basic_app_with_encoding/package1/__init__.py | flupke/py2app | 8eb6c618f9c63d6ac970fb145a7f7782b71bcb4d | [
"MIT"
] | 193 | 2020-01-15T09:34:20.000Z | 2022-03-18T19:14:16.000Z | py2app_tests/basic_app_with_encoding/package1/__init__.py | flupke/py2app | 8eb6c618f9c63d6ac970fb145a7f7782b71bcb4d | [
"MIT"
] | 185 | 2020-01-15T08:38:27.000Z | 2022-03-27T17:29:29.000Z | py2app_tests/basic_app_with_encoding/package1/__init__.py | flupke/py2app | 8eb6c618f9c63d6ac970fb145a7f7782b71bcb4d | [
"MIT"
] | 23 | 2020-01-24T14:47:18.000Z | 2022-02-22T17:19:47.000Z | " package1 "
| 6.5 | 12 | 0.615385 |
88a8bf1323d63c9c47c78732b69ddb256d14bb3e | 309 | py | Python | data/multilingual/Cyrl.OSS/Sun-ExtA_8/pdf_to_json_test_Cyrl.OSS_Sun-ExtA_8.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | 1 | 2021-09-19T19:47:35.000Z | 2021-09-19T19:47:35.000Z | data/multilingual/Cyrl.OSS/Sun-ExtA_8/pdf_to_json_test_Cyrl.OSS_Sun-ExtA_8.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | data/multilingual/Cyrl.OSS/Sun-ExtA_8/pdf_to_json_test_Cyrl.OSS_Sun-ExtA_8.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Cyrl.OSS/Sun-ExtA_8/udhr_Cyrl.OSS_Sun-ExtA_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.9 | 79 | 0.809061 |
52252a3abcbf5648a7b6961ea4a32f2ac57132cc | 295 | py | Python | python-leetcode/20.py | MDGSF/interviews | 9faa9aacdb0cfbb777d4d3d4d1b14b55ca2c9f76 | [
"MIT"
] | 12 | 2020-01-16T08:55:27.000Z | 2021-12-02T14:52:39.000Z | python-leetcode/20.py | MDGSF/interviews | 9faa9aacdb0cfbb777d4d3d4d1b14b55ca2c9f76 | [
"MIT"
] | null | null | null | python-leetcode/20.py | MDGSF/interviews | 9faa9aacdb0cfbb777d4d3d4d1b14b55ca2c9f76 | [
"MIT"
] | 1 | 2019-12-11T12:00:38.000Z | 2019-12-11T12:00:38.000Z | class Solution:
def isValid(self, s: str) -> bool:
stack = []
for c in s:
if c == '(': stack.append(')')
elif c == '[': stack.append(']')
elif c == '{': stack.append('}')
elif len(stack) == 0 or c != stack.pop():
return False
return len(stack) == 0
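if __name__ == '__main__':
    # Illustrative usage (added example, not part of the original submission):
    s = Solution()
    print(s.isValid("()[]{}"))  # True: every opener is matched in order
    print(s.isValid("(]"))      # False: ']' does not match the expected ')'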
| 26.818182 | 47 | 0.484746 |
f24b6848f3d175d16eba412042fe265bf2c613aa | 651 | py | Python | sync_example.py | tmjvonboss/PasteBunny | 96f0be63c2abfc6911e1e5f52a49e9215555b369 | [
"MIT"
] | null | null | null | sync_example.py | tmjvonboss/PasteBunny | 96f0be63c2abfc6911e1e5f52a49e9215555b369 | [
"MIT"
] | 1 | 2018-03-31T13:04:12.000Z | 2018-03-31T13:04:12.000Z | sync_example.py | tmjvonboss/PasteBunny | 96f0be63c2abfc6911e1e5f52a49e9215555b369 | [
"MIT"
] | null | null | null | from PasteBunny.Client import SyncClient
from PasteBunny.Models import Paste
from PasteBunny.Constants import Privacy, Expiry, Format
# Create client, add username and password only to automatically generate a user key
c = SyncClient(
dev_key="dev_key",
username="user",
password="pass"
)
# Create a Paste object
paste = Paste(
code="Finally a library for Pastebin in Python 3 I like!\n3bunny5you",
name="PasteBunnny test",
_format=Format.NAN,
expiry=Expiry.HOUR,
privacy=Privacy.PUBLIC
)
# Actually create a paste on Pastebin
c.create_paste(paste)
# Print the url
print(paste.get_url())
| 24.111111 | 85 | 0.711214 |
3ad2d4aed4828fd0fd524bb942e4b412eb0c2277 | 3,812 | py | Python | kubernetes/client/models/v1_event_source.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | 1 | 2018-10-20T19:37:57.000Z | 2018-10-20T19:37:57.000Z | kubernetes/client/models/v1_event_source.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_event_source.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | 2 | 2018-07-27T19:39:34.000Z | 2020-12-25T02:48:27.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1EventSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'component': 'str',
'host': 'str'
}
attribute_map = {
'component': 'component',
'host': 'host'
}
def __init__(self, component=None, host=None):
"""
V1EventSource - a model defined in Swagger
"""
self._component = None
self._host = None
self.discriminator = None
if component is not None:
self.component = component
if host is not None:
self.host = host
@property
def component(self):
"""
Gets the component of this V1EventSource.
Component from which the event is generated.
:return: The component of this V1EventSource.
:rtype: str
"""
return self._component
@component.setter
def component(self, component):
"""
Sets the component of this V1EventSource.
Component from which the event is generated.
:param component: The component of this V1EventSource.
:type: str
"""
self._component = component
@property
def host(self):
"""
Gets the host of this V1EventSource.
Node name on which the event is generated.
:return: The host of this V1EventSource.
:rtype: str
"""
return self._host
@host.setter
def host(self, host):
"""
Sets the host of this V1EventSource.
Node name on which the event is generated.
:param host: The host of this V1EventSource.
:type: str
"""
self._host = host
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1EventSource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 24.593548 | 105 | 0.54276 |
e1586c36e10149bc0c75d57ec76c946087950c55 | 181 | py | Python | src/me/ch2/bingocard.py | banbiossa/book-rust | 209965a90767927bd2d909e4de18fe6f1e6ea729 | [
"MIT"
] | null | null | null | src/me/ch2/bingocard.py | banbiossa/book-rust | 209965a90767927bd2d909e4de18fe6f1e6ea729 | [
"MIT"
] | null | null | null | src/me/ch2/bingocard.py | banbiossa/book-rust | 209965a90767927bd2d909e4de18fe6f1e6ea729 | [
"MIT"
] | null | null | null | import random
nums = list(range(1, 75 + 1))
random.shuffle(nums)
nums[12] = " *"
for y in range(5):
for x in range(5):
print(f"{nums[y*5+x]:3}", end="")
print("") | 18.1 | 41 | 0.541436 |
4ec5558f973e513da895d99fdb4b68cfb21c953c | 13,654 | py | Python | src/main/python/rlbot/agents/base_agent.py | robbai/RLBot | 2cf0abbc028d1be9b2a2d063e2088231074cc881 | [
"MIT"
] | 1 | 2020-01-21T15:37:22.000Z | 2020-01-21T15:37:22.000Z | BeardBot/venv/Lib/site-packages/rlbot/agents/base_agent.py | Bearedy/BeardBot | cc31ae663bdf9ebf7cc0c41f7d4cff0d33064da0 | [
"MIT"
] | null | null | null | BeardBot/venv/Lib/site-packages/rlbot/agents/base_agent.py | Bearedy/BeardBot | cc31ae663bdf9ebf7cc0c41f7d4cff0d33064da0 | [
"MIT"
] | null | null | null | from typing import Optional
from urllib.parse import ParseResult as URL
from rlbot.botmanager.helper_process_request import HelperProcessRequest
from rlbot.matchcomms.client import MatchcommsClient
from rlbot.messages.flat import MatchSettings
from rlbot.parsing.custom_config import ConfigObject
from rlbot.utils.game_state_util import GameState
from rlbot.utils.logging_utils import get_logger
from rlbot.utils.rendering.rendering_manager import RenderingManager
from rlbot.utils.structures.ball_prediction_struct import BallPrediction
from rlbot.utils.structures.game_data_struct import GameTickPacket, FieldInfoPacket
from rlbot.utils.structures.legacy_data_v3 import convert_to_legacy_v3
from rlbot.utils.structures.quick_chats import QuickChats
from rlbot.utils.structures.rigid_body_struct import RigidBodyTick
BOT_CONFIG_MODULE_HEADER = 'Locations'
BOT_CONFIG_AGENT_HEADER = 'Bot Parameters'
BOT_CONFIG_DETAILS_HEADER = 'Details'
PYTHON_FILE_KEY = 'python_file'
LOGO_FILE_KEY = 'logo_file'
LOOKS_CONFIG_KEY = 'looks_config'
BOT_NAME_KEY = "name"
SUPPORTS_EARLY_START_KEY = "supports_early_start"
MAXIMUM_TICK_RATE_PREFERENCE_KEY = "maximum_tick_rate_preference"
class SimpleControllerState:
"""
Building flatbuffer objects is verbose and error prone. This class provides a friendlier
interface to bot makers.
"""
def __init__(self,
steer: float = 0.0,
throttle: float = 0.0,
pitch: float = 0.0,
yaw: float = 0.0,
roll: float = 0.0,
jump: bool = False,
boost: bool = False,
handbrake: bool = False,
use_item: bool = False):
"""
:param steer: Range: -1 .. 1, negative=left, positive=right
:param throttle: Range: -1 .. 1, negative=backward, positive=forward
:param pitch: Range: -1 .. 1, negative=nose-down, positive=nose-up
:param yaw: Range: -1 .. 1, negative=nose-left, positive=nose-right
:param roll: Range: -1 .. 1, negative=anticlockwise, positive=clockwise (when looking forwards along the car)
:param jump: Analogous to the jump button in game.
:param boost: Analogous to the boost button in game.
:param handbrake: Analogous to the handbrake button in game.
:param use_item: Analogous to the use item button (from rumble) in game.
"""
self.steer = steer
self.throttle = throttle
self.pitch = pitch
self.yaw = yaw
self.roll = roll
self.jump = jump
self.boost = boost
self.handbrake = handbrake
self.use_item = use_item
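# Illustrative sketch (not part of the original API): a bot's get_output()
# typically returns a state like this to drive forward-left while boosting.
def _example_controller_state() -> SimpleControllerState:
    # All analog values below respect the documented -1 .. 1 ranges.
    return SimpleControllerState(throttle=1.0, steer=-0.3, boost=True)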
class BaseAgent:
# the name of the bot fixed for any duplicates that may occur
name = None
# 'team' is an integer: 0 towards positive goal, 1 towards negative goal.
# 0 is blue team, 1 is orange team
team = None
# 'index' is an integer: it is index at which the bot appears inside game_tick_packet.game_cars
index = None
# passed in by the bot manager
__quick_chat_func = None
__field_info_func = None
__game_state_func = None
__get_rigid_body_tick_func = None
__match_settings_func = None
renderer: RenderingManager = None
matchcomms_root: URL = None
def __init__(self, name, team, index):
self.name = name
self.team = team
self.index = index
self.logger = get_logger(f'bot{index}')
def init_match_config(self, match_config: 'MatchConfig'):
"""
Override this method if you would like to be informed of what config was used to start the match.
Useful for knowing what map you're on, mutators, etc.
"""
pass
def get_output(self, game_tick_packet: GameTickPacket) -> SimpleControllerState:
"""
        This is where your bot's logic runs: it receives the latest game state and returns its controller output.
:param game_tick_packet: see https://github.com/drssoccer55/RLBot/wiki/Input-and-Output-Data-(current)
:return: [throttle, steer, pitch, yaw, roll, jump, boost, handbrake]
"""
return SimpleControllerState()
def send_quick_chat(self, team_only, quick_chat):
"""
Sends a quick chat to the other bots.
If it is QuickChats.CHAT_NONE or None it does not send a quick chat to other bots.
:param team_only: either True or False, this says if the quick chat should only go to team members.
:param quick_chat: The quick chat selection, available chats are defined in quick_chats.py
"""
if quick_chat == QuickChats.CHAT_NONE or quick_chat is None:
return
self.__quick_chat_func(team_only, quick_chat)
def handle_quick_chat(self, index, team, quick_chat):
"""
Handles a quick chat from another bot.
This will not receive any chats that this bot sends out.
Currently does nothing, override to add functionality.
        :param index: The index in the list of game cars of the player that sent the quick chat
:param team: Which team this player is on
:param quick_chat: What the quick chat selection was
"""
pass
def get_field_info(self):
"""Gets the information about the field.
        This does not change during a match so it only needs to be called once after everything is loaded."""
return self.__field_info_func()
def get_rigid_body_tick(self) -> RigidBodyTick:
"""Get the most recent state of the physics engine."""
return self.__get_rigid_body_tick_func()
def set_game_state(self, game_state: GameState):
"""CHEAT: Change the rocket league game to the given game_state"""
self.__game_state_func(game_state)
def get_ball_prediction(self):
"""DEPRECATED! Please use get_ball_prediction_struct instead, because this is going away soon!"""
return self.__ball_prediction_func()
def get_ball_prediction_struct(self) -> BallPrediction:
"""Fetches a prediction of where the ball will go during the next few seconds."""
return self.__ball_prediction_struct_func()
def get_match_settings(self) -> MatchSettings:
"""Gets the current match settings in flatbuffer format. Useful for determining map, game mode,
mutator settings, etc."""
return self.__match_settings_func()
_matchcomms: Optional[MatchcommsClient] = None
@property
def matchcomms(self) -> MatchcommsClient:
"""
        Gets a client to send and receive messages to and from other participants in the match (e.g. bots, trainer)
"""
if self._matchcomms is None:
self._matchcomms = MatchcommsClient(self.matchcomms_root)
return self._matchcomms # note: _matchcomms.close() is called by the bot_manager.
def load_config(self, config_object_header):
"""
        Loads a config object. This is called after the constructor but before anything else inside the bot.
        :param config_object_header: A config object that has headers and values for custom agent configuration.
"""
pass
def initialize_agent(self):
"""
        Called for all heavier initialization that needs to happen.
        The config is fully loaded at this point.
"""
pass
def get_extra_pids(self):
"""
Gets the list of process ids that should be marked as high priority.
:return: A list of process ids that are used by this bot in addition to the ones inside the python process.
"""
return []
def retire(self):
"""Called after the game ends"""
pass
def get_helper_process_request(self) -> HelperProcessRequest:
"""
If your bot needs a helper process which can be shared, e.g. with other bots of the same type,
you may override this to return a HelperProcessRequest.
"""
return None
@staticmethod
def create_agent_configurations(config: ConfigObject):
"""
If your bot needs to add custom configurations, you may override this and use the `config` object.
:param config: A ConfigObject instance.
"""
pass
def convert_output_to_v4(self, controller_input):
"""Converts a v3 output to a v4 controller state"""
player_input = SimpleControllerState()
player_input.throttle = controller_input[0]
player_input.steer = controller_input[1]
player_input.pitch = controller_input[2]
player_input.yaw = controller_input[3]
player_input.roll = controller_input[4]
player_input.jump = controller_input[5]
player_input.boost = controller_input[6]
player_input.handbrake = controller_input[7]
return player_input
def convert_packet_to_v3(self, game_tick_packet: GameTickPacket, field_info_packet: FieldInfoPacket = None):
"""Converts the current game tick packet to v3
        :param game_tick_packet: a game tick packet in the v4 struct format.
        :param field_info_packet: a field info packet in the v4 struct format. Optional. If this is not supplied,
none of the boost locations will be filled in.
:return: A v3 version of the game tick packet"""
return convert_to_legacy_v3(game_tick_packet, field_info_packet)
def is_hot_reload_enabled(self):
"""
If true, the framework will watch all your python files for modifications and instantly reload your bot
so that the logic changes take effect. You may wish to disable this if you're concerned about performance.
"""
return True
############
# Methods that should not be called or changed by subclasses
############
def _register_quick_chat(self, quick_chat_func):
"""
Registers the send quick chat function.
This should not be overwritten by the agent.
"""
self.__quick_chat_func = quick_chat_func
def _register_field_info(self, field_info_func):
"""
Sets the function to grab field information from the interface.
This should not be overwritten by the agent.
"""
self.__field_info_func = field_info_func
def _register_get_rigid_body_tick(self, get_rigid_body_tick_func):
self.__get_rigid_body_tick_func = get_rigid_body_tick_func
def _register_set_game_state(self, game_state_func):
self.__game_state_func = game_state_func
def _register_ball_prediction(self, ball_prediction_func):
"""
Deprecated. __ball_prediction_struct_func will be used instead.
Sets the function to grab ball predictions from the interface.
This should not be overwritten by the agent.
"""
self.__ball_prediction_func = ball_prediction_func
def _register_ball_prediction_struct(self, ball_prediction_func):
"""
Sets the function to grab ball predictions from the interface.
This should not be overwritten by the agent.
"""
self.__ball_prediction_struct_func = ball_prediction_func
def _register_match_settings_func(self, match_settings_func):
"""
Sets the function to grab match settings from the interface.
This should not be overwritten by the agent.
"""
self.__match_settings_func = match_settings_func
def _set_renderer(self, renderer: RenderingManager):
self.renderer = renderer
# Information about @classmethod: https://docs.python.org/3/library/functions.html#classmethod
@classmethod
def base_create_agent_configurations(cls) -> ConfigObject:
"""
This is used when initializing agent config via builder pattern.
It also calls `create_agent_configurations` that can be used by BaseAgent subclasses for custom configs.
:return: Returns an instance of a ConfigObject object.
"""
config = ConfigObject()
location_config = config.add_header_name(BOT_CONFIG_MODULE_HEADER)
location_config.add_value(LOOKS_CONFIG_KEY, str,
description='Path to loadout config from runner')
location_config.add_value(PYTHON_FILE_KEY, str,
description="Bot's python file.\nOnly need this if RLBot controlled")
location_config.add_value(BOT_NAME_KEY, str, default='nameless',
description='The name that will be displayed in game')
location_config.add_value(LOGO_FILE_KEY, str,
description="Location of an image file to use as your bot's logo")
location_config.add_value(SUPPORTS_EARLY_START_KEY, bool,
description="True if this bot can be started before the Rocket League match begins.")
location_config.add_value(MAXIMUM_TICK_RATE_PREFERENCE_KEY, int, default=60,
description="The maximum number of ticks per second that your bot wishes to receive.")
details_config = config.add_header_name(BOT_CONFIG_DETAILS_HEADER)
details_config.add_value('developer', str, description="Name of the bot's creator/developer")
details_config.add_value('description', str, description="Short description of the bot")
details_config.add_value('fun_fact', str, description="Fun fact about the bot")
details_config.add_value('github', str, description="Link to github repository")
details_config.add_value('language', str, description="Programming language")
cls.create_agent_configurations(config)
return config
| 43.072555 | 122 | 0.681559 |
735f29d1b61debb3a35e86c391cfbf660be0a9ac | 805 | py | Python | test/functional/rpc_uptime.py | synergy-promotions/baddcoin | e536d224874628a44c316d33ac7f2373dd56beaf | [
"MIT"
] | null | null | null | test/functional/rpc_uptime.py | synergy-promotions/baddcoin | e536d224874628a44c316d33ac7f2373dd56beaf | [
"MIT"
] | null | null | null | test/functional/rpc_uptime.py | synergy-promotions/baddcoin | e536d224874628a44c316d33ac7f2373dd56beaf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC call related to the uptime command.
Test corresponds to code in rpc/server.cpp.
"""
import time
from test_framework.test_framework import BaddcoinTestFramework
class UptimeTest(BaddcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
self._test_uptime()
def _test_uptime(self):
wait_time = 10
self.nodes[0].setmocktime(int(time.time() + wait_time))
assert self.nodes[0].uptime() >= wait_time
if __name__ == '__main__':
UptimeTest().main()
| 25.967742 | 69 | 0.710559 |
e3bba53e3b8d1714ef36e75ad03410640d826bad | 5,134 | py | Python | nova/tests/functional/test_list_servers_ip_filter.py | nfvri/nova | 2ce5a440c44eb512f07adacd313304e226bb56a0 | [
"Apache-2.0"
] | 1 | 2016-07-18T22:05:01.000Z | 2016-07-18T22:05:01.000Z | nova/tests/functional/test_list_servers_ip_filter.py | nfvri/nova | 2ce5a440c44eb512f07adacd313304e226bb56a0 | [
"Apache-2.0"
] | null | null | null | nova/tests/functional/test_list_servers_ip_filter.py | nfvri/nova | 2ce5a440c44eb512f07adacd313304e226bb56a0 | [
"Apache-2.0"
] | 1 | 2021-11-12T03:55:41.000Z | 2021-11-12T03:55:41.000Z | # Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import nova.scheduler.utils
import nova.servicegroup
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional import fixtures as func_fixtures
from nova.tests.unit import cast_as_call
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
class TestListServersIpFilter(test.TestCase):
def setUp(self):
super(TestListServersIpFilter, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
self.neutron = self.useFixture(
nova_fixtures.NeutronFixture(self))
# Add a 2nd port to the neutron fixture to have multiple ports
self.neutron.create_port(self.neutron.port_2)
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.api
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
self.useFixture(func_fixtures.PlacementFixture())
self.start_service('conductor')
self.flags(enabled_filters=['ComputeFilter'],
group='filter_scheduler')
self.start_service('scheduler')
self.start_service('compute')
self.useFixture(cast_as_call.CastAsCall(self))
self.image_id = self.api.get_images()[0]['id']
self.flavor_id = self.api.get_flavors()[0]['id']
def wait_until_active_or_timeout(self, server_id):
timeout = 0.0
server = self.api.get_server(server_id)
while server['status'] != "ACTIVE" and timeout < 10.0:
time.sleep(.1)
timeout += .1
server = self.api.get_server(server_id)
if server['status'] != "ACTIVE":
self.fail(
'Timed out waiting for server %s to be ACTIVE.' % server_id)
return server
def test_list_servers_with_ip_filters_regex(self):
"""Tests listing servers with IP filter regex.
The compute API will perform a regex match on the ip filter and include
all servers that have fixed IPs which match the filter.
For example, consider we have two servers. The first server has IP
10.1.1.1 and the second server has IP 10.1.1.10. If we list servers
with filter ip=10.1.1.1 we should get back both servers because
10.1.1.1 is a prefix of 10.1.1.10. If we list servers with filter
ip=10.1.1.10 then we should only get back the second server.
"""
# We're going to create two servers with unique ports, but the IPs on
# the ports are close enough that one matches the regex for the other.
# The ports used in this test are defined in the NeutronFixture.
for port_id in (self.neutron.port_1['id'], self.neutron.port_2['id']):
server = dict(
name=port_id, imageRef=self.image_id, flavorRef=self.flavor_id,
networks=[{'port': port_id}])
server = self.api.post_server({'server': server})
self.addCleanup(self.api.delete_server, server['id'])
self.wait_until_active_or_timeout(server['id'])
# Now list servers and filter on the IP of the first server.
servers = self.api.get_servers(
search_opts={
'ip': self.neutron.port_1['fixed_ips'][0]['ip_address']})
# We should get both servers back because the IP on the first server is
# a prefix of the IP on the second server.
self.assertEqual(2, len(servers),
'Unexpected number of servers returned when '
'filtering by ip=%s: %s' % (
self.neutron.port_1['fixed_ips'][0]['ip_address'],
servers))
# Now list servers and filter on the IP of the second server.
servers = self.api.get_servers(
search_opts={
'ip': self.neutron.port_2['fixed_ips'][0]['ip_address']})
# We should get one server back because the IP on the second server is
# unique between both servers.
self.assertEqual(1, len(servers),
'Unexpected number of servers returned when '
'filtering by ip=%s: %s' % (
self.neutron.port_2['fixed_ips'][0]['ip_address'],
servers))
self.assertEqual(self.neutron.port_2['fixed_ips'][0]['ip_address'],
servers[0]['addresses']['private-network'][0]['addr'])
| 43.142857 | 79 | 0.637904 |
0ed28d66bfcafd66a845c0421855f9e41af37e32 | 2,431 | py | Python | Day_16/part2.py | Uklusi/AdventOfCode2021 | 3d22ace832bfd6c9855b2ebad3bf7f10c4751982 | [
"MIT"
] | null | null | null | Day_16/part2.py | Uklusi/AdventOfCode2021 | 3d22ace832bfd6c9855b2ebad3bf7f10c4751982 | [
"MIT"
] | null | null | null | Day_16/part2.py | Uklusi/AdventOfCode2021 | 3d22ace832bfd6c9855b2ebad3bf7f10c4751982 | [
"MIT"
] | null | null | null | from functools import reduce
from math import prod
from AoCUtils import *
result = 0
partNumber = "2"
writeToLog = False
if writeToLog:
logFile = open("log" + partNumber + ".txt", "w")
else:
logFile = "stdout"
printLog = printLogFactory(logFile)
with open("input.txt", "r") as inputFile:
lines = inputFile.read().strip().split("\n")
for line in lines:
line = line.strip()
binInput = join([bin(int(n, 16))[2:].zfill(4) for n in line])
def parsePackage(package, position, acc):
# packVer = int(package[position:position + 3], 2)
acc = 0
packId = int(package[position + 3:position + 6], 2)
position = position + 6
# printLog(f"packVer: {packVer}, packId: {packId}, position: {position}")
if packId == 4:
bitGroup = None
value = ""
while bitGroup is None or bitGroup[0] == "1":
bitGroup = package[position:position + 5]
position = position + 5
value += bitGroup[1:]
return (position, int(value, 2))
lengthType = package[position]
position = position + 1
length = None
numPacks = None
if lengthType == "0":
length = int(package[position:position + 15], 2)
position = position + 15
else:
numPacks = int(package[position:position + 11], 2)
position = position + 11
newPosition = position
countPackages = 0
valueList = []
while (newPosition - position != length) and (countPackages != numPacks):
(newPosition, acc) = parsePackage(package, newPosition, acc)
valueList.append(acc)
countPackages += 1
match packId:
case 0:
return (newPosition, sum(valueList))
case 1:
return (newPosition, prod(valueList))
case 2:
return (newPosition, min(valueList))
case 3:
return (newPosition, max(valueList))
case 5:
return (newPosition, int(valueList[0] > valueList[1]))
case 6:
return (newPosition, int(valueList[0] < valueList[1]))
case 7:
return (newPosition, int(valueList[0] == valueList[1]))
case _:
raise(Exception("ValueError"))
(_, result) = parsePackage(binInput, 0, 0)
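# Worked example from the puzzle text (illustrative sanity check): the hex
# packet "C200B40A82" encodes sum(1, 2), so the evaluator should yield 3.
demo = join([bin(int(n, 16))[2:].zfill(4) for n in "C200B40A82"])
assert parsePackage(demo, 0, 0)[1] == 3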
with open("output" + partNumber + ".txt", "w") as outputFile:
outputFile.write(str(result))
print(str(result))
if writeToLog:
cast(TextIOWrapper, logFile).close()
| 28.267442 | 77 | 0.594406 |
bed53f600f7a705512866205ef76609d39335da9 | 534 | py | Python | orders/tasks.py | techonerd/kobbyshop | e79f009f75f576fdc2e8ac037781f5817a2e255f | [
"MIT"
] | 4 | 2021-11-25T15:45:31.000Z | 2022-01-11T21:31:56.000Z | orders/tasks.py | KwabenaYeboah/kobbyshop | 850a04b24fafa8aa538fdbf039a0e8fafc3ebfc2 | [
"MIT"
] | 1 | 2021-12-30T08:18:28.000Z | 2021-12-30T08:18:28.000Z | orders/tasks.py | techonerd/kobbyshop | e79f009f75f576fdc2e8ac037781f5817a2e255f | [
"MIT"
] | 2 | 2021-12-26T05:11:00.000Z | 2021-12-30T08:18:13.000Z | from celery import task
from django.core.mail import send_mail
from .models import Order
@task
def created_order(order_id):
    '''A Celery task to notify customers via email after an order is placed.'''
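    # Illustrative usage (an assumption about the calling view, not shown
    # here): created_order.delay(order.id) queues this task on a Celery
    # worker, so the email is sent asynchronously instead of blocking the
    # request/response cycle.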
order = Order.objects.get(id=order_id)
subject = f'Order Number. {order.id}'
message = f'Hello {order.first_name}, \n\n'\
f'You have successfully placed an order.\nYour order ID is: {order.id}'
mail_sent = send_mail(subject, message, 'orders@kobbyshop.com', [order.email])
return mail_sent | 35.6 | 85 | 0.698502 |
9c8939a61321ff1f8b30da4b9ecb57e883fc2f0e | 4,051 | py | Python | tensorflow/python/autograph/converters/function_scopes_test.py | leike666666/tensorflow | a3fd0ddfcb716be124e95b51e96e6c1e4507ef64 | [
"Apache-2.0"
] | 57 | 2017-09-03T07:08:31.000Z | 2022-02-28T04:33:42.000Z | tensorflow/python/autograph/converters/function_scopes_test.py | sagol/tensorflow | 04f2870814d2773e09dcfa00cbe76a66a2c4de88 | [
"Apache-2.0"
] | 58 | 2021-11-22T05:41:28.000Z | 2022-01-19T01:33:40.000Z | tensorflow/python/autograph/converters/function_scopes_test.py | sagol/tensorflow | 04f2870814d2773e09dcfa00cbe76a66a2c4de88 | [
"Apache-2.0"
] | 66 | 2020-05-15T10:05:12.000Z | 2022-02-14T07:28:18.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for function_scopes module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import function_scopes
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class FunctionBodyTransformerTest(converter_testing.TestCase):
@test_util.run_deprecated_v1
def test_basic(self):
def test_fn(l):
"""Docstring."""
a = 1
l += a
return l
with self.converted(test_fn, function_scopes, {}) as result:
result_op = result.test_fn(constant_op.constant(1))
self.assertIn('test_fn/', result_op.op.name)
self.assertEqual('Docstring.', result.test_fn.__doc__)
@test_util.run_deprecated_v1
def test_multiline_docstring(self):
tf = None
def test_fn():
"""First sentence.
Second sentence.
"""
return tf.constant(1)
with self.converted(test_fn, function_scopes, {},
(constant_op.constant,)) as result:
result_op = result.test_fn()
self.assertIn('test_fn/', result_op.op.name)
self.assertIn('First sentence.', result.test_fn.__doc__)
self.assertIn('Second sentence.', result.test_fn.__doc__)
@test_util.run_deprecated_v1
def test_nested_functions(self):
def test_fn(l):
def inner_fn(i):
return i + 1
l += 1
return l, inner_fn(l)
with self.converted(test_fn, function_scopes, {},
(ops.name_scope,)) as result:
first, second = result.test_fn(constant_op.constant(1))
self.assertIn('test_fn/', first.op.name)
self.assertNotIn('inner_fn', first.op.name)
self.assertIn('test_fn/inner_fn/', second.op.inputs[0].name)
@test_util.run_deprecated_v1
def test_conversion_context_preserves_in_inner_functions(self):
def inner_fn_callee():
self.assertEqual(
ag_ctx.control_status_ctx().status, ag_ctx.Status.DISABLED)
def test_fn():
def inner_fn():
inner_fn_callee()
with ag_ctx.ControlStatusCtx(
ag_ctx.Status.DISABLED, converter.ConversionOptions(recursive=True)):
inner_fn()
ns = {
'inner_fn_callee': inner_fn_callee,
'ag_ctx': ag_ctx,
'converter': converter
}
with self.converted(test_fn, function_scopes, ns) as result:
result.test_fn()
@test_util.run_deprecated_v1
def test_method(self):
class TestClass(object):
def test_fn(self, l):
def inner_fn(i):
return i + 1
l += 1
return l, inner_fn(l)
ns = {'TestClass': TestClass}
node, ctx = self.prepare(TestClass, ns)
node = function_scopes.transform(node, ctx)
with self.compiled(node, {}, (ops.name_scope,)) as result:
first, second = result.TestClass().test_fn(constant_op.constant(1))
self.assertIn('test_fn/', first.op.name)
self.assertNotIn('inner_fn', first.op.name)
self.assertIn('test_fn/inner_fn/', second.op.inputs[0].name)
if __name__ == '__main__':
test.main()
| 30.689394 | 80 | 0.682301 |
5b6f733ff7cf1362e24a47b6bdf7af003d27bc79 | 30,617 | py | Python | python/ccxt/latoken.py | pkaske/ccxt | 19821cfe0b6899d42b714757137dce9f00c406a0 | [
"MIT"
] | null | null | null | python/ccxt/latoken.py | pkaske/ccxt | 19821cfe0b6899d42b714757137dce9f00c406a0 | [
"MIT"
] | null | null | null | python/ccxt/latoken.py | pkaske/ccxt | 19821cfe0b6899d42b714757137dce9f00c406a0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import ROUND
class latoken(Exchange):
def describe(self):
return self.deep_extend(super(latoken, self).describe(), {
'id': 'latoken',
'name': 'Latoken',
'countries': ['KY'], # Cayman Islands
'version': 'v1',
'rateLimit': 2000,
'certified': False,
'userAgent': self.userAgents['chrome'],
'has': {
'CORS': False,
'publicAPI': True,
'privateAPI': True,
'cancelOrder': True,
'cancelAllOrders': True,
'createMarketOrder': False,
'createOrder': True,
'fetchBalance': True,
'fetchCanceledOrders': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchMyTrades': True,
'fetchOpenOrders': True,
'fetchOrder': False,
'fetchOrdersByStatus': True,
'fetchOrderBook': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/61511972-24c39f00-aa01-11e9-9f7c-471f1d6e5214.jpg',
'api': 'https://api.latoken.com',
'www': 'https://latoken.com',
'doc': [
'https://api.latoken.com',
],
},
'api': {
'public': {
'get': [
'ExchangeInfo/time',
'ExchangeInfo/limits',
'ExchangeInfo/pairs',
'ExchangeInfo/pairs/{currency}',
'ExchangeInfo/pair',
'ExchangeInfo/currencies',
'ExchangeInfo/currencies/{symbol}',
'MarketData/tickers',
'MarketData/ticker/{symbol}',
'MarketData/orderBook/{symbol}',
'MarketData/trades/{symbol}',
'MarketData/trades/{symbol}/{limit}',
],
},
'private': {
'get': [
'Account/balances',
'Account/balances/{currency}',
'Order/status',
'Order/active',
'Order/get_order',
'Order/trades',
],
'post': [
'Order/new',
'Order/test-order',
'Order/cancel',
'Order/cancel_all',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.1 / 100,
'taker': 0.1 / 100,
},
},
'commonCurrencies': {
'TSL': 'Treasure SL',
},
'options': {
'createOrderMethod': 'private_post_order_new', # private_post_order_test_order
},
'exceptions': {
'exact': {
'Signature or ApiKey is not valid': AuthenticationError,
'Request is out of time': InvalidNonce,
'Symbol must be specified': BadRequest,
},
'broad': {
'Request limit reached': DDoSProtection,
'Pair': BadRequest,
'Price needs to be greater than': InvalidOrder,
'Amount needs to be greater than': InvalidOrder,
'The Symbol field is required': InvalidOrder,
'OrderType is not valid': InvalidOrder,
'Side is not valid': InvalidOrder,
'Cancelable order whit': OrderNotFound,
'Order': OrderNotFound,
},
},
})
def nonce(self):
return self.milliseconds()
def fetch_time(self, params={}):
response = self.publicGetExchangeInfoTime(params)
#
# {
# "time": "2019-04-18T9:00:00.0Z",
# "unixTimeSeconds": 1555578000,
# "unixTimeMiliseconds": 1555578000000
# }
#
return self.safe_integer(response, 'unixTimeMiliseconds')
def fetch_markets(self, params={}):
response = self.publicGetExchangeInfoPairs(params)
#
# [
# {
# "pairId": 502,
# "symbol": "LAETH",
# "baseCurrency": "LA",
# "quotedCurrency": "ETH",
# "makerFee": 0.01,
# "takerFee": 0.01,
# "pricePrecision": 8,
# "amountPrecision": 8,
# "minQty": 0.1
# }
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'symbol')
# the exchange shows them inverted
baseId = self.safe_string(market, 'baseCurrency')
quoteId = self.safe_string(market, 'quotedCurrency')
numericId = self.safe_integer(market, 'pairId')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'price': self.safe_integer(market, 'pricePrecision'),
'amount': self.safe_integer(market, 'amountPrecision'),
}
limits = {
'amount': {
'min': self.safe_float(market, 'minQty'),
'max': None,
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}
result.append({
'id': id,
'numericId': numericId,
'info': market,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': None, # assuming True
'precision': precision,
'limits': limits,
})
return result
def fetch_currencies(self, params={}):
response = self.publicGetExchangeInfoCurrencies(params)
#
# [
# {
# "currencyId": 102,
# "symbol": "LA",
# "name": "Latoken",
# "precission": 8,
# "type": "ERC20",
# "fee": 0.1
# }
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
numericId = self.safe_integer(currency, 'currencyId')
code = self.safe_currency_code(id)
precision = self.safe_integer(currency, 'precission')
fee = self.safe_float(currency, 'fee')
active = None
result[code] = {
'id': id,
'numericId': numericId,
'code': code,
'info': currency,
'name': code,
'active': active,
'fee': fee,
'precision': precision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': None,
'max': None,
},
},
}
return result
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
market = self.markets[symbol]
key = 'quote'
rate = market[takerOrMaker]
cost = amount * rate
precision = market['precision']['price']
if side == 'sell':
cost *= price
else:
key = 'base'
precision = market['precision']['amount']
cost = self.decimal_to_precision(cost, ROUND, precision, self.precisionMode)
return {
'type': takerOrMaker,
'currency': market[key],
'rate': rate,
'cost': float(cost),
}
def fetch_balance(self, currency=None, params={}):
self.load_markets()
response = self.privateGetAccountBalances(params)
#
# [
# {
# "currencyId": 102,
# "symbol": "LA",
# "name": "Latoken",
# "amount": 1054.66,
# "available": 900.66,
# "frozen": 154,
# "pending": 0
# }
# ]
#
result = {
'info': response,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
frozen = self.safe_float(balance, 'frozen')
pending = self.safe_float(balance, 'pending')
used = self.sum(frozen, pending)
account = {
'free': self.safe_float(balance, 'available'),
'used': used,
'total': self.safe_float(balance, 'amount'),
}
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.publicGetMarketDataOrderBookSymbol(self.extend(request, params))
#
# {
# "pairId": 502,
# "symbol": "LAETH",
# "spread": 0.07,
# "asks": [
# {"price": 136.3, "amount": 7.024}
# ],
# "bids": [
# {"price": 136.2, "amount": 6.554}
# ]
# }
#
return self.parse_order_book(response, None, 'bids', 'asks', 'price', 'amount')
def parse_ticker(self, ticker, market=None):
symbol = self.find_symbol(self.safe_string(ticker, 'symbol'), market)
open = self.safe_float(ticker, 'open')
close = self.safe_float(ticker, 'close')
change = None
if open is not None and close is not None:
change = close - open
percentage = self.safe_float(ticker, 'priceChange')
timestamp = self.nonce()
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'low': self.safe_float(ticker, 'low'),
'high': self.safe_float(ticker, 'high'),
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': None,
'open': open,
'close': close,
'last': close,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': None,
'baseVolume': None,
'quoteVolume': self.safe_float(ticker, 'volume'),
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.publicGetMarketDataTickerSymbol(self.extend(request, params))
#
# {
# "pairId": 502,
# "symbol": "LAETH",
# "volume": 1023314.3202,
# "open": 134.82,
# "low": 133.95,
# "high": 136.22,
# "close": 135.12,
# "priceChange": 0.22
# }
#
return self.parse_ticker(response, market)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetMarketDataTickers(params)
#
# [
# {
# "pairId": 502,
# "symbol": "LAETH",
# "volume": 1023314.3202,
# "open": 134.82,
# "low": 133.95,
# "high": 136.22,
# "close": 135.12,
# "priceChange": 0.22
# }
# ]
#
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = ticker['symbol']
if symbols is None or self.in_array(symbol, symbols):
result[symbol] = ticker
return result
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# side: 'buy',
# price: 0.33634,
# amount: 0.01,
# timestamp: 1564240008000 # milliseconds
# }
#
# fetchMyTrades(private)
#
# {
# id: '1564223032.892829.3.tg15',
# orderId: '1564223032.671436.707548@1379:1',
# commission: 0,
# side: 'buy',
# price: 0.32874,
# amount: 0.607,
# timestamp: 1564223033 # seconds
# }
#
type = None
timestamp = self.safe_integer_2(trade, 'timestamp', 'time')
if timestamp is not None:
# 03 Jan 2009 - first block
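            # values smaller than that epoch expressed in milliseconds are
            # assumed to be in seconds and are upscaled to milliseconds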
if timestamp < 1230940800000:
timestamp *= 1000
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'amount')
side = self.safe_string(trade, 'side')
cost = None
if amount is not None:
if price is not None:
cost = amount * price
symbol = None
if market is not None:
symbol = market['symbol']
id = self.safe_string(trade, 'id')
orderId = self.safe_string(trade, 'orderId')
feeCost = self.safe_float(trade, 'commission')
fee = None
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': None,
}
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': orderId,
'type': type,
'takerOrMaker': None,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit # default 50, max 100
response = self.publicGetMarketDataTradesSymbol(self.extend(request, params))
#
# {
# "pairId":370,
# "symbol":"ETHBTC",
# "tradeCount":51,
# "trades": [
# {
# side: 'buy',
# price: 0.33634,
# amount: 0.01,
# timestamp: 1564240008000 # milliseconds
# }
# ]
# }
#
trades = self.safe_value(response, 'trades', [])
return self.parse_trades(trades, market, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.privateGetOrderTrades(self.extend(request, params))
#
# {
# "pairId": 502,
# "symbol": "LAETH",
# "tradeCount": 1,
# "trades": [
# {
# id: '1564223032.892829.3.tg15',
# orderId: '1564223032.671436.707548@1379:1',
# commission: 0,
# side: 'buy',
# price: 0.32874,
# amount: 0.607,
# timestamp: 1564223033 # seconds
# }
# ]
# }
#
trades = self.safe_value(response, 'trades', [])
return self.parse_trades(trades, market, since, limit)
def parse_order_status(self, status):
statuses = {
'active': 'open',
'partiallyFilled': 'open',
'filled': 'closed',
'cancelled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "orderId":"1563460093.134037.704945@0370:2",
# "cliOrdId":"",
# "pairId":370,
# "symbol":"ETHBTC",
# "side":"sell",
# "orderType":"limit",
# "price":1.0,
# "amount":1.0
# }
#
# cancelOrder, fetchOrder, fetchOpenOrders, fetchClosedOrders, fetchCanceledOrders
#
# {
# "orderId": "1555492358.126073.126767@0502:2",
# "cliOrdId": "myNewOrder",
# "pairId": 502,
# "symbol": "LAETH",
# "side": "buy",
# "orderType": "limit",
# "price": 136.2,
# "amount": 0.57,
# "orderStatus": "partiallyFilled",
# "executedAmount": 0.27,
# "reaminingAmount": 0.3,
# "timeCreated": 155551580736,
# "timeFilled": 0
# }
#
id = self.safe_string(order, 'orderId')
timestamp = self.safe_timestamp(order, 'timeCreated')
marketId = self.safe_string(order, 'symbol')
symbol = marketId
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'orderType')
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'amount')
filled = self.safe_float(order, 'executedAmount')
remaining = None
if amount is not None:
if filled is not None:
remaining = amount - filled
status = self.parse_order_status(self.safe_string(order, 'orderStatus'))
cost = None
if filled is not None:
if price is not None:
cost = filled * price
timeFilled = self.safe_timestamp(order, 'timeFilled')
lastTradeTimestamp = None
if (timeFilled is not None) and (timeFilled > 0):
lastTradeTimestamp = timeFilled
return {
'id': id,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'average': None,
'remaining': remaining,
'fee': None,
}
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_with_method('private_get_order_active', symbol, since, limit, params)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_by_status('filled', symbol, since, limit, params)
def fetch_canceled_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_by_status('cancelled', symbol, since, limit, params)
def fetch_orders_by_status(self, status, symbol=None, since=None, limit=None, params={}):
request = {
'status': status,
}
return self.fetch_orders_with_method('private_get_order_status', symbol, since, limit, self.extend(request, params))
def fetch_orders_with_method(self, method, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrdersWithMethod requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit # default 100
response = getattr(self, method)(self.extend(request, params))
#
# [
# {
# "orderId": "1555492358.126073.126767@0502:2",
# "cliOrdId": "myNewOrder",
# "pairId": 502,
# "symbol": "LAETH",
# "side": "buy",
# "orderType": "limit",
# "price": 136.2,
# "amount": 0.57,
# "orderStatus": "partiallyFilled",
# "executedAmount": 0.27,
# "reaminingAmount": 0.3,
# "timeCreated": 155551580736,
# "timeFilled": 0
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'orderId': id,
}
response = self.privateGetOrderGetOrder(self.extend(request, params))
#
# {
# "orderId": "1555492358.126073.126767@0502:2",
# "cliOrdId": "myNewOrder",
# "pairId": 502,
# "symbol": "LAETH",
# "side": "buy",
# "orderType": "limit",
# "price": 136.2,
# "amount": 0.57,
# "orderStatus": "partiallyFilled",
# "executedAmount": 0.27,
# "reaminingAmount": 0.3,
# "timeCreated": 155551580736,
# "timeFilled": 0
# }
#
return self.parse_order(response)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
if type != 'limit':
raise ExchangeError(self.id + ' allows limit orders only')
request = {
'symbol': self.market_id(symbol),
'side': side,
'price': self.price_to_precision(symbol, price),
'amount': self.amount_to_precision(symbol, amount),
'orderType': type,
}
method = self.safe_string(self.options, 'createOrderMethod', 'private_post_order_new')
response = getattr(self, method)(self.extend(request, params))
#
# {
# "orderId":"1563460093.134037.704945@0370:2",
# "cliOrdId":"",
# "pairId":370,
# "symbol":"ETHBTC",
# "side":"sell",
# "orderType":"limit",
# "price":1.0,
# "amount":1.0
# }
#
return self.parse_order(response)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'orderId': id,
}
response = self.privatePostOrderCancel(self.extend(request, params))
#
# {
# "orderId": "1555492358.126073.126767@0502:2",
# "cliOrdId": "myNewOrder",
# "pairId": 502,
# "symbol": "LAETH",
# "side": "buy",
# "orderType": "limit",
# "price": 136.2,
# "amount": 0.57,
# "orderStatus": "partiallyFilled",
# "executedAmount": 0.27,
# "reaminingAmount": 0.3,
# "timeCreated": 155551580736,
# "timeFilled": 0
# }
#
return self.parse_order(response)
def cancel_all_orders(self, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelAllOrders requires a symbol argument')
self.load_markets()
marketId = self.market_id(symbol)
request = {
'symbol': marketId,
}
response = self.privatePostOrderCancelAll(self.extend(request, params))
#
# {
# "pairId": 502,
# "symbol": "LAETH",
# "cancelledOrders": [
# "1555492358.126073.126767@0502:2"
# ]
# }
#
result = []
canceledOrders = self.safe_value(response, 'cancelledOrders', [])
for i in range(0, len(canceledOrders)):
order = self.parse_order({
'symbol': marketId,
'orderId': canceledOrders[i],
'orderStatus': 'canceled',
})
result.append(order)
return result
def sign(self, path, api='public', method='GET', params=None, headers=None, body=None):
request = '/api/' + self.version + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'private':
nonce = self.nonce()
query = self.extend({
'timestamp': nonce,
}, query)
urlencodedQuery = self.urlencode(query)
if query:
request += '?' + urlencodedQuery
if api == 'private':
self.check_required_credentials()
signature = self.hmac(self.encode(request), self.encode(self.secret))
headers = {
'X-LA-KEY': self.apiKey,
'X-LA-SIGNATURE': signature,
}
if method == 'POST':
headers['Content-Type'] = 'application/x-www-form-urlencoded'
body = urlencodedQuery
url = self.urls['api'] + request
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if not response:
return
#
# {"message": "Request limit reachednot ", "details": "Request limit reached. Maximum allowed: 1 per 1s. Please try again in 1 second(s)."}
# {"error": {"message": "Pair 370 is not found","errorType":"RequestError","statusCode":400}}
# {"error": {"message": "Signature or ApiKey is not valid","errorType":"RequestError","statusCode":400}}
# {"error": {"message": "Request is out of time", "errorType": "RequestError", "statusCode":400}}
# {"error": {"message": "Price needs to be greater than 0","errorType":"ValidationError","statusCode":400}}
# {"error": {"message": "Side is not valid, Price needs to be greater than 0, Amount needs to be greater than 0, The Symbol field is required., OrderType is not valid","errorType":"ValidationError","statusCode":400}}
# {"error": {"message": "Cancelable order whit ID 1563460289.571254.704945@0370:1 not found","errorType":"RequestError","statusCode":400}}
# {"error": {"message": "Symbol must be specified","errorType":"RequestError","statusCode":400}}
# {"error": {"message": "Order 1563460289.571254.704945@0370:1 is not found","errorType":"RequestError","statusCode":400}}
#
message = self.safe_string(response, 'message')
exact = self.exceptions['exact']
broad = self.exceptions['broad']
feedback = self.id + ' ' + body
if message is not None:
if message in exact:
raise exact[message](feedback)
broadKey = self.findBroadlyMatchedKey(broad, message)
if broadKey is not None:
raise broad[broadKey](feedback)
error = self.safe_value(response, 'error', {})
errorMessage = self.safe_string(error, 'message')
if errorMessage is not None:
if errorMessage in exact:
raise exact[errorMessage](feedback)
broadKey = self.findBroadlyMatchedKey(broad, errorMessage)
if broadKey is not None:
raise broad[broadKey](feedback)
raise ExchangeError(feedback) # unknown message
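# Editor's usage sketch (hedged, not part of ccxt): the class implements the
# standard ccxt unified API, so typical (network-performing) usage would be:
#     exchange = latoken({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
#     print(exchange.fetch_ticker('ETH/BTC'))
# The credentials and the symbol above are placeholders.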
| 37.021765 | 228 | 0.464252 |
9ab7ff41e5ed8ee914d697f9cea1e94193125e3e | 2,326 | py | Python | tests/test_config/test_yaml.py | Dogeek/pyconf | 225be858d2259bbf4306f05c620cdfeaa3727cb9 | [
"MIT"
] | 1 | 2021-03-28T02:43:03.000Z | 2021-03-28T02:43:03.000Z | tests/test_config/test_yaml.py | Dogeek/pyconf | 225be858d2259bbf4306f05c620cdfeaa3727cb9 | [
"MIT"
] | 20 | 2021-02-06T23:47:18.000Z | 2021-03-28T02:41:27.000Z | tests/test_config/test_yaml.py | Dogeek/xdgconfig | 225be858d2259bbf4306f05c620cdfeaa3727cb9 | [
"MIT"
] | null | null | null | import pathlib
from tests.utils import TestCase
from tests.mocks import MockedYamlConfig
class TestYamlConfig(TestCase):
CONFIG_NAME = 'config.yaml'
def test_config_saved(self):
'''
Tests that the config file is created correctly.
'''
config = self.make_config(MockedYamlConfig, 'saved')
config['string'] = 'string'
config['integer'] = 0
config['float'] = 0.1
config['dict'] = {}
config['list'] = []
self.assertEqual(
config,
{
'string': 'string',
'integer': 0,
'float': 0.1,
'dict': {},
'list': []
}
)
config.save()
self.assertFileExists(
(
pathlib.Path('./__tmp__') /
self.__class__.__name__ /
f'saved_{self.CONFIG_NAME}'
)
)
def test_config_loaded(self):
config = self.make_config(MockedYamlConfig, 'load')
config['string'] = 'string'
config['integer'] = 0
config['float'] = 0.1
config['dict'] = {}
config['list'] = []
config.save()
conf = self.make_config(MockedYamlConfig, 'load')
self.assertEqual(
conf,
{
'string': 'string',
'integer': 0,
'float': 0.1,
'dict': {},
'list': []
}
)
def test_mutating_subkey(self):
'''
Test that mutating a non-existing subkey generates the proper
tree-like structure.
'''
config = self.make_config(MockedYamlConfig, 'mutating')
config['foo']['bar'] = 'baz'
self.assertEqual(
config, {'foo': {'bar': 'baz'}}
)
def test_config_identity(self):
'''
Test that the config file's instance is cached in a Singleton-type
design pattern. Ensures limited memory footprint when working with
config files.
'''
config = self.make_config(MockedYamlConfig, 'identity')
self.assertIs(config, self.make_config(MockedYamlConfig, 'identity'))
self.assertIsNot(
config, self.make_config(MockedYamlConfig, 'identity_false')
)
| 28.365854 | 77 | 0.507309 |
f33cf206323903668c968c9b31e5985bf41b424e | 1,439 | py | Python | VolumeProfile.py | gregyjames/VolumeProfiles | 9f89fb5896146ef78b11129075ed07ba7246e237 | [
"MIT"
] | 1 | 2021-05-08T13:45:40.000Z | 2021-05-08T13:45:40.000Z | VolumeProfile.py | gregyjames/VolumeProfiles | 9f89fb5896146ef78b11129075ed07ba7246e237 | [
"MIT"
] | null | null | null | VolumeProfile.py | gregyjames/VolumeProfiles | 9f89fb5896146ef78b11129075ed07ba7246e237 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from scipy import stats, signal
import plotly.express as px
import plotly.graph_objects as go
import pandas_datareader as web
import datetime
#datetime.datetime is a data type within the datetime module
start = datetime.datetime(2020, 12, 1)
end = datetime.datetime(2021, 1, 31)
#DataReader method name is case sensitive
df = web.DataReader("nvda", 'yahoo', start, end)
print(df.head())
volume = df['Volume']
close = df['Close']
#px.histogram(df, x='Volume', y='Close', nbins=150, orientation='h').show()
kde_factor = 0.05
num_samples = 500
kde = stats.gaussian_kde(close,weights=volume,bw_method=kde_factor)
xr = np.linspace(close.min(),close.max(),num_samples)
kdy = kde(xr)
ticks_per_sample = (xr.max() - xr.min()) / num_samples
def get_dist_plot(c, v, kx, ky):
fig = go.Figure()
fig.add_trace(go.Histogram(name='Vol Profile', x=c, y=v, nbinsx=150,
histfunc='sum', histnorm='probability density',
marker_color='#B0C4DE'))
fig.add_trace(go.Scatter(name='KDE', x=kx, y=ky, mode='lines', marker_color='#D2691E'))
return fig
peaks,_ = signal.find_peaks(kdy)
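# Editor's note (hedged): find_peaks is called with defaults here; scipy also
# accepts filters such as prominence, e.g.
#     peaks, _ = signal.find_peaks(kdy, prominence=kdy.max() * 0.05)
# which suppresses minor bumps in the KDE. The 0.05 factor is illustrative only.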
pkx = xr[peaks]
pky = kdy[peaks]
pk_marker_args=dict(size=10)
fig = get_dist_plot(close, volume, xr, kdy)
fig.add_trace(go.Scatter(name="Peaks", x=pkx, y=pky, mode='markers', marker=pk_marker_args))
fig.show() | 31.977778 | 93 | 0.669215 |
b70b4e278b60148649d722ec6634ff850f83b0aa | 616 | py | Python | conditional_generation/data_utils.py | shizhediao/TILGAN | 6115cc191a9f3cb39abc466a5da57b7c8cce5202 | [
"MIT"
] | 11 | 2021-05-30T01:32:50.000Z | 2022-03-24T08:34:27.000Z | conditional_generation/data_utils.py | shizhediao/TILGAN | 6115cc191a9f3cb39abc466a5da57b7c8cce5202 | [
"MIT"
] | 1 | 2021-08-01T14:19:57.000Z | 2021-09-13T15:41:52.000Z | conditional_generation/data_utils.py | shizhediao/TILGAN | 6115cc191a9f3cb39abc466a5da57b7c8cce5202 | [
"MIT"
] | 2 | 2021-09-17T14:20:12.000Z | 2022-01-18T07:53:22.000Z | from tensorflow.python.platform import gfile
_PAD = "_PAD"
_GO = "_GO"
_EOS = "_EOS"
_UNK = "_UNK"
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
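# Editor's note (hedged): initialize_vocabulary below assumes a plain-text
# vocabulary file with one token per line, where the line index is the id and
# the special tokens above occupy ids 0-3, e.g.:
#     _PAD
#     _GO
#     _EOS
#     _UNK
#     the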
def initialize_vocabulary(vocabulary_path):
if gfile.Exists(vocabulary_path):
rev_vocab = []
with gfile.GFile(vocabulary_path, mode="r") as f:
rev_vocab.extend(f.readlines())
rev_vocab = [line.strip() for line in rev_vocab]
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
return vocab, rev_vocab
else:
    raise ValueError("Vocabulary file %s not found." % vocabulary_path) | 28 | 70 | 0.665584 |
91312e2b78195be7d9d2a963aa3dc514634e0bfd | 4,637 | py | Python | p24.py | cemulate/python-challenge | 1c3b790d3a32cd51b3bc9ed4d1acc2760405f358 | [
"MIT"
] | null | null | null | p24.py | cemulate/python-challenge | 1c3b790d3a32cd51b3bc9ed4d1acc2760405f358 | [
"MIT"
] | null | null | null | p24.py | cemulate/python-challenge | 1c3b790d3a32cd51b3bc9ed4d1acc2760405f358 | [
"MIT"
] | null | null | null | from PIL import Image, ImageDraw
maze = Image.open("maze.png")
msolved = maze.copy()
mdraw = ImageDraw.Draw(msolved)
def enum(**enums):
return type('Enum', (), enums)
Direction = enum(UP=0, DOWN=1, LEFT=2, RIGHT=3)
dirNames = {Direction.UP:"Up",
Direction.DOWN:"Down",
Direction.RIGHT:"Right",
Direction.LEFT:"Left",
None:"None"}
directions = [Direction.UP, Direction.LEFT, Direction.RIGHT, Direction.DOWN]
Backwards = {Direction.UP:Direction.DOWN,
Direction.DOWN:Direction.UP,
Direction.LEFT:Direction.RIGHT,
Direction.RIGHT:Direction.LEFT}
class PDPair():
def __init__(self, point, direction):
self.point = point
self.direction = direction
self.dirMoveMap = {Direction.UP:self._moveUp,
Direction.DOWN:self._moveDown,
Direction.LEFT:self._moveLeft,
Direction.RIGHT:self._moveRight}
def __str__(self):
return "PDPair: " + str(self.point) + " " + dirNames[self.direction]
def move(self, dir):
"""Returns a new Point-Direction pair formed by moving the specified
PDPair in a specified direction. The direction attribute of the new
pair is defined to be along the vector that the PDPair just moved"""
# Calls the appropriate function for the direction
return self.dirMoveMap[dir]()
def _moveRight(self):
p = self.point
return PDPair((p[0]+1, p[1]), Direction.RIGHT)
def _moveLeft(self):
p = self.point
return PDPair((p[0]-1, p[1]), Direction.LEFT)
def _moveDown(self):
p = self.point
return PDPair((p[0], p[1]+1), Direction.DOWN)
def _moveUp(self):
p = self.point
return PDPair((p[0], p[1]-1), Direction.UP)
#def trace(seed):
# paths = []
# curPath = [seed.point]
# next = [seed]
# while len(next) < 2:
# p = next[0]
# curPath.append(p.point)
# possible = [d for d in directions if not d == Backwards[p.direction]]
# points = [p.move(d) for d in possible]
# test = lambda pdpair: maze.getpixel(pdpair.point)[0] == 255
# next = [point for point in points if test(point) is True]
# if len(next) is 0:
# return None
# if len(next) is 1 and next[0].point[1] == 641:
# return curPath
# paths.append(curPath)
# for x in next:
# r = trace(x)
# if not r == None:
# paths.append(r)
# print "HEllo"
# return paths
#import sys
#sys.setrecursionlimit(1000)
#
#paths = trace(PDPair((80, 1), Direction.DOWN))
#print paths
def deadEnds(image):
ends = []
x, y = 0, 0
for i in range(641-2):
y += 1
x = 0
for j in range(641-2):
x += 1
p = image.getpixel((x, y))
if p != (255, 255, 255, 255):
adj = [(x-1, y), (x+1, y), (x, y-1), (x, y+1)]
adj = [image.getpixel(a) for a in adj]
adj = filter(lambda p: p != (255, 255, 255, 255), adj)
if len(adj) == 1:
#print "CONFIRM!", x, y
ends.append((x, y))
image.putpixel((x, y), (255, 255, 255, 255))
return ends
def trace(seed, image, dataDump=None, draw=True):
"""
Follows a path starting at seed until it hits an intersection or the end
of the picture. If draw is True, it will fill in every pixel it passes
over. If dataDump is non-null, it will append the chr() of the color value
of each pixel it passes to dataDump
"""
next = [seed]
while len(next) == 1:
p = next[0]
if p.direction != None:
possible = [d for d in directions if not d == Backwards[p.direction]]
else:
possible = directions
points = [p.move(d) for d in possible]
test = lambda pdpair: image.getpixel(pdpair.point) != (255, 255, 255, 255)
try:
next = filter(test, points)
except:
print "Reached edge of image, ending trace"
return
if dataDump != None:
dataDump.append(chr(image.getpixel(p.point)[0]))
if len(next) == 1 and draw:
image.putpixel(p.point, (255, 255, 255, 255))
import sys
print "Identifying all dead ends in the maze..."
ends = deadEnds(msolved)
sys.stdout.write("Filling dead ends back to their intersection")
# By maze theory, if you fill all dead ends back to the first intersection,
# you will reveal the correct path, assuming there is one, when done
x = max(1, len(ends) // 20)  # guard against small mazes so the modulo below never divides by zero
for i in range(len(ends)):
end = ends[i]
trace(PDPair(end, None), msolved)
    if i % x == 0:
sys.stdout.write(" .")
print "\n"
# Now the image contains the one and only one correct path
#msolved.save("maze_solved.png")
start = PDPair((639,0), Direction.DOWN)
dump = []
# Trace the correct path now, without drawing over and with a dataDump
print "Tracing the correct path, dumping color data of red fields..."
trace(start, msolved, dump, False)
# Write every other byte (non-zero) to file
open("maze_dump.zip", 'wb').write(''.join(dump[1::2]))
print "Done."
msolved.save("maze_solved.png")
| 26.803468 | 76 | 0.660772 |
96308fc60c947860380cb645cf7b5a73d1f14fd1 | 10,035 | py | Python | modules/afamqp/rabbitmq-c/codegen/amqp_codegen.py | balabit-deps/balabit-os-6-syslog-ng | c8d0fafc8eaca8ed690b2ad17ab1d93820bd07f6 | [
"BSD-4-Clause-UC"
] | null | null | null | modules/afamqp/rabbitmq-c/codegen/amqp_codegen.py | balabit-deps/balabit-os-6-syslog-ng | c8d0fafc8eaca8ed690b2ad17ab1d93820bd07f6 | [
"BSD-4-Clause-UC"
] | null | null | null | modules/afamqp/rabbitmq-c/codegen/amqp_codegen.py | balabit-deps/balabit-os-6-syslog-ng | c8d0fafc8eaca8ed690b2ad17ab1d93820bd07f6 | [
"BSD-4-Clause-UC"
] | null | null | null | ## The contents of this file are subject to the Mozilla Public License
## Version 1.1 (the "License"); you may not use this file except in
## compliance with the License. You may obtain a copy of the License
## at http://www.mozilla.org/MPL/
##
## Software distributed under the License is distributed on an "AS IS"
## basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
## the License for the specific language governing rights and
## limitations under the License.
##
## The Original Code is RabbitMQ.
##
## The Initial Developer of the Original Code is GoPivotal, Inc.
## Copyright (c) 2007-2013 GoPivotal, Inc. All rights reserved.
##
from __future__ import nested_scopes
import re
import sys
import os
from optparse import OptionParser
try:
try:
import simplejson as json
except ImportError, e:
if sys.hexversion >= 0x20600f0:
import json
else:
raise e
except ImportError:
print >> sys.stderr , " You don't appear to have simplejson.py installed"
print >> sys.stderr , " (an implementation of a JSON reader and writer in Python)."
print >> sys.stderr , " You can install it:"
print >> sys.stderr , " - by running 'apt-get install python-simplejson' on Debian-based systems,"
print >> sys.stderr , " - by running 'yum install python-simplejson' on Fedora/Red Hat system,"
print >> sys.stderr , " - by running 'port install py25-simplejson' on Macports on OS X"
print >> sys.stderr , " (you may need to say 'make PYTHON=python2.5', as well),"
print >> sys.stderr , " - from sources from 'http://pypi.python.org/pypi/simplejson'"
print >> sys.stderr , " - simplejson is a standard json library in the Python core since 2.6"
sys.exit(1)
def insert_base_types(d):
for t in ['octet', 'shortstr', 'longstr', 'short', 'long',
'longlong', 'bit', 'table', 'timestamp']:
d[t] = t
class AmqpSpecFileMergeConflict(Exception): pass
# If ignore_conflicts is true, then we allow acc and new to conflict,
# with whatever's already in acc winning and new being ignored. If
# ignore_conflicts is false, acc and new must not conflict.
def default_spec_value_merger(key, acc, new, ignore_conflicts):
if acc is None or acc == new or ignore_conflicts:
return new
else:
raise AmqpSpecFileMergeConflict(key, acc, new)
def extension_info_merger(key, acc, new, ignore_conflicts):
return acc + [new]
def domains_merger(key, acc, new, ignore_conflicts):
merged = dict((k, v) for [k, v] in acc)
for [k, v] in new:
if merged.has_key(k):
if not ignore_conflicts:
raise AmqpSpecFileMergeConflict(key, acc, new)
else:
merged[k] = v
return [[k, v] for (k, v) in merged.iteritems()]
def merge_dict_lists_by(dict_key, acc, new, ignore_conflicts):
acc_index = set(v[dict_key] for v in acc)
result = list(acc) # shallow copy
for v in new:
if v[dict_key] in acc_index:
if not ignore_conflicts:
                raise AmqpSpecFileMergeConflict(dict_key, acc, new)
else:
result.append(v)
return result
def constants_merger(key, acc, new, ignore_conflicts):
return merge_dict_lists_by("name", acc, new, ignore_conflicts)
def methods_merger(classname, acc, new, ignore_conflicts):
return merge_dict_lists_by("name", acc, new, ignore_conflicts)
def properties_merger(classname, acc, new, ignore_conflicts):
return merge_dict_lists_by("name", acc, new, ignore_conflicts)
def class_merger(acc, new, ignore_conflicts):
acc["methods"] = methods_merger(acc["name"],
acc["methods"],
new["methods"],
ignore_conflicts)
acc["properties"] = properties_merger(acc["name"],
acc.get("properties", []),
new.get("properties", []),
ignore_conflicts)
def classes_merger(key, acc, new, ignore_conflicts):
acc_dict = dict((v["name"], v) for v in acc)
result = list(acc) # shallow copy
for w in new:
if w["name"] in acc_dict:
class_merger(acc_dict[w["name"]], w, ignore_conflicts)
else:
result.append(w)
return result
mergers = {
"extension": (extension_info_merger, []),
"domains": (domains_merger, []),
"constants": (constants_merger, []),
"classes": (classes_merger, []),
}
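# Editor's note (hedged): keys without an entry in `mergers` fall back to
# default_spec_value_merger, so two specs that disagree on e.g. 'port' raise
# AmqpSpecFileMergeConflict unless --ignore-conflicts is set, in which case
# the value from the later file wins.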
def merge_load_specs(filenames, ignore_conflicts):
handles = [open(filename) for filename in filenames]
docs = [json.load(handle) for handle in handles]
spec = {}
for doc in docs:
for (key, value) in doc.iteritems():
(merger, default_value) = mergers.get(key, (default_spec_value_merger, None))
spec[key] = merger(key, spec.get(key, default_value), value, ignore_conflicts)
for handle in handles: handle.close()
return spec
class AmqpSpec:
# Slight wart: use a class member rather than change the ctor signature
# to avoid breaking everyone else's code.
ignore_conflicts = False
def __init__(self, filenames):
self.spec = merge_load_specs(filenames, AmqpSpec.ignore_conflicts)
self.major = self.spec['major-version']
self.minor = self.spec['minor-version']
self.revision = self.spec.has_key('revision') and self.spec['revision'] or 0
self.port = self.spec['port']
self.domains = {}
insert_base_types(self.domains)
for entry in self.spec['domains']:
self.domains[ entry[0] ] = entry[1]
self.constants = []
for d in self.spec['constants']:
if d.has_key('class'):
klass = d['class']
else:
klass = ''
self.constants.append((d['name'], d['value'], klass))
self.classes = []
for element in self.spec['classes']:
self.classes.append(AmqpClass(self, element))
def allClasses(self):
return self.classes
def allMethods(self):
return [m for c in self.classes for m in c.allMethods()]
def resolveDomain(self, n):
return self.domains[n]
class AmqpEntity:
def __init__(self, element):
self.element = element
self.name = element['name']
class AmqpClass(AmqpEntity):
def __init__(self, spec, element):
AmqpEntity.__init__(self, element)
self.spec = spec
self.index = int(self.element['id'])
self.methods = []
for method_element in self.element['methods']:
self.methods.append(AmqpMethod(self, method_element))
self.hasContentProperties = False
for method in self.methods:
if method.hasContent:
self.hasContentProperties = True
break
self.fields = []
if self.element.has_key('properties'):
index = 0
for e in self.element['properties']:
self.fields.append(AmqpField(self, e, index))
index = index + 1
def allMethods(self):
return self.methods
def __repr__(self):
return 'AmqpClass("' + self.name + '")'
class AmqpMethod(AmqpEntity):
def __init__(self, klass, element):
AmqpEntity.__init__(self, element)
self.klass = klass
self.index = int(self.element['id'])
if self.element.has_key('synchronous'):
self.isSynchronous = self.element['synchronous']
else:
self.isSynchronous = False
if self.element.has_key('content'):
self.hasContent = self.element['content']
else:
self.hasContent = False
self.arguments = []
index = 0
for argument in element['arguments']:
self.arguments.append(AmqpField(self, argument, index))
index = index + 1
def __repr__(self):
return 'AmqpMethod("' + self.klass.name + "." + self.name + '" ' + repr(self.arguments) + ')'
class AmqpField(AmqpEntity):
def __init__(self, method, element, index):
AmqpEntity.__init__(self, element)
self.method = method
self.index = index
if self.element.has_key('type'):
self.domain = self.element['type']
else:
self.domain = self.element['domain']
if self.element.has_key('default-value'):
self.defaultvalue = self.element['default-value']
else:
self.defaultvalue = None
def __repr__(self):
return 'AmqpField("' + self.name + '")'
def do_main(header_fn, body_fn):
do_main_dict({"header": header_fn, "body": body_fn})
def do_main_dict(funcDict):
def usage():
print >> sys.stderr , "Usage:"
print >> sys.stderr , " %s <function> <path_to_amqp_spec.json>... <path_to_output_file>" % (sys.argv[0])
print >> sys.stderr , " where <function> is one of %s" % ", ".join([k for k in funcDict.keys()])
def execute(fn, amqp_specs, out_file):
stdout = sys.stdout
f = open(out_file, 'w')
success = False
try:
sys.stdout = f
fn(amqp_specs)
success = True
finally:
sys.stdout = stdout
f.close()
if not success:
os.remove(out_file)
parser = OptionParser()
parser.add_option("--ignore-conflicts", action="store_true", dest="ignore_conflicts", default=False)
(options, args) = parser.parse_args()
if len(args) < 3:
usage()
sys.exit(1)
else:
function = args[0]
sources = args[1:-1]
dest = args[-1]
AmqpSpec.ignore_conflicts = options.ignore_conflicts
if funcDict.has_key(function):
execute(funcDict[function], sources, dest)
else:
usage()
sys.exit(1)
| 34.965157 | 113 | 0.603388 |
48c76e426cb9b2089cba4aa96c0a2d668521698d | 1,396 | py | Python | Communicator.py | mkatabi/Checkers-AI | 3211b370d018f995af67d63e5e267c441521be2d | [
"MIT"
] | null | null | null | Communicator.py | mkatabi/Checkers-AI | 3211b370d018f995af67d63e5e267c441521be2d | [
"MIT"
] | 1 | 2020-04-28T08:35:13.000Z | 2020-04-28T16:20:32.000Z | Communicator.py | andrewttsui/Checkers-AI | 71f0c5caeda3833acf315fed0a27551866d8bb17 | [
"MIT"
] | null | null | null | from subprocess import Popen, PIPE
import select
import fcntl, os
import time
class Communicator(object):
def __init__(self, command,timeout):
self.timeout = timeout
self.process = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
flags = fcntl.fcntl(self.process.stdout, fcntl.F_GETFL)
fcntl.fcntl(self.process.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)
self.accumulated_time = 0
def send(self, data, tail = '\n'.encode()):
self.process.stdin.write(data + tail)
self.process.stdin.flush()
time.sleep(0.01)
    def recv(self, t=0.2, return_stderr=False, time_already=None):
        if time_already is not None:
            # DeprecationWarning(...) alone only constructs the exception object;
            # warnings.warn actually surfaces the deprecation to the caller.
            warnings.warn(
                "time_already parameter has been deprecated, and it will be removed soon.",
                DeprecationWarning,
            )
r = ''
pr = self.process.stdout
per = self.process.stderr
bt = time.time()
er = b''
while ((time.time() - bt)+self.accumulated_time < self.timeout):
if not select.select([pr], [], [], 0)[0]:
time.sleep(t)
continue
r = pr.read().rstrip()
self.accumulated_time += time.time() - bt
if r.decode() == ' ' or r.decode() == '':
er = per.read()
if return_stderr:
return r,er
return r
raise TimeoutError
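# Editor's usage sketch (hedged, not in the original file): driving a child
# engine process with a time budget; the command and protocol are invented.
#     comm = Communicator("python agent.py", timeout=5.0)
#     comm.send(b"make your move")
#     reply = comm.recv()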
| 34.9 | 106 | 0.579513 |
0cce7ae67da1e51ab15b950bda7a29ced9357540 | 887 | py | Python | convert.py | aksnzhy/dgl-tool | eab2d8387334e9e1c0b0514bb6f1fe513b0e3dcc | [
"Apache-2.0"
] | 1 | 2019-10-31T06:23:18.000Z | 2019-10-31T06:23:18.000Z | convert.py | aksnzhy/dgl-tool | eab2d8387334e9e1c0b0514bb6f1fe513b0e3dcc | [
"Apache-2.0"
] | null | null | null | convert.py | aksnzhy/dgl-tool | eab2d8387334e9e1c0b0514bb6f1fe513b0e3dcc | [
"Apache-2.0"
] | null | null | null | import os
entity2id_global = {}
with open('entities.dict') as f:
for line in f:
eid, entity = line.strip().split('\t')
entity2id_global[entity] = int(eid)
count = 0
entity2id_local = {}
local_to_global = []
with open('train.txt') as f:
for line in f:
h, r, t = line.strip().split('\t')
        if h not in entity2id_local:
entity2id_local[h] = count
local_to_global.append(entity2id_global[h])
count += 1
        if t not in entity2id_local:
entity2id_local[t] = count
local_to_global.append(entity2id_global[t])
count += 1
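# Editor's note (hedged): local ids are assigned in order of first appearance
# in train.txt, so local_to_global[local_id] recovers the global id that
# entities.dict assigned to the same entity.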
# write
f = open('entities.dict.local', 'w')
for key, value in entity2id_local.items():
f.write(str(value)+'\t'+key+'\n')
f.close()
f = open('local_to_global.txt', 'w')
for data in local_to_global:
f.write(str(data)+'\n')
f.close() | 25.342857 | 55 | 0.59752 |
60515949445b471e90897a71beeedccfcd387128 | 1,034 | py | Python | views/room_connector_view.py | Andrey-Tkachev/infection | 64ccd69f7c714c05c7b7ec8e7cf2ece2b4d6cca6 | [
"MIT"
] | 1 | 2017-04-04T09:20:00.000Z | 2017-04-04T09:20:00.000Z | views/room_connector_view.py | Andrey-Tkachev/infection | 64ccd69f7c714c05c7b7ec8e7cf2ece2b4d6cca6 | [
"MIT"
] | null | null | null | views/room_connector_view.py | Andrey-Tkachev/infection | 64ccd69f7c714c05c7b7ec8e7cf2ece2b4d6cca6 | [
"MIT"
] | null | null | null | from views.base_view import BaseView
class RoomConnectorView(BaseView):
def get(self, room_id, user_name):
user_name = self.get_query_arguments('user_name')[0]
room_id = self.get_query_arguments('room_id')[0]
if (room_id in self.application.rooms_manager.rooms_dict):
room = self.application.rooms_manager.get_room(room_id)
if self.current_user:
if self.current_user not in room.players_dict:
if len(room.players_dict) < room.players_num:
room.add_player(str(self.current_user), user_name)
self.application.rooms_manager.rooms_dict[
room_id] = room
self.set_cookie("room_id", room.objectid)
# player = {'color': room.get_color_by_player(self.current_user),
#'nick': self.get_cookie('nick')}
self.redirect('/room/')
else:
self.redirect('/')
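# Editor's note (hedged): the handler silently falls through when there is no
# authenticated user, when the room is full, or when the player already joined;
# a production handler would likely redirect or return an error in those cases.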
| 44.956522 | 89 | 0.565764 |
aec21403855e381afcc69cd877b49bc780d663da | 45,761 | py | Python | rasa/utils/tensorflow/model_data.py | orcaformation/chatbot_widget | cdbc0db5103a5a701878804ba183d5448823c798 | [
"Apache-2.0"
] | 37 | 2019-06-07T07:39:00.000Z | 2022-01-27T08:32:57.000Z | rasa/utils/tensorflow/model_data.py | orcaformation/chatbot_widget | cdbc0db5103a5a701878804ba183d5448823c798 | [
"Apache-2.0"
] | 93 | 2020-10-22T10:41:26.000Z | 2022-03-01T13:34:43.000Z | rasa/utils/tensorflow/model_data.py | orcaformation/chatbot_widget | cdbc0db5103a5a701878804ba183d5448823c798 | [
"Apache-2.0"
] | 65 | 2019-05-21T12:16:53.000Z | 2022-02-23T10:54:15.000Z | import logging
import numpy as np
import scipy.sparse
import tensorflow as tf
from sklearn.model_selection import train_test_split
from typing import (
Optional,
Dict,
Text,
List,
Tuple,
Any,
Union,
Generator,
NamedTuple,
ItemsView,
)
from collections import defaultdict, OrderedDict
from rasa.utils.tensorflow.constants import BALANCED, SEQUENCE
logger = logging.getLogger(__name__)
class FeatureArray(np.ndarray):
"""Stores any kind of features ready to be used by a RasaModel.
    Next to the input numpy array of features, it also receives the number of dimensions of the features.
    As our features can have 1 to 4 dimensions, we might have a different number of numpy arrays stacked.
The number of dimensions helps us to figure out how to handle this particular feature array.
Also, it is automatically determined whether the feature array is sparse or not and the number of units
is determined as well.
Subclassing np.array: https://numpy.org/doc/stable/user/basics.subclassing.html
"""
def __new__(
cls, input_array: np.ndarray, number_of_dimensions: int
) -> "FeatureArray":
"""Create and return a new object. See help(type) for accurate signature."""
FeatureArray._validate_number_of_dimensions(number_of_dimensions, input_array)
feature_array = np.asarray(input_array).view(cls)
if number_of_dimensions <= 2:
feature_array.units = input_array.shape[-1]
feature_array.is_sparse = isinstance(input_array[0], scipy.sparse.spmatrix)
elif number_of_dimensions == 3:
feature_array.units = input_array[0].shape[-1]
feature_array.is_sparse = isinstance(input_array[0], scipy.sparse.spmatrix)
elif number_of_dimensions == 4:
feature_array.units = input_array[0][0].shape[-1]
feature_array.is_sparse = isinstance(
input_array[0][0], scipy.sparse.spmatrix
)
else:
raise ValueError(
f"Number of dimensions '{number_of_dimensions}' currently not supported."
)
feature_array.number_of_dimensions = number_of_dimensions
return feature_array
def __init__(self, input_array: Any, number_of_dimensions: int, **kwargs):
"""Initialize. FeatureArray.
Needed in order to avoid 'Invalid keyword argument number_of_dimensions
to function FeatureArray.__init__ '
Args:
input_array: the array that contains features
number_of_dimensions: number of dimensions in input_array
"""
super().__init__(**kwargs)
self.number_of_dimensions = number_of_dimensions
def __array_finalize__(self, obj: Any) -> None:
"""This method is called whenever the system internally allocates a new array from obj.
Args:
obj: A subclass (subtype) of ndarray.
"""
if obj is None:
return
self.units = getattr(obj, "units", None)
self.number_of_dimensions = getattr(obj, "number_of_dimensions", None)
self.is_sparse = getattr(obj, "is_sparse", None)
default_attributes = {
"units": self.units,
"number_of_dimensions": self.number_of_dimensions,
"is_spare": self.is_sparse,
}
self.__dict__.update(default_attributes)
# pytype: disable=attribute-error
def __array_ufunc__(self, ufunc: Any, method: Text, *inputs, **kwargs) -> Any:
"""Overwrite this method as we are subclassing numpy array.
Args:
ufunc: The ufunc object that was called.
method: A string indicating which Ufunc method was called
(one of "__call__", "reduce", "reduceat", "accumulate", "outer",
"inner").
*inputs: A tuple of the input arguments to the ufunc.
**kwargs: Any additional arguments
Returns:
The result of the operation.
"""
f = {
"reduce": ufunc.reduce,
"accumulate": ufunc.accumulate,
"reduceat": ufunc.reduceat,
"outer": ufunc.outer,
"at": ufunc.at,
"__call__": ufunc,
}
# convert the inputs to np.ndarray to prevent recursion, call the function,
# then cast it back as FeatureArray
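        # (Editor's note, hedged: this means callers must pass
        # number_of_dimensions via kwargs; without it the lookup below
        # raises a KeyError.)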
output = FeatureArray(
f[method](*(i.view(np.ndarray) for i in inputs), **kwargs),
number_of_dimensions=kwargs["number_of_dimensions"],
)
output.__dict__ = self.__dict__ # carry forward attributes
return output
def __reduce__(self) -> Tuple[Any, Any, Any]:
"""Needed in order to pickle this object.
Returns:
A tuple.
"""
pickled_state = super(FeatureArray, self).__reduce__()
new_state = pickled_state[2] + (
self.number_of_dimensions,
self.is_sparse,
self.units,
)
return pickled_state[0], pickled_state[1], new_state
def __setstate__(self, state, **kwargs) -> None:
"""Sets the state.
Args:
state: The state argument must be a sequence that contains the following
                elements: version, shape, dtype, isFortran, rawdata.
**kwargs: Any additional parameter
"""
# Needed in order to load the object
self.number_of_dimensions = state[-3]
self.is_sparse = state[-2]
self.units = state[-1]
super(FeatureArray, self).__setstate__(state[0:-3], **kwargs)
# pytype: enable=attribute-error
@staticmethod
def _validate_number_of_dimensions(
number_of_dimensions: int, input_array: np.ndarray
) -> None:
"""Validates if the the input array has given number of dimensions.
Args:
number_of_dimensions: number of dimensions
input_array: input array
Raises: ValueError in case the dimensions do not match
"""
_sub_array = input_array
dim = 0
# Go number_of_dimensions into the given input_array
for i in range(1, number_of_dimensions + 1):
_sub_array = _sub_array[0]
if isinstance(_sub_array, scipy.sparse.spmatrix):
dim = i
break
if isinstance(_sub_array, np.ndarray) and _sub_array.shape[0] == 0:
# sequence dimension is 0, we are dealing with "fake" features
dim = i
break
# If the resulting sub_array is sparse, the remaining number of dimensions
# should be at least 2
if isinstance(_sub_array, scipy.sparse.spmatrix):
if dim > 2:
raise ValueError(
f"Given number of dimensions '{number_of_dimensions}' does not "
f"match dimensions of given input array: {input_array}."
)
elif isinstance(_sub_array, np.ndarray) and _sub_array.shape[0] == 0:
# sequence dimension is 0, we are dealing with "fake" features,
# but they should be of dim 2
if dim > 2:
raise ValueError(
f"Given number of dimensions '{number_of_dimensions}' does not "
f"match dimensions of given input array: {input_array}."
)
# If the resulting sub_array is dense, the sub_array should be a single number
elif not np.issubdtype(type(_sub_array), np.integer) and not isinstance(
_sub_array, (np.float32, np.float64)
):
raise ValueError(
f"Given number of dimensions '{number_of_dimensions}' does not match "
f"dimensions of given input array: {input_array}."
)
def get_shape_type_info(
self,
) -> Tuple[
List[
Union[
int,
Tuple[None],
Tuple[None, int],
Tuple[None, None, int],
Tuple[None, None, None, int],
]
],
List[int],
]:
"""Returns shapes and types needed to convert this feature array into tensors.
Returns:
A list of shape tuples.
A list of type tuples.
"""
if self.is_sparse:
# 4D tensors were converted into 3D tensors during padding
number_of_dimensions = (
self.number_of_dimensions if self.number_of_dimensions != 4 else 3
)
# scipy matrix is converted into indices, data, shape
return (
[(None, number_of_dimensions), (None,), (number_of_dimensions)],
[tf.int64, tf.float32, tf.int64],
)
if self.number_of_dimensions == 1:
return [(None,)], [tf.float32]
if self.number_of_dimensions == 2:
return [(None, self.units)], [tf.float32]
if self.number_of_dimensions == 3:
return [(None, None, self.units)], [tf.float32]
if self.number_of_dimensions == 4:
# 4D tensors were converted into 3D tensors during padding
return [(None, None, self.units)], [tf.float32]
return [], []
class FeatureSignature(NamedTuple):
"""Signature of feature arrays.
Stores the number of units, the type (sparse vs dense), and the number of
dimensions of features.
"""
is_sparse: bool
units: Optional[int]
number_of_dimensions: int
# Mapping of attribute name and feature name to a list of feature arrays representing
# the actual features
# For example:
# "text" -> { "sentence": [
# "feature array containing dense features for every training example",
# "feature array containing sparse features for every training example"
# ]}
Data = Dict[Text, Dict[Text, List[FeatureArray]]]
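# Editor's sketch (hedged, not part of the module): a minimal Data instance
# with invented shapes, showing the attribute -> feature-name -> arrays layout:
#     data: Data = {
#         "text": {
#             "sentence": [
#                 FeatureArray(np.array([np.ones(5), np.ones(5)]), number_of_dimensions=2)
#             ]
#         }
#     }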
class RasaModelData:
"""Data object used for all RasaModels.
It contains all features needed to train the models.
'data' is a mapping of attribute name, e.g. TEXT, INTENT, etc., and feature name,
e.g. SENTENCE, SEQUENCE, etc., to a list of feature arrays representing the actual
features.
'label_key' and 'label_sub_key' point to the labels inside 'data'. For
example, if your intent labels are stored under INTENT -> IDS, 'label_key' would
be "INTENT" and 'label_sub_key' would be "IDS".
"""
def __init__(
self,
label_key: Optional[Text] = None,
label_sub_key: Optional[Text] = None,
data: Optional[Data] = None,
) -> None:
"""
Initializes the RasaModelData object.
Args:
label_key: the key of a label used for balancing, etc.
label_sub_key: the sub key of a label used for balancing, etc.
data: the data holding the features
"""
self.data = data or defaultdict(lambda: defaultdict(list))
self.label_key = label_key
self.label_sub_key = label_sub_key
# should be updated when features are added
self.num_examples = self.number_of_examples()
def get(
self, key: Text, sub_key: Optional[Text] = None
) -> Union[Dict[Text, List[FeatureArray]], List[FeatureArray]]:
"""Get the data under the given keys.
Args:
key: The key.
sub_key: The optional sub key.
Returns:
The requested data.
"""
if sub_key is None and key in self.data:
return self.data[key]
if sub_key and key in self.data and sub_key in self.data[key]:
return self.data[key][sub_key]
return []
def items(self) -> ItemsView:
"""Return the items of the data attribute.
Returns:
The items of data.
"""
return self.data.items()
def values(self) -> Any:
"""Return the values of the data attribute.
Returns:
The values of data.
"""
return self.data.values()
def keys(self, key: Optional[Text] = None) -> List[Text]:
"""Return the keys of the data attribute.
Args:
key: The optional key.
Returns:
The keys of the data.
"""
if key is None:
return list(self.data.keys())
if key in self.data:
return list(self.data[key].keys())
return []
def sort(self):
"""Sorts data according to its keys."""
for key, attribute_data in self.data.items():
self.data[key] = OrderedDict(sorted(attribute_data.items()))
self.data = OrderedDict(sorted(self.data.items()))
def first_data_example(self) -> Data:
"""Return the data with just one feature example per key, sub-key.
Returns:
The simplified data.
"""
out_data = {}
for key, attribute_data in self.data.items():
out_data[key] = {}
for sub_key, features in attribute_data.items():
out_data[key][sub_key] = [feature[:1] for feature in features]
return out_data
def does_feature_exist(self, key: Text, sub_key: Optional[Text] = None) -> bool:
"""Check if feature key (and sub-key) is present and features are available.
Args:
key: The key.
sub_key: The optional sub-key.
Returns:
False, if no features for the given keys exist, True otherwise.
"""
return not self.does_feature_not_exist(key, sub_key)
def does_feature_not_exist(self, key: Text, sub_key: Optional[Text] = None) -> bool:
"""Check if feature key (and sub-key) is present and features are available.
Args:
key: The key.
sub_key: The optional sub-key.
Returns:
True, if no features for the given keys exist, False otherwise.
"""
if sub_key:
return (
key not in self.data
or not self.data[key]
or sub_key not in self.data[key]
or not self.data[key][sub_key]
)
return key not in self.data or not self.data[key]
def is_empty(self) -> bool:
"""Checks if data is set."""
return not self.data
def number_of_examples(self, data: Optional[Data] = None) -> int:
"""Obtain number of examples in data.
Args:
data: The data.
Raises: A ValueError if the number of examples differs between features.
Returns:
The number of examples in data.
"""
if not data:
data = self.data
if not data:
return 0
example_lengths = [
len(f)
for attribute_data in data.values()
for features in attribute_data.values()
for f in features
]
if not example_lengths:
return 0
# check if number of examples is the same for all values
if not all(length == example_lengths[0] for length in example_lengths):
raise ValueError(
f"Number of examples differs for keys '{data.keys()}'. Number of "
f"examples should be the same for all data."
)
return example_lengths[0]
def number_of_units(self, key: Text, sub_key: Text) -> int:
"""Get the number of units of the given key.
Args:
key: The key.
sub_key: The optional sub-key.
Returns:
The number of units.
"""
if key not in self.data or sub_key not in self.data[key]:
return 0
units = 0
for features in self.data[key][sub_key]:
if len(features) > 0:
units += features.units
return units
def add_data(self, data: Data, key_prefix: Optional[Text] = None) -> None:
"""Add incoming data to data.
Args:
data: The data to add.
key_prefix: Optional key prefix to use in front of the key value.
"""
for key, attribute_data in data.items():
for sub_key, features in attribute_data.items():
if key_prefix:
self.add_features(f"{key_prefix}{key}", sub_key, features)
else:
self.add_features(key, sub_key, features)
def update_key(
self, from_key: Text, from_sub_key: Text, to_key: Text, to_sub_key: Text
) -> None:
"""Copies the features under the given keys to the new keys and deletes the old keys.
Args:
from_key: current feature key
from_sub_key: current feature sub-key
to_key: new key for feature
to_sub_key: new sub-key for feature
"""
if from_key not in self.data or from_sub_key not in self.data[from_key]:
return
if to_key not in self.data:
self.data[to_key] = {}
self.data[to_key][to_sub_key] = self.get(from_key, from_sub_key)
del self.data[from_key][from_sub_key]
if not self.data[from_key]:
del self.data[from_key]
def add_features(
self, key: Text, sub_key: Text, features: Optional[List[FeatureArray]]
) -> None:
"""Add list of features to data under specified key.
Should update number of examples.
Args:
key: The key
sub_key: The sub-key
features: The features to add.
"""
if features is None:
return
for feature_array in features:
if len(feature_array) > 0:
self.data[key][sub_key].append(feature_array)
if not self.data[key][sub_key]:
del self.data[key][sub_key]
# update number of examples
self.num_examples = self.number_of_examples()
def add_lengths(
self, key: Text, sub_key: Text, from_key: Text, from_sub_key: Text
) -> None:
"""Adds a feature array of lengths of sequences to data under given key.
Args:
key: The key to add the lengths to
sub_key: The sub-key to add the lengths to
from_key: The key to take the lengths from
from_sub_key: The sub-key to take the lengths from
"""
if not self.data.get(from_key) or not self.data.get(from_key, {}).get(
from_sub_key
):
return
self.data[key][sub_key] = []
for features in self.data[from_key][from_sub_key]:
if len(features) == 0:
continue
if features.number_of_dimensions == 4:
lengths = FeatureArray(
np.array(
[
# add one more dim so that dialogue dim
# would be a sequence
np.array([[[x.shape[0]]] for x in _features])
for _features in features
]
),
number_of_dimensions=4,
)
else:
lengths = FeatureArray(
np.array([x.shape[0] for x in features]), number_of_dimensions=1
)
self.data[key][sub_key].extend([lengths])
break
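# Illustrative example (added, hypothetical shapes): if
# self.data["text"]["sequence"] holds a 3D feature array whose three
# sequences have lengths 5, 7 and 2, then
#     model_data.add_lengths("text", "sequence_lengths", "text", "sequence")
# stores a 1D FeatureArray [5, 7, 2] under
# self.data["text"]["sequence_lengths"].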
def split(
self, number_of_test_examples: int, random_seed: int
) -> Tuple["RasaModelData", "RasaModelData"]:
"""Create random hold out test set using stratified split.
Args:
number_of_test_examples: Number of test examples.
random_seed: Random seed.
Returns:
A tuple of train and test RasaModelData.
"""
self._check_label_key()
if self.label_key is None or self.label_sub_key is None:
# randomly split data as no label key is set
multi_values = [
v
for attribute_data in self.data.values()
for data in attribute_data.values()
for v in data
]
solo_values = [
[]
for attribute_data in self.data.values()
for data in attribute_data.values()
for _ in data
]
stratify = None
else:
# make sure that examples for each label value are in both split sets
label_ids = self._create_label_ids(
self.data[self.label_key][self.label_sub_key][0]
)
label_counts = dict(zip(*np.unique(label_ids, return_counts=True, axis=0)))
self._check_train_test_sizes(number_of_test_examples, label_counts)
counts = np.array([label_counts[label] for label in label_ids])
# we perform stratified train test split,
# which ensures every label is present in the train and test data
# this operation can be performed only for labels
# that contain several data points
multi_values = [
f[counts > 1]
for attribute_data in self.data.values()
for features in attribute_data.values()
for f in features
]
# collect data points that are unique for their label
solo_values = [
f[counts == 1]
for attribute_data in self.data.values()
for features in attribute_data.values()
for f in features
]
stratify = label_ids[counts > 1]
output_values = train_test_split(
*multi_values,
test_size=number_of_test_examples,
random_state=random_seed,
stratify=stratify,
)
return self._convert_train_test_split(output_values, solo_values)
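# Hedged usage sketch (added, not from the original source):
#     train_data, test_data = model_data.split(
#         number_of_test_examples=20, random_seed=42
#     )
# With a label key set, every label with more than one example ends up in
# both splits; single-example labels go to the training set via solo_values.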
def get_signature(
self, data: Optional[Data] = None
) -> Dict[Text, Dict[Text, List[FeatureSignature]]]:
"""Get signature of RasaModelData.
Signature stores the shape and whether features are sparse or not for every key.
Returns:
A dictionary of key and sub-key to a list of feature signatures
(same structure as the data attribute).
"""
if not data:
data = self.data
return {
key: {
sub_key: [
FeatureSignature(f.is_sparse, f.units, f.number_of_dimensions)
for f in features
]
for sub_key, features in attribute_data.items()
}
for key, attribute_data in data.items()
}
def as_tf_dataset(
self, batch_size: int, batch_strategy: Text = SEQUENCE, shuffle: bool = False
) -> tf.data.Dataset:
"""Create tf dataset.
Args:
batch_size: The batch size to use.
batch_strategy: The batch strategy to use.
shuffle: Boolean indicating whether the data should be shuffled or not.
Returns:
The tf.data.Dataset.
"""
shapes, types = self._get_shapes_types()
return tf.data.Dataset.from_generator(
lambda batch_size_: self._gen_batch(batch_size_, batch_strategy, shuffle),
output_types=types,
output_shapes=shapes,
args=([batch_size]),
)
def prepare_batch(
self,
data: Optional[Data] = None,
start: Optional[int] = None,
end: Optional[int] = None,
tuple_sizes: Optional[Dict[Text, int]] = None,
) -> Tuple[Optional[np.ndarray]]:
"""Slices model data into batch using given start and end value.
Args:
data: The data to prepare.
start: The start index of the batch
end: The end index of the batch
tuple_sizes: In case a feature is not present, the batch is padded
with None values. Tuple sizes specifies how many None values to
add for which kind of feature.
Returns:
The features of the batch.
"""
if not data:
data = self.data
batch_data = []
for key, attribute_data in data.items():
for sub_key, f_data in attribute_data.items():
# add None for not present values during processing
if not f_data:
if tuple_sizes:
batch_data += [None] * tuple_sizes[key]
else:
batch_data.append(None)
continue
for v in f_data:
if start is not None and end is not None:
_data = v[start:end]
elif start is not None:
_data = v[start:]
elif end is not None:
_data = v[:end]
else:
_data = v[:]
if _data.is_sparse:
batch_data.extend(self._scipy_matrix_to_values(_data))
else:
batch_data.append(self._pad_dense_data(_data))
# len of batch_data is equal to the number of keys in model data
return tuple(batch_data)
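# Illustration (added, hypothetical data): with dense "text"/"sentence"
# features and sparse "intent"/"ids" features, prepare_batch(start=0, end=64)
# returns a flat tuple such as
#     (padded_text_array, intent_indices, intent_data, intent_shape)
# i.e. one entry per dense feature array and three per sparse one.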
def _get_shapes_types(self) -> Tuple:
"""Extract shapes and types from model data.
Returns:
A tuple of shapes and a tuple of types.
"""
types = []
shapes = []
for attribute_data in self.data.values():
for features in attribute_data.values():
for f in features:
_shapes, _types = f.get_shape_type_info()
shapes.extend(_shapes)
types.extend(_types)
return tuple(shapes), tuple(types)
def _shuffled_data(self, data: Data) -> Data:
"""Shuffle model data.
Args:
data: The data to shuffle
Returns:
The shuffled data.
"""
ids = np.random.permutation(self.num_examples)
return self._data_for_ids(data, ids)
def _balanced_data(self, data: Data, batch_size: int, shuffle: bool) -> Data:
"""Mix model data to account for class imbalance.
This batching strategy puts rare classes approximately in every other batch,
by repeating them. Mimics stratified batching, but also takes into account
that more populated classes should appear more often.
Args:
data: The data.
batch_size: The batch size.
shuffle: Boolean indicating whether to shuffle the data or not.
Returns:
The balanced data.
"""
self._check_label_key()
# skip balancing if labels are token based
if (
self.label_key is None
or self.label_sub_key is None
or data[self.label_key][self.label_sub_key][0][0].size > 1
):
return data
label_ids = self._create_label_ids(data[self.label_key][self.label_sub_key][0])
unique_label_ids, counts_label_ids = np.unique(
label_ids, return_counts=True, axis=0
)
num_label_ids = len(unique_label_ids)
# group data points by their label
# need to call every time, so that the data is shuffled inside each class
data_by_label = self._split_by_label_ids(data, label_ids, unique_label_ids)
# running index inside each data grouped by labels
data_idx = [0] * num_label_ids
# number of cycles each label was passed
num_data_cycles = [0] * num_label_ids
# if a label was skipped in current batch
skipped = [False] * num_label_ids
new_data = defaultdict(lambda: defaultdict(list))
while min(num_data_cycles) == 0:
if shuffle:
indices_of_labels = np.random.permutation(num_label_ids)
else:
indices_of_labels = range(num_label_ids)
for index in indices_of_labels:
if num_data_cycles[index] > 0 and not skipped[index]:
skipped[index] = True
continue
skipped[index] = False
index_batch_size = (
int(counts_label_ids[index] / self.num_examples * batch_size) + 1
)
for key, attribute_data in data_by_label[index].items():
for sub_key, features in attribute_data.items():
for i, f in enumerate(features):
if len(new_data[key][sub_key]) < i + 1:
new_data[key][sub_key].append([])
new_data[key][sub_key][i].append(
f[data_idx[index] : data_idx[index] + index_batch_size]
)
data_idx[index] += index_batch_size
if data_idx[index] >= counts_label_ids[index]:
num_data_cycles[index] += 1
data_idx[index] = 0
if min(num_data_cycles) > 0:
break
final_data = defaultdict(lambda: defaultdict(list))
for key, attribute_data in new_data.items():
for sub_key, features in attribute_data.items():
for f in features:
final_data[key][sub_key].append(
FeatureArray(
np.concatenate(np.array(f)),
number_of_dimensions=f[0].number_of_dimensions,
)
)
return final_data
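# Worked example (added, hypothetical counts): with 100 examples of label A,
# 4 of label B and batch_size 32, each pass through the loop takes
#     int(100 / 104 * 32) + 1 == 31 examples of A and
#     int(4 / 104 * 32) + 1 == 2 examples of B,
# so B is repeated in roughly every batch until every label has completed at
# least one full cycle through its data.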
def _gen_batch(
self, batch_size: int, batch_strategy: Text = SEQUENCE, shuffle: bool = False
) -> Generator[Tuple[Optional[np.ndarray]], None, None]:
"""Generate batches.
Args:
batch_size: The batch size
batch_strategy: The batch strategy.
shuffle: Boolean indicating whether to shuffle the data or not.
Returns:
A generator over the batches.
"""
data = self.data
num_examples = self.num_examples
if shuffle:
data = self._shuffled_data(data)
if batch_strategy == BALANCED:
data = self._balanced_data(data, batch_size, shuffle)
# after balancing, number of examples increased
num_examples = self.number_of_examples(data)
num_batches = num_examples // batch_size + int(num_examples % batch_size > 0)
for batch_num in range(num_batches):
start = batch_num * batch_size
end = start + batch_size
yield self.prepare_batch(data, start, end)
def _check_train_test_sizes(
self, number_of_test_examples: int, label_counts: Dict[Any, int]
) -> None:
"""Check whether the test data set is too large or too small.
Args:
number_of_test_examples: number of test examples
label_counts: number of labels
Raises:
A ValueError if the number of examples does not fit.
"""
if number_of_test_examples >= self.num_examples - len(label_counts):
raise ValueError(
f"Test set of {number_of_test_examples} is too large. Remaining "
f"train set should be at least equal to number of classes "
f"{len(label_counts)}."
)
if number_of_test_examples < len(label_counts):
raise ValueError(
f"Test set of {number_of_test_examples} is too small. It should "
f"be at least equal to number of classes {label_counts}."
)
@staticmethod
def _data_for_ids(data: Optional[Data], ids: np.ndarray) -> Data:
"""Filter model data by ids.
Args:
data: The data to filter
ids: The ids
Returns:
The filtered data
"""
new_data = defaultdict(lambda: defaultdict(list))
if data is None:
return new_data
for key, attribute_data in data.items():
for sub_key, features in attribute_data.items():
for f in features:
new_data[key][sub_key].append(f[ids])
return new_data
def _split_by_label_ids(
self, data: Optional[Data], label_ids: np.ndarray, unique_label_ids: np.ndarray
) -> List["RasaModelData"]:
"""Reorganize model data into a list of model data with the same labels.
Args:
data: The data
label_ids: The label ids
unique_label_ids: The unique label ids
Returns:
Reorganized RasaModelData
"""
label_data = []
for label_id in unique_label_ids:
matching_ids = np.array(label_ids) == label_id
label_data.append(
RasaModelData(
self.label_key,
self.label_sub_key,
self._data_for_ids(data, matching_ids),
)
)
return label_data
def _check_label_key(self) -> None:
"""Check if the label key exists.
Raises:
ValueError if the label key and sub-key is not in data.
"""
if (
self.label_key is not None
and self.label_sub_key is not None
and (
self.label_key not in self.data
or self.label_sub_key not in self.data[self.label_key]
or len(self.data[self.label_key][self.label_sub_key]) > 1
)
):
raise ValueError(
f"Key '{self.label_key}.{self.label_sub_key}' not in RasaModelData."
)
def _convert_train_test_split(
self, output_values: List[Any], solo_values: List[Any]
) -> Tuple["RasaModelData", "RasaModelData"]:
"""Converts the output of sklearn's train_test_split into model data.
Args:
output_values: output values of sklearn's train_test_split
solo_values: list of solo values
Returns:
The test and train RasaModelData
"""
data_train = defaultdict(lambda: defaultdict(list))
data_val = defaultdict(lambda: defaultdict(list))
# output_values = x_train, x_val, y_train, y_val, z_train, z_val, etc.
# order is kept, e.g. same order as model data keys
# train datasets have an even index
index = 0
for key, attribute_data in self.data.items():
for sub_key, features in attribute_data.items():
for f in features:
data_train[key][sub_key].append(
self._combine_features(
output_values[index * 2],
solo_values[index],
f.number_of_dimensions,
)
)
index += 1
# val datasets have an odd index
index = 0
for key, attribute_data in self.data.items():
for sub_key, features in attribute_data.items():
for _ in features:
data_val[key][sub_key].append(output_values[(index * 2) + 1])
index += 1
return (
RasaModelData(self.label_key, self.label_sub_key, data_train),
RasaModelData(self.label_key, self.label_sub_key, data_val),
)
@staticmethod
def _combine_features(
feature_1: Union[np.ndarray, scipy.sparse.spmatrix],
feature_2: Union[np.ndarray, scipy.sparse.spmatrix],
number_of_dimensions: Optional[int] = 1,
) -> FeatureArray:
"""Concatenate features.
Args:
feature_1: Features to concatenate.
feature_2: Features to concatenate.
Returns:
The combined features.
"""
if isinstance(feature_1, scipy.sparse.spmatrix) and isinstance(
feature_2, scipy.sparse.spmatrix
):
if feature_2.shape[0] == 0:
return FeatureArray(feature_1, number_of_dimensions)
if feature_1.shape[0] == 0:
return FeatureArray(feature_2, number_of_dimensions)
return FeatureArray(
scipy.sparse.vstack([feature_1, feature_2]), number_of_dimensions
)
return FeatureArray(
np.concatenate([feature_1, feature_2]), number_of_dimensions
)
@staticmethod
def _create_label_ids(label_ids: FeatureArray) -> np.ndarray:
"""Convert various size label_ids into single dim array.
For multi-label y, map each distinct row to a string representation
using join because str(row) uses an ellipsis if len(row) > 1000.
Idea taken from sklearn's stratify split.
Args:
label_ids: The label ids.
Raises:
ValueError if dimensionality of label ids is not supported
Returns:
The single dim label array.
"""
if label_ids.ndim == 1:
return label_ids
if label_ids.ndim == 2 and label_ids.shape[-1] == 1:
return label_ids[:, 0]
if label_ids.ndim == 2:
return np.array([" ".join(row.astype("str")) for row in label_ids])
if label_ids.ndim == 3 and label_ids.shape[-1] == 1:
return np.array([" ".join(row.astype("str")) for row in label_ids[:, :, 0]])
raise ValueError("Unsupported label_ids dimensions")
@staticmethod
def _filter_out_fake_inputs(
array_of_array_of_features: FeatureArray,
) -> Union[List[List[np.ndarray]], List[List[scipy.sparse.spmatrix]]]:
return list(
filter(
# filter empty lists created by another filter
lambda x: len(x) > 0,
[
# filter all the "fake" inputs, we know the input is "fake",
# when sequence dimension is `0`
list(filter(lambda x: x.shape[0] > 0, array_of_features))
for array_of_features in array_of_array_of_features
],
)
)
@staticmethod
def _pad_dense_data(array_of_dense: FeatureArray) -> np.ndarray:
"""Pad data of different lengths.
Sequential data is padded with zeros. Zeros are added to the end of data.
Args:
array_of_dense: The array to pad.
Returns:
The padded array.
"""
if array_of_dense.number_of_dimensions == 4:
return RasaModelData._pad_4d_dense_data(array_of_dense)
if array_of_dense[0].ndim < 2:
# data doesn't contain a sequence
return array_of_dense.astype(np.float32)
data_size = len(array_of_dense)
max_seq_len = max([x.shape[0] for x in array_of_dense])
data_padded = np.zeros(
[data_size, max_seq_len, array_of_dense[0].shape[-1]],
dtype=array_of_dense[0].dtype,
)
for i in range(data_size):
data_padded[i, : array_of_dense[i].shape[0], :] = array_of_dense[i]
return data_padded.astype(np.float32)
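# Illustration (added, hypothetical shapes): three dense sequences of shape
# (2, 3), (4, 3) and (1, 3) are padded to a single float32 array of shape
# (3, 4, 3); rows beyond each sequence's real length stay zero.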
@staticmethod
def _pad_4d_dense_data(array_of_array_of_dense: FeatureArray) -> np.ndarray:
# in case of dialogue data we may have 4 dimensions
# batch size x dialogue history length x sequence length x number of features
# as transformers cannot handle 4D tensors pad and reshape the data
# so that the resulting tensor is 3D
# the shape is (sum of dialogue history length for all tensors in the
# batch x max sequence length x number of features)
# the original shape and the original dialogue length is passed on to the model
# it can be used to transform the 3D tensor back into 4D
# in order to create 4d tensor inputs, we created "fake" zero features
# for nonexistent inputs. To save computation we filter these features before
# input to tf methods.
number_of_features = array_of_array_of_dense[0][0].shape[-1]
array_of_array_of_dense = RasaModelData._filter_out_fake_inputs(
array_of_array_of_dense
)
if not array_of_array_of_dense:
# return empty 3d array with appropriate last dims
return np.zeros((0, 0, number_of_features), dtype=np.float32)
combined_dialogue_len = sum(
len(array_of_dense) for array_of_dense in array_of_array_of_dense
)
max_seq_len = max(
[
x.shape[0]
for array_of_dense in array_of_array_of_dense
for x in array_of_dense
]
)
data_padded = np.zeros(
[combined_dialogue_len, max_seq_len, number_of_features],
dtype=array_of_array_of_dense[0][0].dtype,
)
current_sum_dialogue_len = 0
for i, array_of_dense in enumerate(array_of_array_of_dense):
for j, dense in enumerate(array_of_dense):
data_padded[current_sum_dialogue_len + j, : dense.shape[0], :] = dense
current_sum_dialogue_len += len(array_of_dense)
return data_padded.astype(np.float32)
@staticmethod
def _scipy_matrix_to_values(array_of_sparse: FeatureArray) -> List[np.ndarray]:
"""Convert a scipy matrix into indices, data, and shape.
Args:
array_of_sparse: The sparse data array.
Returns:
A list of dense numpy arrays representing the sparse data.
"""
if array_of_sparse.number_of_dimensions == 4:
return RasaModelData._4d_scipy_matrix_to_values(array_of_sparse)
# we need to make sure that the matrices are coo_matrices otherwise the
# transformation does not work (e.g. you cannot access x.row, x.col)
if not isinstance(array_of_sparse[0], scipy.sparse.coo_matrix):
array_of_sparse = [x.tocoo() for x in array_of_sparse]
max_seq_len = max([x.shape[0] for x in array_of_sparse])
# get the indices of values
indices = np.hstack(
[
np.vstack([i * np.ones_like(x.row), x.row, x.col])
for i, x in enumerate(array_of_sparse)
]
).T
data = np.hstack([x.data for x in array_of_sparse])
number_of_features = array_of_sparse[0].shape[-1]
shape = np.array((len(array_of_sparse), max_seq_len, number_of_features))
return [
indices.astype(np.int64),
data.astype(np.float32),
shape.astype(np.int64),
]
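# Illustration (added, hypothetical shapes): two sparse coo matrices of shape
# (2, 10) and (3, 10) are converted into
#     indices of shape (nnz, 3)  # (example index, row, col) per stored value
#     data of shape (nnz,)
#     shape == [2, 3, 10]        # batch, max sequence length, features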
@staticmethod
def _4d_scipy_matrix_to_values(
array_of_array_of_sparse: FeatureArray,
) -> List[np.ndarray]:
# in case of dialogue data we may have 4 dimensions
# batch size x dialogue history length x sequence length x number of features
# transformers cannot handle 4D tensors, therefore pad and reshape the data
# so that the resulting tensor is 3D
# the shape is (sum of dialogue history length for all tensors in the
# batch x max sequence length x number of features)
# the original shape and the original dialogue length is passed on to the model
# it can be used to transform the 3D tensor back into 4D
# in order to create 4d tensor inputs, we created "fake" zero features
# for nonexistent inputs. To save computation we filter these features before
# input to tf methods.
number_of_features = array_of_array_of_sparse[0][0].shape[-1]
array_of_array_of_sparse = RasaModelData._filter_out_fake_inputs(
array_of_array_of_sparse
)
if not array_of_array_of_sparse:
# create empty array with appropriate last dims
return [
np.empty((0, 3), dtype=np.int64),
np.array([], dtype=np.float32),
np.array([0, 0, number_of_features], dtype=np.int64),
]
# we need to make sure that the matrices are coo_matrices otherwise the
# transformation does not work (e.g. you cannot access x.row, x.col)
if not isinstance(array_of_array_of_sparse[0][0], scipy.sparse.coo_matrix):
array_of_array_of_sparse = [
[x.tocoo() for x in array_of_sparse]
for array_of_sparse in array_of_array_of_sparse
]
dialogue_len = [
len(array_of_sparse) for array_of_sparse in array_of_array_of_sparse
]
combined_dialogue_len = sum(dialogue_len)
max_seq_len = max(
[
x.shape[0]
for array_of_sparse in array_of_array_of_sparse
for x in array_of_sparse
]
)
# get the indices of values
indices = np.hstack(
[
np.vstack(
[sum(dialogue_len[:i]) + j * np.ones_like(x.row), x.row, x.col]
)
for i, array_of_sparse in enumerate(array_of_array_of_sparse)
for j, x in enumerate(array_of_sparse)
]
).T
data = np.hstack(
[
x.data
for array_of_sparse in array_of_array_of_sparse
for x in array_of_sparse
]
)
shape = np.array((combined_dialogue_len, max_seq_len, number_of_features))
return [
indices.astype(np.int64),
data.astype(np.float32),
shape.astype(np.int64),
]
| 35.092791 | 107 | 0.576036 |
ca5b92871762bc110a03d12546e164b085b92130 | 611 | py | Python | tmc_summarizer/helpers.py | dvrpc/tmc-summarizer | aabe9889773b59b5e4de761358b69904569e1c60 | [
"MIT"
] | null | null | null | tmc_summarizer/helpers.py | dvrpc/tmc-summarizer | aabe9889773b59b5e4de761358b69904569e1c60 | [
"MIT"
] | null | null | null | tmc_summarizer/helpers.py | dvrpc/tmc-summarizer | aabe9889773b59b5e4de761358b69904569e1c60 | [
"MIT"
] | null | null | null | import zipfile
from pathlib import Path
def zip_files(output_filename: Path,
list_of_filepaths: list) -> None:
"""Write a list of files to the provided output_filename
:param output_filename: path to the new ZIP file
:type output_filename: Path
:param list_of_filepaths: list of filepaths to put into the zip file
:type list_of_filepaths: list
:return: None
"""
compression = zipfile.ZIP_DEFLATED
zf = zipfile.ZipFile(output_filename, mode="w")
for file in list_of_filepaths:
zf.write(file, file.name, compress_type=compression)
zf.close()
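# Hedged usage sketch (added, not part of the original module); the folder
# and file names below are hypothetical.
if __name__ == "__main__":
    folder = Path("/tmp/tmc_output")
    csv_files = [folder / "summary.csv", folder / "detail.csv"]
    zip_files(folder / "summaries.zip", csv_files)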
| 27.772727 | 72 | 0.703764 |
1b9b152ab8ec3d229fc599ec445e4ae5a7f90430 | 710 | py | Python | src/pyramid_oidc/authentication/keycloak.py | ausecocloud/pyramid_oidc | b1e5db9092f066f4a815515c4d7001801303c541 | [
"Apache-2.0"
] | 1 | 2019-04-19T19:25:02.000Z | 2019-04-19T19:25:02.000Z | src/pyramid_oidc/authentication/keycloak.py | ausecocloud/pyramid_oidc | b1e5db9092f066f4a815515c4d7001801303c541 | [
"Apache-2.0"
] | null | null | null | src/pyramid_oidc/authentication/keycloak.py | ausecocloud/pyramid_oidc | b1e5db9092f066f4a815515c4d7001801303c541 | [
"Apache-2.0"
] | null | null | null |
def keycloak_callback(userid, request):
# extract role claims from decoded access_token
claims = request.environ.get('oidc.claims', {})
if not claims:
# no claims, so not authenticated
return None
# realm_access ... roles per realm
roles = []
roles.extend(claims.get('realm_access', {}).get('roles', []))
# resource_access ... roles per application
for resource, resource_roles in claims.get('resource_access', {}).items():
roles.extend('{}/{}'.format(resource, role)
for role in resource_roles.get('roles', []))
# roles may be empty here, but we have a decoded access token so we should
# not return None
return roles
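# Minimal sanity check (added, not part of the original module); the request
# stand-in and claim values below are hypothetical.
if __name__ == "__main__":
    class _FakeRequest:
        environ = {
            "oidc.claims": {
                "realm_access": {"roles": ["user"]},
                "resource_access": {"app": {"roles": ["admin"]}},
            }
        }
    # Prints ['user', 'app/admin']
    print(keycloak_callback("some-user-id", _FakeRequest()))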
| 37.368421 | 78 | 0.639437 |
b992089cc1c7c036a8a5bc6e9598827df441bf81 | 6,661 | py | Python | letsencrypt/tests/client_test.py | stewnorriss/letsencrypt | 4b8651274f83394909af23905abbb715f150b8bf | [
"Apache-2.0"
] | 1 | 2018-08-27T03:17:09.000Z | 2018-08-27T03:17:09.000Z | letsencrypt/tests/client_test.py | rsumnerz/certbot | a65f14635257e6ce0e1b9cfca6bf4c801214d14c | [
"Apache-2.0",
"MIT"
] | null | null | null | letsencrypt/tests/client_test.py | rsumnerz/certbot | a65f14635257e6ce0e1b9cfca6bf4c801214d14c | [
"Apache-2.0",
"MIT"
] | null | null | null | """Tests for letsencrypt.client."""
import unittest
import configobj
import OpenSSL
import mock
from acme import jose
from letsencrypt import account
from letsencrypt import configuration
from letsencrypt import errors
from letsencrypt import le_util
from letsencrypt.tests import test_util
KEY = test_util.load_vector("rsa512_key.pem")
CSR_SAN = test_util.load_vector("csr-san.der")
class RegisterTest(unittest.TestCase):
"""Tests for letsencrypt.client.register."""
def setUp(self):
self.config = mock.MagicMock(rsa_key_size=1024)
self.account_storage = account.AccountMemoryStorage()
self.tos_cb = mock.MagicMock()
def _call(self):
from letsencrypt.client import register
return register(self.config, self.account_storage, self.tos_cb)
def test_no_tos(self):
with mock.patch("letsencrypt.client.acme_client.Client") as mock_client:
mock_client.register().terms_of_service = "http://tos"
with mock.patch("letsencrypt.account.report_new_account"):
self.tos_cb.return_value = False
self.assertRaises(errors.Error, self._call)
self.tos_cb.return_value = True
self._call()
self.tos_cb = None
self._call()
def test_it(self):
with mock.patch("letsencrypt.client.acme_client.Client"):
with mock.patch("letsencrypt.account."
"report_new_account"):
self._call()
class ClientTest(unittest.TestCase):
"""Tests for letsencrypt.client.Client."""
def setUp(self):
self.config = mock.MagicMock(
no_verify_ssl=False, config_dir="/etc/letsencrypt")
# pylint: disable=star-args
self.account = mock.MagicMock(**{"key.pem": KEY})
from letsencrypt.client import Client
with mock.patch("letsencrypt.client.acme_client.Client") as acme:
self.acme_client = acme
self.acme = acme.return_value = mock.MagicMock()
self.client = Client(
config=self.config, account_=self.account,
dv_auth=None, installer=None)
def test_init_acme_verify_ssl(self):
self.acme_client.assert_called_once_with(
new_reg_uri=mock.ANY, key=mock.ANY, verify_ssl=True)
def _mock_obtain_certificate(self):
self.client.auth_handler = mock.MagicMock()
self.acme.request_issuance.return_value = mock.sentinel.certr
self.acme.fetch_chain.return_value = mock.sentinel.chain
def _check_obtain_certificate(self):
self.client.auth_handler.get_authorizations.assert_called_once_with(
["example.com", "www.example.com"])
self.acme.request_issuance.assert_called_once_with(
jose.ComparableX509(OpenSSL.crypto.load_certificate_request(
OpenSSL.crypto.FILETYPE_ASN1, CSR_SAN)),
self.client.auth_handler.get_authorizations())
self.acme.fetch_chain.assert_called_once_with(mock.sentinel.certr)
def test_obtain_certificate_from_csr(self):
self._mock_obtain_certificate()
self.assertEqual(
(mock.sentinel.certr, mock.sentinel.chain),
self.client.obtain_certificate_from_csr(le_util.CSR(
form="der", file=None, data=CSR_SAN)))
self._check_obtain_certificate()
@mock.patch("letsencrypt.client.crypto_util")
def test_obtain_certificate(self, mock_crypto_util):
self._mock_obtain_certificate()
csr = le_util.CSR(form="der", file=None, data=CSR_SAN)
mock_crypto_util.init_save_csr.return_value = csr
mock_crypto_util.init_save_key.return_value = mock.sentinel.key
domains = ["example.com", "www.example.com"]
self.assertEqual(
self.client.obtain_certificate(domains),
(mock.sentinel.certr, mock.sentinel.chain, mock.sentinel.key, csr))
mock_crypto_util.init_save_key.assert_called_once_with(
self.config.rsa_key_size, self.config.key_dir)
mock_crypto_util.init_save_csr.assert_called_once_with(
mock.sentinel.key, domains, self.config.cert_dir)
self._check_obtain_certificate()
@mock.patch("letsencrypt.client.zope.component.getUtility")
def test_report_renewal_status(self, mock_zope):
# pylint: disable=protected-access
cert = mock.MagicMock()
cert.configuration = configobj.ConfigObj()
cert.cli_config = configuration.RenewerConfiguration(self.config)
cert.configuration["autorenew"] = "True"
cert.configuration["autodeploy"] = "True"
self.client._report_renewal_status(cert)
msg = mock_zope().add_message.call_args[0][0]
self.assertTrue("renewal and deployment has been" in msg)
self.assertTrue(cert.cli_config.renewal_configs_dir in msg)
cert.configuration["autorenew"] = "False"
self.client._report_renewal_status(cert)
msg = mock_zope().add_message.call_args[0][0]
self.assertTrue("deployment but not automatic renewal" in msg)
self.assertTrue(cert.cli_config.renewal_configs_dir in msg)
cert.configuration["autodeploy"] = "False"
self.client._report_renewal_status(cert)
msg = mock_zope().add_message.call_args[0][0]
self.assertTrue("renewal and deployment has not" in msg)
self.assertTrue(cert.cli_config.renewal_configs_dir in msg)
cert.configuration["autorenew"] = "True"
self.client._report_renewal_status(cert)
msg = mock_zope().add_message.call_args[0][0]
self.assertTrue("renewal but not automatic deployment" in msg)
self.assertTrue(cert.cli_config.renewal_configs_dir in msg)
class RollbackTest(unittest.TestCase):
"""Tests for letsencrypt.client.rollback."""
def setUp(self):
self.m_install = mock.MagicMock()
@classmethod
def _call(cls, checkpoints, side_effect):
from letsencrypt.client import rollback
with mock.patch("letsencrypt.client"
".display_ops.pick_installer") as mock_pick_installer:
mock_pick_installer.side_effect = side_effect
rollback(None, checkpoints, {}, mock.MagicMock())
def test_no_problems(self):
self._call(1, self.m_install)
self.assertEqual(self.m_install().rollback_checkpoints.call_count, 1)
self.assertEqual(self.m_install().restart.call_count, 1)
def test_no_installer(self):
self._call(1, None) # Just make sure no exceptions are raised
if __name__ == "__main__":
unittest.main() # pragma: no cover
| 38.281609 | 80 | 0.680228 |
55902cf6f084c5ebdc23423ee7db22b8952f87cd | 1,813 | py | Python | Algorithms_medium/0436. Find Right Interval.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
] | 4 | 2020-08-11T20:45:15.000Z | 2021-03-12T00:33:34.000Z | Algorithms_medium/0436. Find Right Interval.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
] | null | null | null | Algorithms_medium/0436. Find Right Interval.py | VinceW0/Leetcode_Python_solutions | 09e9720afce21632372431606ebec4129eb79734 | [
"Xnet",
"X11"
] | null | null | null | """
0436. Find Right Interval
Given a set of intervals, for each of the interval i, check if there exists an interval j whose start point is bigger than or equal to the end point of the interval i, which can be called that j is on the "right" of i.
For any interval i, you need to store the minimum interval j's index, which means that the interval j has the minimum start point to build the "right" relationship for interval i. If the interval j doesn't exist, store -1 for the interval i. Finally, you need output the stored value of each interval as an array.
Note:
You may assume the interval's end point is always bigger than its start point.
You may assume none of these intervals have the same start point.
Example 1:
Input: [ [1,2] ]
Output: [-1]
Explanation: There is only one interval in the collection, so it outputs -1.
Example 2:
Input: [ [3,4], [2,3], [1,2] ]
Output: [-1, 0, 1]
Explanation: There is no satisfied "right" interval for [3,4].
For [2,3], the interval [3,4] has minimum-"right" start point;
For [1,2], the interval [2,3] has minimum-"right" start point.
Example 3:
Input: [ [1,4], [2,3], [3,4] ]
Output: [-1, 2, -1]
Explanation: There is no satisfied "right" interval for [1,4] and [3,4].
For [2,3], the interval [3,4] has minimum-"right" start point.
NOTE: input types have been changed on April 15, 2019. Please reset to default code definition to get new method signature.
"""
import bisect
from typing import List
class Solution:
def findRightInterval(self, intervals: List[List[int]]) -> List[int]:
n = len(intervals)
if n == 0: return []
if n == 1: return [-1]
p = [[start, idx] for idx, (start, _) in enumerate(intervals)]
p.sort()
q = [bisect.bisect_left(p, [j, 0]) for i, j in intervals]
return [(p[i][1] if i < n else -1) for i in q]
| 35.54902 | 313 | 0.675124 |
054485791c2dff46abce343eb990b6b2cef9ad18 | 37,768 | py | Python | soco/services.py | relevitt/SoCo | aeffc02d11dbfc60e4589c473a3a528abaceea0a | [
"MIT"
] | null | null | null | soco/services.py | relevitt/SoCo | aeffc02d11dbfc60e4589c473a3a528abaceea0a | [
"MIT"
] | null | null | null | soco/services.py | relevitt/SoCo | aeffc02d11dbfc60e4589c473a3a528abaceea0a | [
"MIT"
] | null | null | null | # pylint: disable=fixme, invalid-name
# Disable while we have Python 2.x compatability
# pylint: disable=useless-object-inheritance
"""Classes representing Sonos UPnP services.
>>> import soco
>>> device = soco.SoCo('192.168.1.102')
>>> print(RenderingControl(device).GetMute([('InstanceID', 0),
... ('Channel', 'Master')]))
{'CurrentMute': '0'}
>>> r = ContentDirectory(device).Browse([
... ('ObjectID', 'Q:0'),
... ('BrowseFlag', 'BrowseDirectChildren'),
... ('Filter', '*'),
... ('StartingIndex', '0'),
... ('RequestedCount', '100'),
... ('SortCriteria', '')
... ])
>>> print(r['Result'])
<?xml version="1.0" ?><DIDL-Lite xmlns="urn:schemas-upnp-org:metadata ...
>>> for action, in_args, out_args in AlarmClock(device).iter_actions():
... print(action, in_args, out_args)
...
SetFormat [Argument(name='DesiredTimeFormat', vartype='string'), Argument(
name='DesiredDateFormat', vartype='string')] []
GetFormat [] [Argument(name='CurrentTimeFormat', vartype='string'),
Argument(name='CurrentDateFormat', vartype='string')] ...
"""
# UPnP Spec at http://upnp.org/specs/arch/UPnP-arch-DeviceArchitecture-v1.0.pdf
import logging
from collections import namedtuple
from xml.sax.saxutils import escape
import requests
from .cache import Cache
from . import events
from . import config
from .exceptions import NotSupportedException, SoCoUPnPException, UnknownSoCoException
from .utils import prettify
from .xml import XML, illegal_xml_re
# UNICODE NOTE
# UPnP requires all XML to be transmitted/received with utf-8 encoding. All
# strings used in this module are unicode. The Requests library should take
# care of all of the necessary encoding (on sending) and decoding (on
# receiving) for us, provided that we specify the correct encoding headers
# (which, hopefully, we do).
# But since ElementTree seems to prefer being fed bytes to unicode, at least
# for Python 2.x, we have to encode strings specifically before using it. see
# http://bugs.python.org/issue11033 TODO: Keep an eye on this when it comes to
# Python 3 compatibility
log = logging.getLogger(__name__) # pylint: disable=C0103
# logging.basicConfig()
# log.setLevel(logging.INFO)
if config.EVENTS_MODULE is None:
config.EVENTS_MODULE = events
class Action(namedtuple("ActionBase", "name, in_args, out_args")):
"""A UPnP Action and its arguments."""
def __str__(self):
args = ", ".join(str(arg) for arg in self.in_args)
returns = ", ".join(str(arg) for arg in self.out_args)
return "{0}({1}) -> {{{2}}}".format(self.name, args, returns)
class Argument(namedtuple("ArgumentBase", "name, vartype")):
"""A UPnP Argument and its type."""
def __str__(self):
argument = self.name
if self.vartype.default:
argument = "{}={}".format(self.name, self.vartype.default)
return "{}: {}".format(argument, str(self.vartype))
class Vartype(namedtuple("VartypeBase", "datatype, default, list, range")):
"""An argument type with default value and range."""
def __str__(self):
if self.list:
return "[{}]".format(", ".join(self.list))
if self.range:
return "[{}..{}]".format(self.range[0], self.range[1])
return self.datatype
# pylint: disable=too-many-instance-attributes
class Service:
"""A class representing a UPnP service.
This is the base class for all Sonos Service classes. This class has a
dynamic method dispatcher. Calls to methods which are not explicitly
defined here are dispatched automatically to the service action with the
same name.
"""
# pylint: disable=bad-continuation
soap_body_template = (
'<?xml version="1.0"?>'
'<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"'
' s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
"<s:Body>"
'<u:{action} xmlns:u="urn:schemas-upnp-org:service:'
'{service_type}:{version}">'
"{arguments}"
"</u:{action}>"
"</s:Body>"
"</s:Envelope>"
) # noqa PEP8
def __init__(self, soco):
"""
Args:
soco (SoCo): A `SoCo` instance to which the UPnP Actions will be
sent
"""
#: `SoCo`: The `SoCo` instance to which UPnP Actions are sent
self.soco = soco
# Some defaults. Some or all of these will need to be overridden
# specifically in a sub-class. There is other information we could
# record, but this will do for the moment. Info about a Sonos device is
# available at <IP_address>/xml/device_description.xml in the
# <service> tags
#: str: The UPnP service type.
self.service_type = self.__class__.__name__
#: str: The UPnP service version.
self.version = 1
self.service_id = self.service_type
#: str: The base URL for sending UPnP Actions.
self.base_url = "http://{}:1400".format(self.soco.ip_address)
#: str: The UPnP Control URL.
self.control_url = "/{}/Control".format(self.service_type)
#: str: The service control protocol description URL.
self.scpd_url = "/xml/{}{}.xml".format(self.service_type, self.version)
#: str: The service eventing subscription URL.
self.event_subscription_url = "/{}/Event".format(self.service_type)
#: A cache for storing the result of network calls. By default, this is
#: a `TimedCache` with a default timeout=0.
self.cache = Cache(default_timeout=0)
# Caching variables for actions and event_vars, will be filled when
# they are requested for the first time
self._actions = None
self._event_vars = None
# From table 3.3 in
# http://upnp.org/specs/arch/UPnP-arch-DeviceArchitecture-v1.1.pdf
# This list may not be complete, but should be good enough to be going
# on with. Error codes between 700-799 are defined for particular
# services, and may be overridden in subclasses. Error codes >800
# are generally SONOS specific. NB It may well be that SONOS does not
# use some of these error codes.
# pylint: disable=invalid-name
self.UPNP_ERRORS = {
400: "Bad Request",
401: "Invalid Action",
402: "Invalid Args",
404: "Invalid Var",
412: "Precondition Failed",
501: "Action Failed",
600: "Argument Value Invalid",
601: "Argument Value Out of Range",
602: "Optional Action Not Implemented",
603: "Out Of Memory",
604: "Human Intervention Required",
605: "String Argument Too Long",
606: "Action Not Authorized",
607: "Signature Failure",
608: "Signature Missing",
609: "Not Encrypted",
610: "Invalid Sequence",
611: "Invalid Control URL",
612: "No Such Session",
}
self.DEFAULT_ARGS = {}
def __getattr__(self, action):
"""Called when a method on the instance cannot be found.
Causes an action to be sent to UPnP server. See also
`object.__getattr__`.
Args:
action (str): The name of the unknown method.
Returns:
callable: The callable to be invoked. .
"""
# Define a function to be invoked as the method, which calls
# send_command.
def _dispatcher(self, *args, **kwargs):
"""Dispatch to send_command."""
return self.send_command(action, *args, **kwargs)
# rename the function so it appears to be the called method. We
# probably don't need this, but it does no harm
_dispatcher.__name__ = action
# _dispatcher is now an unbound method, but we need a bound method.
# This turns an unbound method into a bound method (i.e. one that
# takes self - an instance of the class - as the first parameter)
# pylint: disable=no-member
method = _dispatcher.__get__(self, self.__class__)
# Now we have a bound method, we cache it on this instance, so that
# next time we don't have to go through this again
setattr(self, action, method)
log.debug("Dispatching method %s", action)
# return our new bound method, which will be called by Python
return method
@staticmethod
def wrap_arguments(args=None):
"""Wrap a list of tuples in xml ready to pass into a SOAP request.
Args:
args (list): a list of (name, value) tuples specifying the
name of each argument and its value, eg
``[('InstanceID', 0), ('Speed', 1)]``. The value
can be a string or something with a string representation. The
arguments are escaped and wrapped in <name> and <value> tags.
Example:
>>> from soco import SoCo
>>> device = SoCo('192.168.1.101')
>>> s = Service(device)
>>> print(s.wrap_arguments([('InstanceID', 0), ('Speed', 1)]))
<InstanceID>0</InstanceID><Speed>1</Speed>
"""
if args is None:
args = []
tags = []
for name, value in args:
tag = "<{name}>{value}</{name}>".format(
name=name, value=escape("%s" % value, {'"': """})
)
# % converts to unicode because we are using unicode literals.
# Avoids use of 'unicode' function which does not exist in python 3
tags.append(tag)
xml = "".join(tags)
return xml
@staticmethod
def unwrap_arguments(xml_response):
"""Extract arguments and their values from a SOAP response.
Args:
xml_response (str): SOAP/xml response text (unicode,
not utf-8).
Returns:
dict: a dict of ``{argument_name: value}`` items.
"""
# A UPnP SOAP response (including headers) looks like this:
# HTTP/1.1 200 OK
# CONTENT-LENGTH: bytes in body
# CONTENT-TYPE: text/xml; charset="utf-8" DATE: when response was
# generated
# EXT:
# SERVER: OS/version UPnP/1.0 product/version
#
# <?xml version="1.0"?>
# <s:Envelope
# xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"
# s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
# <s:Body>
# <u:actionNameResponse
# xmlns:u="urn:schemas-upnp-org:service:serviceType:v">
# <argumentName>out arg value</argumentName>
# ... other out args and their values go here, if any
# </u:actionNameResponse>
# </s:Body>
# </s:Envelope>
# Get all tags in order. ElementTree (in python 2.x) seems to prefer to
# be fed bytes, rather than unicode
xml_response = xml_response.encode("utf-8")
try:
tree = XML.fromstring(xml_response)
except XML.ParseError:
# Try to filter illegal xml chars (as unicode), in case that is
# the reason for the parse error
filtered = illegal_xml_re.sub("", xml_response.decode("utf-8")).encode(
"utf-8"
)
tree = XML.fromstring(filtered)
# Get the first child of the <Body> tag which will be
# <{actionNameResponse}> (depends on what actionName is). Turn the
# children of this into a {tagname, content} dict. XML unescaping
# is carried out for us by ElementTree.
action_response = tree.find("{http://schemas.xmlsoap.org/soap/envelope/}Body")[
0
]
return {i.tag: i.text or "" for i in action_response}
def compose_args(self, action_name, in_argdict):
"""Compose the argument list from an argument dictionary, with
respect for default values.
Args:
action_name (str): The name of the action to be performed.
in_argdict (dict): Arguments as a dict, e.g.
``{'InstanceID': 0, 'Speed': 1}``. The values
can be a string or something with a string representation.
Returns:
list: a list of ``(name, value)`` tuples.
Raises:
AttributeError: If this service does not support the action.
ValueError: If the argument lists do not match the action
signature.
"""
for action in self.actions:
if action.name == action_name:
# The found 'action' will be visible from outside the loop
break
else:
raise AttributeError("Unknown Action: {}".format(action_name))
# Check for given argument names which do not occur in the expected
# argument list
# pylint: disable=undefined-loop-variable
unexpected = set(in_argdict) - {argument.name for argument in action.in_args}
if unexpected:
raise ValueError(
"Unexpected argument '{}'. Method signature: {}".format(
next(iter(unexpected)), str(action)
)
)
# List the (name, value) tuples for each argument in the argument list
composed = []
for argument in action.in_args:
name = argument.name
if name in in_argdict:
composed.append((name, in_argdict[name]))
continue
if name in self.DEFAULT_ARGS:
composed.append((name, self.DEFAULT_ARGS[name]))
continue
if argument.vartype.default is not None:
composed.append((name, argument.vartype.default))
raise ValueError(
"Missing argument '{}'. Method signature: {}".format(
argument.name, str(action)
)
)
return composed
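# Illustration (added): for the GetMute action shown in the module docstring,
#     compose_args('GetMute', {'InstanceID': 0, 'Channel': 'Master'})
# returns [('InstanceID', 0), ('Channel', 'Master')], ordered as in the
# action signature; omitting a required argument raises a ValueError.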
def build_command(self, action, args=None):
"""Build a SOAP request.
Args:
action (str): the name of an action (a string as specified in the
service description XML file) to be sent.
args (list, optional): Relevant arguments as a list of (name,
value) tuples.
Returns:
tuple: a tuple containing the POST headers (as a dict) and a
string containing the relevant SOAP body. Does not set
content-length, or host headers, which are completed upon
sending.
"""
# A complete request should look something like this:
# POST path of control URL HTTP/1.1
# HOST: host of control URL:port of control URL
# CONTENT-LENGTH: bytes in body
# CONTENT-TYPE: text/xml; charset="utf-8"
# SOAPACTION: "urn:schemas-upnp-org:service:serviceType:v#actionName"
#
# <?xml version="1.0"?>
# <s:Envelope
# xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"
# s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
# <s:Body>
# <u:actionName
# xmlns:u="urn:schemas-upnp-org:service:serviceType:v">
# <argumentName>in arg value</argumentName>
# ... other in args and their values go here, if any
# </u:actionName>
# </s:Body>
# </s:Envelope>
arguments = self.wrap_arguments(args)
body = self.soap_body_template.format(
arguments=arguments,
action=action,
service_type=self.service_type,
version=self.version,
)
soap_action_template = (
"urn:schemas-upnp-org:service:{service_type}:{version}#{action}"
)
soap_action = soap_action_template.format(
service_type=self.service_type, version=self.version, action=action
)
headers = {
"Content-Type": 'text/xml; charset="utf-8"',
"SOAPACTION": soap_action,
}
# Note that although we set the charset to utf-8 here, in fact the
# body is still unicode. It will only be converted to bytes when it
# is set over the network
return (headers, body)
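# Illustration (added): for a RenderingControl service,
#     build_command('GetMute', [('InstanceID', 0), ('Channel', 'Master')])
# produces a SOAPACTION header of
#     "urn:schemas-upnp-org:service:RenderingControl:1#GetMute"
# and a body whose <u:GetMute> element wraps the two argument tags.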
def send_command(self, action, args=None, cache=None, cache_timeout=None, **kwargs):
"""Send a command to a Sonos device.
Args:
action (str): the name of an action (a string as specified in the
service description XML file) to be sent.
args (list, optional): Relevant arguments as a list of (name,
value) tuples, as an alternative to ``kwargs``.
cache (Cache): A cache is operated so that the result will be
stored for up to ``cache_timeout`` seconds, and a subsequent
call with the same arguments within that period will be
returned from the cache, saving a further network call. The
cache may be invalidated or even primed from another thread
(for example if a UPnP event is received to indicate that
the state of the Sonos device has changed). If
``cache_timeout`` is missing or `None`, the cache will use a
default value (which may be 0 - see
:attr:`~soco.services.Service.cache`). By default, the cache
identified by the service's
:attr:`~soco.services.Service.cache` attribute will
be used, but a different cache object may be specified in
the ``cache`` parameter.
kwargs: Relevant arguments for the command.
Returns:
dict: a dict of ``{argument_name, value}`` items.
Raises:
AttributeError: If this service does not support the action.
ValueError: If the argument lists do not match the action
signature.
`SoCoUPnPException`: if a SOAP error occurs.
`UnknownSoCoException`: if an unknown UPnP error occurs.
`requests.exceptions.HTTPError`: if an http error occurs.
"""
# Determine the timeout for the request: use the value of
# config.REQUEST_TIMEOUT unless overridden by 'timeout'
# being provided as a kwarg by the caller, in which case
# use this and remove it from kwargs.
timeout = kwargs.pop("timeout", config.REQUEST_TIMEOUT)
log.debug("Request timeout set to %s", timeout)
if args is None:
args = self.compose_args(action, kwargs)
if cache is None:
cache = self.cache
result = cache.get(action, args)
if result is not None:
log.debug("Cache hit")
return result
# Cache miss, so go ahead and make a network call
headers, body = self.build_command(action, args)
log.debug("Sending %s %s to %s", action, args, self.soco.ip_address)
log.debug("Sending %s, %s", headers, prettify(body))
# Convert the body to bytes, and send it.
response = requests.post(
self.base_url + self.control_url,
headers=headers,
data=body.encode("utf-8"),
timeout=timeout,
)
log.debug("Received %s, %s", response.headers, response.text)
status = response.status_code
log.debug("Received status %s from %s", status, self.soco.ip_address)
if status == 200:
# The response is good. Get the output params, and return them.
# NB an empty dict is a valid result. It just means that no
# params are returned. By using response.text, we rely upon
# the requests library to convert to unicode for us.
result = self.unwrap_arguments(response.text) or True
# Store in the cache. There is no need to do this if there was an
# error, since we would want to try a network call again.
cache.put(result, action, args, timeout=cache_timeout)
return result
elif status == 405:
raise NotSupportedException(
"{} not supported on {}".format(action, self.soco.ip_address)
)
elif status == 500:
# Internal server error. UPnP requires this to be returned if the
# device does not like the action for some reason. The returned
# content will be a SOAP Fault. Parse it and raise an error.
self.handle_upnp_error(response.text)
else:
# Something else has gone wrong. Probably a network error. Let
# Requests handle it
response.raise_for_status()
return None
def handle_upnp_error(self, xml_error):
"""Disect a UPnP error, and raise an appropriate exception.
Args:
xml_error (str): a unicode string containing the body of the
UPnP/SOAP Fault response. Raises an exception containing the
error code.
"""
# An error code looks something like this:
# HTTP/1.1 500 Internal Server Error
# CONTENT-LENGTH: bytes in body
# CONTENT-TYPE: text/xml; charset="utf-8"
# DATE: when response was generated
# EXT:
# SERVER: OS/version UPnP/1.0 product/version
# <?xml version="1.0"?>
# <s:Envelope
# xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"
# s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
# <s:Body>
# <s:Fault>
# <faultcode>s:Client</faultcode>
# <faultstring>UPnPError</faultstring>
# <detail>
# <UPnPError xmlns="urn:schemas-upnp-org:control-1-0">
# <errorCode>error code</errorCode>
# <errorDescription>error string</errorDescription>
# </UPnPError>
# </detail>
# </s:Fault>
# </s:Body>
# </s:Envelope>
#
# All that matters for our purposes is the errorCode.
# errorDescription is not required, and Sonos does not seem to use it.
# NB need to encode unicode strings before passing to ElementTree
xml_error = xml_error.encode("utf-8")
error = XML.fromstring(xml_error)
log.debug("Error %s", xml_error)
error_code = error.findtext(".//{urn:schemas-upnp-org:control-1-0}errorCode")
if error_code is not None:
description = self.UPNP_ERRORS.get(int(error_code), "")
raise SoCoUPnPException(
message="UPnP Error {} received: {} from {}".format(
error_code, description, self.soco.ip_address
),
error_code=error_code,
error_description=description,
error_xml=xml_error,
)
# Unknown error, so just return the entire response
log.error("Unknown error received from %s", self.soco.ip_address)
raise UnknownSoCoException(xml_error)
def subscribe(
self, requested_timeout=None, auto_renew=False, event_queue=None, strict=True
):
"""Subscribe to the service's events.
Args:
requested_timeout (int, optional): If requested_timeout is
provided, a subscription valid for that
number of seconds will be requested, but not guaranteed. Check
:attr:`~soco.events.Subscription.timeout` on return to find out
what period of validity is actually allocated.
auto_renew (bool): If auto_renew is `True`, the subscription will
automatically be renewed just before it expires, if possible.
Default is `False`.
event_queue (:class:`~queue.Queue`): a thread-safe queue object on
which received events will be put. If not specified,
a (:class:`~queue.Queue`) will be created and used.
strict (bool, optional): If True and an Exception occurs during
execution, the Exception will be raised or, if False, the
Exception will be logged and the Subscription instance will be
returned. Default `True`.
Returns:
:class:`~soco.events.Subscription`: an instance of
:class:`~soco.events.Subscription`, representing the new
subscription. If config.EVENTS_MODULE has
been set to refer to :py:mod:`events_twisted`, a deferred will
be returned with the Subscription as its result and
deferred.subscription will be set to refer to the Subscription.
To unsubscribe, call the :meth:`~soco.events.Subscription.unsubscribe`
method on the returned object.
"""
subscription = config.EVENTS_MODULE.Subscription(self, event_queue)
return subscription.subscribe(
requested_timeout=requested_timeout, auto_renew=auto_renew, strict=strict
)
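    # Sketch of caller-side usage (names outside this file, such as the
    # Subscription's 'events' queue attribute, are assumptions based on the
    # docstring above):
    #   sub = device.avTransport.subscribe(requested_timeout=120,
    #                                      auto_renew=True)
    #   event = sub.events.get(timeout=1)
    #   sub.unsubscribe()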
def _update_cache_on_event(self, event):
"""Update the cache when an event is received.
This will be called before an event is put onto the event queue. Events
will often indicate that the Sonos device's state has changed, so this
opportunity is made available for the service to update its cache. The
event will be put onto the event queue once this method returns.
`event` is an Event namedtuple: ('sid', 'seq', 'service', 'variables')
.. warning:: This method will not be called from the main thread but
by one or more threads, which handle the events as they come in.
You *must not* access any class, instance or global variables
without appropriate locks. Treat all parameters passed to this
method as read only.
"""
@property
def actions(self):
"""The service's actions with their arguments.
Returns:
list(`Action`): A list of Action namedtuples, consisting of
action_name (str), in_args (list of Argument namedtuples,
consisting of name and argtype), and out_args (ditto).
The return value looks like this:
.. code-block:: python
[
Action(
name='GetMute',
in_args=[
Argument(name='InstanceID', ...),
Argument(
name='Channel',
vartype='string',
list=['Master', 'LF', 'RF', 'SpeakerOnly'],
range=None
)
],
out_args=[
                        Argument(name='CurrentMute', ...)
]
                ),
Action(...)
]
Its string representation will look like this:
.. code-block:: text
GetMute(InstanceID: ui4, Channel: [Master, LF, RF, SpeakerOnly])\n
-> {CurrentMute: boolean}
"""
if self._actions is None:
self._actions = list(self.iter_actions())
return self._actions
def iter_actions(self):
"""Yield the service's actions with their arguments.
Yields:
`Action`: the next action.
Each action is an Action namedtuple, consisting of action_name
(a string), in_args (a list of Argument namedtuples consisting of name
and argtype), and out_args (ditto), eg::
Action(
name='SetFormat',
in_args=[
Argument(name='DesiredTimeFormat', vartype=<Vartype>),
Argument(name='DesiredDateFormat', vartype=<Vartype>)],
out_args=[]
)
"""
# pylint: disable=too-many-locals
# pylint: disable=invalid-name
ns = "{urn:schemas-upnp-org:service-1-0}"
# get the scpd body as bytes, and feed directly to elementtree
# which likes to receive bytes
scpd_body = requests.get(self.base_url + self.scpd_url, timeout=10).content
tree = XML.fromstring(scpd_body)
# parse the state variables to get the relevant variable types
vartypes = {}
srvStateTables = tree.findall("{}serviceStateTable".format(ns))
for srvStateTable in srvStateTables:
statevars = srvStateTable.findall("{}stateVariable".format(ns))
for state in statevars:
name = state.findtext("{}name".format(ns))
datatype = state.findtext("{}dataType".format(ns))
default = state.findtext("{}defaultValue".format(ns))
                # Check against None explicitly: testing an Element's truth
                # value is deprecated in ElementTree and falsy for childless
                # elements.
                value_list_elt = state.find("{}allowedValueList".format(ns))
                value_list = None
                if value_list_elt is not None:
                    value_list = [item.text for item in value_list_elt] or None
                value_range_elt = state.find("{}allowedValueRange".format(ns))
                value_range = None
                if value_range_elt is not None:
                    value_range = [item.text for item in value_range_elt] or None
vartypes[name] = Vartype(datatype, default, value_list, value_range)
# find all the actions
actionLists = tree.findall("{}actionList".format(ns))
for actionList in actionLists:
actions = actionList.findall("{}action".format(ns))
for i in actions:
action_name = i.findtext("{}name".format(ns))
argLists = i.findall("{}argumentList".format(ns))
for argList in argLists:
args_iter = argList.findall("{}argument".format(ns))
in_args = []
out_args = []
for arg in args_iter:
arg_name = arg.findtext("{}name".format(ns))
direction = arg.findtext("{}direction".format(ns))
related_variable = arg.findtext(
"{}relatedStateVariable".format(ns)
)
vartype = vartypes[related_variable]
if direction == "in":
in_args.append(Argument(arg_name, vartype))
else:
out_args.append(Argument(arg_name, vartype))
yield Action(action_name, in_args, out_args)
@property
def event_vars(self):
"""The service's eventable variables.
Returns:
list(tuple): A list of (variable name, data type) tuples.
"""
if self._event_vars is None:
self._event_vars = list(self.iter_event_vars())
return self._event_vars
def iter_event_vars(self):
"""Yield the services eventable variables.
Yields:
`tuple`: a tuple of (variable name, data type).
"""
# pylint: disable=invalid-name
ns = "{urn:schemas-upnp-org:service-1-0}"
scpd_body = requests.get(self.base_url + self.scpd_url, timeout=10).text
tree = XML.fromstring(scpd_body.encode("utf-8"))
# parse the state variables to get the relevant variable types
statevars = tree.findall("{}stateVariable".format(ns))
for state in statevars:
# We are only interested if 'sendEvents' is 'yes', i.e this
# is an eventable variable
if state.attrib["sendEvents"] == "yes":
name = state.findtext("{}name".format(ns))
vartype = state.findtext("{}dataType".format(ns))
yield (name, vartype)
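    # Illustrative output of iter_event_vars() for a rendering-control style
    # service (the actual values depend on the device's SCPD document):
    #   ('LastChange', 'string'), ('Volume', 'ui2'), ('Mute', 'boolean'), ...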
class AlarmClock(Service):
"""Sonos alarm service, for setting and getting time and alarms."""
def __init__(self, soco):
super().__init__(soco)
self.UPNP_ERRORS.update(
{
801: "Already an alarm for this time",
}
)
class MusicServices(Service):
"""Sonos music services service, for functions related to 3rd party music
services."""
class AudioIn(Service):
"""Sonos audio in service, for functions related to RCA audio input."""
class DeviceProperties(Service):
"""Sonos device properties service, for functions relating to zones, LED
state, stereo pairs etc."""
class SystemProperties(Service):
"""Sonos system properties service, for functions relating to
authentication etc."""
class ZoneGroupTopology(Service):
"""Sonos zone group topology service, for functions relating to network
topology, diagnostics and updates."""
class GroupManagement(Service):
"""Sonos group management service, for services relating to groups."""
class QPlay(Service):
"""Sonos Tencent QPlay service (a Chinese music service)"""
class ContentDirectory(Service):
"""UPnP standard Content Directory service, for functions relating to
browsing, searching and listing available music."""
def __init__(self, soco):
super().__init__(soco)
self.control_url = "/MediaServer/ContentDirectory/Control"
self.event_subscription_url = "/MediaServer/ContentDirectory/Event"
# For error codes, see table 2.7.16 in
# http://upnp.org/specs/av/UPnP-av-ContentDirectory-v1-Service.pdf
self.UPNP_ERRORS.update(
{
701: "No such object",
702: "Invalid CurrentTagValue",
703: "Invalid NewTagValue",
704: "Required tag",
705: "Read only tag",
706: "Parameter Mismatch",
708: "Unsupported or invalid search criteria",
709: "Unsupported or invalid sort criteria",
710: "No such container",
711: "Restricted object",
712: "Bad metadata",
713: "Restricted parent object",
714: "No such source resource",
715: "Resource access denied",
716: "Transfer busy",
717: "No such file transfer",
718: "No such destination resource",
719: "Destination resource access denied",
720: "Cannot process the request",
}
)
class MS_ConnectionManager(Service): # pylint: disable=invalid-name
"""UPnP standard connection manager service for the media server."""
def __init__(self, soco):
super().__init__(soco)
self.service_type = "ConnectionManager"
self.control_url = "/MediaServer/ConnectionManager/Control"
self.event_subscription_url = "/MediaServer/ConnectionManager/Event"
class RenderingControl(Service):
"""UPnP standard rendering control service, for functions relating to
playback rendering, eg bass, treble, volume and EQ."""
def __init__(self, soco):
super().__init__(soco)
self.control_url = "/MediaRenderer/RenderingControl/Control"
self.event_subscription_url = "/MediaRenderer/RenderingControl/Event"
self.DEFAULT_ARGS.update({"InstanceID": 0})
class MR_ConnectionManager(Service): # pylint: disable=invalid-name
"""UPnP standard connection manager service for the media renderer."""
def __init__(self, soco):
super().__init__(soco)
self.service_type = "ConnectionManager"
self.control_url = "/MediaRenderer/ConnectionManager/Control"
self.event_subscription_url = "/MediaRenderer/ConnectionManager/Event"
class AVTransport(Service):
"""UPnP standard AV Transport service, for functions relating to transport
management, eg play, stop, seek, playlists etc."""
def __init__(self, soco):
super().__init__(soco)
self.control_url = "/MediaRenderer/AVTransport/Control"
self.event_subscription_url = "/MediaRenderer/AVTransport/Event"
# For error codes, see
# http://upnp.org/specs/av/UPnP-av-AVTransport-v1-Service.pdf
self.UPNP_ERRORS.update(
{
701: "Transition not available",
702: "No contents",
703: "Read error",
704: "Format not supported for playback",
705: "Transport is locked",
706: "Write error",
707: "Media is protected or not writeable",
708: "Format not supported for recording",
709: "Media is full",
710: "Seek mode not supported",
711: "Illegal seek target",
712: "Play mode not supported",
713: "Record quality not supported",
714: "Illegal MIME-Type",
715: 'Content "BUSY"',
716: "Resource Not found",
717: "Play speed not supported",
718: "Invalid InstanceID",
737: "No DNS Server",
738: "Bad Domain Name",
739: "Server Error",
}
)
self.DEFAULT_ARGS.update({"InstanceID": 0})
class Queue(Service):
"""Sonos queue service, for functions relating to queue management, saving
queues etc."""
def __init__(self, soco):
super().__init__(soco)
self.control_url = "/MediaRenderer/Queue/Control"
self.event_subscription_url = "/MediaRenderer/Queue/Event"
class GroupRenderingControl(Service):
"""Sonos group rendering control service, for functions relating to group
volume etc."""
def __init__(self, soco):
super().__init__(soco)
self.control_url = "/MediaRenderer/GroupRenderingControl/Control"
self.event_subscription_url = "/MediaRenderer/GroupRenderingControl/Event"
self.DEFAULT_ARGS.update({"InstanceID": 0})
| 40.050901 | 88 | 0.591135 |
472ea213af4bcccba0c7083153004d12f901cd49 | 4,342 | py | Python | src/m9_using_objects.py | AlangarCSSE120/03-AccumulatorsAndFunctionsWithParameters | 0d883001a96a2092d5ab7620a1dd33e267312fe4 | [
"MIT"
] | null | null | null | src/m9_using_objects.py | AlangarCSSE120/03-AccumulatorsAndFunctionsWithParameters | 0d883001a96a2092d5ab7620a1dd33e267312fe4 | [
"MIT"
] | null | null | null | src/m9_using_objects.py | AlangarCSSE120/03-AccumulatorsAndFunctionsWithParameters | 0d883001a96a2092d5ab7620a1dd33e267312fe4 | [
"MIT"
] | null | null | null | """
This module lets you practice ** using objects **, including:
-- CONSTRUCTING objects,
-- applying METHODS to them, and
-- accessing their DATA via INSTANCE VARIABLES
Authors: David Mutchler, Dave Fisher, Vibha Alangar, Mark Hays, Amanda Stouder,
their colleagues and PUT_YOUR_NAME_HERE.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
def main():
""" Calls the other functions to demonstrate and/or test them. """
# Test your functions by putting calls to them here:
# two_circles()
circle_and_rectangle()
def two_circles():
"""
-- Constructs an rg.RoseWindow.
-- Constructs and draws two rg.Circle objects on the window
such that:
-- They fit in the window and are easily visible.
-- They have different radii.
-- One is filled with some color and one is not filled.
-- Waits for the user to press the mouse, then closes the window.
"""
# ------------------------------------------------------------------
# DONE: 2. Implement this function, per its green doc-string above.
# -- ANY two rg.Circle objects that meet the criteria are fine.
# -- File COLORS.pdf lists all legal color-names.
# Put a statement in main to test this function
# (by calling this function).
# ------------------------------------------------------------------
window = rg.RoseWindow()
fill_circle = rg.Circle(rg.Point(100, 100), 20)
fill_circle.fill_color = 'blue'
not_filled_circle = rg.Circle(rg.Point(250, 250), 25)
fill_circle.attach_to(window)
not_filled_circle.attach_to(window)
window.render()
window.close_on_mouse_click()
def circle_and_rectangle():
"""
-- Constructs an rg.RoseWindow.
-- Constructs and draws a rg.Circle and rg.Rectangle
on the window such that:
-- They fit in the window and are easily visible.
-- The rg.Circle is filled with 'blue'
-- Prints (on the console, on SEPARATE lines) the following data
associated with your rg.Circle:
-- Its outline thickness.
-- Its fill color.
-- Its center.
-- Its center's x coordinate.
-- Its center's y coordinate.
-- Prints (on the console, on SEPARATE lines) the same data
but for your rg.Rectangle.
-- Waits for the user to press the mouse, then closes the window.
Here is an example of the output on the console,
for one particular circle and rectangle:
1
blue
Point(180.0, 115.0)
180
115
1
None
Point(75.0, 150.0)
75.0
150.0
"""
# ------------------------------------------------------------------
# TODO: 3. Implement this function, per its green doc-string above.
# -- ANY objects that meet the criteria are fine.
# Put a statement in main to test this function
# (by calling this function).
#
# IMPORTANT: Use the DOT TRICK to guess the names of the relevant
# instance variables for outline thickness, etc.
# ------------------------------------------------------------------
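    # A possible sketch, NOT the official solution. It assumes the usual
    # rosegraphics names (rg.Rectangle(corner1, corner2), fill_color,
    # outline_thickness, center); confirm each of them with the DOT TRICK.
    window = rg.RoseWindow()

    circle = rg.Circle(rg.Point(180, 115), 40)
    circle.fill_color = 'blue'
    circle.attach_to(window)

    corner1 = rg.Point(50, 100)
    corner2 = rg.Point(100, 200)
    rectangle = rg.Rectangle(corner1, corner2)
    rectangle.attach_to(window)
    window.render()

    print(circle.outline_thickness)
    print(circle.fill_color)
    print(circle.center)
    print(circle.center.x)
    print(circle.center.y)

    print(rectangle.outline_thickness)
    print(rectangle.fill_color)
    # Compute the rectangle's center from its two corners rather than
    # guessing at a method name:
    center = rg.Point((corner1.x + corner2.x) / 2, (corner1.y + corner2.y) / 2)
    print(center)
    print(center.x)
    print(center.y)

    window.close_on_mouse_click()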
def lines():
"""
-- Constructs a rg.RoseWindow.
-- Constructs and draws on the window two rg.Lines such that:
-- They both fit in the window and are easily visible.
-- One rg.Line has the default thickness.
-- The other rg.Line is thicker (i.e., has a bigger width).
-- Uses a rg.Line method to get the midpoint (center) of the
thicker rg.Line.
-- Then prints (on the console, on SEPARATE lines):
-- the midpoint itself
-- the x-coordinate of the midpoint
-- the y-coordinate of the midpoint
Here is an example of the output on the console, if the two
endpoints of the thicker line are at (100, 100) and (121, 200):
Point(110.5, 150.0)
110.5
150.0
-- Waits for the user to press the mouse, then closes the window.
"""
# TODO: 4. Implement and test this function.
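    # A possible sketch, NOT the official solution. It assumes rg.Line has a
    # 'thickness' attribute and a 'get_midpoint' method; confirm both with
    # the DOT TRICK.
    window = rg.RoseWindow()

    thin_line = rg.Line(rg.Point(50, 50), rg.Point(350, 50))
    thick_line = rg.Line(rg.Point(100, 100), rg.Point(121, 200))
    thick_line.thickness = 5

    thin_line.attach_to(window)
    thick_line.attach_to(window)
    window.render()

    midpoint = thick_line.get_midpoint()
    print(midpoint)
    print(midpoint.x)
    print(midpoint.y)

    window.close_on_mouse_click()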
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
| 35.590164 | 79 | 0.560111 |
890b4bbd61cae5a0d87ed45e0c4c8e30a36d8d0f | 1,251 | py | Python | alipay/aop/api/domain/BaseWebResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/BaseWebResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/BaseWebResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class BaseWebResponse(object):
def __init__(self):
self._code = None
self._message = None
@property
def code(self):
return self._code
@code.setter
def code(self, value):
self._code = value
@property
def message(self):
return self._message
@message.setter
def message(self, value):
self._message = value
def to_alipay_dict(self):
params = dict()
if self.code:
if hasattr(self.code, 'to_alipay_dict'):
params['code'] = self.code.to_alipay_dict()
else:
params['code'] = self.code
if self.message:
if hasattr(self.message, 'to_alipay_dict'):
params['message'] = self.message.to_alipay_dict()
else:
params['message'] = self.message
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = BaseWebResponse()
if 'code' in d:
o.code = d['code']
if 'message' in d:
o.message = d['message']
return o
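
# Illustrative round trip (a sketch, not part of the generated SDK):
#   resp = BaseWebResponse.from_alipay_dict({'code': '10000', 'message': 'ok'})
#   assert resp.code == '10000'
#   assert resp.to_alipay_dict() == {'code': '10000', 'message': 'ok'}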
| 22.339286 | 65 | 0.544365 |
9faa2421da7ccb837b2e0b21d8f54fbad9ced789 | 1,790 | py | Python | quickstart.py | mehdi-S/InstaPy | df40de6e709dfaf4562edc61348aa6b0229dca8e | [
"MIT"
] | null | null | null | quickstart.py | mehdi-S/InstaPy | df40de6e709dfaf4562edc61348aa6b0229dca8e | [
"MIT"
] | null | null | null | quickstart.py | mehdi-S/InstaPy | df40de6e709dfaf4562edc61348aa6b0229dca8e | [
"MIT"
] | null | null | null | import os
import time
from tempfile import gettempdir
from selenium.common.exceptions import NoSuchElementException
from instapy import InstaPy
insta_username = 'moi'
insta_password = ''
# set headless_browser=True if you want to run InstaPy on a server
# set these in instapy/settings.py if you're locating the
# library in the /usr/lib/pythonX.X/ directory:
# Settings.database_location = '/path/to/instapy.db'
# Settings.chromedriver_location = '/path/to/chromedriver'
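# For example (a sketch; the exact import path is an assumption):
#   from instapy.settings import Settings
#   Settings.database_location = '/path/to/instapy.db'
#   Settings.chromedriver_location = '/path/to/chromedriver'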
session = InstaPy(username=insta_username,
password=insta_password,
headless_browser=False,
multi_logs=True)
try:
session.login()
# settings
session.set_relationship_bounds(enabled=True,
potency_ratio=-1.21,
delimit_by_numbers=True,
max_followers=4590,
max_following=5555,
min_followers=45,
min_following=77)
session.set_do_comment(True, percentage=10)
session.set_comments(['aMEIzing!', 'So much fun!!', 'Nicey!'])
session.set_dont_include(['friend1', 'friend2', 'friend3'])
session.set_dont_like(['pizza', 'girl'])
# actions
session.like_by_tags(['natgeo'], amount=1)
except Exception as exc:
# if changes to IG layout, upload the file to help us locate the change
if isinstance(exc, NoSuchElementException):
file_path = os.path.join(gettempdir(), '{}.html'.format(time.strftime('%Y%m%d-%H%M%S')))
with open(file_path, 'wb') as fp:
fp.write(session.browser.page_source.encode('utf8'))
print('{0}\nIf raising an issue, please also upload the file located at:\n{1}\n{0}'.format(
'*' * 70, file_path))
# full stacktrace when raising Github issue
raise
finally:
# end the bot session
session.end()
| 31.403509 | 99 | 0.675978 |
bdd9dd11ae243787b08477041ae377ee8ea556c5 | 651 | py | Python | molecule/default/tests/test_default.py | kotofeych/ansible-docker | fd86d038c3d8e2daae0ab4b97062bce5384001b1 | [
"BSD-3-Clause"
] | null | null | null | molecule/default/tests/test_default.py | kotofeych/ansible-docker | fd86d038c3d8e2daae0ab4b97062bce5384001b1 | [
"BSD-3-Clause"
] | null | null | null | molecule/default/tests/test_default.py | kotofeych/ansible-docker | fd86d038c3d8e2daae0ab4b97062bce5384001b1 | [
"BSD-3-Clause"
] | null | null | null | import testinfra
host = testinfra.get_hosts('all')
# Check distribution
def test_distribution(host):
assert host.system_info.distribution.lower() in [
'ubuntu',
'centos'
]
# Check exists dockerd file
def test_availability_of_important_files(host):
important_files = [
"/usr/bin/dockerd",
"/tmp/install_docker.sh",
"/lib/systemd/system/docker.service"
]
for file in important_files:
assert host.file(file).exists
# Check start service Docker
def test_docker_service(host):
assert host.package("docker-ce").is_installed
assert host.package("docker-ce-cli").is_installed
| 22.448276 | 53 | 0.688172 |
3c9c9537cf1a99eff01c66280f820dc248b4d40a | 2,942 | py | Python | bin/cut-video.py | cpausmit/Config | d63bc72091c21f45ac83fafa970ded8d9234c409 | [
"MIT"
] | null | null | null | bin/cut-video.py | cpausmit/Config | d63bc72091c21f45ac83fafa970ded8d9234c409 | [
"MIT"
] | null | null | null | bin/cut-video.py | cpausmit/Config | d63bc72091c21f45ac83fafa970ded8d9234c409 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#====================================================================================================
#
# Make defined snippets of a given video input file (mp4 format).
#
# Requirements:
# - install ffmpeg
#
# Video session 1:
#
## Fisher:
# cut-video.py --input 3184_Year_of_the_Quark_01.mp4 --output fisher.mp4 \
# --start "00:00:00" --end "00:04:50"
## Zweig:
# cut-video.py --input 3184_Year_of_the_Quark_01.mp4 --output zweig.mp4 \
# --start "00:04:51" --end "00:35:51"
## LLewellyn-Smith:
# cut-video.py --input 3184_Year_of_the_Quark_01.mp4 --output llewellyn-smith.mp4 \
# --start "00:35:54" --end "01:16:00"
## Breidenbach:
# cut-video.py --input 3184_Year_of_the_Quark_01.mp4 --output breidenbach.mp4 \
# --start "01:16:00" --end "01:44:34"
#
# Video session 2:
#
## Bodek:
# cut-video.py --input 3184_Year_of_the_Quark_02.mp4 --output bodek.mp4 \
# --start "00:00:00" --end "00:23:11"
## Riordan:
# cut-video.py --input 3184_Year_of_the_Quark_02.mp4 --output riordan.mp4 \
# --start "00:23:13" --end "00:55:01"
## Jaffe:
# cut-video.py --input 3184_Year_of_the_Quark_02.mp4 --output jaffe.mp4 \
# --start "00:55:00" --end "01:33:22"
#
# Ch. Paus (V0, Nov 07, 2019)
#====================================================================================================
import os,sys,getopt
#====================================================================================================
# H E L P E R S
#====================================================================================================
#===================================================================================================
# M A I N
#===================================================================================================
# Command line options and their defaults
input = "i.mp4" # input file
output = "o.mp4" # output file
start = "00:00:00" # at the beginning
end = "00:05:00" # at five minutes into the file
# Define string to explain usage of the script
usage = "\nUsage: cut-video.py --input=i.mp4 --output=o.mp4 --start=00:00:00 --end=00:05:00 \n"
valid = ['input=','output=','start=','end=','debug','help']
try:
opts, args = getopt.getopt(sys.argv[1:], "", valid)
except getopt.GetoptError as ex:
    print(usage)
    print(str(ex))
sys.exit(1)
# read all command line options
for opt, arg in opts:
if opt == "--help":
print usage
sys.exit(0)
if opt == "--input":
input = arg
if opt == "--output":
output = arg
if opt == "--start":
start = arg
if opt == "--end":
end = arg
cmd = "ffmpeg -ss %s -i %s -to %s -c:v libx264 -c:a copy %s"%(start,input,end,output)
print " CMD: %s"%(cmd)
os.system(cmd)
# and exit!
sys.exit(0)
| 35.445783 | 101 | 0.457172 |
ebdf3d5c132c4826c720a0eb350eb2090391c875 | 76,641 | py | Python | gdal/swig/python/samples/validate_gpkg.py | Esri/gdal | 8d9af5086ddb96f707ed281786a1cd278066a7f2 | [
"MIT"
] | 9 | 2019-05-30T17:01:56.000Z | 2021-01-30T01:06:41.000Z | gdal/swig/python/samples/validate_gpkg.py | Esri/gdal | 8d9af5086ddb96f707ed281786a1cd278066a7f2 | [
"MIT"
] | 4 | 2018-10-23T18:43:35.000Z | 2019-07-01T19:29:49.000Z | gdal/swig/python/samples/validate_gpkg.py | Esri/gdal | 8d9af5086ddb96f707ed281786a1cd278066a7f2 | [
"MIT"
] | 6 | 2019-02-03T14:19:32.000Z | 2021-12-19T06:36:49.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR
# Purpose: Test compliance of GeoPackage database w.r.t GeoPackage spec
# Author: Even Rouault <even.rouault at spatialys.com>
#
###############################################################################
# Copyright (c) 2017, Even Rouault <even.rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import datetime
import os
import sqlite3
import struct
import sys
# GDAL may be used for checks on tile content for the tiled gridded extension.
# If not available, those tests will be skipped
try:
from osgeo import gdal
has_gdal = True
except ImportError:
has_gdal = False
def _esc_literal(literal):
return literal.replace("'", "''")
def _esc_id(identifier):
return '"' + identifier.replace('"', "\"\"") + '"'
def _is_valid_data_type(type):
return type in ('BOOLEAN', 'TINYINT', 'SMALLINT', 'MEDIUMINT',
'INT', 'INTEGER', 'FLOAT', 'DOUBLE', 'REAL',
'TEXT', 'BLOB', 'DATE', 'DATETIME') or \
type.startswith('TEXT(') or type.startswith('BLOB(')
class GPKGCheckException(Exception):
pass
class GPKGChecker:
EXT_GEOM_TYPES = ('CIRCULARSTRING', 'COMPOUNDCURVE', 'CURVEPOLYGON',
'MULTICURVE', 'MULTISURFACE', 'CURVE', 'SURFACE')
def __init__(self, filename, abort_at_first_error=True, verbose=False):
self.filename = filename
self.extended_pragma_info = False
self.abort_at_first_error = abort_at_first_error
self.verbose = verbose
self.errors = []
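    # Typical use (a sketch; 'check()' is assumed to be the public entry
    # point defined later in the full file):
    #   checker = GPKGChecker('my.gpkg', abort_at_first_error=False,
    #                         verbose=True)
    #   checker.check()
    #   for req, msg in checker.errors:
    #       print(req, msg)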
def _log(self, msg):
if self.verbose:
print(msg)
def _assert(self, cond, req, msg):
# self._log('Verified requirement %s' % req)
if not cond:
self.errors += [(req, msg)]
if self.abort_at_first_error:
if req:
raise GPKGCheckException('Req %s: %s' % (str(req), msg))
else:
raise GPKGCheckException(msg)
return cond
def _check_structure(self, columns, expected_columns, req, table_name):
self._assert(len(columns) == len(expected_columns), req,
'Table %s has %d columns, whereas %d are expected' %
(table_name, len(columns), len(expected_columns)))
for (_, expected_name, expected_type, expected_notnull,
expected_default, expected_pk) in expected_columns:
found = False
for (_, name, type, notnull, default, pk) in columns:
if name != expected_name:
continue
if expected_type == 'INTEGER' and expected_pk:
expected_notnull = 1
if type == 'INTEGER' and pk:
notnull = 1
if not self.extended_pragma_info and expected_pk > 1:
expected_pk = 1
self._assert(type == expected_type, req,
'Wrong type for %s of %s. Expected %s, got %s' %
(name, table_name, expected_type, type))
self._assert(notnull == expected_notnull, req,
('Wrong notnull for %s of %s. ' +
'Expected %s, got %s') %
(name, table_name, expected_notnull, notnull))
self._assert(default == expected_default, req,
('Wrong default for %s of %s. ' +
'Expected %s, got %s') %
(name, table_name, expected_default, default))
self._assert(pk == expected_pk, req,
'Wrong pk for %s of %s. Expected %s, got %s' %
(name, table_name, expected_pk, pk))
found = True
break
self._assert(found, req, 'Column %s of %s not found!' %
(expected_name, table_name))
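    # Note: PRAGMA table_info() rows have the shape
    # (cid, name, type, notnull, dflt_value, pk), for example
    # (0, 'id', 'INTEGER', 0, None, 1); the expected_columns lists used
    # throughout this checker follow the same shape.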
def _check_gpkg_spatial_ref_sys(self, c):
self._log('Checking gpkg_spatial_ref_sys')
c.execute("SELECT 1 FROM sqlite_master WHERE "
"name = 'gpkg_spatial_ref_sys'")
if not self._assert(c.fetchone() is not None, 10,
"gpkg_spatial_ref_sys table missing"):
return
c.execute("PRAGMA table_info(gpkg_spatial_ref_sys)")
columns = c.fetchall()
has_definition_12_063 = False
for (_, name, _, _, _, _) in columns:
if name == 'definition_12_063':
has_definition_12_063 = True
c.execute("SELECT 1 FROM sqlite_master WHERE name = 'gpkg_extensions'")
row = None
if c.fetchone() is not None:
c.execute("SELECT scope FROM gpkg_extensions WHERE "
"extension_name = 'gpkg_crs_wkt'")
row = c.fetchone()
if row:
scope, = row
self._assert(scope == 'read-write', 145,
'scope of gpkg_crs_wkt extension should be read-write')
self._assert(
has_definition_12_063, 145,
"gpkg_spatial_ref_sys should have a definition_12_063 column, "
"as gpkg_crs_wkt extension is declared")
else:
self._assert(
not has_definition_12_063, 145,
"gpkg_extensions should declare gpkg_crs_wkt extension "
"as gpkg_spatial_ref_sys has a definition_12_063 column")
if has_definition_12_063:
expected_columns = [
(0, 'srs_name', 'TEXT', 1, None, 0),
(1, 'srs_id', 'INTEGER', 1, None, 1),
(2, 'organization', 'TEXT', 1, None, 0),
(3, 'organization_coordsys_id', 'INTEGER', 1, None, 0),
(4, 'definition', 'TEXT', 1, None, 0),
(5, 'description', 'TEXT', 0, None, 0),
(6, 'definition_12_063', 'TEXT', 1, None, 0)
]
else:
expected_columns = [
(0, 'srs_name', 'TEXT', 1, None, 0),
(1, 'srs_id', 'INTEGER', 1, None, 1),
(2, 'organization', 'TEXT', 1, None, 0),
(3, 'organization_coordsys_id', 'INTEGER', 1, None, 0),
(4, 'definition', 'TEXT', 1, None, 0),
(5, 'description', 'TEXT', 0, None, 0)
]
self._check_structure(columns, expected_columns, 10,
'gpkg_spatial_ref_sys')
if has_definition_12_063:
c.execute("SELECT srs_id, organization, organization_coordsys_id, "
"definition, definition_12_063 "
"FROM gpkg_spatial_ref_sys "
"WHERE srs_id IN (-1, 0, 4326) ORDER BY srs_id")
else:
c.execute("SELECT srs_id, organization, organization_coordsys_id, "
"definition FROM gpkg_spatial_ref_sys "
"WHERE srs_id IN (-1, 0, 4326) ORDER BY srs_id")
ret = c.fetchall()
self._assert(len(ret) == 3, 11,
'There should be at least 3 records in '
'gpkg_spatial_ref_sys')
if len(ret) != 3:
return
self._assert(ret[0][1] == 'NONE', 11,
'wrong value for organization for srs_id = -1: %s' %
ret[0][1])
self._assert(ret[0][2] == -1, 11,
'wrong value for organization_coordsys_id for '
'srs_id = -1: %s' % ret[0][2])
self._assert(ret[0][3] == 'undefined', 11,
'wrong value for definition for srs_id = -1: %s' %
ret[0][3])
if has_definition_12_063:
self._assert(ret[0][4] == 'undefined', 116,
'wrong value for definition_12_063 for ' +
'srs_id = -1: %s' % ret[0][4])
self._assert(ret[1][1] == 'NONE', 11,
'wrong value for organization for srs_id = 0: %s' %
ret[1][1])
self._assert(ret[1][2] == 0, 11,
'wrong value for organization_coordsys_id for '
'srs_id = 0: %s' % ret[1][2])
self._assert(ret[1][3] == 'undefined', 11,
'wrong value for definition for srs_id = 0: %s' %
ret[1][3])
if has_definition_12_063:
self._assert(ret[1][4] == 'undefined', 116,
'wrong value for definition_12_063 for ' +
'srs_id = 0: %s' % ret[1][4])
self._assert(ret[2][1].lower() == 'epsg', 11,
'wrong value for organization for srs_id = 4326: %s' %
ret[2][1])
self._assert(ret[2][2] == 4326, 11,
'wrong value for organization_coordsys_id for '
'srs_id = 4326: %s' % ret[2][2])
self._assert(ret[2][3] != 'undefined', 11,
'wrong value for definition for srs_id = 4326: %s' %
ret[2][3])
if has_definition_12_063:
self._assert(ret[2][4] != 'undefined', 116,
'wrong value for definition_12_063 for ' +
'srs_id = 4326: %s' % ret[2][4])
if has_definition_12_063:
c.execute("SELECT srs_id FROM gpkg_spatial_ref_sys "
"WHERE srs_id NOT IN (0, -1) AND "
"definition = 'undefined' AND "
"definition_12_063 = 'undefined'")
rows = c.fetchall()
for (srs_id, ) in rows:
self._assert(False, 117,
'srs_id = %d has both definition and ' % srs_id +
'definition_12_063 undefined')
def _check_gpkg_contents(self, c):
self._log('Checking gpkg_contents')
c.execute("SELECT 1 FROM sqlite_master WHERE name = 'gpkg_contents'")
self._assert(c.fetchone() is not None, 13,
"gpkg_contents table missing")
c.execute("PRAGMA table_info(gpkg_contents)")
columns = c.fetchall()
expected_columns = [
(0, 'table_name', 'TEXT', 1, None, 1),
(1, 'data_type', 'TEXT', 1, None, 0),
(2, 'identifier', 'TEXT', 0, None, 0),
(3, 'description', 'TEXT', 0, "''", 0),
(4, 'last_change', 'DATETIME', 1,
"strftime('%Y-%m-%dT%H:%M:%fZ','now')", 0),
(5, 'min_x', 'DOUBLE', 0, None, 0),
(6, 'min_y', 'DOUBLE', 0, None, 0),
(7, 'max_x', 'DOUBLE', 0, None, 0),
(8, 'max_y', 'DOUBLE', 0, None, 0),
(9, 'srs_id', 'INTEGER', 0, None, 0)
]
self._check_structure(columns, expected_columns, 13, 'gpkg_contents')
c.execute("SELECT 1 FROM gpkg_contents "
"WHERE data_type IN ('features', 'tiles')")
self._assert(c.fetchone() is not None, 17,
'gpkg_contents should at least have one table with '
'data_type = features and/or tiles')
c.execute("SELECT table_name, data_type FROM gpkg_contents "
"WHERE data_type NOT IN "
"('features', 'tiles', 'attributes', '2d-gridded-coverage')")
ret = c.fetchall()
self._assert(len(ret) == 0, 17,
'Unexpected data types in gpkg_contents: %s' % str(ret))
c.execute('SELECT table_name, last_change, srs_id FROM gpkg_contents')
rows = c.fetchall()
for (table_name, last_change, srs_id) in rows:
c.execute("SELECT 1 FROM sqlite_master WHERE "
"lower(name) = lower(?) AND type IN ('table', 'view')", (table_name,))
self._assert(c.fetchone() is not None, 14,
('table_name=%s in gpkg_contents is not a ' +
'table or view') % table_name)
try:
datetime.datetime.strptime(
last_change, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
self._assert(False, 15,
('last_change = %s for table_name = %s ' +
'is invalid datetime') %
(last_change, table_name))
if srs_id is not None:
c.execute('SELECT 1 FROM gpkg_spatial_ref_sys '
'WHERE srs_id = ?', (srs_id, ))
self._assert(c.fetchone() is not None, 14,
("table_name=%s has srs_id=%d in gpkg_contents " +
"which isn't found in gpkg_spatial_ref_sys") %
(table_name, srs_id))
def _check_vector_user_table(self, c, table_name):
self._log('Checking vector user table ' + table_name)
c.execute("SELECT column_name, z, m, geometry_type_name, srs_id "
"FROM gpkg_geometry_columns WHERE table_name = ?",
(table_name,))
rows_gpkg_geometry_columns = c.fetchall()
self._assert(len(rows_gpkg_geometry_columns) == 1, 22,
('table_name = %s is not registered in ' +
'gpkg_geometry_columns') % table_name)
geom_column_name = rows_gpkg_geometry_columns[0][0]
z = rows_gpkg_geometry_columns[0][1]
m = rows_gpkg_geometry_columns[0][2]
geometry_type_name = rows_gpkg_geometry_columns[0][3]
srs_id = rows_gpkg_geometry_columns[0][4]
c.execute('PRAGMA table_info(%s)' % _esc_id(table_name))
base_geom_types = ('GEOMETRY', 'POINT', 'LINESTRING', 'POLYGON',
'MULTIPOINT', 'MULTILINESTRING', 'MULTIPOLYGON',
'GEOMETRYCOLLECTION')
cols = c.fetchall()
found_geom = False
count_pkid = 0
for (_, name, type, notnull, default, pk) in cols:
if name.lower() == geom_column_name.lower():
found_geom = True
self._assert(
type in base_geom_types or
type in GPKGChecker.EXT_GEOM_TYPES,
25, ('invalid type (%s) for geometry ' +
'column of table %s') % (type, table_name))
self._assert(type == geometry_type_name, 31,
('table %s has geometry column of type %s in ' +
'SQL and %s in geometry_type_name of ' +
'gpkg_geometry_columns') %
(table_name, type, geometry_type_name))
elif pk == 1:
count_pkid += 1
self._assert(type == 'INTEGER', 29,
('table %s has a PRIMARY KEY of type %s ' +
'instead of INTEGER') % (table_name, type))
else:
self._assert(_is_valid_data_type(type), 5,
('table %s has column %s of unexpected type %s'
% (table_name, name, type)))
self._assert(found_geom, 24,
'table %s has no %s column' %
(table_name, geom_column_name))
self._assert(count_pkid == 1, 29,
'table %s has no INTEGER PRIMARY KEY' % table_name)
self._assert(z in (0, 1, 2), 27, ("z value of %s is %d. " +
"Expected 0, 1 or 2") % (table_name, z))
self._assert(m in (0, 1, 2), 27, ("m value of %s is %d. " +
"Expected 0, 1 or 2") % (table_name, m))
if geometry_type_name in GPKGChecker.EXT_GEOM_TYPES:
c.execute("SELECT 1 FROM gpkg_extensions WHERE "
"extension_name = 'gpkg_geom_%s' AND "
"table_name = ? AND column_name = ? AND "
"scope = 'read-write'" % geometry_type_name,
(table_name, geom_column_name))
self._assert(c.fetchone() is not None, 68,
"gpkg_geom_%s extension should be declared for "
"table %s" % (geometry_type_name, table_name))
wkb_geometries = base_geom_types + GPKGChecker.EXT_GEOM_TYPES
c.execute("SELECT %s FROM %s " %
(_esc_id(geom_column_name), _esc_id(table_name)))
found_geom_types = set()
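        # Worked example of the fixed 8-byte GeoPackage binary header
        # (little-endian, 2D envelope, EPSG:4326):
        #   47 50 00 03  E6 10 00 00
        #   'G' 'P' version=0 flags=0x03, then srs_id=4326 (0x000010E6 LE)
        # flags bit 0 set selects little-endian; (flags >> 1) & 7 == 1 means
        # a 2D envelope of four doubles follows before the WKB geometry.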
for (blob,) in c.fetchall():
if blob is None:
continue
self._assert(len(blob) >= 8, 19, 'Invalid geometry')
max_size_needed = min(len(blob), 8 + 4 * 2 * 8 + 5)
blob_ar = struct.unpack('B' * max_size_needed,
blob[0:max_size_needed])
self._assert(blob_ar[0] == ord('G'), 19, 'Invalid geometry')
self._assert(blob_ar[1] == ord('P'), 19, 'Invalid geometry')
self._assert(blob_ar[2] == 0, 19, 'Invalid geometry')
flags = blob_ar[3]
big_endian = (flags & 1) == 0
env_ind = (flags >> 1) & 7
self._assert(((flags >> 5) & 1) == 0, 19,
'Invalid geometry: ExtendedGeoPackageBinary not '
'allowed')
self._assert(env_ind <= 4, 19,
'Invalid geometry: invalid envelope indicator code')
if big_endian:
geom_srs_id = struct.unpack('>I' * 1, blob[4:8])[0]
else:
geom_srs_id = struct.unpack('<I' * 1, blob[4:8])[0]
self._assert(srs_id == geom_srs_id, 33,
('table %s has geometries with SRID %d, ' +
'whereas only %d is expected') %
(table_name, geom_srs_id, srs_id))
if env_ind == 0:
coord_dim = 0
elif env_ind == 1:
coord_dim = 2
elif env_ind == 2 or env_ind == 3:
coord_dim = 3
else:
coord_dim = 4
# if env_ind == 2 or env_ind == 4:
# self._assert(z > 0, 19,
# 'z found in geometry, but not in gpkg_geometry_columns')
# if env_ind == 3 or env_ind == 4:
# self._assert(m > 0, 19,
# 'm found in geometry, but not in gpkg_geometry_columns')
header_len = 8 + coord_dim * 2 * 8
self._assert(len(blob) >= header_len, 19, 'Invalid geometry')
wkb_endianness = blob_ar[header_len]
wkb_big_endian = (wkb_endianness == 0)
if wkb_big_endian:
wkb_geom_type = struct.unpack(
'>I' * 1, blob[header_len + 1:header_len + 5])[0]
else:
wkb_geom_type = struct.unpack(
'<I' * 1, blob[header_len + 1:header_len + 5])[0]
self._assert(wkb_geom_type >= 0 and
(wkb_geom_type % 1000) < len(wkb_geometries),
19, 'Invalid WKB geometry type')
wkb_dim = int(wkb_geom_type / 1000)
if z == 1:
self._assert(wkb_dim == 1 or wkb_dim == 3, 19,
'geometry without Z found')
if m == 1:
self._assert(wkb_dim == 2 or wkb_dim == 3, 19,
'geometry without M found')
if wkb_dim == 1 or wkb_dim == 3: # Z or ZM
self._assert(z > 0, 19,
'z found in geometry, but not in '
'gpkg_geometry_columns')
if wkb_dim == 2 or wkb_dim == 3: # M or ZM
self._assert(m > 0, 19,
'm found in geometry, but not in '
'gpkg_geometry_columns')
found_geom_types.add(wkb_geometries[wkb_geom_type % 1000])
if geometry_type_name in ('POINT', 'LINESTRING', 'POLYGON',
'MULTIPOINT', 'MULTILINESTRING',
'MULTIPOLYGON'):
self._assert(len(found_geom_types) == 0 or
found_geom_types == set([geometry_type_name]), 32,
'in table %s, found geometry types %s' %
(table_name, str(found_geom_types)))
elif geometry_type_name == 'GEOMETRYCOLLECTION':
self._assert(len(found_geom_types) == 0 or
len(found_geom_types.difference(
set(['GEOMETRYCOLLECTION', 'MULTIPOINT',
'MULTILINESTRING', 'MULTIPOLYGON',
'MULTICURVE', 'MULTISURFACE']))) == 0, 32,
'in table %s, found geometry types %s' %
(table_name, str(found_geom_types)))
elif geometry_type_name in ('CURVEPOLYGON', 'SURFACE'):
self._assert(len(found_geom_types) == 0 or
len(found_geom_types.difference(
set(['POLYGON', 'CURVEPOLYGON']))) == 0, 32,
'in table %s, found geometry types %s' %
(table_name, str(found_geom_types)))
elif geometry_type_name == 'MULTICURVE':
self._assert(len(found_geom_types) == 0 or
len(found_geom_types.difference(
set(['MULTILINESTRING', 'MULTICURVE']))) == 0, 32,
'in table %s, found geometry types %s' %
(table_name, str(found_geom_types)))
elif geometry_type_name == 'MULTISURFACE':
self._assert(len(found_geom_types) == 0 or
len(found_geom_types.difference(
set(['MULTIPOLYGON', 'MULTISURFACE']))) == 0, 32,
'in table %s, found geometry types %s' %
(table_name, str(found_geom_types)))
elif geometry_type_name == 'CURVE':
self._assert(len(found_geom_types) == 0 or
len(found_geom_types.difference(
set(['LINESTRING', 'CIRCULARSTRING',
'COMPOUNDCURVE']))) == 0, 32,
'in table %s, found geometry types %s' %
(table_name, str(found_geom_types)))
for geom_type in found_geom_types:
if geom_type in GPKGChecker.EXT_GEOM_TYPES:
c.execute("SELECT 1 FROM gpkg_extensions WHERE "
"extension_name = 'gpkg_geom_%s' AND "
"table_name = ? AND column_name = ? AND "
"scope = 'read-write'" % geom_type,
(table_name, geom_column_name))
self._assert(c.fetchone() is not None, 68,
"gpkg_geom_%s extension should be declared for "
"table %s" % (geom_type, table_name))
rtree_name = 'rtree_%s_%s' % (table_name, geom_column_name)
c.execute("SELECT 1 FROM sqlite_master WHERE name = ?", (rtree_name,))
has_rtree = c.fetchone() is not None
if has_rtree:
c.execute("SELECT 1 FROM gpkg_extensions WHERE "
"extension_name = 'gpkg_rtree_index' AND "
"table_name=? AND column_name=? AND "
"scope='write-only'",
(table_name, geom_column_name))
self._assert(c.fetchone() is not None, 78,
("Table %s has a RTree, but not declared in " +
"gpkg_extensions") % table_name)
c.execute('PRAGMA table_info(%s)' % _esc_id(rtree_name))
columns = c.fetchall()
expected_columns = [
(0, 'id', '', 0, None, 0),
(1, 'minx', '', 0, None, 0),
(2, 'maxx', '', 0, None, 0),
(3, 'miny', '', 0, None, 0),
(4, 'maxy', '', 0, None, 0)
]
self._check_structure(columns, expected_columns, 77, rtree_name)
c.execute("SELECT 1 FROM sqlite_master WHERE type = 'trigger' " +
"AND name = '%s_insert'" % _esc_literal(rtree_name))
self._assert(c.fetchone() is not None, 75,
"%s_insert trigger missing" % rtree_name)
for i in range(4):
c.execute("SELECT 1 FROM sqlite_master WHERE " +
"type = 'trigger' " +
"AND name = '%s_update%d'" %
(_esc_literal(rtree_name), i + 1))
self._assert(c.fetchone() is not None, 75,
"%s_update%d trigger missing" % (rtree_name, i + 1))
c.execute("SELECT 1 FROM sqlite_master WHERE type = 'trigger' " +
"AND name = '%s_delete'" % _esc_literal(rtree_name))
self._assert(c.fetchone() is not None, 75,
"%s_delete trigger missing" % rtree_name)
def _check_features(self, c):
self._log('Checking features')
c.execute("SELECT 1 FROM gpkg_contents WHERE data_type = 'features'")
if c.fetchone() is None:
self._log('... No features table')
return
self._log('Checking gpkg_geometry_columns')
c.execute("SELECT 1 FROM sqlite_master WHERE "
"name = 'gpkg_geometry_columns'")
self._assert(c.fetchone() is not None, 21,
"gpkg_geometry_columns table missing")
c.execute("PRAGMA table_info(gpkg_geometry_columns)")
columns = c.fetchall()
expected_columns = [
(0, 'table_name', 'TEXT', 1, None, 1),
(1, 'column_name', 'TEXT', 1, None, 2),
(2, 'geometry_type_name', 'TEXT', 1, None, 0),
(3, 'srs_id', 'INTEGER', 1, None, 0),
(4, 'z', 'TINYINT', 1, None, 0),
(5, 'm', 'TINYINT', 1, None, 0)
]
self._check_structure(columns, expected_columns, 21,
'gpkg_geometry_columns')
c.execute("SELECT table_name FROM gpkg_contents WHERE "
"data_type = 'features'")
rows = c.fetchall()
for (table_name,) in rows:
self._check_vector_user_table(c, table_name)
c.execute("SELECT table_name, srs_id FROM gpkg_geometry_columns")
rows = c.fetchall()
for (table_name, srs_id) in rows:
c.execute("SELECT 1 FROM gpkg_contents WHERE table_name = ? " +
"AND data_type='features'", (table_name,))
ret = c.fetchall()
self._assert(len(ret) == 1, 23,
('table_name = %s is registered in ' +
'gpkg_geometry_columns, but not in gpkg_contents') %
table_name)
c.execute('SELECT 1 FROM gpkg_spatial_ref_sys WHERE ' +
'srs_id = ?', (srs_id, ))
self._assert(c.fetchone() is not None, 14,
("table_name=%s has srs_id=%d in " +
"gpkg_geometry_columns which isn't found in " +
"gpkg_spatial_ref_sys") % (table_name, srs_id))
def _check_attributes(self, c):
self._log('Checking attributes')
c.execute("SELECT table_name FROM gpkg_contents WHERE "
"data_type = 'attributes'")
rows = c.fetchall()
if len(rows) == 0:
self._log('... No attributes table')
for (table_name,) in rows:
self._log('Checking attributes table ' + table_name)
c.execute('PRAGMA table_info(%s)' % _esc_id(table_name))
cols = c.fetchall()
count_pkid = 0
for (_, name, type, notnull, default, pk) in cols:
if pk == 1:
count_pkid += 1
self._assert(type == 'INTEGER', 119,
('table %s has a PRIMARY KEY of type %s ' +
'instead of INTEGER') % (table_name, type))
else:
self._assert(_is_valid_data_type(type), 5,
'table %s has column %s of unexpected type %s'
% (table_name, name, type))
self._assert(count_pkid == 1, 119,
'table %s has no INTEGER PRIMARY KEY' % table_name)
def _check_tile_user_table(self, c, table_name, data_type):
self._log('Checking tile pyramid user table ' + table_name)
c.execute("PRAGMA table_info(%s)" % _esc_id(table_name))
columns = c.fetchall()
expected_columns = [
(0, 'id', 'INTEGER', 0, None, 1),
(1, 'zoom_level', 'INTEGER', 1, None, 0),
(2, 'tile_column', 'INTEGER', 1, None, 0),
(3, 'tile_row', 'INTEGER', 1, None, 0),
(4, 'tile_data', 'BLOB', 1, None, 0)
]
self._check_structure(columns, expected_columns, 54,
'gpkg_tile_matrix_set')
c.execute("SELECT DISTINCT zoom_level FROM %s" % _esc_id(table_name))
rows = c.fetchall()
for (zoom_level, ) in rows:
c.execute("SELECT 1 FROM gpkg_tile_matrix WHERE table_name = ? "
"AND zoom_level = ?", (table_name, zoom_level))
self._assert(c.fetchone() is not None, 44,
("Table %s has data for zoom_level = %d, but no " +
"corresponding row in gpkg_tile_matrix") %
(table_name, zoom_level))
zoom_other_levels = False
c.execute("SELECT 1 FROM sqlite_master WHERE name = 'gpkg_extensions'")
if c.fetchone() is not None:
c.execute("SELECT column_name FROM gpkg_extensions WHERE "
"table_name = ? "
"AND extension_name = 'gpkg_zoom_other'", (table_name,))
row = c.fetchone()
if row is not None:
(column_name, ) = row
self._assert(column_name == 'tile_data', 88,
'Wrong column_name in gpkg_extensions for '
'gpkg_zoom_other')
zoom_other_levels = True
c.execute("SELECT zoom_level, pixel_x_size, pixel_y_size "
"FROM gpkg_tile_matrix "
"WHERE table_name = ? ORDER BY zoom_level", (table_name,))
rows = c.fetchall()
prev_zoom_level = None
prev_pixel_x_size = None
prev_pixel_y_size = None
for (zoom_level, pixel_x_size, pixel_y_size) in rows:
if prev_pixel_x_size is not None:
self._assert(
pixel_x_size < prev_pixel_x_size and
pixel_y_size < prev_pixel_y_size,
53,
('For table %s, pixel size are not consistent ' +
'with zoom_level') % table_name)
if prev_zoom_level is not None and \
zoom_level == prev_zoom_level + 1 and not zoom_other_levels:
self._assert(
abs((pixel_x_size - prev_pixel_x_size / 2) /
prev_pixel_x_size) < 1e-5, 35,
"Expected pixel_x_size=%f for zoom_level=%d. Got %f" %
(prev_pixel_x_size / 2, zoom_level, pixel_x_size))
self._assert(
abs((pixel_y_size - prev_pixel_y_size / 2) /
prev_pixel_y_size) < 1e-5, 35,
"Expected pixel_y_size=%f for zoom_level=%d. Got %f" %
(prev_pixel_y_size / 2, zoom_level, pixel_y_size))
prev_pixel_x_size = pixel_x_size
prev_pixel_y_size = pixel_y_size
prev_zoom_level = zoom_level
c.execute("SELECT max_x - min_x, "
" MIN(matrix_width * tile_width * pixel_x_size), "
" MAX(matrix_width * tile_width * pixel_x_size), "
" max_y - min_y, "
" MIN(matrix_height * tile_height * pixel_y_size), "
" MAX(matrix_height * tile_height * pixel_y_size) "
"FROM gpkg_tile_matrix tm JOIN gpkg_tile_matrix_set tms "
"ON tm.table_name = tms.table_name WHERE tm.table_name = ?",
(table_name,))
rows = c.fetchall()
if len(rows) != 0:
(dx, min_dx, max_dx, dy, min_dy, max_dy) = rows[0]
self._assert(abs((min_dx - dx) / dx) < 1e-3 and
abs((max_dx - dx) / dx) < 1e-3 and
abs((min_dy - dy) / dy) < 1e-3 and
abs((max_dy - dy) / dy) < 1e-3, 45,
("Inconsistent values in gpkg_tile_matrix and " +
"gpkg_tile_matrix_set for table %s") % table_name)
c.execute("SELECT DISTINCT zoom_level FROM %s" % _esc_id(table_name))
rows = c.fetchall()
for (zoom_level,) in rows:
c.execute(("SELECT MIN(tile_column), MAX(tile_column), " +
"MIN(tile_row), MAX(tile_row) FROM %s " +
"WHERE zoom_level = %d") %
(_esc_id(table_name), zoom_level))
min_col, max_col, min_row, max_row = c.fetchone()
c.execute("SELECT matrix_width, matrix_height FROM "
"gpkg_tile_matrix "
"WHERE table_name = ? AND zoom_level = ?",
(table_name, zoom_level))
rows2 = c.fetchall()
if len(rows2) == 0:
self._assert(False, 55,
"Invalid zoom_level in %s" % table_name)
else:
matrix_width, matrix_height = rows2[0]
self._assert(min_col >= 0 and min_col < matrix_width, 56,
"Invalid tile_col in %s" % table_name)
self._assert(min_row >= 0 and min_row < matrix_height, 57,
"Invalid tile_row in %s" % table_name)
c.execute("SELECT tile_data FROM %s" % _esc_id(table_name))
found_webp = False
for (blob,) in c.fetchall():
self._assert(blob is not None and len(blob) >= 12, 19,
'Invalid blob')
max_size_needed = 12
blob_ar = struct.unpack('B' * max_size_needed,
blob[0:max_size_needed])
is_jpeg = blob_ar[0:3] == (0xff, 0xd8, 0xff)
is_png = blob_ar[0:4] == (0x89, 0x50, 0x4E, 0x47)
is_webp = blob_ar[0:4] == (ord('R'), ord('I'),
ord('F'), ord('F')) and \
blob_ar[8:12] == (ord('W'), ord('E'), ord('B'), ord('P'))
is_tiff = blob_ar[0:4] == (0x49, 0x49, 0x2A, 0x00) or \
blob_ar[0:4] == (0x4D, 0x4D, 0x00, 0x2A)
self._assert(is_jpeg or is_png or is_webp or is_tiff, 36,
'Unrecognized image mime type')
if data_type == 'tiles':
self._assert(is_jpeg or is_png or is_webp, 36,
'Unrecognized image mime type')
elif data_type == '2d-gridded-coverage':
self._assert(is_png or is_tiff, 36,
'Unrecognized image mime type')
if is_webp:
found_webp = True
if found_webp:
c.execute("SELECT 1 FROM gpkg_extensions WHERE "
"table_name = ? AND column_name = 'tile_data' AND "
"extension_name = 'gpkg_webp' AND "
"scope = 'read-write'", (table_name, ))
self._assert(c.fetchone() is not None, 91,
("Table %s has webp content, but not registered "
"in gpkg_extensions" % table_name))
def _check_tiles(self, c):
self._log('Checking tiles')
c.execute("SELECT 1 FROM gpkg_contents WHERE data_type IN "
"('tiles', '2d-gridded-coverage')")
if c.fetchone() is None:
self._log('... No tiles table')
return
self._log('Checking gpkg_tile_matrix_set ')
c.execute("SELECT 1 FROM sqlite_master WHERE "
"name = 'gpkg_tile_matrix_set'")
self._assert(c.fetchone() is not None, 38,
"gpkg_tile_matrix_set table missing")
c.execute("PRAGMA table_info(gpkg_tile_matrix_set)")
columns = c.fetchall()
expected_columns = [
(0, 'table_name', 'TEXT', 1, None, 1),
(1, 'srs_id', 'INTEGER', 1, None, 0),
(2, 'min_x', 'DOUBLE', 1, None, 0),
(3, 'min_y', 'DOUBLE', 1, None, 0),
(4, 'max_x', 'DOUBLE', 1, None, 0),
(5, 'max_y', 'DOUBLE', 1, None, 0)]
self._check_structure(columns, expected_columns, 38,
'gpkg_tile_matrix_set')
c.execute("SELECT table_name, srs_id FROM gpkg_tile_matrix_set")
rows = c.fetchall()
for (table_name, srs_id) in rows:
c.execute("SELECT 1 FROM gpkg_contents WHERE table_name = ? " +
"AND data_type IN ('tiles', '2d-gridded-coverage')",
(table_name,))
ret = c.fetchall()
self._assert(len(ret) == 1, 39,
('table_name = %s is registered in ' +
'gpkg_tile_matrix_set, but not in gpkg_contents') %
table_name)
c.execute('SELECT 1 FROM gpkg_spatial_ref_sys WHERE srs_id = ?',
(srs_id, ))
self._assert(c.fetchone() is not None, 41,
("table_name=%s has srs_id=%d in " +
"gpkg_tile_matrix_set which isn't found in " +
"gpkg_spatial_ref_sys") % (table_name, srs_id))
self._log('Checking gpkg_tile_matrix')
c.execute("SELECT 1 FROM sqlite_master WHERE "
"name = 'gpkg_tile_matrix'")
self._assert(c.fetchone() is not None, 42,
"gpkg_tile_matrix table missing")
c.execute("PRAGMA table_info(gpkg_tile_matrix)")
columns = c.fetchall()
expected_columns = [
(0, 'table_name', 'TEXT', 1, None, 1),
(1, 'zoom_level', 'INTEGER', 1, None, 2),
(2, 'matrix_width', 'INTEGER', 1, None, 0),
(3, 'matrix_height', 'INTEGER', 1, None, 0),
(4, 'tile_width', 'INTEGER', 1, None, 0),
(5, 'tile_height', 'INTEGER', 1, None, 0),
(6, 'pixel_x_size', 'DOUBLE', 1, None, 0),
(7, 'pixel_y_size', 'DOUBLE', 1, None, 0)
]
self._check_structure(columns, expected_columns, 42,
'gpkg_tile_matrix')
c.execute("SELECT table_name, zoom_level, matrix_width, "
"matrix_height, tile_width, tile_height, pixel_x_size, "
"pixel_y_size FROM gpkg_tile_matrix")
rows = c.fetchall()
for (table_name, zoom_level, matrix_width, matrix_height, tile_width,
tile_height, pixel_x_size, pixel_y_size) in rows:
c.execute("SELECT 1 FROM gpkg_contents WHERE table_name = ? "
"AND data_type IN ('tiles', '2d-gridded-coverage')",
(table_name,))
ret = c.fetchall()
self._assert(len(ret) == 1, 43,
('table_name = %s is registered in ' +
'gpkg_tile_matrix, but not in gpkg_contents') %
table_name)
self._assert(zoom_level >= 0, 46,
"Invalid zoom_level = %d for table %s" %
(zoom_level, table_name))
self._assert(matrix_width > 0, 47,
"Invalid matrix_width = %d for table %s" %
(matrix_width, table_name))
self._assert(matrix_height > 0, 48,
"Invalid matrix_height = %d for table %s" %
(matrix_height, table_name))
self._assert(tile_width > 0, 49,
"Invalid tile_width = %d for table %s" %
(tile_width, table_name))
self._assert(tile_height > 0, 50,
"Invalid tile_height = %d for table %s" %
(tile_height, table_name))
self._assert(pixel_x_size > 0, 51,
"Invalid pixel_x_size = %f for table %s" %
(pixel_x_size, table_name))
self._assert(pixel_y_size > 0, 52,
"Invalid pixel_y_size = %f for table %s" %
(pixel_y_size, table_name))
c.execute("SELECT table_name, data_type FROM gpkg_contents WHERE "
"data_type IN ('tiles', '2d-gridded-coverage')")
rows = c.fetchall()
for (table_name, data_type) in rows:
self._check_tile_user_table(c, table_name, data_type)
def _check_tiled_gridded_coverage_data(self, c):
self._log('Checking tiled gridded elevation data')
c.execute("SELECT table_name FROM gpkg_contents WHERE "
"data_type = '2d-gridded-coverage'")
tables = c.fetchall()
if len(tables) == 0:
self._log('... No tiled gridded coverage table')
return
tables = [tables[i][0] for i in range(len(tables))]
c.execute("SELECT 1 FROM sqlite_master WHERE "
"name = 'gpkg_2d_gridded_coverage_ancillary'")
self._assert(c.fetchone() is not None, 'gpkg_2d_gridded_coverage#1',
'gpkg_2d_gridded_coverage_ancillary table is missing')
c.execute("PRAGMA table_info(gpkg_2d_gridded_coverage_ancillary)")
columns = c.fetchall()
expected_columns = [
(0, 'id', 'INTEGER', 1, None, 1),
(1, 'tile_matrix_set_name', 'TEXT', 1, None, 0),
(2, 'datatype', 'TEXT', 1, "'integer'", 0),
(3, 'scale', 'REAL', 1, '1.0', 0),
(4, 'offset', 'REAL', 1, '0.0', 0),
(5, 'precision', 'REAL', 0, '1.0', 0),
(6, 'data_null', 'REAL', 0, None, 0),
(7, 'grid_cell_encoding', 'TEXT', 0, "'grid-value-is-center'", 0),
(8, 'uom', 'TEXT', 0, None, 0),
(9, 'field_name', 'TEXT', 0, "'Height'", 0),
(10, 'quantity_definition', 'TEXT', 0, "'Height'", 0)
]
self._check_structure(columns, expected_columns, 'gpkg_2d_gridded_coverage#1',
'gpkg_2d_gridded_coverage_ancillary')
c.execute("SELECT 1 FROM sqlite_master WHERE "
"name = 'gpkg_2d_gridded_tile_ancillary'")
self._assert(c.fetchone() is not None, 'gpkg_2d_gridded_coverage#2',
'gpkg_2d_gridded_tile_ancillary table is missing')
c.execute("PRAGMA table_info(gpkg_2d_gridded_tile_ancillary)")
columns = c.fetchall()
expected_columns = [
(0, 'id', 'INTEGER', 0, None, 1),
(1, 'tpudt_name', 'TEXT', 1, None, 0),
(2, 'tpudt_id', 'INTEGER', 1, None, 0),
(3, 'scale', 'REAL', 1, '1.0', 0),
(4, 'offset', 'REAL', 1, '0.0', 0),
(5, 'min', 'REAL', 0, 'NULL', 0),
(6, 'max', 'REAL', 0, 'NULL', 0),
(7, 'mean', 'REAL', 0, 'NULL', 0),
(8, 'std_dev', 'REAL', 0, 'NULL', 0)
]
self._check_structure(columns, expected_columns, 'gpkg_2d_gridded_coverage#2',
'gpkg_2d_gridded_tile_ancillary')
c.execute("SELECT srs_id, organization, organization_coordsys_id, "
"definition FROM gpkg_spatial_ref_sys "
"WHERE srs_id = 4979")
ret = c.fetchall()
self._assert(len(ret) == 1, 'gpkg_2d_gridded_coverage#3',
"gpkg_spatial_ref_sys shall have a row for srs_id=4979")
self._assert(ret[0][1].lower() == 'epsg', 'gpkg_2d_gridded_coverage#3',
'wrong value for organization for srs_id = 4979: %s' %
ret[0][1])
self._assert(ret[0][2] == 4979, 'gpkg_2d_gridded_coverage#3',
('wrong value for organization_coordsys_id for ' +
'srs_id = 4979: %s') % ret[0][2])
c.execute("SELECT 1 FROM sqlite_master WHERE name = 'gpkg_extensions'")
self._assert(c.fetchone() is not None, 'gpkg_2d_gridded_coverage#6',
'gpkg_extensions does not exist')
c.execute("SELECT table_name, column_name, definition, scope FROM "
"gpkg_extensions WHERE "
"extension_name = 'gpkg_2d_gridded_coverage'")
rows = c.fetchall()
self._assert(len(rows) == 2 + len(tables), 'gpkg_2d_gridded_coverage#6',
"Wrong number of entries in gpkg_extensions with "
"2d_gridded_coverage extension name")
found_gpkg_2d_gridded_coverage_ancillary = False
found_gpkg_2d_gridded_tile_ancillary = False
expected_def = \
'http://docs.opengeospatial.org/is/17-066r1/17-066r1.html'
for (table_name, column_name, definition, scope) in rows:
if table_name == 'gpkg_2d_gridded_coverage_ancillary':
found_gpkg_2d_gridded_coverage_ancillary = True
self._assert(column_name is None, 'gpkg_2d_gridded_coverage#6',
"Wrong entry for "
"gpkg_2d_gridded_coverage_ancillary "
"in gpkg_extensions")
self._assert(definition == expected_def, 'gpkg_2d_gridded_coverage#6',
"Wrong entry (definition) for "
"gpkg_2d_gridded_coverage_ancillary "
"in gpkg_extensions")
self._assert(scope == 'read-write', 'gpkg_2d_gridded_coverage#6',
"Wrong entry for "
"gpkg_2d_gridded_coverage_ancillary "
"in gpkg_extensions")
elif table_name == 'gpkg_2d_gridded_tile_ancillary':
found_gpkg_2d_gridded_tile_ancillary = True
self._assert(column_name is None, 'gpkg_2d_gridded_coverage#6',
"Wrong entry for "
"gpkg_2d_gridded_tile_ancillary "
"in gpkg_extensions")
self._assert(definition == expected_def, 'gpkg_2d_gridded_coverage#6',
"Wrong entry (definition) for "
"gpkg_2d_gridded_tile_ancillary "
"in gpkg_extensions")
self._assert(scope == 'read-write', 'gpkg_2d_gridded_coverage#6',
"Wrong entry for "
"gpkg_2d_gridded_tile_ancillary "
"in gpkg_extensions")
else:
self._assert(table_name in tables, 'gpkg_2d_gridded_coverage#6',
"Unexpected table_name registered for " +
"2d_gridded_coverage: %s" % table_name)
self._assert(column_name == 'tile_data', 'gpkg_2d_gridded_coverage#6',
"Wrong entry for %s " % table_name +
"in gpkg_extensions")
self._assert(definition == expected_def, 'gpkg_2d_gridded_coverage#6',
"Wrong entry (definition) for %s " % table_name +
"in gpkg_extensions")
self._assert(scope == 'read-write', 'gpkg_2d_gridded_coverage#6',
"Wrong entry for %s " % table_name +
"in gpkg_extensions")
self._assert(found_gpkg_2d_gridded_coverage_ancillary, 'gpkg_2d_gridded_coverage#6',
"gpkg_2d_gridded_coverage_ancillary not registered "
"for 2d_gridded_coverage")
self._assert(found_gpkg_2d_gridded_tile_ancillary, 'gpkg_2d_gridded_coverage#6',
"gpkg_2d_gridded_tile_ancillary not registered "
"for 2d_gridded_coverage")
c.execute("SELECT tile_matrix_set_name, datatype FROM "
"gpkg_2d_gridded_coverage_ancillary")
rows = c.fetchall()
self._assert(len(rows) == len(tables), 'gpkg_2d_gridded_coverage#7',
"Wrong number of entries in "
"gpkg_2d_gridded_coverage_ancillary")
for (tile_matrix_set_name, datatype) in rows:
self._assert(tile_matrix_set_name in tables, 'gpkg_2d_gridded_coverage#7',
"Table %s has a row in " % tile_matrix_set_name +
"gpkg_2d_gridded_coverage_ancillary, but not in "
"gpkg_contents")
c.execute('SELECT 1 FROM gpkg_tile_matrix_set WHERE '
'table_name = ?', (tile_matrix_set_name,))
self._assert(c.fetchone() is not None, 'gpkg_2d_gridded_coverage#8',
'missing entry in gpkg_tile_matrix_set ' +
'for %s' % tile_matrix_set_name)
self._assert(datatype in ('integer', 'float'), 'gpkg_2d_gridded_coverage#9',
'Unexpected datatype = %s' % datatype)
for table in tables:
c.execute("SELECT COUNT(*) FROM %s" % _esc_id(table))
count_tpudt = c.fetchone()
c.execute("SELECT COUNT(*) FROM gpkg_2d_gridded_tile_ancillary "
"WHERE tpudt_name = ?", (table, ))
count_tile_ancillary = c.fetchone()
self._assert(count_tpudt == count_tile_ancillary, 'gpkg_2d_gridded_coverage#10',
("Inconsistent number of rows in " +
"gpkg_2d_gridded_tile_ancillary for %s") % table)
c.execute("SELECT DISTINCT tpudt_name FROM "
"gpkg_2d_gridded_tile_ancillary")
rows = c.fetchall()
for (tpudt_name, ) in rows:
self._assert(tpudt_name in tables, 'gpkg_2d_gridded_coverage#11',
"tpudt_name = %s is invalid" % tpudt_name)
c.execute("SELECT tile_matrix_set_name FROM "
"gpkg_2d_gridded_coverage_ancillary WHERE "
"datatype = 'float'")
rows = c.fetchall()
for (tile_matrix_set_name, ) in rows:
c.execute("SELECT 1 FROM gpkg_2d_gridded_tile_ancillary WHERE "
"tpudt_name = ? AND "
"NOT (offset == 0.0 AND scale == 1.0)",
(tile_matrix_set_name,))
self._assert(len(c.fetchall()) == 0, 'gpkg_2d_gridded_coverage#9',
"Wrong scale and offset values " +
"for %s " % tile_matrix_set_name +
"in gpkg_2d_gridded_coverage_ancillary")
for table in tables:
c.execute("SELECT 1 FROM gpkg_2d_gridded_tile_ancillary WHERE " +
"tpudt_name = ? AND tpudt_id NOT IN (SELECT id FROM " +
"%s)" % table, (table,))
self._assert(len(c.fetchall()) == 0, 'gpkg_2d_gridded_coverage#12',
"tpudt_id in gpkg_2d_gridded_coverage_ancillary " +
"not referencing an id from %s" % table)
c.execute("SELECT tile_matrix_set_name, datatype FROM "
"gpkg_2d_gridded_coverage_ancillary")
rows = c.fetchall()
warn_gdal_not_available = False
for (table_name, datatype) in rows:
c.execute("SELECT id, tile_data FROM %s" % _esc_id(table_name))
for (id, blob) in c.fetchall():
self._assert(blob is not None and len(blob) >= 12, 19,
'Invalid blob')
max_size_needed = 12
blob_ar = struct.unpack('B' * max_size_needed,
blob[0:max_size_needed])
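                # Sniff the tile format from the magic bytes: PNG begins with
                # \x89PNG, TIFF with II*\0 (little-endian) or MM\0* (big-endian).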
is_png = blob_ar[0:4] == (0x89, 0x50, 0x4E, 0x47)
is_tiff = blob_ar[0:4] == (0x49, 0x49, 0x2A, 0x00) or \
blob_ar[0:4] == (0x4D, 0x4D, 0x00, 0x2A)
if datatype == 'integer':
self._assert(is_png, 'gpkg_2d_gridded_coverage#13',
'Tile for %s should be PNG' % table_name)
if has_gdal:
tmp_file = '/vsimem/temp_validate_gpkg.tif'
try:
blob = bytes(blob)
except:
blob = str(blob)
gdal.FileFromMemBuffer(tmp_file, blob)
ds = gdal.Open(tmp_file)
try:
self._assert(ds is not None, 'gpkg_2d_gridded_coverage#13',
'Invalid tile %d in %s' %
(id, table_name))
self._assert(ds.RasterCount == 1, 'gpkg_2d_gridded_coverage#13',
'Invalid tile %d in %s' %
(id, table_name))
self._assert(ds.GetRasterBand(1).DataType ==
gdal.GDT_UInt16, 'gpkg_2d_gridded_coverage#13',
'Invalid tile %d in %s' %
(id, table_name))
finally:
gdal.Unlink(tmp_file)
else:
if not warn_gdal_not_available:
warn_gdal_not_available = True
self._log('GDAL not available. Req gpkg_2d_gridded_coverage#13 not tested')
elif datatype == 'float':
self._assert(is_tiff, 'gpkg_2d_gridded_coverage#14',
'Tile for %s should be TIFF' % table_name)
if has_gdal:
tmp_file = '/vsimem/temp_validate_gpkg.tif'
try:
blob = bytes(blob)
except:
blob = str(blob)
gdal.FileFromMemBuffer(tmp_file, blob)
ds = gdal.Open(tmp_file)
try:
self._assert(ds is not None, 'gpkg_2d_gridded_coverage#15',
'Invalid tile %d in %s' %
(id, table_name))
self._assert(ds.RasterCount == 1, 'gpkg_2d_gridded_coverage#16',
'Invalid tile %d in %s' %
(id, table_name))
self._assert(ds.GetRasterBand(1).DataType ==
gdal.GDT_Float32, 'gpkg_2d_gridded_coverage#17',
'Invalid tile %d in %s' %
(id, table_name))
compression = ds.GetMetadataItem('COMPRESSION',
'IMAGE_STRUCTURE')
self._assert(compression is None or
compression == 'LZW', 'gpkg_2d_gridded_coverage#18',
'Invalid tile %d in %s' %
(id, table_name))
ovr_count = ds.GetRasterBand(1).GetOverviewCount()
self._assert(len(ds.GetSubDatasets()) == 0 and
ovr_count == 0, 'gpkg_2d_gridded_coverage#19',
'Invalid tile %d in %s' %
(id, table_name))
(blockxsize, _) = \
ds.GetRasterBand(1).GetBlockSize()
self._assert(blockxsize == ds.RasterXSize, 'gpkg_2d_gridded_coverage#20',
'Invalid tile %d in %s' %
(id, table_name))
finally:
gdal.Unlink(tmp_file)
else:
if not warn_gdal_not_available:
warn_gdal_not_available = True
                            self._log('GDAL not available. '
                                      'Req gpkg_2d_gridded_coverage#15 to gpkg_2d_gridded_coverage#20 not tested')
def _check_gpkg_extensions(self, c):
self._log('Checking gpkg_extensions')
c.execute("SELECT 1 FROM sqlite_master WHERE name = 'gpkg_extensions'")
if c.fetchone() is None:
self._log('... No extensions')
return
c.execute("PRAGMA table_info(gpkg_extensions)")
columns = c.fetchall()
expected_columns = [
(0, 'table_name', 'TEXT', 0, None, 0),
(1, 'column_name', 'TEXT', 0, None, 0),
(2, 'extension_name', 'TEXT', 1, None, 0),
(3, 'definition', 'TEXT', 1, None, 0),
(4, 'scope', 'TEXT', 1, None, 0)]
self._check_structure(columns, expected_columns, 58,
'gpkg_extensions')
c.execute("SELECT table_name, column_name FROM gpkg_extensions WHERE "
"table_name IS NOT NULL")
rows = c.fetchall()
for (table_name, column_name) in rows:
# Doesn't work for gpkg_2d_gridded_coverage_ancillary
# c.execute("SELECT 1 FROM gpkg_contents WHERE table_name = ?", \
# (table_name,) )
# ret = c.fetchall()
# self._assert(len(ret) == 1, \
# 60, ('table_name = %s is registered in ' +\
# 'gpkg_extensions, but not in gpkg_contents') % table_name)
if column_name is not None:
try:
c.execute('SELECT %s FROM %s' %
(_esc_id(column_name), _esc_id(table_name)))
c.fetchone()
except:
self._assert(False, 61,
("Column %s of table %s mentioned in " +
"gpkg_extensions doesn't exist") %
(column_name, table_name))
c.execute("SELECT extension_name FROM gpkg_extensions")
rows = c.fetchall()
KNOWN_EXTENSIONS = ['gpkg_rtree_index',
'gpkg_zoom_other',
'gpkg_webp',
'gpkg_metadata',
'gpkg_schema',
'gpkg_crs_wkt',
'gpkg_elevation_tiles', # deprecated one
'gpkg_2d_gridded_coverage'
]
for geom_name in GPKGChecker.EXT_GEOM_TYPES:
KNOWN_EXTENSIONS += ['gpkg_geom_' + geom_name]
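        # Extension names outside the gpkg_ namespace must follow the
        # <author>_<extension_name> pattern with an alphanumeric author
        # prefix, which the loop below enforces.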
for (extension_name,) in rows:
if extension_name.startswith('gpkg_'):
self._assert(extension_name in KNOWN_EXTENSIONS,
62,
"extension_name %s not valid" % extension_name)
else:
self._assert('_' in extension_name,
62,
"extension_name %s not valid" % extension_name)
author = extension_name[0:extension_name.find('_')]
ext_name = extension_name[extension_name.find('_') + 1:]
for x in author:
self._assert((x >= 'a' and x <= 'z') or
(x >= 'A' and x <= 'Z') or
(x >= '0' and x <= '9'),
62,
"extension_name %s not valid" %
extension_name)
for x in ext_name:
self._assert((x >= 'a' and x <= 'z') or
(x >= 'A' and x <= 'Z') or
(x >= '0' and x <= '9') or x == '_',
62,
"extension_name %s not valid" %
extension_name)
# c.execute("SELECT extension_name, definition FROM gpkg_extensions "
# "WHERE definition NOT LIKE 'Annex %' AND "
# "definition NOT LIKE 'http%' AND "
# "definition NOT LIKE 'mailto:%' AND "
# "definition NOT LIKE 'Extension Title%' ")
# rows = c.fetchall()
# for (extension_name, definition) in rows:
# self._assert(False, 63,
# "extension_name %s has invalid definition %s" %
# (extension_name, definition))
c.execute("SELECT extension_name, scope FROM gpkg_extensions "
"WHERE scope NOT IN ('read-write', 'write-only')")
rows = c.fetchall()
for (extension_name, scope) in rows:
self._assert(False, 64,
"extension_name %s has invalid scope %s" %
(extension_name, scope))
c.execute("SELECT table_name, scope FROM gpkg_extensions "
"WHERE extension_name = 'gpkg_rtree_index' ")
rows = c.fetchall()
for (table_name, scope) in rows:
c.execute("SELECT 1 FROM gpkg_contents WHERE lower(table_name) = lower(?) "
"AND data_type = 'features'", (table_name,))
self._assert(c.fetchone() is not None, 75,
('gpkg_extensions declares gpkg_rtree_index for %s,' +
' but this is not a features table') % table_name)
self._assert(scope == 'write-only', 75,
'Invalid scope %s for gpkg_rtree_index' % scope)
def _check_metadata(self, c):
self._log('Checking gpkg_metadata')
must_have_gpkg_metadata = False
c.execute("SELECT 1 FROM sqlite_master WHERE name = 'gpkg_extensions'")
if c.fetchone() is not None:
c.execute("SELECT scope FROM gpkg_extensions WHERE "
"extension_name = 'gpkg_metadata'")
row = c.fetchone()
if row is not None:
must_have_gpkg_metadata = True
(scope, ) = row
self._assert(scope == 'read-write', 140,
"Wrong scope for gpkg_metadata in "
"gpkg_extensions")
c.execute("SELECT 1 FROM sqlite_master WHERE name = 'gpkg_metadata'")
if c.fetchone() is None:
if must_have_gpkg_metadata:
self._assert(False, 140, "gpkg_metadata table missing")
else:
self._log('... No metadata')
return
c.execute("PRAGMA table_info(gpkg_metadata)")
columns = c.fetchall()
expected_columns = [
(0, 'id', 'INTEGER', 1, None, 1),
(1, 'md_scope', 'TEXT', 1, "'dataset'", 0),
(2, 'md_standard_uri', 'TEXT', 1, None, 0),
(3, 'mime_type', 'TEXT', 1, "'text/xml'", 0),
(4, 'metadata', 'TEXT', 1, "''", 0)
]
self._check_structure(columns, expected_columns, 93,
'gpkg_metadata')
c.execute("SELECT 1 FROM sqlite_master "
"WHERE name = 'gpkg_metadata_reference'")
self._assert(c.fetchone() is not None, 95,
"gpkg_metadata_reference is missing")
c.execute("PRAGMA table_info(gpkg_metadata_reference)")
columns = c.fetchall()
expected_columns = [
(0, 'reference_scope', 'TEXT', 1, None, 0),
(1, 'table_name', 'TEXT', 0, None, 0),
(2, 'column_name', 'TEXT', 0, None, 0),
(3, 'row_id_value', 'INTEGER', 0, None, 0),
(4, 'timestamp', 'DATETIME', 1,
"strftime('%Y-%m-%dT%H:%M:%fZ','now')", 0),
(5, 'md_file_id', 'INTEGER', 1, None, 0),
(6, 'md_parent_id', 'INTEGER', 0, None, 0)
]
self._check_structure(columns, expected_columns, 95,
'gpkg_metadata_reference')
c.execute("SELECT DISTINCT md_scope FROM gpkg_metadata WHERE "
"md_scope NOT IN ('undefined', 'fieldSession', "
"'collectionSession', 'series', 'dataset', 'featureType', "
"'feature', 'attributeType', 'attribute', 'tile', "
"'model', 'catalog', 'schema', 'taxonomy', 'software', "
"'service', 'collectionHardware', 'nonGeographicDataset', "
"'dimensionGroup')")
rows = c.fetchall()
for (md_scope, ) in rows:
self._assert(False, 94, 'Invalid md_scope %s found' % md_scope)
c.execute("SELECT DISTINCT reference_scope FROM "
"gpkg_metadata_reference WHERE "
"reference_scope NOT IN ('geopackage', 'table', "
"'column', 'row', 'row/col')")
rows = c.fetchall()
for (md_scope, ) in rows:
self._assert(False, 96,
'Invalid reference_scope %s found' % md_scope)
c.execute("SELECT table_name FROM "
"gpkg_metadata_reference WHERE "
"reference_scope = 'geopackage' AND table_name is NOT NULL")
rows = c.fetchall()
for (table_name, ) in rows:
self._assert(False, 97,
"row in gpkg_metadata_reference with table_name " +
"not null (%s)" % table_name +
"but reference_scope = geopackage")
c.execute("SELECT table_name FROM "
"gpkg_metadata_reference WHERE "
"reference_scope != 'geopackage'")
rows = c.fetchall()
for (table_name, ) in rows:
self._assert(table_name is not None, 97,
"row in gpkg_metadata_reference with null table_name")
c.execute("SELECT 1 FROM gpkg_contents WHERE table_name = ?",
(table_name,))
self._assert(c.fetchone() is not None, 97,
"row in gpkg_metadata_reference with table_name " +
"not null (%s) with no reference in " % table_name +
"gpkg_contents but reference_scope != geopackage")
c.execute("SELECT table_name FROM "
"gpkg_metadata_reference WHERE "
"reference_scope IN ('geopackage', 'table', 'row') "
"AND column_name is NOT NULL")
rows = c.fetchall()
for (table_name, ) in rows:
self._assert(False, 98,
"row in gpkg_metadata_reference with column_name " +
"not null (table=%s)" % table_name +
"but reference_scope = geopackage, table or row")
c.execute("SELECT table_name, column_name FROM "
"gpkg_metadata_reference WHERE "
"reference_scope NOT IN ('geopackage', 'table', 'row')")
rows = c.fetchall()
for (table_name, column_name) in rows:
self._assert(column_name is not None, 98,
"row in gpkg_metadata_reference with null "
"column_name")
try:
c.execute("SELECT %s FROM %s" %
(_esc_id(column_name), _esc_id(table_name)))
except:
self._assert(False, 98,
"column %s of %s does not exist" %
(column_name, table_name))
c.execute("SELECT table_name FROM "
"gpkg_metadata_reference WHERE "
"reference_scope IN ('geopackage', 'table', 'column') "
"AND row_id_value is NOT NULL")
rows = c.fetchall()
for (table_name, ) in rows:
self._assert(False, 99,
"row in gpkg_metadata_reference with row_id_value " +
"not null (table=%s)" % table_name +
"but reference_scope = geopackage, table or column")
c.execute("SELECT table_name, row_id_value FROM "
"gpkg_metadata_reference WHERE "
"reference_scope NOT IN ('geopackage', 'table', 'column')")
rows = c.fetchall()
for (table_name, row_id_value) in rows:
self._assert(row_id_value is not None, 99,
"row in gpkg_metadata_reference with null "
"row_id_value")
c.execute("SELECT 1 FROM %s WHERE ROWID = ?" %
                      _esc_id(table_name), (row_id_value, ))
self._assert(c.fetchone() is not None, 99,
"row %s of %s does not exist" %
(str(row_id_value), table_name))
c.execute("SELECT timestamp FROM gpkg_metadata_reference")
rows = c.fetchall()
for (timestamp, ) in rows:
try:
datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
self._assert(False, 100,
                             ('timestamp = %s in gpkg_metadata_reference ' +
'is invalid datetime') % (timestamp))
c.execute("SELECT md_file_id FROM gpkg_metadata_reference")
rows = c.fetchall()
for (md_file_id, ) in rows:
c.execute("SELECT 1 FROM gpkg_metadata WHERE id = ?",
(md_file_id,))
self._assert(c.fetchone() is not None, 101,
"md_file_id = %s " % str(md_file_id) +
"does not have a row in gpkg_metadata")
c.execute("SELECT md_parent_id FROM gpkg_metadata_reference "
"WHERE md_parent_id IS NOT NULL")
rows = c.fetchall()
for (md_parent_id, ) in rows:
c.execute("SELECT 1 FROM gpkg_metadata WHERE id = ?",
(md_parent_id,))
self._assert(c.fetchone() is not None, 102,
"md_parent_id = %s " % str(md_parent_id) +
"does not have a row in gpkg_metadata")
c.execute("SELECT md_file_id FROM "
"gpkg_metadata_reference WHERE md_parent_id IS NOT NULL "
"AND md_file_id = md_parent_id")
rows = c.fetchall()
for (md_file_id, ) in rows:
self._assert(False, 102,
"Row with md_file_id = md_parent_id = %s " %
str(md_file_id))
def check(self):
self._assert(os.path.exists(self.filename), None,
"%s does not exist" % self.filename)
self._assert(self.filename.lower().endswith('.gpkg'), 3,
"filename extension isn't .gpkg'")
with open(self.filename, 'rb') as f:
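            # The SQLite file header stores user_version at offset 60 and
            # application_id at offset 68, both as big-endian integers.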
f.seek(68, 0)
application_id = struct.unpack('B' * 4, f.read(4))
gp10 = struct.unpack('B' * 4, 'GP10'.encode('ASCII'))
gp11 = struct.unpack('B' * 4, 'GP11'.encode('ASCII'))
gpkg = struct.unpack('B' * 4, 'GPKG'.encode('ASCII'))
self._assert(application_id in (gp10, gp11, gpkg), 2,
("Wrong application_id: %s. " +
"Expected one of GP10, GP11, GPKG") %
str(application_id))
if application_id == gpkg:
f.seek(60, 0)
user_version = f.read(4)
expected_version = 10200
user_version = struct.unpack('>I', user_version)[0]
self._assert(user_version >= expected_version, 2,
'Wrong user_version: %d. Expected >= %d' %
(user_version, expected_version))
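        # Probe the runtime sqlite3: newer SQLite versions report, in the pk
        # field of PRAGMA table_info, the 1-based position of a column within
        # a composite primary key; remember whether that is available.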
conn = sqlite3.connect(':memory:')
c = conn.cursor()
c.execute('CREATE TABLE foo(one TEXT, two TEXT, '
'CONSTRAINT pk PRIMARY KEY (one, two))')
c.execute('PRAGMA table_info(foo)')
rows = c.fetchall()
if rows[1][5] == 2:
self.extended_pragma_info = True
c.close()
conn.close()
conn = sqlite3.connect(self.filename)
c = conn.cursor()
try:
try:
c.execute('SELECT 1 FROM sqlite_master')
c.fetchone()
except:
self._assert(False, 1, 'not a sqlite3 database')
c.execute('PRAGMA foreign_key_check')
ret = c.fetchall()
self._assert(len(ret) == 0, 7,
'foreign_key_check failed: %s' % str(ret))
c.execute('PRAGMA integrity_check')
self._assert(c.fetchone()[0] == 'ok', 6, 'integrity_check failed')
self._check_gpkg_spatial_ref_sys(c)
self._check_gpkg_contents(c)
self._check_features(c)
self._check_tiles(c)
self._check_attributes(c)
self._check_tiled_gridded_coverage_data(c)
self._check_gpkg_extensions(c)
self._check_metadata(c)
# TODO: check gpkg_schema
finally:
c.close()
conn.close()
def check(filename, abort_at_first_error=True, verbose=False):
checker = GPKGChecker(filename,
abort_at_first_error=abort_at_first_error,
verbose=verbose)
checker.check()
return checker.errors
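# A minimal usage sketch (hypothetical filename), collecting every error
# instead of aborting at the first one:
#
#   errors = check('my.gpkg', abort_at_first_error=False, verbose=True)
#   for req, msg in errors:
#       print(req, msg)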
def Usage():
    print('validate_gpkg.py [[-v]|[-q]] [-k] my.gpkg')
    print('')
    print('-v: verbose mode')
    print('-q: quiet mode')
    print('-k: (try to) keep going when error is encountered')
sys.exit(1)
if __name__ == '__main__':
filename = None
verbose = False
abort_at_first_error = True
if len(sys.argv) == 1:
Usage()
for arg in sys.argv[1:]:
if arg == '-k':
abort_at_first_error = False
elif arg == '-q':
verbose = False
elif arg == '-v':
verbose = True
elif arg[0] == '-':
Usage()
else:
filename = arg
if filename is None:
Usage()
ret = check(filename, abort_at_first_error=abort_at_first_error,
verbose=verbose)
if not abort_at_first_error:
if len(ret) == 0:
sys.exit(0)
else:
for (req, msg) in ret:
if req:
print('Req %d: %s' % (req, msg))
else:
print(msg)
sys.exit(1)
| 47.250925 | 114 | 0.499928 |
4519fb630c2a49c0114b4eb0b72bcc7b72b3d210 | 12,405 | py | Python | python/tvm/ir/transform.py | jheo4/incubator-tvm | c4c61cb766608fb2f0fd8c9facc480a43afed3f5 | [
"Apache-2.0"
] | 3 | 2020-03-12T10:25:51.000Z | 2020-08-05T05:36:23.000Z | python/tvm/ir/transform.py | jheo4/incubator-tvm | c4c61cb766608fb2f0fd8c9facc480a43afed3f5 | [
"Apache-2.0"
] | null | null | null | python/tvm/ir/transform.py | jheo4/incubator-tvm | c4c61cb766608fb2f0fd8c9facc480a43afed3f5 | [
"Apache-2.0"
] | 1 | 2018-10-19T18:11:41.000Z | 2018-10-19T18:11:41.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-argument
"""Common pass infrastructure across IR variants."""
import types
import inspect
import functools
import tvm._ffi
from tvm._ffi.runtime_ctypes import TVMContext
from tvm.runtime import Object, ndarray as _nd
from . import _ffi_transform_api
@tvm._ffi.register_object("relay.PassInfo")
class PassInfo(Object):
"""The class contains the meta data required by a pass. It is the
    container of information needed to run an optimization or analysis.
This class can be extended by adding new members when more meta data is
needed.
Parameters
----------
opt_level : int
The optimization level of this pass.
name : str
The pass name.
required : List[str]
The list of passes that are required by a certain pass.
"""
def __init__(self, opt_level, name, required=None):
self.__init_handle_by_constructor__(
_ffi_transform_api.PassInfo, opt_level, name, required)
@tvm._ffi.register_object("relay.PassContext")
class PassContext(Object):
"""The basis where a Relay optimization/analysis runs on.
Each pass context contains a number of auxiliary information that is used
to help an optimization pass. Such information includes the error reporter
to record the errors of during the optimization, etc.
opt_level : Optional[int]
The optimization level of this pass.
fallback_device : Optional[Union[int, str, TVMContext]]
The fallback device type. It is also used as the default device for
operators that are not annotated during heterogeneous execution.
required_pass : Optional[Union[List[str], Set[str], Tuple[str]]]
The list of passes that are required by a certain pass.
disabled_pass : Optional[Union[List[str], Set[str], Tuple[str]]]
The list of passes that are disabled.
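    Examples
    --------
    A minimal sketch, assuming ``seq`` is a pass (e.g. a Sequential) and
    ``mod`` is a tvm.IRModule:
    .. code-block:: python
        with PassContext(opt_level=3, disabled_pass=["EliminateCommonSubexpr"]):
            mod = seq(mod)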
"""
def __init__(self,
opt_level=2,
fallback_device=_nd.cpu(),
required_pass=None,
disabled_pass=None,
trace=None):
if isinstance(fallback_device, str):
fallback_device = _nd.context(fallback_device).device_type
elif isinstance(fallback_device, TVMContext):
fallback_device = fallback_device.device_type
if not isinstance(fallback_device, int):
raise TypeError("required_pass is expected to be the type of " +
"int/str/TVMContext.")
required = list(required_pass) if required_pass else []
if not isinstance(required, (list, tuple)):
raise TypeError("required_pass is expected to be the type of " +
"list/tuple/set.")
disabled = list(disabled_pass) if disabled_pass else []
if not isinstance(disabled, (list, tuple)):
raise TypeError("disabled_pass is expected to be the type of " +
"list/tuple/set.")
self.__init_handle_by_constructor__(_ffi_transform_api.PassContext, opt_level,
fallback_device, required,
disabled, trace)
def __enter__(self):
_ffi_transform_api.EnterPassContext(self)
return self
def __exit__(self, ptype, value, trace):
_ffi_transform_api.ExitPassContext(self)
@staticmethod
def current():
"""Return the current pass context."""
return _ffi_transform_api.GetCurrentPassContext()
@tvm._ffi.register_object("relay.Pass")
class Pass(Object):
"""The base class of all passes. All methods here are just simple wrappers
that are implemented in the backend. They are defined for users to
conveniently interact with the base class.
"""
@property
def info(self):
"""Get the pass meta."""
return _ffi_transform_api.Info(self)
def __call__(self, mod):
"""Execute the pass. Note that for sequential pass, the dependency among
different passes will be resolved in the backend.
Parameters
----------
mod : tvm.IRModule
The module that a certain optimization is performed on.
Returns
-------
mod : tvm.IRModule
The updated module after applying this pass.
"""
return _ffi_transform_api.RunPass(self, mod)
@tvm._ffi.register_object("relay.ModulePass")
class ModulePass(Pass):
"""A pass that works on tvm.IRModule. Users don't need to interact with
this class directly. Instead, a module pass should be created through
`module_pass`, because the design of the `module_pass` API is flexible
enough to handle the creation of a module pass in different manners. In
addition, all members of a module pass can be accessed from the base class.
The same rule applies to FunctionPass as well.
"""
@tvm._ffi.register_object("relay.Sequential")
class Sequential(Pass):
"""A pass that works on a sequence of pass objects. Multiple passes can be
executed sequentially using this class.
    Typical usages of the sequential pass are:
1. Users provide a list of passes for optimization.
2. Only an optimization level is provided so that the backend system has
to glob all passes at this level and below to perform the optimizations.
Note that users can also provide a series of passes that they don't want to
apply when running a sequential pass. Pass dependency will be resolved in
the backend as well.
Parameters
----------
passes : Optional[List[Pass]]
A sequence of passes candidate for optimization.
opt_level : Optional[int]
The optimization level of this sequential pass.
name : Optional[str]
The name of the sequential pass.
required : Optional[List[str]]
The list of passes that the sequential pass is dependent on.
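    Examples
    --------
    A minimal sketch of composing passes; it assumes ``mod`` is a
    tvm.IRModule and that the referenced relay passes are available:
    .. code-block:: python
        seq = Sequential([relay.transform.FoldConstant(),
                          relay.transform.EliminateCommonSubexpr()])
        with PassContext(opt_level=2):
            mod = seq(mod)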
"""
def __init__(self,
passes=None,
opt_level=2,
name="sequential",
required=None):
passes = passes if passes else []
if not isinstance(passes, (list, tuple)):
raise TypeError("passes must be a list of Pass objects.")
required = required if required else []
if not isinstance(required, (list, tuple)):
raise TypeError("Required is expected to be the type of list/tuple.")
self.__init_handle_by_constructor__(_ffi_transform_api.Sequential,
passes, opt_level, name, required)
def _wrap_class_module_pass(pass_cls, pass_info):
"""Wrap a python class as function pass"""
class PyModulePass(ModulePass):
"""Internal wrapper class to create a class instance."""
def __init__(self, *args, **kwargs):
            # initialize handle in case pass_cls creation fails
self.handle = None
inst = pass_cls(*args, **kwargs)
# it is important not to capture self to
# avoid a cyclic dependency
def _pass_func(mod, ctx):
return inst.transform_module(mod, ctx)
self.__init_handle_by_constructor__(
_ffi_transform_api.MakeModulePass, _pass_func, pass_info)
self._inst = inst
def __getattr__(self, name):
# fall back to instance attribute if there is not any
return self._inst.__getattribute__(name)
functools.update_wrapper(PyModulePass.__init__, pass_cls.__init__)
PyModulePass.__name__ = pass_cls.__name__
PyModulePass.__doc__ = pass_cls.__doc__
PyModulePass.__module__ = pass_cls.__module__
return PyModulePass
def module_pass(pass_func=None, opt_level=None, name=None, required=None):
"""Decorate a module pass.
This function returns a callback when pass_func is provided.
    Otherwise, it serves as a decorator function.
pass_func can also be a class type with a method transform_module.
This function will create a decorated ModulePass using transform_module
as the pass function.
Parameters
----------
    pass_func : Optional[Callable[(Module, PassContext) -> Module]]
The transformation function or class.
opt_level : int
The optimization level of this module pass.
name : Optional[str]
The name of the module pass. The name could be empty. In this case, the
name of the optimization function will be used as the pass name.
required : Optional[List[str]]
The list of passes that the module pass is dependent on.
Returns
-------
create_module_pass : Union[Callable, ModulePass]
A decorator will be returned if pass_func is not provided,
        otherwise the decorated result is returned.
The returned decorator has two behaviors depending on the input:
A new ModulePass will be returned when we decorate a pass function.
A new ModulePass class will be returned when we decorate a class type.
Examples
--------
The following code block decorates a module pass class.
.. code-block:: python
@relay.transform.module_pass
class CustomPipeline:
def __init__(self, enable_fold):
self.enable_fold = enable_fold
self.cse = relay.transform.EliminateCommonSubexpr()
self.const_fold = relay.transform.FoldConstant()
def transform_module(self, mod, ctx):
mod = self.cse(mod, ctx)
if self.enable_fold:
mod = self.const_fold(mod, ctx)
return mod
# create an instance of customized pipeline
pipeline = CustomPipeline(enable_fold=False)
assert isinstance(pipeline, transform.ModulePass)
# run the pipeline.
output_module = pipeline(input_module)
The following code creates a module pass by decorating
a user defined transform function.
.. code-block:: python
@relay.transform.module_pass(opt_level=2)
def transform(mod, ctx):
tp = relay.TensorType((10,), "float32")
x = relay.var("x", tp)
gv = relay.GlobalVar("var")
func = relay.Function([x], relay.abs(x))
new_mod = tvm.IRModule({gv: func})
new_mod.update(mod)
return new_mod
module_pass = transform
assert isinstance(module_pass, transform.ModulePass)
assert module_pass.info.opt_level == 2
        # Given a module m, the optimization could be invoked as the following:
updated_mod = module_pass(m)
# Now a function abs should be added to the module m.
"""
if opt_level is None:
raise ValueError("Please provide opt_level for the module pass.")
required = required if required else []
if not isinstance(required, (list, tuple)):
raise TypeError("Required is expected to be the type of " +
"list/tuple.")
def create_module_pass(pass_arg):
"""Internal function that creates a module pass"""
fname = name if name else pass_arg.__name__
info = PassInfo(opt_level, fname, required)
if inspect.isclass(pass_arg):
return _wrap_class_module_pass(pass_arg, info)
if not isinstance(pass_arg, (types.FunctionType, types.LambdaType)):
raise TypeError("pass_func must be a callable for Module pass")
return _ffi_transform_api.MakeModulePass(pass_arg, info)
if pass_func:
return create_module_pass(pass_func)
return create_module_pass
| 37.590909 | 86 | 0.661669 |
03a2bbf9b36c41e75038882383043e85111739dc | 15 | py | Python | app/version.py | bitspradp/canary_sb | 6c70f9ff6db1452545c57e9dd89bc0b1dfa1efba | [
"Apache-2.0"
] | null | null | null | app/version.py | bitspradp/canary_sb | 6c70f9ff6db1452545c57e9dd89bc0b1dfa1efba | [
"Apache-2.0"
] | null | null | null | app/version.py | bitspradp/canary_sb | 6c70f9ff6db1452545c57e9dd89bc0b1dfa1efba | [
"Apache-2.0"
] | null | null | null | version="1.01"
| 7.5 | 14 | 0.666667 |