Dataset schema (one row per source file; ⌀ marks nullable columns):

| column | dtype | observed range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
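As a minimal sketch of how rows with this schema can be inspected (an editor's illustration; it assumes the rows are exported to a JSON Lines file, and the rows.jsonl name is hypothetical):

import pandas as pd

# Hypothetical export of the rows below; any columnar format with the same
# column names would work equally well.
df = pd.read_json("rows.jsonl", lines=True)

# Keep Python files under the Apache-2.0 license that have at least one star.
mask = (
    (df["lang"] == "Python")
    & df["max_stars_repo_licenses"].apply(lambda licenses: "Apache-2.0" in licenses)
    & (df["max_stars_count"].fillna(0) >= 1)
)
print(df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "max_stars_count"]])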
hexsha: 432c8a0798a550bc2be62356a720731fb992f36b | size: 1,540 | ext: py | lang: Python
repo: Buhua-Liu/understanding-curricula @ fdeb3175140d4da92bee4c9a3a1c83539feb8b33 | path: third_party/models/fc.py | licenses: [Apache-2.0]
stars: 25 (2021-03-18T02:30:15.000Z to 2022-03-06T12:50:24.000Z) | issues: 2 (2021-08-10T05:20:46.000Z to 2021-08-10T05:50:43.000Z) | forks: 6 (2021-06-16T19:04:39.000Z to 2022-01-16T11:59:42.000Z)
content:
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.model_zoo as model_zoo
__all__ = [
    'fc1000', 'fc500',
]
class FcNet(nn.Module):
def __init__(self,hidden=1000):
super(FcNet, self).__init__()
self.hidden = hidden
self.conv1 = nn.Conv2d(3, 20, 1)
self.pool = nn.MaxPool2d(2, 2)
self.fc1 = nn.Linear(20* 16 * 16, hidden)
self.fc2 = nn.Linear(hidden, 10)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = x.view(-1, 20 * 16 * 16)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
def fc500():
model = FcNet(hidden=500)
return model
def fc1000():
model = FcNet(hidden=1000)
return model
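A quick usage sketch for the module above (an editor's illustration, not part of the original file; it assumes 3x32x32 inputs such as CIFAR-10 images):

import torch

# fc500() expects 3-channel 32x32 images: the 1x1 convolution keeps the
# spatial size and the 2x2 max-pool halves it, giving 20 * 16 * 16 features.
model = fc500()
batch = torch.randn(4, 3, 32, 32)
logits = model(batch)
print(logits.shape)  # torch.Size([4, 10])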
avg_line_length: 29.056604 | max_line_length: 86 | alphanum_fraction: 0.655844
hexsha: c478d8282a0d39312de8e6bc2c6c4b5c89701757 | size: 8,080 | ext: py | lang: Python
repo: lightsey/cinder @ e03d68e42e57a63f8d0f3e177fb4287290612b24 | path: cinder/zonemanager/drivers/brocade/brcd_fc_san_lookup_service.py | licenses: [Apache-2.0]
stars: 571 (2015-01-01T17:47:26.000Z to 2022-03-23T07:46:36.000Z) | issues: 37 (2015-01-22T23:27:04.000Z to 2021-02-05T16:38:48.000Z) | forks: 841 (2015-01-04T17:17:11.000Z to 2022-03-31T12:06:51.000Z)
content:
# (c) Copyright 2019 Brocade, a Broadcom Company
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from cinder import exception
from cinder.i18n import _
from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as fabric_opts
from cinder.zonemanager import fc_san_lookup_service as fc_service
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
class BrcdFCSanLookupService(fc_service.FCSanLookupService):
"""The SAN lookup service that talks to Brocade switches.
Version History:
1.0.0 - Initial version
1.1 - Add support to use config option for switch southbound protocol
1.2 - Fix open sessions issue
"""
VERSION = "1.2"
def __init__(self, **kwargs):
"""Initializing the client."""
super(BrcdFCSanLookupService, self).__init__(**kwargs)
self.configuration = kwargs.get('configuration', None)
self.create_configuration()
def create_configuration(self):
"""Configuration specific to SAN context values."""
config = self.configuration
fabric_names = [x.strip() for x in config.fc_fabric_names.split(',')]
LOG.debug('Fabric Names: %s', fabric_names)
# There can be more than one SAN in the network and we need to
# get credentials for each for SAN context lookup later.
if len(fabric_names) > 0:
self.fabric_configs = fabric_opts.load_fabric_configurations(
fabric_names)
def get_device_mapping_from_network(self,
initiator_wwn_list,
target_wwn_list):
"""Provides the initiator/target map for available SAN contexts.
Looks up nameserver of each fc SAN configured to find logged in devices
and returns a map of initiator and target port WWNs for each fabric.
:param initiator_wwn_list: List of initiator port WWN
:param target_wwn_list: List of target port WWN
:returns: List -- device wwn map in following format
.. code-block:: default
{
<San name>: {
'initiator_port_wwn_list':
('200000051e55a100', '200000051e55a121'..)
'target_port_wwn_list':
('100000051e55a100', '100000051e55a121'..)
}
}
:raises Exception: when connection to fabric is failed
"""
device_map = {}
formatted_target_list = []
formatted_initiator_list = []
fabric_map = {}
fabric_names = self.configuration.fc_fabric_names
fabrics = None
if not fabric_names:
raise exception.InvalidParameterValue(
err=_("Missing Fibre Channel SAN configuration "
"param - fc_fabric_names"))
fabrics = [x.strip() for x in fabric_names.split(',')]
LOG.debug("FC Fabric List: %s", fabrics)
if fabrics:
for t in target_wwn_list:
formatted_target_list.append(fczm_utils.get_formatted_wwn(t))
for i in initiator_wwn_list:
formatted_initiator_list.append(fczm_utils.
get_formatted_wwn(i))
for fabric_name in fabrics:
fabric_ip = self.fabric_configs[fabric_name].safe_get(
'fc_fabric_address')
# Get name server data from fabric and find the targets
# logged in
nsinfo = ''
conn = None
try:
LOG.debug("Getting name server data for "
"fabric %s", fabric_ip)
conn = self._get_southbound_client(fabric_name)
nsinfo = conn.get_nameserver_info()
except exception.FCSanLookupServiceException:
with excutils.save_and_reraise_exception():
LOG.error("Failed collecting name server info from"
" fabric %s", fabric_ip)
except Exception as e:
msg = _("Connection failed "
"for %(fabric)s with error: %(err)s"
) % {'fabric': fabric_ip, 'err': e}
LOG.error(msg)
raise exception.FCSanLookupServiceException(message=msg)
finally:
if conn:
conn.cleanup()
LOG.debug("Lookup service:nsinfo-%s", nsinfo)
LOG.debug("Lookup service:initiator list from "
"caller-%s", formatted_initiator_list)
LOG.debug("Lookup service:target list from "
"caller-%s", formatted_target_list)
visible_targets = [x for x in nsinfo
if x in formatted_target_list]
visible_initiators = [x for x in nsinfo
if x in formatted_initiator_list]
if visible_targets:
LOG.debug("Filtered targets is: %s", visible_targets)
# getting rid of the : before returning
for idx, elem in enumerate(visible_targets):
elem = str(elem).replace(':', '')
visible_targets[idx] = elem
else:
LOG.debug("No targets are in the nameserver for SAN %s",
fabric_name)
if visible_initiators:
# getting rid of the : before returning ~sk
for idx, elem in enumerate(visible_initiators):
elem = str(elem).replace(':', '')
visible_initiators[idx] = elem
else:
LOG.debug("No initiators are in the nameserver "
"for SAN %s", fabric_name)
fabric_map = {
'initiator_port_wwn_list': visible_initiators,
'target_port_wwn_list': visible_targets
}
device_map[fabric_name] = fabric_map
LOG.debug("Device map for SAN context: %s", device_map)
return device_map
def _get_southbound_client(self, fabric):
"""Implementation to get SouthBound Connector.
South bound connector will be
dynamically selected based on the configuration
:param fabric: fabric information
"""
fabric_info = self.fabric_configs[fabric]
fc_ip = fabric_info.safe_get('fc_fabric_address')
sb_connector = fabric_info.safe_get('fc_southbound_protocol')
if sb_connector is None:
sb_connector = self.configuration.brcd_sb_connector
try:
conn_factory = importutils.import_object(
"cinder.zonemanager.drivers.brocade."
"brcd_fc_zone_connector_factory."
"BrcdFCZoneFactory")
client = conn_factory.get_connector(fabric_info,
sb_connector.upper())
except Exception:
msg = _("Failed to create south bound connector for %s.") % fc_ip
LOG.exception(msg)
raise exception.FCZoneDriverException(msg)
return client
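For orientation, a minimal sketch of consuming the map returned by get_device_mapping_from_network() (an editor's illustration; the fabric name and WWNs are made up but mirror the format documented in the docstring above):

device_map = {
    "fabric_a": {
        "initiator_port_wwn_list": ["200000051e55a100"],
        "target_port_wwn_list": ["100000051e55a100"],
    },
}
for fabric_name, ports in device_map.items():
    # The driver strips the colons from WWNs before returning them.
    print(fabric_name, ports["initiator_port_wwn_list"], ports["target_port_wwn_list"])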
avg_line_length: 41.22449 | max_line_length: 79 | alphanum_fraction: 0.573639
hexsha: fcf67c03af69cb4822399184aba215e2ddc4c5d6 | size: 489 | ext: py | lang: Python
repo: Mannan2812/azure-cli-extensions @ e2b34efe23795f6db9c59100534a40f0813c3d95 | path: src/tipsextension/azext_tipsextension/vendored_sdks/oscp/dataplane/aio/__init__.py | licenses: [MIT]
stars: null | issues: null | forks: null
content:
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.2.1, generator: {generator})
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._open_supply_chain_platform_service_api import OpenSupplyChainPlatformServiceAPI
__all__ = ['OpenSupplyChainPlatformServiceAPI']
avg_line_length: 54.333333 | max_line_length: 99 | alphanum_fraction: 0.560327
hexsha: 0a59c7211d2952d9a6f2fdc0cf6e236c7e5e65f1 | size: 6,151 | ext: py | lang: Python
repo: pieter-lazzaro/dbt @ b6d1e15a9f677a7569eec47f19c8baebb6ed7818 | path: test/unit/test_graph_selection.py | licenses: [Apache-2.0]
stars: null | issues: 1 (2019-02-14T20:10:46.000Z to 2019-02-19T13:06:38.000Z) | forks: null
content:
import unittest
import mock
import os
import string
import dbt.graph.selector as graph_selector
import networkx as nx
class GraphSelectionTest(unittest.TestCase):
def setUp(self):
integer_graph = nx.balanced_tree(2, 2, nx.DiGraph())
package_mapping = {
i: 'm.' + ('X' if i % 2 == 0 else 'Y') + '.' + letter
for (i, letter) in enumerate(string.ascii_lowercase)
}
# Edges: [(X.a, Y.b), (X.a, X.c), (Y.b, Y.d), (Y.b, X.e), (X.c, Y.f), (X.c, X.g)]
self.package_graph = nx.relabel_nodes(integer_graph, package_mapping)
nodes = {
node: mock.MagicMock(fqn=node.split('.')[1:], tags=[])
for node in self.package_graph
}
nodes['m.X.a'].tags = ['abc']
nodes['m.Y.b'].tags = ['abc']
nodes['m.X.c'].tags = ['abc']
nodes['m.Y.d'].tags = []
nodes['m.X.e'].tags = ['efg']
nodes['m.Y.f'].tags = ['efg']
nodes['m.X.g'].tags = ['efg']
self.manifest = mock.MagicMock(nodes=nodes)
self.linker = mock.MagicMock(graph=self.package_graph)
self.selector = graph_selector.NodeSelector(self.linker, self.manifest)
def run_specs_and_assert(self, graph, include, exclude, expected):
selected = self.selector.select_nodes(
graph,
include,
exclude
)
self.assertEquals(selected, expected)
def test__single_node_selection_in_package(self):
self.run_specs_and_assert(
self.package_graph,
['X.a'],
[],
set(['m.X.a'])
)
def test__select_by_tag(self):
self.run_specs_and_assert(
self.package_graph,
['tag:abc'],
[],
set(['m.X.a', 'm.Y.b', 'm.X.c'])
)
def test__exclude_by_tag(self):
self.run_specs_and_assert(
self.package_graph,
['*'],
['tag:abc'],
set(['m.Y.d', 'm.X.e', 'm.Y.f', 'm.X.g'])
)
def test__select_by_tag_and_model_name(self):
self.run_specs_and_assert(
self.package_graph,
['tag:abc', 'a'],
[],
set(['m.X.a', 'm.Y.b', 'm.X.c'])
)
self.run_specs_and_assert(
self.package_graph,
['tag:abc', 'd'],
[],
set(['m.X.a', 'm.Y.b', 'm.X.c', 'm.Y.d'])
)
def test__multiple_node_selection_in_package(self):
self.run_specs_and_assert(
self.package_graph,
['X.a', 'b'],
[],
set(['m.X.a', 'm.Y.b'])
)
def test__select_children_except_in_package(self):
self.run_specs_and_assert(
self.package_graph,
['X.a+'],
['b'],
set(['m.X.a','m.X.c', 'm.Y.d','m.X.e','m.Y.f','m.X.g']))
def test__select_children_except_tag(self):
self.run_specs_and_assert(
self.package_graph,
['X.a+'],
['tag:efg'],
set(['m.X.a','m.Y.b','m.X.c', 'm.Y.d']))
def parse_spec_and_assert(self, spec, parents, children, filter_type, filter_value):
parsed = graph_selector.parse_spec(spec)
self.assertEquals(
parsed,
{
"select_parents": parents,
"select_children": children,
"filter": {
'type': filter_type,
'value': filter_value
},
"raw": spec
}
)
def test__spec_parsing(self):
self.parse_spec_and_assert('a', False, False, 'fqn', 'a')
self.parse_spec_and_assert('+a', True, False, 'fqn', 'a')
self.parse_spec_and_assert('a+', False, True, 'fqn', 'a')
self.parse_spec_and_assert('+a+', True, True, 'fqn', 'a')
self.parse_spec_and_assert('a.b', False, False, 'fqn', 'a.b')
self.parse_spec_and_assert('+a.b', True, False, 'fqn', 'a.b')
self.parse_spec_and_assert('a.b+', False, True, 'fqn', 'a.b')
self.parse_spec_and_assert('+a.b+', True, True, 'fqn', 'a.b')
self.parse_spec_and_assert('a.b.*', False, False, 'fqn', 'a.b.*')
self.parse_spec_and_assert('+a.b.*', True, False, 'fqn', 'a.b.*')
self.parse_spec_and_assert('a.b.*+', False, True, 'fqn', 'a.b.*')
self.parse_spec_and_assert('+a.b.*+', True, True, 'fqn', 'a.b.*')
self.parse_spec_and_assert('tag:a', False, False, 'tag', 'a')
self.parse_spec_and_assert('+tag:a', True, False, 'tag', 'a')
self.parse_spec_and_assert('tag:a+', False, True, 'tag', 'a')
self.parse_spec_and_assert('+tag:a+', True, True, 'tag', 'a')
def test__package_name_getter(self):
found = graph_selector.get_package_names(self.package_graph)
expected = set(['X', 'Y'])
self.assertEquals(found, expected)
def assert_is_selected_node(self, node, spec, should_work):
self.assertEqual(
graph_selector.is_selected_node(node, spec),
should_work
)
def test__is_selected_node(self):
self.assert_is_selected_node(('X', 'a'), ('a'), True)
self.assert_is_selected_node(('X', 'a'), ('X', 'a'), True)
self.assert_is_selected_node(('X', 'a'), ('*'), True)
self.assert_is_selected_node(('X', 'a'), ('X', '*'), True)
self.assert_is_selected_node(('X', 'a', 'b', 'c'), ('X', '*'), True)
self.assert_is_selected_node(('X', 'a', 'b', 'c'), ('X', 'a', '*'), True)
self.assert_is_selected_node(('X', 'a', 'b', 'c'), ('X', 'a', 'b', '*'), True)
self.assert_is_selected_node(('X', 'a', 'b', 'c'), ('X', 'a', 'b', 'c'), True)
self.assert_is_selected_node(('X', 'a', 'b', 'c'), ('X', 'a'), True)
self.assert_is_selected_node(('X', 'a', 'b', 'c'), ('X', 'a', 'b'), True)
self.assert_is_selected_node(('X', 'a'), ('b'), False)
self.assert_is_selected_node(('X', 'a'), ('X', 'b'), False)
self.assert_is_selected_node(('X', 'a'), ('X', 'a', 'b'), False)
self.assert_is_selected_node(('X', 'a'), ('Y', '*'), False)
avg_line_length: 34.751412 | max_line_length: 89 | alphanum_fraction: 0.521866
hexsha: 70b3c17d6589ee73194694658836b1a579066375 | size: 684 | ext: py | lang: Python
repo: Ijwu/flask-mongoengine @ 66ad6a2d670c881a146bd25b9114e5f6530f57a9 | path: flask_mongoengine/wtf/models.py | licenses: [BSD-3-Clause]
stars: 1 (2020-07-29T08:42:21.000Z to 2020-07-29T08:42:21.000Z) | issues: null | forks: null
content:
from flask_wtf import FlaskForm
class ModelForm(FlaskForm):
"""A WTForms mongoengine model form"""
def __init__(self, formdata=None, **kwargs):
self.instance = kwargs.pop("instance", None) or kwargs.get("obj")
if self.instance and not formdata:
kwargs["obj"] = self.instance
self.formdata = formdata
super(ModelForm, self).__init__(formdata, **kwargs)
def save(self, commit=True, **kwargs):
if self.instance:
self.populate_obj(self.instance)
else:
self.instance = self.model_class(**self.data)
if commit:
self.instance.save(**kwargs)
return self.instance
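A usage sketch for the form class above (an editor's illustration, not part of the original file; it assumes a configured Flask app with a MongoEngine connection, and the Article document is hypothetical):

from flask_mongoengine import MongoEngine
from flask_mongoengine.wtf import model_form

db = MongoEngine()

class Article(db.Document):
    title = db.StringField(required=True)

# model_form() generates a concrete subclass of the ModelForm defined above,
# attaching model_class=Article, which save() uses when no instance is given.
ArticleForm = model_form(Article, base_class=ModelForm)

def create_or_update(article=None, formdata=None):
    form = ArticleForm(formdata=formdata, instance=article)
    if form.validate():
        return form.save()  # updates the passed instance or creates a new Article
    return None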
avg_line_length: 29.73913 | max_line_length: 73 | alphanum_fraction: 0.619883
hexsha: cb80dc9d8e5b1173363dbe0c103fb499b12d2b4a | size: 688 | ext: py | lang: Python
repo: csettles/Clara @ 146f25dc4d435a27edea0f07fa4e8ebde9da30b6 | path: clara/__init__.py | licenses: [MIT]
stars: null | issues: null | forks: null
content:
from discord.ext import commands
from .bot import Clara
from .utils import settings
from .utils.logger import logger
clara = Clara(command_prefix="!", description=settings.DESCRIPTION)
@clara.event
async def on_ready():
logger.info('Logged in as {} with id {}'.format(clara.user.name, clara.user.id))
@clara.event
async def on_resume():
logger.info('Resuming the bot after failure to connect...')
@clara.event
async def on_message(message):
if message.author.bot:
return
await clara.process_commands(message)
# custom checker for the commands.ext module
def is_owner():
def predicate(ctx):
return ctx.message.author.id == "172187265433337857"
return commands.check(predicate)
avg_line_length: 26.461538 | max_line_length: 84 | alphanum_fraction: 0.726744
hexsha: 8b34557d33fc19f74717d67ff1960d363c21fc1b | size: 27,148 | ext: py | lang: Python
repo: matthiaskoenig/memote @ 7c14cd304523dda83eaf4835ee007243e8673f85 | path: tests/test_for_support/test_for_consistency.py | licenses: [Apache-2.0]
stars: null | issues: null | forks: null
content:
# -*- coding: utf-8 -*-
# Copyright 2017 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ensure the expected functioning of ``memote.support.consistency``."""
from __future__ import absolute_import
import cobra
import pytest
from cobra.exceptions import Infeasible
import memote.support.consistency as consistency
from memote.utils import register_with
import memote.support.consistency_helpers as con_helpers
MODEL_REGISTRY = dict()
@register_with(MODEL_REGISTRY)
def figure_1(base):
# Example in figure 1 of Gevorgyan et al. (2008) Bioinformatics
# Metabolites
met_a = cobra.Metabolite("A")
met_a_prime = cobra.Metabolite("A'")
met_b = cobra.Metabolite("B")
met_b_prime = cobra.Metabolite("B'")
met_c = cobra.Metabolite("C")
met_c_prime = cobra.Metabolite("C'")
# Reactions
rxn_1 = cobra.Reaction("R1")
rxn_1.add_metabolites({met_a: -1, met_a_prime: -1, met_b: 1})
rxn_2 = cobra.Reaction("R2")
rxn_2.add_metabolites({met_b: -1, met_b_prime: -1, met_c: 1})
rxn_3 = cobra.Reaction("R3")
rxn_3.add_metabolites({met_c: -1, met_c_prime: -2, met_a: 1})
base.add_reactions([rxn_1, rxn_2, rxn_3])
return base
@register_with(MODEL_REGISTRY)
def equation_8(base):
# Example in equation 8 of Gevorgyan et al. (2008) Bioinformatics
# Metabolites
met_a = cobra.Metabolite("A")
met_b = cobra.Metabolite("B")
met_c = cobra.Metabolite("C")
# Reactions
rxn_1 = cobra.Reaction("R1")
rxn_1.add_metabolites({met_a: -1, met_b: 1, met_c: 1})
rxn_2 = cobra.Reaction("R2")
rxn_2.add_metabolites({met_a: -1, met_b: 1})
rxn_3 = cobra.Reaction("R3")
rxn_3.add_metabolites({met_a: -1, met_c: 1})
base.add_reactions([rxn_1, rxn_2, rxn_3])
return base
@register_with(MODEL_REGISTRY)
def figure_2(base):
# Example in figure 2 of Gevorgyan et al. (2008) Bioinformatics
# Metabolites
met_a = cobra.Metabolite("A")
met_b = cobra.Metabolite("B")
met_x = cobra.Metabolite("X")
met_p = cobra.Metabolite("P")
met_q = cobra.Metabolite("Q")
# Reactions
rxn_1 = cobra.Reaction("R1")
rxn_1.add_metabolites({met_a: -1, met_b: 1})
rxn_2 = cobra.Reaction("R2")
rxn_2.add_metabolites({met_a: -1, met_b: 1, met_x: 1})
rxn_3 = cobra.Reaction("R3")
rxn_3.add_metabolites({met_p: -1, met_q: 1})
rxn_4 = cobra.Reaction("R4")
rxn_4.add_metabolites({met_p: -1, met_q: 1, met_x: 1})
base.add_reactions([rxn_1, rxn_2, rxn_3, rxn_4])
return base
@register_with(MODEL_REGISTRY)
def blocked_reactions(base):
met_a = cobra.Metabolite("C", compartment="e")
met_b = cobra.Metabolite("A", compartment="e")
met_c = cobra.Metabolite("B", compartment="e")
met_d = cobra.Metabolite("D", compartment="e")
rxn1 = cobra.Reaction("Gen")
rxn1.add_metabolites({met_d: -1, met_b: -1, met_a: 1, met_c: 1})
rxn2 = cobra.Reaction("Recap", lower_bound=-1000, upper_bound=1000)
rxn2.add_metabolites({met_c: -1, met_b: 1})
rxn3 = cobra.Reaction("EX_C_e", lower_bound=-1000, upper_bound=1000)
rxn3.add_metabolites({met_a: -1})
rxn4 = cobra.Reaction("EX_A_e", lower_bound=-1000, upper_bound=1000)
rxn4.add_metabolites({met_b: -1})
rxn5 = cobra.Reaction("EX_B_e", lower_bound=-1000, upper_bound=1000)
rxn5.add_metabolites({met_c: -1})
base.add_reactions([rxn1, rxn2, rxn3, rxn4, rxn5])
return base
@register_with(MODEL_REGISTRY)
def produces_atp(base):
"""Returns a simple model with an EGC producing atp_c"""
ra = cobra.Reaction('A')
rb = cobra.Reaction('B')
rc = cobra.Reaction('C')
base.add_reactions([ra, rb, rc])
ra.reaction = "a <--> b"
rb.reaction = "b <--> c"
rc.reaction = "atp_c + h2o_c + a <--> pi_c + adp_c + c + h_c"
base.add_boundary(base.metabolites.a, type="sink")
base.add_boundary(base.metabolites.h2o_c, type="sink")
base.add_boundary(base.metabolites.h_c, type="sink")
base.add_boundary(base.metabolites.adp_c, type="sink")
base.add_boundary(base.metabolites.atp_c, type="sink")
base.add_boundary(base.metabolites.pi_c, type="sink")
base.add_boundary(base.metabolites.c, type="demand")
for met in base.metabolites:
met.compartment = 'c'
return base
@register_with(MODEL_REGISTRY)
def infeasible(base):
"""Returns an infeasible model with an EGC producing atp_c"""
ra = cobra.Reaction('A')
rb = cobra.Reaction('B')
rc = cobra.Reaction('C')
rd = cobra.Reaction('MAINTENANCE')
base.add_reactions([ra, rb, rc, rd])
ra.reaction = "a <--> b"
rb.reaction = "b <--> c"
rc.reaction = "atp_c + h2o_c + a <--> pi_c + adp_c + c + h_c"
rd.reaction = "h2o_c + b --> c + h_c + met_c"
rd.bounds = 10, 1000
base.add_boundary(base.metabolites.a, type="sink")
base.add_boundary(base.metabolites.h2o_c, type="sink")
base.add_boundary(base.metabolites.h_c, type="sink")
base.add_boundary(base.metabolites.adp_c, type="sink")
base.add_boundary(base.metabolites.atp_c, type="sink")
base.add_boundary(base.metabolites.pi_c, type="sink")
base.add_boundary(base.metabolites.c, type="demand")
for met in base.metabolites:
met.compartment = 'c'
return base
@register_with(MODEL_REGISTRY)
def maintenance_present(base):
"""Returns a model with an ATPM reaction"""
ra = cobra.Reaction('A')
rb = cobra.Reaction('B')
rc = cobra.Reaction('C')
rd = cobra.Reaction('ATPM')
base.add_reactions([ra, rb, rc, rd])
ra.reaction = "a <--> b"
rb.reaction = "b <--> c"
rc.reaction = "atp_c + h2o_c + a <--> pi_c + adp_c + c + h_c"
rd.reaction = "atp_c + h2o_c + a --> pi_c + adp_c + c + h_c"
rd.bounds = 7.9, 1000
base.add_boundary(base.metabolites.a, type="sink")
base.add_boundary(base.metabolites.h2o_c, type="sink")
base.add_boundary(base.metabolites.h_c, type="sink")
base.add_boundary(base.metabolites.adp_c, type="sink")
base.add_boundary(base.metabolites.atp_c, type="sink")
base.add_boundary(base.metabolites.pi_c, type="sink")
base.add_boundary(base.metabolites.c, type="demand")
for met in base.metabolites:
met.compartment = 'c'
return base
@register_with(MODEL_REGISTRY)
def missing_energy_partner(base):
"""Returns a broken model with a missing energy partner to atp"""
ra = cobra.Reaction('A')
rb = cobra.Reaction('B')
rc = cobra.Reaction('C')
base.add_reactions([ra, rb, rc])
ra.reaction = "a <--> b"
rb.reaction = "b <--> c"
rc.reaction = "atp_c + a <--> c "
return base
@register_with(MODEL_REGISTRY)
def produces_nadh(base):
"""Returns a simple model with an EGC producing nadh_c"""
ra = cobra.Reaction('A')
rb = cobra.Reaction('B')
rc = cobra.Reaction('C')
base.add_reactions([ra, rb, rc])
ra.reaction = "a <--> b"
rb.reaction = "b <--> c"
rc.reaction = "nadh_c + a <--> nad_c + c + h_c"
base.add_boundary(base.metabolites.a, type="sink")
base.add_boundary(base.metabolites.h_c, type="sink")
base.add_boundary(base.metabolites.nad_c, type="sink")
base.add_boundary(base.metabolites.nadh_c, type="sink")
base.add_boundary(base.metabolites.c, type="demand")
for met in base.metabolites:
met.compartment = 'c'
return base
@register_with(MODEL_REGISTRY)
def produces_fadh2(base):
"""Returns a simple model with an EGC producing fadh2_c"""
ra = cobra.Reaction('A')
rb = cobra.Reaction('B')
rc = cobra.Reaction('C')
base.add_reactions([ra, rb, rc])
ra.reaction = "a <--> b"
rb.reaction = "b <--> c"
rc.reaction = "fadh2_c + a <--> fad_c + c + 2 h_c"
base.add_boundary(base.metabolites.a, type="sink")
base.add_boundary(base.metabolites.h_c, type="sink")
base.add_boundary(base.metabolites.fad_c, type="sink")
base.add_boundary(base.metabolites.fadh2_c, type="sink")
base.add_boundary(base.metabolites.c, type="demand")
for met in base.metabolites:
met.compartment = 'c'
return base
@register_with(MODEL_REGISTRY)
def produces_accoa(base):
"""Returns a simple model with an EGC producing accoa_c"""
ra = cobra.Reaction('A')
rb = cobra.Reaction('B')
rc = cobra.Reaction('C')
base.add_reactions([ra, rb, rc])
ra.reaction = "a <--> b"
rb.reaction = "b <--> c"
rc.reaction = "accoa_c + h2o_c + a <--> coa_c + c + ac_c + h_c"
base.add_boundary(base.metabolites.a, type="sink")
base.add_boundary(base.metabolites.h_c, type="sink")
base.add_boundary(base.metabolites.ac_c, type="sink")
base.add_boundary(base.metabolites.h2o_c, type="sink")
base.add_boundary(base.metabolites.coa_c, type="sink")
base.add_boundary(base.metabolites.accoa_c, type="sink")
base.add_boundary(base.metabolites.c, type="demand")
for met in base.metabolites:
met.compartment = 'c'
return base
@register_with(MODEL_REGISTRY)
def produces_glu(base):
"""Returns a simple model with an EGC producing glu__L_c"""
ra = cobra.Reaction('A')
rb = cobra.Reaction('B')
rc = cobra.Reaction('C')
base.add_reactions([ra, rb, rc])
ra.reaction = "a <--> b"
rb.reaction = "b <--> c"
rc.reaction = "glu__L_c + h2o_c + a <--> c + akg_c + nh3_c + 2 h_c"
base.add_boundary(base.metabolites.a, type="sink")
base.add_boundary(base.metabolites.h_c, type="sink")
base.add_boundary(base.metabolites.nh3_c, type="sink")
base.add_boundary(base.metabolites.h2o_c, type="sink")
base.add_boundary(base.metabolites.akg_c, type="sink")
base.add_boundary(base.metabolites.glu__L_c, type="sink")
base.add_boundary(base.metabolites.c, type="demand")
for met in base.metabolites:
met.compartment = 'c'
return base
# TODO: Removed until detection of organism type is implemented.
# @register_with(MODEL_REGISTRY)
# def produces_h(base):
# """Returns a simple model with an EGC producing h_p"""
# ra = cobra.Reaction('A')
# rb = cobra.Reaction('B')
# rc = cobra.Reaction('C')
# base.add_reactions([ra, rb, rc])
# ra.reaction = "a <--> b"
# rb.reaction = "b <--> c"
# rc.reaction = "h_p + a <--> c + h_c"
# base.add_boundary(base.metabolites.a, type="sink")
# base.add_boundary(base.metabolites.h_p, type="sink")
# base.add_boundary(base.metabolites.h_c, type="sink")
# base.add_boundary(base.metabolites.c, type="demand")
# for met in base.metabolites:
# met.compartment = 'c'
# return base
@register_with(MODEL_REGISTRY)
def no_atp(base):
"""Returns a simple model without an EGC producing atp_c"""
ra = cobra.Reaction('A')
rb = cobra.Reaction('B')
rc = cobra.Reaction('C')
base.add_reactions([ra, rb, rc])
ra.reaction = "a <--> b"
rb.reaction = "b <--> c"
rc.reaction = "atp_c + h2o_c + a --> pi_c + adp_c + c + h_c"
base.add_boundary(base.metabolites.a, type="sink")
base.add_boundary(base.metabolites.h2o_c, type="sink")
base.add_boundary(base.metabolites.h_c, type="sink")
base.add_boundary(base.metabolites.adp_c, type="sink")
base.add_boundary(base.metabolites.atp_c, type="sink")
base.add_boundary(base.metabolites.pi_c, type="sink")
base.add_boundary(base.metabolites.c, type="demand")
for met in base.metabolites:
met.compartment = 'c'
return base
@register_with(MODEL_REGISTRY)
def all_balanced(base):
met_a = cobra.Metabolite("A", formula='CHOPNS', charge=1)
met_b = cobra.Metabolite("B", formula='C2H2O2P2N2S2', charge=2)
rxn1 = cobra.Reaction("RA1")
rxn1.add_metabolites({met_a: -2, met_b: 1})
base.add_reactions([rxn1])
return base
@register_with(MODEL_REGISTRY)
def mass_unbalanced(base):
met_a = cobra.Metabolite("A", formula='CHOPNS', charge=2)
met_b = cobra.Metabolite("B", formula='C2H2O2P2N2S2', charge=2)
rxn1 = cobra.Reaction("RA1")
rxn1.add_metabolites({met_a: -1, met_b: 1})
base.add_reactions([rxn1])
return base
@register_with(MODEL_REGISTRY)
def charge_unbalanced(base):
met_a = cobra.Metabolite("A", formula='CHOPNS', charge=1)
met_b = cobra.Metabolite("B", formula='C2H2O2P2N2S2', charge=1)
rxn1 = cobra.Reaction("RA1")
rxn1.add_metabolites({met_a: -2, met_b: 1})
base.add_reactions([rxn1])
return base
@register_with(MODEL_REGISTRY)
def met_no_formula(base):
met_a = cobra.Metabolite("A", formula=None, charge=1)
met_b = cobra.Metabolite("B", formula='C2H2O2P2N2S2', charge=2)
rxn1 = cobra.Reaction("RA1")
rxn1.add_metabolites({met_a: -2, met_b: 1})
base.add_reactions([rxn1])
return base
@register_with(MODEL_REGISTRY)
def met_no_charge(base):
met_a = cobra.Metabolite("A", formula='CHOPNS', charge=1)
met_b = cobra.Metabolite("B", formula='C2H2O2P2N2S2')
rxn1 = cobra.Reaction("RA1")
rxn1.add_metabolites({met_a: -2, met_b: 1})
base.add_reactions([rxn1])
return base
@register_with(MODEL_REGISTRY)
def loopy_toy_model(base):
base.add_metabolites([cobra.Metabolite(i) for i in "ABC"])
base.add_reactions([cobra.Reaction(i)
for i in ["VA", "VB", "v1", "v2", "v3"]]
)
base.reactions.VA.add_metabolites({"A": 1})
base.reactions.VB.add_metabolites({"C": -1})
base.reactions.v1.add_metabolites({"A": -1, "B": 1})
base.reactions.v2.add_metabolites({"B": -1, "C": 1})
base.reactions.v3.add_metabolites({"A": -1, "C": 1})
base.reactions.v1.bounds = -1000, 1000
base.reactions.v2.bounds = -1000, 1000
base.reactions.v3.bounds = -1000, 1000
base.objective = 'VB'
base.reactions.VB.bounds = 0, 1
return base
@register_with(MODEL_REGISTRY)
def loopless_toy_model(base):
base.add_metabolites([cobra.Metabolite(i) for i in "ABC"])
base.add_reactions([cobra.Reaction(i)
for i in ["VA", "VB", "v1", "v2"]]
)
base.reactions.VA.add_metabolites({"A": 1})
base.reactions.VB.add_metabolites({"C": -1})
base.reactions.v1.add_metabolites({"A": -1, "B": 1})
base.reactions.v2.add_metabolites({"B": -1, "C": 1})
base.reactions.v1.bounds = -1000, 1000
base.reactions.v2.bounds = -1000, 1000
base.objective = 'VB'
base.reactions.VB.bounds = 0, 1
return base
@register_with(MODEL_REGISTRY)
def constrained_toy_model(base):
base.add_metabolites([cobra.Metabolite(i) for i in "ABC"])
base.add_reactions([cobra.Reaction(i)
for i in ["VA", "VB", "v1", "v2", "v3"]]
)
base.reactions.VA.add_metabolites({"A": 1})
base.reactions.VB.add_metabolites({"C": -1})
base.reactions.v1.add_metabolites({"A": -1, "B": 1})
base.reactions.v2.add_metabolites({"B": -1, "C": 1})
base.reactions.v3.add_metabolites({"A": -1, "C": 1})
base.reactions.v1.bounds = -1000, 1000
base.reactions.v2.bounds = -1000, 1000
base.reactions.v3.bounds = 1, 1
base.objective = 'VB'
base.reactions.VB.bounds = 0, 1
return base
@register_with(MODEL_REGISTRY)
def infeasible_toy_model(base):
base.add_metabolites([cobra.Metabolite(i) for i in "ABC"])
base.add_reactions([cobra.Reaction(i)
for i in ["VA", "VB", "v1", "v2", "v3"]]
)
base.reactions.VA.add_metabolites({"A": 1})
base.reactions.VB.add_metabolites({"C": -1})
base.reactions.v1.add_metabolites({"A": -1, "B": 1})
base.reactions.v2.add_metabolites({"B": -1, "C": 1})
base.reactions.v3.add_metabolites({"A": -1, "C": 1})
# Forcing a lower bound on a 'metabolic' reaction that is higher than the
# uptake rate will make a model infeasible.
base.reactions.v1.bounds = 2, 1000
base.reactions.v2.bounds = -1000, 1000
base.reactions.v3.bounds = 1, 1
base.objective = 'VB'
base.reactions.VB.bounds = 0, 1
return base
@register_with(MODEL_REGISTRY)
def producing_toy_model(base):
base.add_metabolites([cobra.Metabolite(i) for i in "ABCD"])
base.add_reactions([cobra.Reaction(i)
for i in ["VA", "VB", "VD", "v1", "v2", "v3", "v4"]]
)
base.reactions.VA.add_metabolites({"A": 1})
base.reactions.VB.add_metabolites({"C": -1})
base.reactions.VD.add_metabolites({"D": -1})
base.reactions.v1.add_metabolites({"A": -1, "B": 1})
base.reactions.v2.add_metabolites({"B": -1, "C": 1})
base.reactions.v3.add_metabolites({"A": -1, "C": 1})
base.reactions.v4.add_metabolites({"A": -1, "C": 1, "D": 1})
base.reactions.v1.bounds = -1000, 1000
base.reactions.v2.bounds = -1000, 1000
base.reactions.v3.bounds = -1000, 1000
base.reactions.v4.bounds = 0, 1
base.objective = 'VB'
base.reactions.VB.bounds = 0, 1
return base
@register_with(MODEL_REGISTRY)
def consuming_toy_model(base):
base.add_metabolites([cobra.Metabolite(i) for i in "ABCD"])
base.add_reactions([cobra.Reaction(i)
for i in ["VA", "VB", "VD", "v1", "v2", "v3", "v4"]]
)
base.reactions.VA.add_metabolites({"A": 1})
base.reactions.VB.add_metabolites({"C": -1})
base.reactions.VD.add_metabolites({"D": -1})
base.reactions.v1.add_metabolites({"A": -1, "B": 1})
base.reactions.v2.add_metabolites({"B": -1, "C": 1})
base.reactions.v3.add_metabolites({"A": -1, "C": 1})
base.reactions.v4.add_metabolites({"A": -1, "C": 1, "D": -1})
base.reactions.v1.bounds = -1000, 1000
base.reactions.v2.bounds = -1000, 1000
base.reactions.v3.bounds = -1000, 1000
base.reactions.v4.bounds = -1, 0
base.objective = 'VB'
base.reactions.VB.bounds = 0, 1
return base
@register_with(MODEL_REGISTRY)
def gap_model(base):
a_c = cobra.Metabolite("a_c", compartment="c")
a_e = cobra.Metabolite("a_e", compartment="e")
b_c = cobra.Metabolite("b_c", compartment="c")
c_c = cobra.Metabolite("c_c", compartment="c")
base.add_metabolites([a_e])
rxn1 = cobra.Reaction("R1")
rxn1.add_metabolites({a_c: -1, b_c: 1})
rxn2 = cobra.Reaction("R2")
rxn2.add_metabolites({a_c: -1, c_c: 1})
base.add_reactions([rxn1, rxn2])
return base
@register_with(MODEL_REGISTRY)
def gap_model_2(base):
a_c = cobra.Metabolite("a_c", compartment="c")
b_c = cobra.Metabolite("b_c", compartment="c")
c_c = cobra.Metabolite("c_c", compartment="c")
d_c = cobra.Metabolite("d_c", compartment="c")
base.add_reactions([cobra.Reaction(i)
for i in ["EX_A", "A2B", "C2D", "EX_D"]])
base.reactions.EX_A.add_metabolites({a_c: -1})
base.reactions.EX_D.add_metabolites({d_c: -1})
base.reactions.A2B.add_metabolites({a_c: -1, b_c: 1})
base.reactions.C2D.add_metabolites({c_c: -1, d_c: 1})
base.reactions.EX_A.bounds = -1000, 1000
base.reactions.A2B.bounds = 0, 1000
base.reactions.C2D.bounds = 0, 1000
base.reactions.EX_D.bounds = -1000, 1000
return base
@register_with(MODEL_REGISTRY)
def gapfilled_model(base):
a_c = cobra.Metabolite("a_c", compartment="c")
a_e = cobra.Metabolite("a_e", compartment="e")
b_c = cobra.Metabolite("b_c", compartment="c")
c_c = cobra.Metabolite("c_c", compartment="c")
rxn1 = cobra.Reaction("R1")
rxn1.add_metabolites({a_c: -1, b_c: 1})
rxn2 = cobra.Reaction("R2")
rxn2.add_metabolites({a_c: -1, c_c: 1})
rxn3 = cobra.Reaction("R3tec")
rxn3.add_metabolites({a_e: -1, a_c: 1})
rxn4 = cobra.Reaction("DM_b_c")
rxn4.add_metabolites({b_c: -1})
rxn5 = cobra.Reaction("DM_c_c")
rxn5.add_metabolites({c_c: -1})
rxn6 = cobra.Reaction("EX_a_e")
rxn6.add_metabolites({a_e: 1})
base.add_reactions([rxn1, rxn2, rxn3, rxn4, rxn5, rxn6])
return base
@register_with(MODEL_REGISTRY)
def reversible_gap(base):
a_c = cobra.Metabolite("a_c", compartment="c")
b_c = cobra.Metabolite("b_c", compartment="c")
c_c = cobra.Metabolite("c_c", compartment="c")
rxn1 = cobra.Reaction("R1", lower_bound=-1000)
rxn1.add_metabolites({a_c: -1, b_c: 1})
rxn2 = cobra.Reaction("R2")
rxn2.add_metabolites({a_c: -1, c_c: 1})
base.add_reactions([rxn1, rxn2])
return base
@pytest.mark.parametrize("model, consistent", [
("textbook", True),
("figure_1", False),
("equation_8", False),
("figure_2", False),
], indirect=["model"])
def test_check_stoichiometric_consistency(model, consistent):
assert consistency.check_stoichiometric_consistency(model) is consistent
@pytest.mark.parametrize("model, inconsistent", [
("textbook", []),
("figure_1", ["A'", "B'", "C'"]),
("equation_8", ["A", "B", "C"]),
("figure_2", ["X"]),
], indirect=["model"])
def test_find_unconserved_metabolites(model, inconsistent):
unconserved_mets = consistency.find_unconserved_metabolites(model)
assert set([met.id for met in unconserved_mets]) == set(inconsistent)
@pytest.mark.xfail(reason="Bug in current implementation.")
@pytest.mark.parametrize("model, inconsistent", [
("textbook", []),
("figure_1", [("A'",), ("B'",), ("C'",)]),
("equation_8", [("A",), ("B",), ("C",)]),
("figure_2", [("X",)]),
], indirect=["model"])
def test_find_inconsistent_min_stoichiometry(model, inconsistent):
unconserved_sets = consistency.find_inconsistent_min_stoichiometry(model)
for unconserved in unconserved_sets:
assert tuple(met.id for met in unconserved) in set(inconsistent)
@pytest.mark.parametrize("model, metabolite_id", [
# test control flow statements
("produces_atp", 'MNXM3'),
("produces_accoa", 'MNXM21'),
("produces_fadh2", "MNXM38"),
("produces_glu", "MNXM89557"),
("produces_nadh", "MNXM10"),
("maintenance_present", "MNXM3"),
], indirect=["model"])
def test_detect_energy_generating_cycles_control_flow(model, metabolite_id):
"""Expect that energy-generating cycles don't exist for metabolite ID."""
cycle = consistency.detect_energy_generating_cycles(model, metabolite_id)
assert set(cycle) == {'A', 'B', 'C'}
@pytest.mark.parametrize("model, metabolite_id, output", [
# test for possible exceptions
("no_atp", "MNXM3", []),
("infeasible", "MNXM3", {'A', 'B', 'C'})
], indirect=["model"])
def test_detect_energy_generating_cycles_exceptions(model, metabolite_id,
output):
"""Expect that energy-generating cycles don't exist for metabolite ID."""
result = consistency.detect_energy_generating_cycles(model, metabolite_id)
assert set(result) == set(output)
@pytest.mark.parametrize("model, num", [
("all_balanced", 0),
("mass_unbalanced", 0),
("charge_unbalanced", 1),
("met_no_charge", 1),
("met_no_formula", 0)
], indirect=["model"])
def test_find_charge_unbalanced_reactions(model, num):
"""Expect all reactions to be charge balanced."""
internal_rxns = con_helpers.get_internals(model)
reactions = consistency.find_charge_unbalanced_reactions(internal_rxns)
assert len(reactions) == num
@pytest.mark.parametrize("model, num", [
("all_balanced", 0),
("mass_unbalanced", 1),
("charge_unbalanced", 0),
("met_no_charge", 0),
("met_no_formula", 1)
], indirect=["model"])
def test_find_mass_unbalanced_reactions(model, num):
"""Expect all reactions to be mass balanced."""
internal_rxns = con_helpers.get_internals(model)
reactions = consistency.find_mass_unbalanced_reactions(internal_rxns)
assert len(reactions) == num
@pytest.mark.parametrize("model, num", [
("loopy_toy_model", 3),
("loopless_toy_model", 0),
("infeasible_toy_model", 0),
], indirect=["model"])
def test_find_stoichiometrically_balanced_cycles(model, num):
"""Expect no stoichiometrically balanced loops to be present."""
rxns_in_loops = consistency.find_stoichiometrically_balanced_cycles(
model
)
assert len(rxns_in_loops) == num
@pytest.mark.parametrize("model, num", [
("gap_model", 1),
("gapfilled_model", 0),
("reversible_gap", 0)
], indirect=["model"])
def test_find_orphans(model, num):
"""Expect the appropriate amount of orphans to be found."""
orphans = consistency.find_orphans(model)
assert len(orphans) == num
@pytest.mark.parametrize("model, num", [
("gap_model", 2),
("gapfilled_model", 0),
("reversible_gap", 1)
], indirect=["model"])
def test_find_deadends(model, num):
"""Expect the appropriate amount of deadends to be found."""
deadends = consistency.find_deadends(model)
assert len(deadends) == num
@pytest.mark.parametrize("model, num", [
("gap_model", 1),
("gapfilled_model", 0),
], indirect=["model"])
def test_find_disconnected(model, num):
"""Expect the appropriate amount of disconnected to be found."""
disconnected = consistency.find_disconnected(model)
assert len(disconnected) == num
@pytest.mark.parametrize("model, num", [
("gap_model", 4),
("gap_model_2", 1),
("gapfilled_model", 0),
], indirect=['model'])
def test_find_metabolites_not_produced_with_open_bounds(model, num):
"""Expect the appropriate amount of nonproduced metabolites to be found."""
badmets = consistency.find_metabolites_not_produced_with_open_bounds(model)
assert len(badmets) == num
@pytest.mark.parametrize("model, num", [
("gap_model", 4),
("gap_model_2", 1),
("gapfilled_model", 0),
], indirect=['model'])
def test_find_metabolites_not_consumed_with_open_bounds(model, num):
"""Expect the appropriate amount of nonconsumed metabolites to be found."""
badmets = consistency.find_metabolites_not_consumed_with_open_bounds(model)
assert len(badmets) == num
@pytest.mark.parametrize("model, fraction", [
("blocked_reactions", 1.0),
("constrained_toy_model", 0.0),
("loopy_toy_model", 0.6)
], indirect=["model"])
def test_find_reactions_with_unbounded_flux_default_condition(model, fraction):
"""Expect the number of unbounded and blocked metabolites to be correct."""
_, unb_fraction, _ = \
consistency.find_reactions_with_unbounded_flux_default_condition(model)
assert unb_fraction == fraction
@pytest.mark.parametrize("model", [
pytest.param("missing_energy_partner",
marks=pytest.mark.raises(exception=ZeroDivisionError)),
pytest.param("infeasible",
marks=pytest.mark.raises(exception=Infeasible))
], indirect=["model"])
def test_find_reactions_with_unbounded_flux_default_condition_errors(model):
"""Expect the number of unbounded and blocked metabolites to be correct."""
consistency.find_reactions_with_unbounded_flux_default_condition(model)
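For readers puzzled by indirect=["model"] in the parametrize calls above, here is a sketch of the fixture wiring these tests rely on (an editor's illustration; memote's real conftest may differ, and the "textbook" case is loaded from a packaged model rather than built by a registered function):

import cobra
import pytest

@pytest.fixture
def model(request):
    # With indirect=["model"], pytest hands the parametrized name to this
    # fixture as request.param; each @register_with(MODEL_REGISTRY) function
    # takes an empty cobra.Model and returns it populated.
    builder = MODEL_REGISTRY[request.param]
    return builder(cobra.Model(request.param))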
avg_line_length: 36.636977 | max_line_length: 79 | alphanum_fraction: 0.662627
hexsha: d5e21832afab34174f56382429792e5c24c1b229 | size: 11,246 | ext: py | lang: Python
max_stars: repo ashutom/tensorflow-upstream @ c16069c19de9e286dd664abb78d0ea421e9f32d4, path tensorflow/python/framework/python_api_info_test.py, licenses [Apache-2.0], stars 10 (2021-05-25T17:43:04.000Z to 2022-03-08T10:46:09.000Z)
max_issues: repo CaptainGizzy21/tensorflow @ 3457a2b122e50b4d44ceaaed5a663d635e5c22df, path tensorflow/python/framework/python_api_info_test.py, licenses [Apache-2.0], issues 1,056 (2019-12-15T01:20:31.000Z to 2022-02-10T02:06:28.000Z)
max_forks: repo CaptainGizzy21/tensorflow @ 3457a2b122e50b4d44ceaaed5a663d635e5c22df, path tensorflow/python/framework/python_api_info_test.py, licenses [Apache-2.0], forks 6 (2016-09-07T04:00:15.000Z to 2022-01-12T01:47:38.000Z)
content:
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.python_api_info."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.framework import _pywrap_python_api_info
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
# pylint: disable=g-long-lambda
# Helper function to make expected output in examples more compact:
def Const(x):
return constant_op.constant(x)
@test_util.run_all_in_graph_and_eager_modes
class PythonAPIInfoTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def setUp(self):
context.ensure_initialized()
super(PythonAPIInfoTest, self).setUp()
def makeConverterForGenOp(self, op_name):
"""Returns a PythonAPIInfo for the given gen_op."""
api_info = _pywrap_python_api_info.PythonAPIInfo(op_name)
api_info.InitializeFromRegisteredOp(op_name)
return api_info
def makeConverterFromParamSpecs(self,
api_name,
param_names,
input_specs,
attr_specs,
defaults=()):
"""Returns a PythonAPIInfo built from the given specs."""
api_info = _pywrap_python_api_info.PythonAPIInfo(api_name)
api_info.InitializeFromParamSpecs(input_specs, attr_specs, param_names,
defaults)
return api_info
# This test initializes a PythonAPIInfo from a registered
# op, and then uses DebugInfo() to check that the internal state is
# correct.
@parameterized.named_parameters([
# An op whose inputs have fixed dtypes.
("RegexFullMatch", "RegexFullMatch", "DebugInfo for RegexFullMatch:\n"
" param_names=[input, pattern, name]\n"
" defaults_tuple=('RegexFullMatch',)\n"
" inputs=[\n"
" {index=0, name=input, is_list=0},\n"
" {index=1, name=pattern, is_list=0},]\n"
" inputs_with_fixed_dtype=[\n"
" {index=0, dtype=DT_STRING, is_list=0},\n"
" {index=1, dtype=DT_STRING, is_list=0},]\n"),
# An op whose input has a variable dtype.
("Abs", "Abs", "DebugInfo for Abs:\n"
" param_names=[x, name]\n"
" defaults_tuple=('Abs',)\n"
" attributes=[\n"
" {inferred_index=0, name=T, type=type},]\n"
" inputs=[\n"
" {index=0, name=x, is_list=0},]\n"
" inputs_with_type_attr=[\n"
" {type_attr=T, tensor_params=[0], ok_dtypes=[DT_BFLOAT16, DT_HALF, "
"DT_FLOAT, DT_DOUBLE, DT_INT8, DT_INT16, DT_INT32, DT_INT64]},]\n"
" inferred_type_attrs=[T]\n"),
# An op with two inputs that have the same (variable) dtype.
("AddV2", "AddV2", "DebugInfo for AddV2:\n"
" param_names=[x, y, name]\n"
" defaults_tuple=('AddV2',)\n"
" attributes=[\n"
" {inferred_index=0, name=T, type=type},]\n"
" inputs=[\n"
" {index=0, name=x, is_list=0},\n"
" {index=1, name=y, is_list=0},]\n"
" inputs_with_type_attr=[\n"
" {type_attr=T, tensor_params=[0, 1], ok_dtypes=[DT_BFLOAT16, "
"DT_HALF, DT_FLOAT, DT_DOUBLE, DT_UINT8, DT_UINT16, DT_UINT32, "
"DT_UINT64, DT_INT8, DT_INT16, "
"DT_INT32, DT_INT64, DT_COMPLEX64, DT_COMPLEX128]},]\n"
" inferred_type_attrs=[T]\n"),
# An op with an int attribute.
("GatherV2", "GatherV2", "DebugInfo for GatherV2:\n"
" param_names=[params, indices, axis, batch_dims, name]\n"
" defaults_tuple=(0, 'GatherV2')\n"
" attributes=[\n"
" {index=3, name=batch_dims, type=int},\n"
" {inferred_index=0, name=Tparams, type=type},\n"
" {inferred_index=1, name=Tindices, type=type},\n"
" {inferred_index=2, name=Taxis, type=type},]\n"
" inputs=[\n"
" {index=0, name=params, is_list=0},\n"
" {index=1, name=indices, is_list=0},\n"
" {index=2, name=axis, is_list=0},]\n"
" inputs_with_type_attr=[\n"
" {type_attr=Tparams, tensor_params=[0]},\n"
" {type_attr=Tindices, tensor_params=[1], "
"ok_dtypes=[DT_INT32, DT_INT64]},\n"
" {type_attr=Taxis, tensor_params=[2], "
"ok_dtypes=[DT_INT32, DT_INT64]},]\n"
" inferred_type_attrs=[Tparams, Tindices, Taxis]\n"),
# An op with default attrib values.
("ReduceJoin", "ReduceJoin", "DebugInfo for ReduceJoin:\n"
" param_names=[inputs, reduction_indices, keep_dims, separator, name]\n"
" defaults_tuple=(False, '', 'ReduceJoin')\n"
" attributes=[\n"
" {index=2, name=keep_dims, type=bool},\n"
" {index=3, name=separator, type=string},]\n"
" inputs=[\n"
" {index=0, name=inputs, is_list=0},\n"
" {index=1, name=reduction_indices, is_list=0},]\n"
" inputs_with_fixed_dtype=[\n"
" {index=0, dtype=DT_STRING, is_list=0},\n"
" {index=1, dtype=DT_INT32, is_list=0},]\n"),
# An op with a variable-dtype list input, and an int attribute.
("ParseExampleV2", "ParseExampleV2", "DebugInfo for ParseExampleV2:\n"
" param_names=[serialized, names, sparse_keys, dense_keys, "
"ragged_keys, dense_defaults, num_sparse, sparse_types, "
"ragged_value_types, ragged_split_types, dense_shapes, name]\n"
" defaults_tuple=('ParseExampleV2',)\n"
" attributes=[\n"
" {inferred_index=0, name=Tdense, type=list(type)},\n"
" {index=6, name=num_sparse, type=int},\n"
" {index=7, name=sparse_types, type=list(type)},\n"
" {index=8, name=ragged_value_types, type=list(type)},\n"
" {index=9, name=ragged_split_types, type=list(type)},\n"
" {index=10, name=dense_shapes, type=list(shape)},]\n"
" inputs=[\n"
" {index=0, name=serialized, is_list=0},\n"
" {index=1, name=names, is_list=0},\n"
" {index=2, name=sparse_keys, is_list=0},\n"
" {index=3, name=dense_keys, is_list=0},\n"
" {index=4, name=ragged_keys, is_list=0},\n"
" {index=5, name=dense_defaults, is_list=1},]\n"
" inputs_with_fixed_dtype=[\n"
" {index=0, dtype=DT_STRING, is_list=0},\n"
" {index=1, dtype=DT_STRING, is_list=0},\n"
" {index=2, dtype=DT_STRING, is_list=0},\n"
" {index=3, dtype=DT_STRING, is_list=0},\n"
" {index=4, dtype=DT_STRING, is_list=0},]\n"
" inputs_with_type_list_attrs=[\n"
" {type_list_attr=Tdense, tensor_list_params=[5], "
"ok_dtypes=[DT_FLOAT, DT_INT64, DT_STRING]},]\n"
" inferred_type_list_attrs=[Tdense]\n"),
# An op with a default dtype
("BroadcastArgs", "BroadcastArgs", "DebugInfo for BroadcastArgs:\n"
" param_names=[s0, s1, name]\n"
" defaults_tuple=('BroadcastArgs',)\n"
" attributes=[\n"
" {inferred_index=0, name=T, type=type},]\n"
" inputs=[\n"
" {index=0, name=s0, is_list=0},\n"
" {index=1, name=s1, is_list=0},]\n"
" inputs_with_type_attr=[\n"
" {type_attr=T, default_dtype=DT_INT32, tensor_params=[0, 1], "
"ok_dtypes=[DT_INT32, DT_INT64]},]\n"
" inferred_type_attrs=[T]\n"),
])
def testInitializeFromRegisteredOp(self, op_name, debug_info):
api_info = self.makeConverterForGenOp(op_name)
self.assertEqual(api_info.DebugInfo().strip(), debug_info.strip())
# This test initializes a PythonAPIInfo from parameter specs,
# and then uses DebugInfo() to check that the internal state is correct.
@parameterized.named_parameters([
("NoParams", "NoParams", [], {}, {}, "DebugInfo for NoParams:\n"
" param_names=[]\n"
" defaults_tuple=()\n"),
("OnlyNameParam", "OnlyNameParam", ["name"], {}, {},
"DebugInfo for OnlyNameParam:\n"
" param_names=[name]\n"
" defaults_tuple=()\n"),
("SomeBinaryOp", "SomeBinaryOp", ["x", "y"], dict(x="T", y="T"),
dict(T="type"), "DebugInfo for SomeBinaryOp:\n"
" param_names=[x, y]\n"
" defaults_tuple=()\n"
" attributes=[\n"
" {inferred_index=0, name=T, type=type},]\n"
" inputs=[\n"
" {index=0, name=x, is_list=0},\n"
" {index=1, name=y, is_list=0},]\n"
" inputs_with_type_attr=[\n"
" {type_attr=T, tensor_params=[0, 1]},]\n"
" inferred_type_attrs=[T]\n"),
("AllAttributeTypes", "AllAttributeTypes", [
"a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n",
"o", "p"
], {},
dict(
a="any",
b="float",
c="int",
d="string",
e="bool",
f="type",
g="shape",
h="tensor",
i="list(any)",
j="list(float)",
k="list(int)",
l="list(string)",
m="list(bool)",
n="list(type)",
o="list(shape)",
p="list(tensor)"), "DebugInfo for AllAttributeTypes:\n"
" param_names=[a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p]\n"
" defaults_tuple=()\n"
" attributes=[\n"
" {index=0, name=a, type=any},\n"
" {index=1, name=b, type=float},\n"
" {index=2, name=c, type=int},\n"
" {index=3, name=d, type=string},\n"
" {index=4, name=e, type=bool},\n"
" {index=5, name=f, type=type},\n"
" {index=6, name=g, type=shape},\n"
" {index=7, name=h, type=tensor},\n"
" {index=8, name=i, type=list(any)},\n"
" {index=9, name=j, type=list(float)},\n"
" {index=10, name=k, type=list(int)},\n"
" {index=11, name=l, type=list(string)},\n"
" {index=12, name=m, type=list(bool)},\n"
" {index=13, name=n, type=list(type)},\n"
" {index=14, name=o, type=list(shape)},\n"
" {index=15, name=p, type=list(tensor)},]\n"),
])
def testInitializeFromParamSpecs(self, api_name, param_names, input_specs,
attr_specs, debug_info):
api_info = self.makeConverterFromParamSpecs(api_name, param_names,
input_specs, attr_specs)
self.assertEqual(api_info.DebugInfo().strip(), debug_info.strip())
if __name__ == "__main__":
googletest.main()
avg_line_length: 43.929688 | max_line_length: 80 | alphanum_fraction: 0.580829
hexsha: 3553f408010c663cd9ab817665cab0da213d6bb2 | size: 12,261 | ext: py | lang: Python
repo: rinceyuan/WeFe @ 8482cb737cb7ba37b2856d184cd42c1bd35a6318 | path: kernel/components/binning/vertfeaturebinning/base_feature_binning.py | licenses: [Apache-2.0]
stars: 39 (2021-10-12T01:43:27.000Z to 2022-03-28T04:46:35.000Z) | issues: 6 (2021-10-14T02:11:47.000Z to 2022-03-23T02:41:50.000Z) | forks: 10 (2021-10-14T09:36:03.000Z to 2022-02-10T11:05:12.000Z)
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2021 Tianmian Tech. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
from common.python.common.consts import DataSetSourceType
from common.python.utils import log_utils
from kernel.components.binning.core.base_binning import Binning
from kernel.components.binning.core.bin_inner_param import BinInnerParam
from kernel.components.binning.core.bucket_binning import BucketBinning
from kernel.components.binning.core.quantile_binning import QuantileBinning
from kernel.components.binning.vertfeaturebinning.param import FeatureBinningParam
from kernel.model_base import ModelBase
from kernel.protobuf.generated import feature_binning_meta_pb2, feature_binning_param_pb2
from kernel.transfer.variables.transfer_class.vert_feature_binning_transfer_variable import \
VertFeatureBinningTransferVariable
from kernel.utils import consts
from kernel.utils import data_util
from kernel.utils.data_util import get_header
from kernel.utils.io_check import assert_io_num_rows_equal
LOGGER = log_utils.get_logger()
MODEL_PARAM_NAME = 'FeatureBinningParam'
MODEL_META_NAME = 'FeatureBinningMeta'
class BaseVertFeatureBinning(ModelBase):
"""
Perform the binning method jointly across promoter and provider
"""
def __init__(self):
super(BaseVertFeatureBinning, self).__init__()
self.transfer_variable = VertFeatureBinningTransferVariable()
self.binning_obj: Binning = None
self.binning_obj_list = []
self.header = None
self.schema = None
self.provider_results = []
self.provider_results_list = []
self.transform_type = None
self.model_save_to_storage = True
self.save_dataset = True
self.model_param = FeatureBinningParam()
self.bin_inner_param = BinInnerParam()
self.set_show_name("(Binning)")
self.source_type = DataSetSourceType.BINNING
def _init_model(self, params: FeatureBinningParam):
self.model_param = params
#
# self.transform_type = self.model_param.transform_param.transform_type
#
# if self.model_param.method == consts.QUANTILE:
# self.binning_obj = QuantileBinning(self.model_param)
# elif self.model_param.method == consts.BUCKET:
# self.binning_obj = BucketBinning(self.model_param)
# elif self.model_param.method == consts.OPTIMAL:
# if self.role == consts.PROVIDER:
# self.model_param.bin_num = self.model_param.optimal_binning_param.init_bin_nums
# self.binning_obj = QuantileBinning(self.model_param)
# else:
# self.binning_obj = OptimalBinning(self.model_param)
# else:
# # self.binning_obj = QuantileBinning(self.bin_param)
# raise ValueError("Binning method: {} is not supported yet".format(self.model_param.method))
# LOGGER.debug("in _init_model, role: {}, local_member_id: {}".format(self.role, self.component_properties))
# self.binning_obj.set_role_party(self.role, self.component_properties.local_member_id)
def _setup_bin_inner_param(self, data_instances, params: FeatureBinningParam):
# if self.schema is not None:
# return
self.bin_inner_param = BinInnerParam()
self.header = get_header(data_instances)
LOGGER.debug("_setup_bin_inner_param, get header: {}".format(self.header))
self.schema = data_instances.schema
self.bin_inner_param.set_header(self.header)
if params.bin_indexes == -1:
self.bin_inner_param.set_bin_all()
else:
self.bin_inner_param.add_bin_indexes(params.bin_indexes)
self.bin_inner_param.add_bin_names(params.bin_names)
self.bin_inner_param.add_category_indexes(params.category_indexes)
self.bin_inner_param.add_category_names(params.category_names)
if params.transform_param.transform_cols == -1:
self.bin_inner_param.set_transform_all()
else:
self.bin_inner_param.add_transform_bin_indexes(params.transform_param.transform_cols)
self.bin_inner_param.add_transform_bin_names(params.transform_param.transform_names)
# LOGGER.debug("After _setup_bin_inner_param: {}".format(self.bin_inner_param.__dict__))
self.binning_obj.set_bin_inner_param(self.bin_inner_param)
LOGGER.debug("After _setup_bin_inner_param, header: {}".format(self.header))
@assert_io_num_rows_equal
def transform(self, data_instances):
self._setup_bin_inner_param(data_instances, self.model_param)
data_instances = self.binning_obj.transform(data_instances, self.transform_type)
self.set_schema(data_instances)
self.data_output = self.binning_obj.convert_feature_to_woe(data_instances)
return data_instances
def _get_meta(self):
# col_list = [str(x) for x in self.cols]
transform_param = feature_binning_meta_pb2.TransformMeta(
transform_cols=self.bin_inner_param.transform_bin_indexes,
transform_type=self.model_param.transform_param.transform_type
)
meta_protobuf_obj = feature_binning_meta_pb2.FeatureBinningMeta(
method=self.model_param.method,
compress_thres=self.model_param.compress_thres,
head_size=self.model_param.head_size,
error=self.model_param.error,
bin_num=int(self.model_param.bin_num),
cols=self.bin_inner_param.bin_names,
adjustment_factor=self.model_param.adjustment_factor,
local_only=self.model_param.local_only,
need_run=self.need_run,
transform_param=transform_param
)
return meta_protobuf_obj
def _get_param(self):
binning_result_obj = self.binning_obj.bin_results.generated_pb()
# binning_result_obj = self.bin_results.generated_pb()
provider_results = [x.bin_results.generated_pb() for x in self.provider_results if
x.bin_results.all_cols_results]
result_obj = feature_binning_param_pb2.FeatureBinningParam(binning_result=binning_result_obj,
provider_results=provider_results,
header=self.header)
LOGGER.debug("json_result: {}".format(result_obj))
return result_obj
def load_model(self, model_dict):
model_0 = list(model_dict.get('model'))[0]
cols = model_0.get('Model_Meta').get('cols')
Model_Param = model_0.get('Model_Param')
binningResult = Model_Param.get('binningResult').get('binningResult')
header = Model_Param.get('header')
model_dict_str = json.dumps(model_dict)
model_dict_str = self.hump2underline(model_dict_str)
model_dict = json.loads(model_dict_str)
LOGGER.debug("model_dict ===> {}".format(model_dict))
for name, value in binningResult.items():
binningResult[name] = json.loads(self.hump2underline(json.dumps(value)))
model_meta = list(model_dict.get('model'))[0].get("model_meta")
model_meta['cols'] = cols
model_param = list(model_dict.get('model'))[0].get("model_param")
binning_result = model_param.get('binning_result')
binning_result['binning_result'] = binningResult
model_param['binning_result'] = binning_result
model_param['header'] = header
LOGGER.debug(f"model_meta={model_meta}, model_param={model_param}")
self.bin_inner_param = BinInnerParam()
# assert isinstance(model_meta, feature_binning_meta_pb2.FeatureBinningMeta)
# assert isinstance(model_param, feature_binning_param_pb2.FeatureBinningParam)
self.header = list(model_param["header"])
self.bin_inner_param.set_header(self.header)
self.bin_inner_param.add_transform_bin_indexes(list(model_meta["transform_param"]["transform_cols"]))
self.bin_inner_param.add_bin_names(list(model_meta["cols"]))
self.transform_type = model_meta["transform_param"]["transform_type"]
bin_method = str(model_meta["method"])
if bin_method == consts.QUANTILE:
self.binning_obj = QuantileBinning(params=model_meta)
else:
self.binning_obj = BucketBinning(params=model_meta)
self.binning_obj.set_role_party(self.role, self.component_properties.local_member_id)
self.binning_obj.set_bin_inner_param(self.bin_inner_param)
self.binning_obj.bin_results.reconstruct2(model_param["binning_result"])
self.provider_results = []
LOGGER.debug(f"provider_results={model_param['provider_results']}")
for host_pb in model_param["provider_results"]:
LOGGER.debug("host_pb ===> {}".format(host_pb))
binning_result = host_pb["binning_result"]
if not binning_result:
continue
host_bin_obj = Binning()
host_bin_obj.bin_results.reconstruct2(host_pb)
self.provider_results.append(host_bin_obj)
def export_model(self):
if self.model_output is not None:
return self.model_output
meta_obj = self._get_meta()
param_obj = self._get_param()
result = {
MODEL_META_NAME: meta_obj,
MODEL_PARAM_NAME: param_obj
}
self.model_output = result
# self.model_save_to_storage = True
return result
def output_data(self):
return self.data_output
def set_schema(self, data_instance):
self.schema['header'] = self.header
data_instance.schema = self.schema
LOGGER.debug("After Binning, data_instance is : {}".format(data_instance))
LOGGER.debug("After Binning, when setting schema, schema is : {}".format(data_instance.schema))
def _abnormal_detection(self, data_instances):
"""
Make sure input data_instances is valid.
"""
data_util.empty_table_detection(data_instances)
data_util.empty_feature_detection(data_instances)
def get_indexes(self, bin_feature_names: list, data_instances):
bin_indexes = []
if len(bin_feature_names) == 0:
return bin_indexes
data_feature_names = data_instances.schema["header"]
for bin_feature_name in bin_feature_names:
index = data_feature_names.index(bin_feature_name)
bin_indexes.append(index)
return bin_indexes
def hump2underline(self, hump_str):
"""
Convert a camel-case string to underscore (snake_case) form
:param hump_str: camel-case string
:return: underscore-form string
"""
p = re.compile(r'([a-z]|\d)([A-Z])')
sub = re.sub(p, r'\1_\2', hump_str).lower()
return sub
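# Illustrative note (not part of the original class): the regex above inserts "_"
# between a lowercase letter or digit and the following capital, then lower-cases
# the whole string, e.g.
#   self.hump2underline('binningResult')       -> 'binning_result'
#   self.hump2underline('transformParam2Meta') -> 'transform_param2_meta'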
def _add_summary(self, split_points):
summary = {}
for k, v in split_points.items():
summary[k] = list(v)
self.set_summary({"split_points": summary})
LOGGER.info(f'summary={summary}')
| 43.021053
| 116
| 0.69195
|
5018fecea46e40b631af5d9a8a9302c9737c99ff
| 1,187
|
py
|
Python
|
Lib/site-packages/qtutils/disconnect_contextmanager.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | null | null | null |
Lib/site-packages/qtutils/disconnect_contextmanager.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | 20
|
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
Lib/site-packages/qtutils/disconnect_contextmanager.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | null | null | null |
#####################################################################
# #
# disconnect_contextmanager.py #
# #
# Copyright 2013, Christopher Billington, Philip Starkey #
# #
# This file is part of the qtutils project #
# (see https://github.com/philipstarkey/qtutils ) #
# and is licensed under the 2-clause, or 3-clause, BSD License. #
# See the license.txt file in the root of the project #
# for the full license. #
# #
#####################################################################
class DisconnectContextManager(object):
def __init__(self, signal, slot):
self.signal = signal
self.slot = slot
def __enter__(self):
self.signal.disconnect(self.slot)
def __exit__(self, *exc_info):
self.signal.connect(self.slot)
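# Usage sketch (illustrative only; `spinbox` and `on_value_changed` are hypothetical
# names for any Qt signal/slot pair that is already connected elsewhere):
#
#     with DisconnectContextManager(spinbox.valueChanged, on_value_changed):
#         spinbox.setValue(5)  # programmatic change; the slot is not invoked here
#
# Entering the block disconnects the slot; __exit__ reconnects it on both normal
# and exceptional exits, so the original connection is always restored.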
| 45.653846
| 69
| 0.361415
|
80eabaf1a7bfb906d521d66b8c45a14298aa204a
| 1,486
|
py
|
Python
|
ros2bag/setup.py
|
albtam/rosbag2
|
e4ce24cdfa7e24c6d2c025ecc38ab1157a0eecc8
|
[
"Apache-2.0"
] | null | null | null |
ros2bag/setup.py
|
albtam/rosbag2
|
e4ce24cdfa7e24c6d2c025ecc38ab1157a0eecc8
|
[
"Apache-2.0"
] | null | null | null |
ros2bag/setup.py
|
albtam/rosbag2
|
e4ce24cdfa7e24c6d2c025ecc38ab1157a0eecc8
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import find_packages
from setuptools import setup
package_name = 'ros2bag'
setup(
name=package_name,
version='0.6.0',
packages=find_packages(exclude=['test']),
data_files=[
('share/' + package_name, ['package.xml']),
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
],
install_requires=['ros2cli'],
zip_safe=True,
author='Karsten Knese',
author_email='karsten@osrfoundation.org',
maintainer='Karsten Knese',
maintainer_email='karsten@osrfoundation.org',
keywords=[],
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
],
description='Entry point for rosbag in ROS 2',
long_description="""\
The package provides the rosbag command for the ROS 2 command line tools.""",
license='Apache License, Version 2.0',
tests_require=['pytest'],
entry_points={
'ros2cli.command': [
'bag = ros2bag.command.bag:BagCommand',
],
'ros2cli.extension_point': [
'ros2bag.verb = ros2bag.verb:VerbExtension',
],
'ros2bag.verb': [
'info = ros2bag.verb.info:InfoVerb',
'list = ros2bag.verb.list:ListVerb',
'play = ros2bag.verb.play:PlayVerb',
'record = ros2bag.verb.record:RecordVerb',
],
}
)
| 30.958333
| 77
| 0.611709
|
ae394dd25e9ce1ff43e7a610b1e8e0a75cca2600
| 457
|
py
|
Python
|
src/source.py
|
CoolPiotr/CRITERIA
|
7ec94fbc95dc363227c50a923052115f28c12623
|
[
"MIT"
] | 13
|
2020-08-17T22:04:50.000Z
|
2022-02-25T20:59:18.000Z
|
src/source.py
|
CoolPiotr/CRITERIA
|
7ec94fbc95dc363227c50a923052115f28c12623
|
[
"MIT"
] | 12
|
2020-08-24T15:49:29.000Z
|
2022-03-17T17:49:46.000Z
|
src/source.py
|
CoolPiotr/CRITERIA
|
7ec94fbc95dc363227c50a923052115f28c12623
|
[
"MIT"
] | 2
|
2021-04-22T16:40:33.000Z
|
2021-04-22T16:48:38.000Z
|
# Main CIDOC-CRM classes, serving as Mermaid classes for styling
classes = [
"E2_Temporal_Entity",
"E55_Type",
"E52_Time-Span",
"E41_Appellation",
"E53_Place",
"E77_Persistent_Item",
"E28_Conceptual_Object",
"E18_Physical_Thing",
"E39_Actor",
"E1_CRM_Entity"]
# Ontologies files
onto = {
'crm': 'cidoc_crm_v6.2.1-2018April.rdfs',
'pc': 'CRMpc_v1.1.1.rdfs',
'frbroo': 'FRBR2.4-draft.rdfs',
'crmdig': 'CRMdig_v3.2.2.rdfs'
}
| 21.761905
| 64
| 0.673961
|
83e74dfc444edffa7c75e3e4c00a729f2f9977c6
| 19,746
|
py
|
Python
|
M3_T5_no_pretrain_subword/t5_no_pretraining_main.py
|
VulRepairTeam/VulRepair
|
9cf2abd7ca27d84445ddfc7ab323745a5b676cce
|
[
"MIT"
] | null | null | null |
M3_T5_no_pretrain_subword/t5_no_pretraining_main.py
|
VulRepairTeam/VulRepair
|
9cf2abd7ca27d84445ddfc7ab323745a5b676cce
|
[
"MIT"
] | null | null | null |
M3_T5_no_pretrain_subword/t5_no_pretraining_main.py
|
VulRepairTeam/VulRepair
|
9cf2abd7ca27d84445ddfc7ab323745a5b676cce
|
[
"MIT"
] | 2
|
2022-03-21T04:32:39.000Z
|
2022-03-22T01:02:49.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler
from transformers import (AdamW, get_linear_schedule_with_warmup,
T5ForConditionalGeneration, RobertaTokenizer, T5Config)
from tqdm import tqdm
import pandas as pd
from torch.utils.tensorboard import SummaryWriter
cpu_cont = 16
logger = logging.getLogger(__name__)
class InputFeatures(object):
"""A single training/test features for a example."""
def __init__(self,
input_ids,
label,
decoder_input_ids):
self.input_ids = input_ids
self.label=label
self.decoder_input_ids = decoder_input_ids
class TextDataset(Dataset):
def __init__(self, tokenizer, args, file_type="train"):
if file_type == "train":
file_path = args.train_data_file
elif file_type == "eval":
file_path = args.eval_data_file
elif file_type == "test":
file_path = args.test_data_file
self.examples = []
df = pd.read_csv(file_path)
sources = df["source"].tolist()
labels = df["target"].tolist()
for i in tqdm(range(len(sources))):
self.examples.append(convert_examples_to_features(sources[i], labels[i], tokenizer, args))
if file_type == "train":
for example in self.examples[:3]:
logger.info("*** Example ***")
logger.info("label: {}".format(example.label))
logger.info("input_ids: {}".format(' '.join(map(str, example.input_ids))))
logger.info("decoder_input_ids: {}".format(' '.join(map(str, example.decoder_input_ids))))
def __len__(self):
return len(self.examples)
def __getitem__(self, i):
return self.examples[i].input_ids, self.examples[i].input_ids.ne(0), self.examples[i].label, self.examples[i].decoder_input_ids
def convert_examples_to_features(source, label, tokenizer, args):
# encode
source_ids = tokenizer.encode(source, truncation=True, max_length=args.encoder_block_size, padding='max_length', return_tensors='pt')
decoder_input_ids = tokenizer.encode(label, truncation=True, max_length=args.decoder_block_size, padding='max_length', return_tensors='pt')
label = tokenizer.encode(label, truncation=True, max_length=args.decoder_block_size, padding='max_length', return_tensors='pt')
return InputFeatures(source_ids, label, decoder_input_ids)
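# Shape note (illustration, not from the original file): with padding='max_length'
# and return_tensors='pt', each tokenizer.encode(...) call above returns a tensor of
# shape [1, block_size]; that extra leading dimension is what the x.squeeze(1) calls
# in train()/evaluate()/test() remove after batching.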
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer, eval_dataset):
""" Train the model """
# build dataloader
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, num_workers=0)
args.max_steps = args.epochs * len(train_dataloader)
# evaluate model per epoch
args.save_steps = len(train_dataloader) * 1
args.warmup_steps = args.max_steps // 5
model.to(args.device)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
num_training_steps=args.max_steps)
# multi-gpu training
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.train_batch_size//max(args.n_gpu, 1))
logger.info(" Total train batch size = %d",args.train_batch_size*args.gradient_accumulation_steps)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", args.max_steps)
global_step = 0
tr_loss, logging_loss, avg_loss, tr_nb, tr_num, train_loss = 0.0, 0.0, 0.0, 0, 0, 0
best_loss = 100
writer_path = "tb/codet5_training_loss"
writer = SummaryWriter(writer_path)
model.zero_grad()
for idx in range(args.epochs):
bar = tqdm(train_dataloader, total=len(train_dataloader))
tr_num = 0
train_loss = 0
for step, batch in enumerate(bar):
(input_ids, attention_mask, labels, decoder_input_ids) = [x.squeeze(1).to(args.device) for x in batch]
model.train()
# the forward function automatically creates the correct decoder_input_ids
loss = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels).loss
if args.n_gpu > 1:
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
tr_num += 1
train_loss += loss.item()
if avg_loss == 0:
avg_loss = tr_loss
avg_loss = round(train_loss/tr_num,5)
bar.set_description("epoch {} loss {}".format(idx,avg_loss))
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
scheduler.step()
global_step += 1
output_flag = True
avg_loss = round(np.exp((tr_loss - logging_loss) / (global_step - tr_nb)), 4)
if global_step % args.save_steps == 0:
# placeholder of evaluation
eval_loss = evaluate(args, model, tokenizer, eval_dataset, eval_when_training=True)
# Save model checkpoint
if eval_loss < best_loss:
best_loss = eval_loss
logger.info(" "+"*"*20)
logger.info(" Best Loss:%s",round(best_loss,4))
logger.info(" "+"*"*20)
checkpoint_prefix = 'checkpoint-best-loss'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model,'module') else model
output_dir = os.path.join(output_dir, '{}'.format(args.model_name))
torch.save(model_to_save.state_dict(), output_dir)
logger.info("Saving model checkpoint to %s", output_dir)
def clean_tokens(tokens):
tokens = tokens.replace("<pad>", "")
tokens = tokens.replace("<s>", "")
tokens = tokens.replace("</s>", "")
tokens = tokens.strip("\n")
tokens = tokens.strip()
return tokens
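# Example of the cleanup above (illustrative only):
#   clean_tokens("<s> int x = 0 ; </s><pad><pad>") -> "int x = 0 ;"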
def evaluate(args, model, tokenizer, eval_dataset, eval_when_training=False):
#build dataloader
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, num_workers=0)
# multi-gpu evaluate
if args.n_gpu > 1 and eval_when_training is False:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
model.eval()
eval_loss, num = 0, 0
bar = tqdm(eval_dataloader, total=len(eval_dataloader))
for batch in bar:
(input_ids, attention_mask, labels, decoder_input_ids) = [x.squeeze(1).to(args.device) for x in batch]
loss = model(input_ids=input_ids, attention_mask=attention_mask, labels=labels).loss
if args.n_gpu > 1:
loss = loss.mean()
eval_loss += loss.item()
num += 1
eval_loss = round(eval_loss/num,5)
model.train()
logger.info("***** Eval results *****")
logger.info(f"Evaluation Loss: {str(eval_loss)}")
return eval_loss
def test(args, model, tokenizer, test_dataset, best_threshold=0.5):
# build dataloader
test_sampler = SequentialSampler(test_dataset)
test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=args.eval_batch_size, num_workers=0)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Test!
logger.info("***** Running Test *****")
logger.info(" Num examples = %d", len(test_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
nb_eval_steps = 0
model.eval()
accuracy = []
raw_predictions = []
correct_prediction = ""
bar = tqdm(test_dataloader, total=len(test_dataloader))
for batch in bar:
correct_pred = False
(input_ids, attention_mask, labels, decoder_input_ids)=[x.squeeze(1).to(args.device) for x in batch]
with torch.no_grad():
beam_outputs = model.generate(input_ids=input_ids,
attention_mask=attention_mask,
do_sample=False, # disable sampling to test if batching affects output
num_beams=args.num_beams,
num_return_sequences=args.num_beams,
max_length=args.decoder_block_size)
beam_outputs = beam_outputs.detach().cpu().tolist()
decoder_input_ids = decoder_input_ids.detach().cpu().tolist()
for single_output in beam_outputs:
# pred
prediction = tokenizer.decode(single_output, skip_special_tokens=False)
prediction = clean_tokens(prediction)
# truth
ground_truth = tokenizer.decode(decoder_input_ids[0], skip_special_tokens=False)
ground_truth = clean_tokens(ground_truth)
if prediction == ground_truth:
correct_prediction = prediction
correct_pred = True
break
if correct_pred:
raw_predictions.append(correct_prediction)
accuracy.append(1)
else:
# if not correct, use the first output in the beam as the raw prediction
raw_pred = tokenizer.decode(beam_outputs[0], skip_special_tokens=False)
raw_pred = clean_tokens(raw_pred)
raw_predictions.append(raw_pred)
accuracy.append(0)
nb_eval_steps += 1
# calculate accuracy
test_result = round(sum(accuracy) / len(accuracy), 4)
logger.info("***** Test results *****")
logger.info(f"Test Accuracy: {str(test_result)}")
# write prediction to file
df = pd.read_csv(args.test_data_file)
df["raw_predictions"] = raw_predictions
df["correctly_predicted"] = accuracy
f_name = args.test_data_file.split("/")[-1].split("_")[:2]
f_name = "_".join(f_name)
df.to_csv(f"../data/raw_predictions/T5-no-pretraining/{f_name}_raw_preds.csv")
def main():
parser = argparse.ArgumentParser()
# Params
parser.add_argument("--train_data_file", default=None, type=str, required=False,
help="The input training data file (a csv file).")
parser.add_argument("--output_dir", default=None, type=str, required=False,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--model_type", default="t5", type=str,
help="The model architecture to be fine-tuned.")
parser.add_argument("--encoder_block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--decoder_block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--num_beams", default=50, type=int,
help="Beam size to use when decoding.")
parser.add_argument("--eval_data_file", default=None, type=str,
help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
parser.add_argument("--test_data_file", default=None, type=str,
help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
parser.add_argument("--model_name", default="model.bin", type=str,
help="Saved model name.")
parser.add_argument("--checkpoint_model_name", default="non_domain_model.bin", type=str,
help="Checkpoint model name.")
parser.add_argument("--model_name_or_path", default=None, type=str,
help="The model checkpoint for weights initialization.")
parser.add_argument("--config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--load_model_from_checkpoint", default=False, action='store_true',
help="Whether to load model from checkpoint.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Run evaluation during training at each logging step.")
parser.add_argument("--train_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--eval_batch_size", default=4, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--epochs', type=int, default=1,
help="training epochs")
args = parser.parse_args()
# Setup CUDA, GPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
args.device = device
# to remove
args.n_gpu = 1
args.device = "cuda:1"
###
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',datefmt='%m/%d/%Y %H:%M:%S',level=logging.INFO)
logger.warning("device: %s, n_gpu: %s",device, args.n_gpu,)
# Set seed
set_seed(args)
tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
config = T5Config.from_pretrained(args.config_name)
model = T5ForConditionalGeneration(config=config)
model.resize_token_embeddings(len(tokenizer))
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = TextDataset(tokenizer, args, file_type='train')
eval_dataset = TextDataset(tokenizer, args, file_type='eval')
if args.load_model_from_checkpoint:
checkpoint_prefix = f'checkpoint-best-loss/{args.checkpoint_model_name}'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
model.load_state_dict(torch.load(output_dir))
model.to(args.device)
train(args, train_dataset, model, tokenizer, eval_dataset)
# Evaluation
results = {}
if args.do_eval:
checkpoint_prefix = f'checkpoint-best-loss/{args.model_name}'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
model.load_state_dict(torch.load(output_dir))
model.to(args.device)
eval_dataset = TextDataset(tokenizer, args, file_type='eval')
result=evaluate(args, model, tokenizer, eval_dataset)
if args.do_test:
checkpoint_prefix = f'checkpoint-best-loss/{args.model_name}'
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
model.load_state_dict(torch.load(output_dir, map_location=args.device))
model.to(args.device)
test_dataset = TextDataset(tokenizer, args, file_type='test')
test(args, model, tokenizer, test_dataset, best_threshold=0.5)
return results
if __name__ == "__main__":
main()
| 48.997519
| 143
| 0.632837
|
37fe2f860a561a40f8a6a70dca11a87518fec383
| 2,084
|
py
|
Python
|
opensearchpy/client/features.py
|
CEHENKLE/opensearch-py
|
44965ad0c91ec59948eb28b652d1474fa1818d76
|
[
"Apache-2.0"
] | 75
|
2021-08-20T03:43:38.000Z
|
2022-03-31T12:55:05.000Z
|
opensearchpy/client/features.py
|
CEHENKLE/opensearch-py
|
44965ad0c91ec59948eb28b652d1474fa1818d76
|
[
"Apache-2.0"
] | 75
|
2021-08-19T19:06:51.000Z
|
2022-03-28T16:11:04.000Z
|
opensearchpy/client/features.py
|
CEHENKLE/opensearch-py
|
44965ad0c91ec59948eb28b652d1474fa1818d76
|
[
"Apache-2.0"
] | 28
|
2021-08-19T16:49:34.000Z
|
2022-03-22T21:48:34.000Z
|
# SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
#
# Modifications Copyright OpenSearch Contributors. See
# GitHub history for details.
#
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .utils import NamespacedClient, query_params
class FeaturesClient(NamespacedClient):
@query_params("master_timeout")
def get_features(self, params=None, headers=None):
"""
Gets a list of features which can be included in snapshots using the
feature_states field when creating a snapshot
:arg master_timeout: Explicit operation timeout for connection
to master node
"""
return self.transport.perform_request(
"GET", "/_features", params=params, headers=headers
)
@query_params()
def reset_features(self, params=None, headers=None):
"""
Resets the internal state of features, usually by deleting system indices
.. warning::
This API is **experimental** so may include breaking changes
or be removed in a future version
"""
return self.transport.perform_request(
"POST", "/_features/_reset", params=params, headers=headers
)
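# Usage sketch (illustrative, not part of this module). Assuming a connected client
# instance that exposes this namespace as `client.features`, as with the other
# namespaced clients:
#
#     client.features.get_features(master_timeout="30s")
#     client.features.reset_features()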
| 35.322034
| 81
| 0.708733
|
5a45d617d7bc2dd8ccc82467df23a52b5c1f7d5d
| 2,195
|
py
|
Python
|
setup.py
|
friikjones/keepluggable
|
9a32a885ca617468a2a8ab932b9123e55d490677
|
[
"MIT"
] | null | null | null |
setup.py
|
friikjones/keepluggable
|
9a32a885ca617468a2a8ab932b9123e55d490677
|
[
"MIT"
] | null | null | null |
setup.py
|
friikjones/keepluggable
|
9a32a885ca617468a2a8ab932b9123e55d490677
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Installer for keepluggable."""
from codecs import open
from sys import version_info
from setuptools import setup, find_packages
# http://peak.telecommunity.com/DevCenter/setuptools#developer-s-guide
with open("README.rst", encoding="utf-8") as f:
long_description = f.read()
requires = [ # Each backend may have additional dependencies.
"bag >= 3.0.0",
"kerno",
"pydantic > 1.4a, < 1.5a",
]
if version_info[:2] < (3, 4):
requires.append("pathlib") # 'enum34'
setup(
name="keepluggable",
version="0.8.2.dev1",
description="Manages storage of images and other files, with metadata."
" Also offers an HTTP API done on Pyramid.",
long_description=long_description,
classifiers=[ # https://pypi.org/pypi?:action=list_classifiers
"Development Status :: 4 - Beta",
# "Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
# 'Programming Language :: Python :: Implementation :: PyPy',
"Framework :: Pyramid",
"Topic :: Database",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Multimedia :: Graphics :: Graphics Conversion",
"Topic :: Software Development :: Libraries :: Python Modules",
],
author="Nando Florestan",
author_email="nandoflorestan@gmail.com",
url="https://github.com/nandoflorestan/keepluggable",
keywords="web pylons pyramid images store thumbnails",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
# tests_require=requires,
# test_suite="keepluggable",
# entry_points="""\
# [paste.app_factory]
# main = keepluggable:main
# """,
license="MIT",
)
| 33.769231
| 75
| 0.638269
|
0ac1a40a18839c3d15113dc0a2bc38dd5dbb1d9d
| 3,235
|
py
|
Python
|
profiles_project/settings.py
|
achrafel1/profiles-rest-api
|
8fba50caa513a3e6553805e030f65b8d3f3c39c1
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
achrafel1/profiles-rest-api
|
8fba50caa513a3e6553805e030f65b8d3f3c39c1
|
[
"MIT"
] | 4
|
2020-06-27T17:32:56.000Z
|
2022-02-10T09:41:41.000Z
|
profiles_project/settings.py
|
achrafel1/profiles-rest-api
|
8fba50caa513a3e6553805e030f65b8d3f3c39c1
|
[
"MIT"
] | null | null | null |
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mk20l&intl(fxjncf-2#jc*$y5s$@p6_##hqkm()kp))%)dvc-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
| 25.88
| 91
| 0.698609
|
7f02c00b6adfbd6b86fa2199bd7541ad43006417
| 3,070
|
py
|
Python
|
test.py
|
adit98/Rainbow
|
928cd4afd0718c956d8cd5b47ea89990e6e2e26f
|
[
"MIT"
] | null | null | null |
test.py
|
adit98/Rainbow
|
928cd4afd0718c956d8cd5b47ea89990e6e2e26f
|
[
"MIT"
] | null | null | null |
test.py
|
adit98/Rainbow
|
928cd4afd0718c956d8cd5b47ea89990e6e2e26f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import division
import os
import plotly
from plotly.graph_objs import Scatter
from plotly.graph_objs.scatter import Line
import torch
from env import Env, MinigridEnv
# Test DQN
def test(args, T, dqn, val_mem, metrics, results_dir, evaluate=False):
if args.minigrid:
env = MinigridEnv(args)
else:
env = Env(args)
env.eval()
metrics['steps'].append(T)
T_rewards, T_Qs = [], []
# Test performance over several episodes
done = True
for _ in range(args.evaluation_episodes):
while True:
if done:
state, reward_sum, done = env.reset(), 0, False
action = dqn.act_e_greedy(state) # Choose an action ε-greedily
state, reward, done = env.step(action) # Step
reward_sum += reward
if args.render:
env.render()
if done:
T_rewards.append(reward_sum)
break
env.close()
# Test Q-values over validation memory
for state in val_mem: # Iterate over valid states
T_Qs.append(dqn.evaluate_q(state))
avg_reward, avg_Q = sum(T_rewards) / len(T_rewards), sum(T_Qs) / len(T_Qs)
if not evaluate:
# Save model parameters if improved
if avg_reward > metrics['best_avg_reward']:
metrics['best_avg_reward'] = avg_reward
dqn.save(results_dir)
# Append to results and save metrics
metrics['rewards'].append(T_rewards)
metrics['Qs'].append(T_Qs)
torch.save(metrics, os.path.join(results_dir, 'metrics.pth'))
# Plot
_plot_line(metrics['steps'], metrics['rewards'], 'Reward', path=results_dir)
_plot_line(metrics['steps'], metrics['Qs'], 'Q', path=results_dir)
# Return average reward and Q-value
return avg_reward, avg_Q
# Plots min, max and mean + standard deviation bars of a population over time
def _plot_line(xs, ys_population, title, path=''):
max_colour, mean_colour, std_colour, transparent = 'rgb(0, 132, 180)', 'rgb(0, 172, 237)', 'rgba(29, 202, 255, 0.2)', 'rgba(0, 0, 0, 0)'
ys = torch.tensor(ys_population, dtype=torch.float32)
ys_min, ys_max, ys_mean, ys_std = ys.min(1)[0].squeeze(), ys.max(1)[0].squeeze(), ys.mean(1).squeeze(), ys.std(1).squeeze()
ys_upper, ys_lower = ys_mean + ys_std, ys_mean - ys_std
trace_max = Scatter(x=xs, y=ys_max.numpy(), line=Line(color=max_colour, dash='dash'), name='Max')
trace_upper = Scatter(x=xs, y=ys_upper.numpy(), line=Line(color=transparent), name='+1 Std. Dev.', showlegend=False)
trace_mean = Scatter(x=xs, y=ys_mean.numpy(), fill='tonexty', fillcolor=std_colour, line=Line(color=mean_colour), name='Mean')
trace_lower = Scatter(x=xs, y=ys_lower.numpy(), fill='tonexty', fillcolor=std_colour, line=Line(color=transparent), name='-1 Std. Dev.', showlegend=False)
trace_min = Scatter(x=xs, y=ys_min.numpy(), line=Line(color=max_colour, dash='dash'), name='Min')
plotly.offline.plot({
'data': [trace_upper, trace_mean, trace_lower, trace_min, trace_max],
'layout': dict(title=title, xaxis={'title': 'Step'}, yaxis={'title': title})
}, filename=os.path.join(path, title + '.html'), auto_open=False)
| 36.987952
| 156
| 0.683713
|
bd376b96db0cad9d84d181a55a0a136cf5d81935
| 2,993
|
py
|
Python
|
tests/test_changelog_file.py
|
KarstenSiemer/gordian
|
2724b32e7853912bae43536d150e1da8aaed69f1
|
[
"Apache-2.0"
] | 55
|
2020-01-07T18:00:06.000Z
|
2022-03-23T08:59:13.000Z
|
tests/test_changelog_file.py
|
KarstenSiemer/gordian
|
2724b32e7853912bae43536d150e1da8aaed69f1
|
[
"Apache-2.0"
] | 31
|
2020-01-07T18:21:59.000Z
|
2022-03-22T18:57:08.000Z
|
tests/test_changelog_file.py
|
KarstenSiemer/gordian
|
2724b32e7853912bae43536d150e1da8aaed69f1
|
[
"Apache-2.0"
] | 14
|
2020-01-07T17:56:52.000Z
|
2022-01-25T18:43:53.000Z
|
import unittest
from unittest.mock import MagicMock, patch
from gordian.repo import Repo
from gordian.files import ChangelogFile
from .utils import Utils
from datetime import datetime
class TestBaseFile(unittest.TestCase):
def setUp(self):
self.github_file = Utils.create_github_content_file(file='changelog_no_footer.md')
self.mock_git = MagicMock()
self.repo = Repo('test', github=self.mock_git)
self.repo.new_version = '1.2.0'
self.changelog = ChangelogFile(self.github_file, self.repo)
def test_iterable(self):
assert(iter(self.changelog))
def test_assert_added(self):
self.changelog.added('test')
assert(len(self.changelog._added) == 1)
assert(self.changelog._added[0] == ('test', None))
def test_assert_added_with_ticket(self):
self.changelog.added('test', 'something-1234')
assert(len(self.changelog._added) == 1)
assert(self.changelog._added[0] == ('test', 'something-1234'))
def test_assert_updated(self):
self.changelog.updated('test')
assert(len(self.changelog._updated) == 1)
assert(self.changelog._updated[0] == ('test', None))
def test_assert_removed(self):
self.changelog.removed('test')
assert(len(self.changelog._removed) == 1)
assert(self.changelog._removed[0] == ('test', None))
def test_save_changelog(self):
self.changelog.added('test')
self.changelog.save('save file', False)
def test_changelog_format_no_footer(self):
changelog = '''# Changelog
## [1.2.0] - 2020-06-02
### Added
- something
### Removed
- something else [ticket-1234]
## [1.1.0] - 2020-02-15
### Added
- Something new JIRA-10000
- Something else SRE-11000
## [1.0.0] - 2020-02-14
### Changed
- Foobar SRE-9999
### Removed
- SRE-8454 Removed a feature
'''
self.changelog.added('something')
self.changelog.removed('something else', 'ticket-1234')
with patch('gordian.files.changelog_file.ChangelogFile._format_date', return_value=datetime(2020, 6, 2).strftime('%Y-%m-%d')):
assert(self.changelog._dump() == changelog)
def test_changelog_format_with_footer(self):
self.github_file = Utils.create_github_content_file(file='changelog_with_footer.md')
self.changelog = ChangelogFile(self.github_file, self.repo)
changelog = '''# Changelog
## [1.2.0] - 2020-06-02
### Added
- something
### Removed
- something else [ticket-1234]
## [1.1.0] - 2020-02-15
### Added
- Something new JIRA-10000
- Something else SRE-11000
## [1.0.0] - 2020-02-14
### Changed
- Foobar SRE-9999
### Removed
- SRE-8454 Removed a feature
this is a footer'''
self.changelog.added('something')
self.changelog.removed('something else', 'ticket-1234')
with patch('gordian.files.changelog_file.ChangelogFile._format_date', return_value=datetime(2020, 6, 2).strftime('%Y-%m-%d')):
assert(self.changelog._dump() == changelog)
| 30.85567
| 134
| 0.668226
|
756e7bb5df0646931e18630f9f84cc31eb1f472c
| 4,608
|
py
|
Python
|
fram/fram_spi.py
|
mcauser/micropython_eeprom
|
65ac90a559aa096ae91d24edaed87361aa833bb5
|
[
"MIT"
] | 40
|
2019-12-12T20:35:09.000Z
|
2022-02-23T16:08:48.000Z
|
fram/fram_spi.py
|
mcauser/micropython_eeprom
|
65ac90a559aa096ae91d24edaed87361aa833bb5
|
[
"MIT"
] | 10
|
2020-02-11T08:35:53.000Z
|
2022-02-19T10:01:09.000Z
|
fram/fram_spi.py
|
peterhinch/micropython_eeprom
|
65ac90a559aa096ae91d24edaed87361aa833bb5
|
[
"MIT"
] | 18
|
2019-12-11T13:56:45.000Z
|
2022-03-24T20:12:25.000Z
|
# fram_spi.py Supports Fujitsu 256KiB and 512KiB FRAM devices
# M85RS2MT Adafruit https://www.adafruit.com/product/4718
# M85RS4MT Adafruit https://www.adafruit.com/product/4719
# These chips are almost identical. Command sets are identical.
# Product ID 1st byte, LS 4 bits is density 0x8 == 2MiB 0x9 == 4MiB
# Released under the MIT License (MIT). See LICENSE.
# Copyright (c) 2020 Peter Hinch
from micropython import const
from bdevice import BlockDevice
# import time # for sleep command
# Command set
_WREN = const(6)
_WRDI = const(4)
_RDSR = const(5) # Read status reg
_WRSR = const(1)
_READ = const(3)
_WRITE = const(2)
_RDID = const(0x9f)
# _FSTRD = const(0x0b) No obvious difference to _READ
_SLEEP = const(0xb9)
class FRAM(BlockDevice):
def __init__(self, spi, cspins, size=512, verbose=True, block_size=9):
if size not in (256, 512):
raise ValueError('FRAM size must be 256 or 512')
super().__init__(block_size, len(cspins), size * 1024)
self._spi = spi
self._cspins = cspins
self._ccs = None # Chip select Pin object for current chip
self._bufp = bytearray(5) # instruction + 3 byte address + 1 byte value
mvp = memoryview(self._bufp) # cost-free slicing
self._mvp = mvp
# Check hardware
density = 8 if size == 256 else 9
for n, cs in enumerate(cspins):
mvp[:] = b'\0\0\0\0\0'
mvp[0] = _RDID
cs(0)
self._spi.write_readinto(mvp, mvp)
cs(1)
# Ignore bits labelled "proprietary"
if mvp[1] != 4 or mvp[2] != 0x7f:
s = 'FRAM not found at cspins[{}].'
raise RuntimeError(s.format(n))
if (mvp[3] & 0x1f) != density:
s = 'FRAM at cspins[{}] is incorrect size.'
raise RuntimeError(s.format(n))
if verbose:
s = 'Total FRAM size {} bytes in {} devices.'
print(s.format(self._a_bytes, n + 1))
# Set up status register on each chip
for cs in cspins:
self._wrctrl(cs, True)
mvp[0] = _WRSR
mvp[1] = 0 # No block protect or SR protect
cs(0)
self._spi.write(mvp[:2])
cs(1)
self._wrctrl(cs, False) # Disable write to array
for n, cs in enumerate(self._cspins):
mvp[0] = _RDSR
cs(0)
self._spi.write_readinto(mvp[:2], mvp[:2])
cs(1)
if mvp[1]:
s = 'FRAM has bad status at cspins[{}].'
raise RuntimeError(s.format(n))
def _wrctrl(self, cs, en): # Enable/Disable device write
mvp = self._mvp
mvp[0] = _WREN if en else _WRDI
cs(0)
self._spi.write(mvp[:1])
cs(1)
#def sleep(self, on):
#mvp = self._mvp
#mvp[0] = _SLEEP
#for cs in self._cspins:
#cs(0)
#if on:
#self._spi.write(mvp[:1])
#else:
#time.sleep_us(500)
#cs(1)
# Given an address, set current chip select and address buffer.
# Return the number of bytes that can be processed in the current chip.
def _getaddr(self, addr, nbytes):
if addr >= self._a_bytes:
raise RuntimeError("FRAM Address is out of range")
ca, la = divmod(addr, self._c_bytes) # ca == chip no, la == offset into chip
self._ccs = self._cspins[ca] # Current chip select
mvp = self._mvp
mvp[1] = la >> 16
mvp[2] = (la >> 8) & 0xff
mvp[3] = la & 0xff
pe = (addr & ~0xff) + 0x100 # byte 0 of next chip
return min(nbytes, pe - la)
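# Worked example of the split above (comments only, hypothetical values): with two
# 512KiB chips, self._c_bytes == 0x80000, so addr == 0x80010 gives ca == 1 and
# la == 0x10; the transfer is steered to cspins[1] and the 3-byte address 0x000010
# is loaded into mvp[1:4].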
# Interface to bdevice
def readwrite(self, addr, buf, read):
nbytes = len(buf)
mvb = memoryview(buf)
mvp = self._mvp
start = 0 # Offset into buf.
while nbytes > 0:
npage = self._getaddr(addr, nbytes) # No of bytes that fit on current chip
cs = self._ccs
if read:
mvp[0] = _READ
cs(0)
self._spi.write(mvp[:4])
self._spi.readinto(mvb[start : start + npage])
cs(1)
else:
self._wrctrl(cs, True)
mvp[0] = _WRITE
cs(0)
self._spi.write(mvp[:4])
self._spi.write(mvb[start: start + npage])
cs(1)
self._wrctrl(cs, False)
nbytes -= npage
start += npage
addr += npage
return buf
| 34.38806
| 87
| 0.534288
|
c22a652f82207317c9583d6fc273fbdfee9560b3
| 819
|
py
|
Python
|
2 - BitFriend's auth/authPwn.py
|
lfontesm/Reverse-Engineering-Challenges
|
a317977002cd51bea2434690ddefd38ce3cc73c4
|
[
"WTFPL"
] | 1
|
2021-01-11T17:13:23.000Z
|
2021-01-11T17:13:23.000Z
|
2 - BitFriend's auth/authPwn.py
|
lfontesm/Reverse-Engineering-Challenges
|
a317977002cd51bea2434690ddefd38ce3cc73c4
|
[
"WTFPL"
] | null | null | null |
2 - BitFriend's auth/authPwn.py
|
lfontesm/Reverse-Engineering-Challenges
|
a317977002cd51bea2434690ddefd38ce3cc73c4
|
[
"WTFPL"
] | null | null | null |
#!/usr/bin/python
from pwn import *
import re
offsetFromStack=0x555555556030-0x55555555519c
print('Offset from stack string to authenticated(): '+hex(offsetFromStack))
proc=process('./auth')
# Exploiting format string vuln to get 5 items on the stack
proc.sendline(b"%p %p %p %p %p")
procAnswer=proc.recvline()
print(procAnswer)
print()
# Get the stack address from the program's response
stackAddr=procAnswer.split(b' ')[7]
print('Address of stack string: '+str(stackAddr))
print()
# Calculate offset of authenticated from the address of string in stack
stackAddr=int(stackAddr, 0) - offsetFromStack
# Create payload
payload=b'A'*0x68
payload+=p64(stackAddr)
print('Payload to send: '+str(payload))
print()
# Send payload and receive process response
proc.sendline(payload)
print(proc.recvall())
proc.close()
| 22.75
| 75
| 0.757021
|
eb7cfa2c8d56d1ddfb8644e838c7af95b936c97a
| 8,027
|
py
|
Python
|
official/cv/resnet/src/momentum.py
|
mindspore-ai/models
|
9127b128e2961fd698977e918861dadfad00a44c
|
[
"Apache-2.0"
] | 77
|
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/cv/resnet/src/momentum.py
|
mindspore-ai/models
|
9127b128e2961fd698977e918861dadfad00a44c
|
[
"Apache-2.0"
] | 3
|
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/cv/resnet/src/momentum.py
|
mindspore-ai/models
|
9127b128e2961fd698977e918861dadfad00a44c
|
[
"Apache-2.0"
] | 24
|
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""momentum"""
import mindspore as ms
import mindspore.ops as ops
from mindspore.common.parameter import Parameter
from mindspore.common.tensor import Tensor
from mindspore._checkparam import Validator
from mindspore.nn.optim.optimizer import Optimizer
_momentum_opt = ops.MultitypeFuncGraph("momentum_opt")
@_momentum_opt.register("Function", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor")
def _tensor_run_opt_ext(opt, weight_decay, scale, momentum, learning_rate, gradient, weight, moment):
"""Apply momentum optimizer to the weight parameter using Tensor."""
success = ops.depend(True, opt(weight_decay, scale, weight, moment, learning_rate, gradient, momentum))
return success
class Momentum(Optimizer):
r"""
Implements the Momentum algorithm.
Refer to the paper on the importance of initialization and momentum in deep learning for more details.
.. math::
v_{t+1} = v_{t} \ast u + gradients
If use_nesterov is True:
.. math::
p_{t+1} = p_{t} - (grad \ast lr + v_{t+1} \ast u \ast lr)
If use_nesterov is False:
.. math::
p_{t+1} = p_{t} - lr \ast v_{t+1}
Here, grad, lr, p, v and u denote the gradients, learning_rate, params, moments and momentum, respectively.
Note:
When separating parameter groups, the weight decay in each group will be applied on the parameters if the
weight decay is positive. When not separating parameter groups, the `weight_decay` in the API will be applied
on the parameters without 'beta' or 'gamma' in their names if `weight_decay` is positive.
To improve parameter groups performance, the customized order of parameters can be supported.
Args:
params (Union[list[Parameter], list[dict]]): When the `params` is a list of `Parameter` which will be updated,
the element in `params` must be class `Parameter`. When the `params` is a list of `dict`, the "params",
"lr", "weight_decay" and "order_params" are the keys can be parsed.
- params: Required. The value must be a list of `Parameter`.
- lr: Optional. If "lr" in the keys, the value of corresponding learning rate will be used.
If not, the `learning_rate` in the API will be used.
- weight_decay: Optional. If "weight_decay" in the keys, the value of corresponding weight decay
will be used. If not, the `weight_decay` in the API will be used.
- order_params: Optional. If "order_params" in the keys, the value must be the order of parameters and
the order will be followed in optimizer. There are no other keys in the `dict` and the parameters which
in the value of 'order_params' must be in one of group parameters.
learning_rate (Union[float, Tensor, Iterable, LearningRateSchedule]): A value or a graph for the learning rate.
When the learning_rate is an Iterable or a Tensor in a 1D dimension, use dynamic learning rate, then
the i-th step will take the i-th value as the learning rate. When the learning_rate is LearningRateSchedule,
use dynamic learning rate, the i-th learning rate will be calculated during the process of training
according to the formula of LearningRateSchedule. When the learning_rate is a float or a Tensor in a zero
dimension, use fixed learning rate. Other cases are not supported. The float learning rate must be
equal to or greater than 0. If the type of `learning_rate` is int, it will be converted to float.
momentum (float): Hyperparameter of type float, means momentum for the moving average.
It must be at least 0.0.
weight_decay (int, float): Weight decay (L2 penalty). It must be equal to or greater than 0.0. Default: 0.0.
loss_scale (int, float): A floating point value for the loss scale. It must be greater than 0.0. Default: 1.0.
use_nesterov (bool): Enable Nesterov momentum. Default: False.
Inputs:
- **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`.
Outputs:
tuple[bool], all elements are True.
Raises:
ValueError: If the momentum is less than 0.0.
TypeError: If the momentum is not a float or use_nesterov is not a bool.
Supported Platforms:
``GPU``
Examples:
>>> net = Net()
>>> #1) All parameters use the same learning rate and weight decay
>>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9)
>>>
>>> #2) Use parameter groups and set different values
>>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params()))
>>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params()))
>>> group_params = [{'params': conv_params, 'weight_decay': 0.01},
... {'params': no_conv_params, 'lr': 0.01},
... {'order_params': net.trainable_params()}]
>>> optim = Momentum(group_params, learning_rate=0.1, momentum=0.9, weight_decay=0.0)
>>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01.
>>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0.
>>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
>>>
>>> loss = nn.SoftmaxCrossEntropyWithLogits()
>>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None)
"""
def __init__(self, params, learning_rate, momentum, weight_decay=0.0, loss_scale=1.0, use_nesterov=False):
super(Momentum, self).__init__(learning_rate, params, weight_decay, loss_scale)
Validator.check_value_type("momentum", momentum, [float], self.cls_name)
if isinstance(momentum, float) and momentum < 0.0:
raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum))
self.momentum = Parameter(Tensor(momentum, ms.float32), name="momentum")
self.params = self.parameters
self.use_nesterov = Validator.check_bool(use_nesterov)
self.moments = self.params.clone(prefix="moments", init='zeros')
self.hyper_map = ops.HyperMap()
# Use FusedWeightScaleApplyMomentum to avoid extra kernel launch.
self.opt = ops.FusedWeightScaleApplyMomentum()
def construct(self, gradients):
params = self.params
moments = self.moments
weight_decay = Tensor(0.0, ms.float32)
scale = Tensor(1.0, ms.float32)
if self.exec_weight_decay:
weight_decay = self.weight_decay_tensor
if self.need_scale:
scale = self.reciprocal_scale
lr = self.get_lr()
if self.is_group_lr:
success = self.hyper_map(ops.partial(_momentum_opt, self.opt, weight_decay, scale, self.momentum),
lr, gradients, params, moments)
else:
success = self.hyper_map(ops.partial(_momentum_opt, self.opt, weight_decay, scale, self.momentum, lr),
gradients, params, moments)
return success
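# Illustrative sketch (not part of the original module): the update rule applied by the fused momentum
# kernel above, written with plain Python floats so the roles of `momentum`, `weight_decay`, `loss_scale`
# and `use_nesterov` are easy to see. Variable names are chosen for the example only, and the exact order
# of operations inside the fused kernel may differ.
def momentum_update_sketch(param, grad, moment, lr, momentum,
                           weight_decay=0.0, loss_scale=1.0, use_nesterov=False):
    grad = grad / loss_scale + weight_decay * param   # undo loss scaling, add the L2 penalty
    moment = momentum * moment + grad                 # moving average of the gradients
    if use_nesterov:
        param = param - lr * (grad + momentum * moment)
    else:
        param = param - lr * moment
    return param, moment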
| 52.464052
| 120
| 0.664881
|
26fef4bcd6d8c54f2027583d51d8929061f36b20
| 3,780
|
py
|
Python
|
src/scripts/inferencing/treelite_python/score.py
|
microsoft/lightgbm-benchmark
|
286668d698d9d166857f924ecb775d5de224d489
|
[
"MIT"
] | 13
|
2021-08-20T01:03:51.000Z
|
2022-02-12T05:34:46.000Z
|
src/scripts/inferencing/treelite_python/score.py
|
microsoft/lightgbm-benchmark
|
286668d698d9d166857f924ecb775d5de224d489
|
[
"MIT"
] | 199
|
2021-08-21T21:18:53.000Z
|
2022-03-27T23:08:44.000Z
|
src/scripts/inferencing/treelite_python/score.py
|
microsoft/lightgbm-benchmark
|
286668d698d9d166857f924ecb775d5de224d489
|
[
"MIT"
] | 4
|
2021-08-20T06:53:26.000Z
|
2022-01-24T22:22:39.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
TreeLite/Python inferencing script
"""
import os
import sys
import argparse
import logging
import numpy
from distutils.util import strtobool
import pandas as pd
import treelite, treelite_runtime
# Add the right path to PYTHONPATH
# so that you can import from common.*
COMMON_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
if COMMON_ROOT not in sys.path:
print(f"Adding {COMMON_ROOT} to PYTHONPATH")
sys.path.append(str(COMMON_ROOT))
# useful imports from common
from common.components import RunnableScript
from common.io import input_file_path
class TreeLightInferencingScript(RunnableScript):
def __init__(self):
super().__init__(
task = 'score',
framework = 'treelite_python',
framework_version = treelite.__version__
)
@classmethod
def get_arg_parser(cls, parser=None):
"""Adds component/module arguments to a given argument parser.
Args:
parser (argparse.ArgumentParser): an argument parser instance
Returns:
ArgumentParser: the argument parser instance
Notes:
if parser is None, creates a new parser instance
"""
# add generic arguments
parser = RunnableScript.get_arg_parser(parser)
group_i = parser.add_argument_group("Input Data")
group_i.add_argument("--data",
required=True, type=input_file_path, help="Inferencing data location (file path)")
group_i.add_argument("--so_path",
required=False, default = "./mymodel.so" , help="full path to model so")
group_i.add_argument("--output",
required=False, default=None, type=str, help="Inferencing output location (file path)")
group_params = parser.add_argument_group("Scoring parameters")
group_params.add_argument("--num_threads",
required=False, default=1, type=int, help="number of threads")
return parser
def run(self, args, logger, metrics_logger, unknown_args):
"""Run script with arguments (the core of the component)
Args:
args (argparse.namespace): command line arguments provided to script
logger (logging.getLogger() for this script)
metrics_logger (common.metrics.MetricLogger)
unknown_args (list[str]): list of arguments not recognized during argparse
"""
# record relevant parameters
metrics_logger.log_parameters(
num_threads=args.num_threads
)
if args.output:
# make sure the output argument exists
os.makedirs(args.output, exist_ok=True)
# and create your own file inside the output
args.output = os.path.join(args.output, "predictions.txt")
logger.info(f"Loading data for inferencing")
with metrics_logger.log_time_block("time_data_loading"):
my_data = pd.read_csv(args.data).to_numpy()
predictor = treelite_runtime.Predictor(
args.so_path,
verbose=True,
nthread=args.num_threads
)
dmat = treelite_runtime.DMatrix(my_data)
logger.info(f"Running .predict()")
with metrics_logger.log_time_block("time_inferencing"):
predictor.predict(dmat)
def get_arg_parser(parser=None):
""" To ensure compatibility with shrike unit tests """
return TreeLightInferencingScript.get_arg_parser(parser)
def main(cli_args=None):
""" To ensure compatibility with shrike unit tests """
TreeLightInferencingScript.main(cli_args)
if __name__ == "__main__":
main()
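# Example invocation (illustrative only; the data path, model path and thread count below are
# placeholders, not files shipped with this benchmark):
#
#   python score.py --data ./inferencing_data.csv --so_path ./mymodel.so --num_threads 4 --output ./outputs
#
# A minimal standalone sketch of the treelite_runtime calls used in run() above:
#
#   import numpy, treelite_runtime
#   predictor = treelite_runtime.Predictor("./mymodel.so", nthread=4)
#   dmat = treelite_runtime.DMatrix(numpy.random.rand(10, 5))
#   predictions = predictor.predict(dmat)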
| 32.586207
| 99
| 0.654497
|
7f7878e1ce9a81c1a85438a8f5a7f5d4e079a782
| 2,644
|
py
|
Python
|
ucsmsdk/mometa/firmware/FirmwareServerChassisConstraint.py
|
Curlyfingers/ucsmsdk
|
982ff2d8faa12ffb88e1f8cba98cf5749f05c93d
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/firmware/FirmwareServerChassisConstraint.py
|
Curlyfingers/ucsmsdk
|
982ff2d8faa12ffb88e1f8cba98cf5749f05c93d
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/firmware/FirmwareServerChassisConstraint.py
|
Curlyfingers/ucsmsdk
|
982ff2d8faa12ffb88e1f8cba98cf5749f05c93d
|
[
"Apache-2.0"
] | null | null | null |
"""This module contains the general information for FirmwareServerChassisConstraint ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class FirmwareServerChassisConstraintConsts:
pass
class FirmwareServerChassisConstraint(ManagedObject):
"""This is FirmwareServerChassisConstraint class."""
consts = FirmwareServerChassisConstraintConsts()
naming_props = set([u'serverModel'])
mo_meta = MoMeta("FirmwareServerChassisConstraint", "firmwareServerChassisConstraint", "server-chassis-constraint-[server_model]", VersionMeta.Version323a, "InputOutput", 0x3f, [], ["read-only"], [u'firmwareConstraints'], [], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version323a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"min_cmc_version": MoPropertyMeta("min_cmc_version", "minCmcVersion", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version323a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"server_model": MoPropertyMeta("server_model", "serverModel", "string", VersionMeta.Version323a, MoPropertyMeta.NAMING, 0x10, 1, 510, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version323a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"minCmcVersion": "min_cmc_version",
"rn": "rn",
"sacl": "sacl",
"serverModel": "server_model",
"status": "status",
}
def __init__(self, parent_mo_or_dn, server_model, **kwargs):
self._dirty_mask = 0
self.server_model = server_model
self.child_action = None
self.min_cmc_version = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "FirmwareServerChassisConstraint", parent_mo_or_dn, **kwargs)
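# Illustrative only (not part of the generated module): constructing this managed object requires a parent
# MO or DN plus the naming property `server_model`; the DN and model string below are made-up placeholders,
# not values taken from a real UCS domain.
#
#   mo = FirmwareServerChassisConstraint(
#       parent_mo_or_dn="fw-constraints",    # hypothetical parent DN
#       server_model="UCSC-C240-M5",         # hypothetical server model
#   )
#   # per mo_meta above, the rn becomes "server-chassis-constraint-UCSC-C240-M5"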
| 53.959184
| 248
| 0.680408
|
a1e0b74846bf92de1979de1be368fe7e84b65b1f
| 652
|
py
|
Python
|
app/mail.py
|
fboaventura/flask-boilerplate
|
9f81f1c8d5baddc326a30f64f1d7726dd55c7d4e
|
[
"MIT"
] | null | null | null |
app/mail.py
|
fboaventura/flask-boilerplate
|
9f81f1c8d5baddc326a30f64f1d7726dd55c7d4e
|
[
"MIT"
] | 73
|
2021-03-22T14:24:20.000Z
|
2022-03-31T23:46:50.000Z
|
app/mail.py
|
fboaventura/flask-boilerplate
|
9f81f1c8d5baddc326a30f64f1d7726dd55c7d4e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
#
# Code taken from Miguel Grinberg's Mega Flask Tutorial
# https://github.com/miguelgrinberg/microblog/blob/master/app/email.py
from threading import Thread
from flask import current_app
from flask_mail import Message
from app import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
Thread(target=send_async_email,
args=(current_app._get_current_object(), msg)).start()
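# Illustrative usage (assumes an application context and a configured Flask-Mail instance; the addresses
# and bodies below are placeholders):
#
#   send_email(
#       subject="Password reset",
#       sender="noreply@example.com",
#       recipients=["user@example.com"],
#       text_body="Use the link below to reset your password.",
#       html_body="<p>Use the link below to reset your password.</p>",
#   )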
| 27.166667
| 72
| 0.73773
|
22e39a467b8f6134c405dd4364d918ed38ca8a79
| 1,316
|
py
|
Python
|
test/functional/rpc_uptime.py
|
tradecraftio/tradecraft
|
a014fea4d4656df67aef19e379f10322386cf6f8
|
[
"MIT"
] | 10
|
2019-03-08T04:10:37.000Z
|
2021-08-20T11:55:14.000Z
|
test/functional/rpc_uptime.py
|
tradecraftio/tradecraft
|
a014fea4d4656df67aef19e379f10322386cf6f8
|
[
"MIT"
] | 69
|
2018-11-09T20:29:29.000Z
|
2021-10-05T00:08:36.000Z
|
test/functional/rpc_uptime.py
|
tradecraftio/tradecraft
|
a014fea4d4656df67aef19e379f10322386cf6f8
|
[
"MIT"
] | 7
|
2019-01-21T06:00:18.000Z
|
2021-12-19T16:18:00.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Copyright (c) 2010-2021 The Freicoin Developers
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of version 3 of the GNU Affero General Public License as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""Test the RPC call related to the uptime command.
Test corresponds to code in rpc/server.cpp.
"""
import time
from test_framework.test_framework import FreicoinTestFramework
class UptimeTest(FreicoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
self._test_uptime()
def _test_uptime(self):
wait_time = 10
self.nodes[0].setmocktime(int(time.time() + wait_time))
assert(self.nodes[0].uptime() >= wait_time)
if __name__ == '__main__':
UptimeTest().main()
| 31.333333
| 79
| 0.731003
|
3ea75c0f76c3c032111d20bd197207ac4288a611
| 244
|
py
|
Python
|
models/networks/traceable_network.py
|
Mirevi/face-synthesizer-JVRB
|
3c5774b1c5c981131df21b299389f568502b8ecf
|
[
"BSD-3-Clause"
] | null | null | null |
models/networks/traceable_network.py
|
Mirevi/face-synthesizer-JVRB
|
3c5774b1c5c981131df21b299389f568502b8ecf
|
[
"BSD-3-Clause"
] | null | null | null |
models/networks/traceable_network.py
|
Mirevi/face-synthesizer-JVRB
|
3c5774b1c5c981131df21b299389f568502b8ecf
|
[
"BSD-3-Clause"
] | null | null | null |
from abc import ABC, abstractmethod
from torch import nn
class TraceableNetwork(nn.Module, ABC):
def __init__(self):
super(TraceableNetwork, self).__init__()
@abstractmethod
def input_noise(self, metadata):
pass
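# Illustrative sketch of a concrete subclass (not part of the original module): a traceable network only
# has to provide example input for tracing via `input_noise`. The layer sizes and input shape below are
# placeholders, and a real implementation would derive the shape from `metadata`.
import torch
class DummyTraceableNetwork(TraceableNetwork):
    def __init__(self):
        super(DummyTraceableNetwork, self).__init__()
        self.layer = nn.Linear(16, 16)
    def forward(self, x):
        return self.layer(x)
    def input_noise(self, metadata):
        # metadata is ignored in this sketch
        return torch.randn(1, 16)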
| 18.769231
| 48
| 0.70082
|
e28829675cef1a68c8b6d6f9820c671d0f43f746
| 7,108
|
py
|
Python
|
maskrcnn/engine/demo_process.py
|
kSahatova/MULAN-XAI
|
7f2ede7cc8ad4e772a3cfe7d52f0a710d5c89d5a
|
[
"MIT"
] | null | null | null |
maskrcnn/engine/demo_process.py
|
kSahatova/MULAN-XAI
|
7f2ede7cc8ad4e772a3cfe7d52f0a710d5c89d5a
|
[
"MIT"
] | null | null | null |
maskrcnn/engine/demo_process.py
|
kSahatova/MULAN-XAI
|
7f2ede7cc8ad4e772a3cfe7d52f0a710d5c89d5a
|
[
"MIT"
] | null | null | null |
# Ke Yan, Imaging Biomarkers and Computer-Aided Diagnosis Laboratory,
# National Institutes of Health Clinical Center, July 2019
"""Procedure in the demo mode"""
import os
import numpy as np
from time import time
import torch
import nibabel as nib
from tqdm import tqdm
import cv2
from openpyxl import load_workbook
from maskrcnn.config import cfg
from maskrcnn.data.datasets.load_ct_img import load_prep_img
from maskrcnn.structures.image_list import to_image_list
from maskrcnn.data.datasets.evaluation.DeepLesion.post_process import post_process_results
from maskrcnn.data.datasets.load_ct_img import windowing, windowing_rev
from maskrcnn.utils.draw import draw_results
def exec_model(model):
"""test model on user-provided data, instead of the preset DeepLesion dataset"""
import_tag_data()
model.eval()
device = torch.device(cfg.MODEL.DEVICE)
while True:
info = "Please input the path of a nifti CT volume >> "
while True:
path = input(info)
if not os.path.exists(path):
print('file does not exist!')
continue
try:
print('reading image ...')
nifti_data = nib.load(path)
break
except:
print('load nifti file error!')
while True:
win_sel = input('Window to show, 1:soft tissue, 2:lung, 3: bone >> ')
if win_sel not in ['1', '2', '3']:
continue
win_show = [[-175, 275], [-1500, 500], [-500, 1300]]
win_show = win_show[int(win_sel)-1]
break
vol, spacing, slice_intv = load_preprocess_nifti(nifti_data)
slice_num_per_run = max(1, int(float(cfg.TEST.TEST_SLICE_INTV_MM)/slice_intv+.5))
num_total_slice = vol.shape[2]
total_time = 0
        output_dir = os.path.join(cfg.RESULTS_DIR, path.replace(os.sep, '_'))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
slices_to_process = range(int(slice_num_per_run/2), num_total_slice, slice_num_per_run)
msgs_all = []
print('predicting ...')
for slice_idx in tqdm(slices_to_process):
ims, im_np, im_scale, crop = get_ims(slice_idx, vol, spacing, slice_intv)
im_list = to_image_list(ims, cfg.DATALOADER.SIZE_DIVISIBILITY).to(device)
start_time = time()
with torch.no_grad():
result = model(im_list)
result = [o.to("cpu") for o in result]
info = {'spacing': spacing, 'im_scale': im_scale}
post_process_results(result[0], info)
total_time += time() - start_time
output_fn = os.path.join(output_dir, '%d.png'%(slice_idx+1))
overlay, msgs = gen_output(im_np, result[0], info, win_show)
cv2.imwrite(output_fn, overlay)
msgs_all.append('slice %d\r\n' % (slice_idx+1))
for msg in msgs:
msgs_all.append(msg+'\r\n')
msgs_all.append('\r\n')
with open(os.path.join(output_dir, 'results.txt'), 'w') as f:
f.writelines(msgs_all)
print('result images and text saved to', output_dir)
print('processing time: %d ms per slice' % int(1000.*total_time/len(slices_to_process)))
def import_tag_data():
cellname = lambda row, col: '%s%d' % (chr(ord('A') + col - 1), row)
fn = os.path.join(cfg.PROGDAT_DIR, '%s_%s.xlsx' % ('test_handlabeled', cfg.EXP_NAME))
wb = load_workbook(fn)
sheetnames = wb.sheetnames
sheet = wb[sheetnames[0]]
tags = []
    thresholds = []
    for p in range(2, sheet.max_row):
        tags.append(sheet[cellname(p, 1)].value)
        thresholds.append(float(sheet[cellname(p, 8)].value))
    assert tags == cfg.runtime_info.tag_list
    cfg.runtime_info.tag_sel_val = torch.tensor(thresholds).to(torch.float)
def load_preprocess_nifti(data):
vol = (data.get_data().astype('int32') + 32768).astype('uint16') # to be consistent with png files
# spacing = -data.get_affine()[0,1]
# slice_intv = -data.get_affine()[2,2]
aff = data.get_affine()[:3, :3]
spacing = np.abs(aff[:2, :2]).max()
slice_intv = np.abs(aff[2, 2])
# TODO: Ad-hoc code for normalizing the orientation of the volume.
# The aim is to make vol[:,:,i] an supine right-left slice
# It works for the authors' data, but maybe not suitable for some kinds of nifti files
if np.abs(aff[0, 0]) > np.abs(aff[0, 1]):
vol = np.transpose(vol, (1, 0, 2))
aff = aff[[1, 0, 2], :]
if np.max(aff[0, :2]) > 0:
vol = vol[::-1, :, :]
if np.max(aff[1, :2]) > 0:
vol = vol[:, ::-1, :]
return vol, spacing, slice_intv
def get_ims(slice_idx, vol, spacing, slice_intv):
num_slice = cfg.INPUT.NUM_SLICES * cfg.INPUT.NUM_IMAGES_3DCE
im_np, im_scale, crop = load_prep_img(vol, slice_idx, spacing, slice_intv,
cfg.INPUT.IMG_DO_CLIP, num_slice=num_slice)
im = im_np - cfg.INPUT.PIXEL_MEAN
im = torch.from_numpy(im.transpose((2, 0, 1))).to(dtype=torch.float)
ims = im.split(cfg.INPUT.NUM_IMAGES_3DCE)
return ims, im_np[:, :, int(num_slice/2)+1], im_scale, crop
def gen_output(im, result, info, win_show):
im = windowing_rev(im, cfg.INPUT.WINDOWING)
im = windowing(im, win_show).astype('uint8')
im = cv2.cvtColor(im, cv2.COLOR_GRAY2RGB)
scale = cfg.TEST.VISUALIZE.SHOW_SCALE
im = cv2.resize(im, None, None, fx=scale, fy=scale, interpolation=cv2.INTER_LINEAR)
pred = result.bbox.cpu().numpy()
labels = result.get_field('labels').cpu().numpy()
scores = result.get_field('scores').cpu().numpy()
tag_scores = result.get_field('tag_scores').cpu().numpy()
tag_predictions = result.get_field('tag_predictions').cpu().numpy()
mm2pix = info['im_scale'] / info['spacing'] * scale
contours = result.get_field('contour_mm').cpu().numpy() * mm2pix
contours = [c[c[:, 0] > 0, :] for c in contours]
contours = [c+1*scale for c in contours] # there seems to be a small offset in the mask?
recists = result.get_field('recist_mm').cpu().numpy() * mm2pix
recists += 1*scale # there seems to be a small offset in the mask?
diameters = result.get_field('diameter_mm').cpu().numpy()
pred *= scale
overlay, msgs = draw_results(im, pred, labels, scores, tag_predictions=tag_predictions, tag_scores=tag_scores,
contours=contours, recists=recists, diameters=diameters)
overlay = print_msg_on_img(overlay, msgs)
return overlay, msgs
def print_msg_on_img(overlay, msgs):
txt_height = 20
msg_im = np.zeros((txt_height*cfg.TEST.VISUALIZE.DETECTIONS_PER_IMG+10, overlay.shape[1], 3), dtype=np.uint8)
for p in range(len(msgs)):
msg = msgs[p].split(' | ')
msg = msg[0][7:10] + msg[1][:-2] + ': ' + msg[2]
cv2.putText(msg_im, msg, (0, txt_height*(p+1)),
cv2.FONT_HERSHEY_DUPLEX, fontScale=.5,
color=(255,255,255), thickness=1)
return np.vstack((overlay, msg_im))
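# Worked example of the slice sampling in exec_model above (illustrative numbers only): with
# cfg.TEST.TEST_SLICE_INTV_MM = 2 and a volume whose slice_intv is 0.8 mm,
# slice_num_per_run = max(1, int(2 / 0.8 + .5)) = 3, so inference runs on every third slice,
# starting from slice index int(3 / 2) = 1.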
| 40.617143
| 114
| 0.626055
|
4e11ed63bbfdd2dfd308c23289df476e23bca3bc
| 3,712
|
py
|
Python
|
src/weblayer/wsgi.py
|
thruflo/weblayer
|
24d74f71cedd3855911477ed4952a311c83b0b5f
|
[
"Unlicense"
] | 3
|
2016-07-04T15:27:12.000Z
|
2021-04-30T22:46:13.000Z
|
src/weblayer/wsgi.py
|
thruflo/weblayer
|
24d74f71cedd3855911477ed4952a311c83b0b5f
|
[
"Unlicense"
] | null | null | null |
src/weblayer/wsgi.py
|
thruflo/weblayer
|
24d74f71cedd3855911477ed4952a311c83b0b5f
|
[
"Unlicense"
] | 3
|
2015-09-16T08:55:30.000Z
|
2018-09-23T11:06:01.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" :py:mod:`weblayer.wsgi` provides :py:class:`WSGIApplication`, an
implementation of :py:class:`~weblayer.interfaces.IWSGIApplication` that
adapts :py:class:`~weblayer.interfaces.ISettings` and an
:py:class:`~weblayer.interfaces.IPathRouter`::
>>> settings = {}
>>> path_router = object()
To provide a callable `WSGI`_ application::
>>> application = WSGIApplication(settings, path_router)
.. _`WSGI`: http://www.python.org/dev/peps/pep-0333/
"""
__all__ = [
'WSGIApplication'
]
from zope.component import adapts
from zope.interface import implements
from base import Request, Response
from interfaces import IPathRouter, ISettings, IWSGIApplication
class WSGIApplication(object):
adapts(ISettings, IPathRouter)
implements(IWSGIApplication)
def __init__(
self,
settings,
path_router,
request_class=None,
response_class=None,
default_content_type='text/html; charset=UTF-8'
):
"""
"""
self._settings = settings
self._path_router = path_router
if request_class is None:
self._Request = Request
else:
self._Request = request_class
if response_class is None:
self._Response = Response
else:
self._Response = response_class
self._content_type = default_content_type
def __call__(self, environ, start_response):
""" Checks ``self._path_router`` for a
:py:meth:`~weblayer.interfaces.IPathRouter.match` against the
incoming :py:attr:`~weblayer.interfaces.IRequest.path`::
handler_class, args, kwargs = self._path_router.match(request.path)
If ``handler_class`` is not ``None``, instantiates the
:py:class:`~weblayer.interfaces.IRequestHandler`::
handler = handler_class(request, response, self._settings)
And calls it with ``environ['REQUEST_METHOD']`` and the ``args`` and
``kwargs`` from :py:meth:`~weblayer.interfaces.IPathRouter.match`::
response = handler(environ['REQUEST_METHOD'], *args, **kwargs)
.. note::
            If calling the handler raises an error (which it shouldn't normally do, as
            the handler *should* catch its own errors), returns a minimalist 500
            response.
.. note::
If no match is found, returns a minimalist 404 response. To handle
404 responses more elegantly, define a catch all URL handler.
"""
request = self._Request(environ)
response = self._Response(
request=request,
status=200,
content_type=self._content_type
)
handler_class, args, kwargs = self._path_router.match(request.path)
if handler_class is not None:
handler = handler_class(request, response, self._settings)
try: # handler *should* catch all exceptions
response = handler(environ['REQUEST_METHOD'], *args, **kwargs)
except Exception: # unless deliberately bubbling them up
if environ.get('paste.throw_errors', False):
raise
else:
response.status = 500
else: # to handle 404 nicely, define a catch all url handler
response.status = 404
return response(environ, start_response)
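# Illustrative only: serving the application with the standard library's wsgiref server. The no-op path
# router below is a stand-in so the sketch is self-contained; any real router must provide match(path)
# returning (handler_class, args, kwargs).
#
# from wsgiref.simple_server import make_server
#
# class NullPathRouter(object):
#     def match(self, path):
#         return None, (), {}  # no handler -> minimalist 404 response
#
# application = WSGIApplication({}, NullPathRouter())
# make_server('', 8000, application).serve_forever()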
| 32
| 81
| 0.579741
|
8855ef314d452894a0f7389beba59889d976e63d
| 5,781
|
py
|
Python
|
ProductDataCompiler/src/main/com/rowley/shavekeeper/productdatacompiler/utils/ModelDeDuper.py
|
alphonzo79/ShaveKeeper
|
e6dc98d45caa304db45e660c5d79902a62f2757c
|
[
"MIT"
] | null | null | null |
ProductDataCompiler/src/main/com/rowley/shavekeeper/productdatacompiler/utils/ModelDeDuper.py
|
alphonzo79/ShaveKeeper
|
e6dc98d45caa304db45e660c5d79902a62f2757c
|
[
"MIT"
] | null | null | null |
ProductDataCompiler/src/main/com/rowley/shavekeeper/productdatacompiler/utils/ModelDeDuper.py
|
alphonzo79/ShaveKeeper
|
e6dc98d45caa304db45e660c5d79902a62f2757c
|
[
"MIT"
] | null | null | null |
from src.main.com.rowley.shavekeeper.productdatacompiler.models.ProductConsolidator import ProductConsolidator
from src.main.com.rowley.shavekeeper.productdatacompiler.models.ProductModelByBrandMap import ProductModelByBrandMap
from src.main.com.rowley.shavekeeper.productdatacompiler.web.FileHelper import load_consolidator, save_consolidator, \
save_reconciler, load_file, save_file
reconciled_json = load_file("Reconciler2_Reconciled", "../compiled_files/")
reconciled = ProductModelByBrandMap.from_json(reconciled_json)
consolidated_json = load_file("ConsolidatedProducts2", "../compiled_files/")
base_consolidated = ProductConsolidator.from_json(consolidated_json)
deduped = ProductConsolidator()
total_pre = 0
total_post = 0
for brand in base_consolidated.pre_shaves:
for model in base_consolidated.pre_shaves[brand]:
total_pre += 1
if brand in reconciled.brands and model in reconciled.brands[brand]:
# print "handling brand: " + brand + " model: " + model
consolidated_pre_shave = base_consolidated.pre_shaves[brand][model]
reconciled_pre_shave = reconciled.brands[brand][model]
consolidated_pre_shave["brand"] = reconciled_pre_shave["brand"]
consolidated_pre_shave["model"] = reconciled_pre_shave["model"]
deduped.add_pre_shave(consolidated_pre_shave)
for brand in base_consolidated.soaps:
for model in base_consolidated.soaps[brand]:
total_pre += 1
if brand in reconciled.brands and model in reconciled.brands[brand]:
# print "handling brand: " + brand + " model: " + model
consolidated_soap = base_consolidated.soaps[brand][model]
reconciled_soap = reconciled.brands[brand][model]
consolidated_soap["brand"] = reconciled_soap["brand"]
consolidated_soap["model"] = reconciled_soap["model"]
deduped.add_soap(consolidated_soap)
for brand in base_consolidated.brushes:
for model in base_consolidated.brushes[brand]:
total_pre += 1
if brand in reconciled.brands and model in reconciled.brands[brand]:
# print "handling brand: " + brand + " model: " + model
consolidated_brush = base_consolidated.brushes[brand][model]
reconciled_brush = reconciled.brands[brand][model]
consolidated_brush["brand"] = reconciled_brush["brand"]
consolidated_brush["model"] = reconciled_brush["model"]
deduped.add_brush(consolidated_brush)
for brand in base_consolidated.razors:
for model in base_consolidated.razors[brand]:
total_pre += 1
if brand in reconciled.brands and model in reconciled.brands[brand]:
# print "handling brand: " + brand + " model: " + model
consolidated_razor = base_consolidated.razors[brand][model]
reconciled_razor = reconciled.brands[brand][model]
consolidated_razor["brand"] = reconciled_razor["brand"]
consolidated_razor["model"] = reconciled_razor["model"]
deduped.add_razor(consolidated_razor)
for brand in base_consolidated.blades:
for model in base_consolidated.blades[brand]:
total_pre += 1
if brand in reconciled.brands and model in reconciled.brands[brand]:
# print "handling brand: " + brand + " model: " + model
consolidated_blade = base_consolidated.blades[brand][model]
reconciled_blade = reconciled.brands[brand][model]
consolidated_blade["brand"] = reconciled_blade["brand"]
consolidated_blade["model"] = reconciled_blade["model"]
deduped.add_blade(consolidated_blade)
for brand in base_consolidated.post_shaves:
for model in base_consolidated.post_shaves[brand]:
total_pre += 1
if brand in reconciled.brands and model in reconciled.brands[brand]:
# print "handling brand: " + brand + " model: " + model
consolidated_post_shave = base_consolidated.post_shaves[brand][model]
reconciled_post_shave = reconciled.brands[brand][model]
consolidated_post_shave["brand"] = reconciled_post_shave["brand"]
consolidated_post_shave["model"] = reconciled_post_shave["model"]
deduped.add_post_shave(consolidated_post_shave)
for brand in base_consolidated.after_shaves:
for model in base_consolidated.after_shaves[brand]:
total_pre += 1
if brand in reconciled.brands and model in reconciled.brands[brand]:
# print "handling brand: " + brand + " model: " + model
consolidated_after_shave = base_consolidated.after_shaves[brand][model]
reconciled_after_shave = reconciled.brands[brand][model]
consolidated_after_shave["brand"] = reconciled_after_shave["brand"]
consolidated_after_shave["model"] = reconciled_after_shave["model"]
deduped.add_after_shave(consolidated_after_shave)
for brand in deduped.pre_shaves:
for model in deduped.pre_shaves[brand]:
total_post += 1
for brand in deduped.soaps:
for model in deduped.soaps[brand]:
total_post += 1
for brand in deduped.brushes:
for model in deduped.brushes[brand]:
total_post += 1
for brand in deduped.razors:
for model in deduped.razors[brand]:
total_post += 1
for brand in deduped.blades:
for model in deduped.blades[brand]:
total_post += 1
for brand in deduped.post_shaves:
for model in deduped.post_shaves[brand]:
total_post += 1
for brand in deduped.after_shaves:
for model in deduped.after_shaves[brand]:
total_post += 1
print "Total Pre: " + str(total_pre)
print "Total Post: " + str(total_post)
save_file(deduped, "ConsolidatedProducts3", "../compiled_files/")
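# Illustrative refactoring sketch (not part of the original script): the seven near-identical blocks above
# can be driven by a single helper; the names below mirror the ProductConsolidator attributes used in this
# file.
#
# def dedupe_category(category_map, add_func):
#     count = 0
#     for brand in category_map:
#         for model in category_map[brand]:
#             count += 1
#             if brand in reconciled.brands and model in reconciled.brands[brand]:
#                 item = category_map[brand][model]
#                 matched = reconciled.brands[brand][model]
#                 item["brand"] = matched["brand"]
#                 item["model"] = matched["model"]
#                 add_func(item)
#     return count
#
# total_pre = sum(dedupe_category(m, f) for m, f in [
#     (base_consolidated.pre_shaves, deduped.add_pre_shave),
#     (base_consolidated.soaps, deduped.add_soap),
# ])  # ...and so on for the remaining categories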
| 45.880952
| 118
| 0.700052
|
7b2a6fbcc3f0437df0a5c25d4209a907aa89c15e
| 1,707
|
py
|
Python
|
src/python/pants/backend/native/tasks/cpp_compile.py
|
ghthor/pants
|
450de702414f87f563081ddefaefd8a554de07a3
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/native/tasks/cpp_compile.py
|
ghthor/pants
|
450de702414f87f563081ddefaefd8a554de07a3
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/backend/native/tasks/cpp_compile.py
|
ghthor/pants
|
450de702414f87f563081ddefaefd8a554de07a3
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from pants.backend.native.config.environment import CppCompiler
from pants.backend.native.subsystems.native_compile_settings import CppCompileSettings
from pants.backend.native.subsystems.native_toolchain import NativeToolchain
from pants.backend.native.targets.native_library import CppLibrary
from pants.backend.native.tasks.native_compile import NativeCompile
from pants.util.memo import memoized_property
from pants.util.objects import SubclassesOf
class CppCompile(NativeCompile):
# Compile only C++ library targets.
source_target_constraint = SubclassesOf(CppLibrary)
workunit_label = 'cpp-compile'
@classmethod
def implementation_version(cls):
return super(CppCompile, cls).implementation_version() + [('CppCompile', 0)]
@classmethod
def subsystem_dependencies(cls):
return super(CppCompile, cls).subsystem_dependencies() + (
CppCompileSettings.scoped(cls),
NativeToolchain.scoped(cls),
)
@memoized_property
def _toolchain(self):
return NativeToolchain.scoped_instance(self)
def get_compile_settings(self):
return CppCompileSettings.scoped_instance(self)
def get_compiler(self):
return self._request_single(CppCompiler, self._toolchain)
# FIXME(#5951): don't have any command-line args in the task or in the subsystem -- rather,
# subsystem options should be used to populate an `Executable` which produces its own arguments.
def extra_compile_args(self):
return ['-x', 'c++', '-std=c++11']
| 35.5625
| 98
| 0.783831
|
9ba02eea4fbdbad142e93d071e4a2f525e28b762
| 5,091
|
py
|
Python
|
cogdl/tasks/node_classification.py
|
kwyoke/cogdl
|
df919b4fc7db40f8b035665edbcc7ed59f9d448e
|
[
"MIT"
] | null | null | null |
cogdl/tasks/node_classification.py
|
kwyoke/cogdl
|
df919b4fc7db40f8b035665edbcc7ed59f9d448e
|
[
"MIT"
] | null | null | null |
cogdl/tasks/node_classification.py
|
kwyoke/cogdl
|
df919b4fc7db40f8b035665edbcc7ed59f9d448e
|
[
"MIT"
] | null | null | null |
import copy
import random
from typing import Optional
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
from cogdl import options
from cogdl.datasets import build_dataset
from cogdl.models import build_model
from cogdl.models.supervised_model import SupervisedHomogeneousNodeClassificationModel
from cogdl.trainers.supervised_trainer import (
SupervisedHomogeneousNodeClassificationTrainer,
)
from cogdl.trainers.sampled_trainer import SampledTrainer
from . import BaseTask, register_task
@register_task("node_classification")
class NodeClassification(BaseTask):
"""Node classification task."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
# parser.add_argument("--num-features", type=int)
# fmt: on
def __init__(
self,
args,
dataset=None,
model: Optional[SupervisedHomogeneousNodeClassificationModel] = None,
):
super(NodeClassification, self).__init__(args)
self.args = args
self.model_name = args.model
self.device = args.device_id[0] if not args.cpu else "cpu"
dataset = build_dataset(args) if dataset is None else dataset
self.dataset = dataset
self.data = dataset[0]
args.num_features = dataset.num_features
args.num_classes = dataset.num_classes
args.num_nodes = dataset.data.x.shape[0]
self.model: SupervisedHomogeneousNodeClassificationModel = build_model(args) if model is None else model
self.trainer: Optional[
SupervisedHomogeneousNodeClassificationTrainer
] = self.model.get_trainer(NodeClassification, self.args)(
self.args
) if self.model.get_trainer(
NodeClassification, self.args
) else None
if not self.trainer:
self.optimizer = torch.optim.Adam(
self.model.parameters(), lr=args.lr, weight_decay=args.weight_decay
) if not hasattr(self.model, "get_optimizer") else self.model.get_optimizer(args)
self.data.apply(lambda x: x.to(self.device))
self.model: SupervisedHomogeneousNodeClassificationModel = self.model.to(
self.device
)
self.patience = args.patience
self.max_epoch = args.max_epoch
def train(self):
if self.trainer:
# if issubclass(type(self.trainer), SampledTrainer):
# self.model = self.trainer.fit(self.model, self.dataset)
# else:
# return dict(Acc=self.trainer.fit(self.model, self.dataset.data))
result = self.trainer.fit(self.model, self.dataset)
if isinstance(result, torch.nn.Module):
self.model = result
else:
return result
else:
epoch_iter = tqdm(range(self.max_epoch))
patience = 0
best_score = 0
best_loss = np.inf
max_score = 0
min_loss = np.inf
best_model = copy.deepcopy(self.model)
for epoch in epoch_iter:
self._train_step()
train_acc, _ = self._test_step(split="train")
val_acc, val_loss = self._test_step(split="val")
epoch_iter.set_description(
f"Epoch: {epoch:03d}, Train: {train_acc:.4f}, Val: {val_acc:.4f}"
)
if val_loss <= min_loss or val_acc >= max_score:
if val_loss <= best_loss: # and val_acc >= best_score:
best_loss = val_loss
best_score = val_acc
best_model = copy.deepcopy(self.model)
min_loss = np.min((min_loss, val_loss))
max_score = np.max((max_score, val_acc))
patience = 0
else:
patience += 1
if patience == self.patience:
epoch_iter.close()
break
print(f"Valid accurracy = {best_score}")
self.model = best_model
test_acc, _ = self._test_step(split="test")
val_acc, _ = self._test_step(split="val")
print(f"Test accuracy = {test_acc}")
return dict(Acc=test_acc, ValAcc=val_acc)
def _train_step(self):
self.model.train()
self.optimizer.zero_grad()
self.model.loss(self.data).backward()
self.optimizer.step()
def _test_step(self, split="val", logits=None):
self.model.eval()
logits = logits if logits else self.model.predict(self.data)
if split == "train":
mask = self.data.train_mask
elif split == "val":
mask = self.data.val_mask
else:
mask = self.data.test_mask
loss = F.nll_loss(logits[mask], self.data.y[mask]).item()
pred = logits[mask].max(1)[1]
acc = pred.eq(self.data.y[mask]).sum().item() / mask.sum().item()
return acc, loss
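# Illustrative sketch (not part of cogdl) of the early-stopping rule used in train() above, isolated from
# the model code: training stops once the validation metrics have failed to improve for `patience`
# consecutive epochs.
#
# def should_stop(history, patience):
#     best_loss, best_acc, waited = float("inf"), 0.0, 0
#     for val_loss, val_acc in history:
#         if val_loss <= best_loss or val_acc >= best_acc:
#             best_loss = min(best_loss, val_loss)
#             best_acc = max(best_acc, val_acc)
#             waited = 0
#         else:
#             waited += 1
#         if waited == patience:
#             return True
#     return False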
| 36.625899
| 112
| 0.593989
|
cbb80e41628cb773c1a6ead822e6dff65b354e12
| 3,466
|
py
|
Python
|
app/logic/bluesteel/migrations/0001_initial.py
|
imvu/bluesteel
|
ab52133249a693b3cd2d8593c5d47408a3b0fce6
|
[
"MIT"
] | 10
|
2017-01-13T06:28:04.000Z
|
2020-11-18T13:00:26.000Z
|
app/logic/bluesteel/migrations/0001_initial.py
|
imvu/bluesteel
|
ab52133249a693b3cd2d8593c5d47408a3b0fce6
|
[
"MIT"
] | null | null | null |
app/logic/bluesteel/migrations/0001_initial.py
|
imvu/bluesteel
|
ab52133249a693b3cd2d8593c5d47408a3b0fce6
|
[
"MIT"
] | 2
|
2018-03-29T14:10:53.000Z
|
2019-11-20T08:21:57.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('gitrepo', '0001_initial'),
('commandrepo', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='BluesteelCommandEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('order', models.IntegerField(default=0)),
('command', models.CharField(default=b'', max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BluesteelCommandSetEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('command_set_type', models.IntegerField(default=0, choices=[(0, b'CLONE'), (1, b'FETCH')])),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BluesteelLayoutEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(default=b'', max_length=50)),
('archive', models.CharField(default=b'', max_length=50)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BluesteelProjectEntry',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(default=b'', max_length=50)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('command_group', models.ForeignKey(related_name='bluesteel_command_group', to='commandrepo.CommandGroupEntry')),
('git_project', models.ForeignKey(related_name='bluesteel_git_project', to='gitrepo.GitProjectEntry')),
('layout', models.ForeignKey(related_name='bluesteel_layout', to='bluesteel.BluesteelLayoutEntry')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='bluesteelcommandsetentry',
name='bluesteel_project',
field=models.ForeignKey(related_name='bluesteel_project', to='bluesteel.BluesteelProjectEntry'),
preserve_default=True,
),
migrations.AddField(
model_name='bluesteelcommandentry',
name='bluesteel_command_set',
field=models.ForeignKey(related_name='bluesteel_command_set', to='bluesteel.BluesteelCommandSetEntry'),
preserve_default=True,
),
]
| 42.790123
| 129
| 0.577323
|
6dea04f37ca647e3e26ff189cf799bc4d1fea796
| 1,732
|
py
|
Python
|
_2020/toboggan/test_toboggan.py
|
dcsparkes/adventofcode
|
e8bf8cef1d8757ad8981dde8dc76f8f7ec396be5
|
[
"Unlicense"
] | null | null | null |
_2020/toboggan/test_toboggan.py
|
dcsparkes/adventofcode
|
e8bf8cef1d8757ad8981dde8dc76f8f7ec396be5
|
[
"Unlicense"
] | null | null | null |
_2020/toboggan/test_toboggan.py
|
dcsparkes/adventofcode
|
e8bf8cef1d8757ad8981dde8dc76f8f7ec396be5
|
[
"Unlicense"
] | null | null | null |
import unittest
import toboggan
class TestToboggan(unittest.TestCase):
def test_testInput_default(self):
self.assertEqual(7, toboggan.traverse("test.txt"))
def test_testInput_slope0(self):
self.assertEqual(2, toboggan.traverse("test.txt", (1, 1)))
def test_testInput_slope1(self):
self.assertEqual(7, toboggan.traverse("test.txt", (1, 3)))
def test_testInput_slope2(self):
self.assertEqual(3, toboggan.traverse("test.txt", (1, 5)))
def test_testInput_slope3(self):
self.assertEqual(4, toboggan.traverse("test.txt", (1, 7)))
def test_testInput_slope4(self):
self.assertEqual(2, toboggan.traverse("test.txt", (2, 1)))
def test_Input_default(self):
self.assertEqual(254, toboggan.traverse("input.txt"))
def test_Input_slope0(self):
self.assertEqual(63, toboggan.traverse("input.txt", (1, 1)))
def test_Input_slope1(self):
self.assertEqual(254, toboggan.traverse("input.txt", (1, 3)))
def test_Input_slope2(self):
self.assertEqual(62, toboggan.traverse("input.txt", (1, 5)))
def test_Input_slope3(self):
self.assertEqual(56, toboggan.traverse("input.txt", (1, 7)))
def test_Input_slope4(self):
self.assertEqual(30, toboggan.traverse("input.txt", (2, 1)))
def test_Input_multipleSlopes(self):
multiple = toboggan.traverse("input.txt", (1, 1))
multiple *= toboggan.traverse("input.txt", (1, 3))
multiple *= toboggan.traverse("input.txt", (1, 5))
multiple *= toboggan.traverse("input.txt", (1, 7))
multiple *= toboggan.traverse("input.txt", (2, 1))
self.assertEqual(30, multiple)
if __name__ == '__main__':
unittest.main()
| 32.679245
| 69
| 0.654734
|
79b2be827e0b168f30d9cba7e0bd55ff89b9fe73
| 256
|
py
|
Python
|
comment/urls.py
|
HaibaraAi123/DjangoBlog-chenfeng123.cn
|
ca5a90a4ad91e383a5ff25131488527f5733e216
|
[
"MIT"
] | 1
|
2020-08-06T05:50:26.000Z
|
2020-08-06T05:50:26.000Z
|
comment/urls.py
|
HaibaraAi123/DjangoBlog-chenfeng123.cn
|
ca5a90a4ad91e383a5ff25131488527f5733e216
|
[
"MIT"
] | null | null | null |
comment/urls.py
|
HaibaraAi123/DjangoBlog-chenfeng123.cn
|
ca5a90a4ad91e383a5ff25131488527f5733e216
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
app_name = 'comment'
urlpatterns = [
path('<int:article_id>', views.post_comment, name='post_comment'),
    path('<int:article_id>/<int:parent_comment_id>', views.post_comment, name='reply_comment'),
]
| 25.6
| 94
| 0.726563
|
7c5ea6b74421ee2a8ecb58488b889438d43c83b5
| 1,398
|
py
|
Python
|
aalh_iit_charlesmensingcollection/debug-find-duplicates.py
|
johndewees/iitmigration
|
4dadfbecda719d6e7d60af076a231aedec3c862f
|
[
"Unlicense"
] | null | null | null |
aalh_iit_charlesmensingcollection/debug-find-duplicates.py
|
johndewees/iitmigration
|
4dadfbecda719d6e7d60af076a231aedec3c862f
|
[
"Unlicense"
] | null | null | null |
aalh_iit_charlesmensingcollection/debug-find-duplicates.py
|
johndewees/iitmigration
|
4dadfbecda719d6e7d60af076a231aedec3c862f
|
[
"Unlicense"
] | null | null | null |
from openpyxl import load_workbook
filename = 'aalh_iit_charlesmensingcollection.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']
minimumcol = 43
maximumcol = 43
minimumrow = 7
maximumrow = 703
iterationrow = 7
identifiercol = 25
filenamecol = 43
countfilename = dict()
countidentifier = dict()
for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
for cell in row:
testvar1 = ws.cell(row=iterationrow, column=filenamecol).value
if testvar1 not in countfilename:
countfilename[testvar1] = 1
else:
countfilename[testvar1] = countfilename[testvar1] + 1
for cell in row:
testvar2 = ws.cell(row=iterationrow, column=identifiercol).value
print(testvar2)
try:
            if testvar2 not in countidentifier:
countidentifier[testvar2] = 1
else:
countidentifier[testvar2] = countidentifier[testvar2] + 1
except:
continue
iterationrow = iterationrow + 1
for file1 in countfilename:
if countfilename[file1] > 1:
print('Duplicate File Names:')
print (file1, countfilename[file1])
for file2 in countidentifier:
if countidentifier[file2] > 1:
print('Duplicate Identifiers:')
print(file2, countidentifier[file2])
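# Illustrative alternative (not part of the original script): the two counting dictionaries above can also
# be built with collections.Counter.
#
# from collections import Counter
# filenames = Counter(ws.cell(row=r, column=filenamecol).value for r in range(minimumrow, maximumrow + 1))
# identifiers = Counter(ws.cell(row=r, column=identifiercol).value for r in range(minimumrow, maximumrow + 1))
# duplicate_filenames = {k: v for k, v in filenames.items() if v > 1}
# duplicate_identifiers = {k: v for k, v in identifiers.items() if v > 1}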
| 31.772727
| 105
| 0.652361
|
93f57090596c309c842c33b3ba9eab6e5736dbef
| 569
|
py
|
Python
|
main/model/album.py
|
lipis/the-smallest-creature
|
3079046a574bb4bae528b3ab467d34f24b9bd5c7
|
[
"MIT"
] | 1
|
2016-01-01T20:56:32.000Z
|
2016-01-01T20:56:32.000Z
|
main/model/album.py
|
lipis/the-smallest-creature
|
3079046a574bb4bae528b3ab467d34f24b9bd5c7
|
[
"MIT"
] | null | null | null |
main/model/album.py
|
lipis/the-smallest-creature
|
3079046a574bb4bae528b3ab467d34f24b9bd5c7
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
from google.appengine.ext import ndb
from api import fields
import model
class Album(model.Base):
name = ndb.StringProperty(required=True)
description = ndb.StringProperty(verbose_name='Description (Markdown)')
release_date = ndb.DateProperty(required=True)
tags = ndb.StringProperty(repeated=True)
FIELDS = {
'name': fields.String,
'description': fields.String,
'release_date': fields.DateTime,
'tags': fields.List(fields.String),
}
FIELDS.update(model.Base.FIELDS)
| 22.76
| 73
| 0.725835
|
b6c188fef319bed9c79a8d9a0b8e322a3667707c
| 33,841
|
py
|
Python
|
uavcan/dsdl/parser.py
|
eckel-formlabs/pyuavcan
|
9c3a0d09dc685cdb494b7fc2080986344d6a13e0
|
[
"MIT"
] | null | null | null |
uavcan/dsdl/parser.py
|
eckel-formlabs/pyuavcan
|
9c3a0d09dc685cdb494b7fc2080986344d6a13e0
|
[
"MIT"
] | 1
|
2018-05-29T14:16:50.000Z
|
2018-05-29T14:16:50.000Z
|
uavcan/dsdl/parser.py
|
eckel-formlabs/pyuavcan
|
9c3a0d09dc685cdb494b7fc2080986344d6a13e0
|
[
"MIT"
] | 2
|
2018-05-10T16:48:41.000Z
|
2018-12-03T16:18:59.000Z
|
#
# Copyright (C) 2014-2015 UAVCAN Development Team <uavcan.org>
#
# This software is distributed under the terms of the MIT License.
#
# Author: Pavel Kirienko <pavel.kirienko@zubax.com>
# Ben Dyer <ben_dyer@mac.com>
#
from __future__ import division, absolute_import, print_function, unicode_literals
import os
import re
from functools import lru_cache
from logging import getLogger
from io import StringIO
from .signature import Signature, compute_signature
from .common import DsdlException, pretty_filename, bytes_from_crc64
from .type_limits import get_unsigned_integer_range, get_signed_integer_range, get_float_range
# Python 2.7 compatibility
try:
# noinspection PyUnresolvedReferences,PyShadowingBuiltins
str = unicode # @ReservedAssignment @UndefinedVariable
except NameError:
pass
try:
# noinspection PyUnresolvedReferences,PyUnboundLocalVariable
long(1) # @UndefinedVariable
except NameError:
long = int # @ReservedAssignment
MAX_FULL_TYPE_NAME_LEN = 80
SERVICE_DATA_TYPE_ID_MAX = 255
MESSAGE_DATA_TYPE_ID_MAX = 65535
logger = getLogger(__name__)
class Type:
"""
Common type description. The specialized type description classes inherit from this one.
Fields:
full_name Full type name string, e.g. "uavcan.protocol.NodeStatus"
category Any CATEGORY_*
"""
CATEGORY_PRIMITIVE = 0
CATEGORY_ARRAY = 1
CATEGORY_COMPOUND = 2
CATEGORY_VOID = 3
def __init__(self, full_name, category):
self.full_name = str(full_name)
self.category = category
def __str__(self):
return self.get_normalized_definition()
def get_data_type_signature(self):
return None
def get_normalized_definition(self):
raise NotImplementedError('Pure virtual method')
def get_max_bitlen(self):
raise NotImplementedError('Pure virtual method')
def get_min_bitlen(self):
raise NotImplementedError('Pure virtual method')
__repr__ = __str__
class PrimitiveType(Type):
"""
Primitive type description, e.g. bool or float16.
Fields:
kind Any KIND_*
bitlen Bit length, 1 to 64
cast_mode Any CAST_MODE_*
value_range Tuple containing min and max values: (min, max)
"""
KIND_BOOLEAN = 0
KIND_UNSIGNED_INT = 1
KIND_SIGNED_INT = 2
KIND_FLOAT = 3
CAST_MODE_SATURATED = 0
CAST_MODE_TRUNCATED = 1
def __init__(self, kind, bitlen, cast_mode):
self.kind = kind
self.bitlen = bitlen
self.cast_mode = cast_mode
Type.__init__(self, self.get_normalized_definition(), Type.CATEGORY_PRIMITIVE)
self.value_range = {
PrimitiveType.KIND_BOOLEAN: get_unsigned_integer_range,
PrimitiveType.KIND_UNSIGNED_INT: get_unsigned_integer_range,
PrimitiveType.KIND_SIGNED_INT: get_signed_integer_range,
PrimitiveType.KIND_FLOAT: get_float_range
}[self.kind](bitlen)
def get_normalized_definition(self):
"""Please refer to the specification for details about normalized definitions."""
cast_mode = 'saturated' if self.cast_mode == PrimitiveType.CAST_MODE_SATURATED else 'truncated'
primary_type = {
PrimitiveType.KIND_BOOLEAN: 'bool',
PrimitiveType.KIND_UNSIGNED_INT: 'uint' + str(self.bitlen),
PrimitiveType.KIND_SIGNED_INT: 'int' + str(self.bitlen),
PrimitiveType.KIND_FLOAT: 'float' + str(self.bitlen)
}[self.kind]
return cast_mode + ' ' + primary_type
def validate_value_range(self, value):
"""
Args:
value: Throws DsdlException if this value cannot be represented by this type.
"""
low, high = self.value_range
if not low <= value <= high:
error('Value [%s] is out of range %s', value, self.value_range)
def get_max_bitlen(self):
"""Returns type bit length."""
return self.bitlen
def get_min_bitlen(self):
"""Returns type bit length."""
return self.bitlen
class ArrayType(Type):
"""
Array type description, e.g. float32[8], uint12[<34].
Fields:
value_type Description of the array value type; the type of this field inherits Type, e.g. PrimitiveType
mode Any MODE_*
max_size Maximum number of elements in the array
"""
MODE_STATIC = 0
MODE_DYNAMIC = 1
def __init__(self, value_type, mode, max_size):
self.value_type = value_type
self.mode = mode
self.max_size = max_size
Type.__init__(self, self.get_normalized_definition(), Type.CATEGORY_ARRAY)
def get_normalized_definition(self):
"""Please refer to the specification for details about normalized definitions."""
typedef = self.value_type.get_normalized_definition()
return ('%s[<=%d]' if self.mode == ArrayType.MODE_DYNAMIC else '%s[%d]') % (typedef, self.max_size)
def get_max_bitlen(self):
"""Returns total maximum bit length of the array, including length field if applicable."""
payload_max_bitlen = self.max_size * self.value_type.get_max_bitlen()
return {
self.MODE_DYNAMIC: payload_max_bitlen + self.max_size.bit_length(),
self.MODE_STATIC: payload_max_bitlen
}[self.mode]
def get_min_bitlen(self):
if self.mode == self.MODE_STATIC:
return self.value_type.get_min_bitlen() * self.max_size
else:
return 0 # Considering TAO
@lru_cache()
def get_data_type_signature(self):
return self.value_type.get_data_type_signature()
@property
def is_string_like(self):
return self.mode == self.MODE_DYNAMIC and \
self.value_type.category == Type.CATEGORY_PRIMITIVE and \
self.value_type.bitlen == 8
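# Illustrative only (not part of the original parser): how the classes above render normalized
# definitions. The bit length and array size are arbitrary.
#
#   elem = PrimitiveType(PrimitiveType.KIND_UNSIGNED_INT, 8, PrimitiveType.CAST_MODE_SATURATED)
#   arr = ArrayType(elem, ArrayType.MODE_DYNAMIC, 15)
#   arr.get_normalized_definition()   # -> 'saturated uint8[<=15]'
#   arr.get_max_bitlen()              # -> 15 * 8 + 4 = 124 (4 bits for the length prefix)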
# noinspection PyAbstractClass
class CompoundType(Type):
"""
Compound type description, e.g. uavcan.protocol.NodeStatus.
Fields:
source_file Path to the DSDL definition file for this type
default_dtid Default Data Type ID, if specified, None otherwise
kind Any KIND_*
source_text Raw DSDL definition text (as is, with comments and the original formatting)
Fields if kind == KIND_SERVICE:
request_fields Request struct field list, the type of each element is Field
response_fields Response struct field list
request_constants Request struct constant list, the type of each element is Constant
response_constants Response struct constant list
request_union Boolean indicating whether the request struct is a union
response_union Boolean indicating whether the response struct is a union
Fields if kind == KIND_MESSAGE:
fields Field list, the type of each element is Field
constants Constant list, the type of each element is Constant
union Boolean indicating whether the message struct is a union
Extra methods if kind == KIND_SERVICE:
get_max_bitlen_request() Returns maximum total bit length of the serialized request struct
get_max_bitlen_response() Same for the response struct
get_min_bitlen_request() Returns minimum total bit length of the serialized request struct
get_min_bitlen_response() Same for the response struct
Extra methods if kind == KIND_MESSAGE:
get_max_bitlen() Returns maximum total bit length of the serialized struct
get_min_bitlen() Returns minimum total bit length of the serialized struct
"""
KIND_SERVICE = 0
KIND_MESSAGE = 1
def __init__(self, full_name, kind, source_file, default_dtid, source_text):
Type.__init__(self, full_name, Type.CATEGORY_COMPOUND)
self.source_file = source_file
self.default_dtid = default_dtid
self.kind = kind
self.source_text = source_text
def compute_max_bitlen(flds, union):
if len(flds) == 0:
return 0
lens = [x.type.get_max_bitlen() for x in flds]
if union:
return max(lens) + max(len(flds) - 1, 1).bit_length()
else:
return sum(lens)
def compute_min_bitlen(flds, union):
if len(flds) == 0:
return 0
lens = [x.type.get_min_bitlen() for x in flds]
if union:
return min(lens) + max(len(flds) - 1, 1).bit_length()
else:
return sum(lens)
if kind == CompoundType.KIND_SERVICE:
self.request_fields = []
self.response_fields = []
self.request_constants = []
self.response_constants = []
self.get_max_bitlen_request = lambda: compute_max_bitlen(self.request_fields, self.request_union)
self.get_max_bitlen_response = lambda: compute_max_bitlen(self.response_fields, self.response_union)
self.get_min_bitlen_request = lambda: compute_min_bitlen(self.request_fields, self.request_union)
self.get_min_bitlen_response = lambda: compute_min_bitlen(self.response_fields, self.response_union)
self.request_union = False
self.response_union = False
elif kind == CompoundType.KIND_MESSAGE:
self.fields = []
self.constants = []
self.get_max_bitlen = lambda: compute_max_bitlen(self.fields, self.union)
self.get_min_bitlen = lambda: compute_min_bitlen(self.fields, self.union)
self.union = False
else:
error('Compound type of unknown kind [%s]', kind)
def _instantiate(self, *args, **kwargs):
# This is a stub
pass
def __call__(self, *args, **kwargs):
return self._instantiate(*args, **kwargs)
def get_dsdl_signature_source_definition(self):
"""
Returns normalized DSDL definition text.
Please refer to the specification for details about normalized DSDL definitions.
"""
txt = StringIO()
txt.write(self.full_name + '\n')
def adjoin(attrs):
return txt.write('\n'.join(x.get_normalized_definition() for x in attrs) + '\n')
if self.kind == CompoundType.KIND_SERVICE:
if self.request_union:
txt.write('\n@union\n')
adjoin(self.request_fields)
txt.write('\n---\n')
if self.response_union:
txt.write('\n@union\n')
adjoin(self.response_fields)
elif self.kind == CompoundType.KIND_MESSAGE:
if self.union:
txt.write('\n@union\n')
adjoin(self.fields)
else:
error('Compound type of unknown kind [%s]', self.kind)
return txt.getvalue().strip().replace('\n\n\n', '\n').replace('\n\n', '\n')
def get_dsdl_signature(self):
"""
Computes DSDL signature of this type.
Please refer to the specification for details about signatures.
"""
return compute_signature(self.get_dsdl_signature_source_definition())
def get_normalized_definition(self):
"""Returns full type name string, e.g. 'uavcan.protocol.NodeStatus'"""
return self.full_name
@lru_cache()
def get_data_type_signature(self):
"""
Computes data type signature of this type. The data type signature is
guaranteed to match only if all nested data structures are compatible.
Please refer to the specification for details about signatures.
"""
sig = Signature(self.get_dsdl_signature())
fields = self.request_fields + self.response_fields if self.kind == CompoundType.KIND_SERVICE else self.fields
for field in fields:
field_sig = field.type.get_data_type_signature()
if field_sig is not None:
sig_value = sig.get_value()
sig.add(bytes_from_crc64(field_sig))
sig.add(bytes_from_crc64(sig_value))
return sig.get_value()
class VoidType(Type):
"""
Void type description, e.g. void2.
Fields:
bitlen Bit length, 1 to 64
"""
def __init__(self, bitlen):
self.bitlen = bitlen
Type.__init__(self, self.get_normalized_definition(), Type.CATEGORY_VOID)
def get_normalized_definition(self):
"""Please refer to the specification for details about normalized definitions."""
return 'void' + str(self.bitlen)
def get_max_bitlen(self):
"""Returns type bit length."""
return self.bitlen
def get_min_bitlen(self):
"""Returns type bit length."""
return self.bitlen
class Attribute:
"""
Base class of an attribute description.
Fields:
type Attribute type description, the type of this field inherits the class Type, e.g. PrimitiveType
name Attribute name string
"""
# noinspection PyShadowingBuiltins
def __init__(self, type, name): # @ReservedAssignment
self.type = type
self.name = name
def __str__(self):
return self.get_normalized_definition()
def get_normalized_definition(self):
raise NotImplementedError('Pure virtual method')
__repr__ = __str__
class Field(Attribute):
"""
Field description.
Does not add new fields to Attribute.
If type is void, the name will be None.
"""
def get_normalized_definition(self):
if self.type.category == self.type.CATEGORY_VOID:
return self.type.get_normalized_definition()
else:
return '%s %s' % (self.type.get_normalized_definition(), self.name)
class Constant(Attribute):
"""
Constant description.
Fields:
init_expression Constant initialization expression string, e.g. "2+2" or "'\x66'"
value Computed result of the initialization expression in the final type (e.g. int, float)
string_value Computed result of the initialization expression as string
"""
# noinspection PyShadowingBuiltins
def __init__(self, type, name, init_expression, value): # @ReservedAssignment
Attribute.__init__(self, type, name)
self.init_expression = init_expression
self.value = value
self.string_value = repr(value)
if isinstance(value, long):
self.string_value = self.string_value.replace('L', '')
def get_normalized_definition(self):
return '%s %s = %s' % (self.type.get_normalized_definition(), self.name, self.init_expression)
class Parser:
"""
DSDL parser logic. Do not use this class directly; use the helper function instead.
"""
def __init__(self, search_dirs):
self.search_dirs = validate_search_directories(search_dirs)
def _namespace_from_filename(self, filename):
search_dirs = sorted(map(os.path.abspath, self.search_dirs)) # Nested last
filename = os.path.abspath(filename)
for dirname in search_dirs:
root_ns = dirname.split(os.path.sep)[-1]
if filename.startswith(dirname):
dir_len = len(dirname)
basename_len = len(os.path.basename(filename))
ns = filename[dir_len:-basename_len]
ns = (root_ns + '.' + ns.replace(os.path.sep, '.').strip('.')).strip('.')
validate_namespace_name(ns)
return ns
error('File [%s] was not found in search directories', filename)
def _full_typename_and_dtid_from_filename(self, filename):
basename = os.path.basename(filename)
items = basename.split('.')
if (len(items) != 2 and len(items) != 3) or items[-1] != 'uavcan':
error('Invalid file name [%s]; expected pattern: [<default-dtid>.]<short-type-name>.uavcan', basename)
if len(items) == 2:
default_dtid, name = None, items[0]
else:
default_dtid, name = items[0], items[1]
try:
default_dtid = int(default_dtid)
except ValueError:
error('Invalid default data type ID [%s]', default_dtid)
full_name = self._namespace_from_filename(filename) + '.' + name
validate_compound_type_full_name(full_name)
return full_name, default_dtid
def _locate_compound_type_definition(self, referencing_filename, typename):
def locate_namespace_directory(ns):
namespace_components = ns.split('.')
root_namespace, sub_namespace_components = namespace_components[0], namespace_components[1:]
for d in self.search_dirs:
if d.split(os.path.sep)[-1] == root_namespace:
return os.path.join(d, *sub_namespace_components)
error('Unknown namespace [%s]', ns)
if '.' not in typename:
current_namespace = self._namespace_from_filename(referencing_filename)
full_typename = current_namespace + '.' + typename
else:
full_typename = typename
namespace = '.'.join(full_typename.split('.')[:-1])
directory = locate_namespace_directory(namespace)
logger.debug('Searching for [%s] in [%s]', full_typename, directory)
for fn in os.listdir(directory):
fn = os.path.join(directory, fn)
if os.path.isfile(fn):
try:
fn_full_typename, _dtid = self._full_typename_and_dtid_from_filename(fn)
if full_typename == fn_full_typename:
return fn
except Exception as ex:
logger.debug('Unknown file [%s], skipping... [%s]', pretty_filename(fn), ex)
error('Type definition not found [%s]', typename)
# noinspection PyUnusedLocal
@staticmethod
def _parse_void_type(filename, bitlen):
enforce(1 <= bitlen <= 64, 'Invalid void bit length [%d]', bitlen)
return VoidType(bitlen)
def _parse_array_type(self, filename, value_typedef, size_spec, cast_mode):
logger.debug('Parsing the array value type [%s]...', value_typedef)
value_type = self._parse_type(filename, value_typedef, cast_mode)
enforce(value_type.category != value_type.CATEGORY_ARRAY,
'Multidimensional arrays are not allowed (protip: use nested types)')
try:
if size_spec.startswith('<='):
max_size = int(size_spec[2:], 0)
mode = ArrayType.MODE_DYNAMIC
elif size_spec.startswith('<'):
max_size = int(size_spec[1:], 0) - 1
mode = ArrayType.MODE_DYNAMIC
else:
max_size = int(size_spec, 0)
mode = ArrayType.MODE_STATIC
except ValueError:
error('Invalid array size specifier [%s] (valid patterns: [<=X], [<X], [X])', size_spec)
else:
enforce(max_size > 0, 'Array size must be positive, not %d', max_size)
return ArrayType(value_type, mode, max_size)
# noinspection PyUnusedLocal
@staticmethod
def _parse_primitive_type(filename, base_name, bitlen, cast_mode):
if cast_mode is None or cast_mode == 'saturated':
cast_mode = PrimitiveType.CAST_MODE_SATURATED
elif cast_mode == 'truncated':
cast_mode = PrimitiveType.CAST_MODE_TRUNCATED
else:
error('Invalid cast mode [%s]', cast_mode)
if base_name == 'bool':
return PrimitiveType(PrimitiveType.KIND_BOOLEAN, 1, cast_mode)
try:
kind = {
'uint': PrimitiveType.KIND_UNSIGNED_INT,
'int': PrimitiveType.KIND_SIGNED_INT,
'float': PrimitiveType.KIND_FLOAT,
}[base_name]
except KeyError:
error('Unknown primitive type (note: compound types should be in CamelCase)')
# noinspection PyUnboundLocalVariable
if kind == PrimitiveType.KIND_FLOAT:
enforce(bitlen in (16, 32, 64), 'Invalid bit length for float type [%d]', bitlen)
else:
enforce(2 <= bitlen <= 64, 'Invalid bit length [%d] (note: use bool instead of uint1)', bitlen)
return PrimitiveType(kind, bitlen, cast_mode)
def _parse_compound_type(self, filename, typedef):
definition_filename = self._locate_compound_type_definition(filename, typedef)
logger.debug('Nested type [%s] is defined in [%s], parsing...', typedef, pretty_filename(definition_filename))
t = self.parse(definition_filename)
if t.kind == t.KIND_SERVICE:
error('A service type can not be nested into another compound type')
return t
def _parse_type(self, filename, typedef, cast_mode):
typedef = typedef.strip()
void_match = re.match(r'void(\d{1,2})$', typedef)
array_match = re.match(r'(.+?)\[([^\]]*)\]$', typedef)
primitive_match = re.match(r'([a-z]+)(\d{1,2})$|(bool)$', typedef)
if void_match:
size_spec = void_match.group(1).strip()
return self._parse_void_type(filename, int(size_spec))
elif array_match:
assert not primitive_match
value_typedef = array_match.group(1).strip()
size_spec = array_match.group(2).strip()
return self._parse_array_type(filename, value_typedef, size_spec, cast_mode)
elif primitive_match:
if primitive_match.group(0) == 'bool':
return self._parse_primitive_type(filename, 'bool', 1, cast_mode)
else:
base_name = primitive_match.group(1)
bitlen = int(primitive_match.group(2))
return self._parse_primitive_type(filename, base_name, bitlen, cast_mode)
else:
enforce(cast_mode is None, 'Cast mode specifier is not applicable for compound types [%s]', cast_mode)
return self._parse_compound_type(filename, typedef)
@staticmethod
def _make_constant(attrtype, name, init_expression):
enforce(attrtype.category == attrtype.CATEGORY_PRIMITIVE, 'Invalid type for constant [%d]', attrtype.category)
init_expression = ''.join(init_expression.split()) # Remove spaces
value = evaluate_expression(init_expression)
if isinstance(value, str) and len(value) == 1: # ASCII character
value = ord(value)
elif isinstance(value, (float, int, bool, long)): # Numeric literal
value = {
attrtype.KIND_UNSIGNED_INT: long,
attrtype.KIND_SIGNED_INT: long,
attrtype.KIND_BOOLEAN: int, # Not bool because we need to check range
attrtype.KIND_FLOAT: float
}[attrtype.kind](value)
else:
error('Invalid type of constant initialization expression [%s]', type(value).__name__)
logger.debug('Constant initialization expression evaluated as: [%s] --> %s', init_expression, repr(value))
attrtype.validate_value_range(value)
return Constant(attrtype, name, init_expression, value)
def _parse_line(self, filename, tokens):
cast_mode = None
if tokens[0] == 'saturated' or tokens[0] == 'truncated':
cast_mode, tokens = tokens[0], tokens[1:]
if len(tokens) < 2 and not tokens[0].startswith('void'):
error('Invalid attribute definition')
if len(tokens) == 1:
typename, attrname, tokens = tokens[0], None, []
else:
typename, attrname, tokens = tokens[0], tokens[1], tokens[2:]
validate_attribute_name(attrname)
attrtype = self._parse_type(filename, typename, cast_mode)
if len(tokens) > 0:
if len(tokens) < 2 or tokens[0] != '=':
error('Constant assignment expected')
expression = ' '.join(tokens[1:])
return self._make_constant(attrtype, attrname, expression)
else:
return Field(attrtype, attrname)
@staticmethod
def _tokenize(text):
for idx, line in enumerate(text.splitlines()):
line = re.sub('#.*', '', line).strip() # Remove comments and leading/trailing whitespaces
if line:
tokens = [tk for tk in line.split() if tk]
yield idx + 1, tokens
def parse_source(self, filename, source_text):
try:
full_typename, default_dtid = self._full_typename_and_dtid_from_filename(filename)
numbered_lines = list(self._tokenize(source_text))
all_attributes_names = set()
fields, constants, resp_fields, resp_constants = [], [], [], []
union, resp_union = False, False
response_part = False
for num, tokens in numbered_lines:
try:
if tokens == ['---']:
enforce(not response_part, 'Duplicate response mark')
response_part = True
all_attributes_names = set()
continue
if tokens == ['@union']:
if response_part:
enforce(not resp_union, 'Response data structure has already been declared as union')
resp_union = True
else:
enforce(not union, 'Data structure has already been declared as union')
union = True
continue
attr = self._parse_line(filename, tokens)
if attr.name and attr.name in all_attributes_names:
error('Duplicated attribute name [%s]', attr.name)
all_attributes_names.add(attr.name)
if isinstance(attr, Constant):
(resp_constants if response_part else constants).append(attr)
elif isinstance(attr, Field):
(resp_fields if response_part else fields).append(attr)
else:
error('Unknown attribute type - internal error')
except DsdlException as ex:
if not ex.line:
ex.line = num
raise ex
except Exception as ex:
logger.error('Internal error', exc_info=True)
raise DsdlException('Internal error: %s' % str(ex), line=num)
if response_part:
t = CompoundType(full_typename, CompoundType.KIND_SERVICE, filename, default_dtid, source_text)
t.request_fields = fields
t.request_constants = constants
t.response_fields = resp_fields
t.response_constants = resp_constants
t.request_union = union
t.response_union = resp_union
max_bitlen = t.get_max_bitlen_request(), t.get_max_bitlen_response()
max_bytelen = tuple(map(bitlen_to_bytelen, max_bitlen))
else:
t = CompoundType(full_typename, CompoundType.KIND_MESSAGE, filename, default_dtid, source_text)
t.fields = fields
t.constants = constants
t.union = union
max_bitlen = t.get_max_bitlen()
max_bytelen = bitlen_to_bytelen(max_bitlen)
validate_union(t)
validate_data_type_id(t)
logger.debug('Type [%s], default DTID: %s, signature: %08x, maxbits: %s, maxbytes: %s, DSSD:',
full_typename, default_dtid, t.get_dsdl_signature(), max_bitlen, max_bytelen)
for ln in t.get_dsdl_signature_source_definition().splitlines():
logger.debug(' %s', ln)
return t
except DsdlException as ex:
if not ex.file:
ex.file = filename
raise ex
def parse(self, filename):
try:
filename = os.path.abspath(filename)
with open(filename) as f:
source_text = f.read()
return self.parse_source(filename, source_text)
except IOError as ex:
raise DsdlException('IO error: %s' % str(ex), file=filename)
except Exception as ex:
logger.error('Internal error', exc_info=True)
raise DsdlException('Internal error: %s' % str(ex), file=filename)
def error(fmt, *args):
raise DsdlException(fmt % args)
def enforce(cond, fmt, *args):
if not cond:
error(fmt, *args)
def bitlen_to_bytelen(x):
return int((x + 7) / 8)
def evaluate_expression(expression):
try:
env = {
'locals': None,
'globals': None,
'__builtins__': None,
'true': 1,
'false': 0
}
return eval(expression, env)
except Exception as ex:
error('Cannot evaluate expression: %s', str(ex))
def validate_search_directories(dirnames):
dirnames = set(dirnames)
dirnames = list(map(os.path.abspath, dirnames))
for d1 in dirnames:
for d2 in dirnames:
if d1 == d2:
continue
enforce(not d1.startswith(d2), 'Nested search directories are not allowed [%s] [%s]', d1, d2)
enforce(d1.split(os.path.sep)[-1] != d2.split(os.path.sep)[-1],
'Namespace roots must be unique [%s] [%s]', d1, d2)
return dirnames
def validate_namespace_name(name):
for component in name.split('.'):
enforce(re.match(r'[a-z][a-z0-9_]*$', component), 'Invalid namespace name [%s]', name)
enforce(len(name) <= MAX_FULL_TYPE_NAME_LEN, 'Namespace name is too long [%s]', name)
def validate_compound_type_full_name(name):
enforce('.' in name, 'Full type name must explicitly specify its namespace [%s]', name)
short_name = name.split('.')[-1]
namespace = '.'.join(name.split('.')[:-1])
validate_namespace_name(namespace)
enforce(re.match(r'[A-Z][A-Za-z0-9_]*$', short_name), 'Invalid type name [%s]', name)
enforce(len(name) <= MAX_FULL_TYPE_NAME_LEN, 'Type name is too long [%s]', name)
def validate_attribute_name(name):
enforce(re.match(r'[a-zA-Z][a-zA-Z0-9_]*$', name), 'Invalid attribute name [%s]', name)
def validate_data_type_id(t):
if t.default_dtid is None:
return
if t.kind == t.KIND_MESSAGE:
enforce(0 <= t.default_dtid <= MESSAGE_DATA_TYPE_ID_MAX,
'Invalid data type ID for message [%s]', t.default_dtid)
elif t.kind == t.KIND_SERVICE:
enforce(0 <= t.default_dtid <= SERVICE_DATA_TYPE_ID_MAX,
'Invalid data type ID for service [%s]', t.default_dtid)
else:
error('Invalid kind: %s', t.kind)
def validate_union(t):
def check_fields(fields):
enforce(len(fields) > 1, 'Union contains less than 2 fields')
enforce(not any(_.type.category == _.type.CATEGORY_VOID for _ in fields), 'Union must not contain void fields')
if t.kind == t.KIND_MESSAGE:
if t.union:
check_fields(t.fields)
elif t.kind == t.KIND_SERVICE:
if t.request_union:
check_fields(t.request_fields)
if t.response_union:
check_fields(t.response_fields)
else:
error('Invalid kind: %s', t.kind)
def parse_namespaces(source_dirs, search_dirs=None):
"""
Use only this function to parse DSDL definitions.
This function takes a list of root namespace directories (containing DSDL definition files to parse) and an
optional list of search directories (containing DSDL definition files that can be referenced from the types
that are going to be parsed).
Returns the list of parsed type definitions, where type of each element is CompoundType.
Args:
source_dirs: List of root namespace directories to parse.
search_dirs: List of root namespace directories with referenced types (optional). This list is
automatically extended with source_dirs.
Example:
>>> import uavcan
>>> a = uavcan.dsdl.parse_namespaces(['../dsdl/uavcan'])
>>> len(a)
77
>>> a[0]
uavcan.Timestamp
>>> a[0].fields
[truncated uint48 husec]
>>> a[0].constants
[saturated uint48 UNKNOWN = 0, saturated uint48 USEC_PER_LSB = 100]
"""
# noinspection PyShadowingNames
def walk():
import fnmatch
from functools import partial
def on_walk_error(directory, ex):
raise DsdlException('OS error in [%s]: %s' % (directory, str(ex)))
for source_dir in source_dirs:
walker = os.walk(source_dir, onerror=partial(on_walk_error, source_dir), followlinks=True)
for root, _dirnames, filenames in walker:
for filename in fnmatch.filter(filenames, '*.uavcan'):
filename = os.path.join(root, filename)
yield filename
all_default_dtid = {} # (kind, dtid) : filename
# noinspection PyShadowingNames
def ensure_unique_dtid(t, filename):
if t.default_dtid is None:
return
key = t.kind, t.default_dtid
if key in all_default_dtid:
first = pretty_filename(all_default_dtid[key])
second = pretty_filename(filename)
error('Default data type ID collision: [%s] [%s]', first, second)
all_default_dtid[key] = filename
parser = Parser(source_dirs + (search_dirs or []))
output_types = []
for filename in walk():
t = parser.parse(filename)
ensure_unique_dtid(t, filename)
output_types.append(t)
return output_types
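# Usage sketch: a minimal way to drive parse_namespaces() above. The
# 'dsdl/uavcan' directory name is only a placeholder and must point at a real
# root namespace; error handling mirrors how the parser attaches file/line
# context to DsdlException.
if __name__ == '__main__':
    try:
        for t in parse_namespaces(['dsdl/uavcan']):
            # A CompoundType prints as its full type name.
            print('%s dtid=%s signature=%08x' % (t, t.default_dtid, t.get_dsdl_signature()))
    except DsdlException as ex:
        print('Parse error [%s:%s]: %s' % (ex.file, ex.line, ex))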
| 39.812941
| 119
| 0.619397
|
2aae20cd931f1401d9fb9870543e83f208e5657e
| 1,533
|
py
|
Python
|
src/ggrc/app.py
|
ankit-collective/ggrc-core
|
7c94ab6f6cd4f95a0bdfbbd9c81358f35dc1c963
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc/app.py
|
ankit-collective/ggrc-core
|
7c94ab6f6cd4f95a0bdfbbd9c81358f35dc1c963
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc/app.py
|
ankit-collective/ggrc-core
|
7c94ab6f6cd4f95a0bdfbbd9c81358f35dc1c963
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2013 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By:
# Maintained By:
from . import settings
# Initialize Flask app
from flask import Flask
app = Flask('ggrc', instance_relative_config=True)
app.config.from_object(settings)
# Configure Flask-SQLAlchemy for app
from . import db
db.app = app
db.init_app(app)
# Initialize models
import ggrc.models
ggrc.models.init_app(app)
# Configure Flask-Login
import ggrc.login
ggrc.login.init_app(app)
# Configure webassets for app
from . import assets
app.jinja_env.add_extension('webassets.ext.jinja2.assets')
app.jinja_env.assets_environment = assets.environment
# Configure Jinja2 extensions for app
app.jinja_env.add_extension('jinja2.ext.autoescape')
app.jinja_env.autoescape = True
app.jinja_env.add_extension('jinja2.ext.with_')
app.jinja_env.add_extension('hamlpy.ext.HamlPyExtension')
# Initialize services
import ggrc.services
ggrc.services.init_all_services(app)
# Initialize views
import ggrc.views
ggrc.views.init_all_object_views(app)
# Initialize configured and default extensions
from ggrc.fulltext import get_indexer
ggrc.indexer = get_indexer()
if settings.ENABLE_JASMINE:
# Configure Flask-Jasmine, for dev mode unit testing
from flask.ext.jasmine import Jasmine, Asset
jasmine = Jasmine(app)
jasmine.sources(
Asset("dashboard-js"),
Asset("dashboard-js-spec-helpers"))
jasmine.specs(
Asset("dashboard-js-specs"))
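# Usage sketch: the module wires up a complete Flask app at import time, so a
# local development run only needs the standard Flask entry point; the host and
# port below are arbitrary placeholders.
if __name__ == '__main__':
    app.run(host='127.0.0.1', port=8080)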
| 24.725806
| 78
| 0.779517
|
64b614b2c181686ecc04eb43170d02502c8f72bc
| 788
|
py
|
Python
|
piecash/scripts/ledger.py
|
z-Wind/piecash
|
5ac01ac8ce2c98202bb168fdc2803eb9d741150c
|
[
"MIT"
] | null | null | null |
piecash/scripts/ledger.py
|
z-Wind/piecash
|
5ac01ac8ce2c98202bb168fdc2803eb9d741150c
|
[
"MIT"
] | null | null | null |
piecash/scripts/ledger.py
|
z-Wind/piecash
|
5ac01ac8ce2c98202bb168fdc2803eb9d741150c
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python
"""original script from https://github.com/MatzeB/pygnucash/blob/master/gnucash2ledger.py by Matthias Braun matze@braunis.de
adapted for:
- python 3 support
- new string formatting
"""
import argparse
import sys
import codecs
import piecash
import click
from piecash.scripts.cli import cli
@cli.command()
@click.argument("book", type=click.Path(exists=True))
@click.option(
"--output",
type=click.File("w", encoding="utf-8"),
default="-",
help="File to which to export the data (default=stdout)",
)
def ledger(book, output):
"""Export to ledger-cli format.
This script exports a GnuCash BOOK to the ledger-cli format.
"""
with piecash.open_book(book, open_if_lock=True) as data:
output.write(piecash.ledger(data))
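# Usage sketch: exercising the click command above in-process with click's test
# runner. 'mybook.gnucash' is a placeholder path; click.Path(exists=True) will
# reject it unless the file actually exists.
if __name__ == "__main__":
    from click.testing import CliRunner

    result = CliRunner().invoke(ledger, ["mybook.gnucash"])
    print(result.output)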
| 23.176471
| 124
| 0.709391
|
8a55e88551f12827d952f8081003d0196d619e60
| 28,099
|
py
|
Python
|
closed/FuriosaAI/code/quantization/furiosa_sdk_quantizer/frontend/onnx/spec/export_spec.py
|
ctuning/inference_results_v1.1
|
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
|
[
"Apache-2.0"
] | 12
|
2021-09-23T08:05:57.000Z
|
2022-03-21T03:52:11.000Z
|
closed/FuriosaAI/code/quantization/furiosa_sdk_quantizer/frontend/onnx/spec/export_spec.py
|
ctuning/inference_results_v1.1
|
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
|
[
"Apache-2.0"
] | 11
|
2021-09-23T20:34:06.000Z
|
2022-01-22T07:58:02.000Z
|
closed/FuriosaAI/code/quantization/furiosa_sdk_quantizer/frontend/onnx/spec/export_spec.py
|
ctuning/inference_results_v1.1
|
d9176eca28fcf6d7a05ccb97994362a76a1eb5ab
|
[
"Apache-2.0"
] | 16
|
2021-09-23T20:26:38.000Z
|
2022-03-09T12:59:56.000Z
|
from typing import List, Dict, Set, Callable, Tuple, Optional
import logging
import onnx
from onnx import numpy_helper
from furiosa_sdk_quantizer.frontend.onnx.spec import spec_utils
from furiosa_sdk_quantizer.ir import spec
from furiosa_sdk_quantizer.ir.common.operator import HeightWidth, Padding
from furiosa_sdk_quantizer.interfaces.export_spec import ExportSpec
logger = logging.getLogger("Furiosa-Quantizer")
logging.basicConfig(level=logging.INFO)
class OnnxExportSpec(ExportSpec):
def __init__(self, model: onnx.ModelProto):
super().__init__()
self.model = model
self.tensor_shapes = self.get_tensor_shapes(self.model)
self.initializer = {init.name: init for init in self.model.graph.initializer}
self.attributes = self.get_attributes(self.model)
# Build producer map
self.producer_map: Dict[str, onnx.NodeProto] = dict()
for node in self.model.graph.node:
for node_output in node.output:
if node_output in self.producer_map:
raise Exception(
"Invalid form of graph, a tensor {} has two or more producers.".format(
node_output
)
)
self.producer_map[node_output] = node
# Followings will be lazily initialized.
self._SKIP_NODE = None
self._MULTI_NODE_SPEC = None
self._SINGLE_NODE_SPEC = None
def export(self) -> (List[spec.OperatorSpec], Set[str]):
"""
Traverse graph and export nodes as specs.
Returns (a list of Spec, a set of unsupported ops)
"""
specs: List[spec.OperatorSpec] = list()
unsupported_ops = set()
outputs: List[str] = list(map(lambda output: output.name, self.model.graph.output))
# To prevent traversing cyclic connections
visited: Set[str] = set()
visited_node: List[onnx.NodeProto] = list()
while len(outputs) > 0:
output = outputs.pop(0)
if output not in self.producer_map:
continue
node = self.producer_map[output]
if node.op_type in self.skip_node:
outputs.append(node.input[0])
visited.update([node.input[0]])
continue
# prevent duplicate specs from being created from nodes that have multiple outputs like Split.
if node in visited_node:
continue
result = self.traverse_multi_node_spec(node)
if result is None:
result = self.traverse_single_node_spec(node)
# Failed to find how to process the node
if result is None:
unsupported_ops.add(node.op_type)
continue
s, inputs = result
# Put spec
specs.append(s)
# Put predecessor of node to new outputs
outputs += list(filter(lambda input: input not in visited, inputs))
visited.update(inputs)
visited_node.append(node)
return specs, unsupported_ops
def traverse_single_node_spec(
self, node: onnx.NodeProto
) -> Optional[Tuple[spec.Spec, List[str]]]:
"""
Returns (Spec, list of inputs of the node)
"""
if node.op_type not in self.single_node_spec:
return None
data_flow_input = list(
filter(lambda input: input not in self.initializer.keys(), node.input)
)
return self.single_node_spec[node.op_type](node), data_flow_input
def traverse_multi_node_spec(
self, node: onnx.NodeProto
) -> Optional[Tuple[spec.Spec, List[str]]]:
"""
Returns (Spec, list of inputs of the node)
"""
if node.op_type not in self.multi_node_spec:
return None
found = None
for func in self.multi_node_spec[node.op_type]:
result = func(node)
if result is None:
continue
# Check the ambiguity
if found is not None:
logger.warning(
"Found two or more ways of exporting a spec from the multi-node pattern around node {}; keeping the first match.".format(
node.op_type
)
)
return found
found = result
return found
@property
def skip_node(self) -> Set[str]:
if self._SKIP_NODE is None:
self._SKIP_NODE = {"Relu", "BatchNormalization"}
return self._SKIP_NODE
@property
def multi_node_spec(
self,
) -> Dict[str, List[Callable[[onnx.NodeProto], Optional[Tuple[spec.Spec, List[str]]]]]]:
if self._MULTI_NODE_SPEC is None:
self._MULTI_NODE_SPEC = {"Div": [self.multi_node_lp_norm]}
return self._MULTI_NODE_SPEC
@property
def single_node_spec(self) -> Dict[str, Callable[[onnx.NodeProto], spec.Spec]]:
if self._SINGLE_NODE_SPEC is not None:
return self._SINGLE_NODE_SPEC
self._SINGLE_NODE_SPEC = {
"Conv": self.conv2d,
"ConvTranspose": self.convtranspose2d,
"MaxPool": self.maxpool2d,
"AveragePool": self.avgpool2d,
"GlobalAveragePool": self.avgpool2d,
"Gemm": self.gemm,
"MatMul": self.matmul,
"DepthToSpace": self.depthtospace,
"Resize": self.resize,
"Add": self.add,
"Sub": self.sub,
"Mul": self.mul,
"Div": self.div,
"Exp": self.exp,
"Sigmoid": self.sigmoid,
"Softplus": self.softplus,
"Gelu": self.gelu,
"ReduceMean": self.reduce_mean,
"ReduceSum": self.reduce_sum,
"ReduceL2": self.reduce_l2,
"Squeeze": self.squeeze,
"Unsqueeze": self.unsqueeze,
"Reshape": self.reshape,
"Expand": self.expand,
"Concat": self.concatenation,
"Transpose": self.transpose,
"Slice": self.slice,
"Flatten": self.flatten,
"Pad": self.pad,
"Split": self.split,
"Softmax": self.softmax,
"Clip": self.clip,
"LayerNormalization": self.layer_norm,
"LpNormalization": self.lp_norm,
}
return self._SINGLE_NODE_SPEC
@staticmethod
def get_tensor_shapes(model: onnx.ModelProto) -> Dict[str, Tuple[int]]:
input_shapes = dict()
for vi in list(model.graph.input) + list(model.graph.output) + list(model.graph.value_info):
shape = [int(dim.dim_value) for dim in vi.type.tensor_type.shape.dim]
input_shapes[vi.name] = tuple(shape)
# include initializer's shape for this is also a node's input
for init in model.graph.initializer:
input_shapes[init.name] = numpy_helper.to_array(init).shape
return input_shapes
@staticmethod
def get_attributes(model: onnx.ModelProto) -> Dict[str, Dict[str, int or float]]:
attributes = dict()
for node in model.graph.node:
attrs = dict()
for attr in node.attribute:
if attr.type == 1:
attrs[attr.name] = attr.f
elif attr.type == 2:
attrs[attr.name] = attr.i
elif attr.type == 3:
attrs[attr.name] = attr.s.decode("utf-8")
elif attr.type == 7:
attrs[attr.name] = attr.ints
else:
raise Exception("Unknown data type: %s" % attr.type)
attributes[node.name] = attrs
return attributes
def get_inputs_for_gen_spec(
self, node: onnx.NodeProto
) -> Tuple[List[Tuple[int]], List[Tuple[int]], Dict]:
input_shapes = []
for input in node.input:
if input == "":
input_shapes.append([])
continue
input_shape = self.tensor_shapes[input]
input_shapes.append(input_shape)
if input in self.initializer.keys():
continue
assert input_shape, "input_shape: %s. shape_inference might have failed at %s" % (
input_shape,
node.name,
)
output_shapes = []
for output in node.output:
output_shape = self.tensor_shapes[output]
output_shapes.append(output_shape)
assert output_shape, "output_shape: %s. shape_inference might have failed at %s" % (
output_shape,
node.name,
)
attrs = self.attributes[node.name]
return input_shapes, output_shapes, attrs
def get_initializer_for_gen_spec(self, input_name: str) -> List[int] or List[float]:
return numpy_helper.to_array(self.initializer[input_name]).tolist()
def conv2d(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, output_shapes, attributes = self.get_inputs_for_gen_spec(node)
input_shape = input_shapes[0]
output_shape = output_shapes[0]
# TODO assert -> warning. refer to https://docs.python.org/3/tutorial/errors.html#user-defined-exceptions
# ONNX Conv assumes n-d array as its kernel.
assert len(attributes["kernel_shape"]) == 2
operator_spec_option = spec.Conv2d(
input=HeightWidth(input_shape[2], input_shape[3]),
kernel=HeightWidth(*attributes["kernel_shape"]),
stride=HeightWidth(*attributes.get("strides", (1, 1))),
dilation=HeightWidth(*attributes.get("dilations", (1, 1))),
batch=input_shape[0],
input_channel=input_shape[1],
output_channel=output_shape[1],
groups=attributes.get("group", 1),
padding=Padding(*attributes.get("pads", (0, 0, 0, 0))),
)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def convtranspose2d(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, output_shapes, attributes = self.get_inputs_for_gen_spec(node)
input_shape = input_shapes[0]
output_shape = output_shapes[0]
# TODO assert -> warning. refer to https://docs.python.org/3/tutorial/errors.html#user-defined-exceptions
# ONNX Conv assumes n-d array as its kernel.
assert len(attributes["kernel_shape"]) == 2
operator_spec_option = spec.TrasnposeConv(
input=HeightWidth(input_shape[2], input_shape[3]),
kernel=HeightWidth(*attributes["kernel_shape"]),
stride=HeightWidth(*attributes.get("strides", (1, 1))),
dilation=HeightWidth(*attributes.get("dilations", (1, 1))),
batch=input_shape[0],
input_channel=input_shape[1],
output_channel=output_shape[1],
groups=attributes.get("group", 1),
padding=Padding(*attributes.get("pads", (0, 0, 0, 0))),
)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def maxpool2d(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, output_shapes, attributes = self.get_inputs_for_gen_spec(node)
assert len(input_shapes) == len(output_shapes) == 1
input_shape = input_shapes[0]
output_shape = output_shapes[0]
# ONNX MaxPool assumes n-d array as its kernel.
assert len(attributes["kernel_shape"]) == 2
operator_spec_option = spec.MaxPool2d(
input=HeightWidth(input_shape[2], input_shape[3]),
kernel=HeightWidth(*attributes["kernel_shape"]),
stride=HeightWidth(*attributes.get("strides", (1, 1))),
dilation=HeightWidth(*attributes.get("dilations", (1, 1))),
batch=input_shape[0],
channel=output_shape[1],
padding=Padding(*attributes.get("pads", (0, 0, 0, 0))),
)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def avgpool2d(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, output_shapes, attributes = self.get_inputs_for_gen_spec(node)
assert len(input_shapes) == len(output_shapes) == 1
input_shape = input_shapes[0]
output_shape = output_shapes[0]
# ONNX AveragePool assumes n-d array as its kernel.
if node.op_type == "AveragePool":
assert len(attributes["kernel_shape"]) == 2
elif node.op_type == "GlobalAveragePool":
attributes = {"kernel_shape": (input_shape[2:])}
operator_spec_option = spec.AveragePool2d(
input=HeightWidth(input_shape[2], input_shape[3]),
kernel=HeightWidth(*attributes["kernel_shape"]),
stride=HeightWidth(*attributes.get("strides", (1, 1))),
dilation=HeightWidth(*attributes.get("dilations", (1, 1))),
batch=input_shape[0],
channel=output_shape[1],
padding=Padding(*attributes.get("pads", (0, 0, 0, 0))),
)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def gemm(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, attributes = self.get_inputs_for_gen_spec(node)
alpha = attributes.get("alpha", float(1.0))
beta = attributes.get("beta", float(1.0))
m, k, n = spec_utils.gemm_shapes(
input_shapes, attributes.get("transA", int(0)), attributes.get("transB", int(0))
)
operator_spec_option = spec.Gemm(alpha=alpha, beta=beta, m=m, k=k, n=n)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def matmul(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, _ = self.get_inputs_for_gen_spec(node)
assert len(input_shapes) == 2
lhs_shape, rhs_shape = [*input_shapes[0]], [*input_shapes[1]]
operator_spec_option = spec.MatMul(lhs_shape=lhs_shape, rhs_shape=rhs_shape)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def depthtospace(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, attributes = self.get_inputs_for_gen_spec(node)
assert len(input_shapes) == 1
input_shape = input_shapes[0]
mode = attributes.get("mode", "DCR")
if mode == "CRD":
mode = "ColumnRowDepth"
elif mode == "DCR":
mode = "DepthColumnRow"
else:
raise Exception('Unknown mode: %s. Mode must be one of "DCR" or "CRD".' % mode)
operator_spec_option = spec.DepthToSpace(
batch=input_shape[0],
height=input_shape[2],
width=input_shape[3],
channel=input_shape[1],
block_size=attributes["blocksize"],
mode=mode,
)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def resize(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, _ = self.get_inputs_for_gen_spec(node)
input_shape = input_shapes[0]
roi = self.get_initializer_for_gen_spec(node.input[1])
scales = self.get_initializer_for_gen_spec(node.input[2])
try:
sizes = self.get_initializer_for_gen_spec(node.input[3])
except IndexError:
sizes = []
operator_spec_option = spec.Resize(
shape=[*input_shape], roi=roi, scales=scales, sizes=sizes
)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def add(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, _ = self.get_inputs_for_gen_spec(node)
input_shape = input_shapes[0]
operator_spec_option = spec.Add(shape=[*input_shape])
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def sub(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, _ = self.get_inputs_for_gen_spec(node)
input_shape = input_shapes[0]
operator_spec_option = spec.Sub(shape=[*input_shape])
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def mul(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, _ = self.get_inputs_for_gen_spec(node)
input_shape = input_shapes[0]
operator_spec_option = spec.Mul(shape=[*input_shape])
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def div(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, _ = self.get_inputs_for_gen_spec(node)
input_shape = input_shapes[0]
operator_spec_option = spec.Div(shape=[*input_shape])
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def exp(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, _ = self.get_inputs_for_gen_spec(node)
assert len(input_shapes) == 1
input_shape = input_shapes[0]
operator_spec_option = spec.Exp(shape=[*input_shape])
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def sigmoid(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, _ = self.get_inputs_for_gen_spec(node)
assert len(input_shapes) == 1
input_shape = input_shapes[0]
operator_spec_option = spec.Sigmoid(shape=[*input_shape])
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def softplus(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, _ = self.get_inputs_for_gen_spec(node)
assert len(input_shapes) == 1
input_shape = input_shapes[0]
operator_spec_option = spec.Softplus(input_shape=[*input_shape])
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def gelu(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, _ = self.get_inputs_for_gen_spec(node)
assert len(input_shapes) == 1
input_shape = input_shapes[0]
operator_spec_option = spec.Gelu(shape=[*input_shape])
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def reduce_mean(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, attributes = self.get_inputs_for_gen_spec(node)
assert len(input_shapes) == 1
input_shape = input_shapes[0]
operator_spec_option = spec.ReduceMean(
shape=[*input_shape],
axes=spec_utils.implicit_axis_to_explicit([*attributes["axes"]], input_shape),
)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def reduce_sum(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, attributes = self.get_inputs_for_gen_spec(node)
assert len(input_shapes) == 1
input_shape = input_shapes[0]
operator_spec_option = spec.ReduceSum(
shape=[*input_shape],
axes=spec_utils.implicit_axis_to_explicit([*attributes["axes"]], input_shape),
)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def reduce_l2(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, attributes = self.get_inputs_for_gen_spec(node)
assert len(input_shapes) == 1
input_shape = input_shapes[0]
operator_spec_option = spec.ReduceL2(
shape=[*input_shape],
axes=spec_utils.implicit_axis_to_explicit([*attributes["axes"]], input_shape),
)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def squeeze(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, attributes = self.get_inputs_for_gen_spec(node)
assert len(input_shapes) == 1
input_shape = input_shapes[0]
operator_spec_option = spec.Squeeze(
shape=[*input_shape],
axes=spec_utils.implicit_axis_to_explicit([*attributes["axes"]], input_shape),
)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def unsqueeze(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, attributes = self.get_inputs_for_gen_spec(node)
assert len(input_shapes) == 1
input_shape = input_shapes[0]
operator_spec_option = spec.Unsqueeze(
shape=[*input_shape],
axes=spec_utils.implicit_axis_to_explicit([*attributes["axes"]], input_shape),
)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def reshape(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, output_shapes, _ = self.get_inputs_for_gen_spec(node)
input_shape = input_shapes[0]
output_shape = output_shapes[0]
operator_spec_option = spec.Reshape(
input_shape=[*input_shape], output_shape=[*output_shape]
)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def expand(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, output_shapes, _ = self.get_inputs_for_gen_spec(node)
input_shape = input_shapes[0]
output_shape = output_shapes[0]
operator_spec_option = spec.Expand(input_shape=[*input_shape], output_shape=[*output_shape])
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def concatenation(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, attributes = self.get_inputs_for_gen_spec(node)
operator_spec_option = spec.Concatenation(
tensors=list(map(list, input_shapes)),
axis=spec_utils.implicit_axis_to_explicit(attributes["axis"], input_shapes[0]),
)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def transpose(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, attributes = self.get_inputs_for_gen_spec(node)
assert len(input_shapes) == 1
input_shape = input_shapes[0]
operator_spec_option = spec.Transpose(
shape=[*input_shape],
permutation=spec_utils.implicit_axis_to_explicit([*attributes["perm"]], input_shape),
)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def slice(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, _ = self.get_inputs_for_gen_spec(node)
input_shape = input_shapes[0]
starts = self.get_initializer_for_gen_spec(node.input[1])
axes = self.get_initializer_for_gen_spec(node.input[3])
operator_spec_option = spec.Slice(
shape=[*input_shape], offset=spec_utils.slice_offset_dict(starts, axes, input_shape)
)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def flatten(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, attributes = self.get_inputs_for_gen_spec(node)
assert len(input_shapes) == 1
input_shape = input_shapes[0]
operator_spec_option = spec.Flatten(
shape=[*input_shape],
axis=spec_utils.implicit_axis_to_explicit(attributes["axis"], input_shape),
)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def pad(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, _ = self.get_inputs_for_gen_spec(node)
input_shape = input_shapes[0]
assert len(input_shape) == 4
pads = self.get_initializer_for_gen_spec(node.input[1])
operator_spec_option = spec.Pad(shape=[*input_shape], pad=spec_utils.horizontal_pads(*pads))
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def layer_norm(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, attributes = self.get_inputs_for_gen_spec(node)
operator_spec_option = spec.LayerNorm(
input_shape=[*input_shapes[0]], eps=attributes["epsilon"]
)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def split(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, attributes = self.get_inputs_for_gen_spec(node)
assert len(input_shapes) == 1
input_shape = input_shapes[0]
operator_spec_option = spec.Split(
shape=[*input_shape],
split=[*attributes["split"]],
axis=spec_utils.implicit_axis_to_explicit(attributes.get("axis", 0), input_shape),
)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def softmax(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, attributes = self.get_inputs_for_gen_spec(node)
assert len(input_shapes) == 1
input_shape = input_shapes[0]
operator_spec_option = spec.Softmax(
input_shape=[*input_shape],
beta=attributes.get("beta", float(1.0)),
axis=spec_utils.implicit_axis_to_explicit(attributes["axis"], input_shape),
)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def clip(self, node: onnx.NodeProto) -> spec.Spec:
input_shapes, _, _ = self.get_inputs_for_gen_spec(node)
input_shape = input_shapes[0]
kwargs = {}
if node.attribute:
for attr in node.attribute:
if attr.name == "min":
kwargs["min"] = float(attr.f)
elif attr.name == "max":
kwargs["max"] = float(attr.f)
else:
assert len(node.input) == 3
for idx, node_input in enumerate(node.input):
if idx == 1:
try:
kwargs["min"] = float(numpy_helper.to_array(self.initializer[node_input]))
except KeyError:
kwargs["min"] = None
elif idx == 2:
try:
kwargs["max"] = float(numpy_helper.to_array(self.initializer[node_input]))
except KeyError:
kwargs["max"] = None
if not kwargs:
raise Exception("Empty min and/or max.")
operator_spec_option = spec.Clip(input_shape=[*input_shape], **kwargs)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def lp_norm(self, node: onnx.NodeProto) -> spec.Spec:
input_shape, _, attrs = self.get_inputs_for_gen_spec(node)
operator_spec_option = spec.LpNorm(input_shape=[*input_shape], **attrs)
return spec.Spec(spec_utils.node_identifier(node), operator_spec_option)
def multi_node_lp_norm(self, node: onnx.NodeProto) -> Optional[Tuple[spec.Spec, List[str]]]:
"""
Starts from a 'Div' node and traverses up to find the L2-normalization pattern.
Returns all inputs of the pattern, which consists of multiple nodes.
LpNormalization is not defined in the ONNX operator spec, so the graph has to be traversed:
Input --> ReduceL2 --> Clip --> Expand --> Div --> Output
Input ------------------------------------> Div
"""
inputs_of_lp_norm: List[str] = []
for input in node.input:
# exclude input from initializer
if input not in self.producer_map:
continue
prev_node = self.producer_map[input]
if prev_node.op_type != "Expand":
continue
pprev_node = self.producer_map[prev_node.input[0]]
if pprev_node.op_type != "Clip":
continue
ppprev_node = self.producer_map[pprev_node.input[0]]
if ppprev_node.op_type != "ReduceL2":
continue
p = 2
inputs_of_lp_norm.append(ppprev_node.input[0])
input_shapes, _, attributes = self.get_inputs_for_gen_spec(ppprev_node)
axis = attributes["axes"][0]
operator_spec_option = spec.LpNorm(input_shape=[*input_shapes[0]], p=p, axis=axis)
return (
spec.Spec(spec_utils.node_identifier(node), operator_spec_option),
inputs_of_lp_norm,
)
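# Usage sketch: how the exporter above might be driven. "model.onnx" is a
# placeholder; shape inference is run first because get_tensor_shapes() relies
# on populated value_info entries.
if __name__ == "__main__":
    from onnx import shape_inference

    inferred = shape_inference.infer_shapes(onnx.load("model.onnx"))
    exporter = OnnxExportSpec(inferred)
    specs, unsupported = exporter.export()
    logger.info("exported %d operator specs, unsupported ops: %s", len(specs), sorted(unsupported))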
| 41.813988
| 113
| 0.621801
|
136f58749aa59a30ae701af8db839f3e744b2b7a
| 3,092
|
py
|
Python
|
rnpix/pkcli/dedup.py
|
robnagler/pix
|
8054420fa49645ea0d29404bc4e0567af9be0394
|
[
"Apache-2.0"
] | null | null | null |
rnpix/pkcli/dedup.py
|
robnagler/pix
|
8054420fa49645ea0d29404bc4e0567af9be0394
|
[
"Apache-2.0"
] | 4
|
2020-05-06T14:05:22.000Z
|
2021-07-20T20:49:06.000Z
|
rnpix/pkcli/dedup.py
|
robnagler/rnpix
|
93237c1bd949d806ee21f9eef4bdae0d8b261d4c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
u"""deduplicate
:copyright: Copyright (c) 2021 Robert Nagler. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdlog, pkdp
import dbm.ndbm
import hashlib
import os
import pykern.pkio
import re
import rnpix.common
import subprocess
import time
def find(path, nowrite=False, overwrite=False, skip=''):
"""deduplicate images using $RNPIX_ROOT/dedup.db
"""
r = rnpix.common.root()
i = 0
if skip:
skip = pykern.pkio.py_path(skip)
with dbm.ndbm.open(
str(pykern.pkio.py_path(r).join('dedup')),
'c',
) as m:
for p in _walk(path):
if skip:
if p == skip:
skip = None
continue
i += 1
if i % 10 == 0:
print('#sleep 2')
time.sleep(2)
s, p = _signature(p)
if s in m and not overwrite:
o = pykern.pkio.py_path(m[s].decode())
if o == p:
# same path
continue
if (
o.dirname == p.dirname
and o.purebasename.startswith(p.purebasename)
):
# remove original, because longer (e.g. x-1.jpg)
m[s] = str(p).encode()
p = o
x = f'"{p}"' if "'" in str(p) else f"'{p}'"
print(f'#OLD {m[s].decode()}\nrm {x}')
else:
print(f'#NEW {p}')
if not nowrite:
m[s] = str(p).encode()
def not_in_db(path):
"""list images under path that are missing from $RNPIX_ROOT/dedup.db
"""
r = rnpix.common.root()
with dbm.ndbm.open(
str(pykern.pkio.py_path(r).join('dedup')),
'r',
) as m:
v = set([m[k] for k in m.keys()])
for p in _walk(path, print_cd=False):
if str(p).encode() not in v:
print(p)
def _signature(path):
if path.ext.lower() in ('.jpg', '.jpeg'):
try:
return (subprocess.check_output(('identify', '-format', '%#', str(path))), path)
except subprocess.CalledProcessError:
# weird thing: bunch of JPG files that are quicktime movies
if b'QuickTime movie' not in subprocess.check_output(('file', str(path))):
raise
n = path.new(ext='.mov')
assert not n.exists()
path.rename(n)
path = n
return (hashlib.md5(path.read_binary()).digest(), path)
def _walk(path, print_cd=True):
c = ''
for p in pykern.pkio.walk_tree(path):
if (
p.islink()
or rnpix.common.THUMB_DIR.search(p.dirpath().basename)
or not rnpix.common.IMAGE_SUFFIX.search(p.basename)
):
continue
if print_cd and c != p.dirname:
print(f'#CD {p.dirname}')
c = p.dirname
yield p
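# Usage sketch: find() and not_in_db() are pykern pkcli commands, but they can
# also be called directly. The path below is a placeholder, and $RNPIX_ROOT must
# point at the directory that holds dedup.db; with nowrite=True nothing is
# recorded, and find() only prints shell commands for suspected duplicates.
if __name__ == '__main__':
    find('/home/user/Pictures/2021', nowrite=True)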
| 30.019417
| 92
| 0.51229
|
33d3657d837cb2ce5fb6831019f02d971c34debb
| 14,066
|
py
|
Python
|
python/cuml/common/array.py
|
Nicholas-7/cuml
|
324d4490dc5254e1188d1678e704622eb69678cb
|
[
"Apache-2.0"
] | null | null | null |
python/cuml/common/array.py
|
Nicholas-7/cuml
|
324d4490dc5254e1188d1678e704622eb69678cb
|
[
"Apache-2.0"
] | null | null | null |
python/cuml/common/array.py
|
Nicholas-7/cuml
|
324d4490dc5254e1188d1678e704622eb69678cb
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cupy as cp
import numpy as np
import operator
import nvtx
from rmm import DeviceBuffer
from cudf import DataFrame
from cudf import Series
from cudf.core.buffer import Buffer
from cuml.common.memory_utils import with_cupy_rmm
from cuml.common.memory_utils import _get_size_from_shape
from cuml.common.memory_utils import _order_to_strides
from cuml.common.memory_utils import _strides_to_order
from cuml.common.memory_utils import class_with_cupy_rmm
from numba import cuda
@class_with_cupy_rmm(ignore_pattern=["serialize"])
class CumlArray(Buffer):
"""
Array represents an abstracted array allocation. It can be instantiated by
itself, creating an rmm.DeviceBuffer underneath, or from
``__cuda_array_interface__`` or ``__array_interface__`` compliant arrays,
in which case it keeps a reference to that data underneath. It can also be
created from a pointer by specifying the characteristics of the array; in
that case, the owner of the data referred to by the pointer should be
specified explicitly.
Parameters
----------
data : rmm.DeviceBuffer, cudf.Buffer, array_like, int, bytes, bytearray or\
memoryview
An array-like object or integer representing a
device or host pointer to pre-allocated memory.
owner : object, optional
Python object to which the lifetime of the memory
allocation is tied. If provided, a reference to this
object is kept in this Buffer.
dtype : data-type, optional
Any object that can be interpreted as a numpy or cupy data type.
shape : int or tuple of ints, optional
Shape of created array.
order: string, optional
Whether to create a F-major or C-major array.
Attributes
----------
ptr : int
Pointer to the data
size : int
Size of the array data in bytes
_owner : Python Object
Object that owns the data of the array
shape : tuple of ints
Shape of the array
order : {'F', 'C'}
'F' or 'C' to indicate Fortran-major or C-major order of the array
strides : tuple of ints
Strides of the data
__cuda_array_interface__ : dictionary
``__cuda_array_interface__`` to interop with other libraries.
Notes
-----
cuml Array is not meant as an end-user array library. It is meant for
cuML/RAPIDS developer consumption. Therefore it contains the minimum
functionality. Its functionality is hidden by base.pyx to provide
automatic output format conversion so that the users see the important
attributes in whatever format they prefer.
Todo: support cuda streams in the constructor. See:
https://github.com/rapidsai/cuml/issues/1712
https://github.com/rapidsai/cuml/pull/1396
"""
@nvtx.annotate(message="common.CumlArray.__init__", category="utils",
domain="cuml_python")
def __init__(self, data=None, owner=None, dtype=None, shape=None,
order=None):
# Checks of parameters
memview_construction = False
if data is None:
raise TypeError("To create an empty Array, use the class method" +
" Array.empty().")
elif isinstance(data, memoryview):
data = np.asarray(data)
memview_construction = True
if dtype is not None:
dtype = np.dtype(dtype)
if _check_low_level_type(data):
if dtype is None or shape is None or order is None:
raise TypeError("Need to specify dtype, shape and order when" +
" creating an Array from {}."
.format(type(data)))
detailed_construction = True
elif dtype is not None and shape is not None and order is not None:
detailed_construction = True
else:
# Catch a likely developer error if CumlArray is created
# incorrectly
assert dtype is None and shape is None and order is None, \
("Creating array from array-like object. The arguments "
"`dtype`, `shape` and `order` should be `None`.")
detailed_construction = False
ary_interface = False
# Base class (Buffer) constructor call
size, shape = _get_size_from_shape(shape, dtype)
if not memview_construction and not detailed_construction:
# Convert to cupy array and manually specify the ptr, size and
# owner. This is to avoid the restriction on Buffer that requires
# all data be u8
cupy_data = cp.asarray(data)
flattened_data = cupy_data.data.ptr
# Size for Buffer is not the same as for cupy. Use nbytes
size = cupy_data.nbytes
owner = cupy_data if cupy_data.flags.owndata else data
else:
flattened_data = data
super().__init__(data=flattened_data,
owner=owner,
size=size)
# Post processing of meta data
if detailed_construction:
self.shape = shape
self.dtype = dtype
self.order = order
self.strides = _order_to_strides(order, shape, dtype)
elif hasattr(data, "__array_interface__"):
ary_interface = data.__array_interface__
elif hasattr(data, "__cuda_array_interface__"):
ary_interface = data.__cuda_array_interface__
else:
raise TypeError("Unrecognized data type: %s" % str(type(data)))
if ary_interface:
self.shape = ary_interface['shape']
self.dtype = np.dtype(ary_interface['typestr'])
if ary_interface.get('strides', None) is None:
self.order = 'C'
self.strides = _order_to_strides(self.order, self.shape,
self.dtype)
else:
self.strides = ary_interface['strides']
self.order = _strides_to_order(self.strides, self.dtype)
@with_cupy_rmm
def __getitem__(self, slice):
return CumlArray(data=cp.asarray(self).__getitem__(slice))
def __setitem__(self, slice, value):
cp.asarray(self).__setitem__(slice, value)
def __len__(self):
return self.shape[0]
def _operator_overload(self, other, fn):
return CumlArray(fn(self.to_output('cupy'), other))
def __add__(self, other):
return self._operator_overload(other, operator.add)
def __sub__(self, other):
return self._operator_overload(other, operator.sub)
@property
def __cuda_array_interface__(self):
output = {
"shape": self.shape,
"strides": self.strides,
"typestr": self.dtype.str,
"data": (self.ptr, False),
"version": 2,
}
return output
def item(self):
return cp.asarray(self).item()
@nvtx.annotate(message="common.CumlArray.to_output", category="utils",
domain="cuml_python")
def to_output(self, output_type='cupy', output_dtype=None):
"""
Convert array to output format
Parameters
----------
output_type : string
Format to convert the array to. Acceptable formats are:
- 'cupy' - to cupy array
- 'numpy' - to numpy (host) array
- 'numba' - to numba device array
- 'dataframe' - to cuDF DataFrame
- 'series' - to cuDF Series
- 'cudf' - to cuDF Series if array is single dimensional, to
DataFrame otherwise
output_dtype : string, optional
Optionally cast the array to a specified dtype, creating
a copy if necessary.
"""
if output_dtype is None:
output_dtype = self.dtype
# check to translate cudf to actual type converted
if output_type == 'cudf':
if len(self.shape) == 1:
output_type = 'series'
elif self.shape[1] == 1:
output_type = 'series'
else:
output_type = 'dataframe'
assert output_type != "mirror"
if output_type == 'cupy':
return cp.asarray(self, dtype=output_dtype)
elif output_type == 'numba':
return cuda.as_cuda_array(cp.asarray(self, dtype=output_dtype))
elif output_type == 'numpy':
return cp.asnumpy(
cp.asarray(self, dtype=output_dtype), order=self.order
)
elif output_type == 'dataframe':
if self.dtype not in [np.uint8, np.uint16, np.uint32,
np.uint64, np.float16]:
mat = cp.asarray(self, dtype=output_dtype)
if len(mat.shape) == 1:
mat = mat.reshape(mat.shape[0], 1)
return DataFrame(mat)
else:
raise ValueError('cuDF unsupported Array dtype')
elif output_type == 'series':
# check needed in case output_type was passed as 'series'
# directly instead of as 'cudf'
if len(self.shape) == 1:
if self.dtype not in [np.uint8, np.uint16, np.uint32,
np.uint64, np.float16]:
return Series(self, dtype=output_dtype)
else:
raise ValueError('cuDF unsupported Array dtype')
elif self.shape[1] > 1:
raise ValueError('Only single dimensional arrays can be '
'transformed to cuDF Series. ')
else:
if self.dtype not in [np.uint8, np.uint16, np.uint32,
np.uint64, np.float16]:
return Series(self, dtype=output_dtype)
else:
raise ValueError('cuDF unsupported Array dtype')
return self
@nvtx.annotate(message="common.CumlArray.serialize", category="utils",
domain="cuml_python")
def serialize(self):
header, frames = super().serialize()
header["constructor-kwargs"] = {
"dtype": self.dtype.str,
"shape": self.shape,
"order": self.order,
}
frames = [Buffer(f) for f in frames]
return header, frames
@classmethod
@nvtx.annotate(message="common.CumlArray.empty", category="utils",
domain="cuml_python")
def empty(cls, shape, dtype, order='F'):
"""
Create an empty Array with an allocated but uninitialized DeviceBuffer
Parameters
----------
dtype : data-type, optional
Any object that can be interpreted as a numpy or cupy data type.
shape : int or tuple of ints, optional
Shape of created array.
order: string, optional
Whether to create a F-major or C-major array.
"""
return CumlArray(cp.empty(shape, dtype, order))
@classmethod
@nvtx.annotate(message="common.CumlArray.full", category="utils",
domain="cuml_python")
def full(cls, shape, value, dtype, order='F'):
"""
Create an Array with an allocated DeviceBuffer initialized to value.
Parameters
----------
dtype : data-type, optional
Any object that can be interpreted as a numpy or cupy data type.
shape : int or tuple of ints, optional
Shape of created array.
order: string, optional
Whether to create a F-major or C-major array.
"""
return CumlArray(cp.full(shape, value, dtype, order))
@classmethod
@nvtx.annotate(message="common.CumlArray.zeros", category="utils",
domain="cuml_python")
def zeros(cls, shape, dtype='float32', order='F'):
"""
Create an Array with an allocated DeviceBuffer initialized to zeros.
Parameters
----------
dtype : data-type, optional
Any object that can be interpreted as a numpy or cupy data type.
shape : int or tuple of ints, optional
Shape of created array.
order: string, optional
Whether to create a F-major or C-major array.
"""
return CumlArray.full(value=0, shape=shape, dtype=dtype, order=order)
@classmethod
@nvtx.annotate(message="common.CumlArray.ones", category="utils",
domain="cuml_python")
def ones(cls, shape, dtype='float32', order='F'):
"""
Create an Array with an allocated DeviceBuffer initialized to ones.
Parameters
----------
dtype : data-type, optional
Any object that can be interpreted as a numpy or cupy data type.
shape : int or tuple of ints, optional
Shape of created array.
order: string, optional
Whether to create a F-major or C-major array.
"""
return CumlArray.full(value=1, shape=shape, dtype=dtype, order=order)
def _check_low_level_type(data):
if isinstance(data, CumlArray):
return False
elif not (
hasattr(data, "__array_interface__")
or hasattr(data, "__cuda_array_interface__")
) or isinstance(data, (DeviceBuffer, Buffer)):
return True
else:
return False
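# Usage sketch: the conversion paths documented in to_output() above, assuming a
# working CUDA device and the RMM/cuDF stack that cuML normally provides.
if __name__ == "__main__":
    ary = CumlArray(cp.arange(12, dtype="float32").reshape(3, 4))
    host = ary.to_output("numpy")       # host copy as a numpy array
    frame = ary.to_output("dataframe")  # cuDF DataFrame
    print(ary.shape, ary.dtype, ary.order, host.sum(), len(frame))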
| 36.066667
| 79
| 0.601948
|
683659e9a812f3078a95435649e54e3e9d39f50a
| 36
|
py
|
Python
|
algoritmos/PythonM2/ex070.py
|
MiguelTeixeiraUFPB/PythonM2
|
1ee07879b141eae4c4edd5f4ac43002b11167b2f
|
[
"MIT"
] | null | null | null |
algoritmos/PythonM2/ex070.py
|
MiguelTeixeiraUFPB/PythonM2
|
1ee07879b141eae4c4edd5f4ac43002b11167b2f
|
[
"MIT"
] | null | null | null |
algoritmos/PythonM2/ex070.py
|
MiguelTeixeiraUFPB/PythonM2
|
1ee07879b141eae4c4edd5f4ac43002b11167b2f
|
[
"MIT"
] | null | null | null |
n='a'
if n[0] in 'a':
print('sim')
| 12
| 16
| 0.472222
|
8213a6035d2b9f1e6022c6e1df4e43ee24e01380
| 109
|
py
|
Python
|
CurrencyConverter/apps.py
|
route2kernel/omen
|
62521690ead4341a562d27da7bceb45eae8b778f
|
[
"MIT"
] | null | null | null |
CurrencyConverter/apps.py
|
route2kernel/omen
|
62521690ead4341a562d27da7bceb45eae8b778f
|
[
"MIT"
] | null | null | null |
CurrencyConverter/apps.py
|
route2kernel/omen
|
62521690ead4341a562d27da7bceb45eae8b778f
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class CurrencyconverterConfig(AppConfig):
name = 'CurrencyConverter'
| 18.166667
| 41
| 0.798165
|
8e53663e5b4c0dd347d02d61d7bd30e0f41a8f48
| 1,134
|
py
|
Python
|
python3.4Smartforest/lib/python3.4/site-packages/django/contrib/sites/migrations/0001_initial.py
|
letouriste001/SmartForest_2.0
|
109b78bf1e8c8404800f377ab969395ccbb617be
|
[
"MIT"
] | null | null | null |
python3.4Smartforest/lib/python3.4/site-packages/django/contrib/sites/migrations/0001_initial.py
|
letouriste001/SmartForest_2.0
|
109b78bf1e8c8404800f377ab969395ccbb617be
|
[
"MIT"
] | null | null | null |
python3.4Smartforest/lib/python3.4/site-packages/django/contrib/sites/migrations/0001_initial.py
|
letouriste001/SmartForest_2.0
|
109b78bf1e8c8404800f377ab969395ccbb617be
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.contrib.sites.models
from django.contrib.sites.models import _simple_domain_name_validator
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name='Site',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('domain', models.CharField(
max_length=100, verbose_name='domain name', validators=[_simple_domain_name_validator]
)),
('name', models.CharField(max_length=50, verbose_name='display name')),
],
options={
'ordering': ('domain',),
'db_table': 'django_site',
'verbose_name': 'site',
'verbose_name_plural': 'sites',
},
bases=(models.Model,),
managers=[
('objects', django.contrib.sites.models.SiteManager()),
],
),
]
| 1,134
| 1,134
| 0.559083
|
62776184aa4be230446ffd4e518077270ffe1293
| 1,326
|
py
|
Python
|
app/app.py
|
ansnadeem/aic
|
76b7e264df2aad76b7c3e94171334f3da599b9b0
|
[
"MIT"
] | null | null | null |
app/app.py
|
ansnadeem/aic
|
76b7e264df2aad76b7c3e94171334f3da599b9b0
|
[
"MIT"
] | 2
|
2021-10-10T20:53:14.000Z
|
2021-10-14T07:15:03.000Z
|
app/app.py
|
ansnadeem/aic
|
76b7e264df2aad76b7c3e94171334f3da599b9b0
|
[
"MIT"
] | null | null | null |
from flask import request
from flask import Flask
from flask import render_template
from engine import AICEngine
from dotenv import dotenv_values
from dotenv import load_dotenv
from utils import constants
aicEngine = None
def initialize():
global aicEngine
global app
load_dotenv()
config = dotenv_values(constants.DOT_ENV_PATH)
aicEngine = AICEngine(config)
app = Flask(__name__, template_folder=constants.TEMPLATES_FOLDER)
initialize()
@app.route('/')
def index():
return render_template('home.html')
@app.route('/githublistener', methods = ['POST'])
def listenAPI():
global aicEngine
response = {}
response[constants.RESPONSE_RESULT] = None
response[constants.RESPONSE_STATUS] = constants.HTTP_OK
data = request.get_json(True)
if (data[constants.GITHUB_ACTION] == constants.OPEN_STATUS):
issueText = data[constants.ISSUE][constants.TITLE] + " " + data[constants.ISSUE][constants.BODY]
predicted_labels = aicEngine.predict(issueText)
issue_url = data[constants.ISSUE][constants.URL]
installation_id = data[constants.INSTALLATION][constants.ID]
aicEngine.assign(predicted_labels, installation_id, issue_url)
response[constants.RESPONSE_RESULT] = predicted_labels
return response
| 30.837209
| 106
| 0.719457
|
13206c444b68f30d527aef0faeea26510285e7c6
| 1,619
|
py
|
Python
|
src/check_grades.py
|
troylar/eclass-for-alexa
|
999b04d48b77587818485ae9869f3d029a2c9ce3
|
[
"MIT"
] | null | null | null |
src/check_grades.py
|
troylar/eclass-for-alexa
|
999b04d48b77587818485ae9869f3d029a2c9ce3
|
[
"MIT"
] | 2
|
2021-03-31T18:39:53.000Z
|
2021-06-01T21:52:00.000Z
|
src/check_grades.py
|
troylar/eclass-for-alexa
|
999b04d48b77587818485ae9869f3d029a2c9ce3
|
[
"MIT"
] | null | null | null |
import json
from bs4 import BeautifulSoup
from twill.commands import *
import os
import twill
import boto3
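# Overview (added comment): Alexa skill Lambda handler. on_intent() dispatches
# the CheckGrades intent to check_grades(), which logs in to the Gwinnett
# County parent portal with twill, scrapes the dashboard with BeautifulSoup,
# and collects each subject's grade into the `grades` dict.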
def lambda_handler(event, context):
if event['request']['type'] == "IntentRequest":
        return on_intent(event['request'], event['session'])
return 'Hello from Lambda'
def on_intent(intent_request, session):
# print("on_intent requestId=" + intent_request['requestId'] +
# ", sessionId=" + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
if intent_name == "CheckGrades":
return check_grades(intent, session)
else:
raise ValueError("Invalid intent")
def check_grades(intent, session):
session_attributes={}
student = intent['slots']['student']['value'].lower()
card_title="Check grades for %s" % (student)
username=""
password=""
f = open(os.devnull,"w")
twill.set_output(f)
go('https://apps.gwinnett.k12.ga.us/dca/app/logout')
go('https://publish.gwinnett.k12.ga.us/gcps/home/gcpslogin')
fv("2", "portalUserID", username)
fv("2", "portalPassword", password)
submit()
go('https://apps.gwinnett.k12.ga.us/dca/student/dashboard')
soup = BeautifulSoup(show(),"lxml")
subjects = soup.findAll('span', { "class": "subject" })
percentages = soup.findAll('span', { "class": ["percentage","lettergrade"] })
grades={}
i = 0
for subject in subjects:
if (percentages[i].contents):
grades[subject.renderContents()] = percentages[i].renderContents()
i = i + 1
go('https://apps.gwinnett.k12.ga.us/dca/app/logout')
| 33.040816
| 81
| 0.660902
|
51fcf7eb07bf798a1c79b1e4011aff1f43db8903
| 216
|
py
|
Python
|
shipment/serializers/batch_shipment_serializer.py
|
thegangtechnology/thairod-django
|
b073186a4b5bc42dfef99685b3da30abf8e42862
|
[
"MIT"
] | null | null | null |
shipment/serializers/batch_shipment_serializer.py
|
thegangtechnology/thairod-django
|
b073186a4b5bc42dfef99685b3da30abf8e42862
|
[
"MIT"
] | 3
|
2021-07-27T13:11:36.000Z
|
2021-08-10T22:54:55.000Z
|
shipment/serializers/batch_shipment_serializer.py
|
thegangtechnology/thairod-django
|
b073186a4b5bc42dfef99685b3da30abf8e42862
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from shipment.models import BatchShipment
class BatchShipmentSerializer(serializers.ModelSerializer):
class Meta:
model = BatchShipment
fields = '__all__'
| 24
| 59
| 0.768519
|
17240c80d1c749e5c1243dd13ac48eb8e6d8c4d0
| 1,881
|
py
|
Python
|
research/delf/delf/python/analyze/get_pair.py
|
zijing-wu/models
|
dddcbaad9c9846f66febad8d88ea3af0d5d3acbf
|
[
"Apache-2.0"
] | 2
|
2018-07-05T18:52:53.000Z
|
2018-07-05T18:54:14.000Z
|
research/delf/delf/python/analyze/get_pair.py
|
zijing-wu/models
|
dddcbaad9c9846f66febad8d88ea3af0d5d3acbf
|
[
"Apache-2.0"
] | null | null | null |
research/delf/delf/python/analyze/get_pair.py
|
zijing-wu/models
|
dddcbaad9c9846f66febad8d88ea3af0d5d3acbf
|
[
"Apache-2.0"
] | null | null | null |
import os,re,shutil,sys
from subprocess import PIPE, Popen
from multiprocessing import Pool, Queue, Process, Manager
from itertools import product
from time import sleep
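# Overview (added comment): for the slice of test images selected by the two
# command-line arguments, run match_images.py against every train image's DELF
# features, parse the "Found N inliers" output, and record train images with
# at least 16 inliers into a per-test-image text file under lines/.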
train_feature_path = os.path.join('..','examples','train_features_ds2')
test_feature_path = os.path.join('..','examples','test_features_ds2')
line_pattern = re.compile(r'Found (\d{1,10}) inliers')
name_pattern = re.compile(r'(.*?)\.delf')
train_file,test_file = os.listdir(train_feature_path),os.listdir(test_feature_path)
if not os.path.exists("lines"):
os.mkdir("lines")
des_file = os.path.join("lines")
def execute(test_file, train_file):
os.chdir('../examples')
try:
train = name_pattern.findall(train_file)[0]
test = name_pattern.findall(test_file)[0]
except:
print("There are some errors with %s and %s"%(train_file,test_file))
return
train_feature = os.path.join(train_feature_path,train+'.delf')
test_feature = os.path.join(test_feature_path,test+'.delf')
p = Popen('''python3 match_images.py \
--features_1_path %s \
--features_2_path %s '''%(train_feature,test_feature),shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
print("%s and %s finish"%(train,test))
print(stdout)
print(stderr)
res = line_pattern.findall(str(stderr))
print(res)
os.chdir('../analyze')
if len(res) == 1 :
return res[0]
else:
return 0
argv = sys.argv[1:]
for i in range(int(argv[0]),int(argv[1])):
if i >= len(test_file):
break
des_f = test_file[i][0:-5]
if os.path.exists(os.path.join(des_file,des_f+'.txt')):
continue
with open(os.path.join(des_file,des_f+'.txt'),'w') as file:
for t_file in train_file:
            res = execute(test_file[i], t_file)
            if res is not None and int(res) >= 16:
file.write(t_file[0:-5]+','+str(res)+'\n')
| 30.33871
| 93
| 0.650718
|
c583a862f6419471fbb3d570df0ecf12c7cb14af
| 220
|
py
|
Python
|
Strings/1832. Check if the Sentence Is Pangram.py
|
thewires2/Leetcode
|
a37ff81d60dd9195ba637b970b40aabbea5f4680
|
[
"Unlicense"
] | 1
|
2021-06-30T17:51:56.000Z
|
2021-06-30T17:51:56.000Z
|
Strings/1832. Check if the Sentence Is Pangram.py
|
thewires2/Leetcode
|
a37ff81d60dd9195ba637b970b40aabbea5f4680
|
[
"Unlicense"
] | null | null | null |
Strings/1832. Check if the Sentence Is Pangram.py
|
thewires2/Leetcode
|
a37ff81d60dd9195ba637b970b40aabbea5f4680
|
[
"Unlicense"
] | null | null | null |
class Solution:
def checkIfPangram(self, sentence: str) -> bool:
x="abcdefghijklmnopqrstuvwxyz"
for i in x:
if i not in sentence:
return False
return True
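# Illustrative usage (added comment, not part of the original solution): the
# sentence is a pangram only if every letter a-z appears at least once.
#   Solution().checkIfPangram("thequickbrownfoxjumpsoverthelazydog")  # True
#   Solution().checkIfPangram("leetcode")                             # False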
| 24.444444
| 52
| 0.545455
|
0839ad0e5f420c772974aa0d4b37ac07094c5a78
| 3,245
|
py
|
Python
|
app/app/settings.py
|
sonali-uttekar01/receipe-app-api
|
d06f7cda9e2c516dde1835a5e68e74c3e33b2400
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
sonali-uttekar01/receipe-app-api
|
d06f7cda9e2c516dde1835a5e68e74c3e33b2400
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
sonali-uttekar01/receipe-app-api
|
d06f7cda9e2c516dde1835a5e68e74c3e33b2400
|
[
"MIT"
] | null | null | null |
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4lx^zh^nuc+p2wg*^ki!68_=+vu677w#3fo-7p+y&keqno0f@b'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.user'
| 25.753968
| 91
| 0.688752
|
32d936fc21c284d747f6a37882f102cf2a32a1e5
| 567
|
py
|
Python
|
src/directory-starter/README_text.py
|
hannahweber244/directory-starter
|
0cb12b6e9dfe9c3a6eb5029d7d0b6cb5da52b44b
|
[
"MIT"
] | null | null | null |
src/directory-starter/README_text.py
|
hannahweber244/directory-starter
|
0cb12b6e9dfe9c3a6eb5029d7d0b6cb5da52b44b
|
[
"MIT"
] | null | null | null |
src/directory-starter/README_text.py
|
hannahweber244/directory-starter
|
0cb12b6e9dfe9c3a6eb5029d7d0b6cb5da52b44b
|
[
"MIT"
] | null | null | null |
"""
# [REPO NAME]
## Table of contents
[Here you can use a table of contents to keep your README structured.]
## Overview
[Here you give a short overview over the motivation behind your project and what problem it solves.]
## How to use it
[Here you can explain how your tool/project is usable.]
### Requirements and dependencies
[If there are any requirements or dependencies to use what you developed, you can put those here.]
## Additional information
[Here you can include an overview over the structure of your code, additional information, tests etc.]
"""
| 31.5
| 102
| 0.75485
|
890919851a1e1ecfaffad314fcaa16b358d060de
| 3,979
|
py
|
Python
|
setup.py
|
chalbersma/manowar_agent
|
ea2615f52658da2acfcc8970eca77226ac2de215
|
[
"BSD-2-Clause"
] | 1
|
2019-02-16T03:14:15.000Z
|
2019-02-16T03:14:15.000Z
|
setup.py
|
chalbersma/manowar_agent
|
ea2615f52658da2acfcc8970eca77226ac2de215
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
chalbersma/manowar_agent
|
ea2615f52658da2acfcc8970eca77226ac2de215
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
'''
An attempt to make a ghetto packaging script.
'''
# pylint: skip-file
import setuptools
import sys
import os
import git
import subprocess # nosec
import datetime
current_repo = git.Repo()
if current_repo.bare:
print("Something went wrong Repo is Bare, Failing the Build.")
sys.exit(1)
else:
env_keys = dict(os.environ).keys()
travis_keys = [key for key in env_keys if key.startswith("TRAVIS")]
for key in travis_keys:
print("{} : {}".format(key, os.environ[key]))
travis_repo = os.environ.get("TRAVIS_REPO_SLUG", "NOTRAVIS")
travis_pull_req = os.environ.get("TRAVIS_PULL_REQUEST", "UNKNOWN")
travis_branch = os.environ.get("TRAVIS_BRANCH", "UNKNOWN")
travis_event_type = os.environ.get("TRAVIS_EVENT_TYPE", "UNKNOWN")
travis_tag = os.environ.get("TRAVIS_TAG", "")
travis_build_no = os.environ.get("TRAVIS_BUILD_NUMBER", 0)
print(travis_build_no)
# Set Default Version
version_base = datetime.datetime.today().strftime("%Y.%m.%d")
upload_to_pypi = False
# My Known Good Repository
if travis_repo == "chalbersma/manowar_agent" and len(travis_tag) > 0:
    # Make the version equal the tag
print("Tagged Branch : {}".format(travis_tag))
version = travis_tag
upload_to_pypi = "prod"
elif travis_repo == "chalbersma/manowar_agent":
    # This is in my repo but not a tagged release, so build a dev version
version = "{}-dev{}".format(version_base, travis_build_no)
print("VERSION : {}".format(version))
upload_to_pypi = "stag"
else:
upload_to_pypi = False
version = "{}-dev0".format(version_base)
# Only upload on 3.6.x Matrix
if "3.6" != "{}.{}".format(sys.version_info[0], sys.version_info[1]):
print("Version is : {} which doesn't equal 3.6.x not uploading".format(sys.version_info))
upload_to_pypi = False
if upload_to_pypi is not False and upload_to_pypi == "stag":
os.environ["TWINE_USERNAME"] = os.environ.get("PYPI_STAG_UNAME", "whoidit")
os.environ["TWINE_PASSWORD"] = os.environ.get("PYPI_STAG_PASSWD", "whasit")
twine_cmd = ["twine", "upload", "--repository-url", "https://test.pypi.org/legacy/", "dist/*"]
elif upload_to_pypi is not False and upload_to_pypi == "prod":
os.environ["TWINE_USERNAME"] = os.environ.get("PYPI_PROD_UNAME", "whoidit")
os.environ["TWINE_PASSWORD"] = os.environ.get("PYPI_PROD_PASSWD", "whasit")
twine_cmd = ["twine", "upload", "dist/*"]
else:
# Not Uploading
pass
print("VERSION : {}".format(version))
with open("README.md", "r") as fh:
long_description = fh.read()
# Get Version
setuptools.setup(
name="manowar_agent",
version=version,
author="Chris Halbersma",
author_email="chris+manowar@halbersma.us",
description="Package to Add as a Collector",
license="BSD-2-Clause",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/chalbersma/manowar",
packages=setuptools.find_packages(),
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Security"
],
install_requires=[
"Jinja2",
"pyjq",
"PyYAML",
"requests",
"salt",
"ec2_metadata"
],
scripts=["manowar_agent"],
data_files=[("etc/manowar_agent", ["etc/manowar_agent/collector.yaml", "etc/manowar_agent/minion", "etc/manowar_agent/saltcell.yaml"]),
("etc/manowar_agent/state/_modules/", ["etc/manowar_agent/state/_modules/platpi.py"])
]
)
if upload_to_pypi is not False:
print("Attempting to Upload to PyPi : {}".format(upload_to_pypi))
result = subprocess.check_call(twine_cmd) # nosec
print("Result : {}".format(result))
else:
print("Not attempting to Upload.")
| 31.330709
| 139
| 0.667002
|
84574874af2da6a3056ccfdfcf9b990023c20e5c
| 37,533
|
py
|
Python
|
core/dbt/adapters/base/impl.py
|
donaldrauscher/dbt
|
73d0308e3570b25a7a8c8d32fbcdaaf813c94179
|
[
"Apache-2.0"
] | null | null | null |
core/dbt/adapters/base/impl.py
|
donaldrauscher/dbt
|
73d0308e3570b25a7a8c8d32fbcdaaf813c94179
|
[
"Apache-2.0"
] | null | null | null |
core/dbt/adapters/base/impl.py
|
donaldrauscher/dbt
|
73d0308e3570b25a7a8c8d32fbcdaaf813c94179
|
[
"Apache-2.0"
] | null | null | null |
import abc
from contextlib import contextmanager
from datetime import datetime
from typing import (
Optional, Tuple, Callable, Container, FrozenSet, Type, Dict, Any, List,
Mapping
)
import agate
import pytz
import dbt.exceptions
import dbt.flags
from dbt.clients.agate_helper import empty_table
from dbt.contracts.graph.manifest import Manifest
from dbt.node_types import NodeType
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.utils import filter_null_values
from dbt.adapters.base.connections import BaseConnectionManager
from dbt.adapters.base.meta import AdapterMeta, available
from dbt.adapters.base.relation import ComponentName, BaseRelation
from dbt.adapters.base import Column as BaseColumn
from dbt.adapters.cache import RelationsCache
GET_CATALOG_MACRO_NAME = 'get_catalog'
FRESHNESS_MACRO_NAME = 'collect_freshness'
def _expect_row_value(key: str, row: agate.Row):
if key not in row.keys():
raise dbt.exceptions.InternalException(
'Got a row without "{}" column, columns: {}'
.format(key, row.keys())
)
return row[key]
def _relations_filter_schemas(
schemas: Container[str]
) -> Callable[[agate.Row], bool]:
def test(row):
referenced_schema = _expect_row_value('referenced_schema', row)
dependent_schema = _expect_row_value('dependent_schema', row)
# handle the null schema
if referenced_schema is not None:
referenced_schema = referenced_schema.lower()
if dependent_schema is not None:
dependent_schema = dependent_schema.lower()
return referenced_schema in schemas or dependent_schema in schemas
return test
def _catalog_filter_schemas(manifest: Manifest) -> Callable[[agate.Row], bool]:
"""Return a function that takes a row and decides if the row should be
included in the catalog output.
"""
schemas = frozenset((d.lower(), s.lower())
for d, s in manifest.get_used_schemas())
def test(row: agate.Row) -> bool:
table_database = _expect_row_value('table_database', row)
table_schema = _expect_row_value('table_schema', row)
# the schema may be present but None, which is not an error and should
# be filtered out
if table_schema is None:
return False
return (table_database.lower(), table_schema.lower()) in schemas
return test
def _utc(
dt: Optional[datetime], source: BaseRelation, field_name: str
) -> datetime:
"""If dt has a timezone, return a new datetime that's in UTC. Otherwise,
assume the datetime is already for UTC and add the timezone.
"""
if dt is None:
raise dbt.exceptions.raise_database_error(
"Expected a non-null value when querying field '{}' of table "
" {} but received value 'null' instead".format(
field_name,
source))
elif not hasattr(dt, 'tzinfo'):
raise dbt.exceptions.raise_database_error(
"Expected a timestamp value when querying field '{}' of table "
"{} but received value of type '{}' instead".format(
field_name,
source,
type(dt).__name__))
elif dt.tzinfo:
return dt.astimezone(pytz.UTC)
else:
return dt.replace(tzinfo=pytz.UTC)
def _relation_name(rel: Optional[BaseRelation]) -> str:
if rel is None:
return 'null relation'
else:
return str(rel)
class SchemaSearchMap(dict):
"""A utility class to keep track of what information_schema tables to
search for what schemas
"""
def add(self, relation):
key = relation.information_schema_only()
if key not in self:
self[key] = set()
self[key].add(relation.schema.lower())
def search(self):
for information_schema_name, schemas in self.items():
for schema in schemas:
yield information_schema_name, schema
def schemas_searched(self):
result = set()
for information_schema_name, schemas in self.items():
result.update(
(information_schema_name.database, schema)
for schema in schemas
)
return result
def flatten(self):
new = self.__class__()
database = None
# iterate once to look for a database name
seen = {r.database.lower() for r in self if r.database}
if len(seen) > 1:
dbt.exceptions.raise_compiler_error(str(seen))
elif len(seen) == 1:
database = list(seen)[0]
for information_schema_name, schema in self.search():
new.add(information_schema_name.incorporate(
path={'database': database, 'schema': schema},
quote_policy={'database': False},
include_policy={'database': False},
))
return new
class BaseAdapter(metaclass=AdapterMeta):
"""The BaseAdapter provides an abstract base class for adapters.
Adapters must implement the following methods and macros. Some of the
methods can be safely overridden as a noop, where it makes sense
(transactions on databases that don't support them, for instance). Those
methods are marked with a (passable) in their docstrings. Check docstrings
for type information, etc.
    To implement a macro, implement "${adapter_type}__${macro_name}" in the
adapter's internal project.
Methods:
- exception_handler
- date_function
- list_schemas
- drop_relation
- truncate_relation
- rename_relation
- get_columns_in_relation
- expand_column_types
- list_relations_without_caching
- is_cancelable
- create_schema
- drop_schema
- quote
- convert_text_type
- convert_number_type
- convert_boolean_type
- convert_datetime_type
- convert_date_type
- convert_time_type
Macros:
- get_catalog
"""
Relation: Type[BaseRelation] = BaseRelation
Column: Type[BaseColumn] = BaseColumn
ConnectionManager: Type[BaseConnectionManager]
# A set of clobber config fields accepted by this adapter
# for use in materializations
AdapterSpecificConfigs: FrozenSet[str] = frozenset()
def __init__(self, config):
self.config = config
self.cache = RelationsCache()
self.connections = self.ConnectionManager(config)
self._internal_manifest_lazy: Optional[Manifest] = None
###
# Methods that pass through to the connection manager
###
def acquire_connection(self, name=None):
return self.connections.set_connection_name(name)
def release_connection(self):
return self.connections.release()
def cleanup_connections(self):
return self.connections.cleanup_all()
def clear_transaction(self):
self.connections.clear_transaction()
def commit_if_has_connection(self):
return self.connections.commit_if_has_connection()
def nice_connection_name(self):
conn = self.connections.get_thread_connection()
if conn is None or conn.name is None:
return '<None>'
return conn.name
@contextmanager
def connection_named(self, name):
try:
yield self.acquire_connection(name)
finally:
self.release_connection()
@available.parse(lambda *a, **k: ('', empty_table()))
def execute(
self, sql: str, auto_begin: bool = False, fetch: bool = False
) -> Tuple[str, agate.Table]:
"""Execute the given SQL. This is a thin wrapper around
ConnectionManager.execute.
:param str sql: The sql to execute.
:param bool auto_begin: If set, and dbt is not currently inside a
transaction, automatically begin one.
:param bool fetch: If set, fetch results.
:return: A tuple of the status and the results (empty if fetch=False).
:rtype: Tuple[str, agate.Table]
"""
return self.connections.execute(
sql=sql,
auto_begin=auto_begin,
fetch=fetch
)
###
# Methods that should never be overridden
###
@classmethod
def type(cls) -> str:
"""Get the type of this adapter. Types must be class-unique and
consistent.
:return: The type name
:rtype: str
"""
return cls.ConnectionManager.TYPE
@property
def _internal_manifest(self) -> Manifest:
if self._internal_manifest_lazy is None:
return self.load_internal_manifest()
return self._internal_manifest_lazy
def check_internal_manifest(self) -> Optional[Manifest]:
"""Return the internal manifest (used for executing macros) if it's
been initialized, otherwise return None.
"""
return self._internal_manifest_lazy
def load_internal_manifest(self) -> Manifest:
if self._internal_manifest_lazy is None:
# avoid a circular import
from dbt.parser.manifest import load_internal_manifest
manifest = load_internal_manifest(self.config)
self._internal_manifest_lazy = manifest
return self._internal_manifest_lazy
###
# Caching methods
###
def _schema_is_cached(self, database: str, schema: str):
"""Check if the schema is cached, and by default logs if it is not."""
if dbt.flags.USE_CACHE is False:
return False
elif (database, schema) not in self.cache:
logger.debug(
'On "{}": cache miss for schema "{}.{}", this is inefficient'
.format(self.nice_connection_name(), database, schema)
)
return False
else:
return True
def _get_cache_schemas(
self, manifest: Manifest, exec_only: bool = False
) -> SchemaSearchMap:
"""Get a mapping of each node's "information_schema" relations to a
set of all schemas expected in that information_schema.
There may be keys that are technically duplicates on the database side,
        for example all of '"foo"', 'foo', '"FOO"' and 'FOO' could coexist as
databases, and values could overlap as appropriate. All values are
lowercase strings.
"""
info_schema_name_map = SchemaSearchMap()
for node in manifest.nodes.values():
if exec_only and node.resource_type not in NodeType.executable():
continue
relation = self.Relation.create_from(self.config, node)
info_schema_name_map.add(relation)
# result is a map whose keys are information_schema Relations without
# identifiers that have appropriate database prefixes, and whose values
# are sets of lowercase schema names that are valid members of those
# schemas
return info_schema_name_map
def _relations_cache_for_schemas(self, manifest: Manifest) -> None:
"""Populate the relations cache for the given schemas. Returns an
        iterable of the schemas populated, as strings.
"""
if not dbt.flags.USE_CACHE:
return
info_schema_name_map = self._get_cache_schemas(manifest,
exec_only=True)
for db, schema in info_schema_name_map.search():
for relation in self.list_relations_without_caching(db, schema):
self.cache.add(relation)
# it's possible that there were no relations in some schemas. We want
# to insert the schemas we query into the cache's `.schemas` attribute
# so we can check it later
self.cache.update_schemas(info_schema_name_map.schemas_searched())
def set_relations_cache(
self, manifest: Manifest, clear: bool = False
) -> None:
"""Run a query that gets a populated cache of the relations in the
database and set the cache on this adapter.
"""
if not dbt.flags.USE_CACHE:
return
with self.cache.lock:
if clear:
self.cache.clear()
self._relations_cache_for_schemas(manifest)
@available
def cache_added(self, relation: Optional[BaseRelation]) -> str:
"""Cache a new relation in dbt. It will show up in `list relations`."""
if relation is None:
name = self.nice_connection_name()
dbt.exceptions.raise_compiler_error(
'Attempted to cache a null relation for {}'.format(name)
)
if dbt.flags.USE_CACHE:
self.cache.add(relation)
# so jinja doesn't render things
return ''
@available
def cache_dropped(self, relation: Optional[BaseRelation]) -> str:
"""Drop a relation in dbt. It will no longer show up in
`list relations`, and any bound views will be dropped from the cache
"""
if relation is None:
name = self.nice_connection_name()
dbt.exceptions.raise_compiler_error(
'Attempted to drop a null relation for {}'.format(name)
)
if dbt.flags.USE_CACHE:
self.cache.drop(relation)
return ''
@available
def cache_renamed(
self,
from_relation: Optional[BaseRelation],
to_relation: Optional[BaseRelation],
) -> str:
"""Rename a relation in dbt. It will show up with a new name in
`list_relations`, but bound views will remain bound.
"""
if from_relation is None or to_relation is None:
name = self.nice_connection_name()
src_name = _relation_name(from_relation)
dst_name = _relation_name(to_relation)
dbt.exceptions.raise_compiler_error(
'Attempted to rename {} to {} for {}'
.format(src_name, dst_name, name)
)
if dbt.flags.USE_CACHE:
self.cache.rename(from_relation, to_relation)
return ''
###
# Abstract methods for database-specific values, attributes, and types
###
@abc.abstractclassmethod
def date_function(cls) -> str:
"""Get the date function used by this adapter's database."""
raise dbt.exceptions.NotImplementedException(
'`date_function` is not implemented for this adapter!')
@abc.abstractclassmethod
def is_cancelable(cls) -> bool:
raise dbt.exceptions.NotImplementedException(
'`is_cancelable` is not implemented for this adapter!'
)
###
# Abstract methods about schemas
###
@abc.abstractmethod
def list_schemas(self, database: str) -> List[str]:
"""Get a list of existing schemas in database"""
raise dbt.exceptions.NotImplementedException(
'`list_schemas` is not implemented for this adapter!'
)
@available.parse(lambda *a, **k: False)
def check_schema_exists(self, database: str, schema: str) -> bool:
"""Check if a schema exists.
The default implementation of this is potentially unnecessarily slow,
and adapters should implement it if there is an optimized path (and
there probably is)
"""
search = (
s.lower() for s in
self.list_schemas(database=database)
)
return schema.lower() in search
###
# Abstract methods about relations
###
@abc.abstractmethod
@available.parse_none
def drop_relation(self, relation: BaseRelation) -> None:
"""Drop the given relation.
*Implementors must call self.cache.drop() to preserve cache state!*
"""
raise dbt.exceptions.NotImplementedException(
'`drop_relation` is not implemented for this adapter!'
)
@abc.abstractmethod
@available.parse_none
def truncate_relation(self, relation: BaseRelation) -> None:
"""Truncate the given relation."""
raise dbt.exceptions.NotImplementedException(
'`truncate_relation` is not implemented for this adapter!'
)
@abc.abstractmethod
@available.parse_none
def rename_relation(
self, from_relation: BaseRelation, to_relation: BaseRelation
) -> None:
"""Rename the relation from from_relation to to_relation.
Implementors must call self.cache.rename() to preserve cache state.
"""
raise dbt.exceptions.NotImplementedException(
'`rename_relation` is not implemented for this adapter!'
)
@abc.abstractmethod
@available.parse_list
def get_columns_in_relation(
self, relation: BaseRelation
) -> List[BaseColumn]:
"""Get a list of the columns in the given Relation."""
raise dbt.exceptions.NotImplementedException(
'`get_columns_in_relation` is not implemented for this adapter!'
)
@available.deprecated('get_columns_in_relation', lambda *a, **k: [])
def get_columns_in_table(
self, schema: str, identifier: str
) -> List[BaseColumn]:
"""DEPRECATED: Get a list of the columns in the given table."""
relation = self.Relation.create(
database=self.config.credentials.database,
schema=schema,
identifier=identifier,
quote_policy=self.config.quoting
)
return self.get_columns_in_relation(relation)
@abc.abstractmethod
def expand_column_types(
self, goal: BaseRelation, current: BaseRelation
) -> None:
"""Expand the current table's types to match the goal table. (passable)
:param self.Relation goal: A relation that currently exists in the
database with columns of the desired types.
:param self.Relation current: A relation that currently exists in the
database with columns of unspecified types.
"""
raise dbt.exceptions.NotImplementedException(
'`expand_target_column_types` is not implemented for this adapter!'
)
@abc.abstractmethod
def list_relations_without_caching(
self, information_schema: BaseRelation, schema: str
) -> List[BaseRelation]:
"""List relations in the given schema, bypassing the cache.
This is used as the underlying behavior to fill the cache.
:param Relation information_schema: The information schema to list
relations from.
:param str schema: The name of the schema to list relations from.
:return: The relations in schema
:rtype: List[self.Relation]
"""
raise dbt.exceptions.NotImplementedException(
'`list_relations_without_caching` is not implemented for this '
'adapter!'
)
###
# Provided methods about relations
###
@available.parse_list
def get_missing_columns(
self, from_relation: BaseRelation, to_relation: BaseRelation
) -> List[BaseColumn]:
"""Returns a list of Columns in from_relation that are missing from
to_relation.
"""
if not isinstance(from_relation, self.Relation):
dbt.exceptions.invalid_type_error(
method_name='get_missing_columns',
arg_name='from_relation',
got_value=from_relation,
expected_type=self.Relation)
if not isinstance(to_relation, self.Relation):
dbt.exceptions.invalid_type_error(
method_name='get_missing_columns',
arg_name='to_relation',
got_value=to_relation,
expected_type=self.Relation)
from_columns = {
col.name: col for col in
self.get_columns_in_relation(from_relation)
}
to_columns = {
col.name: col for col in
self.get_columns_in_relation(to_relation)
}
missing_columns = set(from_columns.keys()) - set(to_columns.keys())
return [
col for (col_name, col) in from_columns.items()
if col_name in missing_columns
]
@available.parse_none
def valid_snapshot_target(self, relation: BaseRelation) -> None:
"""Ensure that the target relation is valid, by making sure it has the
expected columns.
:param Relation relation: The relation to check
:raises dbt.exceptions.CompilationException: If the columns are
incorrect.
"""
if not isinstance(relation, self.Relation):
dbt.exceptions.invalid_type_error(
method_name='valid_snapshot_target',
arg_name='relation',
got_value=relation,
expected_type=self.Relation)
columns = self.get_columns_in_relation(relation)
names = set(c.name.lower() for c in columns)
expanded_keys = ('scd_id', 'valid_from', 'valid_to')
extra = []
missing = []
for legacy in expanded_keys:
desired = 'dbt_' + legacy
if desired not in names:
missing.append(desired)
if legacy in names:
extra.append(legacy)
if missing:
if extra:
msg = (
'Snapshot target has ("{}") but not ("{}") - is it an '
'unmigrated previous version archive?'
.format('", "'.join(extra), '", "'.join(missing))
)
else:
msg = (
'Snapshot target is not a snapshot table (missing "{}")'
.format('", "'.join(missing))
)
dbt.exceptions.raise_compiler_error(msg)
@available.parse_none
def expand_target_column_types(
self, from_relation: BaseRelation, to_relation: BaseRelation
) -> None:
if not isinstance(from_relation, self.Relation):
dbt.exceptions.invalid_type_error(
method_name='expand_target_column_types',
arg_name='from_relation',
got_value=from_relation,
expected_type=self.Relation)
if not isinstance(to_relation, self.Relation):
dbt.exceptions.invalid_type_error(
method_name='expand_target_column_types',
arg_name='to_relation',
got_value=to_relation,
expected_type=self.Relation)
self.expand_column_types(from_relation, to_relation)
def list_relations(self, database: str, schema: str) -> List[BaseRelation]:
if self._schema_is_cached(database, schema):
return self.cache.get_relations(database, schema)
information_schema = self.Relation.create(
database=database,
schema=schema,
identifier='',
quote_policy=self.config.quoting
).information_schema()
# we can't build the relations cache because we don't have a
# manifest so we can't run any operations.
relations = self.list_relations_without_caching(
information_schema, schema
)
logger.debug('with database={}, schema={}, relations={}'
.format(database, schema, relations))
return relations
def _make_match_kwargs(
self, database: str, schema: str, identifier: str
) -> Dict[str, str]:
quoting = self.config.quoting
if identifier is not None and quoting['identifier'] is False:
identifier = identifier.lower()
if schema is not None and quoting['schema'] is False:
schema = schema.lower()
if database is not None and quoting['database'] is False:
database = database.lower()
return filter_null_values({
'database': database,
'identifier': identifier,
'schema': schema,
})
def _make_match(
self,
relations_list: List[BaseRelation],
database: str,
schema: str,
identifier: str,
) -> List[BaseRelation]:
matches = []
search = self._make_match_kwargs(database, schema, identifier)
for relation in relations_list:
if relation.matches(**search):
matches.append(relation)
return matches
@available.parse_none
def get_relation(
self, database: str, schema: str, identifier: str
) -> Optional[BaseRelation]:
relations_list = self.list_relations(database, schema)
matches = self._make_match(relations_list, database, schema,
identifier)
if len(matches) > 1:
kwargs = {
'identifier': identifier,
'schema': schema,
'database': database,
}
dbt.exceptions.get_relation_returned_multiple_results(
kwargs, matches
)
elif matches:
return matches[0]
return None
@available.deprecated('get_relation', lambda *a, **k: False)
def already_exists(self, schema: str, name: str) -> bool:
"""DEPRECATED: Return if a model already exists in the database"""
database = self.config.credentials.database
relation = self.get_relation(database, schema, name)
return relation is not None
###
# ODBC FUNCTIONS -- these should not need to change for every adapter,
# although some adapters may override them
###
@abc.abstractmethod
@available.parse_none
def create_schema(self, database: str, schema: str):
"""Create the given schema if it does not exist."""
raise dbt.exceptions.NotImplementedException(
'`create_schema` is not implemented for this adapter!'
)
@abc.abstractmethod
def drop_schema(self, database: str, schema: str):
"""Drop the given schema (and everything in it) if it exists."""
raise dbt.exceptions.NotImplementedException(
'`drop_schema` is not implemented for this adapter!'
)
@available
@abc.abstractclassmethod
def quote(cls, identifier: str) -> str:
"""Quote the given identifier, as appropriate for the database."""
raise dbt.exceptions.NotImplementedException(
'`quote` is not implemented for this adapter!'
)
@available
def quote_as_configured(self, identifier: str, quote_key: str) -> str:
"""Quote or do not quote the given identifer as configured in the
project config for the quote key.
The quote key should be one of 'database' (on bigquery, 'profile'),
'identifier', or 'schema', or it will be treated as if you set `True`.
"""
try:
key = ComponentName(quote_key)
except ValueError:
return identifier
default = self.Relation.get_default_quote_policy().get_part(key)
if self.config.quoting.get(key, default):
return self.quote(identifier)
else:
return identifier
###
# Conversions: These must be implemented by concrete implementations, for
# converting agate types into their sql equivalents.
###
@abc.abstractclassmethod
def convert_text_type(
cls, agate_table: agate.Table, col_idx: int
) -> str:
"""Return the type in the database that best maps to the agate.Text
type for the given agate table and column index.
:param agate_table: The table
:param col_idx: The index into the agate table for the column.
:return: The name of the type in the database
"""
raise dbt.exceptions.NotImplementedException(
'`convert_text_type` is not implemented for this adapter!')
@abc.abstractclassmethod
def convert_number_type(
cls, agate_table: agate.Table, col_idx: int
) -> str:
"""Return the type in the database that best maps to the agate.Number
type for the given agate table and column index.
:param agate_table: The table
:param col_idx: The index into the agate table for the column.
:return: The name of the type in the database
"""
raise dbt.exceptions.NotImplementedException(
'`convert_number_type` is not implemented for this adapter!')
@abc.abstractclassmethod
def convert_boolean_type(
cls, agate_table: agate.Table, col_idx: int
) -> str:
"""Return the type in the database that best maps to the agate.Boolean
type for the given agate table and column index.
:param agate_table: The table
:param col_idx: The index into the agate table for the column.
:return: The name of the type in the database
"""
raise dbt.exceptions.NotImplementedException(
'`convert_boolean_type` is not implemented for this adapter!')
@abc.abstractclassmethod
def convert_datetime_type(
cls, agate_table: agate.Table, col_idx: int
) -> str:
"""Return the type in the database that best maps to the agate.DateTime
type for the given agate table and column index.
:param agate_table: The table
:param col_idx: The index into the agate table for the column.
:return: The name of the type in the database
"""
raise dbt.exceptions.NotImplementedException(
'`convert_datetime_type` is not implemented for this adapter!')
@abc.abstractclassmethod
def convert_date_type(cls, agate_table: agate.Table, col_idx: int) -> str:
"""Return the type in the database that best maps to the agate.Date
type for the given agate table and column index.
:param agate_table: The table
:param col_idx: The index into the agate table for the column.
:return: The name of the type in the database
"""
raise dbt.exceptions.NotImplementedException(
'`convert_date_type` is not implemented for this adapter!')
@abc.abstractclassmethod
def convert_time_type(cls, agate_table: agate.Table, col_idx: int) -> str:
"""Return the type in the database that best maps to the
agate.TimeDelta type for the given agate table and column index.
:param agate_table: The table
:param col_idx: The index into the agate table for the column.
:return: The name of the type in the database
"""
raise dbt.exceptions.NotImplementedException(
'`convert_time_type` is not implemented for this adapter!')
@available
@classmethod
def convert_type(cls, agate_table, col_idx):
return cls.convert_agate_type(agate_table, col_idx)
@classmethod
def convert_agate_type(cls, agate_table, col_idx):
agate_type = agate_table.column_types[col_idx]
conversions = [
(agate.Text, cls.convert_text_type),
(agate.Number, cls.convert_number_type),
(agate.Boolean, cls.convert_boolean_type),
(agate.DateTime, cls.convert_datetime_type),
(agate.Date, cls.convert_date_type),
(agate.TimeDelta, cls.convert_time_type),
]
for agate_cls, func in conversions:
if isinstance(agate_type, agate_cls):
return func(agate_table, col_idx)
###
# Operations involving the manifest
###
def execute_macro(
self,
macro_name: str,
manifest: Optional[Manifest] = None,
project: Optional[str] = None,
context_override: Optional[Dict[str, Any]] = None,
kwargs: Dict[str, Any] = None,
release: bool = False,
) -> agate.Table:
"""Look macro_name up in the manifest and execute its results.
:param macro_name: The name of the macro to execute.
:param manifest: The manifest to use for generating the base macro
execution context. If none is provided, use the internal manifest.
:param project: The name of the project to search in, or None for the
first match.
:param context_override: An optional dict to update() the macro
execution context.
:param kwargs: An optional dict of keyword args used to pass to the
macro.
:param release: If True, release the connection after executing.
"""
if kwargs is None:
kwargs = {}
if context_override is None:
context_override = {}
if manifest is None:
manifest = self._internal_manifest
macro = manifest.find_macro_by_name(macro_name, project)
if macro is None:
if project is None:
package_name = 'any package'
else:
package_name = 'the "{}" package'.format(project)
# The import of dbt.context.runtime below shadows 'dbt'
import dbt.exceptions
raise dbt.exceptions.RuntimeException(
'dbt could not find a macro with the name "{}" in {}'
.format(macro_name, package_name)
)
# This causes a reference cycle, as dbt.context.runtime.generate()
# ends up calling get_adapter, so the import has to be here.
import dbt.context.operation
macro_context = dbt.context.operation.generate(
macro,
self.config,
manifest
)
macro_context.update(context_override)
macro_function = macro.generator(macro_context)
try:
result = macro_function(**kwargs)
finally:
if release:
self.release_connection()
return result
@classmethod
def _catalog_filter_table(
cls, table: agate.Table, manifest: Manifest
) -> agate.Table:
"""Filter the table as appropriate for catalog entries. Subclasses can
override this to change filtering rules on a per-adapter basis.
"""
return table.where(_catalog_filter_schemas(manifest))
def get_catalog(self, manifest: Manifest) -> agate.Table:
"""Get the catalog for this manifest by running the get catalog macro.
Returns an agate.Table of catalog information.
"""
information_schemas = list(self._get_cache_schemas(manifest).keys())
# make it a list so macros can index into it.
kwargs = {'information_schemas': information_schemas}
table = self.execute_macro(GET_CATALOG_MACRO_NAME,
kwargs=kwargs,
release=True)
results = self._catalog_filter_table(table, manifest)
return results
def cancel_open_connections(self):
"""Cancel all open connections."""
return self.connections.cancel_open()
def calculate_freshness(
self,
source: BaseRelation,
loaded_at_field: str,
filter: Optional[str],
manifest: Optional[Manifest] = None
) -> Dict[str, Any]:
"""Calculate the freshness of sources in dbt, and return it"""
kwargs: Dict[str, Any] = {
'source': source,
'loaded_at_field': loaded_at_field,
'filter': filter,
}
# run the macro
table = self.execute_macro(
FRESHNESS_MACRO_NAME,
kwargs=kwargs,
release=True,
manifest=manifest
)
# now we have a 1-row table of the maximum `loaded_at_field` value and
# the current time according to the db.
if len(table) != 1 or len(table[0]) != 2:
dbt.exceptions.raise_compiler_error(
'Got an invalid result from "{}" macro: {}'.format(
FRESHNESS_MACRO_NAME, [tuple(r) for r in table]
)
)
if table[0][0] is None:
# no records in the table, so really the max_loaded_at was
            # infinitely long ago. Just call it 0:00 on January 1 of year 1, UTC
max_loaded_at = datetime(1, 1, 1, 0, 0, 0, tzinfo=pytz.UTC)
else:
max_loaded_at = _utc(table[0][0], source, loaded_at_field)
snapshotted_at = _utc(table[0][1], source, loaded_at_field)
age = (snapshotted_at - max_loaded_at).total_seconds()
return {
'max_loaded_at': max_loaded_at,
'snapshotted_at': snapshotted_at,
'age': age,
}
def pre_model_hook(self, config: Mapping[str, Any]) -> Any:
"""A hook for running some operation before the model materialization
runs. The hook can assume it has a connection available.
The only parameter is a configuration dictionary (the same one
available in the materialization context). It should be considered
read-only.
The pre-model hook may return anything as a context, which will be
passed to the post-model hook.
"""
pass
def post_model_hook(self, config: Mapping[str, Any], context: Any) -> None:
"""A hook for running some operation after the model materialization
runs. The hook can assume it has a connection available.
The first parameter is a configuration dictionary (the same one
available in the materialization context). It should be considered
read-only.
        The second parameter is the value returned by pre_model_hook.
"""
pass
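# Illustrative sketch (added comment, not part of dbt itself): a concrete
# adapter subclasses BaseAdapter, points ConnectionManager at its own
# connection manager class, and implements the abstract methods enumerated in
# the class docstring above. MyAdapter and MyConnectionManager below are
# hypothetical placeholder names:
#     class MyAdapter(BaseAdapter):
#         ConnectionManager = MyConnectionManager
#         @classmethod
#         def date_function(cls) -> str:
#             return 'now()'
#         @classmethod
#         def is_cancelable(cls) -> bool:
#             return False
#         # ...plus list_schemas, drop_relation, truncate_relation,
#         # rename_relation, get_columns_in_relation, expand_column_types,
#         # list_relations_without_caching, create_schema, drop_schema,
#         # quote, and the convert_* classmethods.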
| 36.054755
| 79
| 0.624251
|
8db96caafc2f8aeb4efab043cfeb8091c55dbbb1
| 1,630
|
py
|
Python
|
SimG4Core/PrintGeomInfo/test/python/g4OverlapCheckCalo_cfg.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | 3
|
2018-08-24T19:10:26.000Z
|
2019-02-19T11:45:32.000Z
|
SimG4Core/PrintGeomInfo/test/python/g4OverlapCheckCalo_cfg.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | 3
|
2018-08-23T13:40:24.000Z
|
2019-12-05T21:16:03.000Z
|
SimG4Core/PrintGeomInfo/test/python/g4OverlapCheckCalo_cfg.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | 5
|
2018-08-21T16:37:52.000Z
|
2020-01-09T13:33:17.000Z
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("G4PrintGeometry")
#process.load('Configuration.Geometry.GeometryIdeal_cff')
#process.load('Configuration.Geometry.GeometryExtended_cff')
#process.load('Configuration.Geometry.GeometryExtended2015_cff')
#process.load('Configuration.Geometry.GeometryExtended2017_cff')
#process.load('Configuration.Geometry.GeometryExtended2019_cff')
#process.load('Configuration.Geometry.GeometryExtended2023D17_cff')
process.load('Configuration.Geometry.GeometryExtended2023D28_cff')
from SimG4Core.PrintGeomInfo.g4TestGeometry_cfi import *
process = checkOverlap(process)
process.MessageLogger.destinations = cms.untracked.vstring("calo2023D28.overlaps")
# enable Geant4 overlap check
process.g4SimHits.CheckOverlap = True
# Geant4 overlap check conditions
process.g4SimHits.G4CheckOverlap.Tolerance = cms.untracked.double(0.0)
process.g4SimHits.G4CheckOverlap.Resolution = cms.untracked.int32(10000)
# tells if NodeName is G4Region or G4PhysicalVolume
process.g4SimHits.G4CheckOverlap.RegionFlag = cms.untracked.bool(False)
# list of names
process.g4SimHits.G4CheckOverlap.NodeNames = cms.vstring('CALO')
# enable dump gdml file
process.g4SimHits.G4CheckOverlap.gdmlFlag = cms.untracked.bool(False)
# if defined, info for that G4PhysicalVolume is printed
process.g4SimHits.G4CheckOverlap.PVname = ''
# if defined, a list of daughter volumes is printed
process.g4SimHits.G4CheckOverlap.LVname = ''
# extra output files, created if a name is not empty
process.g4SimHits.FileNameField = ''
process.g4SimHits.FileNameGDML = ''
process.g4SimHits.FileNameRegions = ''
#
| 40.75
| 82
| 0.814724
|
c71072021729ed9e9e4ec5735f95fcdf64fae732
| 980
|
py
|
Python
|
prototype_2/src/mouse_controller.py
|
Bhartendu-Kumar/Computer-Pointer-Controller
|
f753446e10757a6d304cecee55ddfb7213952d9f
|
[
"MIT"
] | 4
|
2020-06-07T10:26:56.000Z
|
2020-06-11T10:39:22.000Z
|
prototype_2/src/mouse_controller.py
|
Bhartendu-Kumar/Computer-Pointer-Controller
|
f753446e10757a6d304cecee55ddfb7213952d9f
|
[
"MIT"
] | null | null | null |
prototype_2/src/mouse_controller.py
|
Bhartendu-Kumar/Computer-Pointer-Controller
|
f753446e10757a6d304cecee55ddfb7213952d9f
|
[
"MIT"
] | 1
|
2020-06-11T10:39:27.000Z
|
2020-06-11T10:39:27.000Z
|
'''
This is a sample class that you can use to control the mouse pointer.
It uses the pyautogui library. You can set the precision for mouse movement
(how much the mouse moves) and the speed (how fast it moves) by changing
precision_dict and speed_dict.
Calling the move function with the x and y output of the gaze estimation model
will move the pointer.
'''
import pyautogui
class MouseController:
def __init__(self, precision, speed):
precision_dict={'high':100, 'low':1000, 'medium':500}
speed_dict={'fast':1, 'slow':10, 'medium':5}
pyautogui.FAILSAFE=False
self.precision=precision_dict[precision]
self.speed=speed_dict[speed]
def get_screen_size(self):
return pyautogui.size()
def move_to_center(self):
size=self.get_screen_size()
pyautogui.moveTo(int(size[0]/2), int(size[1]/2))
def move(self, x, y):
pyautogui.moveRel(x*self.precision, -1*y*self.precision, duration=self.speed)
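# Illustrative usage (added comment, not part of the original file); x and y
# are assumed to come from the gaze estimation model described above:
#     controller = MouseController(precision='medium', speed='fast')
#     controller.move_to_center()
#     controller.move(x, y)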
| 33.793103
| 85
| 0.69898
|
557a1ae122c0048ab0a3385ea17a2c0e39661fb3
| 201
|
py
|
Python
|
plugin.video.mrknow/lib/entities/CRuleItem.py
|
mrknow/filmkodi
|
0162cde9ae25ddbf4a69330948714833ff2f78c9
|
[
"Apache-2.0"
] | 105
|
2015-11-28T00:03:11.000Z
|
2021-05-05T20:47:42.000Z
|
plugin.video.mrknow/lib/entities/CRuleItem.py
|
rrosajp/filmkodi
|
0162cde9ae25ddbf4a69330948714833ff2f78c9
|
[
"Apache-2.0"
] | 918
|
2015-11-28T14:12:40.000Z
|
2022-03-23T20:24:49.000Z
|
plugin.video.mrknow/lib/entities/CRuleItem.py
|
rrosajp/filmkodi
|
0162cde9ae25ddbf4a69330948714833ff2f78c9
|
[
"Apache-2.0"
] | 111
|
2015-12-01T14:06:10.000Z
|
2020-08-01T10:44:39.000Z
|
class CRuleItem(object):
def __init__(self):
self.infos = ''
self.order = ''
self.skill = ''
self.curr = ''
self.info_list = []
self.url_build = ''
| 20.1
| 27
| 0.477612
|
c182671d3c1e6f65ba757fcb56b9d340f3cee0fc
| 17,851
|
py
|
Python
|
doc/examples/applications/plot_rank_filters.py
|
RKDSOne/scikit-image
|
baa67eafcace9cde1b94ad2d467e2f2e0468e759
|
[
"BSD-3-Clause"
] | 1
|
2020-12-27T18:42:22.000Z
|
2020-12-27T18:42:22.000Z
|
doc/examples/applications/plot_rank_filters.py
|
RKDSOne/scikit-image
|
baa67eafcace9cde1b94ad2d467e2f2e0468e759
|
[
"BSD-3-Clause"
] | null | null | null |
doc/examples/applications/plot_rank_filters.py
|
RKDSOne/scikit-image
|
baa67eafcace9cde1b94ad2d467e2f2e0468e759
|
[
"BSD-3-Clause"
] | 2
|
2015-12-29T17:04:26.000Z
|
2020-10-17T15:47:30.000Z
|
"""
============
Rank filters
============
Rank filters are non-linear filters that use the local grey-level ordering to
compute the filtered value. This ensemble of filters shares a common base: the
local grey-level histogram extraction computed on the neighborhood of a pixel
(defined by a 2D structuring element). If the filtered value is taken as the
middle value of the histogram, we get the classical median filter.
Rank filters can be used for several purposes such as:
* image quality enhancement
  e.g. image smoothing, sharpening
* image pre-processing
  e.g. noise reduction, contrast enhancement
* feature extraction
  e.g. border detection, isolated point detection
* post-processing
  e.g. small object removal, object grouping, contour smoothing
Some well known filters are specific cases of rank filters [1]_ e.g.
morphological dilation, morphological erosion, median filters.
The different implementations available in `skimage` are compared.
In this example, we will see how to filter a grey-level image using some of the
linear and non-linear filters available in skimage. We use the `camera`
image from `skimage.data`.
.. [1] Pierre Soille, On morphological operators based on rank filters, Pattern
Recognition 35 (2002) 527-535.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
ima = data.camera()
hist = np.histogram(ima, bins=np.arange(0, 256))
plt.figure(figsize=(8, 3))
plt.subplot(1, 2, 1)
plt.imshow(ima, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.subplot(1, 2, 2)
plt.plot(hist[1][:-1], hist[0], lw=2)
plt.title('histogram of grey values')
"""
.. image:: PLOT2RST.current_figure
Noise removal
=============
Some noise is added to the image: 1% of the pixels are randomly set to 255 and
1% are randomly set to 0. The **median** filter is applied to remove the noise.
.. note::
    there are different implementations of the median filter:
    `skimage.filter.median_filter` and `skimage.filter.rank.median`
"""
noise = np.random.random(ima.shape)
nima = data.camera()
nima[noise > 0.99] = 255
nima[noise < 0.01] = 0
from skimage.filter.rank import median
from skimage.morphology import disk
fig = plt.figure(figsize=[10, 7])
lo = median(nima, disk(1))
hi = median(nima, disk(5))
ext = median(nima, disk(20))
plt.subplot(2, 2, 1)
plt.imshow(nima, cmap=plt.cm.gray, vmin=0, vmax=255)
plt.xlabel('noised image')
plt.subplot(2, 2, 2)
plt.imshow(lo, cmap=plt.cm.gray, vmin=0, vmax=255)
plt.xlabel('median $r=1$')
plt.subplot(2, 2, 3)
plt.imshow(hi, cmap=plt.cm.gray, vmin=0, vmax=255)
plt.xlabel('median $r=5$')
plt.subplot(2, 2, 4)
plt.imshow(ext, cmap=plt.cm.gray, vmin=0, vmax=255)
plt.xlabel('median $r=20$')
"""
.. image:: PLOT2RST.current_figure
The added noise is efficiently removed; as the image defects are small (1 pixel
wide), a small filter radius is sufficient. As the radius increases, larger
objects are filtered as well, such as the camera tripod. The median filter is
commonly used for noise removal because borders are preserved.
Image smoothing
================
The example below shows how a local **mean** filter smooths the camera man image.
"""
from skimage.filter.rank import mean
fig = plt.figure(figsize=[10, 7])
loc_mean = mean(nima, disk(10))
plt.subplot(1, 2, 1)
plt.imshow(ima, cmap=plt.cm.gray, vmin=0, vmax=255)
plt.xlabel('original')
plt.subplot(1, 2, 2)
plt.imshow(loc_mean, cmap=plt.cm.gray, vmin=0, vmax=255)
plt.xlabel('local mean $r=10$')
"""
.. image:: PLOT2RST.current_figure
One may be interested in smoothing an image while preserving important borders
(median filters already achieve this). Here we use the **bilateral** filter,
which restricts the local neighborhood to pixels having a grey level similar to
the central one.
.. note::
a different implementation is available for color images in
`skimage.filter.denoise_bilateral`.
"""
from skimage.filter.rank import bilateral_mean
ima = data.camera()
selem = disk(10)
bilat = bilateral_mean(ima.astype(np.uint16), disk(20), s0=10, s1=10)
# display results
fig = plt.figure(figsize=[10, 7])
plt.subplot(2, 2, 1)
plt.imshow(ima, cmap=plt.cm.gray)
plt.xlabel('original')
plt.subplot(2, 2, 3)
plt.imshow(bilat, cmap=plt.cm.gray)
plt.xlabel('bilateral mean')
plt.subplot(2, 2, 2)
plt.imshow(ima[200:350, 350:450], cmap=plt.cm.gray)
plt.subplot(2, 2, 4)
plt.imshow(bilat[200:350, 350:450], cmap=plt.cm.gray)
"""
.. image:: PLOT2RST.current_figure
One can see that the large continuous part of the image (e.g. sky) is smoothed
whereas other details are preserved.
Contrast enhancement
====================
We compare here how the global histogram equalization is applied locally.
The equalized image [2]_ has a roughly linear cumulative distribution function
for each pixel neighborhood. The local version [3]_ of the histogram
equalization emphasizes every local grey-level variation.
.. [2] http://en.wikipedia.org/wiki/Histogram_equalization
.. [3] http://en.wikipedia.org/wiki/Adaptive_histogram_equalization
"""
from skimage import exposure
from skimage.filter import rank
ima = data.camera()
# equalize globally and locally
glob = exposure.equalize(ima) * 255
loc = rank.equalize(ima, disk(20))
# extract histogram for each image
hist = np.histogram(ima, bins=np.arange(0, 256))
glob_hist = np.histogram(glob, bins=np.arange(0, 256))
loc_hist = np.histogram(loc, bins=np.arange(0, 256))
plt.figure(figsize=(10, 10))
plt.subplot(321)
plt.imshow(ima, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.subplot(322)
plt.plot(hist[1][:-1], hist[0], lw=2)
plt.title('histogram of grey values')
plt.subplot(323)
plt.imshow(glob, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.subplot(324)
plt.plot(glob_hist[1][:-1], glob_hist[0], lw=2)
plt.title('histogram of grey values')
plt.subplot(325)
plt.imshow(loc, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.subplot(326)
plt.plot(loc_hist[1][:-1], loc_hist[0], lw=2)
plt.title('histogram of grey values')
"""
.. image:: PLOT2RST.current_figure
Another way to maximize the number of grey levels used in an image is to apply
a local autolevel, i.e. here a pixel grey level is proportionally remapped
between the local minimum and local maximum.
The following example shows how local autolevel enhances the camera man picture.
"""
from skimage.filter.rank import autolevel
ima = data.camera()
selem = disk(10)
auto = autolevel(ima.astype(np.uint16), disk(20))
# display results
fig = plt.figure(figsize=[10, 7])
plt.subplot(1, 2, 1)
plt.imshow(ima, cmap=plt.cm.gray)
plt.xlabel('original')
plt.subplot(1, 2, 2)
plt.imshow(auto, cmap=plt.cm.gray)
plt.xlabel('local autolevel')
"""
.. image:: PLOT2RST.current_figure
This filter is very sensitive to local outliers: see the little white spot in
the left part of the sky. This is due to a local maximum which is very high
compared to the rest of the neighborhood. One can moderate this using the
percentile version of the autolevel filter, which uses given percentiles (one
lower, one upper) in place of the local minimum and maximum. The example below
illustrates how the percentile parameters influence the local autolevel result.
"""
from skimage.filter.rank import percentile_autolevel
image = data.camera()
selem = disk(20)
loc_autolevel = autolevel(image, selem=selem)
loc_perc_autolevel0 = percentile_autolevel(image, selem=selem, p0=.00, p1=1.0)
loc_perc_autolevel1 = percentile_autolevel(image, selem=selem, p0=.01, p1=.99)
loc_perc_autolevel2 = percentile_autolevel(image, selem=selem, p0=.05, p1=.95)
loc_perc_autolevel3 = percentile_autolevel(image, selem=selem, p0=.1, p1=.9)
fig, axes = plt.subplots(nrows=3, figsize=(7, 8))
ax0, ax1, ax2 = axes
plt.gray()
ax0.imshow(np.hstack((image, loc_autolevel)))
ax0.set_title('original / autolevel')
ax1.imshow(
np.hstack((loc_perc_autolevel0, loc_perc_autolevel1)), vmin=0, vmax=255)
ax1.set_title('percentile autolevel 0%,1%')
ax2.imshow(
np.hstack((loc_perc_autolevel2, loc_perc_autolevel3)), vmin=0, vmax=255)
ax2.set_title('percentile autolevel 5% and 10%')
for ax in axes:
ax.axis('off')
"""
.. image:: PLOT2RST.current_figure
The morphological contrast enhancement filter replaces the central pixel by the
local maximum if the original pixel value is closest to the local maximum, and
by the local minimum otherwise.
"""
from skimage.filter.rank import morph_contr_enh
ima = data.camera()
enh = morph_contr_enh(ima, disk(5))
# display results
fig = plt.figure(figsize=[10, 7])
plt.subplot(2, 2, 1)
plt.imshow(ima, cmap=plt.cm.gray)
plt.xlabel('original')
plt.subplot(2, 2, 3)
plt.imshow(enh, cmap=plt.cm.gray)
plt.xlabel('local morphological contrast enhancement')
plt.subplot(2, 2, 2)
plt.imshow(ima[200:350, 350:450], cmap=plt.cm.gray)
plt.subplot(2, 2, 4)
plt.imshow(enh[200:350, 350:450], cmap=plt.cm.gray)
"""
.. image:: PLOT2RST.current_figure
The percentile version of the local morphological contrast enhancement uses
percentile *p0* and *p1* instead of the local minimum and maximum.
"""
from skimage.filter.rank import percentile_morph_contr_enh
ima = data.camera()
penh = percentile_morph_contr_enh(ima, disk(5), p0=.1, p1=.9)
# display results
fig = plt.figure(figsize=[10, 7])
plt.subplot(2, 2, 1)
plt.imshow(ima, cmap=plt.cm.gray)
plt.xlabel('original')
plt.subplot(2, 2, 3)
plt.imshow(penh, cmap=plt.cm.gray)
plt.xlabel('local percentile morphological\n contrast enhancement')
plt.subplot(2, 2, 2)
plt.imshow(ima[200:350, 350:450], cmap=plt.cm.gray)
plt.subplot(2, 2, 4)
plt.imshow(penh[200:350, 350:450], cmap=plt.cm.gray)
"""
.. image:: PLOT2RST.current_figure
Image threshold
===============
Otsu's threshold [1]_ method can be applied locally using the local greylevel
distribution. In the example below, for each pixel an "optimal" threshold is
determined by maximizing the variance between the two classes of pixels of the
local neighborhood defined by a structuring element.
The example compares the local threshold with the global threshold
`skimage.filter.threshold_otsu`.
.. note::
Local thresholding is much slower than the global one. There exists a function
for global Otsu thresholding: `skimage.filter.threshold_otsu`.
.. [1] http://en.wikipedia.org/wiki/Otsu's_method
"""
from skimage.filter.rank import otsu
from skimage.filter import threshold_otsu
p8 = data.page()
radius = 10
selem = disk(radius)
# t_loc_otsu is an image
t_loc_otsu = otsu(p8, selem)
loc_otsu = p8 >= t_loc_otsu
# t_glob_otsu is a scalar
t_glob_otsu = threshold_otsu(p8)
glob_otsu = p8 >= t_glob_otsu
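# The brute-force search below is an illustrative sketch (not part of the
# original example): Otsu's criterion picks the threshold th that maximizes the
# between-class variance w0 * w1 * (mu0 - mu1)**2 of the two pixel classes
# p8 < th and p8 >= th. Up to the binning convention used internally, the
# result should agree with `skimage.filter.threshold_otsu`.
hist_p8 = np.bincount(p8.ravel(), minlength=256).astype(float)
prob = hist_p8 / hist_p8.sum()
levels = np.arange(256)
best_t, best_var = 0, -1.0
for th in range(1, 256):
    w0, w1 = prob[:th].sum(), prob[th:].sum()
    if w0 == 0 or w1 == 0:
        continue
    mu0 = (levels[:th] * prob[:th]).sum() / w0
    mu1 = (levels[th:] * prob[th:]).sum() / w1
    var_between = w0 * w1 * (mu0 - mu1) ** 2
    if var_between > best_var:
        best_t, best_var = th, var_between
print('brute-force Otsu threshold: %s (threshold_otsu gives %s)' % (best_t, t_glob_otsu))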
plt.figure()
plt.subplot(2, 2, 1)
plt.imshow(p8, cmap=plt.cm.gray)
plt.xlabel('original')
plt.colorbar()
plt.subplot(2, 2, 2)
plt.imshow(t_loc_otsu, cmap=plt.cm.gray)
plt.xlabel('local Otsu ($radius=%d$)' % radius)
plt.colorbar()
plt.subplot(2, 2, 3)
plt.imshow(p8 >= t_loc_otsu, cmap=plt.cm.gray)
plt.xlabel('original >= local Otsu')
plt.subplot(2, 2, 4)
plt.imshow(glob_otsu, cmap=plt.cm.gray)
plt.xlabel('global Otsu ($t=%d$)' % t_glob_otsu)
"""
.. image:: PLOT2RST.current_figure
The following example shows how the local Otsu threshold handles a global level
shift applied to a synthetic image.
"""
n = 100
theta = np.linspace(0, 10 * np.pi, n)
x = np.sin(theta)
m = (np.tile(x, (n, 1)) * np.linspace(0.1, 1, n) * 128 + 128).astype(np.uint8)
radius = 10
t = rank.otsu(m, disk(radius))
plt.figure()
plt.subplot(1, 2, 1)
plt.imshow(m)
plt.xlabel('original')
plt.subplot(1, 2, 2)
plt.imshow(m >= t, interpolation='nearest')
plt.xlabel('local Otsu ($radius=%d$)' % radius)
"""
.. image:: PLOT2RST.current_figure
Image morphology
================
Local maximum and local minimum are the base operators for greylevel
morphology.
.. note::
`skimage.dilate` and `skimage.erode` are equivalent filters (see below for
comparison).
Here is an example of the classical morphological greylevel filters: opening,
closing and morphological gradient.
"""
from skimage.filter.rank import maximum, minimum, gradient
ima = data.camera()
closing = maximum(minimum(ima, disk(5)), disk(5))
opening = minimum(maximum(ima, disk(5)), disk(5))
grad = gradient(ima, disk(5))
# display results
fig = plt.figure(figsize=[10, 7])
plt.subplot(2, 2, 1)
plt.imshow(ima, cmap=plt.cm.gray)
plt.xlabel('original')
plt.subplot(2, 2, 2)
plt.imshow(closing, cmap=plt.cm.gray)
plt.xlabel('greylevel closing')
plt.subplot(2, 2, 3)
plt.imshow(opening, cmap=plt.cm.gray)
plt.xlabel('greylevel opening')
plt.subplot(2, 2, 4)
plt.imshow(grad, cmap=plt.cm.gray)
plt.xlabel('morphological gradient')
"""
.. image:: PLOT2RST.current_figure
Feature extraction
===================
The local histogram can be exploited to compute the local entropy, which is
related to the local image complexity. Entropy is computed using a base-2
logarithm, i.e. the filter returns the minimum number of bits needed to encode
the local greylevel distribution.
`skimage.rank.entropy` returns local entropy on a given structuring element.
The following example shows this filter applied to 8- and 16-bit images.
.. note::
to make better use of the available image bit depth, the function returns 10x
the entropy for 8-bit images and 1000x the entropy for 16-bit images.
"""
from skimage import data
from skimage.filter.rank import entropy
from skimage.morphology import disk
import numpy as np
import matplotlib.pyplot as plt
# defining a 8- and a 16-bit test images
a8 = data.camera()
a16 = data.camera().astype(np.uint16) * 4
ent8 = entropy(a8, disk(5))  # pixel value contains 10x the local entropy
ent16 = entropy(a16, disk(5))  # pixel value contains 1000x the local entropy
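# Illustrative cross-check of the definition above (not part of the original
# example): rebuild the local greylevel histogram of one pixel's disk(5)
# neighborhood and compute its Shannon entropy in bits by hand. Up to the
# rounding done inside the rank filter, ent8 should hold roughly 10x this value.
selem5 = disk(5)
r, c = 100, 100                         # an arbitrary interior pixel
rad = selem5.shape[0] // 2
patch = a8[r - rad:r + rad + 1, c - rad:c + rad + 1]
neigh = patch[selem5.astype(bool)]      # greylevels inside the disk
counts = np.bincount(neigh, minlength=256).astype(float)
p = counts[counts > 0] / counts.sum()
manual_ent = -(p * np.log2(p)).sum()
print('manual entropy: %.2f bits, rank.entropy / 10: %.2f' % (manual_ent, ent8[r, c] / 10.0))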
# display results
plt.figure(figsize=(10, 10))
plt.subplot(2, 2, 1)
plt.imshow(a8, cmap=plt.cm.gray)
plt.xlabel('8-bit image')
plt.colorbar()
plt.subplot(2, 2, 2)
plt.imshow(ent8, cmap=plt.cm.jet)
plt.xlabel('entropy*10')
plt.colorbar()
plt.subplot(2, 2, 3)
plt.imshow(a16, cmap=plt.cm.gray)
plt.xlabel('16-bit image')
plt.colorbar()
plt.subplot(2, 2, 4)
plt.imshow(ent16, cmap=plt.cm.jet)
plt.xlabel('entropy*1000')
plt.colorbar()
"""
.. image:: PLOT2RST.current_figure
Implementation
================
The central part of the `skimage.rank` filters is built on a sliding window
that updates the local greylevel histogram. This approach limits the algorithm
complexity to O(n), where n is the number of image pixels. The complexity also
remains limited with respect to the structuring element size.
"""
from time import time
from scipy.ndimage.filters import percentile_filter
from skimage.morphology import dilation
from skimage.filter import median_filter
from skimage.filter.rank import median, maximum
def exec_and_timeit(func):
"""Decorator that returns both function results and execution time."""
def wrapper(*arg):
t1 = time()
res = func(*arg)
t2 = time()
ms = (t2 - t1) * 1000.0
return (res, ms)
return wrapper
@exec_and_timeit
def cr_med(image, selem):
return median(image=image, selem=selem)
@exec_and_timeit
def cr_max(image, selem):
return maximum(image=image, selem=selem)
@exec_and_timeit
def cm_dil(image, selem):
return dilation(image=image, selem=selem)
@exec_and_timeit
def ctmf_med(image, radius):
return median_filter(image=image, radius=radius)
@exec_and_timeit
def ndi_med(image, n):
return percentile_filter(image, 50, size=n * 2 - 1)
"""
Comparison between
* `rank.maximum`
* `cmorph.dilate`
on increasing structuring element size
"""
a = data.camera()
rec = []
e_range = range(1, 20, 2)
for r in e_range:
elem = disk(r + 1)
rc, ms_rc = cr_max(a, elem)
rcm, ms_rcm = cm_dil(a, elem)
rec.append((ms_rc, ms_rcm))
rec = np.asarray(rec)
plt.figure()
plt.title('increasing element size')
plt.ylabel('time (ms)')
plt.xlabel('element radius')
plt.plot(e_range, rec)
plt.legend(['crank.maximum', 'cmorph.dilate'])
"""
and increasing image size
.. image:: PLOT2RST.current_figure
"""
r = 9
elem = disk(r + 1)
rec = []
s_range = range(100, 1000, 100)
for s in s_range:
a = (np.random.random((s, s)) * 256).astype('uint8')
(rc, ms_rc) = cr_max(a, elem)
(rcm, ms_rcm) = cm_dil(a, elem)
rec.append((ms_rc, ms_rcm))
rec = np.asarray(rec)
plt.figure()
plt.title('increasing image size')
plt.ylabel('time (ms)')
plt.xlabel('image size')
plt.plot(s_range, rec)
plt.legend(['crank.maximum', 'cmorph.dilate'])
"""
.. image:: PLOT2RST.current_figure
Comparison between:
* `rank.median`
* `ctmf.median_filter`
* `ndimage.percentile`
on increasing structuring element size
"""
a = data.camera()
rec = []
e_range = range(2, 30, 4)
for r in e_range:
elem = disk(r + 1)
rc, ms_rc = cr_med(a, elem)
rctmf, ms_rctmf = ctmf_med(a, r)
rndi, ms_ndi = ndi_med(a, r)
rec.append((ms_rc, ms_rctmf, ms_ndi))
rec = np.asarray(rec)
plt.figure()
plt.title('increasing element size')
plt.plot(e_range, rec)
plt.legend(['rank.median', 'ctmf.median_filter', 'ndimage.percentile'])
plt.ylabel('time (ms)')
plt.xlabel('element radius')
"""
.. image:: PLOT2RST.current_figure
Comparison of the outcomes of the three methods
"""
plt.figure()
plt.imshow(np.hstack((rc, rctmf, rndi)))
plt.xlabel('rank.median vs ctmf.median_filter vs ndimage.percentile')
"""
.. image:: PLOT2RST.current_figure
and increasing image size
"""
r = 9
elem = disk(r + 1)
rec = []
s_range = [100, 200, 500, 1000]
for s in s_range:
a = (np.random.random((s, s)) * 256).astype('uint8')
(rc, ms_rc) = cr_med(a, elem)
rctmf, ms_rctmf = ctmf_med(a, r)
rndi, ms_ndi = ndi_med(a, r)
rec.append((ms_rc, ms_rctmf, ms_ndi))
rec = np.asarray(rec)
plt.figure()
plt.title('increasing image size')
plt.plot(s_range, rec)
plt.legend(['rank.median', 'ctmf.median_filter', 'ndimage.percentile'])
plt.ylabel('time (ms)')
plt.xlabel('image size')
"""
.. image:: PLOT2RST.current_figure
"""
plt.show()
| 24.793056
| 80
| 0.721808
|
40b80849c9304ba37916ed8f099ee1706a4e6280
| 4,302
|
py
|
Python
|
splunk_eventgen/lib/plugins/generator/perdayvolumegenerator.py
|
mickotronic/eventgen
|
c0a8b15f50b2216582ac52756923f127630823b2
|
[
"Apache-2.0"
] | null | null | null |
splunk_eventgen/lib/plugins/generator/perdayvolumegenerator.py
|
mickotronic/eventgen
|
c0a8b15f50b2216582ac52756923f127630823b2
|
[
"Apache-2.0"
] | null | null | null |
splunk_eventgen/lib/plugins/generator/perdayvolumegenerator.py
|
mickotronic/eventgen
|
c0a8b15f50b2216582ac52756923f127630823b2
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import random
from splunk_eventgen.lib.generatorplugin import GeneratorPlugin
from splunk_eventgen.lib.logging_config import logger
class PerDayVolumeGenerator(GeneratorPlugin):
def __init__(self, sample):
GeneratorPlugin.__init__(self, sample)
# TODO: Make this work with replay mode.
def gen(self, count, earliest, latest, samplename=None):
# count in this plugin is a measurement of bytes.
size = count
logger.debug(
"PerDayVolumeGenerator Called with a Size of: %s with Earliest: %s and Latest: %s"
% (size, earliest, latest)
)
# very similar to the default generator. only difference is we go by size instead of count.
try:
self._sample.loadSample()
logger.debug("File sample loaded successfully.")
except TypeError:
logger.error(
"Error loading sample file for sample '%s'" % self._sample.name
)
return
logger.debug(
"Generating sample '%s' in app '%s' with count %d, et: '%s', lt '%s'"
% (self._sample.name, self._sample.app, size, earliest, latest)
)
startTime = datetime.datetime.now()
# Create a counter for the current byte size of the read in samples
currentSize = 0
# If we're random, fill random events from sampleDict into eventsDict
eventsDict = []
if self._sample.randomizeEvents:
sdlen = len(self._sample.sampleDict)
logger.debug(
"Random filling eventsDict for sample '%s' in app '%s' with %d bytes"
% (self._sample.name, self._sample.app, size)
)
while currentSize < size:
currentevent = self._sample.sampleDict[random.randint(0, sdlen - 1)]
eventsDict.append(currentevent)
currentSize += len(currentevent["_raw"])
# If we're bundlelines, create count copies of the sampleDict
elif self._sample.bundlelines:
logger.debug(
"Bundlelines, filling eventsDict for sample '%s' in app '%s' with %d copies of sampleDict"
% (self._sample.name, self._sample.app, size)
)
while currentSize <= size:
sizeofsample = sum(
len(sample["_raw"]) for sample in self._sample.sampleDict
)
eventsDict.extend(self._sample.sampleDict)
currentSize += sizeofsample
# Otherwise fill count events into eventsDict or keep making copies of events out of sampleDict until
# eventsDict is as big as count
else:
logger.debug("Simple replay in order, processing")
# I need to check the sample and load events in order until the size is smaller than read events from file
# or i've read the entire file.
linecount = 0
currentreadsize = 0
linesinfile = len(self._sample.sampleDict)
logger.debug("Lines in files: %s " % linesinfile)
while currentreadsize <= size:
targetline = linecount % linesinfile
sizeremaining = size - currentreadsize
targetlinesize = len(self._sample.sampleDict[targetline]["_raw"])
if size < targetlinesize:
logger.error(
"Size is too small for sample {}. We need {} bytes but size of one event is {} bytes.".format(
self._sample.name, size, targetlinesize
)
)
break
if targetlinesize <= sizeremaining:
currentreadsize += targetlinesize
eventsDict.append(self._sample.sampleDict[targetline])
else:
break
linecount += 1
logger.debug(
"Events fill complete for sample '%s' in app '%s' length %d"
% (self._sample.name, self._sample.app, len(eventsDict))
)
# build the events and replace tokens
self.build_events(eventsDict, startTime, earliest, latest)
def load():
return PerDayVolumeGenerator
| 41.76699
| 118
| 0.576709
|
1f2cdcf08d4fa8925dbcfbe4747b34b3f54bb70b
| 7,859
|
py
|
Python
|
examples/gym/train_acer_gym.py
|
WhenTheyCry96/chainerrl
|
0f32aae2855dbb6288ae628be6271739ced6c42c
|
[
"MIT"
] | 2
|
2020-05-20T06:15:20.000Z
|
2020-05-20T06:15:27.000Z
|
examples/gym/train_acer_gym.py
|
WhenTheyCry96/chainerrl
|
0f32aae2855dbb6288ae628be6271739ced6c42c
|
[
"MIT"
] | null | null | null |
examples/gym/train_acer_gym.py
|
WhenTheyCry96/chainerrl
|
0f32aae2855dbb6288ae628be6271739ced6c42c
|
[
"MIT"
] | null | null | null |
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import argparse
import os
# This prevents numpy from using multiple threads
os.environ['OMP_NUM_THREADS'] = '1' # NOQA
import chainer
from chainer import functions as F
from chainer import links as L
import gym
from gym import spaces
import gym.wrappers
import numpy as np
import chainerrl
from chainerrl.action_value import DiscreteActionValue
from chainerrl.agents import acer
from chainerrl.distribution import SoftmaxDistribution
from chainerrl import experiments
from chainerrl.initializers import LeCunNormal
from chainerrl import links
from chainerrl import misc
from chainerrl.optimizers import rmsprop_async
from chainerrl import policies
from chainerrl import q_functions
from chainerrl.replay_buffer import EpisodicReplayBuffer
from chainerrl import v_functions
def main():
import logging
parser = argparse.ArgumentParser()
parser.add_argument('processes', type=int)
parser.add_argument('--env', type=str, default='CartPole-v0')
parser.add_argument('--seed', type=int, default=0,
help='Random seed [0, 2 ** 32)')
parser.add_argument('--outdir', type=str, default='results',
help='Directory path to save output files.'
' If it does not exist, it will be created.')
parser.add_argument('--t-max', type=int, default=50)
parser.add_argument('--n-times-replay', type=int, default=4)
parser.add_argument('--n-hidden-channels', type=int, default=100)
parser.add_argument('--n-hidden-layers', type=int, default=2)
parser.add_argument('--replay-capacity', type=int, default=5000)
parser.add_argument('--replay-start-size', type=int, default=10 ** 3)
parser.add_argument('--disable-online-update', action='store_true')
parser.add_argument('--beta', type=float, default=1e-2)
parser.add_argument('--profile', action='store_true')
parser.add_argument('--steps', type=int, default=8 * 10 ** 7)
parser.add_argument('--eval-interval', type=int, default=10 ** 5)
parser.add_argument('--eval-n-runs', type=int, default=10)
parser.add_argument('--reward-scale-factor', type=float, default=1e-2)
parser.add_argument('--rmsprop-epsilon', type=float, default=1e-2)
parser.add_argument('--render', action='store_true', default=False)
parser.add_argument('--lr', type=float, default=7e-4)
parser.add_argument('--demo', action='store_true', default=False)
parser.add_argument('--load', type=str, default='')
parser.add_argument('--logger-level', type=int, default=logging.DEBUG)
parser.add_argument('--monitor', action='store_true')
parser.add_argument('--truncation-threshold', type=float, default=5)
parser.add_argument('--trust-region-delta', type=float, default=0.1)
args = parser.parse_args()
logging.basicConfig(level=args.logger_level)
# Set a random seed used in ChainerRL.
# If you use more than one processes, the results will be no longer
# deterministic even with the same random seed.
misc.set_random_seed(args.seed)
# Set different random seeds for different subprocesses.
# If seed=0 and processes=4, subprocess seeds are [0, 1, 2, 3].
# If seed=1 and processes=4, subprocess seeds are [4, 5, 6, 7].
process_seeds = np.arange(args.processes) + args.seed * args.processes
assert process_seeds.max() < 2 ** 32
args.outdir = experiments.prepare_output_dir(args, args.outdir)
def make_env(process_idx, test):
env = gym.make(args.env)
# Use different random seeds for train and test envs
process_seed = int(process_seeds[process_idx])
env_seed = 2 ** 32 - 1 - process_seed if test else process_seed
env.seed(env_seed)
# Cast observations to float32 because our model uses float32
env = chainerrl.wrappers.CastObservationToFloat32(env)
if args.monitor and process_idx == 0:
env = gym.wrappers.Monitor(env, args.outdir)
if not test:
# Scale rewards (and thus returns) to a reasonable range so that
# training is easier
env = chainerrl.wrappers.ScaleReward(env, args.reward_scale_factor)
if args.render and process_idx == 0 and not test:
env = chainerrl.wrappers.Render(env)
return env
sample_env = gym.make(args.env)
timestep_limit = sample_env.spec.tags.get(
'wrapper_config.TimeLimit.max_episode_steps')
obs_space = sample_env.observation_space
action_space = sample_env.action_space
if isinstance(action_space, spaces.Box):
model = acer.ACERSDNSeparateModel(
pi=policies.FCGaussianPolicy(
obs_space.low.size, action_space.low.size,
n_hidden_channels=args.n_hidden_channels,
n_hidden_layers=args.n_hidden_layers,
bound_mean=True,
min_action=action_space.low,
max_action=action_space.high),
v=v_functions.FCVFunction(
obs_space.low.size,
n_hidden_channels=args.n_hidden_channels,
n_hidden_layers=args.n_hidden_layers),
adv=q_functions.FCSAQFunction(
obs_space.low.size, action_space.low.size,
n_hidden_channels=args.n_hidden_channels // 4,
n_hidden_layers=args.n_hidden_layers),
)
else:
model = acer.ACERSeparateModel(
pi=links.Sequence(
L.Linear(obs_space.low.size, args.n_hidden_channels),
F.relu,
L.Linear(args.n_hidden_channels, action_space.n,
initialW=LeCunNormal(1e-3)),
SoftmaxDistribution),
q=links.Sequence(
L.Linear(obs_space.low.size, args.n_hidden_channels),
F.relu,
L.Linear(args.n_hidden_channels, action_space.n,
initialW=LeCunNormal(1e-3)),
DiscreteActionValue),
)
opt = rmsprop_async.RMSpropAsync(
lr=args.lr, eps=args.rmsprop_epsilon, alpha=0.99)
opt.setup(model)
opt.add_hook(chainer.optimizer.GradientClipping(40))
replay_buffer = EpisodicReplayBuffer(args.replay_capacity)
agent = acer.ACER(model, opt, t_max=args.t_max, gamma=0.99,
replay_buffer=replay_buffer,
n_times_replay=args.n_times_replay,
replay_start_size=args.replay_start_size,
disable_online_update=args.disable_online_update,
use_trust_region=True,
trust_region_delta=args.trust_region_delta,
truncation_threshold=args.truncation_threshold,
beta=args.beta)
if args.load:
agent.load(args.load)
if args.demo:
env = make_env(0, True)
eval_stats = experiments.eval_performance(
env=env,
agent=agent,
n_runs=args.eval_n_runs,
max_episode_len=timestep_limit)
print('n_runs: {} mean: {} median: {} stdev {}'.format(
args.eval_n_runs, eval_stats['mean'], eval_stats['median'],
eval_stats['stdev']))
else:
experiments.train_agent_async(
agent=agent,
outdir=args.outdir,
processes=args.processes,
make_env=make_env,
profile=args.profile,
steps=args.steps,
eval_n_runs=args.eval_n_runs,
eval_interval=args.eval_interval,
max_episode_len=timestep_limit)
if __name__ == '__main__':
main()
| 41.803191
| 79
| 0.659499
|
901a11fe27b3aff5acafa6b7e0068861f1af2907
| 32,860
|
py
|
Python
|
tasks/google/cloud/tasks_v2beta2/proto/queue_pb2.py
|
q-logic/google-cloud-python
|
a65065c89c059bc564bbdd79288a48970907c399
|
[
"Apache-2.0"
] | null | null | null |
tasks/google/cloud/tasks_v2beta2/proto/queue_pb2.py
|
q-logic/google-cloud-python
|
a65065c89c059bc564bbdd79288a48970907c399
|
[
"Apache-2.0"
] | 40
|
2019-07-16T10:04:48.000Z
|
2020-01-20T09:04:59.000Z
|
tasks/google/cloud/tasks_v2beta2/proto/queue_pb2.py
|
q-logic/google-cloud-python
|
a65065c89c059bc564bbdd79288a48970907c399
|
[
"Apache-2.0"
] | 2
|
2019-07-18T00:05:31.000Z
|
2019-11-27T14:17:22.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/tasks_v2beta2/proto/queue.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.cloud.tasks_v2beta2.proto import (
target_pb2 as google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_target__pb2,
)
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/cloud/tasks_v2beta2/proto/queue.proto",
package="google.cloud.tasks.v2beta2",
syntax="proto3",
serialized_options=_b(
"\n\036com.google.cloud.tasks.v2beta2B\nQueueProtoP\001Z?google.golang.org/genproto/googleapis/cloud/tasks/v2beta2;tasks"
),
serialized_pb=_b(
'\n,google/cloud/tasks_v2beta2/proto/queue.proto\x12\x1agoogle.cloud.tasks.v2beta2\x1a\x19google/api/resource.proto\x1a-google/cloud/tasks_v2beta2/proto/target.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xbf\x04\n\x05Queue\x12\x0c\n\x04name\x18\x01 \x01(\t\x12Q\n\x16\x61pp_engine_http_target\x18\x03 \x01(\x0b\x32/.google.cloud.tasks.v2beta2.AppEngineHttpTargetH\x00\x12=\n\x0bpull_target\x18\x04 \x01(\x0b\x32&.google.cloud.tasks.v2beta2.PullTargetH\x00\x12;\n\x0brate_limits\x18\x05 \x01(\x0b\x32&.google.cloud.tasks.v2beta2.RateLimits\x12=\n\x0cretry_config\x18\x06 \x01(\x0b\x32\'.google.cloud.tasks.v2beta2.RetryConfig\x12\x36\n\x05state\x18\x07 \x01(\x0e\x32\'.google.cloud.tasks.v2beta2.Queue.State\x12.\n\npurge_time\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"E\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0b\n\x07RUNNING\x10\x01\x12\n\n\x06PAUSED\x10\x02\x12\x0c\n\x08\x44ISABLED\x10\x03:\\\xea\x41Y\n\x1f\x63loudtasks.googleapis.com/Queue\x12\x36projects/{project}/locations/{location}/queues/{queue}B\r\n\x0btarget_type"k\n\nRateLimits\x12\'\n\x1fmax_tasks_dispatched_per_second\x18\x01 \x01(\x01\x12\x16\n\x0emax_burst_size\x18\x02 \x01(\x05\x12\x1c\n\x14max_concurrent_tasks\x18\x03 \x01(\x05"\x81\x02\n\x0bRetryConfig\x12\x16\n\x0cmax_attempts\x18\x01 \x01(\x05H\x00\x12\x1c\n\x12unlimited_attempts\x18\x02 \x01(\x08H\x00\x12\x35\n\x12max_retry_duration\x18\x03 \x01(\x0b\x32\x19.google.protobuf.Duration\x12.\n\x0bmin_backoff\x18\x04 \x01(\x0b\x32\x19.google.protobuf.Duration\x12.\n\x0bmax_backoff\x18\x05 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x15\n\rmax_doublings\x18\x06 \x01(\x05\x42\x0e\n\x0cnum_attemptsBo\n\x1e\x63om.google.cloud.tasks.v2beta2B\nQueueProtoP\x01Z?google.golang.org/genproto/googleapis/cloud/tasks/v2beta2;tasksb\x06proto3'
),
dependencies=[
google_dot_api_dot_resource__pb2.DESCRIPTOR,
google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_target__pb2.DESCRIPTOR,
google_dot_protobuf_dot_duration__pb2.DESCRIPTOR,
google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR,
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
],
)
_QUEUE_STATE = _descriptor.EnumDescriptor(
name="State",
full_name="google.cloud.tasks.v2beta2.Queue.State",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="STATE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="RUNNING", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="PAUSED", index=2, number=2, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="DISABLED", index=3, number=3, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=643,
serialized_end=712,
)
_sym_db.RegisterEnumDescriptor(_QUEUE_STATE)
_QUEUE = _descriptor.Descriptor(
name="Queue",
full_name="google.cloud.tasks.v2beta2.Queue",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="name",
full_name="google.cloud.tasks.v2beta2.Queue.name",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="app_engine_http_target",
full_name="google.cloud.tasks.v2beta2.Queue.app_engine_http_target",
index=1,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="pull_target",
full_name="google.cloud.tasks.v2beta2.Queue.pull_target",
index=2,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="rate_limits",
full_name="google.cloud.tasks.v2beta2.Queue.rate_limits",
index=3,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="retry_config",
full_name="google.cloud.tasks.v2beta2.Queue.retry_config",
index=4,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="state",
full_name="google.cloud.tasks.v2beta2.Queue.state",
index=5,
number=7,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="purge_time",
full_name="google.cloud.tasks.v2beta2.Queue.purge_time",
index=6,
number=8,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[_QUEUE_STATE],
serialized_options=_b(
"\352AY\n\037cloudtasks.googleapis.com/Queue\0226projects/{project}/locations/{location}/queues/{queue}"
),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="target_type",
full_name="google.cloud.tasks.v2beta2.Queue.target_type",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=246,
serialized_end=821,
)
_RATELIMITS = _descriptor.Descriptor(
name="RateLimits",
full_name="google.cloud.tasks.v2beta2.RateLimits",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="max_tasks_dispatched_per_second",
full_name="google.cloud.tasks.v2beta2.RateLimits.max_tasks_dispatched_per_second",
index=0,
number=1,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="max_burst_size",
full_name="google.cloud.tasks.v2beta2.RateLimits.max_burst_size",
index=1,
number=2,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="max_concurrent_tasks",
full_name="google.cloud.tasks.v2beta2.RateLimits.max_concurrent_tasks",
index=2,
number=3,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=823,
serialized_end=930,
)
_RETRYCONFIG = _descriptor.Descriptor(
name="RetryConfig",
full_name="google.cloud.tasks.v2beta2.RetryConfig",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="max_attempts",
full_name="google.cloud.tasks.v2beta2.RetryConfig.max_attempts",
index=0,
number=1,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="unlimited_attempts",
full_name="google.cloud.tasks.v2beta2.RetryConfig.unlimited_attempts",
index=1,
number=2,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="max_retry_duration",
full_name="google.cloud.tasks.v2beta2.RetryConfig.max_retry_duration",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="min_backoff",
full_name="google.cloud.tasks.v2beta2.RetryConfig.min_backoff",
index=3,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="max_backoff",
full_name="google.cloud.tasks.v2beta2.RetryConfig.max_backoff",
index=4,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="max_doublings",
full_name="google.cloud.tasks.v2beta2.RetryConfig.max_doublings",
index=5,
number=6,
type=5,
cpp_type=1,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="num_attempts",
full_name="google.cloud.tasks.v2beta2.RetryConfig.num_attempts",
index=0,
containing_type=None,
fields=[],
)
],
serialized_start=933,
serialized_end=1190,
)
_QUEUE.fields_by_name[
"app_engine_http_target"
].message_type = (
google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_target__pb2._APPENGINEHTTPTARGET
)
_QUEUE.fields_by_name[
"pull_target"
].message_type = (
google_dot_cloud_dot_tasks__v2beta2_dot_proto_dot_target__pb2._PULLTARGET
)
_QUEUE.fields_by_name["rate_limits"].message_type = _RATELIMITS
_QUEUE.fields_by_name["retry_config"].message_type = _RETRYCONFIG
_QUEUE.fields_by_name["state"].enum_type = _QUEUE_STATE
_QUEUE.fields_by_name[
"purge_time"
].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP
_QUEUE_STATE.containing_type = _QUEUE
_QUEUE.oneofs_by_name["target_type"].fields.append(
_QUEUE.fields_by_name["app_engine_http_target"]
)
_QUEUE.fields_by_name[
"app_engine_http_target"
].containing_oneof = _QUEUE.oneofs_by_name["target_type"]
_QUEUE.oneofs_by_name["target_type"].fields.append(_QUEUE.fields_by_name["pull_target"])
_QUEUE.fields_by_name["pull_target"].containing_oneof = _QUEUE.oneofs_by_name[
"target_type"
]
_RETRYCONFIG.fields_by_name[
"max_retry_duration"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_RETRYCONFIG.fields_by_name[
"min_backoff"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_RETRYCONFIG.fields_by_name[
"max_backoff"
].message_type = google_dot_protobuf_dot_duration__pb2._DURATION
_RETRYCONFIG.oneofs_by_name["num_attempts"].fields.append(
_RETRYCONFIG.fields_by_name["max_attempts"]
)
_RETRYCONFIG.fields_by_name[
"max_attempts"
].containing_oneof = _RETRYCONFIG.oneofs_by_name["num_attempts"]
_RETRYCONFIG.oneofs_by_name["num_attempts"].fields.append(
_RETRYCONFIG.fields_by_name["unlimited_attempts"]
)
_RETRYCONFIG.fields_by_name[
"unlimited_attempts"
].containing_oneof = _RETRYCONFIG.oneofs_by_name["num_attempts"]
DESCRIPTOR.message_types_by_name["Queue"] = _QUEUE
DESCRIPTOR.message_types_by_name["RateLimits"] = _RATELIMITS
DESCRIPTOR.message_types_by_name["RetryConfig"] = _RETRYCONFIG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Queue = _reflection.GeneratedProtocolMessageType(
"Queue",
(_message.Message,),
dict(
DESCRIPTOR=_QUEUE,
__module__="google.cloud.tasks_v2beta2.proto.queue_pb2",
__doc__="""A queue is a container of related tasks. Queues are
configured to manage how those tasks are dispatched. Configurable
properties include rate limits, retry options, target types, and others.
Attributes:
name:
Caller-specified and required in [CreateQueue][google.cloud.ta
sks.v2beta2.CloudTasks.CreateQueue], after which it becomes
output only. The queue name. The queue name must have the
following format:
``projects/PROJECT_ID/locations/LOCATION_ID/queues/QUEUE_ID``
- ``PROJECT_ID`` can contain letters ([A-Za-z]), numbers
([0-9]), hyphens (-), colons (:), or periods (.). For more
information, see `Identifying projects
<https://cloud.google.com/resource-manager/docs/creating-
managing-projects#identifying_projects>`_ - ``LOCATION_ID``
is the canonical ID for the queue's location. The list of
available locations can be obtained by calling [ListLocatio
ns][google.cloud.location.Locations.ListLocations]. For
more information, see
https://cloud.google.com/about/locations/. - ``QUEUE_ID`` can
contain letters ([A-Za-z]), numbers ([0-9]), or hyphens
(-). The maximum length is 100 characters.
target_type:
Caller-specified and required in [CreateQueue][google.cloud.ta
sks.v2beta2.CloudTasks.CreateQueue][], after which the queue
config type becomes output only, though fields within the
config are mutable. The queue's target. The target applies
to all tasks in the queue.
app_engine_http_target:
App Engine HTTP target. An App Engine queue is a queue that
has an [AppEngineHttpTarget][google.cloud.tasks.v2beta2.AppEng
ineHttpTarget].
pull_target:
Pull target. A pull queue is a queue that has a
[PullTarget][google.cloud.tasks.v2beta2.PullTarget].
rate_limits:
Rate limits for task dispatches.
[rate\_limits][google.cloud.tasks.v2beta2.Queue.rate\_limits]
and [retry\_config][google.cloud.tasks.v2beta2.Queue.retry\_co
nfig] are related because they both control task attempts
however they control how tasks are attempted in different
ways: -
[rate\_limits][google.cloud.tasks.v2beta2.Queue.rate\_limits]
controls the total rate of dispatches from a queue (i.e. all
traffic dispatched from the queue, regardless of whether
the dispatch is from a first attempt or a retry). - [retry
\_config][google.cloud.tasks.v2beta2.Queue.retry\_config]
controls what happens to a particular task after its first
attempt fails. That is, [retry\_config][google.cloud.tas
ks.v2beta2.Queue.retry\_config] controls task retries (the
second attempt, third attempt, etc).
retry_config:
Settings that determine the retry behavior. - For tasks
created using Cloud Tasks: the queue-level retry settings
apply to all tasks in the queue that were created using Cloud
Tasks. Retry settings cannot be set on individual tasks. -
For tasks created using the App Engine SDK: the queue-level
retry settings apply to all tasks in the queue which do not
have retry settings explicitly set on the task and were
created by the App Engine SDK. See `App Engine
documentation <https://cloud.google.com/appengine/docs/standar
d/python/taskqueue/push/retrying-tasks>`_.
state:
Output only. The state of the queue. ``state`` can only be
changed by called [PauseQueue][google.cloud.tasks.v2beta2.Clou
dTasks.PauseQueue], [ResumeQueue][google.cloud.tasks.v2beta2.C
loudTasks.ResumeQueue], or uploading `queue.yaml/xml <https://
cloud.google.com/appengine/docs/python/config/queueref>`_. [U
pdateQueue][google.cloud.tasks.v2beta2.CloudTasks.UpdateQueue]
cannot be used to change ``state``.
purge_time:
Output only. The last time this queue was purged. All tasks
that were
[created][google.cloud.tasks.v2beta2.Task.create\_time] before
this time were purged. A queue can be purged using [PurgeQueu
e][google.cloud.tasks.v2beta2.CloudTasks.PurgeQueue], the `App
Engine Task Queue SDK, or the Cloud Console <https://cloud.goo
gle.com/appengine/docs/standard/python/taskqueue/push/deleting
-tasks-and-queues#purging_all_tasks_from_a_queue>`_. Purge
time will be truncated to the nearest microsecond. Purge time
will be unset if the queue has never been purged.
""",
# @@protoc_insertion_point(class_scope:google.cloud.tasks.v2beta2.Queue)
),
)
_sym_db.RegisterMessage(Queue)
RateLimits = _reflection.GeneratedProtocolMessageType(
"RateLimits",
(_message.Message,),
dict(
DESCRIPTOR=_RATELIMITS,
__module__="google.cloud.tasks_v2beta2.proto.queue_pb2",
__doc__="""Rate limits.
This message determines the maximum rate that tasks can be dispatched by
a queue, regardless of whether the dispatch is a first task attempt or a
retry.
Note: The debugging command,
[RunTask][google.cloud.tasks.v2beta2.CloudTasks.RunTask], will run a
task even if the queue has reached its
[RateLimits][google.cloud.tasks.v2beta2.RateLimits].
Attributes:
max_tasks_dispatched_per_second:
The maximum rate at which tasks are dispatched from this
queue. If unspecified when the queue is created, Cloud Tasks
will pick the default. - For [App Engine
queues][google.cloud.tasks.v2beta2.AppEngineHttpTarget], the
maximum allowed value is 500. - This field is output only
for [pull queues][google.cloud.tasks.v2beta2.PullTarget].
In addition to the ``max_tasks_dispatched_per_second``
limit, a maximum of 10 QPS of
[LeaseTasks][google.cloud.tasks.v2beta2.CloudTasks.LeaseTasks]
requests are allowed per pull queue. This field has the same
meaning as `rate in queue.yaml/xml <https://cloud.google.com/a
ppengine/docs/standard/python/config/queueref#rate>`_.
max_burst_size:
Output only. The max burst size. Max burst size limits how
fast tasks in queue are processed when many tasks are in the
queue and the rate is high. This field allows the queue to
have a high rate so processing starts shortly after a task is
enqueued, but still limits resource usage when many tasks are
enqueued in a short period of time. The `token bucket
<https://wikipedia.org/wiki/Token_Bucket>`_ algorithm is used
to control the rate of task dispatches. Each queue has a token
bucket that holds tokens, up to the maximum specified by
``max_burst_size``. Each time a task is dispatched, a token is
removed from the bucket. Tasks will be dispatched until the
queue's bucket runs out of tokens. The bucket will be
continuously refilled with new tokens based on [max\_tasks\_di
spatched\_per\_second][google.cloud.tasks.v2beta2.RateLimits.m
ax\_tasks\_dispatched\_per\_second]. Cloud Tasks will pick
the value of ``max_burst_size`` based on the value of [max\_ta
sks\_dispatched\_per\_second][google.cloud.tasks.v2beta2.RateL
imits.max\_tasks\_dispatched\_per\_second]. For App Engine
queues that were created or updated using ``queue.yaml/xml``,
``max_burst_size`` is equal to `bucket\_size <https://cloud.go
ogle.com/appengine/docs/standard/python/config/queueref#bucket
_size>`_. Since ``max_burst_size`` is output only, if [Update
Queue][google.cloud.tasks.v2beta2.CloudTasks.UpdateQueue] is
called on a queue created by ``queue.yaml/xml``,
``max_burst_size`` will be reset based on the value of [max\_t
asks\_dispatched\_per\_second][google.cloud.tasks.v2beta2.Rate
Limits.max\_tasks\_dispatched\_per\_second], regardless of
whether [max\_tasks\_dispatched\_per\_second][google.cloud.tas
ks.v2beta2.RateLimits.max\_tasks\_dispatched\_per\_second] is
updated.
max_concurrent_tasks:
The maximum number of concurrent tasks that Cloud Tasks allows
to be dispatched for this queue. After this threshold has been
reached, Cloud Tasks stops dispatching tasks until the number
of concurrent requests decreases. If unspecified when the
queue is created, Cloud Tasks will pick the default. The
maximum allowed value is 5,000. This field is output only for
[pull queues][google.cloud.tasks.v2beta2.PullTarget] and
always -1, which indicates no limit. No other queue types can
have ``max_concurrent_tasks`` set to -1. This field has the
same meaning as `max\_concurrent\_requests in queue.yaml/xml
<https://cloud.google.com/appengine/docs/standard/python/config
/queueref#max_concurrent_requests>`_.
""",
# @@protoc_insertion_point(class_scope:google.cloud.tasks.v2beta2.RateLimits)
),
)
_sym_db.RegisterMessage(RateLimits)
RetryConfig = _reflection.GeneratedProtocolMessageType(
"RetryConfig",
(_message.Message,),
dict(
DESCRIPTOR=_RETRYCONFIG,
__module__="google.cloud.tasks_v2beta2.proto.queue_pb2",
__doc__="""Retry config.
These settings determine how a failed task attempt is retried.
Attributes:
num_attempts:
Number of attempts per task. If unspecified when the queue is
created, Cloud Tasks will pick the default. This field has
the same meaning as `task\_retry\_limit in queue.yaml/xml <htt
ps://cloud.google.com/appengine/docs/standard/python/config/qu
eueref#retry_parameters>`_.
max_attempts:
The maximum number of attempts for a task. Cloud Tasks will
attempt the task ``max_attempts`` times (that is, if the first
attempt fails, then there will be ``max_attempts - 1``
retries). Must be > 0.
unlimited_attempts:
If true, then the number of attempts is unlimited.
max_retry_duration:
If positive, ``max_retry_duration`` specifies the time limit
for retrying a failed task, measured from when the task was
first attempted. Once ``max_retry_duration`` time has passed
*and* the task has been attempted [max\_attempts][google.cloud
.tasks.v2beta2.RetryConfig.max\_attempts] times, no further
attempts will be made and the task will be deleted. If zero,
then the task age is unlimited. If unspecified when the queue
is created, Cloud Tasks will pick the default. This field is
output only for [pull
queues][google.cloud.tasks.v2beta2.PullTarget].
``max_retry_duration`` will be truncated to the nearest
second. This field has the same meaning as `task\_age\_limit
in queue.yaml/xml <https://cloud.google.com/appengine/docs/sta
ndard/python/config/queueref#retry_parameters>`_.
min_backoff:
A task will be
[scheduled][google.cloud.tasks.v2beta2.Task.schedule\_time]
for retry between [min\_backoff][google.cloud.tasks.v2beta2.Re
tryConfig.min\_backoff] and [max\_backoff][google.cloud.tasks.
v2beta2.RetryConfig.max\_backoff] duration after it fails, if
the queue's
[RetryConfig][google.cloud.tasks.v2beta2.RetryConfig]
specifies that the task should be retried. If unspecified
when the queue is created, Cloud Tasks will pick the default.
This field is output only for [pull
queues][google.cloud.tasks.v2beta2.PullTarget].
``min_backoff`` will be truncated to the nearest second. This
field has the same meaning as `min\_backoff\_seconds in
queue.yaml/xml <https://cloud.google.com/appengine/docs/standa
rd/python/config/queueref#retry_parameters>`_.
max_backoff:
A task will be
[scheduled][google.cloud.tasks.v2beta2.Task.schedule\_time]
for retry between [min\_backoff][google.cloud.tasks.v2beta2.Re
tryConfig.min\_backoff] and [max\_backoff][google.cloud.tasks.
v2beta2.RetryConfig.max\_backoff] duration after it fails, if
the queue's
[RetryConfig][google.cloud.tasks.v2beta2.RetryConfig]
specifies that the task should be retried. If unspecified
when the queue is created, Cloud Tasks will pick the default.
This field is output only for [pull
queues][google.cloud.tasks.v2beta2.PullTarget].
``max_backoff`` will be truncated to the nearest second. This
field has the same meaning as `max\_backoff\_seconds in
queue.yaml/xml <https://cloud.google.com/appengine/docs/standa
rd/python/config/queueref#retry_parameters>`_.
max_doublings:
The time between retries will double ``max_doublings`` times.
A task's retry interval starts at [min\_backoff][google.cloud.
tasks.v2beta2.RetryConfig.min\_backoff], then doubles
``max_doublings`` times, then increases linearly, and finally
retries at intervals of [max\_backoff][google.cloud.ta
sks.v2beta2.RetryConfig.max\_backoff] up to [max\_attempts][go
ogle.cloud.tasks.v2beta2.RetryConfig.max\_attempts] times.
For example, if [min\_backoff][google.cloud.tasks.v2beta2.Retr
yConfig.min\_backoff] is 10s, [max\_backoff][google.cloud.task
s.v2beta2.RetryConfig.max\_backoff] is 300s, and
``max_doublings`` is 3, then the task will first be retried
in 10s. The retry interval will double three times, and then
increase linearly by 2^3 \* 10s. Finally, the task will retry
at intervals of [max\_backoff][google.cloud.tasks.v2beta2.Retr
yConfig.max\_backoff] until the task has been attempted [max\_
attempts][google.cloud.tasks.v2beta2.RetryConfig.max\_attempts
] times. Thus, the requests will retry at 10s, 20s, 40s, 80s,
160s, 240s, 300s, 300s, .... If unspecified when the queue is
created, Cloud Tasks will pick the default. This field is
output only for [pull
queues][google.cloud.tasks.v2beta2.PullTarget]. This field
has the same meaning as `max\_doublings in queue.yaml/xml <htt
ps://cloud.google.com/appengine/docs/standard/python/config/qu
eueref#retry_parameters>`_.
""",
# @@protoc_insertion_point(class_scope:google.cloud.tasks.v2beta2.RetryConfig)
),
)
_sym_db.RegisterMessage(RetryConfig)
DESCRIPTOR._options = None
_QUEUE._options = None
# @@protoc_insertion_point(module_scope)
| 41.966794
| 1,874
| 0.650091
|
149133f641bf8fcb0f289d4dd1126fbb6e7f7437
| 2,746
|
py
|
Python
|
ayb/net/http.py
|
msolo/ayb
|
3bb804846033cc0b054f06c2e6298131d22d49f2
|
[
"BSD-3-Clause"
] | null | null | null |
ayb/net/http.py
|
msolo/ayb
|
3bb804846033cc0b054f06c2e6298131d22d49f2
|
[
"BSD-3-Clause"
] | null | null | null |
ayb/net/http.py
|
msolo/ayb
|
3bb804846033cc0b054f06c2e6298131d22d49f2
|
[
"BSD-3-Clause"
] | null | null | null |
import asynchat
import asyncore
import os
import socket
import string
import sys
import StringIO
import mimetools
ROOT = "."
PORT = 8000
class HTTPChannel(asynchat.async_chat):
def __init__(self, server, sock, addr):
asynchat.async_chat.__init__(self, sock)
self.server = server
self.set_terminator("\r\n\r\n")
self.header = None
self.data = ""
self.shutdown = 0
def collect_incoming_data(self, data):
self.data = self.data + data
if len(self.data) > 16384:
# limit the header size to prevent attacks
self.shutdown = 1
def found_terminator(self):
if not self.header:
# parse http header
fp = StringIO.StringIO(self.data)
request = string.split(fp.readline(), None, 2)
if len(request) != 3:
# badly formed request; just shut down
self.shutdown = 1
else:
# parse message header
self.header = mimetools.Message(fp)
self.set_terminator("\r\n")
self.server.handle_request(
self, request[0], request[1], self.header
)
self.close_when_done()
self.data = ""
else:
pass # ignore body data, for now
def pushstatus(self, status, explanation="OK"):
self.push("HTTP/1.0 %d %s\r\n" % (status, explanation))
class FileProducer:
# a producer which reads data from a file object
def __init__(self, file):
self.file = file
def more(self):
if self.file:
data = self.file.read(2048)
if data:
return data
self.file = None
return ""
class HTTPServer(asyncore.dispatcher):
def __init__(self, port=None, request=None):
asyncore.dispatcher.__init__(self)
if not port:
port = 80
self.port = port
if request:
self.handle_request = request # external request handler
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind(("", port))
self.listen(5)
def handle_accept(self):
conn, addr = self.accept()
HTTPChannel(self, conn, addr)
def handle_request(self, channel, method, path, header):
try:
# this is not safe!
while path[:1] == "/":
path = path[1:]
filename = os.path.join(ROOT, path)
print path, "=>", filename
file = open(filename, "r")
except IOError:
channel.pushstatus(404, "Not found")
channel.push("Content-type: text/html\r\n")
channel.push("\r\n")
channel.push("<html><body>File not found.</body></html>\r\n")
else:
channel.pushstatus(200, "OK")
channel.push("Content-type: text/html\r\n")
channel.push("\r\n")
channel.push_with_producer(FileProducer(file))
if __name__ == '__main__':
s = HTTPServer(PORT)
print "serving at port", PORT
asyncore.loop()
| 25.425926
| 67
| 0.630736
|
faa27f9fb4cb28649b8bd961b2b4e6571733a175
| 792
|
py
|
Python
|
0-notes/job-search/Cracking the Coding Interview/C10SortingSearching/questions/10.10-question.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
0-notes/job-search/Cracking the Coding Interview/C10SortingSearching/questions/10.10-question.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
0-notes/job-search/Cracking the Coding Interview/C10SortingSearching/questions/10.10-question.py
|
eengineergz/Lambda
|
1fe511f7ef550aed998b75c18a432abf6ab41c5f
|
[
"MIT"
] | null | null | null |
# Rank from Stream
# Imagine you are reading in a stream of integers.
# Periodically, you wish to be able to look up the rank of a number x,
# the number of values less than or equal to x.
# Implement the data structures and algorithms to support these operations.
# That is, implement the method track(int x), which is called when each
# number is generated.
# The method getRankOfNumber(int x), which returns the number of values less
# than or equal to x (not including this instance of x itself).
'''
EXAMPLE:
Stream (in order of appearance): 5, 1, 4, 4, 5, 9, 7, 13, 3
getRankOfNumber(1) = 0 // values:
getRankOfNumber(3) = 1 // values: s[1] = 1
getRankOfNumber(4) = 3 // values: s[1] = 1, s[2] = 4, s[8] = 3
'''
# time complexity: O()
# space complexity: O()
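# --- One possible sketch of a solution (not part of the original prompt) ---
# A binary search tree in which every node stores the size of its left
# subtree supports both operations in O(h) time, where h is the tree height
# (O(log n) on average for random input, O(n) worst case if unbalanced).

class RankNode:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None
        self.left_size = 0  # number of nodes in the left subtree


class RankTracker:
    def __init__(self):
        self.root = None

    def track(self, x):
        """Called as each number is read from the stream."""
        if self.root is None:
            self.root = RankNode(x)
            return
        node = self.root
        while True:
            if x <= node.data:
                node.left_size += 1          # x will live in this left subtree
                if node.left is None:
                    node.left = RankNode(x)
                    return
                node = node.left
            else:
                if node.right is None:
                    node.right = RankNode(x)
                    return
                node = node.right

    def getRankOfNumber(self, x):
        """Number of tracked values <= x, excluding this instance of x.
        Returns -1 if x has never been tracked."""
        rank = 0
        node = self.root
        while node is not None:
            if x == node.data:
                return rank + node.left_size
            if x < node.data:
                node = node.left
            else:
                rank += node.left_size + 1   # node and its left subtree are all <= x
                node = node.right
        return -1


# quick check against the example above
tracker = RankTracker()
for value in (5, 1, 4, 4, 5, 9, 7, 13, 3):
    tracker.track(value)
assert tracker.getRankOfNumber(1) == 0
assert tracker.getRankOfNumber(3) == 1
assert tracker.getRankOfNumber(4) == 3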
| 36
| 76
| 0.67803
|
aed1e76940a0c934b9b2b8d42ea0e4fa4b96d2b4
| 11,190
|
py
|
Python
|
Lib/unittest/test/test_runner.py
|
deadsnakes/python3.2
|
c0deccc710b5c1c8dd40a1c6d46a8271b60617f1
|
[
"PSF-2.0"
] | null | null | null |
Lib/unittest/test/test_runner.py
|
deadsnakes/python3.2
|
c0deccc710b5c1c8dd40a1c6d46a8271b60617f1
|
[
"PSF-2.0"
] | 1
|
2018-04-15T22:59:15.000Z
|
2018-04-15T22:59:15.000Z
|
Lib/unittest/test/test_runner.py
|
deadsnakes/python3.2
|
c0deccc710b5c1c8dd40a1c6d46a8271b60617f1
|
[
"PSF-2.0"
] | null | null | null |
import io
import os
import sys
import pickle
import subprocess
import unittest
from .support import LoggingResult, ResultWithNoStartTestRunStopTestRun
class TestCleanUp(unittest.TestCase):
def testCleanUp(self):
class TestableTest(unittest.TestCase):
def testNothing(self):
pass
test = TestableTest('testNothing')
self.assertEqual(test._cleanups, [])
cleanups = []
def cleanup1(*args, **kwargs):
cleanups.append((1, args, kwargs))
def cleanup2(*args, **kwargs):
cleanups.append((2, args, kwargs))
test.addCleanup(cleanup1, 1, 2, 3, four='hello', five='goodbye')
test.addCleanup(cleanup2)
self.assertEqual(test._cleanups,
[(cleanup1, (1, 2, 3), dict(four='hello', five='goodbye')),
(cleanup2, (), {})])
self.assertTrue(test.doCleanups())
self.assertEqual(cleanups, [(2, (), {}), (1, (1, 2, 3), dict(four='hello', five='goodbye'))])
def testCleanUpWithErrors(self):
class TestableTest(unittest.TestCase):
def testNothing(self):
pass
class MockOutcome(object):
success = True
errors = []
test = TestableTest('testNothing')
test._outcomeForDoCleanups = MockOutcome
exc1 = Exception('foo')
exc2 = Exception('bar')
def cleanup1():
raise exc1
def cleanup2():
raise exc2
test.addCleanup(cleanup1)
test.addCleanup(cleanup2)
self.assertFalse(test.doCleanups())
self.assertFalse(MockOutcome.success)
(Type1, instance1, _), (Type2, instance2, _) = reversed(MockOutcome.errors)
self.assertEqual((Type1, instance1), (Exception, exc1))
self.assertEqual((Type2, instance2), (Exception, exc2))
def testCleanupInRun(self):
blowUp = False
ordering = []
class TestableTest(unittest.TestCase):
def setUp(self):
ordering.append('setUp')
if blowUp:
raise Exception('foo')
def testNothing(self):
ordering.append('test')
def tearDown(self):
ordering.append('tearDown')
test = TestableTest('testNothing')
def cleanup1():
ordering.append('cleanup1')
def cleanup2():
ordering.append('cleanup2')
test.addCleanup(cleanup1)
test.addCleanup(cleanup2)
def success(some_test):
self.assertEqual(some_test, test)
ordering.append('success')
result = unittest.TestResult()
result.addSuccess = success
test.run(result)
self.assertEqual(ordering, ['setUp', 'test', 'tearDown',
'cleanup2', 'cleanup1', 'success'])
blowUp = True
ordering = []
test = TestableTest('testNothing')
test.addCleanup(cleanup1)
test.run(result)
self.assertEqual(ordering, ['setUp', 'cleanup1'])
def testTestCaseDebugExecutesCleanups(self):
ordering = []
class TestableTest(unittest.TestCase):
def setUp(self):
ordering.append('setUp')
self.addCleanup(cleanup1)
def testNothing(self):
ordering.append('test')
def tearDown(self):
ordering.append('tearDown')
test = TestableTest('testNothing')
def cleanup1():
ordering.append('cleanup1')
test.addCleanup(cleanup2)
def cleanup2():
ordering.append('cleanup2')
test.debug()
self.assertEqual(ordering, ['setUp', 'test', 'tearDown', 'cleanup1', 'cleanup2'])
class Test_TextTestRunner(unittest.TestCase):
"""Tests for TextTestRunner."""
def test_init(self):
runner = unittest.TextTestRunner()
self.assertFalse(runner.failfast)
self.assertFalse(runner.buffer)
self.assertEqual(runner.verbosity, 1)
self.assertEqual(runner.warnings, None)
self.assertTrue(runner.descriptions)
self.assertEqual(runner.resultclass, unittest.TextTestResult)
def test_multiple_inheritance(self):
class AResult(unittest.TestResult):
def __init__(self, stream, descriptions, verbosity):
super(AResult, self).__init__(stream, descriptions, verbosity)
class ATextResult(unittest.TextTestResult, AResult):
pass
# This used to raise an exception due to TextTestResult not passing
# on arguments in its __init__ super call
ATextResult(None, None, 1)
def testBufferAndFailfast(self):
class Test(unittest.TestCase):
def testFoo(self):
pass
result = unittest.TestResult()
runner = unittest.TextTestRunner(stream=io.StringIO(), failfast=True,
buffer=True)
# Use our result object
runner._makeResult = lambda: result
runner.run(Test('testFoo'))
self.assertTrue(result.failfast)
self.assertTrue(result.buffer)
def testRunnerRegistersResult(self):
class Test(unittest.TestCase):
def testFoo(self):
pass
originalRegisterResult = unittest.runner.registerResult
def cleanup():
unittest.runner.registerResult = originalRegisterResult
self.addCleanup(cleanup)
result = unittest.TestResult()
runner = unittest.TextTestRunner(stream=io.StringIO())
# Use our result object
runner._makeResult = lambda: result
self.wasRegistered = 0
def fakeRegisterResult(thisResult):
self.wasRegistered += 1
self.assertEqual(thisResult, result)
unittest.runner.registerResult = fakeRegisterResult
runner.run(unittest.TestSuite())
self.assertEqual(self.wasRegistered, 1)
def test_works_with_result_without_startTestRun_stopTestRun(self):
class OldTextResult(ResultWithNoStartTestRunStopTestRun):
separator2 = ''
def printErrors(self):
pass
class Runner(unittest.TextTestRunner):
def __init__(self):
super(Runner, self).__init__(io.StringIO())
def _makeResult(self):
return OldTextResult()
runner = Runner()
runner.run(unittest.TestSuite())
def test_startTestRun_stopTestRun_called(self):
class LoggingTextResult(LoggingResult):
separator2 = ''
def printErrors(self):
pass
class LoggingRunner(unittest.TextTestRunner):
def __init__(self, events):
super(LoggingRunner, self).__init__(io.StringIO())
self._events = events
def _makeResult(self):
return LoggingTextResult(self._events)
events = []
runner = LoggingRunner(events)
runner.run(unittest.TestSuite())
expected = ['startTestRun', 'stopTestRun']
self.assertEqual(events, expected)
def test_pickle_unpickle(self):
# Issue #7197: a TextTestRunner should be (un)pickleable. This is
# required by test_multiprocessing under Windows (in verbose mode).
stream = io.StringIO("foo")
runner = unittest.TextTestRunner(stream)
for protocol in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = pickle.dumps(runner, protocol)
obj = pickle.loads(s)
# StringIO objects never compare equal, a cheap test instead.
self.assertEqual(obj.stream.getvalue(), stream.getvalue())
def test_resultclass(self):
def MockResultClass(*args):
return args
STREAM = object()
DESCRIPTIONS = object()
VERBOSITY = object()
runner = unittest.TextTestRunner(STREAM, DESCRIPTIONS, VERBOSITY,
resultclass=MockResultClass)
self.assertEqual(runner.resultclass, MockResultClass)
expectedresult = (runner.stream, DESCRIPTIONS, VERBOSITY)
self.assertEqual(runner._makeResult(), expectedresult)
def test_warnings(self):
"""
Check that warnings argument of TextTestRunner correctly affects the
behavior of the warnings.
"""
# see #10535 and the _test_warnings file for more information
def get_parse_out_err(p):
return [b.splitlines() for b in p.communicate()]
opts = dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=os.path.dirname(__file__))
ae_msg = b'Please use assertEqual instead.'
at_msg = b'Please use assertTrue instead.'
# no args -> all the warnings are printed, unittest warnings only once
p = subprocess.Popen([sys.executable, '_test_warnings.py'], **opts)
out, err = get_parse_out_err(p)
self.assertIn(b'OK', err)
# check that the total number of warnings in the output is correct
self.assertEqual(len(out), 12)
        # check that the number of each kind of warning is correct
for msg in [b'dw', b'iw', b'uw']:
self.assertEqual(out.count(msg), 3)
for msg in [ae_msg, at_msg, b'rw']:
self.assertEqual(out.count(msg), 1)
args_list = (
# passing 'ignore' as warnings arg -> no warnings
[sys.executable, '_test_warnings.py', 'ignore'],
# -W doesn't affect the result if the arg is passed
[sys.executable, '-Wa', '_test_warnings.py', 'ignore'],
# -W affects the result if the arg is not passed
[sys.executable, '-Wi', '_test_warnings.py']
)
# in all these cases no warnings are printed
for args in args_list:
p = subprocess.Popen(args, **opts)
out, err = get_parse_out_err(p)
self.assertIn(b'OK', err)
self.assertEqual(len(out), 0)
# passing 'always' as warnings arg -> all the warnings printed,
# unittest warnings only once
p = subprocess.Popen([sys.executable, '_test_warnings.py', 'always'],
**opts)
out, err = get_parse_out_err(p)
self.assertIn(b'OK', err)
self.assertEqual(len(out), 14)
for msg in [b'dw', b'iw', b'uw', b'rw']:
self.assertEqual(out.count(msg), 3)
for msg in [ae_msg, at_msg]:
self.assertEqual(out.count(msg), 1)
def testStdErrLookedUpAtInstantiationTime(self):
# see issue 10786
old_stderr = sys.stderr
f = io.StringIO()
sys.stderr = f
try:
runner = unittest.TextTestRunner()
self.assertTrue(runner.stream.stream is f)
finally:
sys.stderr = old_stderr
def testSpecifiedStreamUsed(self):
# see issue 10786
f = io.StringIO()
runner = unittest.TextTestRunner(f)
self.assertTrue(runner.stream.stream is f)
| 33.704819
| 101
| 0.592761
|
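The test_runner.py record above repeatedly checks that cleanups registered with addCleanup run in LIFO order after tearDown. A minimal standalone sketch of that behavior, using only the standard library (the Demo class and ordering list are illustrative and not part of the record):

# Minimal sketch (standard library only): cleanups registered with addCleanup
# run in LIFO order after tearDown, mirroring what TestCleanUp.testCleanupInRun checks.
import unittest

ordering = []

class Demo(unittest.TestCase):
    def setUp(self):
        ordering.append("setUp")
        self.addCleanup(ordering.append, "cleanup1")
        self.addCleanup(ordering.append, "cleanup2")

    def test_nothing(self):
        ordering.append("test")

    def tearDown(self):
        ordering.append("tearDown")

if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=0).run(
        unittest.TestLoader().loadTestsFromTestCase(Demo)
    )
    print(ordering)  # ['setUp', 'test', 'tearDown', 'cleanup2', 'cleanup1']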
4d6ecaef08ba17e990185613fb50af05c8e3b4bf
| 4,981
|
py
|
Python
|
runfile_localVol.py
|
buwu-DWJ/neural_locVol
|
a3703fa83edb4694f8c1596676869b2533ade7ad
|
[
"MIT"
] | null | null | null |
runfile_localVol.py
|
buwu-DWJ/neural_locVol
|
a3703fa83edb4694f8c1596676869b2533ade7ad
|
[
"MIT"
] | null | null | null |
runfile_localVol.py
|
buwu-DWJ/neural_locVol
|
a3703fa83edb4694f8c1596676869b2533ade7ad
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# MIT License
# Copyright (c) 2020 Christa Cuchiero, Wahid Khosrawi, Josef Teichmann
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""runfile_localVol.py:
Plots local vol implied volatility surfaces for the given input parameters and options.
This file implements the functionality for computing and plotting local-vol
implied volatilities for the sampled parameters given as input and options also
given as input. Starting point is the main function.
"""
import matplotlib.pyplot as plt
import numpy as np, tensorflow as tf
import os
from compute_pM import MC_pM_locvol
from finModels.helpers_finModels import fin_surface
import pickle
import helpers
# Env variables
HOSTNAME = os.uname()[1]
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def makePrices(para_locVol, option, para_MC, just_ops = False):
'''Compute locVol prices for the given parameters. '''
version = para_locVol['version']
if not os.path.exists('caliRes/'+version):
os.mkdir('caliRes/'+version)
Batchsize_tf = tf.placeholder(dtype=tf.int32, shape=())
strikes_tf = [tf.placeholder(dtype=tf.float32, shape=(option.nK[i]) ) for i in range(option.nT) ]
pM_tf = MC_pM_locvol(para_locVol, option, para_MC, Batchsize_tf, strikes_tf)
if just_ops:
return(pM_tf, Batchsize_tf, strikes_tf)
N_mc_data = para_MC['N_mc_data']
N_mc_inner = para_MC['N_mc_inner']
helpers.init_folders_and_readme(mode='create_data', version=version)
print('I run this script on host: {}'.format(HOSTNAME), version)
# Check if Data version already exists
if os.path.isfile('caliRes/pM_'+version+'.pkl'):
if version != 'test':
raise Exception('Data file already exists. Give new name to store it.\n\n')
# Compute the number of MC rounds we need to get to Batchsize
N_MC_outer = int(np.trunc(N_mc_data/N_mc_inner))
D = dict(zip(strikes_tf, option.K) )
with tf.Session() as sess:
print('\nData computation progress:')
pM = sess.run(
pM_tf,
feed_dict={**D,Batchsize_tf: N_mc_inner}
)
prog = 100 /(N_MC_outer)
print('\rMC simulation {:7} | progress: {:10.4}% '.format(1,prog), end='')
for i in np.arange(2, N_MC_outer+1):
prog = 100*i /(N_MC_outer)
pM_aux = sess.run(pM_tf, feed_dict={**D,Batchsize_tf: N_mc_inner})
pM = [ pM[iteraux] + pM_aux[iteraux] for iteraux in range( len(pM) ) ]
print('\rMC simulation {:7} | progress: {:10.4}% '.format(i,prog), end='')
pM = [pM[iteraux] / N_MC_outer for iteraux in range( len(pM) ) ]
# Save the prices to disk for later calibration
with open('data/locVolPrices/pM_'+version+'.pkl', 'wb') as f:
pickle.dump(pM, f)
def plot_IV(para, option):
'''load computed prices and create fin_surf instance. Also do all needed conversions'''
log_m = option.log_m_list(para['S0'])
version = para['version']
with open('data/locVolPrices/pM_'+version+'.pkl', 'rb') as f:
pM = pickle.load(f)
    # Construct the object that handles conversion
data_handler = fin_surface(mats=option.T, strikes=option.K, spot=para['S0'], version = version)
data_handler.paralocVol = para
data_handler.feed_prices(prices= [p for p in pM] )
data_handler.convert(direction='price->iV')
for i in range(len(pM)):
plt.plot(log_m[i], data_handler.iV[i] )
if not os.path.exists('caliRes/'+version):
os.mkdir('caliRes/'+version)
plt.savefig('caliRes/'+version+'/plot_{}.png'.format(str(i+1).zfill(3)) )
plt.close()
return(data_handler)
def main(para_locVol, option, para_MC, compute_prices):
'''wrapper function. Depending on compute_prices data is computed or just loaded.'''
if compute_prices:
makePrices(para_locVol, option, para_MC)
finsurf = plot_IV(para_locVol, option)
return(finsurf)
if __name__ == "__main__":
pass
| 37.171642
| 103
| 0.687011
|
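makePrices in the runfile_localVol.py record above accumulates Monte Carlo prices over N_MC_outer rounds of N_mc_inner paths each and then divides by the number of rounds. A small hedged numpy sketch of that chunked-averaging pattern; simulate_batch is a hypothetical stand-in for one sess.run of the pricing graph and is not part of the repository:

# Illustrative sketch of the chunked Monte Carlo averaging used in makePrices:
# run N_mc_data samples as N_MC_outer rounds of N_mc_inner each, then average.
# simulate_batch is a hypothetical stand-in for one sess.run of the pricing graph.
import numpy as np

def simulate_batch(n, rng):
    # placeholder payoff estimator: mean of n simulated discounted payoffs
    return np.maximum(rng.standard_normal(n) + 1.0, 0.0).mean()

N_mc_data, N_mc_inner = 100_000, 10_000
N_MC_outer = int(np.trunc(N_mc_data / N_mc_inner))
rng = np.random.default_rng(0)

price = simulate_batch(N_mc_inner, rng)
for _ in range(N_MC_outer - 1):
    price += simulate_batch(N_mc_inner, rng)
price /= N_MC_outer  # same normalization as pM = [p / N_MC_outer for p in pM]
print(price)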
80ed2e6cc6df195a0f9df32190d725ff8b026d2a
| 8,035
|
py
|
Python
|
scripts/implementations.py
|
yannvon/higgs-boson
|
4d0804e1987a9e08f6417f8d7119c37572f35515
|
[
"MIT"
] | null | null | null |
scripts/implementations.py
|
yannvon/higgs-boson
|
4d0804e1987a9e08f6417f8d7119c37572f35515
|
[
"MIT"
] | null | null | null |
scripts/implementations.py
|
yannvon/higgs-boson
|
4d0804e1987a9e08f6417f8d7119c37572f35515
|
[
"MIT"
] | 1
|
2021-11-01T12:55:40.000Z
|
2021-11-01T12:55:40.000Z
|
import numpy as np
# ----- Helper functions linear regression -----------------------------------------------------------
def calculate_mse(e):
"""Calculate the mse for vector e."""
return 1/2 * np.mean(e**2)
def compute_loss_mse(y, tx, w):
"""Calculate the loss using mean squared error loss function"""
e = y - tx @ w
return calculate_mse(e)
def compute_gradient(y, tx, w):
"""Compute the gradient."""
N = len(y)
e = y - tx @ w
gradient = -(1.0 / N) * (tx.T @ e)
return gradient, e
# ----- Implement ML methods -------------------------------------------------------------------------
# Linear regression using gradient descent
def least_squares_GD(y, tx, initial_w, max_iters, gamma):
"""Gradient descent algorithm."""
weights = initial_w
for n_iter in range(max_iters):
# Compute gradient and loss
gradient, e = compute_gradient(y, tx, weights)
loss = calculate_mse(e)
# Update w by gradient
# print("loss", loss)
weights = weights - gamma * gradient
# return the last w and loss
return weights, loss
# Linear regression using stochastic gradient descent
def least_squares_SGD(y, tx, initial_w, max_iters, gamma):
"""Stochastic gradient descent with batch size of 1"""
np.random.seed(1)
weights = initial_w
min_weights = weights
min_loss = calculate_mse(y - tx.dot(weights))
for n_iter in range(max_iters):
#Select a random element of y and tx
r = np.random.randint(0, len(y))
y_elem = np.array([y[r]])
tx_elem = np.array([tx[r]])
# Compute its stochastic gradient
gradient, err = compute_gradient(y_elem, tx_elem, weights)
# Update w
weights = weights - gamma * gradient
#Compute loss using mean squared error
loss = calculate_mse(y - tx.dot(weights))
if(loss < min_loss):
min_loss = loss
min_weights = weights
#print("loss", loss)
return min_weights, min_loss
# Least squares regression using normal equations
def least_squares(y, tx):
"""calculate the least squares solution."""
weights = np.linalg.solve(tx.T @ tx, tx.T @ y)
return weights, compute_loss_mse(y, tx, weights)
# Ridge regression using normal equations
def ridge_regression(y, tx, lambda_):
"""implement ridge regression."""
l = 2 * tx.shape[0] * lambda_
weights = np.linalg.solve(tx.T @ tx + l * np.identity(tx.shape[1]), tx.T @ y)
return weights, compute_loss_mse(y, tx, weights)
# Logistic regression using gradient descent
def logistic_regression(y, tx, initial_w, max_iters, gamma):
"""implements logistic regression using gradient descent."""
w = initial_w
# start the logistic regression
for iter in range(max_iters):
# get loss, gradient and update w.
loss = calculate_loss_logistic_regression(y, tx, w)
gradient = calculate_gradient_logistic_regression(y, tx, w)
w = w - gamma * gradient
# log info
if iter % 100 == 0:
print("Current iteration={i}, loss={l}".format(i=iter, l=loss))
loss = calculate_loss_logistic_regression(y, tx, w)
print("loss={l}".format(l=loss))
return w, loss
# Regularized logistic regression using gradient descent
def reg_logistic_regression(y, tx, lambda_, initial_w, max_iters, gamma):
"""implements regularized logistic regression using gradient descent."""
w = initial_w
# start the logistic regression
for iter in range(max_iters):
# get loss, gradient and update w.
loss = calculate_loss_reg_logistic_regression(y, tx, w, lambda_)
gradient = calculate_gradient_reg_logistic_regression(y, tx, w, lambda_)
w = w - gamma * gradient
# log info
if iter % 100 == 0:
print("Current iteration={i}, loss={l}".format(i=iter, l=loss))
print("Weights size:" + str(np.squeeze(w.T @ w)))
loss = calculate_loss_reg_logistic_regression(y, tx, w, lambda_)
print("loss={l}".format(l=loss))
return w, loss
# Regularized logistic regression using SGD
def reg_logistic_regression_SGD(y, tx, lambda_, initial_w, max_iters, gamma):
"""implements regularized logistic regression using stochastic gradient descent."""
w = initial_w
min_weights = w
min_loss = calculate_loss_reg_logistic_regression(y, tx, w, lambda_)
# start the logistic regression
for iter in range(max_iters):
# get loss, gradient and update w.
# stochastic -> select random element of y and tx
r = np.random.randint(0, len(y))
y_elem = np.array([y[r]])
tx_elem = np.array([tx[r]])
gradient = calculate_gradient_reg_logistic_regression(y_elem, tx_elem, w, lambda_)
w = w - gamma * gradient
# log info
if iter % 10000 == 0:
loss = calculate_loss_reg_logistic_regression(y, tx, w, lambda_)
print("Current iteration={i}, loss={l}".format(i=iter, l=loss))
print("weights size:" + str(np.squeeze(w.T @ w)))
loss = calculate_loss_reg_logistic_regression(y, tx, w, lambda_)
print("loss={l}".format(l=loss))
return w, loss
# ----- Helper functions for logistic regression ----------------------------------------------------
def sigmoid(t):
"""apply sigmoid function on t."""
return 1.0 / (1 + np.exp(-t))
def calculate_loss_logistic_regression(y, tx, w):
"""compute the cost by negative log likelihood."""
# Note: this function takes y with values either 0 or 1
return - np.squeeze((y.T @ np.log(sigmoid(tx @ w)) + (1 - y).T @ np.log(1 - sigmoid(tx @ w))))
def calculate_gradient_logistic_regression(y, tx, w):
"""compute the gradient of loss."""
# Note: this function takes y with values either 0 or 1
return tx.T @ (sigmoid(tx @ w) - y)
# ----- Helper functions for penalized logistic regression -------------------------------------------
def calculate_loss_reg_logistic_regression(y, tx, w, lambda_):
"""compute the cost by negative log likelihood."""
# Here we use lambda as defined as in class !
return calculate_loss_logistic_regression(y, tx, w) + lambda_ / 2 * np.squeeze(w.T @ w)
def calculate_gradient_reg_logistic_regression(y, tx, w, lambda_):
"""compute the gradient of loss."""
# Here we use lambda as defined as in class !
return calculate_gradient_logistic_regression(y, tx, w) + lambda_ * w
# ----- Additional section: Newton method ---------------------------------------------------------------------------
# Note: calculate_hessian below has no regularization term; it is added in learning_by_newton_method.
def calculate_hessian(y, tx, w):
"""return the hessian of the loss function."""
# compute S matrix
N = tx.shape[0]
S = np.zeros((N,N))
for n in range(N):
sig = sigmoid(tx[n].T @ w)
S[n, n] = sig * (1 - sig)
H = tx.T @ S @ tx
return H
def learning_by_newton_method(y, tx, w, lambda_, gamma):
"""
Do one step on Newton's method.
return the loss and updated w.
"""
loss = calculate_loss_logistic_regression(y, tx, w) + lambda_ / 2 * np.squeeze(w.T @ w)
gradient = calculate_gradient_logistic_regression(y, tx, w) + lambda_ * w
    # The Hessian of the L2 penalty term (lambda_ / 2 * w.T @ w) is lambda_ * I
    hessian = calculate_hessian(y, tx, w) + lambda_ * np.identity(tx.shape[1])
w = w - gamma * np.linalg.inv(hessian) @ gradient
return w, loss
def logistic_regression_newton(y, tx, lambda_, initial_w, max_iters, gamma):
''' Perform logistic regression with Newton's method '''
w = initial_w
# start the logistic regression
for iter in range(max_iters):
# get loss and update w.
w, loss = learning_by_newton_method(y, tx, w, lambda_, gamma)
# log info
if iter % 100 == 0:
print("Current iteration={i}, the loss={l}".format(i=iter, l=loss))
print("Weights size:" + str(np.squeeze(w.T @ w)))
loss = calculate_loss_logistic_regression(y, tx, w)
print("loss={l}".format(l=loss))
return w, loss
| 35.711111
| 117
| 0.620162
|
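The implementations.py record above exposes closed-form least-squares and ridge-regression solvers. A short usage sketch on synthetic data, assuming the scripts/ directory is on the Python path so the file is importable as implementations (the import path is an assumption):

# Usage sketch for the least-squares helpers above, assuming scripts/ is on the
# path so the file is importable as `implementations` (import path is an assumption).
import numpy as np
from implementations import least_squares, ridge_regression

rng = np.random.default_rng(42)
N, D = 200, 3
tx = np.c_[np.ones(N), rng.standard_normal((N, D))]   # add a bias column
w_true = np.array([0.5, 1.0, -2.0, 0.25])
y = tx @ w_true + 0.1 * rng.standard_normal(N)

w_ls, loss_ls = least_squares(y, tx)
w_ridge, loss_ridge = ridge_regression(y, tx, lambda_=0.01)
print(w_ls, loss_ls)
print(w_ridge, loss_ridge)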
ab9e2c04538ab2e0dd58674edd5cc38457997567
| 7,037
|
py
|
Python
|
plugins/module_utils/oci_key_management_custom_helpers.py
|
slmjy/oci-ansible-collection
|
349c91e2868bf4706a6e3d6fb3b47fc622bfe11b
|
[
"Apache-2.0"
] | 108
|
2020-05-19T20:46:10.000Z
|
2022-03-25T14:10:01.000Z
|
plugins/module_utils/oci_key_management_custom_helpers.py
|
slmjy/oci-ansible-collection
|
349c91e2868bf4706a6e3d6fb3b47fc622bfe11b
|
[
"Apache-2.0"
] | 90
|
2020-06-14T22:07:11.000Z
|
2022-03-07T05:40:29.000Z
|
plugins/module_utils/oci_key_management_custom_helpers.py
|
slmjy/oci-ansible-collection
|
349c91e2868bf4706a6e3d6fb3b47fc622bfe11b
|
[
"Apache-2.0"
] | 42
|
2020-08-30T23:09:12.000Z
|
2022-03-25T16:58:01.000Z
|
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
logger = oci_common_utils.get_logger("oci_key_management_custom_helpers")
def _debug(s):
get_logger().debug(s)
def _info(s):
get_logger().info(s)
def get_logger():
return logger
class KeyVersionHelperCustom:
def get_module_resource_id_param(self):
return "id"
def get_module_resource_id(self):
return self.module.params.get("id")
# Currently the module doesn't have the keyVersionId param
# The ID is returned after create in CreateOperationLifecycleStateWaiter
# This customization can be replaced with a new waiter logic in the future
def get_resource(self):
if self.module.params.get("id") is None:
return None
else:
return oci_common_utils.call_with_backoff(
self.client.get_key_version,
key_id=self.module.params.get("key_id"),
key_version_id=self.module.params.get("id"),
)
# there is no concept of idempotency for this module
# it re-executes create new version every time module is invoked
def get_matching_resource(self):
return None
def is_create(self):
return True
class VaultActionsHelperCustom:
REPLICA_CREATION_SUMMARY_STATUS = ["CREATED", "CREATING"]
REPLICA_DELETION_SUMMARY_STATUS = ["DELETED", "DELETING"]
def is_action_necessary(self, action, resource=None):
# For replica actions: create/delete we check if the replica exists by checking if there are any replication details
# and if for the given region whether the status is same as what we are expecting to change it to.
if action == "create_vault_replica":
replication_details = getattr(resource, "replica_details", None)
if replication_details is None:
return True
region_param = self.module.params.get("replica_region")
existing_replicas = self.client.list_vault_replicas(
self.module.params.get("vault_id")
)
for replica in existing_replicas:
existing_region = getattr(replica, "region", None)
existing_status = getattr(replica, "status", None)
if (
existing_region == region_param
and existing_status in self.REPLICA_CREATION_SUMMARY_STATUS
):
return False
return True
elif action == "delete_vault_replica":
replication_details = getattr(resource, "replica_details", None)
if replication_details is None:
return False
region_param = self.module.params.get("replica_region")
existing_replicas = self.client.list_vault_replicas(
self.module.params.get("vault_id")
)
for replica in existing_replicas:
existing_region = getattr(replica, "region", None)
existing_status = getattr(replica, "status", None)
                if (
                    existing_region == region_param
                    and existing_status in self.REPLICA_CREATION_SUMMARY_STATUS
                ):
                    return True
return False
elif kms_is_action_necessary(self, action, resource) is False:
return False
return super(VaultActionsHelperCustom, self).is_action_necessary(
action, resource
)
# waiting as the change compartment for vault takes time to come back to Active state
def get_default_module_wait_timeout(self):
return int(1 * 2400)
class KeyActionsHelperCustom:
def is_action_necessary(self, action, resource):
if kms_is_action_necessary(self, action, resource) is False:
return False
return super(KeyActionsHelperCustom, self).is_action_necessary(action, resource)
class KeyVersionActionsHelperCustom:
def is_action_necessary(self, action, resource):
if kms_is_action_necessary(self, action, resource) is False:
return False
return super(KeyVersionActionsHelperCustom, self).is_action_necessary(
action, resource
)
def kms_is_action_necessary(resource_helper, action, resource):
# For schedule_key_deletion key (idempotence), we see that the updated key does not have time_of_deletion set, as the key is in PENDING_DELETION state.
if (
hasattr(resource, "lifecycle_state")
and (
resource.lifecycle_state == "PENDING_DELETION"
or resource.lifecycle_state == "DELETED"
)
and hasattr(resource, "time_of_deletion")
and resource.time_of_deletion is not None
and action == "schedule_key_deletion"
and resource_helper.module.params.get("time_of_deletion") is None
):
return False
else:
# Idempotency for modules with delete date like KMS (consider only in deleted lifecycle_state)
# If the deleted date is equal to the request delete date, we should not execute the action (changed=false)
# If the deleted date is different, we will execute the action and return server errors
if (
hasattr(resource, "lifecycle_state")
and (
resource.lifecycle_state == "PENDING_DELETION"
or resource.lifecycle_state == "DELETED"
)
and hasattr(resource, "time_of_deletion")
and resource.time_of_deletion is not None
and resource_helper.module.params.get("time_of_deletion") is not None
):
if resource.time_of_deletion == oci_common_utils.deserialize_datetime(
resource_helper.module.params["time_of_deletion"]
):
return False
else:
resource_helper.module.warn(
"This resource was deleted on: {0}. To change the deletion date, "
"cancel the current deletion and delete this resource again using the new requested date {1}".format(
resource.time_of_deletion.isoformat(sep="T"),
resource_helper.module.params["time_of_deletion"],
)
)
return True
class SecretActionsHelperCustom:
def is_action_necessary(self, action, resource):
if kms_is_action_necessary(self, action, resource) is False:
return False
return super(SecretActionsHelperCustom, self).is_action_necessary(
action, resource
)
| 39.533708
| 155
| 0.64843
|
4795b6799991a11796a10c641edcd27eee2ee7ce
| 2,820
|
py
|
Python
|
examples/smtp-vrfy.py
|
karpierz/libcurl
|
531bd28ab32fb07c152e5b5ca4bd4dbde059b9a8
|
[
"Zlib"
] | null | null | null |
examples/smtp-vrfy.py
|
karpierz/libcurl
|
531bd28ab32fb07c152e5b5ca4bd4dbde059b9a8
|
[
"Zlib"
] | null | null | null |
examples/smtp-vrfy.py
|
karpierz/libcurl
|
531bd28ab32fb07c152e5b5ca4bd4dbde059b9a8
|
[
"Zlib"
] | null | null | null |
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 1998 - 2021, Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
#***************************************************************************
"""
SMTP example showing how to verify an e-mail address
"""
import sys
import ctypes as ct
import libcurl as lcurl
from curltestutils import * # noqa
if not lcurl.CURL_AT_LEAST_VERSION(7, 34, 0):
print("This example requires curl 7.34.0 or later", file=sys.stderr)
sys.exit(-1)
# This is a simple example showing how to verify an e-mail address from an
# SMTP server.
#
# Notes:
#
# 1) Not all email servers support this command and even if your email server
# does support it, it may respond with a 252 response code even though the
# address does not exist.
def main(argv=sys.argv[1:]):
curl: ct.POINTER(lcurl.CURL) = lcurl.easy_init()
with curl_guard(False, curl):
if not curl: return 1
# This is the URL for your mailserver
lcurl.easy_setopt(curl, lcurl.CURLOPT_URL, b"smtp://mail.example.com")
# Note that the CURLOPT_MAIL_RCPT takes a list, not a char array
recipients = ct.POINTER(lcurl.slist)()
recipients = lcurl.slist_append(recipients, b"<recipient@example.com>")
lcurl.easy_setopt(curl, lcurl.CURLOPT_MAIL_RCPT, recipients)
# Perform the custom request
res: lcurl.CURLcode = lcurl.easy_perform(curl)
# Check for errors
if res != lcurl.CURLE_OK:
handle_easy_perform_error(res)
# Free the list of recipients
lcurl.slist_free_all(recipients)
# curl will not send the QUIT command until you call cleanup, so you
# should be able to re-use this connection for additional requests. It
# may not be a good idea to keep the connection open for a very long time
# though (more than a few minutes may result in the server timing out the
# connection) and you do want to clean up in the end.
return 0
sys.exit(main())
| 35.25
| 81
| 0.60922
|
ff33f8c364e27fa9568b5f2ad9297e30b1b78e80
| 5,206
|
py
|
Python
|
FLutils/data_utils.py
|
Rand2AI/FedBoosting
|
999dc879a2fe06563f27fab0a356e07d342dfc34
|
[
"MIT"
] | 3
|
2021-12-10T19:25:30.000Z
|
2021-12-10T20:58:55.000Z
|
FLutils/data_utils.py
|
Rand2AI/FedBoosting
|
999dc879a2fe06563f27fab0a356e07d342dfc34
|
[
"MIT"
] | null | null | null |
FLutils/data_utils.py
|
Rand2AI/FedBoosting
|
999dc879a2fe06563f27fab0a356e07d342dfc34
|
[
"MIT"
] | 1
|
2021-11-25T14:03:49.000Z
|
2021-11-25T14:03:49.000Z
|
import numpy as np
import base64, json, cv2
class DataHandler:
def __init__(self, debug = False):
self.debug = debug
self.train_data = None
self.evaluate_data = None
self.test_data = None
def split_data(self, ratio):
thres = int(len(self.train_data)*ratio)
self.evaluate_data = self.train_data[thres:len(self.train_data)]
self.train_data = self.train_data[0:thres]
print(f"Number of training images is {len(self.train_data)}")
print(f"Number of evaluating images is {len(self.evaluate_data)}")
def process_train_data(self, Datapath):
TrainFile = Datapath + '/train_FL.json'
if self.debug:
train_data = []
with open(TrainFile, 'r', encoding='utf-8') as imgf:
train_data_line = imgf.readline()
while train_data_line:
train_data.append(train_data_line)
if len(train_data)==50000: break
train_data_line = imgf.readline()
else:
with open(TrainFile, 'r', encoding='utf-8') as imgf:
train_data = imgf.readlines()
self.train_data = train_data
print(f"Number of total images is {len(self.train_data)}")
def process_test_data(self, Datapath):
TestFile = Datapath + '/test_FL.json'
if self.debug:
test_data = []
with open(TestFile, 'r', encoding='utf-8') as imgf:
test_data_line = imgf.readline()
while test_data_line:
test_data.append(test_data_line)
if len(test_data) == 10000: break
test_data_line = imgf.readline()
else:
with open(TestFile, 'r', encoding='utf-8') as imgf:
test_data = imgf.readlines()
self.test_data = test_data
def assign_data_to_clients(self, clients):
for client in clients:
client.receive_data(self.train_data, self.evaluate_data)
def gen_character(filepath):
char = ''
with open(filepath, encoding='utf-8') as fid:
for ch in fid.readlines():
ch = ch.strip('\r\n')
char += ch
char_to_id = {j: i for i, j in enumerate(char)}
return char_to_id
class sequence_order_num:
def __init__(self, total, batchsize=64):
self.total = total
self.range = [i for i in range(total)]
self.index = 0
max_index = int(total / batchsize)
self.index_list = [i for i in range(max_index)]
np.random.shuffle(self.index_list)
def get(self, batchsize):
s_o = []
if self.index + batchsize > self.total:
s_o_1 = self.range[self.index:self.total]
self.index = (self.index + batchsize) - self.total
s_o_2 = self.range[0:self.index]
s_o.extend(s_o_1)
s_o.extend(s_o_2)
else:
s_o = self.range[self.index:self.index + batchsize]
self.index = self.index + batchsize
return s_o
def shuffle_batch(self, batchsize):
if self.index== len(self.index_list): self.index=0
start_index = self.index_list[self.index]*batchsize
end_index = start_index + batchsize
s_o = self.range[start_index:end_index]
self.index += 1
return s_o
def generator(client_train_dict: dict, data, mode="train"):
if mode=="train":
batchsize = client_train_dict["batch_size"]
else:
batchsize = client_train_dict["val_batch_size"]
batchsize = min(batchsize, len(data))
char_to_id = gen_character(client_train_dict["char_file"])
idlist = sequence_order_num(total=len(data), batchsize=batchsize)
while True:
index = idlist.get(batchsize)
x_generator = np.zeros((len(index), client_train_dict["image_size"][0], client_train_dict["image_size"][1], 3), dtype=np.float32)
y_generator = np.ones([len(index), client_train_dict["max_label_length"]]) * 10000
input_length_generator = np.zeros([len(index), 1])
label_length_generator = np.zeros([len(index), 1])
for ind, i in enumerate(index):
temp = json.loads(data[i].strip('\r\n'))
IdNumber = temp['label'].upper()
labelL = len(IdNumber)
Img = temp['img'].encode('utf-8')
Img = cv2.imdecode(np.frombuffer(base64.b64decode(Img), np.uint8), 1)
if len(Img.shape) < 3 or Img.shape[2] == 1:
Img = cv2.merge([Img, Img, Img])
img1 = cv2.resize(Img, (100, 32))
inputL = img1.shape[1] // 4
img1 = (np.array(img1, 'f') - 127.5) / 127.5
x_generator[ind] = img1
y_generator[ind, :labelL] = [char_to_id[i] for i in IdNumber]
input_length_generator[ind] = inputL
label_length_generator[ind] = labelL
inputs = {'the_image': x_generator,
'the_label_text': y_generator,
'the_length_image': input_length_generator,
'the_length_texts': label_length_generator
}
outputs = {'loss_ctc': np.zeros([len(index)])}
yield inputs, outputs
| 40.671875
| 137
| 0.589512
|
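The data_utils.py record above drives batching through the sequence_order_num helper, whose get method wraps around once the index passes the total. A small sketch of that wrap-around, assuming the repository root is on PYTHONPATH (so FLutils.data_utils is importable) and that numpy and OpenCV are installed, since the module imports them at the top:

# Wrap-around behavior of sequence_order_num.get, assuming the repo root is on
# PYTHONPATH so FLutils.data_utils can be imported (cv2/numpy must be installed).
from FLutils.data_utils import sequence_order_num

order = sequence_order_num(total=10, batchsize=4)
print(order.get(4))  # [0, 1, 2, 3]
print(order.get(4))  # [4, 5, 6, 7]
print(order.get(4))  # wraps around: [8, 9, 0, 1]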
d194d3f18fd5f7516046fb8774134e53d7e5a1f9
| 2,303
|
py
|
Python
|
src/emmental/scorer.py
|
HiromuHota/emmental
|
eb1e29b3406fc0ac301b2d29e06db5e6774eb9f0
|
[
"MIT"
] | null | null | null |
src/emmental/scorer.py
|
HiromuHota/emmental
|
eb1e29b3406fc0ac301b2d29e06db5e6774eb9f0
|
[
"MIT"
] | null | null | null |
src/emmental/scorer.py
|
HiromuHota/emmental
|
eb1e29b3406fc0ac301b2d29e06db5e6774eb9f0
|
[
"MIT"
] | null | null | null |
import logging
from functools import partial
from typing import Callable, Dict, List
from numpy import ndarray
from emmental.metrics import METRICS
from emmental.utils.utils import array_to_numpy
logger = logging.getLogger(__name__)
class Scorer(object):
r"""A class to score tasks.
Args:
      metrics(list): a list of metric names provided
        by emmental (e.g., accuracy), defaults to [].
      customize_metric_funcs(dict): a dict of custom metrics where the key is the metric
        name and the value is a metric function that takes golds, preds, probs, uids as
        input, defaults to {}.
"""
def __init__(
self, metrics: List[str] = [], customize_metric_funcs: Dict[str, Callable] = {}
) -> None:
self.metrics: Dict[str, Callable] = dict()
for metric in metrics:
if metric in METRICS:
self.metrics[metric] = METRICS[metric] # type: ignore
elif metric.startswith("accuracy@"):
self.metrics[metric] = partial(
METRICS["accuracy"], topk=int(metric.split("@")[1]) # type: ignore
)
else:
raise ValueError(f"Unrecognized metric: {metric}")
self.metrics.update(customize_metric_funcs)
def score(
self, golds: ndarray, preds: ndarray, probs: ndarray, uids: List[str] = None
) -> Dict[str, float]:
"""Calculate the score.
Args:
golds(ndarray): Ground truth values.
          preds(ndarray): Predicted values.
          probs(ndarray): Predicted probabilities.
uids(list, optional): Unique ids, defaults to None.
Returns:
dict: score dict.
"""
metric_dict = dict()
for metric_name, metric in self.metrics.items():
# handle no examples
if len(golds) == 0:
metric_dict[metric_name] = float("nan")
continue
golds = array_to_numpy(golds)
preds = array_to_numpy(preds)
probs = array_to_numpy(probs)
res = metric(golds, preds, probs, uids)
if isinstance(res, dict):
metric_dict.update(res)
else:
metric_dict[metric_name] = res
return metric_dict
| 29.909091
| 87
| 0.587929
|
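The scorer.py record above accepts user-supplied metric functions through customize_metric_funcs. A hedged usage sketch that relies only on that path, so nothing is assumed about the contents of emmental's built-in METRICS registry beyond what the code shows:

# Usage sketch for Scorer with a custom metric only; frac_positive is a toy
# metric defined here, not part of emmental.
import numpy as np
from emmental.scorer import Scorer

def frac_positive(golds, preds, probs, uids):
    # toy metric: fraction of predicted positives
    return float(np.mean(preds == 1))

scorer = Scorer(customize_metric_funcs={"frac_positive": frac_positive})
golds = np.array([0, 1, 1, 0])
preds = np.array([0, 1, 0, 0])
probs = np.array([[0.8, 0.2], [0.3, 0.7], [0.6, 0.4], [0.9, 0.1]])
print(scorer.score(golds, preds, probs))  # {'frac_positive': 0.25}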
86ae30d50ec1f56010a8c5e3678c98dcd5c9fc24
| 41
|
py
|
Python
|
py_pkg_demo/mod_a.py
|
webee/python-packaging-demo
|
0c60831420d7f2cd6fec270ddd33bd324366e30f
|
[
"MIT"
] | null | null | null |
py_pkg_demo/mod_a.py
|
webee/python-packaging-demo
|
0c60831420d7f2cd6fec270ddd33bd324366e30f
|
[
"MIT"
] | null | null | null |
py_pkg_demo/mod_a.py
|
webee/python-packaging-demo
|
0c60831420d7f2cd6fec270ddd33bd324366e30f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
def a():
return "a"
| 6.833333
| 14
| 0.512195
|
82ff85742f2926c89480d45e6fba754a533be242
| 5,038
|
py
|
Python
|
tests/test_processing.py
|
IoT-Inspector/unblob
|
4a6c871dae6805a922e55d30a7925910dc6a4eda
|
[
"MIT"
] | 17
|
2021-11-23T10:05:24.000Z
|
2022-03-10T15:40:41.000Z
|
tests/test_processing.py
|
IoT-Inspector/unblob
|
4a6c871dae6805a922e55d30a7925910dc6a4eda
|
[
"MIT"
] | 184
|
2021-11-22T12:25:05.000Z
|
2022-03-31T16:27:41.000Z
|
tests/test_processing.py
|
IoT-Inspector/unblob
|
4a6c871dae6805a922e55d30a7925910dc6a4eda
|
[
"MIT"
] | 2
|
2021-11-25T09:34:01.000Z
|
2022-02-18T00:14:23.000Z
|
from pathlib import Path
from typing import List
import attr
import pytest
from unblob.models import UnknownChunk, ValidChunk
from unblob.processing import (
ExtractionConfig,
calculate_buffer_size,
calculate_entropy,
calculate_unknown_chunks,
draw_entropy_plot,
remove_inner_chunks,
)
def assert_same_chunks(expected, actual, explanation=None):
    """Assert chunk equality while ignoring the chunk ids."""
    assert len(expected) == len(actual), explanation
    for e, a in zip(expected, actual):
        assert attr.evolve(e, id="") == attr.evolve(a, id=""), explanation
@pytest.mark.parametrize(
"chunks, expected, explanation",
[
([], [], "Empty list as chunks (No chunk found)"),
(
[
ValidChunk(1, 2),
],
[ValidChunk(1, 2)],
"Only one chunk",
),
(
[
ValidChunk(0, 5),
ValidChunk(1, 2),
],
[ValidChunk(0, 5)],
"One chunk within another",
),
(
[
ValidChunk(10, 20),
ValidChunk(11, 13),
ValidChunk(14, 19),
],
[ValidChunk(10, 20)],
"Multiple chunks within 1 outer chunk",
),
(
[
ValidChunk(11, 13),
ValidChunk(10, 20),
ValidChunk(14, 19),
],
[ValidChunk(10, 20)],
"Multiple chunks within 1 outer chunk, in different order",
),
(
[
ValidChunk(1, 5),
ValidChunk(6, 10),
],
[ValidChunk(1, 5), ValidChunk(6, 10)],
"Multiple outer chunks",
),
(
[
ValidChunk(1, 5),
ValidChunk(2, 3),
ValidChunk(6, 10),
ValidChunk(7, 8),
],
[ValidChunk(1, 5), ValidChunk(6, 10)],
"Multiple outer chunks, with chunks inside",
),
],
)
def test_remove_inner_chunks(
chunks: List[ValidChunk], expected: List[ValidChunk], explanation: str
):
assert_same_chunks(expected, remove_inner_chunks(chunks), explanation)
@pytest.mark.parametrize(
"chunks, file_size, expected",
[
([], 0, []),
([], 10, []),
([ValidChunk(0x0, 0x5)], 5, []),
([ValidChunk(0x0, 0x5), ValidChunk(0x5, 0xA)], 10, []),
([ValidChunk(0x0, 0x5), ValidChunk(0x5, 0xA)], 12, [UnknownChunk(0xA, 0xC)]),
([ValidChunk(0x3, 0x5)], 5, [UnknownChunk(0x0, 0x3)]),
([ValidChunk(0x0, 0x5), ValidChunk(0x7, 0xA)], 10, [UnknownChunk(0x5, 0x7)]),
(
[ValidChunk(0x8, 0xA), ValidChunk(0x0, 0x5), ValidChunk(0xF, 0x14)],
20,
[UnknownChunk(0x5, 0x8), UnknownChunk(0xA, 0xF)],
),
],
)
def test_calculate_unknown_chunks(
chunks: List[ValidChunk], file_size: int, expected: List[UnknownChunk]
):
assert_same_chunks(expected, calculate_unknown_chunks(chunks, file_size))
@pytest.mark.parametrize(
"file_size, chunk_count, min_limit, max_limit, expected",
[
(1000, 1, 10, 100, 100),
(1000, 10, 10, 100, 100),
(1000, 100, 10, 100, 10),
],
)
def test_calculate_buffer_size(
file_size: int, chunk_count: int, min_limit: int, max_limit: int, expected: int
):
assert expected == calculate_buffer_size(
file_size, chunk_count=chunk_count, min_limit=min_limit, max_limit=max_limit
)
def test_draw_entropy_plot_error():
with pytest.raises(TypeError):
draw_entropy_plot([])
@pytest.mark.parametrize(
"percentages",
[
pytest.param([0.0] * 100, id="zero-array"),
pytest.param([99.99] * 100, id="99-array"),
pytest.param([100.0] * 100, id="100-array"),
],
)
def test_draw_entropy_plot_no_exception(percentages: List[float]):
assert draw_entropy_plot(percentages) is None
@pytest.mark.parametrize(
"path, draw_plot",
[
pytest.param(Path("/proc/self/exe"), True, id="draw-plot"),
pytest.param(Path("/proc/self/exe"), False, id="no-plot"),
],
)
def test_calculate_entropy_no_exception(path: Path, draw_plot: bool):
assert calculate_entropy(path, draw_plot=draw_plot) is None
@pytest.mark.parametrize(
"extract_root, path, extract_dir_prefix",
[
("/extract", "firmware", "firmware"),
("/extract", "relative/firmware", "firmware"),
("/extract", "/extract/dir/firmware", "dir/firmware"),
("/extract/dir", "/extract/dir/firmware", "firmware"),
("/extract", "/some/place/else/firmware", "firmware"),
],
)
def test_ExtractionConfig_get_extract_dir_for(
extract_root: str, path: str, extract_dir_prefix: str
):
cfg = ExtractionConfig(extract_root=Path(extract_root), entropy_depth=0)
assert cfg.get_extract_dir_for(Path(path)) == (
cfg.extract_root / Path(extract_dir_prefix + cfg.extract_suffix)
)
| 29.121387
| 85
| 0.568083
|
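The test_processing.py record above parametrizes remove_inner_chunks and calculate_unknown_chunks; here is a compact worked example of the same two helpers, assuming unblob is installed so unblob.models and unblob.processing import as they do in the tests:

# Worked example of the chunk helpers exercised in the tests above.
from unblob.models import ValidChunk
from unblob.processing import calculate_unknown_chunks, remove_inner_chunks

chunks = [ValidChunk(0x0, 0x5), ValidChunk(0x1, 0x3), ValidChunk(0x8, 0xA)]
outer = remove_inner_chunks(chunks)            # keeps (0x0, 0x5) and (0x8, 0xA)
unknown = calculate_unknown_chunks(outer, file_size=0xC)
# unknown covers the gaps: (0x5, 0x8) and (0xA, 0xC)
print(outer, unknown)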
64cf3a585857525c7a514426b4f081636ce23ca2
| 540
|
py
|
Python
|
backend/app/alembic/versions/cd18d2dcd1fd_.py
|
lianjy357/vue-element-admin-fastapi
|
70f697af33ca747a154d0de129f4cbd7f9d03f7b
|
[
"MIT"
] | 10
|
2020-12-16T07:31:29.000Z
|
2022-01-27T08:01:22.000Z
|
backend/app/alembic/versions/cd18d2dcd1fd_.py
|
lianjy357/vue-element-admin-fastapi
|
70f697af33ca747a154d0de129f4cbd7f9d03f7b
|
[
"MIT"
] | null | null | null |
backend/app/alembic/versions/cd18d2dcd1fd_.py
|
lianjy357/vue-element-admin-fastapi
|
70f697af33ca747a154d0de129f4cbd7f9d03f7b
|
[
"MIT"
] | 3
|
2021-03-18T11:38:21.000Z
|
2021-09-02T06:23:15.000Z
|
"""
Revision ID: cd18d2dcd1fd
Revises: 250e78708916
Create Date: 2020-06-28 10:18:25.456332
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cd18d2dcd1fd'
down_revision = '250e78708916'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| 18.62069
| 65
| 0.681481
|
766b8395bc2aaae41e1fabb19b985330a9f48e99
| 2,503
|
py
|
Python
|
test/functional/feature_help.py
|
lionco-in/lioncoin
|
3335eea7bef6ca4f309d9ed9b9c92207084da20e
|
[
"MIT"
] | null | null | null |
test/functional/feature_help.py
|
lionco-in/lioncoin
|
3335eea7bef6ca4f309d9ed9b9c92207084da20e
|
[
"MIT"
] | null | null | null |
test/functional/feature_help.py
|
lionco-in/lioncoin
|
3335eea7bef6ca4f309d9ed9b9c92207084da20e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Verify that starting lioncoin with -h works as expected."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class HelpTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes)
# Don't start the node
def get_node_output(self, *, ret_code_expected):
ret_code = self.nodes[0].process.wait(timeout=5)
assert_equal(ret_code, ret_code_expected)
self.nodes[0].stdout.seek(0)
self.nodes[0].stderr.seek(0)
out = self.nodes[0].stdout.read()
err = self.nodes[0].stderr.read()
self.nodes[0].stdout.close()
self.nodes[0].stderr.close()
# Clean up TestNode state
self.nodes[0].running = False
self.nodes[0].process = None
self.nodes[0].rpc_connected = False
self.nodes[0].rpc = None
return out, err
def run_test(self):
self.log.info("Start lioncoin with -h for help text")
self.nodes[0].start(extra_args=['-h'])
# Node should exit immediately and output help to stdout.
output, _ = self.get_node_output(ret_code_expected=0)
assert b'Options' in output
self.log.info("Help text received: {} (...)".format(output[0:60]))
self.log.info("Start lioncoin with -version for version information")
self.nodes[0].start(extra_args=['-version'])
# Node should exit immediately and output version to stdout.
output, _ = self.get_node_output(ret_code_expected=0)
assert b'version' in output
self.log.info("Version text received: {} (...)".format(output[0:60]))
# Test that arguments not in the help results in an error
self.log.info("Start lioncoind with -fakearg to make sure it does not start")
self.nodes[0].start(extra_args=['-fakearg'])
# Node should exit immediately and output an error to stderr
_, output = self.get_node_output(ret_code_expected=1)
assert b'Error parsing command line arguments' in output
self.log.info("Error message received: {} (...)".format(output[0:60]))
if __name__ == '__main__':
HelpTest().main()
| 39.730159
| 85
| 0.664003
|
8591a6d6f4d088d5a1ab55160f05088a775db01d
| 306
|
py
|
Python
|
config.py
|
yucheng6039/WebAuto
|
13fa954dd58407ee23e89be89f73cb97f5c11108
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
yucheng6039/WebAuto
|
13fa954dd58407ee23e89be89f73cb97f5c11108
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
yucheng6039/WebAuto
|
13fa954dd58407ee23e89be89f73cb97f5c11108
|
[
"Apache-2.0"
] | null | null | null |
import os
BaseDir = os.path.dirname(os.path.abspath(__file__))
# http://npm.taobao.org/mirrors/chromedriver/  download chromedriver from this mirror
Driver_Path = os.path.join(BaseDir, "static/driver/chromedriver")
Start_Url = "https://spectrum-test.cytekbio.com/"
Step_Time = 0.2  # interval between each step
DefaultTimeOut = 5  # default wait time
| 21.857143
| 66
| 0.738562
|
9c808faa440423eb91adc1678e4575a09d81ca68
| 18,169
|
py
|
Python
|
pyaoscx/configuration.py
|
aruba/pyaoscx
|
913506eef8643f4e7b92f0d1633310e62ec5f7e2
|
[
"Apache-2.0"
] | 25
|
2020-03-12T15:55:03.000Z
|
2022-03-25T14:39:17.000Z
|
pyaoscx/configuration.py
|
aruba/pyaoscx
|
913506eef8643f4e7b92f0d1633310e62ec5f7e2
|
[
"Apache-2.0"
] | 13
|
2020-09-25T18:59:30.000Z
|
2022-01-19T05:46:51.000Z
|
pyaoscx/configuration.py
|
aruba/pyaoscx
|
913506eef8643f4e7b92f0d1633310e62ec5f7e2
|
[
"Apache-2.0"
] | 11
|
2020-05-20T15:08:13.000Z
|
2022-03-21T18:42:12.000Z
|
# (C) Copyright 2019-2021 Hewlett Packard Enterprise Development LP.
# Apache License 2.0
from pyaoscx.exceptions.generic_op_error import GenericOperationError
from pyaoscx.exceptions.response_error import ResponseError
from pyaoscx.exceptions.verification_error import VerificationError
from pyaoscx.pyaoscx_module import PyaoscxModule
import pyaoscx.utils.util as utils
import logging
import json
from urllib.parse import quote_plus
class Configuration():
"""
    Represents a Device's Configuration and all of its attributes,
    keeping all of the device's configuration information.
"""
base_uri = "system"
def __init__(self, session):
self.session = session
# Used to set attributes
self.config_attrs = []
self.materialized = False
# Attribute used to know if object was changed recently
self.__modified = False
@PyaoscxModule.connected
def get(self):
"""
Perform a GET call to retrieve system attributes
"""
logging.info("Retrieving the switch attributes and capabilities")
depth = self.session.api.default_depth
uri = "{base_url}{class_uri}?depth={depth}".format(
base_url=self.session.base_url,
class_uri=Configuration.base_uri,
depth=depth
)
try:
response = self.session.s.get(
uri, verify=False, proxies=self.session.proxy)
except Exception as e:
raise ResponseError("GET", e)
if not utils._response_ok(response, "GET"):
raise GenericOperationError(response.text, response.status_code)
data = json.loads(response.text)
# Create class attributes using util.create_attrs
utils.create_attrs(self, data)
# Second GET request to obtain just the variables that are writable
selector = self.session.api.default_selector
payload = {
"depth": depth,
"selector": selector
}
uri = "{base_url}{class_uri}".format(
base_url=self.session.base_url,
class_uri=Configuration.base_uri,
)
try:
response = self.session.s.get(
uri, verify=False,
proxies=self.session.proxy,
params=payload)
except Exception as e:
raise ResponseError("GET", e)
if not utils._response_ok(response, "GET"):
raise GenericOperationError(response.text, response.status_code)
# Configurable data
config_data = json.loads(response.text)
# Set self.config_attrs and delete ID from it
utils.set_config_attrs(self, config_data, "config_attrs")
# Set original attributes
self.__original_attributes = config_data
# Set device as materialized
self.materialized = True
@PyaoscxModule.connected
def apply(self):
"""
Main method used to update System Attributes
Checks whether the System is materialized
Calls self.update() if the configuration is being updated
:return modified: Boolean, True if object was modified
"""
modified = False
if self.materialized:
modified = self.update()
else:
raise VerificationError("Device", "Not materialized")
return modified
@PyaoscxModule.connected
def update(self):
"""
Perform a PUT call to apply changes to a Device Configuration
:return modified: Boolean, True if object was modified
"""
system_data = {}
system_data = utils.get_attrs(self, self.config_attrs)
uri = "{base_url}{class_uri}".format(
base_url=self.session.base_url,
class_uri=Configuration.base_uri
)
# Compare dictionaries
if system_data == self.__original_attributes:
# Object was not modified
modified = False
else:
put_data = json.dumps(system_data, sort_keys=True, indent=4)
try:
response = self.session.s.put(
uri, verify=False, data=put_data,
proxies=self.session.proxy)
except Exception as e:
raise ResponseError("PUT", e)
if not utils._response_ok(response, "PUT"):
raise GenericOperationError(
response.text,
response.status_code,
"UPDATE SYSTEM ATTRIBUTES")
else:
logging.info("SUCCESS: Updating System Attributes")
# Set new original attributes
self.__original_attributes = system_data
# Object was modified, returns True
modified = True
return modified
####################################################################
# IMPERATIVE FUNCTIONS
####################################################################
def get_full_config(self, config_name="running-config"):
"""
Perform a GET request to obtain the device's full config
:param config_name: String with the local-config name wanted
Defaults to running-config
:return config_data: Data containing the full configuration
"""
uri = "{base_url}fullconfigs/{cfg}".format(
base_url=self.session.base_url,
cfg=config_name
)
try:
response = self.session.s.get(
uri, verify=False,
proxies=self.session.proxy)
except Exception as e:
raise ResponseError("GET", e)
if not utils._response_ok(response, "GET"):
raise GenericOperationError(response.text, response.status_code)
# Configurable data
config_data = json.loads(response.text)
return config_data
def tftp_switch_config_from_remote_location(self, config_file_location,
config_name, vrf):
"""
TFTP switch config from TFTP server.
:param config_file_location: TFTP server address and path for
uploading configuration.
:param config_name: Config file or checkpoint to be uploaded to.
When using TFTP only running-config or startup-config can be used.
:param vrf: VRF to be used to contact TFTP server, required if
remote_output_file_tftp_path is provided.
:return success: Return True if response is successful or False if
it was not.
"""
success = False
uri = "{base_url}fullconfigs/"\
"{cfg}?from={dest}&vrf={vrf}".format(
base_url=self.session.base_url,
cfg=config_name,
dest=config_file_location,
vrf=vrf)
try:
response = self.session.s.put(
uri, verify=False,
proxies=self.session.proxy)
success = True
except Exception as e:
raise ResponseError("PUT", e)
if not utils._response_ok(response, "PUT"):
raise GenericOperationError(response.text, response.status_code)
return success
def copy_switch_config_to_remote_location(self, config_name, config_type,
destination, vrf):
"""
        Copy switch config to a TFTP server using a GET request
:param config_name: String with the config file or checkpoint to be
downloaded. When using TFTP
only running-config or startup-config can be used
:param config_type: Configuration type to be downloaded, JSON or CLI
version of the config. 'json' or 'cli'
:param destination: TFTP server address and path for
copying off configuration, must be reachable through provided vrf
:param vrf: VRF to be used to contact TFTP server
:return True if completed
"""
uri = "{base_url}fullconfigs/"\
"{cfg}?to={dest}&type={type}"\
"&vrf={vrf}".format(
base_url=self.session.base_url,
cfg=config_name,
dest=destination,
type=config_type,
vrf=vrf)
try:
response = self.session.s.get(
uri, verify=False,
proxies=self.session.proxy)
except Exception as e:
raise ResponseError("GET", e)
if not utils._response_ok(response, "GET"):
raise GenericOperationError(response.text, response.status_code)
# If no errors, return True for completion
return True
def backup_configuration(self, config_name, output_file=None,
vrf=None, config_type="json",
remote_file_tftp_path=None):
"""
Obtains the switch's full config in json format and saves it to
a local file or a remote location over TFTP
:param config_name: String with the config file or checkpoint to be
downloaded. When using TFTP
only running-config or startup-config can be used
:param output_file: String with the File name and path for locally
downloading configuration, only JSON version of configuration will
be downloaded
:param vrf: VRF to be used to contact TFTP server
:param config_type: Configuration type to be downloaded, JSON or CLI
version of the config. 'json' or 'cli'
Defaults to json
:param remote_file_tftp_path: TFTP server address and path for
copying off configuration, must be reachable through provided vrf
:return bool: True if success
"""
success = False
if remote_file_tftp_path is not None:
tftp_path = remote_file_tftp_path
if vrf is None:
raise VerificationError(
"Backup Config",
"VRF needs to be provided in order to TFTP "
"the configuration from the switch")
tftp_path_encoded = quote_plus(tftp_path)
if config_name != "running-config" and \
config_name != "startup-config":
raise VerificationError(
"Backup Config",
"Only running-config or " +
"startup-config can be backed-up using TFTP")
success = self.copy_switch_config_to_remote_location(
config_name, config_type, tftp_path_encoded, vrf)
else:
config_json = self.get_full_config()
with open(output_file, "w") as to_file:
formatted_file = json.dumps(config_json, indent=4)
to_file.write(formatted_file)
success = True
# Return result
return success
def create_checkpoint(self, source_config, destination_config):
"""
Perform a PUT request to create a new checkpoint or copy an
existing checkpoint to AOS-CX switch config.
:param source_config: Name of the source configuration
from which checkpoint needs to be created or copied.
:param destination_config: Name of the destination configuration
or name of checkpoint.
:return bool: True if success
"""
success = False
uri = (
"{base_url}fullconfigs/{dest}?from={prefix}fullconfigs/{src}"
).format(
base_url=self.session.base_url,
prefix=self.session.resource_prefix,
dest=destination_config,
src=source_config)
try:
response = self.session.s.put(
uri, verify=False,
proxies=self.session.proxy)
except Exception as e:
raise ResponseError("PUT", e)
if not utils._response_ok(response, "PUT"):
raise GenericOperationError(response.text, response.status_code)
success = True
# Return result
return success
def setup_mgmt_nameservers_dns(self, primary=None, secondary=None):
"""
Setup primary and secondary name servers on a mgmt interface
        :param primary: Primary nameserver on the mgmt interface,
            an IPv4 address.
            Example:
                "10.10.2.10"
        :param secondary: Secondary nameserver on the mgmt interface,
            an IP address.
            Example:
                "10.10.2.10"
        :return modified: Return True if config was modified
"""
if "mode" in self.mgmt_intf:
mgmt_if_mode = self.mgmt_intf["mode"]
else:
mgmt_if_mode = "dhcp"
if mgmt_if_mode != "static":
message_part1 = "The management interface must have static"
message_part2 = "IP to configure management interface name servers"
raise Exception(message_part1 + " " + message_part2)
if primary is not None:
self.mgmt_intf["dns_server_1"] = primary
elif secondary is not None:
self.mgmt_intf["dns_server_2"] = secondary
return self.apply()
def delete_mgmt_nameservers_dns(self):
"""
Delete primary and secondary name servers on a mgmt interface
        :return modified: Return True if config was modified
"""
if "dns_server_1" in self.mgmt_intf:
self.mgmt_intf.pop("dns_server_1")
if "dns_server_2" in self.mgmt_intf:
self.mgmt_intf.pop("dns_server_2")
return self.apply()
def upload_switch_config(self,
config_name=None,
config_file=None,
config_json=None,
vrf=None,
remote_file_tftp_path=None):
"""
Uploads configuration from a configuration file.
:param config_name: String with the Config file or checkpoint
to be uploaded to.
When using TFTP only running-config or startup-config can be used.
Default: None.
:param config_file: String with the File name and path for locally
downloading configuration, only JSON version of configuration
will be downloaded.
Default: None.
:param config_json: String with the JSON file name and path for
locally uploading configuration, only JSON version of configuration
can be uploaded.
Default: None.
:param vrf: String for VRF to be used to contact TFTP server, required
if remote_output_file_tftp_path is provided.
Default: None.
:param remote_file_tftp_path: String for TFTP server address and path
for copying off configuration, must be reachable through provided
vrf.
Default: None.
:return success: Return boolean True if response is successful or False
if it was not.
"""
success = False
if remote_file_tftp_path is not None:
if vrf is None:
raise VerificationError(
"Upload Config",
"VRF needs to be provided in order to TFTP "
"the configuration onto the switch")
tftp_path_encoded = quote_plus(
remote_file_tftp_path)
if config_name != "running-config" and\
config_name != "startup-config":
raise VerificationError(
"Upload Config",
"Only running-config or startup-config "
"can be uploaded using TFTP")
success = self.tftp_switch_config_from_remote_location(
tftp_path_encoded, config_name, vrf)
else:
success = self.upload_switch_config_from_local(
config_json, config_file, config_name)
return success
def upload_switch_config_from_local(self,
config_json=None,
config_file=None,
config_name=None):
"""
Uploads configuration from a configuration file.
:param config_name: String with the Config file or checkpoint
to be uploaded to. When using TFTP only running-config or
startup-config can be used.
Default: None.
:param config_file: String with the File name and path for
locally downloading configuration, only JSON version of
configuration will be downloaded.
Default: None.
:param config_json: String with the JSON file name and path for
locally uploading configuration, only JSON version of
configuration can be uploaded.
Default: None.
:return success: Return boolean True if response is successful
or False if it was not.
"""
success = False
if config_json:
with open(config_json) as json_file:
config_json = json.load(json_file)
if config_file:
with open(config_file) as json_file:
config_json = json.load(json_file)
config_json = json.dumps(config_json)
# Create URI from the session base url and the configuration name
uri = "{base_url}fullconfigs/{cfg}".format(
base_url=self.session.base_url,
cfg=config_name
)
try:
# Put (REST) configuration file
response = self.session.s.put(
url=uri,
verify=False,
proxies=self.session.proxy,
data=config_json)
success = True
except Exception as e:
raise ResponseError("PUT", e)
if not utils._response_ok(response, "PUT"):
raise GenericOperationError(
response.text, response.status_code)
return success
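# Hedged usage sketch (comments only, not part of the original module). The
# `switch` object is an assumed instance of this class with an authenticated
# REST session; the config name, TFTP path, VRF, and file name are placeholders.
#
# Upload a running-config from a TFTP server reachable through the "mgmt" VRF:
# switch.upload_switch_config(config_name="running-config",
#                             remote_file_tftp_path="tftp://10.0.0.5/switch.cfg",
#                             vrf="mgmt")
#
# Upload a locally stored JSON configuration instead:
# switch.upload_switch_config(config_name="running-config",
#                             config_json="backup_config.json")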
| 35.07529
| 79
| 0.584622
|
09607d737a45b3501767844c6567be7348cc58c0
| 16,916
|
py
|
Python
|
Chapter6/01_cnn_improved.py
|
SDCND/Hands-On-Vision-and-Behavior-for-Self-Driving-Cars
|
1802d0ad3184bf53a3920921eeebf3bf9ae979dd
|
[
"MIT"
] | null | null | null |
Chapter6/01_cnn_improved.py
|
SDCND/Hands-On-Vision-and-Behavior-for-Self-Driving-Cars
|
1802d0ad3184bf53a3920921eeebf3bf9ae979dd
|
[
"MIT"
] | null | null | null |
Chapter6/01_cnn_improved.py
|
SDCND/Hands-On-Vision-and-Behavior-for-Self-Driving-Cars
|
1802d0ad3184bf53a3920921eeebf3bf9ae979dd
|
[
"MIT"
] | null | null | null |
import cv2
import keras
#from keras.datasets import mnist, cifar10
#from keras.models import Sequential
#from keras.layers import Dense, Dropout, Flatten, BatchNormalization, SpatialDropout2D
#from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
#from keras.callbacks import ModelCheckpoint, EarlyStopping
#from keras.optimizers import Adam
#from keras.losses import categorical_crossentropy
import tensorflow.keras
from tensorflow.keras.datasets import mnist, cifar10
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, BatchNormalization, SpatialDropout2D
from tensorflow.keras.layers import Conv2D, MaxPooling2D, AveragePooling2D
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import categorical_crossentropy
from time import time
import numpy as np
import sys
sys.path.append('../')
from utils import show_history,save
#from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing.image import ImageDataGenerator
use_mnist = True
# Customize the training
name = "mnist" if use_mnist else "cifar10"
batch_size = 64
num_classes = 10
epochs = 250
augment = True
patience = 20
datagen = ImageDataGenerator(rotation_range=15, width_shift_range=[-5, 0, 5], horizontal_flip=True)
#datagen = ImageDataGenerator(rotation_range=15, width_shift_range=[-8, -4, 0, 4, 8], horizontal_flip=True, height_shift_range=[-5, 0, 5], zoom_range=[0.9, 1.1])
print("Dataset in use: ", name.upper())
# Loading test and training datasets
if use_mnist:
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape)
x_train = np.reshape(x_train, np.append(x_train.shape, (1)))
print(x_train.shape)
x_test = np.reshape(x_test, np.append(x_test.shape, (1)))
else:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
save(name + "_train.jpg", cv2.hconcat([x_train[0], x_train[1], x_train[2], x_train[3], x_train[4]]))
save(name + "_test.jpg", cv2.hconcat([x_test[0], x_test[1], x_test[2], x_test[3], x_test[4]]))
print('X Train', x_train.shape, ' - X Test', x_test.shape)
print('Y Train', y_train.shape, ' - Y Test', y_test.shape)
print('First 5 labels, train:', y_train[0], y_train[1], y_train[2], y_train[3], y_train[4])
print('First 5 labels, test:', y_test[0], y_test[1], y_test[2], y_test[3], y_test[4])
y_train = tensorflow.keras.utils.to_categorical(y_train, num_classes)
y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
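# Hedged illustration (kept commented out like the alternatives above): save one
# augmented batch to disk to eyeball what the ImageDataGenerator produces.
# preview_it = datagen.flow(x_train, y_train, batch_size=5)
# preview_x, _ = next(preview_it)
# save(name + "_augmented.jpg", cv2.hconcat([(img * 255).astype(np.uint8) for img in preview_x]))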
model_name = name + ".h5"
checkpoint = ModelCheckpoint(model_name, monitor='val_loss', mode='min', verbose=1, save_best_only=True)
early_stopping = EarlyStopping(min_delta=0.0005, patience=patience, verbose=1)
def create_model_0():
model = Sequential()
# Convolutional layers
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='elu', input_shape=x_train.shape[1:]))
model.add(AveragePooling2D())
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(AveragePooling2D())
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(AveragePooling2D())
# Fully Connected layers
model.add(Flatten())
model.add(Dense(units=128, activation='relu'))
model.add(Dense(units=64, activation='relu'))
model.add(Dense(units=num_classes, activation = 'softmax'))
return model
def create_model_1():
model = Sequential()
# Convolutional layers
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:]))
model.add(AveragePooling2D())
model.add(Conv2D(filters=256, kernel_size=(3, 3), activation='relu'))
model.add(AveragePooling2D())
# Fully Connected layers
model.add(Flatten())
model.add(Dense(units=512, activation='relu'))
model.add(Dense(units=256, activation='relu'))
model.add(Dense(units=num_classes, activation = 'softmax'))
return model
def create_model_2():
model = Sequential()
# Convolutional layers
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:]))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:], padding="same"))
model.add(AveragePooling2D())
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(AveragePooling2D())
# Fully Connected layers
model.add(Flatten())
model.add(Dense(units=512, activation='relu'))
model.add(Dense(units=256, activation='relu'))
model.add(Dense(units=num_classes, activation = 'softmax'))
return model
def create_model_3():
model = Sequential()
# Convolutional layers
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:], padding="same"))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:], padding="same"))
model.add(AveragePooling2D())
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(AveragePooling2D())
model.add(Conv2D(filters=256, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(Conv2D(filters=256, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(AveragePooling2D())
# Fully Connected layers
model.add(Flatten())
model.add(Dense(units=512, activation='relu'))
model.add(Dense(units=256, activation='relu'))
model.add(Dense(units=num_classes, activation = 'softmax'))
return model
def create_model_4():
model = Sequential()
# Convolutional layers
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:], padding="same"))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:], padding="same"))
model.add(AveragePooling2D())
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(AveragePooling2D())
model.add(Conv2D(filters=256, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(Conv2D(filters=256, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(AveragePooling2D())
# Fully Connected layers
model.add(Flatten())
model.add(Dense(units=256, activation='relu'))
model.add(Dense(units=128, activation='relu'))
model.add(Dense(units=num_classes, activation = 'softmax'))
return model
def create_model_5():
model = Sequential()
# Convolutional layers
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:], padding="same"))
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:], padding="same"))
model.add(AveragePooling2D())
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(AveragePooling2D())
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(AveragePooling2D())
# Fully Connected layers
model.add(Flatten())
model.add(Dense(units=256, activation='relu'))
model.add(Dense(units=128, activation='relu'))
model.add(Dense(units=num_classes, activation = 'softmax'))
return model
def create_model_bn_1():
model = Sequential()
# Convolutional layers
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:], padding="same"))
model.add(BatchNormalization())
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:], padding="same"))
model.add(BatchNormalization())
model.add(AveragePooling2D())
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(AveragePooling2D())
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(AveragePooling2D())
# Fully Connected layers
model.add(Flatten())
model.add(Dense(units=256, activation='relu'))
model.add(Dense(units=128, activation='relu'))
model.add(Dense(units=num_classes, activation = 'softmax'))
return model
def create_model_bn_2_dropout():
model = Sequential()
# Convolutional layers
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:], padding="same"))
model.add(BatchNormalization())
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:], padding="same"))
model.add(BatchNormalization())
model.add(AveragePooling2D())
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(AveragePooling2D())
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(AveragePooling2D())
# Fully Connected layers
model.add(Flatten())
model.add(Dense(units=384, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(units=192, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(units=num_classes, activation='softmax'))
return model
def create_model_bn_3_dropout():
model = Sequential()
# Convolutional layers
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:], padding="same"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:], padding="same"))
model.add(BatchNormalization())
model.add(AveragePooling2D())
model.add(Dropout(0.5))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(AveragePooling2D())
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(AveragePooling2D())
# Fully Connected layers
model.add(Flatten())
model.add(Dense(units=384, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(units=192, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(units=num_classes, activation='softmax'))
return model
def create_model_bn_4_dropout():
# Max Accuracy: 0.73944
# Max Validation Accuracy: 0.8144999742507935
model = Sequential()
# Convolutional layers
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='elu', input_shape=x_train.shape[1:], padding="same"))
model.add(BatchNormalization())
model.add(SpatialDropout2D(0.3))
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:], padding="same"))
model.add(BatchNormalization())
model.add(AveragePooling2D())
model.add(SpatialDropout2D(0.3))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(SpatialDropout2D(0.2))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(AveragePooling2D())
model.add(Dropout(0.2))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(AveragePooling2D())
model.add(Dropout(0.2))
# Fully Connected layers
model.add(Flatten())
model.add(Dense(units=384, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(units=192, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(units=num_classes, activation='softmax'))
return model
def create_model_bn_5_dropout():
#Max Accuracy: 0.78884
#Max Validation Accuracy: 0.8482999801635742
model = Sequential()
# Convolutional layers
model.add(Conv2D(filters=20, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:], padding="same"))
model.add(BatchNormalization())
model.add(Conv2D(filters=20, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:], padding="same"))
model.add(BatchNormalization())
model.add(MaxPooling2D())
model.add(SpatialDropout2D(0.25))
model.add(Conv2D(filters=40, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(Conv2D(filters=40, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(MaxPooling2D())
model.add(SpatialDropout2D(0.2))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(MaxPooling2D())
model.add(Dropout(0.15))
# Fully Connected layers
model.add(Flatten())
model.add(Dense(units=384, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(units=192, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(units=num_classes, activation='softmax'))
return model
def create_model_bn_6_dropout():
# Max Accuracy: 0.84324
# Max Validation Accuracy: 0.8779000043869019
model = Sequential()
# Convolutional layers
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:], padding="same"))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', input_shape=x_train.shape[1:], padding="same"))
model.add(BatchNormalization())
model.add(AveragePooling2D())
model.add(SpatialDropout2D(0.2))
model.add(Conv2D(filters=48, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(Conv2D(filters=48, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(AveragePooling2D())
model.add(SpatialDropout2D(0.2))
model.add(Conv2D(filters=72, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(Conv2D(filters=72, kernel_size=(3, 3), activation='relu', padding="same"))
model.add(BatchNormalization())
model.add(AveragePooling2D())
model.add(Dropout(0.1))
# Fully Connected layers
model.add(Flatten())
model.add(Dense(units=384, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(units=192, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(units=num_classes, activation='softmax'))
return model
# Choose the model that you want to train
model = create_model_bn_6_dropout()
model.summary()
opt = Adam()
model.compile(loss=categorical_crossentropy, optimizer=opt, metrics=['accuracy'])
start = time()
if augment:
it_train = datagen.flow(x_train, y_train, batch_size=batch_size)
history_object = model.fit(it_train, epochs=epochs, validation_data=(x_test, y_test), shuffle=True, callbacks=[checkpoint, early_stopping])
else:
history_object = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_test, y_test), shuffle=True, callbacks=[checkpoint, early_stopping])
print("Training time:", time()-start)
show_history(history_object)
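# Hedged follow-up sketch (not part of the original script): reload the best
# checkpoint written by ModelCheckpoint above and report its test accuracy.
best_model = tensorflow.keras.models.load_model(model_name)
test_loss, test_acc = best_model.evaluate(x_test, y_test, verbose=0)
print("Best checkpoint test accuracy:", test_acc)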
| 40.4689
| 174
| 0.708028
|
f0fc9a319c71e75beb6ee5ac025d21a108f88d9c
| 3,200
|
py
|
Python
|
vtapi3/vtapi3base.py
|
drobotun/virustotalapi3
|
7d9e278dc58fa9702ddf8c62de09c852a7316148
|
[
"MIT"
] | 8
|
2020-01-10T00:19:06.000Z
|
2022-01-12T18:13:27.000Z
|
vtapi3/vtapi3base.py
|
drobotun/virustotalapi3
|
7d9e278dc58fa9702ddf8c62de09c852a7316148
|
[
"MIT"
] | 2
|
2020-02-07T22:01:55.000Z
|
2020-02-11T19:52:48.000Z
|
vtapi3/vtapi3base.py
|
drobotun/virustotalapi3
|
7d9e278dc58fa9702ddf8c62de09c852a7316148
|
[
"MIT"
] | 3
|
2020-01-10T00:19:18.000Z
|
2020-12-08T17:13:41.000Z
|
"""The module describes the VirusTotalAPI base class
Author: Evgeny Drobotun (c) 2019
License: MIT (https://github.com/drobotun/virustotalapi3/blob/master/LICENSE)
More information: https://virustotalapi3.readthedocs.io/en/latest/base_class.html
"""
import requests
class VirusTotalAPI:
"""A base class for subclasses that implement methods for working with files, URLs, domain
names, and IP addresses.
Attributes:
base_url: The base URL for sending requests (str).
headers: Request header containing API key (dict).
timeout: Server response timeout. A tuple that includes a timeout value for 'connect' and
a timeout value for 'read'. If a single timeout value is specified, it will be applied to
both the 'connect' and the 'read' timeouts.
proxies: The Protocol and the URL of the proxy server (dict).
_version_api: VirusTotal API version (str).
_last_http_error: HTTP status code of last operation (int).
_last_result: Result of the last execution of a subclass method of this class.
Constants: HTTP error codes constants.
Methods:
get_version_api(): Return the API version values.
get_last_http_error(): Return the HTTP status code of last operation.
get_last_result(): Return the result of executing methods of subclasses of this class.
"""
HTTP_OK = requests.codes['ok']
HTTP_BAD_REQUEST_ERROR = requests.codes['bad_request']
HTTP_AUTHENTICATION_REQUIRED_ERROR = requests.codes['unauthorized']
HTTP_FORBIDDEN_ERROR = requests.codes['forbidden']
HTTP_NOT_FOUND_ERROR = requests.codes['not_found']
HTTP_ALREADY_EXISTS_ERROR = requests.codes['conflict']
HTTP_QUOTA_EXCEEDED_ERROR = requests.codes['too_many_requests']
HTTP_TRANSIENT_ERROR = requests.codes['service_unavailable']
def __init__(self, api_key=None, timeout=None, proxies=None):
"""Inits VirusTotalAPI.
Args:
api_key: your API key to access the functions of the service VirusTotal (str).
timeout: Server response timeout (int). Optional.
proxies: The Protocol and the URL of the proxy server (dict). Optional.
"""
self.base_url = 'https://www.virustotal.com/api/v3'
self.headers = {'x-apikey' : api_key}
self.timeout = timeout
self.proxies = proxies
self._version_api = 'version 3'
self._last_http_error = None
self._last_result = None
def get_version_api(self):
"""Return the API version values.
Return:
String containing API version ('version 3').
"""
return self._version_api
def get_last_http_error(self):
"""Return the HTTP status code of last operation.
Return:
HTTP status code of last operation.
"""
return self._last_http_error
def get_last_result(self):
"""Return the result of executing methods of subclasses of this class.
Return:
Result of the last execution of a subclass method of this class.
"""
return self._last_result
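# Hedged usage sketch (comments only): subclasses of VirusTotalAPI implement the
# actual requests; here only the base-class accessors are shown. 'YOUR-API-KEY'
# is a placeholder.
#
# vt_api = VirusTotalAPI(api_key='YOUR-API-KEY', timeout=(5, 30))
# print(vt_api.get_version_api())      # 'version 3'
# print(vt_api.get_last_http_error())  # None until a request has been made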
| 38.554217
| 99
| 0.665938
|
904e840fec14e6e7f06f8029c4057ddb95f52a26
| 2,632
|
py
|
Python
|
tokenwiser/pipeline/_concat.py
|
Btibert3/tokenwiser
|
64f78be285d24ebc53bcc6991466517aed633888
|
[
"Apache-2.0"
] | 50
|
2020-11-21T04:29:34.000Z
|
2022-02-12T11:16:52.000Z
|
tokenwiser/pipeline/_concat.py
|
Btibert3/tokenwiser
|
64f78be285d24ebc53bcc6991466517aed633888
|
[
"Apache-2.0"
] | 33
|
2020-11-26T11:03:52.000Z
|
2021-12-04T20:27:44.000Z
|
tokenwiser/pipeline/_concat.py
|
Btibert3/tokenwiser
|
64f78be285d24ebc53bcc6991466517aed633888
|
[
"Apache-2.0"
] | 7
|
2021-04-07T08:54:34.000Z
|
2021-11-11T00:18:17.000Z
|
from sklearn.pipeline import _name_estimators
from sklearn.base import BaseEstimator
class TextConcat(BaseEstimator):
"""
A component like `FeatureUnion` but this also concatenates the text.
Arguments:
transformer_list: list of (name, text-transformer)-tuples
Example:
```python
from tokenwiser.textprep import HyphenTextPrep, Cleaner
from tokenwiser.pipeline import TextConcat
tc = TextConcat([("hyp", HyphenTextPrep()), ("clean", Cleaner())])
results = tc.fit_transform(["dinosaurhead", "another$$ sentence$$"])
expected = ['di no saur head dinosaurhead', 'an other $$ sen tence$$ another sentence']
assert results == expected
```
"""
def __init__(self, transformer_list):
self.transformer_list = transformer_list
def fit(self, X, y=None):
"""
Fits the components in a single batch.
"""
names = [n for n, t in self.transformer_list]
if len(names) != len(set(names)):
raise ValueError("Make sure that the names of each step are unique.")
return self
def partial_fit(self, X, y=None):
"""
Fits the components, but allows for batches.
"""
names = [n for n, t in self.transformer_list]
if len(names) != len(set(names)):
raise ValueError("Make sure that the names of each step are unique.")
return self
def transform(self, X, y=None):
"""
Transforms the text.
"""
names = [n for n, t in self.transformer_list]
if len(names) != len(set(names)):
raise ValueError("Make sure that the names of each step are unique.")
results = {}
for name, tfm in self.transformer_list:
results[name] = tfm.transform(X)
return [" ".join([results[n][i] for n in names]) for i in range(len(X))]
def fit_transform(self, X, y=None):
"""
Fits the components and transforms the text in one step.
"""
return self.fit(X, y).transform(X, y)
def make_concat(*steps):
"""
Utility function to generate a `TextConcat`
Arguments:
steps: a collection of text-transformers
```python
from tokenwiser.textprep import HyphenTextPrep, Cleaner
from tokenwiser.pipeline import make_concat
tc = make_concat(HyphenTextPrep(), Cleaner())
results = tc.fit_transform(["dinosaurhead", "another$$ sentence$$"])
expected = ['di no saur head dinosaurhead', 'an other $$ sen tence$$ another sentence']
assert results == expected
```
"""
return TextConcat(_name_estimators(steps))
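# Hedged sketch (comments only, not part of the package source): TextConcat
# outputs plain strings, so it can feed a standard scikit-learn text vectorizer.
# CountVectorizer and make_pipeline come from scikit-learn, which is already a
# dependency of this module.
#
# from sklearn.pipeline import make_pipeline
# from sklearn.feature_extraction.text import CountVectorizer
# from tokenwiser.textprep import HyphenTextPrep, Cleaner
#
# pipe = make_pipeline(make_concat(HyphenTextPrep(), Cleaner()), CountVectorizer())
# X = pipe.fit_transform(["dinosaurhead", "another$$ sentence$$"])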
| 30.964706
| 91
| 0.62576
|
cf03b6895dca4dd89d0d4717f54326562928d4d6
| 199
|
py
|
Python
|
HolePlateMaker/addBlock.py
|
henjin0/HolePlateMaker
|
daf7ef5269b03f2ac2fdc9a8132f945b14177aa0
|
[
"MIT"
] | 2
|
2020-10-10T21:46:18.000Z
|
2021-11-01T03:53:16.000Z
|
HolePlateMaker/addBlock.py
|
henjin0/HolePlateMaker
|
daf7ef5269b03f2ac2fdc9a8132f945b14177aa0
|
[
"MIT"
] | null | null | null |
HolePlateMaker/addBlock.py
|
henjin0/HolePlateMaker
|
daf7ef5269b03f2ac2fdc9a8132f945b14177aa0
|
[
"MIT"
] | null | null | null |
import numpy as np
from stl import mesh
def addBlock(curMesh, newMesh):
curMesh = mesh.Mesh(np.concatenate([
curMesh.data.copy(),
newMesh.data.copy(),
]))
return curMesh
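# Hedged usage sketch (comments only): combine two STL meshes and save the
# result; the file names are placeholders.
#
# base = mesh.Mesh.from_file("plate.stl")
# block = mesh.Mesh.from_file("block.stl")
# combined = addBlock(base, block)
# combined.save("combined.stl")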
| 19.9
| 40
| 0.643216
|
3071e285c0629c0422781be0bdf91b00ad65a122
| 110
|
py
|
Python
|
scheduler/reminders/admin.py
|
maplefeline/scheduler
|
cb17aca882d64944efdc7622a4536790457170e4
|
[
"MIT"
] | null | null | null |
scheduler/reminders/admin.py
|
maplefeline/scheduler
|
cb17aca882d64944efdc7622a4536790457170e4
|
[
"MIT"
] | 18
|
2020-12-01T06:54:23.000Z
|
2022-03-02T03:06:10.000Z
|
scheduler/reminders/admin.py
|
maplefeline/scheduler
|
cb17aca882d64944efdc7622a4536790457170e4
|
[
"MIT"
] | null | null | null |
from django.contrib import admin # type: ignore
from .models import Reminder
admin.site.register(Reminder)
| 18.333333
| 48
| 0.790909
|
47775deaac79c513855b692c3e7ceb07a2efcf53
| 392
|
py
|
Python
|
Panda3D/panda3 - GLSL2/ex04 - Transparency/main.py
|
hoppfull/Legacy-Python
|
43f465bfdb76c91f2ac16aabb0783fdf5f459adb
|
[
"MIT"
] | null | null | null |
Panda3D/panda3 - GLSL2/ex04 - Transparency/main.py
|
hoppfull/Legacy-Python
|
43f465bfdb76c91f2ac16aabb0783fdf5f459adb
|
[
"MIT"
] | null | null | null |
Panda3D/panda3 - GLSL2/ex04 - Transparency/main.py
|
hoppfull/Legacy-Python
|
43f465bfdb76c91f2ac16aabb0783fdf5f459adb
|
[
"MIT"
] | null | null | null |
import direct.showbase.ShowBase as p3d_SB
import panda3d.core as p3d_Core
class MyApp(p3d_SB.ShowBase):
def __init__(self):
p3d_SB.ShowBase.__init__(self)
mesh_np = loader.loadModel("res/plane")
mesh_np.reparentTo(render)
mesh_np.set_shader( p3d_Core.Shader.load(p3d_Core.Shader.SL_GLSL, "res/vs.glsl", "res/fs.glsl") )
if __name__ == "__main__":
myapp = MyApp()
myapp.run()
| 28
| 99
| 0.742347
|
2a73ffc3657e756bbd45de9ad0c61ebd80e07d31
| 10,477
|
py
|
Python
|
src/wavestate/model/system/algo_AC.py
|
wavestate/wavestate-model
|
d5e9cd3bd7352e07cc789b40a4d9452975b27237
|
[
"Apache-2.0"
] | null | null | null |
src/wavestate/model/system/algo_AC.py
|
wavestate/wavestate-model
|
d5e9cd3bd7352e07cc789b40a4d9452975b27237
|
[
"Apache-2.0"
] | null | null | null |
src/wavestate/model/system/algo_AC.py
|
wavestate/wavestate-model
|
d5e9cd3bd7352e07cc789b40a4d9452975b27237
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2021 Massachusetts Institute of Technology.
# SPDX-FileCopyrightText: © 2021 Lee McCuller <mcculler@mit.edu>
# NOTICE: authors should document their contributions in concisely in NOTICE
# with details inline in source files, comments, and docstrings.
"""
"""
import collections
from .. import base
from . import algo_phys
class PhysicsACAlgorithm(object):
def __init__(self, pa, dc):
self.pa = pa
self.log = pa.log
self.pbg = pa.pbg
self.dc = dc
# pg.print_parameters_eval()
self.bg = pa.bg
self.fs = pa.fs
self.check_build = pa.check_build
# {(obj, lport)}
self._drives = set()
# obj-> (lportRow, lportCol) -> kmatrix
self._object_edges = collections.defaultdict(dict)
# indicates which outputs to monitor for sensitivity
# views = {(self._obj, lport)}
self._views = set()
# indicates which outputs to monitor for sensitivity
# views = {(self._obj, lport) : kmatrix}
self._noise = dict()
self._build_matrix_AC()
self._solve_matrix_AC()
return
def _parameter_to_fdict(self, fparam):
if isinstance(fparam, str):
# TODO, check that the key exists
return base.FrequencyKey({fparam: 1})
elif isinstance(fparam, collections.Mapping):
return base.FrequencyKey(fparam)
def _optical_frequency_allowed(self, fk):
return fk in self.fs.freq_set_optical
def __call__(
self,
port_fr,
port_to,
obj_fr=None,
obj_to=None,
dir_fr="in",
dir_to="out",
demod={},
quadrature=1,
):
"""
Gets the power or current at a photodiode.
units default to 'W' for total power, but may be
'A' or 'Q' for quanta-based counts. For multiple wavelengths these
can look inconsistent.
demod takes a frequency dictionary.
"""
oLp_fr = self.bg.rBp2oLp(port_fr, dir=dir_fr)
oLp_to = self.bg.rBp2oLp(port_to, dir=dir_to)
demod = self.fs.parameter_to_fk(demod)
seq, req, edges = self._solutions_AC_SRE
solvect = edges[oLp_fr, oLp_to]
if quadrature == "I":
quadrature = 1
elif quadrature == "Q":
quadrature = 1j
else:
quadrature = quadrature / abs(quadrature)
# TODO, make a upperphoton_pmAC quantum keyset
# from icecream import ic
# ic(solvect.kmatrix)
datavec_p = solvect.kmatrix[(demod, "+AC")][(base.FrequencyKey({}), "+AC")][
..., 0, 0
]
datavec_m = solvect.kmatrix[(demod, "-AC")][(base.FrequencyKey({}), "+AC")][
..., 0, 0
]
# TODO, check realness
return quadrature * datavec_p + quadrature.conjugate() * datavec_m
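# Hedged usage sketch (comments only): `ac` is an assumed PhysicsACAlgorithm
# instance; the port names and demod frequency key are placeholders.
#
# tf_I = ac("laser.drive", "pd.out", demod={"fM": 1}, quadrature="I")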
def _solve_matrix_AC(self):
SREIO = self.SREIO_AC(
subtract_1=True,
)
(seq, req, edges, inputs, outputs) = SREIO
del SREIO
with self.log.heading("AC_inversion"):
seq, req, edges = matrix.SREkmatrix_inverse(
seq,
req,
edges,
outputs_set=outputs,
inputs_set=inputs,
verbose=False,
log=self.log,
)
edges2 = dict()
for (n_fr, n_to), edge in edges.items():
oLp_fr = n_fr
oLp_to = n_to
edges2[oLp_fr, oLp_to] = edge
self._solutions_AC_SRE = seq, req, edges2
def _build_matrix_AC(self):
for obj in self.pbg.object_iter():
try:
visit_algo = obj.visit_matrix_algorithm_DCAC
except AttributeError:
continue
else:
# TODO verbose option for found objects?
# print(obj)
pass
manip = PhysicsAlgorithmACManipulator(
obj=obj,
ac_algo=self,
)
visit_algo(manip)
return
def SREIO_AC(self, subtract_1=False):
seq = collections.defaultdict(set)
req = collections.defaultdict(set)
edges = dict()
for oLp_fr, eset in self.bg.link_seq.items():
m_fr = oLp_fr
# print("FT: ", m_fr, [map_nodes(oLp_to) for oLp_to in eset])
for oLp_to in eset:
m_to = oLp_to
edges[m_fr, m_to] = 1
seq[m_fr].add(m_to)
req[m_to].add(m_fr)
for obj, edict in self._object_edges.items():
for (l_fr, l_to), e_kmat in edict.items():
m_to = (obj, l_to)
m_fr = (obj, l_fr)
edges[m_fr, m_to] = e_kmat
seq[m_fr].add(m_to)
req[m_to].add(m_fr)
if subtract_1:
for e_key, e_val in edges.items():
edges[e_key] = -e_val
nodes = set(seq.keys())
nodes.update(req.keys())
for node in nodes:
seq[node].add(node)
req[node].add(node)
kdiag = edges.get((node, node), 0)
kdiag = 1 + kdiag
edges[node, node] = kdiag
inputs = set()
for k_in in self._drives:
inputs.add(k_in)
outputs = set()
for k_in in self._views:
outputs.add(k_in)
return (seq, req, edges, inputs, outputs)
class PhysicsAlgorithmACView(algo_phys.PhysicsAlgorithmView):
_ac_algo = None
def __init__(self, ac_algo, **kw):
super(PhysicsAlgorithmACView, self).__init__(
bg_algo=ac_algo.bg, pbg=ac_algo.pbg, pa_algo=ac_algo.pa, **kw
)
self._ac_algo = ac_algo
self._dc_algo = ac_algo.dc
# is a dictionary of known basis elements as of this linkage
def link_basis(self, lport):
op = (self._obj, lport)
btype = self._bg_algo.link_basis_types[op]
return {
"optical": self._pa_algo.optical_basis_AC,
"mechanical": self._pa_algo.mechanical_basis_AC,
"signal": self._pa_algo.signal_basis_AC,
"electrical": self._pa_algo.electrical_basis_AC,
}[btype]
def configure_optical_wavelength(self, wdict):
# TODO, check that the wavenumber exists
# assert(len(self._pa_algo.fs.freq_set_wavelengths) == 1)
return base.FrequencyKey(wdict)
def parameter_to_fdict(self, fparam):
return self._ac_algo._parameter_to_fdict(fparam)
def optical_frequency_allowed(self, fk):
return self._ac_algo._optical_frequency_allowed(fk)
def basis_frequencies_DC(self, with_keys=False):
# print("FVs: ", self._pa_algo._basis_frequency_values_DC_optical)
if with_keys:
return zip(
self._pa_algo.optical_basis_DC["frequency"].enumerated,
self._pa_algo._basis_frequency_values_DC_optical,
)
else:
return self._pa_algo._basis_frequency_values_DC_optical
def basis_WFQ_optical_pm(self):
"""
This method iterates over all wavenumbers, frequencies and quantum signs,
with conjugation or negative-frequency shifting for the negative quantum sign.
usage:
for (Wk, Fk, Qk), (wnval, fval, conj) in manip.basis_WFQ_pm():
This method doesn't do much for DC analysis; it is overloaded
for AC analysis to make things simpler.
"""
for Wk, wnval in self.basis_wavenumbers(with_keys=True):
for Fk, fval in self.basis_frequencies_DC(with_keys=True):
for Qk, conj in (("+AC", False), ("-AC", True)):
if not conj:
yield (Wk, Fk, Qk), (
wnval,
fval + self._ac_algo.fs.AC_fspan,
conj,
)
else:
yield (Wk, Fk, Qk), (
wnval,
fval - self._ac_algo.fs.AC_fspan,
conj,
)
return
class PhysicsAlgorithmACManipulator(PhysicsAlgorithmACView):
is_DC = False
is_AC = True
def get_field_DC(self, lport):
return self._dc_algo._solutions_DC[(self._obj, lport)]
def add_drive(self, lport):
self._ac_algo._drives.add((self._obj, lport))
def add_view(self, lport):
self._ac_algo._views.add((self._obj, lport))
def add_noise(self, lport, nmatrix):
self._ac_algo._noise[(self._obj, lport)] = nmatrix
def add_link(self, lport_fr, lport_to, kmatrix, lowering_only=False):
"""
Adds a link to the system matrix
The lowering_only indicates that only the lowering (non conjugated)
operator is supplied, and so needs some additional completion
"""
if lowering_only:
km_new = dict()
for krow, kvect in kmatrix.kmatrix.items():
kvect_new_p = km_new.setdefault(krow + ("+AC",), dict())
kvect_new_n = km_new.setdefault(krow + ("-AC",), dict())
for kcol, kdm in kvect.items():
kvect_new_p[kcol + ("+AC",)] = kdm
kvect_new_n[kcol + ("-AC",)] = kdm.conjugate()
# TODO, not always using the optical basis for this
kmatrix = kmatrix.__class__(
stR=kmatrix.stR + (self._pa_algo.optical_basis_AC["quantum"],),
stC=kmatrix.stC + (self._pa_algo.optical_basis_AC["quantum"],),
dtR=kmatrix.dtR,
dtC=kmatrix.dtC,
kmatrix=km_new,
build=False,
check=self.check_build,
)
if isinstance(kmatrix, base.KeyMatrixBase):
basisR = set(self.link_basis(lport_to).values())
assert set(kmatrix.stR + kmatrix.dtR) <= basisR
basisC = set(self.link_basis(lport_fr).values())
assert set(kmatrix.stC + kmatrix.dtC) <= basisC
self._ac_algo._object_edges[self._obj][lport_fr, lport_to] = kmatrix
else:
# is probably just a number or array
self._ac_algo._object_edges[self._obj][lport_fr, lport_to] = kmatrix
| 33.688103
| 84
| 0.557507
|
c34a1bed0463b8d5be16f7a78a40e6f179a5eee9
| 184
|
py
|
Python
|
unipipeline/answer/uni_answer_params.py
|
aliaksandr-master/unipipeline
|
d8eac38534172aee59ab5777321cabe67f3779ef
|
[
"MIT"
] | null | null | null |
unipipeline/answer/uni_answer_params.py
|
aliaksandr-master/unipipeline
|
d8eac38534172aee59ab5777321cabe67f3779ef
|
[
"MIT"
] | 1
|
2021-09-14T13:08:13.000Z
|
2021-09-14T13:08:13.000Z
|
unipipeline/answer/uni_answer_params.py
|
aliaksandr-master/unipipeline
|
d8eac38534172aee59ab5777321cabe67f3779ef
|
[
"MIT"
] | null | null | null |
from uuid import UUID
from pydantic import BaseModel
class UniAnswerParams(BaseModel):
topic: str
id: UUID
class Config:
frozen = True
extra = 'forbid'
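# Hedged usage sketch (comments only): the model is frozen and forbids extra
# fields, so values can only be set at construction time; the UUID is a placeholder.
#
# params = UniAnswerParams(topic="worker__answer", id=UUID("12345678-1234-5678-1234-567812345678"))
# params.topic = "other"  # raises an error because Config.frozen = True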
| 14.153846
| 33
| 0.652174
|
da6f710e488b0728db33dd17fa3e7155b22d190f
| 3,468
|
py
|
Python
|
python/paml/codeml-parallel.py
|
lotharwissler/bioinformatics
|
83a53771222ecb0759e3b4bfa2018d2cd7647643
|
[
"MIT"
] | 10
|
2016-01-13T00:39:30.000Z
|
2020-11-30T05:56:19.000Z
|
python/paml/codeml-parallel.py
|
lotharwissler/bioinformatics
|
83a53771222ecb0759e3b4bfa2018d2cd7647643
|
[
"MIT"
] | 1
|
2017-02-09T22:46:49.000Z
|
2017-02-09T22:46:49.000Z
|
python/paml/codeml-parallel.py
|
lotharwissler/bioinformatics
|
83a53771222ecb0759e3b4bfa2018d2cd7647643
|
[
"MIT"
] | 10
|
2015-10-09T00:29:16.000Z
|
2019-06-09T05:32:15.000Z
|
#!/usr/bin/python
import os, sys # low level handling, such as command line stuff
import string # string methods available
import re # regular expressions
import getopt # comand line argument handling
import tempfile
from low import * # custom functions, written by myself
# =============================================================================
def show_help( ):
""" displays the program parameter list and usage information """
stdout( "usage: " + sys.argv[0] + " -f <path>" )
stdout( " " )
stdout( " option description" )
stdout( " -h help (this text here)" )
stdout( " -f nt alignment file" )
stdout( " -t tree file (newick format)" )
stdout( " -m models to run (comma separates)" )
stdout( " -p path to PAML codeml" )
stdout( " " )
sys.exit(1)
# =============================================================================
def handle_arguments():
""" verifies the presence of all necessary arguments and returns the data dir """
if len ( sys.argv ) == 1:
stderr( "no arguments provided." )
show_help()
try: # check for the right arguments
keys, values = getopt.getopt( sys.argv[1:], "hf:t:m:p:" )
except getopt.GetoptError:
stderr( "invalid arguments provided." )
show_help()
args = {}
for key, value in keys:
if key == '-f': args['aln'] = value
if key == '-t': args['tree'] = value
if key == '-p': args['codeml'] = value
if key == '-m': args['models'] = value.split(",")
if not args.has_key('aln'):
stderr( "aln file missing." )
show_help()
if not file_exists( args.get('aln') ):
stderr( "aln file does not exist." )
show_help()
if not args.has_key('tree'):
stderr( "tree file missing." )
show_help()
if not file_exists( args.get('tree') ):
stderr( "tree file does not exist." )
show_help()
if not args.has_key('models'):
stderr( "no models to run." )
show_help()
if not file_exists( args.get('codeml') ):
stderr( "codeml binary not found." )
show_help()
args['pamlfolder'] = os.path.split(args.get('codeml'))[0] + '/'
if not dir_exists( args.get('pamlfolder') ):
stderr( "paml folder does not exist" )
show_help()
return args
# =============================================================================
# =============================================================================
def main( args ):
models = args['models']
aln, tree = args['aln'], args['tree']
codemlbin = args['codeml']
ctlbase = args['pamlfolder'] + 'codeml.ctl.'
for model in models:
if tree.count("."):
ext = os.path.splitext(tree)[1]
outfile = aln+".codeml"+ext+"."+model
else:
outfile = aln+".codeml."+model
tempdir = tempfile.mkdtemp(suffix=model, prefix='tmp.codeml.', dir='.')
os.system("cp %s %s" %(aln, tempdir + '/in-aln'))
os.system("cp %s %s" %(tree, tempdir + '/in-tree'))
os.system("cp %s %s" %(ctlbase + model, tempdir + '/codeml.ctl'))
os.chdir(tempdir)
os.system(codemlbin)
os.chdir("..")
os.system("mv %s/out-codeml %s" %(tempdir, outfile))
os.system("rm -rf %s" % tempdir)
# =============================================================================
# === MAIN ====================================================================
# =============================================================================
args = handle_arguments( )
main( args )
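# Hedged command-line sketch (illustrative file names and paths only):
# python codeml-parallel.py -f genes.nt.aln -t species.tree -m M0,M1,M2 -p /opt/paml/bin/codeml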
| 33.669903
| 83
| 0.504614
|
60ee73675e706c52e4c3c69bb789bff3cc66cb34
| 6,749
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/string/clostridiumcellulovorans.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/clostridiumcellulovorans.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/clostridiumcellulovorans.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph Clostridium cellulovorans.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 21:14:08.272584
The undirected graph Clostridium cellulovorans has 4180 nodes and 337028
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.03859 and has 29 connected components, where the component
with most nodes has 4114 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 122, the mean node degree is 161.26,
and the node degree mode is 1. The top 5 most central nodes are 573061.Clocel_0020
(degree 1795), 573061.Clocel_2730 (degree 1461), 573061.Clocel_2840 (degree
1302), 573061.Clocel_2962 (degree 1292) and 573061.Clocel_0022 (degree
1221).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import ClostridiumCellulovorans
# Then load the graph
graph = ClostridiumCellulovorans()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def ClostridiumCellulovorans(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Clostridium cellulovorans graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instance of the Clostridium cellulovorans graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 21:14:08.272584
The undirected graph Clostridium cellulovorans has 4180 nodes and 337028
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.03859 and has 29 connected components, where the component
with most nodes has 4114 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 122, the mean node degree is 161.26,
and the node degree mode is 1. The top 5 most central nodes are 573061.Clocel_0020
(degree 1795), 573061.Clocel_2730 (degree 1461), 573061.Clocel_2840 (degree
1302), 573061.Clocel_2962 (degree 1292) and 573061.Clocel_0022 (degree
1221).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import ClostridiumCellulovorans
# Then load the graph
graph = ClostridiumCellulovorans()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="ClostridiumCellulovorans",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 35.335079
| 223
| 0.706327
|
e964c46252258591545f1008d908b1195525ce0d
| 586
|
py
|
Python
|
manga-or-illust/client.py
|
qhgz2013/ml-experiment
|
95f553f5331d3a8f54f3619f65dc0ec0a19b36af
|
[
"Apache-2.0"
] | null | null | null |
manga-or-illust/client.py
|
qhgz2013/ml-experiment
|
95f553f5331d3a8f54f3619f65dc0ec0a19b36af
|
[
"Apache-2.0"
] | null | null | null |
manga-or-illust/client.py
|
qhgz2013/ml-experiment
|
95f553f5331d3a8f54f3619f65dc0ec0a19b36af
|
[
"Apache-2.0"
] | null | null | null |
import requests
import base64
def main():
print("Input an empty path to exit this program.")
while True:
path = input('Input image path: ')
with open(path, 'rb') as file:
length = file.seek(0, 2)
file.seek(0)
img_binary = file.read(length)
img_binary = base64.encodebytes(img_binary)
img = str(img_binary, 'utf-8')
req = requests.post('http://localhost:10087/classify', data={'image': img})
resp = str(req.content, 'utf-8')
print(resp)
if __name__ == '__main__':
main()
| 26.636364
| 83
| 0.569966
|
57eef4dad804ba2a80f9ec5e50bb5aa946744b92
| 23,134
|
py
|
Python
|
io_scene_webaverse/blender/exp/gltf2_blender_gather_nodes.py
|
chrislatorres/blender-plugin
|
b0cd2dc6af8652a3ab841a8b620dbcf1a5a281e7
|
[
"Apache-2.0"
] | 3
|
2021-02-01T09:09:30.000Z
|
2021-11-12T14:39:28.000Z
|
io_scene_webaverse/blender/exp/gltf2_blender_gather_nodes.py
|
chrislatorres/blender-plugin
|
b0cd2dc6af8652a3ab841a8b620dbcf1a5a281e7
|
[
"Apache-2.0"
] | 4
|
2021-04-01T10:58:30.000Z
|
2021-08-23T12:27:42.000Z
|
io_scene_webaverse/blender/exp/gltf2_blender_gather_nodes.py
|
chrislatorres/blender-plugin
|
b0cd2dc6af8652a3ab841a8b620dbcf1a5a281e7
|
[
"Apache-2.0"
] | 3
|
2021-02-22T21:39:41.000Z
|
2021-11-22T15:12:47.000Z
|
# Copyright 2018-2019 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import bpy
from mathutils import Matrix, Quaternion, Vector
from . import gltf2_blender_export_keys
from io_scene_webaverse.blender.com import gltf2_blender_math
from io_scene_webaverse.blender.exp.gltf2_blender_gather_cache import cached
from io_scene_webaverse.blender.exp import gltf2_blender_gather_skins
from io_scene_webaverse.blender.exp import gltf2_blender_gather_cameras
from io_scene_webaverse.blender.exp import gltf2_blender_gather_mesh
from io_scene_webaverse.blender.exp import gltf2_blender_gather_joints
from io_scene_webaverse.blender.exp import gltf2_blender_gather_lights
from ..com.gltf2_blender_extras import generate_extras
from io_scene_webaverse.io.com import gltf2_io
from io_scene_webaverse.io.com import gltf2_io_extensions
from io_scene_webaverse.io.exp.gltf2_io_user_extensions import export_user_extensions
from io_scene_webaverse.io.com.gltf2_io_debug import print_console
def gather_node(blender_object, library, blender_scene, dupli_object_parent, export_settings):
# custom cache to avoid cache miss when called from animation
# with blender_scene=None
# invalidate cache if export settings have changed
if not hasattr(gather_node, "__export_settings") or export_settings != gather_node.__export_settings:
gather_node.__cache = {}
gather_node.__export_settings = export_settings
if blender_scene is None and (blender_object.name, library) in gather_node.__cache:
return gather_node.__cache[(blender_object.name, library)]
node = __gather_node(blender_object, library, blender_scene, dupli_object_parent, export_settings)
gather_node.__cache[(blender_object.name, library)] = node
return node
@cached
def __gather_node(blender_object, library, blender_scene, dupli_object_parent, export_settings):
children = __gather_children(blender_object, blender_scene, export_settings)
camera = None
mesh = None
skin = None
weights = None
# If blender_scene is None, we are coming from animation export
# The check of whether the object should be exported was already done when
# exporting the object itself, so we don't check again here whether it is instanced in the scene
if not __filter_node(blender_object, blender_scene, export_settings):
if children:
# This node should be filtered out, but has un-filtered children present.
# So, export this node, excluding its camera, mesh, skin, and weights.
# The transformations and animations on this node will have visible effects on children.
pass
else:
# This node is filtered out, and has no un-filtered children or descendants.
return None
else:
# This node is being fully exported.
camera = __gather_camera(blender_object, export_settings)
mesh = __gather_mesh(blender_object, library, export_settings)
skin = __gather_skin(blender_object, export_settings)
weights = __gather_weights(blender_object, export_settings)
node = gltf2_io.Node(
camera=camera,
children=children,
extensions=__gather_extensions(blender_object, export_settings),
extras=__gather_extras(blender_object, export_settings),
matrix=__gather_matrix(blender_object, export_settings),
mesh=mesh,
name=__gather_name(blender_object, export_settings),
rotation=None,
scale=None,
skin=skin,
translation=None,
weights=weights
)
# If the node mesh is skinned, transforms should be ignored at import, so no need to set them here
if node.skin is None:
node.translation, node.rotation, node.scale = __gather_trans_rot_scale(blender_object, export_settings)
if export_settings[gltf2_blender_export_keys.YUP]:
# Checking node.extensions is making sure that the type of lamp is managed, and will be exported
if blender_object.type == 'LIGHT' and export_settings[gltf2_blender_export_keys.LIGHTS] and node.extensions:
correction_node = __get_correction_node(blender_object, export_settings)
correction_node.extensions = {"KHR_lights_punctual": node.extensions["KHR_lights_punctual"]}
del node.extensions["KHR_lights_punctual"]
node.children.append(correction_node)
if blender_object.type == 'CAMERA' and export_settings[gltf2_blender_export_keys.CAMERAS]:
correction_node = __get_correction_node(blender_object, export_settings)
correction_node.camera = node.camera
node.children.append(correction_node)
node.camera = None
export_user_extensions('gather_node_hook', export_settings, node, blender_object)
return node
def __filter_node(blender_object, blender_scene, export_settings):
if blender_object.users == 0:
return False
if blender_scene is not None:
instanced = any([blender_object.name in layer.objects for layer in blender_scene.view_layers])
if instanced is False:
# Check if object is from a linked collection
if any([blender_object.name in coll.objects for coll in bpy.data.collections if coll.library is not None]):
pass
else:
# Not instanced, not linked -> We don't keep this object
return False
if export_settings[gltf2_blender_export_keys.SELECTED] and blender_object.select_get() is False:
return False
return True
def __gather_camera(blender_object, export_settings):
if blender_object.type != 'CAMERA':
return None
return gltf2_blender_gather_cameras.gather_camera(blender_object.data, export_settings)
def __gather_children(blender_object, blender_scene, export_settings):
children = []
# standard children
for _child_object in blender_object.children:
if _child_object.parent_bone:
# this is handled further down,
# as the object should be a child of the specific bone,
# not the Armature object
continue
child_object = _child_object.proxy if _child_object.proxy else _child_object
node = gather_node(child_object,
child_object.library.name if child_object.library else None,
blender_scene, None, export_settings)
if node is not None:
children.append(node)
# blender dupli objects
if blender_object.instance_type == 'COLLECTION' and blender_object.instance_collection:
for dupli_object in blender_object.instance_collection.objects:
if dupli_object.parent is not None:
continue
if dupli_object.type == "ARMATURE":
continue # There is probably a proxy
node = gather_node(dupli_object,
dupli_object.library.name if dupli_object.library else None,
blender_scene, blender_object.name, export_settings)
if node is not None:
children.append(node)
# blender bones
if blender_object.type == "ARMATURE":
root_joints = []
if export_settings["gltf_def_bones"] is False:
bones = blender_object.pose.bones
else:
bones, _, _ = gltf2_blender_gather_skins.get_bone_tree(None, blender_object)
bones = [blender_object.pose.bones[b.name] for b in bones]
for blender_bone in bones:
if not blender_bone.parent:
joint = gltf2_blender_gather_joints.gather_joint(blender_object, blender_bone, export_settings)
children.append(joint)
root_joints.append(joint)
# handle objects directly parented to bones
direct_bone_children = [child for child in blender_object.children if child.parent_bone]
def find_parent_joint(joints, name):
for joint in joints:
if joint.name == name:
return joint
parent_joint = find_parent_joint(joint.children, name)
if parent_joint:
return parent_joint
return None
for child in direct_bone_children:
# find parent joint
parent_joint = find_parent_joint(root_joints, child.parent_bone)
if not parent_joint:
continue
child_node = gather_node(child, None, None, None, export_settings)
if child_node is None:
continue
blender_bone = blender_object.pose.bones[parent_joint.name]
# fix rotation
if export_settings[gltf2_blender_export_keys.YUP]:
rot = child_node.rotation
if rot is None:
rot = [0, 0, 0, 1]
rot_quat = Quaternion(rot)
axis_basis_change = Matrix(
((1.0, 0.0, 0.0, 0.0), (0.0, 0.0, -1.0, 0.0), (0.0, 1.0, 0.0, 0.0), (0.0, 0.0, 0.0, 1.0)))
mat = child.matrix_parent_inverse @ child.matrix_basis
mat = mat @ axis_basis_change
_, rot_quat, _ = mat.decompose()
child_node.rotation = [rot_quat[1], rot_quat[2], rot_quat[3], rot_quat[0]]
# fix translation (in blender bone's tail is the origin for children)
trans, _, _ = child.matrix_local.decompose()
if trans is None:
trans = [0, 0, 0]
# bones go down their local y axis
if blender_bone.matrix.to_scale()[1] >= 1e-6:
bone_tail = [0, blender_bone.length / blender_bone.matrix.to_scale()[1], 0]
else:
bone_tail = [0,0,0] # If scale is 0, tail == head
child_node.translation = [trans[idx] + bone_tail[idx] for idx in range(3)]
parent_joint.children.append(child_node)
return children
def __gather_extensions(blender_object, export_settings):
extensions = {}
if export_settings["gltf_lights"] and (blender_object.type == "LAMP" or blender_object.type == "LIGHT"):
blender_lamp = blender_object.data
light = gltf2_blender_gather_lights.gather_lights_punctual(
blender_lamp,
export_settings
)
if light is not None:
light_extension = gltf2_io_extensions.ChildOfRootExtension(
name="KHR_lights_punctual",
path=["lights"],
extension=light
)
extensions["KHR_lights_punctual"] = gltf2_io_extensions.Extension(
name="KHR_lights_punctual",
extension={
"light": light_extension
}
)
return extensions if extensions else None
def __gather_extras(blender_object, export_settings):
if export_settings['gltf_extras']:
return generate_extras(blender_object)
return None
def __gather_matrix(blender_object, export_settings):
# return blender_object.matrix_local
return []
def __gather_mesh(blender_object, library, export_settings):
if blender_object.type in ['CURVE', 'SURFACE', 'FONT']:
return __gather_mesh_from_nonmesh(blender_object, library, export_settings)
if blender_object.type != "MESH":
return None
# If vertex groups are not used, they are irrelevant for caching --> ensure that they do not trigger a cache miss
vertex_groups = blender_object.vertex_groups
modifiers = blender_object.modifiers
if len(vertex_groups) == 0:
vertex_groups = None
if len(modifiers) == 0:
modifiers = None
if export_settings[gltf2_blender_export_keys.APPLY]:
armature_modifiers = {}
if export_settings[gltf2_blender_export_keys.SKINS]:
# temporarily disable Armature modifiers if exporting skins
for idx, modifier in enumerate(blender_object.modifiers):
if modifier.type == 'ARMATURE':
armature_modifiers[idx] = modifier.show_viewport
modifier.show_viewport = False
depsgraph = bpy.context.evaluated_depsgraph_get()
blender_mesh_owner = blender_object.evaluated_get(depsgraph)
blender_mesh = blender_mesh_owner.to_mesh(preserve_all_data_layers=True, depsgraph=depsgraph)
for prop in blender_object.data.keys():
blender_mesh[prop] = blender_object.data[prop]
skip_filter = True
if export_settings[gltf2_blender_export_keys.SKINS]:
# restore Armature modifiers
for idx, show_viewport in armature_modifiers.items():
blender_object.modifiers[idx].show_viewport = show_viewport
else:
blender_mesh = blender_object.data
skip_filter = False
        # If no skins are exported, there is no need for vertex groups; keeping them would create a cache miss
if not export_settings[gltf2_blender_export_keys.SKINS]:
vertex_groups = None
modifiers = None
else:
            # Check if there is an armature modifier
if len([mod for mod in blender_object.modifiers if mod.type == "ARMATURE"]) == 0:
vertex_groups = None # Not needed if no armature, avoid a cache miss
modifiers = None
materials = tuple(ms.material for ms in blender_object.material_slots)
material_names = tuple(None if mat is None else mat.name for mat in materials)
# retrieve armature
    # Because mesh data will be transformed to skeleton space,
    # we can't instantiate multiple objects at different locations skinned by the same armature
blender_object_for_skined_data = None
if export_settings[gltf2_blender_export_keys.SKINS]:
for idx, modifier in enumerate(blender_object.modifiers):
if modifier.type == 'ARMATURE':
blender_object_for_skined_data = blender_object
result = gltf2_blender_gather_mesh.gather_mesh(blender_mesh,
library,
blender_object_for_skined_data,
vertex_groups,
modifiers,
skip_filter,
material_names,
export_settings)
if export_settings[gltf2_blender_export_keys.APPLY]:
blender_mesh_owner.to_mesh_clear()
return result
def __gather_mesh_from_nonmesh(blender_object, library, export_settings):
"""Handles curves, surfaces, text, etc."""
needs_to_mesh_clear = False
try:
# Convert to a mesh
try:
if export_settings[gltf2_blender_export_keys.APPLY]:
depsgraph = bpy.context.evaluated_depsgraph_get()
blender_mesh_owner = blender_object.evaluated_get(depsgraph)
blender_mesh = blender_mesh_owner.to_mesh(preserve_all_data_layers=True, depsgraph=depsgraph)
# TODO: do we need preserve_all_data_layers?
else:
blender_mesh_owner = blender_object
blender_mesh = blender_mesh_owner.to_mesh()
except Exception:
return None
needs_to_mesh_clear = True
skip_filter = True
material_names = tuple([ms.material.name for ms in blender_object.material_slots if ms.material is not None])
vertex_groups = None
modifiers = None
blender_object_for_skined_data = None
result = gltf2_blender_gather_mesh.gather_mesh(blender_mesh,
library,
blender_object_for_skined_data,
vertex_groups,
modifiers,
skip_filter,
material_names,
export_settings)
finally:
if needs_to_mesh_clear:
blender_mesh_owner.to_mesh_clear()
return result
def __gather_name(blender_object, export_settings):
return blender_object.name
def __gather_trans_rot_scale(blender_object, export_settings):
if blender_object.matrix_parent_inverse == Matrix.Identity(4):
trans = blender_object.location
if blender_object.rotation_mode in ['QUATERNION', 'AXIS_ANGLE']:
rot = blender_object.rotation_quaternion
else:
rot = blender_object.rotation_euler.to_quaternion()
sca = blender_object.scale
else:
# matrix_local = matrix_parent_inverse*location*rotation*scale
# Decomposing matrix_local gives less accuracy, but is needed if matrix_parent_inverse is not the identity.
if blender_object.matrix_local[3][3] != 0.0:
trans, rot, sca = blender_object.matrix_local.decompose()
else:
            # In some really weird cases the scale is null (if the parent is null when evaluation is done)
print_console('WARNING', 'Some nodes are 0 scaled during evaluation. Result can be wrong')
trans = blender_object.location
if blender_object.rotation_mode in ['QUATERNION', 'AXIS_ANGLE']:
rot = blender_object.rotation_quaternion
else:
rot = blender_object.rotation_euler.to_quaternion()
sca = blender_object.scale
# make sure the rotation is normalized
rot.normalize()
trans = __convert_swizzle_location(trans, export_settings)
rot = __convert_swizzle_rotation(rot, export_settings)
sca = __convert_swizzle_scale(sca, export_settings)
if blender_object.instance_type == 'COLLECTION' and blender_object.instance_collection:
offset = -__convert_swizzle_location(
blender_object.instance_collection.instance_offset, export_settings)
s = Matrix.Diagonal(sca).to_4x4()
r = rot.to_matrix().to_4x4()
t = Matrix.Translation(trans).to_4x4()
o = Matrix.Translation(offset).to_4x4()
m = t @ r @ s @ o
trans = m.translation
translation, rotation, scale = (None, None, None)
trans[0], trans[1], trans[2] = gltf2_blender_math.round_if_near(trans[0], 0.0), gltf2_blender_math.round_if_near(trans[1], 0.0), \
gltf2_blender_math.round_if_near(trans[2], 0.0)
rot[0], rot[1], rot[2], rot[3] = gltf2_blender_math.round_if_near(rot[0], 1.0), gltf2_blender_math.round_if_near(rot[1], 0.0), \
gltf2_blender_math.round_if_near(rot[2], 0.0), gltf2_blender_math.round_if_near(rot[3], 0.0)
sca[0], sca[1], sca[2] = gltf2_blender_math.round_if_near(sca[0], 1.0), gltf2_blender_math.round_if_near(sca[1], 1.0), \
gltf2_blender_math.round_if_near(sca[2], 1.0)
if trans[0] != 0.0 or trans[1] != 0.0 or trans[2] != 0.0:
translation = [trans[0], trans[1], trans[2]]
if rot[0] != 1.0 or rot[1] != 0.0 or rot[2] != 0.0 or rot[3] != 0.0:
rotation = [rot[1], rot[2], rot[3], rot[0]]
if sca[0] != 1.0 or sca[1] != 1.0 or sca[2] != 1.0:
scale = [sca[0], sca[1], sca[2]]
return translation, rotation, scale
def __gather_skin(blender_object, export_settings):
modifiers = {m.type: m for m in blender_object.modifiers}
if "ARMATURE" not in modifiers or modifiers["ARMATURE"].object is None:
return None
# no skin needed when the modifier is linked without having a vertex group
vertex_groups = blender_object.vertex_groups
if len(vertex_groups) == 0:
return None
# check if any vertices in the mesh are part of a vertex group
depsgraph = bpy.context.evaluated_depsgraph_get()
blender_mesh_owner = blender_object.evaluated_get(depsgraph)
blender_mesh = blender_mesh_owner.to_mesh(preserve_all_data_layers=True, depsgraph=depsgraph)
if not any(vertex.groups is not None and len(vertex.groups) > 0 for vertex in blender_mesh.vertices):
return None
    # Prevent an infinite recursion error. A mesh can't have an Armature modifier
# and be bone parented to a bone of this armature
# In that case, ignore the armature modifier, keep only the bone parenting
if blender_object.parent is not None \
and blender_object.parent_type == 'BONE' \
and blender_object.parent.name == modifiers["ARMATURE"].object.name:
return None
# Skins and meshes must be in the same glTF node, which is different from how blender handles armatures
return gltf2_blender_gather_skins.gather_skin(modifiers["ARMATURE"].object, export_settings)
def __gather_weights(blender_object, export_settings):
return None
def __get_correction_node(blender_object, export_settings):
correction_quaternion = __convert_swizzle_rotation(
Quaternion((1.0, 0.0, 0.0), math.radians(-90.0)), export_settings)
correction_quaternion = [correction_quaternion[1], correction_quaternion[2],
correction_quaternion[3], correction_quaternion[0]]
return gltf2_io.Node(
camera=None,
children=[],
extensions=None,
extras=None,
matrix=None,
mesh=None,
name=blender_object.name + '_Orientation',
rotation=correction_quaternion,
scale=None,
skin=None,
translation=None,
weights=None
)
def __convert_swizzle_location(loc, export_settings):
"""Convert a location from Blender coordinate system to glTF coordinate system."""
if export_settings[gltf2_blender_export_keys.YUP]:
return Vector((loc[0], loc[2], -loc[1]))
else:
return Vector((loc[0], loc[1], loc[2]))
def __convert_swizzle_rotation(rot, export_settings):
"""
Convert a quaternion rotation from Blender coordinate system to glTF coordinate system.
'w' is still at first position.
"""
if export_settings[gltf2_blender_export_keys.YUP]:
return Quaternion((rot[0], rot[1], rot[3], -rot[2]))
else:
return Quaternion((rot[0], rot[1], rot[2], rot[3]))
def __convert_swizzle_scale(scale, export_settings):
"""Convert a scale from Blender coordinate system to glTF coordinate system."""
if export_settings[gltf2_blender_export_keys.YUP]:
return Vector((scale[0], scale[2], scale[1]))
else:
return Vector((scale[0], scale[1], scale[2]))
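# Illustrative sketch (not part of the exporter module): the three swizzle helpers
# above map Blender's Z-up frame to glTF's Y-up frame, so a location (x, y, z)
# becomes (x, z, -y) and a quaternion keeps 'w' first with its axis remapped the
# same way. The demo_settings dict is a minimal stand-in containing only the YUP key.
def _swizzle_demo():
    demo_settings = {gltf2_blender_export_keys.YUP: True}
    loc = __convert_swizzle_location(Vector((1.0, 2.0, 3.0)), demo_settings)
    # loc is now Vector((1.0, 3.0, -2.0))
    rot = __convert_swizzle_rotation(Quaternion((1.0, 0.0, 0.0, 0.0)), demo_settings)
    # the identity quaternion is unchanged because its x, y, z components are zero
    return loc, rot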
| 43.322097
| 134
| 0.658771
|
a6e1ddad0f80b2dc2f42586dc0c241e2b40e24dd
| 6,463
|
py
|
Python
|
main.py
|
Arseny569/CheckHeart
|
fb4c71edbd8f5c49a4d6f9c97faa8666136cb534
|
[
"CC0-1.0"
] | null | null | null |
main.py
|
Arseny569/CheckHeart
|
fb4c71edbd8f5c49a4d6f9c97faa8666136cb534
|
[
"CC0-1.0"
] | null | null | null |
main.py
|
Arseny569/CheckHeart
|
fb4c71edbd8f5c49a4d6f9c97faa8666136cb534
|
[
"CC0-1.0"
] | null | null | null |
# Create and launch the application; program the screen interfaces and the actions on them
# Your code goes here
from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.textinput import TextInput
from kivy.uix.scrollview import ScrollView
from scrollLabel import ScrollLabel
from ruffier import *
from instructions import *
from seconds import Seconds
p1, p2, p3 = 0, 0, 0
def check_int(value):
try:
return int(value)
except:
return False
class InitScreen(Screen):
def __init__(self, **kwargs):
super().__init__(**kwargs)
instr = ScrollLabel(ltext=txt_instruction)
l1 = Label(text="Введите имя:")
self.name_input = TextInput()
l2 = Label(text="Введите возраст:")
self.age_input = TextInput()
self.btn = Button(text="Начать")
self.btn.background_color = (0, 0.5, 0.5, 1)
self.btn.on_press = self.next
line1 = BoxLayout(orientation="vertical", size_hint=(1, None), height='60sp')
line1.add_widget(l1)
line1.add_widget(self.name_input)
line2 = BoxLayout(orientation="vertical", size_hint=(1, None), height='60sp')
line2.add_widget(l2)
line2.add_widget(self.age_input)
linemain = BoxLayout(orientation="vertical", padding=8, spacing=8)
linemain.add_widget(instr)
linemain.add_widget(line1)
linemain.add_widget(line2)
linemain.add_widget(self.btn)
self.add_widget(linemain)
def next(self):
global age, name
name = self.name_input.text
age = check_int(self.age_input.text)
if name != "" and age >= 7:
self.manager.current = "test"
class SittingScreen(Screen):
def __init__(self, **kwargs):
super().__init__(**kwargs)
instr = ScrollLabel(ltext=txt_sitting)
self.ltimer = Seconds(45)
self.ltimer.bind(done=self.timer_end)
self.btn = Button(text = "Далее")
self.btn.background_color = (0, 0.5, 0.5, 1)
self.btn.on_press = self.next
linemain = BoxLayout(orientation="vertical", padding=8, spacing=8)
linemain.add_widget(instr)
linemain.add_widget(self.ltimer)
linemain.add_widget(self.btn)
self.add_widget(linemain)
def on_enter(self):
self.ltimer.start()
def timer_end(self, *args):
self.btn.set_disabled(False)
def next(self):
self.manager.current = "test_result"
class TestScreen(Screen):
def __init__(self, **kwargs):
super().__init__(**kwargs)
instr = ScrollLabel(ltext=txt_test1)
self.ltimer = Seconds(15)
self.ltimer.bind(done=self.timer_end)
l1 = Label(text="Запишите результат")
self.test_input = TextInput(text = '0')
self.test_input.set_disabled(True)
self.btn = Button(text = "Далее")
self.btn.background_color = (0, 0.5, 0.5, 1)
self.btn.set_disabled(True)
self.btn.on_press = self.next
line1 = BoxLayout(orientation="vertical", size_hint=(1, None), height='60sp')
line1.add_widget(l1)
line1.add_widget(self.test_input)
linemain = BoxLayout(orientation="vertical", padding=8, spacing=8)
linemain.add_widget(instr)
linemain.add_widget(self.ltimer)
linemain.add_widget(line1)
linemain.add_widget(self.btn)
self.add_widget(linemain)
def on_enter(self):
self.ltimer.start()
def timer_end(self, *args):
self.test_input.set_disabled(False)
self.btn.set_disabled(False)
def next(self):
global p1
p1 = check_int(self.test_input.text)
if p1 != False and p1 > 0:
self.manager.current = "sitting"
class Test2Screen(Screen):
def __init__(self, **kwargs):
super().__init__(**kwargs)
instr = ScrollLabel(ltext=txt_test2)
l1 = Label(text=" Результат после приседания")
self.test2_input = TextInput()
l2 = Label(text="")
self.test3_input = TextInput()
        self.t1 = Seconds(15)
        self.t1.bind(done=self.t1_end)
        self.t2 = Seconds(45)
        self.t2.bind(done=self.t2_end)
        self.t3 = Seconds(15)
        self.t3.bind(done=self.t3_end)
self.btn = Button(text="Показать результат")
self.btn.background_color = (0, 0.5, 0.5, 1)
self.btn.on_press = self.next
line1 = BoxLayout(orientation="vertical", size_hint=(1, None), height='60sp')
line1.add_widget(l1)
line1.add_widget(self.test2_input)
line2 = BoxLayout(orientation="vertical", size_hint=(1, None), height='60sp')
line2.add_widget(l2)
line2.add_widget(self.test3_input)
linemain = BoxLayout(orientation="vertical", padding=8, spacing=8)
linemain.add_widget(instr)
linemain.add_widget(line1)
linemain.add_widget(line2)
linemain.add_widget(self.btn)
self.add_widget(linemain)
def on_enter(self):
self.t1.start()
self.t2.start()
def t1_end(self, *args):
self.test2_input.set_disabled(False)
def t2_end(self, *args):
self.t3.start()
def t3_end(self, *args):
self.test3_input.set_disabled(False)
self.btn.set_disabled(False)
def next(self):
global p2, p3
p2 = int(self.test2_input.text)
p3 = int(self.test3_input.text)
self.manager.current = "result"
class ResultScreen(Screen):
def __init__(self, **kwargs):
super().__init__(**kwargs)
index = ruffier_index(p1, p2, p3)
self.l = ScrollLabel(ltext="")
self.add_widget(self.l)
def on_enter(self):
index = ruffier_index(p1,p2,p3)
self.l.label.text = "Ваш индекс Руфье равен " + str(index)
class CheckHeart(App):
def build(self):
sm = ScreenManager()
sm.add_widget(InitScreen(name="main"))
sm.add_widget(SittingScreen(name="sitting"))
sm.add_widget(TestScreen(name="test"))
sm.add_widget(Test2Screen(name="test_result"))
sm.add_widget(ResultScreen(name="result"))
return sm
app = CheckHeart()
app.run()
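# Illustrative note (the imported ruffier module is not shown here): the Ruffier
# index reported on the result screen is conventionally computed from the three
# 15-second pulse counts p1, p2, p3 as (4 * (p1 + p2 + p3) - 200) / 10, where a
# lower value indicates better heart-rate recovery. A self-contained sketch of
# that conventional formula, for reference only:
def ruffier_index_sketch(p1, p2, p3):
    return (4 * (p1 + p2 + p3) - 200) / 10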
| 29.377273
| 86
| 0.607148
|
a1e43c22bfb1eafd5bdb6b88be7b176702073ed4
| 15,499
|
py
|
Python
|
autotest/gcore/basic_test.py
|
NathanW2/gdal
|
a5fc0fa500765f484b497d23ec5459176837e422
|
[
"MIT"
] | 2
|
2015-07-24T16:16:34.000Z
|
2015-07-24T16:16:37.000Z
|
autotest/gcore/basic_test.py
|
samalone/gdal-ios
|
beed159503ce550c4e09edb25c168c8344e8998c
|
[
"MIT"
] | null | null | null |
autotest/gcore/basic_test.py
|
samalone/gdal-ios
|
beed159503ce550c4e09edb25c168c8344e8998c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test basic GDAL open
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2008-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
sys.path.append( '../pymod' )
import gdaltest
from osgeo import gdal
# Nothing exciting here. Just trying to open non-existing files,
# or empty names, or files that are not valid datasets...
def basic_test_1():
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ds = gdal.Open('non_existing_ds', gdal.GA_ReadOnly)
gdal.PopErrorHandler()
if ds is None and gdal.GetLastErrorMsg() == '`non_existing_ds\' does not exist in the file system,\nand is not recognised as a supported dataset name.\n':
return 'success'
else:
return 'fail'
def basic_test_2():
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ds = gdal.Open('non_existing_ds', gdal.GA_Update)
gdal.PopErrorHandler()
if ds is None and gdal.GetLastErrorMsg() == '`non_existing_ds\' does not exist in the file system,\nand is not recognised as a supported dataset name.\n':
return 'success'
else:
return 'fail'
def basic_test_3():
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ds = gdal.Open('', gdal.GA_ReadOnly)
gdal.PopErrorHandler()
if ds is None and gdal.GetLastErrorMsg() == '`\' does not exist in the file system,\nand is not recognised as a supported dataset name.\n':
return 'success'
else:
return 'fail'
def basic_test_4():
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ds = gdal.Open('', gdal.GA_Update)
gdal.PopErrorHandler()
if ds is None and gdal.GetLastErrorMsg() == '`\' does not exist in the file system,\nand is not recognised as a supported dataset name.\n':
return 'success'
else:
return 'fail'
def basic_test_5():
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ds = gdal.Open('data/doctype.xml', gdal.GA_ReadOnly)
gdal.PopErrorHandler()
if ds is None and gdal.GetLastErrorMsg() == '`data/doctype.xml\' not recognised as a supported file format.\n':
return 'success'
else:
return 'fail'
###############################################################################
# Issue several AllRegister() to check that GDAL drivers are good citizens
def basic_test_6():
gdal.AllRegister()
gdal.AllRegister()
gdal.AllRegister()
return 'success'
###############################################################################
# Test fix for #3077 (check that errors are cleared when using UseExceptions())
def basic_test_7_internal():
try:
ds = gdal.Open('non_existing_ds', gdal.GA_ReadOnly)
gdaltest.post_reason('opening should have thrown an exception')
return 'fail'
except:
# Special case: we should still be able to get the error message
# until we call a new GDAL function
if gdal.GetLastErrorMsg() != '`non_existing_ds\' does not exist in the file system,\nand is not recognised as a supported dataset name.\n':
gdaltest.post_reason('did not get expected error message')
return 'fail'
if gdal.GetLastErrorType() == 0:
gdaltest.post_reason('did not get expected error type')
return 'fail'
# Should issue an implicit CPLErrorReset()
gdal.GetCacheMax()
if gdal.GetLastErrorType() != 0:
gdaltest.post_reason('got unexpected error type')
return 'fail'
return 'success'
def basic_test_7():
old_use_exceptions_status = gdal.GetUseExceptions()
gdal.UseExceptions()
ret = basic_test_7_internal()
if old_use_exceptions_status == 0:
gdal.DontUseExceptions()
return ret
###############################################################################
# Test gdal.VersionInfo('RELEASE_DATE') and gdal.VersionInfo('LICENSE')
def basic_test_8():
ret = gdal.VersionInfo('RELEASE_DATE')
if len(ret) != 8:
gdaltest.post_reason('fail')
print(ret)
return 'fail'
python_exe = sys.executable
if sys.platform == 'win32':
python_exe = python_exe.replace('\\', '/')
ret = gdaltest.runexternal(python_exe + ' basic_test.py LICENSE 0')
if ret.find('GDAL/OGR is released under the MIT/X license') != 0 and ret.find('GDAL/OGR Licensing') < 0:
gdaltest.post_reason('fail')
print(ret)
return 'fail'
f = open('tmp/LICENSE.TXT', 'wt')
f.write('fake_license')
f.close()
ret = gdaltest.runexternal(python_exe + ' basic_test.py LICENSE 1')
os.unlink('tmp/LICENSE.TXT')
if ret.find('fake_license') != 0 and ret.find('GDAL/OGR Licensing') < 0:
gdaltest.post_reason('fail')
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test gdal.PushErrorHandler() with a Python error handler
def my_python_error_handler(eErrClass, err_no, msg):
gdaltest.eErrClass = eErrClass
gdaltest.err_no = err_no
gdaltest.msg = msg
def basic_test_9():
gdaltest.eErrClass = 0
gdaltest.err_no = 0
gdaltest.msg = ''
gdal.PushErrorHandler(my_python_error_handler)
gdal.Error(1,2,'test')
gdal.PopErrorHandler()
if gdaltest.eErrClass != 1:
gdaltest.post_reason('fail')
return 'fail'
if gdaltest.err_no != 2:
gdaltest.post_reason('fail')
return 'fail'
if gdaltest.msg != 'test':
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Test gdal.PushErrorHandler() with a Python error handler as a method (#5186)
class my_python_error_handler_class:
def __init__(self):
pass
def handler(self, eErrClass, err_no, msg):
gdaltest.eErrClass = eErrClass
gdaltest.err_no = err_no
gdaltest.msg = msg
def basic_test_10():
gdaltest.eErrClass = 0
gdaltest.err_no = 0
gdaltest.msg = ''
# Check that reference counting works OK
gdal.PushErrorHandler(my_python_error_handler_class().handler)
gdal.Error(1,2,'test')
gdal.PopErrorHandler()
if gdaltest.eErrClass != 1:
gdaltest.post_reason('fail')
return 'fail'
if gdaltest.err_no != 2:
gdaltest.post_reason('fail')
return 'fail'
if gdaltest.msg != 'test':
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Test gdal.OpenEx()
def basic_test_11():
ds = gdal.OpenEx('data/byte.tif')
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', gdal.OF_RASTER)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', gdal.OF_VECTOR)
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', gdal.OF_RASTER | gdal.OF_VECTOR)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', gdal.OF_ALL)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', gdal.OF_UPDATE)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', gdal.OF_RASTER | gdal.OF_VECTOR | gdal.OF_UPDATE | gdal.OF_VERBOSE_ERROR)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', allowed_drivers = [] )
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', allowed_drivers = ['GTiff'] )
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', allowed_drivers = ['PNG'] )
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('data/byte.tif', open_options = ['FOO'] )
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ar_ds = [ gdal.OpenEx('data/byte.tif', gdal.OF_SHARED) for i in range(1024) ]
if ar_ds[1023] is None:
gdaltest.post_reason('fail')
return 'fail'
ar_ds = None
ds = gdal.OpenEx('../ogr/data/poly.shp', gdal.OF_RASTER)
if ds is not None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('../ogr/data/poly.shp', gdal.OF_VECTOR)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
if ds.GetLayerCount() != 1:
gdaltest.post_reason('fail')
return 'fail'
if ds.GetLayer(0) is None:
gdaltest.post_reason('fail')
return 'fail'
ds.GetLayer(0).GetMetadata()
ds = gdal.OpenEx('../ogr/data/poly.shp', allowed_drivers = ['ESRI Shapefile'] )
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('../ogr/data/poly.shp', gdal.OF_RASTER | gdal.OF_VECTOR)
if ds is None:
gdaltest.post_reason('fail')
return 'fail'
ds = gdal.OpenEx('non existing')
if ds is not None or gdal.GetLastErrorMsg() != '':
gdaltest.post_reason('fail')
return 'fail'
gdal.PushErrorHandler('CPLQuietErrorHandler')
ds = gdal.OpenEx('non existing', gdal.OF_VERBOSE_ERROR)
gdal.PopErrorHandler()
if ds is not None or gdal.GetLastErrorMsg() == '':
gdaltest.post_reason('fail')
return 'fail'
return 'success'
###############################################################################
# Test GDAL layer API
def basic_test_12():
ds = gdal.GetDriverByName('MEMORY').Create('bar', 0, 0, 0)
if ds.GetDescription() != 'bar':
gdaltest.post_reason('failure')
print(ds.GetDescription())
return 'fail'
lyr = ds.CreateLayer("foo")
if lyr is None:
gdaltest.post_reason('failure')
return 'fail'
if lyr.GetDescription() != 'foo':
gdaltest.post_reason('failure')
print(lyr.GetDescription())
return 'fail'
from osgeo import ogr
if lyr.TestCapability(ogr.OLCCreateField) != 1:
gdaltest.post_reason('failure')
return 'fail'
if ds.GetLayerCount() != 1:
gdaltest.post_reason('failure')
return 'fail'
lyr = ds.GetLayerByName("foo")
if lyr is None:
gdaltest.post_reason('failure')
return 'fail'
lyr = ds.GetLayerByIndex(0)
if lyr is None:
gdaltest.post_reason('failure')
return 'fail'
lyr = ds.GetLayer(0)
if lyr is None:
gdaltest.post_reason('failure')
return 'fail'
sql_lyr = ds.ExecuteSQL('SELECT * FROM foo')
if sql_lyr is None:
gdaltest.post_reason('failure')
return 'fail'
ds.ReleaseResultSet(sql_lyr)
new_lyr = ds.CopyLayer(lyr, 'bar')
if new_lyr is None:
gdaltest.post_reason('failure')
return 'fail'
if ds.DeleteLayer(0) != 0:
gdaltest.post_reason('failure')
return 'fail'
if ds.DeleteLayer('bar') != 0:
gdaltest.post_reason('failure')
return 'fail'
ds.SetStyleTable(ds.GetStyleTable())
ds = None
return 'success'
###############################################################################
# Test correct sorting of StringList / metadata (#5540, #5557)
def basic_test_13():
ds = gdal.GetDriverByName('MEM').Create('',1,1)
for i in range(3):
if i == 0:
ds.SetMetadataItem("ScaleBounds","True")
ds.SetMetadataItem("ScaleBounds.MinScale","0")
ds.SetMetadataItem("ScaleBounds.MaxScale","2000000")
elif i == 1:
ds.SetMetadataItem("ScaleBounds.MaxScale","2000000")
ds.SetMetadataItem("ScaleBounds.MinScale","0")
ds.SetMetadataItem("ScaleBounds","True")
else:
ds.SetMetadataItem("ScaleBounds.MinScale","0")
ds.SetMetadataItem("ScaleBounds","True")
ds.SetMetadataItem("ScaleBounds.MaxScale","2000000")
if ds.GetMetadataItem('scalebounds') != 'True':
gdaltest.post_reason('failure')
return 'fail'
if ds.GetMetadataItem('ScaleBounds') != 'True':
gdaltest.post_reason('failure')
return 'fail'
if ds.GetMetadataItem('SCALEBOUNDS') != 'True':
gdaltest.post_reason('failure')
return 'fail'
if ds.GetMetadataItem('ScaleBounds.MinScale') != '0':
gdaltest.post_reason('failure')
return 'fail'
if ds.GetMetadataItem('ScaleBounds.MaxScale') != '2000000':
gdaltest.post_reason('failure')
return 'fail'
ds = None
ds = gdal.GetDriverByName('MEM').Create('',1,1)
for i in range(200):
ds.SetMetadataItem("FILENAME_%d" % i, "%d" % i)
for i in range(200):
if ds.GetMetadataItem("FILENAME_%d" % i) != '%d' % i:
gdaltest.post_reason('failure')
return 'fail'
return 'success'
gdaltest_list = [ basic_test_1,
basic_test_2,
basic_test_3,
basic_test_4,
basic_test_5,
basic_test_6,
basic_test_7,
basic_test_8,
basic_test_9,
basic_test_10,
basic_test_11,
basic_test_12,
basic_test_13 ]
if __name__ == '__main__':
if len(sys.argv) == 3 and sys.argv[1] == "LICENSE":
if sys.argv[2] == '0':
gdal.SetConfigOption('GDAL_DATA', '/foo')
else:
gdal.SetConfigOption('GDAL_DATA', 'tmp')
gdal.VersionInfo('LICENSE')
print(gdal.VersionInfo('LICENSE'))
import testnonboundtoswig
testnonboundtoswig.GDALDestroyDriverManager()
sys.exit(0)
gdaltest.setup_run( 'basic_test' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
| 32.289583
| 158
| 0.595329
|
5a98a824534bd42cea39c51ca0c4839e233e52f0
| 744
|
py
|
Python
|
lang/Python/function-composition-3.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | null | null | null |
lang/Python/function-composition-3.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | null | null | null |
lang/Python/function-composition-3.py
|
ethansaxenian/RosettaDecode
|
8ea1a42a5f792280b50193ad47545d14ee371fb7
|
[
"MIT"
] | null | null | null |
from math import (acos, cos, asin, sin)
# compose (<<<) :: (b -> c) -> (a -> b) -> a -> c
def compose(g, f):
'''Right to left function composition.'''
return lambda x: g(f(x))
# main :: IO ()
def main():
'''Test'''
print((list([f(0.5) for f in zipWith(compose)(
[sin, cos, lambda x: x ** 3.0]
)([asin, acos, lambda x: x ** (1 / 3.0)])])))
# GENERIC FUNCTIONS ---------------------------------------
# zipWith :: (a -> b -> c) -> [a] -> [b] -> [c]
def zipWith(f):
'''A list constructed by zipping with a
custom function, rather than with the
default tuple constructor.'''
return lambda xs: lambda ys: (
list(map(f, xs, ys))
)
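# Illustrative usage (not part of the Rosetta task code): compose applies its
# second argument first, so compose(g, f)(x) == g(f(x)). Composing each function
# with its inverse therefore returns the input, up to floating-point error.
def _compose_demo(x=0.5):
    return [compose(f, g)(x) for f, g in [(sin, asin), (cos, acos)]]  # both ~0.5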
if __name__ == '__main__':
main()
| 21.882353
| 59
| 0.487903
|
20347941051455ebee58823b4b0be93762912b0a
| 7,508
|
py
|
Python
|
tools/third_party/pywebsocket3/test/test_util.py
|
twiss/wpt
|
df5dd5a92d8c014b21b670841ce6787e422865df
|
[
"BSD-3-Clause"
] | 2
|
2020-04-16T18:41:05.000Z
|
2021-01-30T04:33:07.000Z
|
tools/third_party/pywebsocket3/test/test_util.py
|
sh3beyat/wpt
|
cb9c3cad6fbeb2ea8e6cb2424536c95e9be3b557
|
[
"BSD-3-Clause"
] | 21
|
2021-03-31T19:48:22.000Z
|
2022-03-12T00:24:53.000Z
|
tools/third_party/pywebsocket3/test/test_util.py
|
sh3beyat/wpt
|
cb9c3cad6fbeb2ea8e6cb2424536c95e9be3b557
|
[
"BSD-3-Clause"
] | 2
|
2021-01-05T23:43:46.000Z
|
2021-01-07T23:36:34.000Z
|
#!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for util module."""
from __future__ import absolute_import
from __future__ import print_function
import os
import random
import sys
import unittest
import struct
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import util
from six.moves import range
from six import PY3
from six import int2byte
_TEST_DATA_DIR = os.path.join(os.path.split(__file__)[0], 'testdata')
class UtilTest(unittest.TestCase):
"""A unittest for util module."""
def test_prepend_message_to_exception(self):
exc = Exception('World')
self.assertEqual('World', str(exc))
util.prepend_message_to_exception('Hello ', exc)
self.assertEqual('Hello World', str(exc))
def test_get_script_interp(self):
cygwin_path = 'c:\\cygwin\\bin'
cygwin_perl = os.path.join(cygwin_path, 'perl')
self.assertEqual(
None, util.get_script_interp(os.path.join(_TEST_DATA_DIR,
'README')))
self.assertEqual(
None,
util.get_script_interp(os.path.join(_TEST_DATA_DIR, 'README'),
cygwin_path))
self.assertEqual(
'/usr/bin/perl -wT',
util.get_script_interp(os.path.join(_TEST_DATA_DIR, 'hello.pl')))
self.assertEqual(
cygwin_perl + ' -wT',
util.get_script_interp(os.path.join(_TEST_DATA_DIR, 'hello.pl'),
cygwin_path))
def test_hexify(self):
self.assertEqual('61 7a 41 5a 30 39 20 09 0d 0a 00 ff',
util.hexify(b'azAZ09 \t\r\n\x00\xff'))
class RepeatedXorMaskerTest(unittest.TestCase):
"""A unittest for RepeatedXorMasker class."""
def test_mask(self):
# Sample input e6,97,a5 is U+65e5 in UTF-8
masker = util.RepeatedXorMasker(b'\xff\xff\xff\xff')
result = masker.mask(b'\xe6\x97\xa5')
self.assertEqual(b'\x19\x68\x5a', result)
masker = util.RepeatedXorMasker(b'\x00\x00\x00\x00')
result = masker.mask(b'\xe6\x97\xa5')
self.assertEqual(b'\xe6\x97\xa5', result)
masker = util.RepeatedXorMasker(b'\xe6\x97\xa5\x20')
result = masker.mask(b'\xe6\x97\xa5')
self.assertEqual(b'\x00\x00\x00', result)
def test_mask_twice(self):
masker = util.RepeatedXorMasker(b'\x00\x7f\xff\x20')
# mask[0], mask[1], ... will be used.
result = masker.mask(b'\x00\x00\x00\x00\x00')
self.assertEqual(b'\x00\x7f\xff\x20\x00', result)
        # mask[1], mask[2], ... will be used for the next call.
result = masker.mask(b'\x00\x00\x00\x00\x00')
self.assertEqual(b'\x7f\xff\x20\x00\x7f', result)
def test_mask_large_data(self):
masker = util.RepeatedXorMasker(b'mASk')
original = b''.join([util.pack_byte(i % 256) for i in range(1000)])
result = masker.mask(original)
expected = b''.join([
util.pack_byte((i % 256) ^ ord('mASk'[i % 4])) for i in range(1000)
])
self.assertEqual(expected, result)
masker = util.RepeatedXorMasker(b'MaSk')
first_part = b'The WebSocket Protocol enables two-way communication.'
result = masker.mask(first_part)
self.assertEqual(
b'\x19\t6K\x1a\x0418"\x028\x0e9A\x03\x19"\x15<\x08"\rs\x0e#'
b'\x001\x07(\x12s\x1f:\x0e~\x1c,\x18s\x08"\x0c>\x1e#\x080\n9'
b'\x08<\x05c', result)
second_part = b'It has two parts: a handshake and the data transfer.'
result = masker.mask(second_part)
self.assertEqual(
b"('K%\x00 K9\x16<K=\x00!\x1f>[s\nm\t2\x05)\x12;\n&\x04s\n#"
b"\x05s\x1f%\x04s\x0f,\x152K9\x132\x05>\x076\x19c", result)
def get_random_section(source, min_num_chunks):
chunks = []
bytes_chunked = 0
while bytes_chunked < len(source):
chunk_size = random.randint(
            1, min(len(source) // min_num_chunks,
len(source) - bytes_chunked))
chunk = source[bytes_chunked:bytes_chunked + chunk_size]
chunks.append(chunk)
bytes_chunked += chunk_size
return chunks
class InflaterDeflaterTest(unittest.TestCase):
"""A unittest for _Inflater and _Deflater class."""
def test_inflate_deflate_default(self):
input = b'hello' + b'-' * 30000 + b'hello'
inflater15 = util._Inflater(15)
deflater15 = util._Deflater(15)
inflater8 = util._Inflater(8)
deflater8 = util._Deflater(8)
compressed15 = deflater15.compress_and_finish(input)
compressed8 = deflater8.compress_and_finish(input)
inflater15.append(compressed15)
inflater8.append(compressed8)
self.assertNotEqual(compressed15, compressed8)
self.assertEqual(input, inflater15.decompress(-1))
self.assertEqual(input, inflater8.decompress(-1))
def test_random_section(self):
random.seed(a=0)
source = b''.join(
[int2byte(random.randint(0, 255)) for i in range(100 * 1024)])
chunked_input = get_random_section(source, 10)
print("Input chunk sizes: %r" % [len(c) for c in chunked_input])
deflater = util._Deflater(15)
compressed = []
for chunk in chunked_input:
compressed.append(deflater.compress(chunk))
compressed.append(deflater.compress_and_finish(b''))
chunked_expectation = get_random_section(source, 10)
print("Expectation chunk sizes: %r" %
[len(c) for c in chunked_expectation])
inflater = util._Inflater(15)
inflater.append(b''.join(compressed))
for chunk in chunked_expectation:
decompressed = inflater.decompress(len(chunk))
self.assertEqual(chunk, decompressed)
self.assertEqual(b'', inflater.decompress(-1))
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| 38.502564
| 79
| 0.655434
|
072a3d7cfd6be0f554d91000a01ef38e1a310073
| 4,183
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/graph_objs/choroplethmapbox/_stream.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
env/lib/python3.8/site-packages/plotly/graph_objs/choroplethmapbox/_stream.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
env/lib/python3.8/site-packages/plotly/graph_objs/choroplethmapbox/_stream.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "choroplethmapbox"
_path_str = "choroplethmapbox.stream"
_valid_props = {"maxpoints", "token"}
# maxpoints
# ---------
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
# token
# -----
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.choroplethmapbox.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super(Stream, self).__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.choroplethmapbox.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choroplethmapbox.Stream`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("maxpoints", None)
_v = maxpoints if maxpoints is not None else _v
if _v is not None:
self["maxpoints"] = _v
_v = arg.pop("token", None)
_v = token if token is not None else _v
if _v is not None:
self["token"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
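# Illustrative usage (a minimal sketch, not part of the generated module; the token
# value below is a placeholder): the two properties can be set at construction time
# or assigned afterwards, and serialize through to_plotly_json().
def _stream_usage_sketch():
    stream = Stream(maxpoints=50, token="example-stream-token")
    stream.maxpoints = 100  # keep only the newest 100 streamed points
    return stream.to_plotly_json()  # {'maxpoints': 100, 'token': 'example-stream-token'}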
| 29.457746
| 82
| 0.543629
|
90c08775744427d819a071d7686bf4a0b63534aa
| 5,016
|
py
|
Python
|
networks/networks_pytorch.py
|
SamuelSchmidgall/RodentNavigation
|
2ec49c5f43aa456ba648d1117a1b76241ad7a946
|
[
"MIT"
] | 2
|
2021-01-03T17:41:02.000Z
|
2022-02-28T22:37:48.000Z
|
Learning/Networks/networks_pytorch.py
|
SamuelSchmidgall/ImpossibleArcade
|
945e6b6a39809e35a80eeda4e3da537c4dc92eb7
|
[
"MIT"
] | null | null | null |
Learning/Networks/networks_pytorch.py
|
SamuelSchmidgall/ImpossibleArcade
|
945e6b6a39809e35a80eeda4e3da537c4dc92eb7
|
[
"MIT"
] | 1
|
2022-02-28T22:37:48.000Z
|
2022-02-28T22:37:48.000Z
|
import torch
import numpy as np
import torch.nn as nn
from torch.distributions import Normal
from Learning.Networks.network_modules_torch import *
LOG_SIG_MAX = 2
LOG_SIG_MIN = -20
class ValueNetwork(nn.Module):
def __init__(self, input_dim, output_dim=1):
super(ValueNetwork, self).__init__()
ff1_meta = {
"activation": None, "input_size": input_dim,
"output_size": 64, "initialization": "orthogonal"}
self.ff1 = NetworkConnectivityModule("linear", ff1_meta)
ff2_meta = {
"activation": None, "input_size": 64,
"output_size": 64, "initialization": "orthogonal"}
self.ff2 = NetworkConnectivityModule("linear", ff2_meta)
ff3_meta = {
"activation": None, "input_size": 64,
"output_size": output_dim, "initialization": "orthogonal"}
self.ff3 = NetworkConnectivityModule("linear", ff3_meta)
def forward(self, x):
x = torch.relu(self.ff1(x))
x = torch.relu(self.ff2(x))
x = self.ff3(x)
return x
class QNetwork(nn.Module):
def __init__(self, num_inputs, num_actions, output_dim=1):
super(QNetwork, self).__init__()
# Q1 architecture
ff1_1_meta = {
"activation": None, "input_size": num_inputs + num_actions,
"output_size": 256, "initialization": "orthogonal"}
self.ff1_1 = NetworkConnectivityModule("linear", ff1_1_meta)
ff1_2_meta = {
"activation": None, "input_size": 256,
"output_size": 256, "initialization": "orthogonal"}
self.ff1_2 = NetworkConnectivityModule("linear", ff1_2_meta)
ff1_3_meta = {
"activation": None, "input_size": 256,
"output_size": output_dim, "initialization": "orthogonal"}
self.ff1_3 = NetworkConnectivityModule("linear", ff1_3_meta)
# Q2 architecture
ff2_1_meta = {
"activation": None, "input_size": num_inputs + num_actions,
"output_size": 256, "initialization": "orthogonal"}
self.ff2_1 = NetworkConnectivityModule("linear", ff2_1_meta)
ff2_2_meta = {
"activation": None, "input_size": 256,
"output_size": 256, "initialization": "orthogonal"}
self.ff2_2 = NetworkConnectivityModule("linear", ff2_2_meta)
ff2_3_meta = {
"activation": None, "input_size": 256,
"output_size": output_dim, "initialization": "orthogonal"}
self.ff2_3 = NetworkConnectivityModule("linear", ff2_3_meta)
def forward(self, state, action):
xu = torch.cat([state, action], 1)
x1 = torch.relu(self.ff1_1(xu))
x1 = torch.relu(self.ff1_2(x1))
x1 = self.ff1_3(x1)
x2 = torch.relu(self.ff2_1(xu))
x2 = torch.relu(self.ff2_2(x2))
x2 = self.ff2_3(x2)
return x1, x2
class PongNetwork(nn.Module):
def __init__(self, input_dim, output_dim, action_high, action_low):
super(PongNetwork, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.action_bias = torch.FloatTensor((action_high + action_low) / 2.)
self.action_scale = torch.FloatTensor((action_high - action_low) / 2.)
ff1_meta = {
"activation": None, "input_size": input_dim,
"output_size": 256, "initialization": "orthogonal"}
self.ff1 = NetworkConnectivityModule("linear", ff1_meta)
ff2_meta = {
"activation": None, "input_size": 256,
"output_size": 256, "initialization": "orthogonal"}
self.ff2 = NetworkConnectivityModule("linear", ff2_meta)
ff3_meta = {
"activation": None, "input_size": 256,
"output_size": output_dim, "initialization": "orthogonal"}
self.ff3 = NetworkConnectivityModule("linear", ff3_meta)
log_std_linear_meta = {
"activation": None, "input_size": 256,
"output_size": output_dim, "initialization": "orthogonal"}
self.log_std_linear = NetworkConnectivityModule("linear", log_std_linear_meta)
def forward(self, x):
x = torch.relu(self.ff1(x))
x = torch.relu(self.ff2(x))
mean = self.ff3(x)
log_std = self.log_std_linear(x)
log_std = torch.clamp(log_std, min=LOG_SIG_MIN, max=LOG_SIG_MAX)
return mean, log_std
def sample(self, state):
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
x_t = normal.rsample() # for reparameterization trick (mean + std * N(0,1))
y_t = torch.tanh(x_t)
action = y_t * self.action_scale + self.action_bias
log_prob = normal.log_prob(x_t)
# Enforcing Action Bound
log_prob -= torch.log(self.action_scale * (1 - y_t.pow(2)) + 1e-6)
log_prob = log_prob.sum(1, keepdim=True)
mean = torch.tanh(mean) * self.action_scale + self.action_bias
return action, log_prob, mean
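# Illustrative sketch (not part of the training code): the log-probability
# correction in PongNetwork.sample() is the standard tanh-squashing change of
# variables used in SAC: log pi(a|s) = log N(x) - sum log(1 - tanh(x)^2 + eps).
# The helper below reproduces that computation with a unit action scale.
def _squashed_gaussian_logprob(mean, log_std):
    std = log_std.exp()
    normal = Normal(mean, std)
    x_t = normal.rsample()            # reparameterized sample: mean + std * N(0, 1)
    y_t = torch.tanh(x_t)             # squashed action in (-1, 1)
    log_prob = normal.log_prob(x_t) - torch.log(1 - y_t.pow(2) + 1e-6)
    return y_t, log_prob.sum(-1, keepdim=True)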
| 33.218543
| 86
| 0.615032
|
132458dab35e41c22da4265fa7bb28b9624418b3
| 182
|
py
|
Python
|
ABC/045/a.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
ABC/045/a.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
ABC/045/a.py
|
fumiyanll23/AtCoder
|
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
|
[
"MIT"
] | null | null | null |
def main():
# input
a = int(input())
b = int(input())
h = int(input())
# compute
# output
print((a+b) * h // 2)
if __name__ == '__main__':
main()
| 12.133333
| 26
| 0.456044
|
59066d236b8014879b0608d92991af890d43981d
| 5,003
|
py
|
Python
|
magenta/video/tools/extract_frames.py
|
fanzhiyan/magenta
|
622c47c19bb84c6f57b286ed03b738516b2f27d6
|
[
"Apache-2.0"
] | 16
|
2016-09-02T04:59:30.000Z
|
2022-01-11T10:38:29.000Z
|
magenta/video/tools/extract_frames.py
|
fanzhiyan/magenta
|
622c47c19bb84c6f57b286ed03b738516b2f27d6
|
[
"Apache-2.0"
] | 2
|
2016-09-25T16:39:59.000Z
|
2016-11-18T17:43:41.000Z
|
magenta/video/tools/extract_frames.py
|
fanzhiyan/magenta
|
622c47c19bb84c6f57b286ed03b738516b2f27d6
|
[
"Apache-2.0"
] | 10
|
2016-09-02T04:59:32.000Z
|
2021-09-29T06:57:24.000Z
|
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transform one or multiple video in a set of frames.
Files are prefixed by a f followed by the frame number.
"""
from __future__ import print_function
import argparse
import glob
import os
from PIL import Image
import skvideo.io
PARSER = argparse.ArgumentParser(description="""
Transform one or multiple videos into a set of frames.
Files are prefixed by an f followed by the frame number""")
PARSER.add_argument(
'--video_in',
dest='video_in',
help="""one video or a path and a wildcard,
wildcard need to be inside a quote,
please note that ~ can be expanded only outside quote
for instance ~/test.'*' works, but '~/test.*' won't""",
required=True)
PARSER.add_argument(
'--from',
dest='from_s',
type=float,
default=-1,
help='starting time in second (-1)')
PARSER.add_argument(
'--to',
dest='to_s',
type=float,
default=-1,
help='last time in second (-1)')
PARSER.add_argument(
'--path_out', dest='path_out', default='./', help='Destination folder (./)')
PARSER.add_argument(
'--offset',
dest='offset',
type=int,
default=0,
help="""skip first frame to offset the output (0)
useful with '--skip' to extract only a subset""")
PARSER.add_argument(
'--skip',
dest='skip',
type=int,
default=1,
help='"--skip n" will extract every n frames (1)')
PARSER.add_argument(
'--size',
dest='size',
type=int,
default=256,
help='size (256), this argument is used, only if cropped')
PARSER.add_argument(
'--start',
dest='start',
type=int,
default=0,
help='starting number for the filename (0)')
PARSER.add_argument(
'--multiple',
dest='multiple',
type=int,
default=10000,
help=
'''if used with a wildcard (*),
"multiple" will be added for each video (10000)'''
)
PARSER.add_argument(
'--format', dest='format_ext', default='jpg', help='(jpg) or png')
PARSER.add_argument(
'--crop',
dest='crop',
action='store_true',
help='by default the video is cropped')
PARSER.add_argument(
'--strech',
dest='crop',
action='store_false',
    help='the video can be stretched to a square ratio')
PARSER.set_defaults(crop=True)
ARGS = PARSER.parse_args()
def crop(img, size):
"""resize the images.
Args:
img: a pillow image
size: the size of the image (both x & y)
Returns:
    the cropped and resized pillow image
"""
small_side = min(img.size)
center = img.size[0] / 2
margin_left = center - small_side / 2
margin_right = margin_left + small_side
img = img.crop((margin_left, 0, margin_right, small_side))
img = img.resize((size, size), Image.ANTIALIAS)
return img
def main(_):
"""The main fonction use skvideo to extract frames as jpg.
It can do it from a part or the totality of the video.
Args:
Nothing
"""
print('argument to expand', ARGS.video_in)
print('argument expanded', glob.glob(ARGS.video_in))
video_count = 0
for video_filename in glob.glob(ARGS.video_in):
print('start parsing', video_filename)
data = skvideo.io.ffprobe(video_filename)['video']
rate_str = data['@r_frame_rate'].split('/')
rate = float(rate_str[0]) / float(rate_str[1])
print('detected frame rate:', rate)
print('load frames:')
video = skvideo.io.vreader(video_filename)
frame_count = 0
file_count = 0
for frame in video:
if (frame_count > ARGS.offset) and \
((frame_count-ARGS.offset)%ARGS.skip == 0) and \
(frame_count/rate >= ARGS.from_s) and \
(frame_count/rate <= ARGS.to_s or ARGS.to_s == -1):
print(frame_count,)
img = Image.fromarray(frame)
if ARGS.crop:
img = crop(img, ARGS.size)
# save file
file_number = file_count + video_count * ARGS.multiple + ARGS.start
if ARGS.format_ext.lower() == 'jpg':
file_out = os.path.join(ARGS.path_out,
'f{:07d}.jpg'.format(file_number))
img.save(file_out, 'JPEG')
elif ARGS.format_ext.lower() == 'png':
file_out = os.path.join(ARGS.path_out,
'f{:07d}.png'.format(file_number))
img.save(file_out, 'PNG')
else:
          print('unrecognized format', ARGS.format_ext)
quit()
file_count += 1
frame_count += 1
video_count += 1
if __name__ == '__main__':
main(0)
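# Illustrative invocation (paths below are placeholders, not from the original script):
#   python extract_frames.py --video_in './videos/*.mp4' --path_out ./frames \
#       --from 10 --to 20 --skip 5 --size 256 --format jpg
# extracts every 5th frame between seconds 10 and 20 of each matched video,
# center-crops it to 256x256 and writes sequentially numbered files f0000000.jpg,
# f0000001.jpg, ... with an offset of 10000 added per additional video.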
| 28.426136
| 80
| 0.637018
|
cc5b06a166c7483387845050c7feb974c80221d9
| 27,941
|
py
|
Python
|
custom_components/openhasp/__init__.py
|
Chorty/openHASP-custom-component
|
21174f1b6d5c41cee27bdcf52526b362a440f9fa
|
[
"MIT"
] | null | null | null |
custom_components/openhasp/__init__.py
|
Chorty/openHASP-custom-component
|
21174f1b6d5c41cee27bdcf52526b362a440f9fa
|
[
"MIT"
] | null | null | null |
custom_components/openhasp/__init__.py
|
Chorty/openHASP-custom-component
|
21174f1b6d5c41cee27bdcf52526b362a440f9fa
|
[
"MIT"
] | null | null | null |
"""HASP components module."""
import hashlib
import json
import logging
import os
import pathlib
import re
import jsonschema
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.const import CONF_NAME, STATE_UNAVAILABLE, STATE_UNKNOWN
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import device_registry as dr, entity_registry
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.event import TrackTemplate, async_track_template_result
from homeassistant.helpers.network import get_url
from homeassistant.helpers.reload import async_integration_yaml_config
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.service import async_call_from_config
from homeassistant.util import slugify
import voluptuous as vol
from .common import HASP_IDLE_SCHEMA
from .const import (
ATTR_CONFIG_SUBMODULE,
ATTR_HEIGHT,
ATTR_IDLE,
ATTR_IMAGE,
ATTR_OBJECT,
ATTR_PAGE,
ATTR_PATH,
ATTR_COMMAND_KEYWORD,
ATTR_COMMAND_PARAMETERS,
ATTR_CONFIG_PARAMETERS,
ATTR_WIDTH,
CONF_COMPONENT,
CONF_EVENT,
CONF_HWID,
CONF_OBJECTS,
CONF_OBJID,
CONF_PAGES,
CONF_PAGES_PATH,
CONF_PLATE,
CONF_PROPERTIES,
CONF_TOPIC,
CONF_TRACK,
DATA_IMAGES,
DATA_LISTENER,
DISCOVERED_MANUFACTURER,
DISCOVERED_MODEL,
DISCOVERED_URL,
DISCOVERED_VERSION,
DOMAIN,
EVENT_HASP_PLATE_OFFLINE,
EVENT_HASP_PLATE_ONLINE,
HASP_EVENT,
HASP_EVENT_DOWN,
HASP_EVENT_RELEASE,
HASP_EVENT_UP,
HASP_EVENTS,
HASP_LWT,
HASP_NUM_PAGES,
HASP_ONLINE,
HASP_VAL,
MAJOR,
MINOR,
SERVICE_CLEAR_PAGE,
SERVICE_LOAD_PAGE,
SERVICE_PAGE_CHANGE,
SERVICE_PAGE_NEXT,
SERVICE_PAGE_PREV,
SERVICE_PUSH_IMAGE,
SERVICE_WAKEUP,
SERVICE_COMMAND,
SERVICE_CONFIG,
)
from .image import ImageServeView, image_to_rgb565
_LOGGER = logging.getLogger(__name__)
PLATFORMS = [LIGHT_DOMAIN, SWITCH_DOMAIN, BINARY_SENSOR_DOMAIN]
def hasp_object(value):
"""Validade HASP-LVGL object format."""
if re.match("p[0-9]+b[0-9]+", value):
return value
raise vol.Invalid("Not an HASP-LVGL object p#b#")
# Configuration YAML schemas
EVENT_SCHEMA = cv.schema_with_slug_keys([cv.SERVICE_SCHEMA])
PROPERTY_SCHEMA = cv.schema_with_slug_keys(cv.template)
OBJECT_SCHEMA = vol.Schema(
{
vol.Required(CONF_OBJID): hasp_object,
vol.Optional(CONF_TRACK, default=None): vol.Any(cv.entity_id, None),
vol.Optional(CONF_PROPERTIES, default={}): PROPERTY_SCHEMA,
vol.Optional(CONF_EVENT, default={}): EVENT_SCHEMA,
}
)
PLATE_SCHEMA = vol.Schema(
{
vol.Optional(CONF_OBJECTS): vol.All(cv.ensure_list, [OBJECT_SCHEMA]),
},
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({cv.slug: PLATE_SCHEMA})}, extra=vol.ALLOW_EXTRA
)
# JSON Messages from HASP schemas
HASP_VAL_SCHEMA = vol.Schema(
{vol.Required(HASP_VAL): vol.All(int, vol.Range(min=0, max=1))},
extra=vol.ALLOW_EXTRA,
)
HASP_EVENT_SCHEMA = vol.Schema(
{vol.Required(HASP_EVENT): vol.Any(*HASP_EVENTS)}, extra=vol.ALLOW_EXTRA
)
HASP_STATUSUPDATE_SCHEMA = vol.Schema(
{
vol.Required("node"): cv.string,
vol.Required("version"): cv.string,
vol.Required("uptime"): int,
vol.Required("canUpdate"): cv.boolean,
},
extra=vol.ALLOW_EXTRA,
)
HASP_LWT_SCHEMA = vol.Schema(vol.Any(*HASP_LWT))
HASP_PAGE_SCHEMA = vol.Schema(vol.All(vol.Coerce(int), vol.Range(min=0, max=12)))
PUSH_IMAGE_SCHEMA = vol.Schema(
{
vol.Required(ATTR_IMAGE): vol.Any(cv.url, cv.isfile),
vol.Required(ATTR_OBJECT): hasp_object,
vol.Optional(ATTR_WIDTH): cv.positive_int,
vol.Optional(ATTR_HEIGHT): cv.positive_int,
},
extra=vol.ALLOW_EXTRA,
)
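# Illustrative YAML sketch (the object id, entity ids and key names below are
# assumptions inferred from the CONF_* constants above, not taken from this file):
# a plate entry matching PLATE_SCHEMA/OBJECT_SCHEMA lives under the integration
# slug and wires plate objects to Home Assistant entities and services, e.g.
#   openhasp:
#     plate_kitchen:
#       objects:
#         - obj: "p1b10"              # must match the p#b# pattern from hasp_object()
#           track: light.kitchen      # entity whose state updates the object
#           properties:
#             "val": "{{ 1 if states('light.kitchen') == 'on' else 0 }}"
#           event:
#             "up":
#               - service: light.toggle
#                 entity_id: light.kitchen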
async def async_setup(hass, config):
"""Set up the MQTT async example component."""
conf = config.get(DOMAIN)
if conf is None:
        # We still depend on YAML, so we must fail
        _LOGGER.error(
            "openHASP requires you to set up your plate objects in your YAML configuration."
)
return False
hass.data[DOMAIN] = {CONF_PLATE: {}}
component = hass.data[DOMAIN][CONF_COMPONENT] = EntityComponent(
_LOGGER, DOMAIN, hass
)
component.async_register_entity_service(SERVICE_WAKEUP, {}, "async_wakeup")
component.async_register_entity_service(
SERVICE_PAGE_NEXT, {}, "async_change_page_next"
)
component.async_register_entity_service(
SERVICE_PAGE_PREV, {}, "async_change_page_prev"
)
component.async_register_entity_service(
SERVICE_PAGE_CHANGE, {vol.Required(ATTR_PAGE): int}, "async_change_page"
)
component.async_register_entity_service(
SERVICE_LOAD_PAGE, {vol.Required(ATTR_PATH): cv.isfile}, "async_load_page"
)
component.async_register_entity_service(
SERVICE_CLEAR_PAGE, {vol.Optional(ATTR_PAGE): int}, "async_clearpage"
)
component.async_register_entity_service(
SERVICE_COMMAND,
{
vol.Required(ATTR_COMMAND_KEYWORD): cv.string,
vol.Optional(ATTR_COMMAND_PARAMETERS, default=""): cv.string,
},
"async_command_service",
)
component.async_register_entity_service(
SERVICE_CONFIG,
{
vol.Required(ATTR_CONFIG_SUBMODULE): cv.string,
vol.Required(ATTR_CONFIG_PARAMETERS): cv.string,
},
"async_config_service",
)
component.async_register_entity_service(
SERVICE_PUSH_IMAGE, PUSH_IMAGE_SCHEMA, "async_push_image"
)
hass.data[DOMAIN][DATA_IMAGES] = dict()
hass.http.register_view(ImageServeView)
return True
async def async_update_options(hass, entry):
"""Handle options update."""
_LOGGER.debug("Reloading")
await hass.config_entries.async_reload(entry.entry_id)
async def async_setup_entry(hass, entry) -> bool:
"""Set up OpenHASP via a config entry."""
plate = entry.data[CONF_NAME]
_LOGGER.debug("Setup %s", plate)
hass_config = await async_integration_yaml_config(hass, DOMAIN)
if DOMAIN not in hass_config or slugify(plate) not in hass_config[DOMAIN]:
_LOGGER.error(
"No YAML configuration for %s, \
please create an entry under 'openhasp' with the slug: %s",
plate,
slugify(plate),
)
return False
config = hass_config[DOMAIN][slugify(plate)]
# Register Plate device
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers={(DOMAIN, entry.data[CONF_HWID])},
manufacturer=entry.data[DISCOVERED_MANUFACTURER],
model=entry.data[DISCOVERED_MODEL],
sw_version=entry.data[DISCOVERED_VERSION],
configuration_url=entry.data.get(DISCOVERED_URL),
name=plate,
)
# Add entity to component
component = hass.data[DOMAIN][CONF_COMPONENT]
plate_entity = SwitchPlate(hass, config, entry)
await component.async_add_entities([plate_entity])
hass.data[DOMAIN][CONF_PLATE][plate] = plate_entity
for domain in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, domain)
)
listener = entry.add_update_listener(async_update_options)
hass.data[DOMAIN][CONF_PLATE][DATA_LISTENER] = listener
return True
async def async_unload_entry(hass, entry):
"""Remove a config entry."""
plate = entry.data[CONF_NAME]
_LOGGER.debug("Unload entry for plate %s", plate)
listener = hass.data[DOMAIN][CONF_PLATE][DATA_LISTENER]
    # Only remove the shared services if this is the last plate
if len(hass.data[DOMAIN][CONF_PLATE]) == 1:
hass.services.async_remove(DOMAIN, SERVICE_WAKEUP)
hass.services.async_remove(DOMAIN, SERVICE_PAGE_NEXT)
hass.services.async_remove(DOMAIN, SERVICE_PAGE_PREV)
hass.services.async_remove(DOMAIN, SERVICE_PAGE_CHANGE)
hass.services.async_remove(DOMAIN, SERVICE_LOAD_PAGE)
hass.services.async_remove(DOMAIN, SERVICE_CLEAR_PAGE)
hass.services.async_remove(DOMAIN, SERVICE_COMMAND)
for domain in PLATFORMS:
await hass.config_entries.async_forward_entry_unload(entry, domain)
device_registry = await dr.async_get_registry(hass)
dev = device_registry.async_get_device(
identifiers={(DOMAIN, entry.data[CONF_HWID])}
)
if entry.entry_id in dev.config_entries:
_LOGGER.debug("Removing device %s", dev)
device_registry.async_remove_device(dev.id)
component = hass.data[DOMAIN][CONF_COMPONENT]
await component.async_remove_entity(hass.data[DOMAIN][CONF_PLATE][plate].entity_id)
# Component does not remove entity from entity_registry, so we must do it
registry = await entity_registry.async_get_registry(hass)
registry.async_remove(hass.data[DOMAIN][CONF_PLATE][plate].entity_id)
listener()
# Remove Plate entity
del hass.data[DOMAIN][CONF_PLATE][plate]
return True
# pylint: disable=R0902
class SwitchPlate(RestoreEntity):
"""Representation of an openHASP Plate."""
def __init__(self, hass, config, entry):
"""Initialize a plate."""
super().__init__()
self._entry = entry
self._topic = entry.data[CONF_TOPIC]
self._pages_jsonl = entry.options.get(
CONF_PAGES_PATH, entry.data.get(CONF_PAGES_PATH)
)
self._objects = []
for obj in config[CONF_OBJECTS]:
new_obj = HASPObject(hass, self._topic, obj)
self._objects.append(new_obj)
self._statusupdate = {HASP_NUM_PAGES: entry.data[CONF_PAGES]}
self._available = False
self._page = 1
self._subscriptions = []
with open(
pathlib.Path(__file__).parent.joinpath("pages_schema.json"), "r"
) as schema_file:
self.json_schema = json.load(schema_file)
async def async_will_remove_from_hass(self):
"""Run before entity is removed."""
_LOGGER.debug("Remove plate %s", self._entry.data[CONF_NAME])
for obj in self._objects:
await obj.disable_object()
for subscription in self._subscriptions:
subscription()
async def async_added_to_hass(self):
"""Run when entity about to be added."""
await super().async_added_to_hass()
state = await self.async_get_last_state()
if state and state.state not in [STATE_UNAVAILABLE, STATE_UNKNOWN, None]:
self._page = int(state.state)
@callback
async def page_update_received(msg):
"""Process page state."""
try:
self._page = HASP_PAGE_SCHEMA(msg.payload)
_LOGGER.debug("Page changed to %s", self._page)
self.async_write_ha_state()
except vol.error.Invalid as err:
_LOGGER.error("%s in %s", err, msg.payload)
self._subscriptions.append(
await self.hass.components.mqtt.async_subscribe(
f"{self._topic}/state/page", page_update_received
)
)
@callback
async def statusupdate_message_received(msg):
"""Process statusupdate."""
try:
message = HASP_STATUSUPDATE_SCHEMA(json.loads(msg.payload))
major, minor, _ = message["version"].split(".")
if (major, minor) != (MAJOR, MINOR):
self.hass.components.persistent_notification.create(
f"You require firmware version {MAJOR}.{MINOR}.x \
in plate {self._entry.data[CONF_NAME]} \
for this component to work properly.\
<br>Some features will simply not work!",
title="openHASP Firmware mismatch",
notification_id="openhasp_firmware_notification",
)
_LOGGER.error(
"%s firmware mismatch %s <> %s",
self._entry.data[CONF_NAME],
(major, minor),
(MAJOR, MINOR),
)
self._available = True
self._statusupdate = message
self._page = message[ATTR_PAGE]
self.async_write_ha_state()
except vol.error.Invalid as err:
_LOGGER.error("While processing status update: %s", err)
self._subscriptions.append(
await self.hass.components.mqtt.async_subscribe(
f"{self._topic}/state/statusupdate", statusupdate_message_received
)
)
await self.hass.components.mqtt.async_publish(
self.hass, f"{self._topic}/command", "statusupdate", qos=0, retain=False
)
@callback
async def idle_message_received(msg):
"""Process idle message."""
try:
self._statusupdate[ATTR_IDLE] = HASP_IDLE_SCHEMA(msg.payload)
self.async_write_ha_state()
except vol.error.Invalid as err:
_LOGGER.error("While processing idle message: %s", err)
self._subscriptions.append(
await self.hass.components.mqtt.async_subscribe(
f"{self._topic}/state/idle", idle_message_received
)
)
@callback
async def lwt_message_received(msg):
"""Process LWT."""
_LOGGER.debug("Received LWT = %s", msg.payload)
try:
message = HASP_LWT_SCHEMA(msg.payload)
if message == HASP_ONLINE:
self._available = True
self.hass.bus.async_fire(
EVENT_HASP_PLATE_ONLINE,
{CONF_PLATE: self._entry.data[CONF_HWID]},
)
if self._pages_jsonl:
await self.async_load_page(self._pages_jsonl)
else:
await self.refresh()
for obj in self._objects:
await obj.enable_object()
else:
self._available = False
self.hass.bus.async_fire(
EVENT_HASP_PLATE_OFFLINE,
{CONF_PLATE: self._entry.data[CONF_HWID]},
)
for obj in self._objects:
await obj.disable_object()
self.async_write_ha_state()
except vol.error.Invalid as err:
_LOGGER.error("While processing LWT: %s", err)
self._subscriptions.append(
await self.hass.components.mqtt.async_subscribe(
f"{self._topic}/LWT", lwt_message_received
)
)
@property
def unique_id(self):
"""Return the plate identifier."""
return self._entry.data[CONF_HWID]
@property
def name(self):
"""Return the name of the plate."""
return self._entry.data[CONF_NAME]
@property
def icon(self):
"""Return the icon to be used for this entity."""
return "mdi:gesture-tap-box"
@property
def state(self):
"""Return the state of the component."""
return self._page
@property
def available(self):
"""Return if entity is available."""
return self._available
@property
def state_attributes(self):
"""Return the state attributes."""
attributes = {}
if self._statusupdate:
attributes = {**attributes, **self._statusupdate}
if ATTR_PAGE in attributes:
del attributes[
ATTR_PAGE
] # Page is tracked in the state, don't confuse users
return attributes
async def async_wakeup(self):
"""Wake up the display."""
cmd_topic = f"{self._topic}/command"
_LOGGER.warning("Wakeup will be deprecated in 0.8.0") # remove in version 0.8.0
await self.hass.components.mqtt.async_publish(
self.hass, cmd_topic, "wakeup", qos=0, retain=False
)
async def async_change_page_next(self):
"""Change page to next one."""
cmd_topic = f"{self._topic}/command/page"
_LOGGER.warning(
"page next service will be deprecated in 0.8.0"
) # remove in version 0.8.0
await self.hass.components.mqtt.async_publish(
self.hass, cmd_topic, "page next", qos=0, retain=False
)
async def async_change_page_prev(self):
"""Change page to previous one."""
cmd_topic = f"{self._topic}/command/page"
_LOGGER.warning(
"page prev service will be deprecated in 0.8.0"
) # remove in version 0.8.0
await self.hass.components.mqtt.async_publish(
self.hass, cmd_topic, "page prev", qos=0, retain=False
)
async def async_clearpage(self, page="all"):
"""Clear page."""
cmd_topic = f"{self._topic}/command"
await self.hass.components.mqtt.async_publish(
self.hass, cmd_topic, f"clearpage {page}", qos=0, retain=False
)
if page == "all":
await self.hass.components.mqtt.async_publish(
self.hass, cmd_topic, "page 1", qos=0, retain=False
)
async def async_change_page(self, page):
"""Change page to number."""
cmd_topic = f"{self._topic}/command/page"
if self._statusupdate:
num_pages = self._statusupdate[HASP_NUM_PAGES]
if page <= 0 or page > num_pages:
_LOGGER.error(
"Can't change to %s, available pages are 1 to %s", page, num_pages
)
return
self._page = page
_LOGGER.debug("Change page %s", self._page)
await self.hass.components.mqtt.async_publish(
self.hass, cmd_topic, self._page, qos=0, retain=False
)
self.async_write_ha_state()
async def async_command_service(self, keyword, parameters):
"""Sends commands directly to the plate entity"""
await self.hass.components.mqtt.async_publish(
self.hass,
f"{self._topic}/command",
f"{keyword} {parameters}".strip(),
qos=0,
retain=False,
)
async def async_config_service(self, submodule, parameters):
"""Sends configuration commands to plate entity"""
await self.hass.components.mqtt.async_publish(
self.hass,
f"{self._topic}/config/{submodule}",
f"{parameters}".strip(),
qos=0,
retain=False,
)
async def async_push_image(self, image, obj, width=None, height=None):
"""update object image."""
image_id = hashlib.md5(image.encode("utf-8")).hexdigest()
rgb_image = await self.hass.async_add_executor_job(
image_to_rgb565, image, (width, height)
)
self.hass.data[DOMAIN][DATA_IMAGES][image_id] = rgb_image
cmd_topic = f"{self._topic}/command/{obj}.src"
rgb_image_url = (
f"{get_url(self.hass, allow_external=False)}/api/openhasp/serve/{image_id}"
)
_LOGGER.debug("Push %s with %s", cmd_topic, rgb_image_url)
await self.hass.components.mqtt.async_publish(
self.hass, cmd_topic, rgb_image_url, qos=0, retain=False
)
async def refresh(self):
"""Refresh objects in the SwitchPlate."""
_LOGGER.info("Refreshing %s", self._entry.data[CONF_NAME])
for obj in self._objects:
await obj.refresh()
await self.async_change_page(self._page)
async def async_load_page(self, path):
"""Load pages file on the SwitchPlate, existing pages will not be cleared."""
cmd_topic = f"{self._topic}/command"
_LOGGER.info("Load page %s to %s", path, cmd_topic)
if not self.hass.config.is_allowed_path(path):
_LOGGER.error("'%s' is not an allowed directory", path)
return
async def send_lines(lines):
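            # Accumulate lines and publish in chunks so each MQTT payload stays
            # below roughly 1000 bytes; flush the remaining buffer at the end.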
mqtt_payload_buffer = ""
for line in lines:
if len(mqtt_payload_buffer) + len(line) > 1000:
await self.hass.components.mqtt.async_publish(
self.hass, f"{cmd_topic}/jsonl", mqtt_payload_buffer, qos=0, retain=False
)
mqtt_payload_buffer = line
else:
mqtt_payload_buffer = mqtt_payload_buffer + line
await self.hass.components.mqtt.async_publish(
self.hass, f"{cmd_topic}/jsonl", mqtt_payload_buffer, qos=0, retain=False
)
try:
with open(path, "r") as pages_file:
if path.endswith(".json"):
json_data = json.load(pages_file)
jsonschema.validate(instance=json_data, schema=self.json_schema)
lines = []
for item in json_data:
if isinstance(item, dict):
lines.append(json.dumps(item) + "\n")
await send_lines(lines)
else:
await send_lines(pages_file)
await self.refresh()
except (IndexError, FileNotFoundError, IsADirectoryError, UnboundLocalError):
_LOGGER.error(
"File or data not present at the moment: %s",
os.path.basename(path),
)
except json.JSONDecodeError:
_LOGGER.error(
"Error decoding .json file: %s",
os.path.basename(path),
)
except jsonschema.ValidationError as e:
_LOGGER.error(
"Schema check failed for %s. Validation Error: %s",
os.path.basename(path),
e.message,
)
# pylint: disable=R0902
class HASPObject:
"""Representation of an HASP-LVGL object."""
def __init__(self, hass, plate_topic, config):
"""Initialize an object."""
self.hass = hass
self.obj_id = config[CONF_OBJID]
self.command_topic = f"{plate_topic}/command/{self.obj_id}."
self.state_topic = f"{plate_topic}/state/{self.obj_id}"
self.cached_properties = {}
self.properties = config.get(CONF_PROPERTIES)
self.event_services = config.get(CONF_EVENT)
self._tracked_property_templates = []
self._freeze_properties = []
self._subscriptions = []
async def enable_object(self):
"""Initialize object events and properties subscriptions."""
if self.event_services:
_LOGGER.debug("Setup event_services for '%s'", self.obj_id)
self._subscriptions.append(await self.async_listen_hasp_events())
for _property, template in self.properties.items():
self._tracked_property_templates.append(
await self.async_set_property(_property, template)
)
async def disable_object(self):
"""Remove subscriptions and event tracking."""
_LOGGER.debug("Disabling HASPObject %s", self.obj_id)
for subscription in self._subscriptions:
subscription()
self._subscriptions = []
for tracked_template in self._tracked_property_templates:
tracked_template.async_remove()
self._tracked_property_templates = []
async def async_set_property(self, _property, template):
"""Set HASP Object property to template value."""
@callback
async def _async_template_result_changed(event, updates):
track_template_result = updates.pop()
template = track_template_result.template
result = track_template_result.result
if isinstance(result, TemplateError) or result is None:
entity = event and event.data.get("entity_id")
_LOGGER.error(
"TemplateError('%s') "
"while processing template '%s' "
"in entity '%s'",
result,
template,
entity,
)
return
self.cached_properties[_property] = result
if _property in self._freeze_properties:
# Skip update to plate to avoid feedback loops
return
_LOGGER.debug(
"%s.%s - %s changed, updating with: %s",
self.obj_id,
_property,
template,
result,
)
await self.hass.components.mqtt.async_publish(
self.hass, self.command_topic + _property, result
)
property_template = async_track_template_result(
self.hass,
[TrackTemplate(template, None)],
_async_template_result_changed,
)
property_template.async_refresh()
return property_template
async def refresh(self):
"""Refresh based on cached values."""
for _property, result in self.cached_properties.items():
_LOGGER.debug("Refresh object %s.%s = %s", self.obj_id, _property, result)
await self.hass.components.mqtt.async_publish(
self.hass, self.command_topic + _property, result
)
async def async_listen_hasp_events(self):
"""Listen to messages on MQTT for HASP events."""
@callback
async def message_received(msg):
"""Process object state MQTT message."""
try:
message = HASP_EVENT_SCHEMA(json.loads(msg.payload))
if message[HASP_EVENT] == HASP_EVENT_DOWN:
# store properties that shouldn't be updated while button pressed
self._freeze_properties = message.keys()
elif message[HASP_EVENT] in [HASP_EVENT_UP, HASP_EVENT_RELEASE]:
self._freeze_properties = []
for event in self.event_services:
if event in message[HASP_EVENT]:
_LOGGER.debug(
"Service call for '%s' triggered by '%s' on '%s' with variables %s",
event,
msg.payload,
msg.topic,
message,
)
for service in self.event_services[event]:
await async_call_from_config(
self.hass,
service,
validate_config=False,
variables=message,
)
except vol.error.Invalid:
_LOGGER.debug(
"Could not handle openHASP event: '%s' on '%s'",
msg.payload,
msg.topic,
)
except json.decoder.JSONDecodeError as err:
_LOGGER.error(
"Error decoding received JSON message: %s on %s", err.doc, msg.topic
)
_LOGGER.debug("Subscribe to '%s' events on '%s'", self.obj_id, self.state_topic)
return await self.hass.components.mqtt.async_subscribe(
self.state_topic, message_received
)
| 33.826877
| 97
| 0.605562
|
57785e3742249e6e69fdb5606027bac6121de3f7
| 3,262
|
py
|
Python
|
evaluate.py
|
m4ln/HIWI_classification
|
f872c3da03bf999aeddd870eeed34332c8a9471a
|
[
"MIT"
] | 1
|
2020-09-07T10:02:07.000Z
|
2020-09-07T10:02:07.000Z
|
evaluate.py
|
m4ln/HIWI_classification
|
f872c3da03bf999aeddd870eeed34332c8a9471a
|
[
"MIT"
] | 1
|
2020-09-16T14:26:01.000Z
|
2020-09-16T14:26:01.000Z
|
evaluate.py
|
m4ln/HIWI_classification
|
f872c3da03bf999aeddd870eeed34332c8a9471a
|
[
"MIT"
] | 1
|
2020-09-07T11:29:47.000Z
|
2020-09-07T11:29:47.000Z
|
"""
Call in shell: python evaluate.py --dir <rootdir/experiment/> --epoch <epoch to load>
e.g.: python evaluate.py --dir Runs/se_resnet_trained_final/ --epoch 149
Loops over all folds, computes the test accuracy for each fold and stores the
accuracies in a file in the root folder of the experiment.
You may switch the model from resnet20 to se_resnet20 where it is instantiated
in evaluate() below (see the comment there).
"""
import torch
from torch.utils.data import Dataset, DataLoader
#import matplotlib.pyplot as plt
#import seaborn as sns; sns.set()
import numpy as np
import os
from os.path import join
import argparse
import Training_custom.load_dataset
from senet.baseline import resnet20
from senet.se_resnet import se_resnet20
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dir', type=str, metavar='', required=True, help='Directory of the x_folds.')
parser.add_argument('-e', '--epoch', type=int, metavar='', required=True, help='from which epoch should the model be loaded?')
args = parser.parse_args()
working_dir = os.getcwd()
rootpath = join(working_dir, args.dir)
def evaluate(fold_i):
    # path to a checkpoint
CHKPT = f"{args.dir}/fold_{fold_i}/checkpoints/train_chkpt_{args.epoch}.tar"
    # The file train_chkpt_100.tar is a dictionary that is a snapshot of the
    # training state of the 100th epoch.
    # Only the keys "train_loss", "val_loss" and "model_state_dict" are of interest here.
    # Train and val loss are 1D torch tensors that hold the mean loss of the
    # respective epoch (idx).
train_status = torch.load(CHKPT, map_location='cpu')
#print(train_status)
    # restore the model
model = resnet20(num_classes=4) #resnet20(num_classes=4) or alternatively: se_resnet20(num_classes=4, reduction=16)
model.load_state_dict(train_status['model_state_dict'])
model.eval()
test_data = Training_custom.load_dataset.imagewise_dataset(datadir = '/home/vbarth/HIWI/classificationDataValentin/mixed_cropped/test')
#dataloader = DataLoader(test_data, batch_size=16,
# shuffle=False, num_workers=0)
    acc = 0  # initialize accuracy counter
    i = 0  # sample counter
    for x, y in test_data:  # iterate over the test set
        x = x.unsqueeze(0)  # add the missing batch dimension to get a 4D tensor
y_pred = model(x).squeeze()
pred, ind = torch.max(y_pred, 0)
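        # ind is the index of the largest logit, i.e. the predicted class label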
if y.item() == ind.item():
            acc = acc + 1  # add one when the prediction was right, otherwise add nothing
        i = i + 1
        if i % 3000 == 0:  # print every 3000th sample
            print("Sample: ", i, "\n y_pred: ", y_pred, "\n pred: ", pred, "\n ind: ", ind, "\n y: ", y.item())
    acc = acc / len(test_data)  # accuracy = fraction of correctly classified samples
    # print("Accuracy: ", acc)
return f"folder: {fold_i}, accuracy: {acc} \n"
if __name__ == "__main__":
    n_files = len(os.listdir(rootpath))  # number of fold directories in the experiment
    # print(n_files)
accs = []
for fold in range(n_files):
print(f"Processing folder number {fold}")
acc_str = evaluate(fold+1)
accs.append(acc_str)
with open(join(rootpath, "accuracies"), 'w') as f:
for string in accs:
f.write(string)
| 34.336842
| 139
| 0.656039
|
16628614db6700e2dfa237f28d80ff917bc8cc77
| 1,231
|
py
|
Python
|
pytest-20210411/test_calculator.py
|
ti132520/pytest-vlog
|
c6688ed2b1c5e10b91057a22e672dffc1cd54d53
|
[
"CC0-1.0"
] | 1
|
2021-08-15T09:34:45.000Z
|
2021-08-15T09:34:45.000Z
|
pytest-20210411/test_calculator.py
|
yuhuifei/pytest-vlog
|
c9f4adbf83bd01613176700f939f57af3e1cdf86
|
[
"CC0-1.0"
] | null | null | null |
pytest-20210411/test_calculator.py
|
yuhuifei/pytest-vlog
|
c9f4adbf83bd01613176700f939f57af3e1cdf86
|
[
"CC0-1.0"
] | 2
|
2021-04-12T14:40:22.000Z
|
2021-08-28T08:47:27.000Z
|
import pytest
@pytest.fixture(scope='class', autouse=True)
def init_class():
    print('Test start: class-level output')
    yield
    print('Test end: class-level output')
@pytest.fixture(scope='session', autouse=True)
def init_session():
    print('Test start: session-level output')
    yield
    print('Test end: session-level output')
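# The init_calculator_fun and init_calculator_data fixtures used below are not
# defined in this file; with pytest they would typically live in a conftest.py.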
class TestCal:
# pytest
@pytest.mark.test_add
@pytest.mark.parametrize('a,b', [
[1, 2], [2, 3]
], ids=['one', 'two'])
    # Test case: calculator addition test
def test_add(self, init_calculator_fun, a, b):
assert init_calculator_fun.add_fun(a, b) in [3, 5]
@pytest.mark.parametrize('a,b', [
[1, 1], [3, 3]
    ], ids=['calculator division test 1', 'calculator division test 2'])
    # Test case: calculator division test
@pytest.mark.run(2)
def test_div(self, init_calculator_fun, a, b):
assert init_calculator_fun.div_fun(a, b) == 1
    # YAML parameter import 1
@pytest.mark.run(0)
def test_yaml_add(self, init_calculator_fun, init_calculator_data):
assert init_calculator_fun.add_fun(init_calculator_data[0], init_calculator_data[1]) > 0
    # YAML parameter import 2
@pytest.mark.second
def test_yaml_add2(self, init_calculator_fun, init_calculator_data):
assert init_calculator_fun.add_fun(init_calculator_data[0], init_calculator_data[1]) > 0
| 27.355556
| 96
| 0.659626
|
4db4d2e28a13bb5b290768e405030caf6dd27333
| 661
|
py
|
Python
|
covid_19_ph/users/tests/test_urls.py
|
reyesvicente/unofficial-covidtracker-ph
|
d1a0548f9f838ccbd0abb88fffb762784354c8a9
|
[
"MIT"
] | null | null | null |
covid_19_ph/users/tests/test_urls.py
|
reyesvicente/unofficial-covidtracker-ph
|
d1a0548f9f838ccbd0abb88fffb762784354c8a9
|
[
"MIT"
] | null | null | null |
covid_19_ph/users/tests/test_urls.py
|
reyesvicente/unofficial-covidtracker-ph
|
d1a0548f9f838ccbd0abb88fffb762784354c8a9
|
[
"MIT"
] | null | null | null |
import pytest
from django.urls import resolve, reverse
from covid_19_ph.users.models import User
pytestmark = pytest.mark.django_db
def test_detail(user: User):
assert (
reverse("users:detail", kwargs={"username": user.username})
== f"/users/{user.username}/"
)
assert resolve(f"/users/{user.username}/").view_name == "users:detail"
def test_update():
assert reverse("users:update") == "/users/~update/"
assert resolve("/users/~update/").view_name == "users:update"
def test_redirect():
assert reverse("users:redirect") == "/users/~redirect/"
assert resolve("/users/~redirect/").view_name == "users:redirect"
| 26.44
| 74
| 0.677761
|