Dataset column schema (one row per source file; ⌀ marks columns that may contain nulls):

| Column | Dtype | Values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
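Each row of the table above is one source file with its repository metadata, its raw text in `content`, and a few derived statistics. As a minimal sketch, assuming the table was exported from a Hugging Face dataset (the dataset identifier below is a placeholder, not the real name), rows with this schema could be streamed and inspected like this:

```python
# Hypothetical example: stream rows that follow the schema above.
# "some-org/python-code-dataset" is a placeholder identifier.
from datasets import load_dataset

ds = load_dataset("some-org/python-code-dataset", split="train", streaming=True)

for row in ds:
    # Repository metadata plus the raw file text in `content`.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    print(row["content"][:200])  # preview the first 200 characters
    break
```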
| Field | Value |
|---|---|
| hexsha | 0098e5e710a5211ce9b40a8bdbd193c98de9dc92 |
| size | 246 |
| ext | py |
| lang | Python |
| max_stars_repo_path | tests/test_util.py |
| max_stars_repo_name | yuanz271/vlgp |
| max_stars_repo_head_hexsha | 2f2c9f0c368904f762c75afce6d73dc26a2b911e |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2017-09-22T21:03:07.000Z |
| max_stars_repo_stars_event_max_datetime | 2017-09-22T21:03:07.000Z |
| max_issues_repo_path | tests/test_util.py |
| max_issues_repo_name | yuanz271/vlgp |
| max_issues_repo_head_hexsha | 2f2c9f0c368904f762c75afce6d73dc26a2b911e |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 1 |
| max_issues_repo_issues_event_min_datetime | 2017-12-07T14:45:45.000Z |
| max_issues_repo_issues_event_max_datetime | 2017-12-13T20:09:11.000Z |
| max_forks_repo_path | tests/test_util.py |
| max_forks_repo_name | yuanz271/vlgp |
| max_forks_repo_head_hexsha | 2f2c9f0c368904f762c75afce6d73dc26a2b911e |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
def test_save():
import pathlib
from vlgp.util import save
fit = {"trials": {}, "params": {}, "config": {"path": "test_save.npy"}}
save(fit, fit["config"]["path"])
path = pathlib.Path(fit["config"]["path"])
path.unlink()
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 27.333333 | 75 | 0.577236 |
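The trailing `avg_line_length`, `max_line_length` and `alphanum_fraction` values appear to be derived from `content`. A minimal sketch with assumed (plausible) definitions, since the exact formulas used to build the dataset are not given here:

```python
# Assumed definitions for the per-row statistics; not the dataset's
# official implementation.
def line_stats(content: str) -> dict:
    lines = content.splitlines() or [""]
    lengths = [len(line) for line in lines]
    alnum = sum(ch.isalnum() for ch in content)
    return {
        "avg_line_length": sum(lengths) / len(lengths),
        "max_line_length": max(lengths),
        "alphanum_fraction": alnum / len(content) if content else 0.0,
    }

# Usage: line_stats(row["content"]) for any row of the dataset.
```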
| Field | Value |
|---|---|
| hexsha | e44663e78f7ca0adaf226c8d73ec338864009396 |
| size | 22,414 |
| ext | py |
| lang | Python |
| max_stars_repo_path | core/storage/topic/gae_models.py |
| max_stars_repo_name | arora-ria/oppia |
| max_stars_repo_head_hexsha | 698d5593689f15ee36384a57036cf2e0150bb785 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2021-06-03T20:50:52.000Z |
| max_stars_repo_stars_event_max_datetime | 2021-06-03T20:50:52.000Z |
| max_issues_repo_path | core/storage/topic/gae_models.py |
| max_issues_repo_name | arora-ria/oppia |
| max_issues_repo_head_hexsha | 698d5593689f15ee36384a57036cf2e0150bb785 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | core/storage/topic/gae_models.py |
| max_forks_repo_name | arora-ria/oppia |
| max_forks_repo_head_hexsha | 698d5593689f15ee36384a57036cf2e0150bb785 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for topics and related constructs."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from constants import constants
from core.platform import models
import feconf
import python_utils
(base_models, user_models) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.user])
datastore_services = models.Registry.import_datastore_services()
class TopicSnapshotMetadataModel(base_models.BaseSnapshotMetadataModel):
"""Storage model for the metadata for a topic snapshot."""
pass
class TopicSnapshotContentModel(base_models.BaseSnapshotContentModel):
"""Storage model for the content of a topic snapshot."""
@staticmethod
def get_deletion_policy():
"""TopicSnapshotContentModel doesn't contain any data directly
corresponding to a user.
"""
return base_models.DELETION_POLICY.NOT_APPLICABLE
class TopicModel(base_models.VersionedModel):
"""Model for storing Topics.
This class should only be imported by the topic services file
and the topic model test file.
"""
SNAPSHOT_METADATA_CLASS = TopicSnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = TopicSnapshotContentModel
ALLOW_REVERT = False
# The name of the topic.
name = datastore_services.StringProperty(required=True, indexed=True)
# The canonical name of the topic, created by making `name` lowercase.
canonical_name = (
datastore_services.StringProperty(required=True, indexed=True))
# The abbreviated name of the topic.
abbreviated_name = (
datastore_services.StringProperty(indexed=True, default=''))
# The thumbnail filename of the topic.
thumbnail_filename = datastore_services.StringProperty(indexed=True)
# The thumbnail background color of the topic.
thumbnail_bg_color = datastore_services.StringProperty(indexed=True)
# The description of the topic.
description = datastore_services.TextProperty(indexed=False)
# This consists of the list of objects referencing canonical stories that
# are part of this topic.
canonical_story_references = (
datastore_services.JsonProperty(repeated=True, indexed=False))
# This consists of the list of objects referencing additional stories that
# are part of this topic.
additional_story_references = (
datastore_services.JsonProperty(repeated=True, indexed=False))
# The schema version for the story reference object on each of the above 2
# lists.
story_reference_schema_version = datastore_services.IntegerProperty(
required=True, indexed=True)
# This consists of the list of uncategorized skill ids that are not part of
# any subtopic.
uncategorized_skill_ids = (
datastore_services.StringProperty(repeated=True, indexed=True))
# The list of subtopics that are part of the topic.
subtopics = datastore_services.JsonProperty(repeated=True, indexed=False)
# The schema version of the subtopic dict.
subtopic_schema_version = (
datastore_services.IntegerProperty(required=True, indexed=True))
# The id for the next subtopic.
next_subtopic_id = datastore_services.IntegerProperty(required=True)
# The ISO 639-1 code for the language this topic is written in.
language_code = (
datastore_services.StringProperty(required=True, indexed=True))
# The url fragment of the topic.
url_fragment = (
datastore_services.StringProperty(required=True, indexed=True))
# Whether to show practice tab in the Topic viewer page.
practice_tab_is_displayed = datastore_services.BooleanProperty(
required=True, default=False)
# The content of the meta tag in the Topic viewer page.
meta_tag_content = datastore_services.StringProperty(
indexed=True, default='')
@staticmethod
def get_deletion_policy():
"""TopicModel doesn't contain any data directly corresponding
to a user.
"""
return base_models.DELETION_POLICY.NOT_APPLICABLE
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
"""
super(TopicModel, self)._trusted_commit(
committer_id, commit_type, commit_message, commit_cmds)
topic_rights = TopicRightsModel.get_by_id(self.id)
if topic_rights.topic_is_published:
status = constants.ACTIVITY_STATUS_PUBLIC
else:
status = constants.ACTIVITY_STATUS_PRIVATE
topic_commit_log_entry = TopicCommitLogEntryModel.create(
self.id, self.version, committer_id, commit_type,
commit_message, commit_cmds, status, False
)
topic_commit_log_entry.topic_id = self.id
topic_commit_log_entry.update_timestamps()
topic_commit_log_entry.put()
@classmethod
def get_by_name(cls, topic_name):
"""Gets TopicModel by topic_name. Returns None if the topic with
name topic_name doesn't exist.
Args:
topic_name: str. The name of the topic.
Returns:
TopicModel|None. The topic model of the topic or None if not
found.
"""
return TopicModel.query().filter(
cls.canonical_name == topic_name.lower()).filter(
cls.deleted == False).get() # pylint: disable=singleton-comparison
@classmethod
def get_by_url_fragment(cls, url_fragment):
"""Gets TopicModel by url_fragment. Returns None if the topic with
name url_fragment doesn't exist.
Args:
url_fragment: str. The url fragment of the topic.
Returns:
TopicModel|None. The topic model of the topic or None if not
found.
"""
# TODO(#10210): Make fetching by URL fragment faster.
return TopicModel.query().filter(
cls.url_fragment == url_fragment).filter(
cls.deleted == False).get() # pylint: disable=singleton-comparison
@staticmethod
def get_model_association_to_user():
"""Model does not contain user data."""
return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER
@classmethod
def get_export_policy(cls):
"""Model does not contain user data."""
return dict(super(cls, cls).get_export_policy(), **{
'name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'canonical_name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'abbreviated_name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'thumbnail_filename': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'thumbnail_bg_color': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'description': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'canonical_story_references':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'additional_story_references':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'story_reference_schema_version':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'uncategorized_skill_ids': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'subtopics': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'subtopic_schema_version': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'next_subtopic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'meta_tag_content': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'practice_tab_is_displayed':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'url_fragment': base_models.EXPORT_POLICY.NOT_APPLICABLE,
})
class TopicCommitLogEntryModel(base_models.BaseCommitLogEntryModel):
"""Log of commits to topics.
A new instance of this model is created and saved every time a commit to
TopicModel occurs.
The id for this model is of the form 'topic-[topic_id]-[version]'.
"""
# The id of the topic being edited.
topic_id = datastore_services.StringProperty(indexed=True, required=True)
@classmethod
def _get_instance_id(cls, topic_id, version):
"""This function returns the generated id for the get_commit function
in the parent class.
Args:
topic_id: str. The id of the topic being edited.
version: int. The version number of the topic after the commit.
Returns:
str. The commit id with the topic id and version number.
"""
return 'topic-%s-%s' % (topic_id, version)
@staticmethod
def get_model_association_to_user():
"""Model does not contain user data."""
return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER
@classmethod
def get_export_policy(cls):
"""This model is only stored for archive purposes. The commit log of
entities is not related to personal user data.
"""
return dict(super(cls, cls).get_export_policy(), **{
'topic_id': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
class TopicSummaryModel(base_models.BaseModel):
"""Summary model for an Oppia Topic.
This should be used whenever the content blob of the topic is not
needed (e.g. search results, etc).
A TopicSummaryModel instance stores the following information:
id, description, language_code, last_updated, created_on, version,
url_fragment.
The key of each instance is the topic id.
"""
# The name of the topic.
name = datastore_services.StringProperty(required=True, indexed=True)
# The canonical name of the topic, created by making `name` lowercase.
canonical_name = (
datastore_services.StringProperty(required=True, indexed=True))
# The ISO 639-1 code for the language this topic is written in.
language_code = (
datastore_services.StringProperty(required=True, indexed=True))
# The description of the topic.
description = datastore_services.TextProperty(indexed=False)
# The url fragment of the topic.
url_fragment = (
datastore_services.StringProperty(required=True, indexed=True))
# Time when the topic model was last updated (not to be
# confused with last_updated, which is the time when the
# topic *summary* model was last updated).
topic_model_last_updated = (
datastore_services.DateTimeProperty(required=True, indexed=True))
# Time when the topic model was created (not to be confused
# with created_on, which is the time when the topic *summary*
# model was created).
topic_model_created_on = (
datastore_services.DateTimeProperty(required=True, indexed=True))
# The number of canonical stories that are part of this topic.
canonical_story_count = (
datastore_services.IntegerProperty(required=True, indexed=True))
# The number of additional stories that are part of this topic.
additional_story_count = (
datastore_services.IntegerProperty(required=True, indexed=True))
# The total number of skills in the topic (including those that are
# uncategorized).
total_skill_count = (
datastore_services.IntegerProperty(required=True, indexed=True))
# The number of skills that are not part of any subtopic.
uncategorized_skill_count = (
datastore_services.IntegerProperty(required=True, indexed=True))
# The number of subtopics of the topic.
subtopic_count = (
datastore_services.IntegerProperty(required=True, indexed=True))
# The thumbnail filename of the topic.
thumbnail_filename = datastore_services.StringProperty(indexed=True)
# The thumbnail background color of the topic.
thumbnail_bg_color = datastore_services.StringProperty(indexed=True)
version = datastore_services.IntegerProperty(required=True)
@staticmethod
def get_deletion_policy():
"""TopicSummaryModel doesn't contain any data directly corresponding
to a user.
"""
return base_models.DELETION_POLICY.NOT_APPLICABLE
@staticmethod
def get_model_association_to_user():
"""Model does not contain user data."""
return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER
@classmethod
def get_export_policy(cls):
"""Model does not contain user data."""
return dict(super(cls, cls).get_export_policy(), **{
'name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'canonical_name': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'language_code': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'description': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'topic_model_last_updated':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'topic_model_created_on': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'canonical_story_count': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'additional_story_count': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'total_skill_count': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'uncategorized_skill_count':
base_models.EXPORT_POLICY.NOT_APPLICABLE,
'subtopic_count': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'thumbnail_filename': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'thumbnail_bg_color': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'version': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'url_fragment': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
class TopicRightsSnapshotMetadataModel(base_models.BaseSnapshotMetadataModel):
"""Storage model for the metadata for a topic rights snapshot."""
pass
class TopicRightsSnapshotContentModel(base_models.BaseSnapshotContentModel):
"""Storage model for the content of a topic rights snapshot."""
@staticmethod
def get_deletion_policy():
"""TopicRightsSnapshotContentModel contains data corresponding to
a user: inside the content field there is manager_ids field.
The pseudonymization of this model is handled in the wipeout_service
in the _pseudonymize_activity_models_with_associated_rights_models(),
based on the content_user_ids field of the
TopicRightsSnapshotMetadataModel.
"""
return base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE
@classmethod
def has_reference_to_user_id(cls, user_id):
"""Check whether TopicRightsSnapshotContentModel references the given
user. The manager_ids field is checked through content_user_ids field in
the TopicRightsSnapshotMetadataModel.
Args:
user_id: str. The ID of the user whose data should be checked.
Returns:
bool. Whether any models refer to the given user ID.
"""
return TopicRightsSnapshotMetadataModel.query(
TopicRightsSnapshotMetadataModel.content_user_ids == user_id
).get(keys_only=True) is not None
class TopicRightsModel(base_models.VersionedModel):
"""Storage model for rights related to a topic.
The id of each instance is the id of the corresponding topic.
"""
SNAPSHOT_METADATA_CLASS = TopicRightsSnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = TopicRightsSnapshotContentModel
ALLOW_REVERT = False
# The user_ids of the managers of this topic.
manager_ids = datastore_services.StringProperty(indexed=True, repeated=True)
# Whether this topic is published.
topic_is_published = datastore_services.BooleanProperty(
indexed=True, required=True, default=False)
@staticmethod
def get_deletion_policy():
"""TopicRightsModel contains data to pseudonymize corresponding
to a user: manager_ids field.
"""
return base_models.DELETION_POLICY.LOCALLY_PSEUDONYMIZE
@classmethod
def has_reference_to_user_id(cls, user_id):
"""Check whether TopicRightsModel references user.
Args:
user_id: str. The ID of the user whose data should be checked.
Returns:
bool. Whether any models refer to the given user ID.
"""
return cls.query(
cls.manager_ids == user_id
).get(keys_only=True) is not None
@classmethod
def get_by_user(cls, user_id):
"""Retrieves the rights object for all topics assigned to given user
Args:
user_id: str. ID of user.
Returns:
list(TopicRightsModel). The list of TopicRightsModel objects in
which the given user is a manager.
"""
topic_rights_models = cls.query(
cls.manager_ids == user_id
)
return topic_rights_models
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
"""
super(TopicRightsModel, self)._trusted_commit(
committer_id, commit_type, commit_message, commit_cmds)
topic_rights = TopicRightsModel.get_by_id(self.id)
if topic_rights.topic_is_published:
status = constants.ACTIVITY_STATUS_PUBLIC
else:
status = constants.ACTIVITY_STATUS_PRIVATE
TopicCommitLogEntryModel(
id=('rights-%s-%s' % (self.id, self.version)),
user_id=committer_id,
topic_id=self.id,
commit_type=commit_type,
commit_message=commit_message,
commit_cmds=commit_cmds,
version=None,
post_commit_status=status,
post_commit_community_owned=False,
post_commit_is_private=not topic_rights.topic_is_published
).put()
snapshot_metadata_model = self.SNAPSHOT_METADATA_CLASS.get(
self.get_snapshot_id(self.id, self.version))
snapshot_metadata_model.content_user_ids = list(sorted(set(
self.manager_ids)))
commit_cmds_user_ids = set()
for commit_cmd in commit_cmds:
user_id_attribute_names = python_utils.NEXT(
cmd['user_id_attribute_names']
for cmd in feconf.TOPIC_RIGHTS_CHANGE_ALLOWED_COMMANDS
if cmd['name'] == commit_cmd['cmd']
)
for user_id_attribute_name in user_id_attribute_names:
commit_cmds_user_ids.add(commit_cmd[user_id_attribute_name])
snapshot_metadata_model.commit_cmds_user_ids = list(
sorted(commit_cmds_user_ids))
snapshot_metadata_model.update_timestamps()
snapshot_metadata_model.put()
@staticmethod
def get_model_association_to_user():
"""Model is exported as one instance shared across users since multiple
users contribute to topics and their rights.
"""
return (
base_models
.MODEL_ASSOCIATION_TO_USER
.ONE_INSTANCE_SHARED_ACROSS_USERS)
@classmethod
def get_export_policy(cls):
"""Model contains user data."""
return dict(super(cls, cls).get_export_policy(), **{
'manager_ids': base_models.EXPORT_POLICY.EXPORTED,
'topic_is_published': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
@classmethod
def get_field_name_mapping_to_takeout_keys(cls):
"""Defines the mapping of field names to takeout keys since this model
is exported as one instance shared across users.
"""
return {
'manager_ids': 'managed_topic_ids'
}
@classmethod
def export_data(cls, user_id):
"""(Takeout) Export user-relevant properties of TopicRightsModel.
Args:
user_id: str. The user_id denotes which user's data to extract.
Returns:
dict. The user-relevant properties of TopicRightsModel in a dict
format. In this case, we are returning all the ids of the topics
this user manages.
"""
managed_topics = cls.get_all().filter(cls.manager_ids == user_id)
managed_topic_ids = [right.id for right in managed_topics]
return {
'managed_topic_ids': managed_topic_ids
}
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 40.458484 | 82 | 0.690283 |
| Field | Value |
|---|---|
| hexsha | c2a75f990c6d1722ddc9afe4af7a279cbe354de7 |
| size | 8,196 |
| ext | py |
| lang | Python |
| max_stars_repo_path | gazoo_device/tests/unit_tests/utility_tests/host_utils_test.py |
| max_stars_repo_name | google/gazoo-device |
| max_stars_repo_head_hexsha | f333b386f5993c8d4c9e12c89ebb620a0c4f5506 |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 14 |
| max_stars_repo_stars_event_min_datetime | 2020-11-05T23:23:32.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-03-01T18:59:29.000Z |
| max_issues_repo_path | gazoo_device/tests/unit_tests/utility_tests/host_utils_test.py |
| max_issues_repo_name | google/gazoo-device |
| max_issues_repo_head_hexsha | f333b386f5993c8d4c9e12c89ebb620a0c4f5506 |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | gazoo_device/tests/unit_tests/utility_tests/host_utils_test.py |
| max_forks_repo_name | google/gazoo-device |
| max_forks_repo_head_hexsha | f333b386f5993c8d4c9e12c89ebb620a0c4f5506 |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 5 |
| max_forks_repo_forks_event_min_datetime | 2021-05-20T22:52:51.000Z |
| max_forks_repo_forks_event_max_datetime | 2022-02-21T08:46:21.000Z |

content:

```python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for gazoo_device.utility.host_utils.py."""
import os
from unittest import mock
from gazoo_device import config
from gazoo_device import data_types
from gazoo_device import extensions
from gazoo_device.tests.unit_tests.utils import unit_test_case
from gazoo_device.utility import host_utils
import immutabledict
_TEST_PACKAGE = "foo_package"
_EXPECTED_KEY_DIR = os.path.join(config.KEYS_DIRECTORY, _TEST_PACKAGE)
_TEST_KEY_SSH_PRIVATE_NAME = "foo_key"
_EXPECTED_KEY_SSH_PRIVATE_PATH = os.path.join(_EXPECTED_KEY_DIR,
_TEST_KEY_SSH_PRIVATE_NAME)
_TEST_KEY_SSH_PUBLIC_NAME = "foo_key.pub"
_EXPECTED_KEY_SSH_PUBLIC_PATH = os.path.join(_EXPECTED_KEY_DIR,
_TEST_KEY_SSH_PUBLIC_NAME)
_TEST_KEY_OTHER_NAME = "bar_key"
_EXPECTED_KEY_OTHER_PATH = os.path.join(_EXPECTED_KEY_DIR, _TEST_KEY_OTHER_NAME)
_TEST_KEY_SSH_PRIVATE = data_types.KeyInfo(
_TEST_KEY_SSH_PRIVATE_NAME, type=data_types.KeyType.SSH,
package=_TEST_PACKAGE)
_TEST_KEY_SSH_PUBLIC = data_types.KeyInfo(
_TEST_KEY_SSH_PUBLIC_NAME, type=data_types.KeyType.SSH,
package=_TEST_PACKAGE)
_TEST_KEY_OTHER = data_types.KeyInfo(
_TEST_KEY_OTHER_NAME, type=data_types.KeyType.OTHER, package=_TEST_PACKAGE)
class HostUtilsTests(unit_test_case.UnitTestCase):
"""Unit tests for gazoo_device.utility.host_utils.py."""
def setUp(self):
super().setUp()
self.mock_download_key = mock.Mock()
extensions_keys_patch = mock.patch.object(
extensions, "keys", new=[_TEST_KEY_SSH_PRIVATE, _TEST_KEY_OTHER])
extensions_keys_patch.start()
self.addCleanup(extensions_keys_patch.stop)
package_info_patch = mock.patch.object(
extensions, "package_info", new={
_TEST_PACKAGE: immutabledict.immutabledict({
"version": "0.0.1",
"key_download_function": self.mock_download_key,
})
})
package_info_patch.start()
self.addCleanup(package_info_patch.stop)
def test_get_key_path(self):
"""Test that path returned by get_key_path() is correct."""
self.assertEqual(host_utils.get_key_path(_TEST_KEY_SSH_PRIVATE),
_EXPECTED_KEY_SSH_PRIVATE_PATH)
@mock.patch.object(os.path, "isdir", side_effect=[False, True])
@mock.patch.object(os, "makedirs")
@mock.patch.object(os.path, "exists", return_value=True)
@mock.patch.object(host_utils, "_set_key_permissions")
def test_download_key_creates_directory_if_its_absent(
self, unused_mock_set_key_permissions, unused_mock_exists, mock_makedirs,
mock_isdir):
"""Test that _download_key() creates package key dir if it's absent."""
host_utils._download_key(_TEST_KEY_SSH_PRIVATE)
mock_isdir.assert_called_once_with(_EXPECTED_KEY_DIR)
mock_makedirs.assert_called_once_with(_EXPECTED_KEY_DIR)
self.mock_download_key.assert_called_once_with(
_TEST_KEY_SSH_PRIVATE, _EXPECTED_KEY_SSH_PRIVATE_PATH)
@mock.patch.object(os.path, "isdir", return_value=True)
@mock.patch.object(os, "makedirs")
@mock.patch.object(os.path, "exists", return_value=True)
@mock.patch.object(host_utils, "_set_key_permissions")
def test_download_key_does_not_create_directory_if_its_present(
self, unused_mock_set_key_permissions, unused_mock_exists, mock_makedirs,
mock_isdir):
"""Test that _download_key() does not create key dir if it's present."""
host_utils._download_key(_TEST_KEY_SSH_PRIVATE)
mock_isdir.assert_called_once_with(_EXPECTED_KEY_DIR)
mock_makedirs.assert_not_called()
@mock.patch.object(os.path, "isdir", return_value=True)
@mock.patch.object(os, "makedirs")
@mock.patch.object(os.path, "exists", return_value=False)
def test_download_key_raises_if_key_isnt_downloaded(
self, mock_exists, unused_mock_makedirs, unused_mock_isdir):
"""Test that _download_key() raises an error if key isn't downloaded."""
error_regex = r"Key .*{}.* was not downloaded to {}".format(
_TEST_KEY_SSH_PRIVATE_NAME, _EXPECTED_KEY_SSH_PRIVATE_PATH)
with self.assertRaisesRegex(FileNotFoundError, error_regex):
host_utils._download_key(_TEST_KEY_SSH_PRIVATE)
mock_exists.assert_called_once_with(_EXPECTED_KEY_SSH_PRIVATE_PATH)
@mock.patch.object(os.path, "isdir", return_value=True)
@mock.patch.object(os, "makedirs")
@mock.patch.object(os.path, "exists", return_value=True)
@mock.patch.object(host_utils, "_set_key_permissions")
def test_download_key_sets_permissions_for_private_ssh_keys(
self, mock_set_key_permissions, unused_mock_exists, unused_mock_makedirs,
unused_mock_isdir):
"""Test that _download_key() changes permissions for SSH keys."""
host_utils._download_key(_TEST_KEY_SSH_PRIVATE)
mock_set_key_permissions.assert_called_once_with(
_EXPECTED_KEY_SSH_PRIVATE_PATH)
@mock.patch.object(os.path, "isdir", return_value=True)
@mock.patch.object(os, "makedirs")
@mock.patch.object(os.path, "exists", return_value=True)
@mock.patch.object(host_utils, "_set_key_permissions")
def test_download_key_doesnt_set_permissions_for_non_ssh_keys(
self, mock_set_key_permissions, unused_mock_exists, unused_mock_makedirs,
unused_mock_isdir):
"""Test that _download_key() doesn't change permissions for non-SSH keys."""
host_utils._download_key(_TEST_KEY_OTHER)
mock_set_key_permissions.assert_not_called()
@mock.patch.object(os.path, "isdir", return_value=True)
@mock.patch.object(os, "makedirs")
@mock.patch.object(os.path, "exists", return_value=True)
@mock.patch.object(host_utils, "_set_key_permissions")
def test_download_key_doesnt_set_permissions_for_public_ssh_keys(
self, mock_set_key_permissions, unused_mock_exists, unused_mock_mkdir,
unused_mock_isdir):
"""Test that _download_key() doesn't set permissions for public SSH keys."""
host_utils._download_key(_TEST_KEY_SSH_PUBLIC)
mock_set_key_permissions.assert_not_called()
@mock.patch.object(os, "chmod")
def test_set_key_permissions_already_correct(self, mock_chmod):
"""Test _set_key_permissions for already correct permissions."""
mock_stat_result = mock.Mock()
mock_stat_result.st_mode = int("400", 8)
with mock.patch.object(os, "stat", return_value=mock_stat_result):
host_utils._set_key_permissions(_EXPECTED_KEY_SSH_PRIVATE_PATH)
mock_chmod.assert_not_called()
@mock.patch.object(os, "chmod")
def test_set_key_permissions_incorrect_permissions(self, mock_chmod):
"""Test _set_key_permissions for incorrect permissions."""
mock_stat_result = mock.Mock()
mock_stat_result.st_mode = int("644", 8)
with mock.patch.object(os, "stat", return_value=mock_stat_result):
host_utils._set_key_permissions(_EXPECTED_KEY_SSH_PRIVATE_PATH)
mock_chmod.assert_called_once_with(_EXPECTED_KEY_SSH_PRIVATE_PATH,
int("400", 8))
@mock.patch.object(os, "chmod", side_effect=OSError("Some failure"))
def test_set_key_permissions_incorrect_permissions_failure(self, mock_chmod):
"""Test _set_key_permissions failing to correct permissions."""
mock_stat_result = mock.Mock()
mock_stat_result.st_mode = int("644", 8)
with mock.patch.object(os, "stat", return_value=mock_stat_result):
with self.assertRaisesRegex(ValueError, "Unable to change permissions"):
host_utils._set_key_permissions(_EXPECTED_KEY_SSH_PRIVATE_PATH)
mock_chmod.assert_called_once_with(_EXPECTED_KEY_SSH_PRIVATE_PATH,
int("400", 8))
if __name__ == "__main__":
unit_test_case.main()
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 46.305085 | 80 | 0.757565 |
| Field | Value |
|---|---|
| hexsha | fcb0f65a5009e09e280341b38d6c0ef974e8e632 |
| size | 664 |
| ext | py |
| lang | Python |
| max_stars_repo_path | runtime/python/Tools/scripts/lfcr.py |
| max_stars_repo_name | hwaipy/InteractionFreeNode |
| max_stars_repo_head_hexsha | 88642b68430f57b028fd0f276a5709f89279e30d |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 207 |
| max_stars_repo_stars_event_min_datetime | 2018-10-01T08:53:01.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-03-14T12:15:54.000Z |
| max_issues_repo_path | runtime/python/Tools/scripts/lfcr.py |
| max_issues_repo_name | hwaipy/InteractionFreeNode |
| max_issues_repo_head_hexsha | 88642b68430f57b028fd0f276a5709f89279e30d |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 8 |
| max_issues_repo_issues_event_min_datetime | 2019-06-29T14:18:51.000Z |
| max_issues_repo_issues_event_max_datetime | 2022-02-19T07:30:27.000Z |
| max_forks_repo_path | runtime/python/Tools/scripts/lfcr.py |
| max_forks_repo_name | hwaipy/InteractionFreeNode |
| max_forks_repo_head_hexsha | 88642b68430f57b028fd0f276a5709f89279e30d |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 76 |
| max_forks_repo_forks_event_min_datetime | 2020-03-16T01:47:46.000Z |
| max_forks_repo_forks_event_max_datetime | 2022-03-21T16:37:07.000Z |

content:

```python
#! /usr/bin/env python3
"Replace LF with CRLF in argument files. Print names of changed files."
import sys, re, os
def main():
for filename in sys.argv[1:]:
if os.path.isdir(filename):
print(filename, "Directory!")
continue
with open(filename, "rb") as f:
data = f.read()
if b'\0' in data:
print(filename, "Binary!")
continue
newdata = re.sub(b"\r?\n", b"\r\n", data)
if newdata != data:
print(filename)
with open(filename, "wb") as f:
f.write(newdata)
if __name__ == '__main__':
main()
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 26.56 | 73 | 0.504518 |
| Field | Value |
|---|---|
| hexsha | c4f7b1f05de5168ccc03165ca4c3a7ca5f09b852 |
| size | 23,059 |
| ext | py |
| lang | Python |
| max_stars_repo_path | openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py |
| max_stars_repo_name | Tilix4/OpenPype |
| max_stars_repo_head_hexsha | 8909bd890170880aa7ec8b673abaa25a9bdf40f2 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2022-02-08T15:40:41.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-02-08T15:40:41.000Z |
| max_issues_repo_path | openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py |
| max_issues_repo_name | zafrs/OpenPype |
| max_issues_repo_head_hexsha | 4b8e7e1ed002fc55b31307efdea70b0feaed474f |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | openpype/modules/ftrack/plugins/publish/integrate_ftrack_api.py |
| max_forks_repo_name | zafrs/OpenPype |
| max_forks_repo_head_hexsha | 4b8e7e1ed002fc55b31307efdea70b0feaed474f |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
"""Integrate components into ftrack
Requires:
context -> ftrackSession - connected ftrack.Session
instance -> ftrackComponentsList - list of components to integrate
Provides:
instance -> ftrackIntegratedAssetVersionsData
# legacy
instance -> ftrackIntegratedAssetVersions
"""
import os
import sys
import collections
import six
import pyblish.api
import clique
class IntegrateFtrackApi(pyblish.api.InstancePlugin):
""" Commit components to server. """
order = pyblish.api.IntegratorOrder+0.499
label = "Integrate Ftrack Api"
families = ["ftrack"]
def process(self, instance):
session = instance.context.data["ftrackSession"]
context = instance.context
component_list = instance.data.get("ftrackComponentsList")
if not component_list:
self.log.info(
"Instance don't have components to integrate to Ftrack."
" Skipping."
)
return
session = instance.context.data["ftrackSession"]
context = instance.context
parent_entity = None
default_asset_name = None
# If instance has set "ftrackEntity" or "ftrackTask" then use them from
# instance. Even if they are set to None. If they are set to None it
# has a reason. (like has different context)
if "ftrackEntity" in instance.data or "ftrackTask" in instance.data:
task_entity = instance.data.get("ftrackTask")
parent_entity = instance.data.get("ftrackEntity")
elif "ftrackEntity" in context.data or "ftrackTask" in context.data:
task_entity = context.data.get("ftrackTask")
parent_entity = context.data.get("ftrackEntity")
if task_entity:
default_asset_name = task_entity["name"]
parent_entity = task_entity["parent"]
if parent_entity is None:
self.log.info((
"Skipping ftrack integration. Instance \"{}\" does not"
" have specified ftrack entities."
).format(str(instance)))
return
if not default_asset_name:
default_asset_name = parent_entity["name"]
# Change status on task
asset_version_status_ids_by_name = {}
project_entity = instance.context.data.get("ftrackProject")
if project_entity:
project_schema = project_entity["project_schema"]
asset_version_statuses = (
project_schema.get_statuses("AssetVersion")
)
asset_version_status_ids_by_name = {
status["name"].lower(): status["id"]
for status in asset_version_statuses
}
self._set_task_status(instance, project_entity, task_entity, session)
# Prepare AssetTypes
asset_types_by_short = self._ensure_asset_types_exists(
session, component_list
)
self._fill_component_locations(session, component_list)
asset_versions_data_by_id = {}
used_asset_versions = []
# Iterate over components and publish
for data in component_list:
self.log.debug("data: {}".format(data))
# AssetType
asset_type_short = data["assettype_data"]["short"]
asset_type_entity = asset_types_by_short[asset_type_short]
# Asset
asset_data = data.get("asset_data") or {}
if "name" not in asset_data:
asset_data["name"] = default_asset_name
asset_entity = self._ensure_asset_exists(
session,
asset_data,
asset_type_entity["id"],
parent_entity["id"]
)
# Asset Version
asset_version_data = data.get("assetversion_data") or {}
asset_version_entity = self._ensure_asset_version_exists(
session,
asset_version_data,
asset_entity["id"],
task_entity,
asset_version_status_ids_by_name
)
# Component
self.create_component(session, asset_version_entity, data)
# Store asset version and components items that were
version_id = asset_version_entity["id"]
if version_id not in asset_versions_data_by_id:
asset_versions_data_by_id[version_id] = {
"asset_version": asset_version_entity,
"component_items": []
}
asset_versions_data_by_id[version_id]["component_items"].append(
data
)
# Backwards compatibility
if asset_version_entity not in used_asset_versions:
used_asset_versions.append(asset_version_entity)
instance.data["ftrackIntegratedAssetVersionsData"] = (
asset_versions_data_by_id
)
# Backwards compatibility
asset_versions_key = "ftrackIntegratedAssetVersions"
if asset_versions_key not in instance.data:
instance.data[asset_versions_key] = []
for asset_version in used_asset_versions:
if asset_version not in instance.data[asset_versions_key]:
instance.data[asset_versions_key].append(asset_version)
def _set_task_status(self, instance, project_entity, task_entity, session):
if not project_entity:
self.log.info("Task status won't be set, project is not known.")
return
if not task_entity:
self.log.info("Task status won't be set, task is not known.")
return
status_name = instance.context.data.get("ftrackStatus")
if not status_name:
self.log.info("Ftrack status name is not set.")
return
self.log.debug(
"Ftrack status name will be (maybe) set to \"{}\"".format(
status_name
)
)
project_schema = project_entity["project_schema"]
task_statuses = project_schema.get_statuses(
"Task", task_entity["type_id"]
)
task_statuses_by_low_name = {
status["name"].lower(): status for status in task_statuses
}
status = task_statuses_by_low_name.get(status_name.lower())
if not status:
self.log.warning((
"Task status \"{}\" won't be set,"
" status is now allowed on task type \"{}\"."
).format(status_name, task_entity["type"]["name"]))
return
self.log.info("Setting task status to \"{}\"".format(status_name))
task_entity["status"] = status
try:
session.commit()
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)
def _fill_component_locations(self, session, component_list):
components_by_location_name = collections.defaultdict(list)
components_by_location_id = collections.defaultdict(list)
for component_item in component_list:
# Location entity can be prefilled
# - this is not recommended as connection to ftrack server may
# be lost and in that case the entity is not valid when gets
# to this plugin
location = component_item.get("component_location")
if location is not None:
continue
# Collect location id
location_id = component_item.get("component_location_id")
if location_id:
components_by_location_id[location_id].append(
component_item
)
continue
location_name = component_item.get("component_location_name")
if location_name:
components_by_location_name[location_name].append(
component_item
)
continue
# Skip if there is nothing to do
if not components_by_location_name and not components_by_location_id:
return
# Query locations
query_filters = []
if components_by_location_id:
joined_location_ids = ",".join([
'"{}"'.format(location_id)
for location_id in components_by_location_id
])
query_filters.append("id in ({})".format(joined_location_ids))
if components_by_location_name:
joined_location_names = ",".join([
'"{}"'.format(location_name)
for location_name in components_by_location_name
])
query_filters.append("name in ({})".format(joined_location_names))
locations = session.query(
"select id, name from Location where {}".format(
" or ".join(query_filters)
)
).all()
# Fill locations in components
for location in locations:
location_id = location["id"]
location_name = location["name"]
if location_id in components_by_location_id:
for component in components_by_location_id[location_id]:
component["component_location"] = location
if location_name in components_by_location_name:
for component in components_by_location_name[location_name]:
component["component_location"] = location
def _ensure_asset_types_exists(self, session, component_list):
"""Make sure that all AssetType entities exists for integration.
Returns:
dict: All asset types by short name.
"""
# Query existing asset types
asset_types = session.query("select id, short from AssetType").all()
        # Store all existing short names
asset_type_shorts = {asset_type["short"] for asset_type in asset_types}
# Check which asset types are missing and store them
asset_type_names_by_missing_shorts = {}
default_short_name = "upload"
for data in component_list:
asset_type_data = data.get("assettype_data") or {}
asset_type_short = asset_type_data.get("short")
if not asset_type_short:
# Use default asset type name if not set and change the
# input data
asset_type_short = default_short_name
asset_type_data["short"] = asset_type_short
data["assettype_data"] = asset_type_data
if (
# Skip if short name exists
asset_type_short in asset_type_shorts
# Skip if short name was already added to missing types
# and asset type name is filled
# - if asset type name is missing then try use name from other
# data
or asset_type_names_by_missing_shorts.get(asset_type_short)
):
continue
asset_type_names_by_missing_shorts[asset_type_short] = (
asset_type_data.get("name")
)
# Create missing asset types if there are any
if asset_type_names_by_missing_shorts:
self.log.info("Creating asset types with short names: {}".format(
", ".join(asset_type_names_by_missing_shorts.keys())
))
for missing_short, type_name in (
asset_type_names_by_missing_shorts.items()
):
# Use short for name if name is not defined
if not type_name:
type_name = missing_short
# Use short name also for name
# - there is not other source for 'name'
session.create(
"AssetType",
{
"short": missing_short,
"name": type_name
}
)
# Commit creation
session.commit()
# Requery asset types
asset_types = session.query(
"select id, short from AssetType"
).all()
return {asset_type["short"]: asset_type for asset_type in asset_types}
def _ensure_asset_exists(
self, session, asset_data, asset_type_id, parent_id
):
asset_name = asset_data["name"]
asset_entity = self._query_asset(
session, asset_name, asset_type_id, parent_id
)
if asset_entity is not None:
return asset_entity
asset_data = {
"name": asset_name,
"type_id": asset_type_id,
"context_id": parent_id
}
self.log.info("Created new Asset with data: {}.".format(asset_data))
session.create("Asset", asset_data)
session.commit()
return self._query_asset(session, asset_name, asset_type_id, parent_id)
def _query_asset(self, session, asset_name, asset_type_id, parent_id):
return session.query(
(
"select id from Asset"
" where name is \"{}\""
" and type_id is \"{}\""
" and context_id is \"{}\""
).format(asset_name, asset_type_id, parent_id)
).first()
def _ensure_asset_version_exists(
self,
session,
asset_version_data,
asset_id,
task_entity,
status_ids_by_name
):
task_id = None
if task_entity:
task_id = task_entity["id"]
status_name = asset_version_data.pop("status_name", None)
# Try query asset version by criteria (asset id and version)
version = asset_version_data.get("version") or 0
asset_version_entity = self._query_asset_version(
session, version, asset_id
)
# Prepare comment value
comment = asset_version_data.get("comment") or ""
if asset_version_entity is not None:
changed = False
if comment != asset_version_entity["comment"]:
asset_version_entity["comment"] = comment
changed = True
if task_id != asset_version_entity["task_id"]:
asset_version_entity["task_id"] = task_id
changed = True
if changed:
session.commit()
else:
new_asset_version_data = {
"version": version,
"asset_id": asset_id
}
if task_id:
new_asset_version_data["task_id"] = task_id
if comment:
new_asset_version_data["comment"] = comment
self.log.info("Created new AssetVersion with data {}".format(
new_asset_version_data
))
session.create("AssetVersion", new_asset_version_data)
session.commit()
asset_version_entity = self._query_asset_version(
session, version, asset_id
)
if status_name:
status_id = status_ids_by_name.get(status_name.lower())
if not status_id:
self.log.info((
"Ftrack status with name \"{}\""
" for AssetVersion was not found."
).format(status_name))
elif asset_version_entity["status_id"] != status_id:
asset_version_entity["status_id"] = status_id
session.commit()
# Set custom attributes if there were any set
custom_attrs = asset_version_data.get("custom_attributes") or {}
for attr_key, attr_value in custom_attrs.items():
if attr_key in asset_version_entity["custom_attributes"]:
try:
asset_version_entity["custom_attributes"][attr_key] = (
attr_value
)
session.commit()
continue
except Exception:
session.rollback()
session._configure_locations()
self.log.warning(
(
"Custom Attrubute \"{0}\" is not available for"
" AssetVersion <{1}>. Can't set it's value to: \"{2}\""
).format(
attr_key, asset_version_entity["id"], str(attr_value)
)
)
return asset_version_entity
def _query_asset_version(self, session, version, asset_id):
return session.query(
(
"select id, task_id, comment from AssetVersion"
" where version is \"{}\" and asset_id is \"{}\""
).format(version, asset_id)
).first()
def create_component(self, session, asset_version_entity, data):
component_data = data.get("component_data") or {}
if not component_data.get("name"):
component_data["name"] = "main"
version_id = asset_version_entity["id"]
component_data["version_id"] = version_id
component_entity = session.query(
(
"select id, name from Component where name is \"{}\""
" and version_id is \"{}\""
).format(component_data["name"], version_id)
).first()
component_overwrite = data.get("component_overwrite", False)
location = data.get("component_location", session.pick_location())
# Overwrite existing component data if requested.
if component_entity and component_overwrite:
origin_location = session.query(
"Location where name is \"ftrack.origin\""
).one()
# Removing existing members from location
components = list(component_entity.get("members", []))
components += [component_entity]
for component in components:
for loc in component["component_locations"]:
if location["id"] == loc["location_id"]:
location.remove_component(
component, recursive=False
)
# Deleting existing members on component entity
for member in component_entity.get("members", []):
session.delete(member)
del(member)
try:
session.commit()
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)
# Reset members in memory
if "members" in component_entity.keys():
component_entity["members"] = []
# Add components to origin location
try:
collection = clique.parse(data["component_path"])
except ValueError:
# Assume its a single file
# Changing file type
name, ext = os.path.splitext(data["component_path"])
component_entity["file_type"] = ext
origin_location.add_component(
component_entity, data["component_path"]
)
else:
# Changing file type
component_entity["file_type"] = collection.format("{tail}")
# Create member components for sequence.
for member_path in collection:
size = 0
try:
size = os.path.getsize(member_path)
except OSError:
pass
name = collection.match(member_path).group("index")
member_data = {
"name": name,
"container": component_entity,
"size": size,
"file_type": os.path.splitext(member_path)[-1]
}
component = session.create(
"FileComponent", member_data
)
origin_location.add_component(
component, member_path, recursive=False
)
component_entity["members"].append(component)
# Add components to location.
location.add_component(
component_entity, origin_location, recursive=True
)
data["component"] = component_entity
self.log.info(
(
"Overwriting Component with path: {0}, data: {1},"
" location: {2}"
).format(
data["component_path"],
component_data,
location
)
)
# Extracting metadata, and adding after entity creation. This is
# due to a ftrack_api bug where you can't add metadata on creation.
component_metadata = component_data.pop("metadata", {})
# Create new component if none exists.
new_component = False
if not component_entity:
component_entity = asset_version_entity.create_component(
data["component_path"],
data=component_data,
location=location
)
data["component"] = component_entity
self.log.info(
(
"Created new Component with path: {0}, data: {1},"
" metadata: {2}, location: {3}"
).format(
data["component_path"],
component_data,
component_metadata,
location
)
)
new_component = True
# Adding metadata
existing_component_metadata = component_entity["metadata"]
existing_component_metadata.update(component_metadata)
component_entity["metadata"] = existing_component_metadata
# if component_data['name'] = 'ftrackreview-mp4-mp4':
# assetversion_entity["thumbnail_id"]
# Setting assetversion thumbnail
if data.get("thumbnail"):
asset_version_entity["thumbnail_id"] = component_entity["id"]
# Inform user about no changes to the database.
if (
component_entity
and not component_overwrite
and not new_component
):
data["component"] = component_entity
self.log.info(
"Found existing component, and no request to overwrite. "
"Nothing has been changed."
)
else:
# Commit changes.
try:
session.commit()
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 36.718153 | 79 | 0.556789 |
| Field | Value |
|---|---|
| hexsha | 8beb1cb7ec15578cd5f02c2adc90e03a752128e2 |
| size | 13,793 |
| ext | py |
| lang | Python |
| max_stars_repo_path | tests/test_streams.py |
| max_stars_repo_name | VisionsOfDrifting/pytube |
| max_stars_repo_head_hexsha | 851eb9969c08b2bcb34ec5113380c4569e122651 |
| max_stars_repo_licenses | ["Unlicense"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | tests/test_streams.py |
| max_issues_repo_name | VisionsOfDrifting/pytube |
| max_issues_repo_head_hexsha | 851eb9969c08b2bcb34ec5113380c4569e122651 |
| max_issues_repo_licenses | ["Unlicense"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | tests/test_streams.py |
| max_forks_repo_name | VisionsOfDrifting/pytube |
| max_forks_repo_head_hexsha | 851eb9969c08b2bcb34ec5113380c4569e122651 |
| max_forks_repo_licenses | ["Unlicense"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
import os
import random
import pytest
from datetime import datetime
from unittest import mock
from unittest.mock import MagicMock, Mock
from urllib.error import HTTPError
from pytube import request, Stream
@mock.patch("pytube.streams.request")
def test_stream_to_buffer(mock_request, cipher_signature):
# Given
stream_bytes = iter(
[
bytes(os.urandom(8 * 1024)),
bytes(os.urandom(8 * 1024)),
bytes(os.urandom(8 * 1024)),
]
)
mock_request.stream.return_value = stream_bytes
buffer = MagicMock()
# When
cipher_signature.streams[0].stream_to_buffer(buffer)
# Then
assert buffer.write.call_count == 3
def test_filesize(cipher_signature):
assert cipher_signature.streams[0].filesize == 28282013
def test_filesize_approx(cipher_signature):
stream = cipher_signature.streams[0]
assert stream.filesize_approx == 28309811
stream.bitrate = None
assert stream.filesize_approx == 28282013
def test_default_filename(cipher_signature):
expected = "YouTube Rewind 2019 For the Record YouTubeRewind.mp4"
stream = cipher_signature.streams[0]
assert stream.default_filename == expected
def test_title(cipher_signature):
expected = "YouTube Rewind 2019: For the Record | #YouTubeRewind"
assert cipher_signature.title == expected
def test_expiration(cipher_signature):
assert cipher_signature.streams[0].expiration >= datetime(2020, 10, 30, 5, 39, 41)
def test_caption_tracks(presigned_video):
assert len(presigned_video.caption_tracks) == 13
def test_captions(presigned_video):
assert len(presigned_video.captions) == 13
def test_description(cipher_signature):
expected = (
"In 2018, we made something you didn’t like. "
"For Rewind 2019, let’s see what you DID like.\n\n"
"Celebrating the creators, music and moments "
"that mattered most to you in 2019. \n\n"
"To learn how the top lists in Rewind were generated: "
"https://rewind.youtube/about\n\n"
"Top lists featured the following channels:\n\n"
"@1MILLION Dance Studio \n@A4 \n@Anaysa \n"
"@Andymation \n@Ariana Grande \n@Awez Darbar \n"
"@AzzyLand \n@Billie Eilish \n@Black Gryph0n \n"
"@BLACKPINK \n@ChapkisDanceUSA \n@Daddy Yankee \n"
"@David Dobrik \n@Dude Perfect \n@Felipe Neto \n"
"@Fischer's-フィッシャーズ- \n@Galen Hooks \n@ibighit \n"
"@James Charles \n@jeffreestar \n@Jelly \n@Kylie Jenner \n"
"@LazarBeam \n@Lil Dicky \n@Lil Nas X \n@LOUD \n@LOUD Babi \n"
"@LOUD Coringa \n@Magnet World \n@MrBeast \n"
"@Nilson Izaias Papinho Oficial \n@Noah Schnapp\n"
"@백종원의 요리비책 Paik's Cuisine \n@Pencilmation \n@PewDiePie \n"
"@SethEverman \n@shane \n@Shawn Mendes \n@Team Naach \n"
"@whinderssonnunes \n@워크맨-Workman \n@하루한끼 one meal a day \n\n"
"To see the full list of featured channels in Rewind 2019, "
"visit: https://rewind.youtube/about"
)
assert cipher_signature.description == expected
def test_rating(cipher_signature):
"""Test the rating value of a YouTube object.
This changes each time we rebuild the json files, so we want to use
an estimate of where it will be. The two values seen to make this
estimate were 2.073431 and 2.0860765. This represents a range of
~0.007 below and ~0.006 above 2.08. Allowing for up to 0.02 in either
direction should provide a steady indicator of correctness.
"""
assert abs(cipher_signature.rating - 2.08) < 0.02
def test_length(cipher_signature):
assert cipher_signature.length == 337
def test_views(cipher_signature):
assert cipher_signature.views >= 108531745
@mock.patch(
"pytube.streams.request.head", MagicMock(return_value={"content-length": "6796391"})
)
@mock.patch(
"pytube.request.stream",
MagicMock(return_value=iter([str(random.getrandbits(8 * 1024))])),
)
def test_download(cipher_signature):
with mock.patch("pytube.streams.open", mock.mock_open(), create=True):
stream = cipher_signature.streams[0]
stream.download()
@mock.patch(
"pytube.streams.request.head", MagicMock(return_value={"content-length": "16384"})
)
@mock.patch(
"pytube.request.stream",
MagicMock(return_value=iter([str(random.getrandbits(8 * 1024))])),
)
@mock.patch("pytube.streams.target_directory", MagicMock(return_value="/target"))
def test_download_with_prefix(cipher_signature):
with mock.patch("pytube.streams.open", mock.mock_open(), create=True):
stream = cipher_signature.streams[0]
file_path = stream.download(filename_prefix="prefix")
assert file_path == os.path.join(
"/target",
"prefixYouTube Rewind 2019 For the Record YouTubeRewind.mp4"
)
@mock.patch(
"pytube.streams.request.head", MagicMock(return_value={"content-length": "16384"})
)
@mock.patch(
"pytube.request.stream",
MagicMock(return_value=iter([str(random.getrandbits(8 * 1024))])),
)
@mock.patch("pytube.streams.target_directory", MagicMock(return_value="/target"))
def test_download_with_filename(cipher_signature):
with mock.patch("pytube.streams.open", mock.mock_open(), create=True):
stream = cipher_signature.streams[0]
file_path = stream.download(filename="cool name bro")
assert file_path == os.path.join(
"/target",
"cool name bro.mp4"
)
@mock.patch(
"pytube.streams.request.head", MagicMock(return_value={"content-length": "16384"})
)
@mock.patch(
"pytube.request.stream",
MagicMock(return_value=iter([str(random.getrandbits(8 * 1024))])),
)
@mock.patch("pytube.streams.target_directory", MagicMock(return_value="/target"))
@mock.patch("os.path.isfile", MagicMock(return_value=True))
def test_download_with_existing(cipher_signature):
with mock.patch("pytube.streams.open", mock.mock_open(), create=True):
stream = cipher_signature.streams[0]
os.path.getsize = Mock(return_value=stream.filesize)
file_path = stream.download()
assert file_path == os.path.join(
"/target",
"YouTube Rewind 2019 For the Record YouTubeRewind.mp4"
)
assert not request.stream.called
@mock.patch(
"pytube.streams.request.head", MagicMock(return_value={"content-length": "16384"})
)
@mock.patch(
"pytube.request.stream",
MagicMock(return_value=iter([str(random.getrandbits(8 * 1024))])),
)
@mock.patch("pytube.streams.target_directory", MagicMock(return_value="/target"))
@mock.patch("os.path.isfile", MagicMock(return_value=True))
def test_download_with_existing_no_skip(cipher_signature):
with mock.patch("pytube.streams.open", mock.mock_open(), create=True):
stream = cipher_signature.streams[0]
os.path.getsize = Mock(return_value=stream.filesize)
file_path = stream.download(skip_existing=False)
assert file_path == os.path.join(
"/target",
"YouTube Rewind 2019 For the Record YouTubeRewind.mp4"
)
assert request.stream.called
def test_progressive_streams_return_includes_audio_track(cipher_signature):
stream = cipher_signature.streams.filter(progressive=True)[0]
assert stream.includes_audio_track
def test_progressive_streams_return_includes_video_track(cipher_signature):
stream = cipher_signature.streams.filter(progressive=True)[0]
assert stream.includes_video_track
@mock.patch(
"pytube.streams.request.head", MagicMock(return_value={"content-length": "16384"})
)
@mock.patch(
"pytube.request.stream",
MagicMock(return_value=iter([str(random.getrandbits(8 * 1024))])),
)
def test_on_progress_hook(cipher_signature):
callback_fn = mock.MagicMock()
cipher_signature.register_on_progress_callback(callback_fn)
with mock.patch("pytube.streams.open", mock.mock_open(), create=True):
stream = cipher_signature.streams[0]
stream.download()
assert callback_fn.called
args, _ = callback_fn.call_args
assert len(args) == 3
stream, _, _ = args
assert isinstance(stream, Stream)
@mock.patch(
"pytube.streams.request.head", MagicMock(return_value={"content-length": "16384"})
)
@mock.patch(
"pytube.request.stream",
MagicMock(return_value=iter([str(random.getrandbits(8 * 1024))])),
)
def test_on_complete_hook(cipher_signature):
callback_fn = mock.MagicMock()
cipher_signature.register_on_complete_callback(callback_fn)
with mock.patch("pytube.streams.open", mock.mock_open(), create=True):
stream = cipher_signature.streams[0]
stream.download()
assert callback_fn.called
def test_author(cipher_signature):
expected = "Test author"
cipher_signature._player_response = {"videoDetails": {"author": expected}}
assert cipher_signature.author == expected
expected = "unknown"
cipher_signature.author = None
cipher_signature._player_response = {'key': 'value'}
assert cipher_signature.author == expected
def test_thumbnail_when_in_details(cipher_signature):
expected = "some url"
cipher_signature._player_response = {
"videoDetails": {"thumbnail": {"thumbnails": [{"url": expected}]}}
}
assert cipher_signature.thumbnail_url == expected
def test_thumbnail_when_not_in_details(cipher_signature):
expected = "https://img.youtube.com/vi/2lAe1cqCOXo/maxresdefault.jpg"
cipher_signature._player_response = {'key': 'value'}
assert cipher_signature.thumbnail_url == expected
def test_repr_for_audio_streams(cipher_signature):
stream = str(cipher_signature.streams.filter(only_audio=True)[0])
expected = (
'<Stream: itag="140" mime_type="audio/mp4" abr="128kbps" '
'acodec="mp4a.40.2" progressive="False" type="audio">'
)
assert stream == expected
def test_repr_for_video_streams(cipher_signature):
stream = str(cipher_signature.streams.filter(only_video=True)[0])
expected = (
'<Stream: itag="137" mime_type="video/mp4" res="1080p" fps="24fps" '
'vcodec="avc1.640028" progressive="False" type="video">'
)
assert stream == expected
def test_repr_for_progressive_streams(cipher_signature):
stream = str(cipher_signature.streams.filter(progressive=True)[0])
expected = (
'<Stream: itag="18" mime_type="video/mp4" res="360p" fps="24fps" '
'vcodec="avc1.42001E" acodec="mp4a.40.2" progressive="True" '
'type="video">'
)
assert stream == expected
def test_repr_for_adaptive_streams(cipher_signature):
stream = str(cipher_signature.streams.filter(adaptive=True)[0])
expected = (
'<Stream: itag="137" mime_type="video/mp4" res="1080p" fps="24fps" '
'vcodec="avc1.640028" progressive="False" type="video">'
)
assert stream == expected
def test_segmented_stream_on_404(cipher_signature):
stream = cipher_signature.streams.filter(adaptive=True)[0]
with mock.patch('pytube.request.head') as mock_head:
with mock.patch('pytube.request.urlopen') as mock_url_open:
# Mock the responses to YouTube
mock_url_open_object = mock.Mock()
# These are our 4 "segments" of a dash stream
# The first explains how many pieces there are, and
# the rest are those pieces
responses = [
b'Raw_data\r\nSegment-Count: 3',
b'a',
b'b',
b'c',
]
joined_responses = b''.join(responses)
# We create response headers to match the segments
response_headers = [
{
'content-length': len(r),
'Content-Range': '0-%s/%s' % (str(len(r)), str(len(r)))
}
for r in responses
]
# Request order for stream:
# 1. get(url&sn=0)
# 2. head(url&sn=[1,2,3])
# 3. info(url) -> 404
# 4. get(url&sn=0)
# 5. get(url&sn=[1,2,3])
# Handle filesize requests
mock_head.side_effect = [
HTTPError('', 404, 'Not Found', '', ''),
*response_headers[1:],
]
# Each response must be followed by None, to break iteration
# in the stream() function
mock_url_open_object.read.side_effect = [
responses[0], None,
responses[1], None,
responses[2], None,
responses[3], None,
]
# This handles the HEAD requests to get content-length
mock_url_open_object.info.side_effect = [
HTTPError('', 404, 'Not Found', '', ''),
*response_headers
]
mock_url_open.return_value = mock_url_open_object
with mock.patch('builtins.open', new_callable=mock.mock_open) as mock_open:
file_handle = mock_open.return_value.__enter__.return_value
fp = stream.download()
full_content = b''
for call in file_handle.write.call_args_list:
args, kwargs = call
full_content += b''.join(args)
assert full_content == joined_responses
mock_open.assert_called_once_with(fp, 'wb')
def test_segmented_only_catches_404(cipher_signature):
stream = cipher_signature.streams.filter(adaptive=True)[0]
with mock.patch('pytube.request.stream') as mock_stream:
mock_stream.side_effect = HTTPError('', 403, 'Forbidden', '', '')
with mock.patch("pytube.streams.open", mock.mock_open(), create=True):
with pytest.raises(HTTPError):
stream.download()
| 35.366667
| 88
| 0.662147
|
3c7123f5a4314483d89f0f85388d8e38e0c1f273
| 14,426
|
py
|
Python
|
malib/environments/particle.py
|
renos/Emergent-Multiagent-Strategies
|
afaf6acfdd6d505668f06ac23dfb33e872ab2872
|
[
"MIT"
] | 23
|
2020-07-05T11:13:00.000Z
|
2022-01-28T00:24:41.000Z
|
malib/environments/particle.py
|
renos/Emergent-Multiagent-Strategies
|
afaf6acfdd6d505668f06ac23dfb33e872ab2872
|
[
"MIT"
] | 2
|
2020-09-07T19:09:40.000Z
|
2021-06-02T02:21:51.000Z
|
malib/environments/particle.py
|
renos/Emergent-Multiagent-Strategies
|
afaf6acfdd6d505668f06ac23dfb33e872ab2872
|
[
"MIT"
] | 8
|
2020-07-06T07:24:37.000Z
|
2021-09-27T20:28:25.000Z
|
import gym
from gym import spaces
import numpy as np
from multiagent.multi_discrete import MultiDiscrete
from malib.spaces import Box, MASpace, MAEnvSpec
# environment for all agents in the multiagent world
# currently code assumes that no agents will be created/destroyed at runtime!
def make_particle_env(game_name, benchmark=False):
import multiagent.scenarios as scenarios
scenario = scenarios.load(game_name + ".py").Scenario()
# create world
world = scenario.make_world()
# create multiagent environment
if benchmark:
env = ParticleEnv(world, scenario.reset_world, scenario.reward, scenario.observation, scenario.benchmark_data)
else:
env = ParticleEnv(world, scenario.reset_world, scenario.reward, scenario.observation)
return env
# environment for all agents in the multiagent world
# currently code assumes that no agents will be created/destroyed at runtime!
class ParticleEnv(gym.Env):
metadata = {
'render.modes' : ['human', 'rgb_array']
}
def terminate(self):
pass
def __init__(self, world, reset_callback=None, reward_callback=None,
observation_callback=None, info_callback=None,
done_callback=None, shared_viewer=True):
self.world = world
self.agents = self.world.policy_agents
# set required vectorized gym env property
self.n = len(world.policy_agents)
# scenario callbacks
self.reset_callback = reset_callback
self.reward_callback = reward_callback
self.observation_callback = observation_callback
self.info_callback = info_callback
self.done_callback = done_callback
# environment parameters
self.discrete_action_space = True
# if true, action is a number 0...N, otherwise action is a one-hot N-dimensional vector
self.discrete_action_input = False
        # if true, even if the action space is continuous, actions are performed discretely
self.force_discrete_action = world.discrete_action if hasattr(world, 'discrete_action') else False
# if true, every agent has the same reward
self.shared_reward = world.collaborative if hasattr(world, 'collaborative') else False
self.time = 0
# configure spaces
self.action_space = []
self.observation_space = []
obs_shapes = []
self.agent_num = len(self.agents)
for agent in self.agents:
total_action_space = []
# physical action space
if self.discrete_action_space:
u_action_space = spaces.Discrete(world.dim_p * 2 + 1)
else:
u_action_space = spaces.Box(low=-agent.u_range, high=+agent.u_range, shape=(world.dim_p,), dtype=np.float32)
if agent.movable:
total_action_space.append(u_action_space)
# communication action space
if self.discrete_action_space:
c_action_space = spaces.Discrete(world.dim_c)
else:
c_action_space = spaces.Box(low=0.0, high=1.0, shape=(world.dim_c,), dtype=np.float32)
if not agent.silent:
total_action_space.append(c_action_space)
# total action space
if len(total_action_space) > 1:
# all action spaces are discrete, so simplify to MultiDiscrete action space
if all([isinstance(act_space, spaces.Discrete) for act_space in total_action_space]):
act_space = MultiDiscrete([[0, act_space.n - 1] for act_space in total_action_space])
else:
act_space = spaces.Tuple(total_action_space)
self.action_space.append(act_space)
else:
self.action_space.append(total_action_space[0])
# observation space
obs_dim = len(observation_callback(agent, self.world))
obs_shapes.append((obs_dim,))
self.observation_space.append(spaces.Box(low=-np.inf, high=+np.inf, shape=(obs_dim,), dtype=np.float32))
agent.action.c = np.zeros(self.world.dim_c)
        # simplified for non-comm game
# self.action_spaces = MASpace(tuple(Box(low=-1., high=1., shape=(1,)) for _ in range(self.agent_num)))
# self.observation_spaces = MASpace(tuple(Discrete(1) for _ in range(self.agent_num)))
self.action_spaces = MASpace(tuple(Box(low=0., high=1., shape=(world.dim_p * 2 + 1,)) for _ in range(self.agent_num)))
# print(obs_shapes)
self.observation_spaces = MASpace(tuple(Box(low=-np.inf, high=+np.inf, shape=obs_shape) for obs_shape in obs_shapes))
self.env_specs = MAEnvSpec(self.observation_spaces, self.action_spaces)
self.action_range = [0., 1.]
# rendering
self.shared_viewer = shared_viewer
if self.shared_viewer:
self.viewers = [None]
else:
self.viewers = [None] * self.n
self._reset_render()
def step(self, action_n):
obs_n = []
reward_n = []
done_n = []
info_n = {'n': []}
self.agents = self.world.policy_agents
# set action for each agent
for i, agent in enumerate(self.agents):
action = np.array(action_n[i]).reshape((5,))
self._set_action(action, agent, self.action_space[i])
# advance world state
self.world.step()
# record observation for each agent
for agent in self.agents:
obs_n.append(self._get_obs(agent))
reward_n.append(self._get_reward(agent))
done_n.append(self._get_done(agent))
info_n['n'].append(self._get_info(agent))
# all agents get total reward in cooperative case
reward = np.sum(reward_n)
if self.shared_reward:
reward_n = [reward] * self.n
return obs_n, reward_n, done_n, info_n
def reset(self):
# reset world
self.reset_callback(self.world)
# reset renderer
self._reset_render()
# record observations for each agent
obs_n = []
self.agents = self.world.policy_agents
for agent in self.agents:
obs_n.append(self._get_obs(agent))
return obs_n
# get info used for benchmarking
def _get_info(self, agent):
if self.info_callback is None:
return {}
return self.info_callback(agent, self.world)
# get observation for a particular agent
def _get_obs(self, agent):
if self.observation_callback is None:
return np.zeros(0)
return self.observation_callback(agent, self.world)
# get dones for a particular agent
# unused right now -- agents are allowed to go beyond the viewing screen
def _get_done(self, agent):
if self.done_callback is None:
return False
return self.done_callback(agent, self.world)
# get reward for a particular agent
def _get_reward(self, agent):
if self.reward_callback is None:
return 0.0
return self.reward_callback(agent, self.world)
# set env action for a particular agent
def _set_action(self, action, agent, action_space, time=None):
agent.action.u = np.zeros(self.world.dim_p)
agent.action.c = np.zeros(self.world.dim_c)
# process action
if isinstance(action_space, MultiDiscrete):
act = []
size = action_space.high - action_space.low + 1
index = 0
for s in size:
act.append(action[index:(index+s)])
index += s
action = act
else:
action = [action]
if agent.movable:
# physical action
if self.discrete_action_input:
agent.action.u = np.zeros(self.world.dim_p)
# process discrete action
if action[0] == 1: agent.action.u[0] = -1.0
if action[0] == 2: agent.action.u[0] = +1.0
if action[0] == 3: agent.action.u[1] = -1.0
if action[0] == 4: agent.action.u[1] = +1.0
else:
if self.force_discrete_action:
d = np.argmax(action[0])
action[0][:] = 0.0
action[0][d] = 1.0
if self.discrete_action_space:
# print('action', action)
agent.action.u[0] += action[0][1] - action[0][2]
agent.action.u[1] += action[0][3] - action[0][4]
else:
agent.action.u = action[0]
sensitivity = 5.0
if agent.accel is not None:
sensitivity = agent.accel
agent.action.u *= sensitivity
action = action[1:]
if not agent.silent:
# communication action
if self.discrete_action_input:
agent.action.c = np.zeros(self.world.dim_c)
agent.action.c[action[0]] = 1.0
else:
agent.action.c = action[0]
action = action[1:]
# make sure we used all elements of action
assert len(action) == 0
# reset rendering assets
def _reset_render(self):
self.render_geoms = None
self.render_geoms_xform = None
# render environment
def render(self, mode='human'):
if mode == 'human':
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
message = ''
for agent in self.world.agents:
comm = []
for other in self.world.agents:
if other is agent: continue
if np.all(other.state.c == 0):
word = '_'
else:
word = alphabet[np.argmax(other.state.c)]
message += (other.name + ' to ' + agent.name + ': ' + word + ' ')
# print(message)
for i in range(len(self.viewers)):
# create viewers (if necessary)
if self.viewers[i] is None:
# import rendering only if we need it (and don't import for headless machines)
#from gym.envs.classic_control import rendering
from multiagent import rendering
self.viewers[i] = rendering.Viewer(700,700)
# create rendering geometry
if self.render_geoms is None:
# import rendering only if we need it (and don't import for headless machines)
#from gym.envs.classic_control import rendering
from multiagent import rendering
self.render_geoms = []
self.render_geoms_xform = []
for entity in self.world.entities:
geom = rendering.make_circle(entity.size)
xform = rendering.Transform()
if 'agent' in entity.name:
geom.set_color(*entity.color, alpha=0.5)
else:
geom.set_color(*entity.color)
geom.add_attr(xform)
self.render_geoms.append(geom)
self.render_geoms_xform.append(xform)
# add geoms to viewer
for viewer in self.viewers:
viewer.geoms = []
for geom in self.render_geoms:
viewer.add_geom(geom)
results = []
for i in range(len(self.viewers)):
from multiagent import rendering
# update bounds to center around agent
cam_range = 1
if self.shared_viewer:
pos = np.zeros(self.world.dim_p)
else:
pos = self.agents[i].state.p_pos
self.viewers[i].set_bounds(pos[0]-cam_range,pos[0]+cam_range,pos[1]-cam_range,pos[1]+cam_range)
# update geometry positions
for e, entity in enumerate(self.world.entities):
self.render_geoms_xform[e].set_translation(*entity.state.p_pos)
# render to display or array
results.append(self.viewers[i].render(return_rgb_array = mode=='rgb_array'))
return results
# create receptor field locations in local coordinate frame
def _make_receptor_locations(self, agent):
receptor_type = 'polar'
range_min = 0.05 * 2.0
range_max = 1.00
dx = []
# circular receptive field
if receptor_type == 'polar':
for angle in np.linspace(-np.pi, +np.pi, 8, endpoint=False):
for distance in np.linspace(range_min, range_max, 3):
dx.append(distance * np.array([np.cos(angle), np.sin(angle)]))
# add origin
dx.append(np.array([0.0, 0.0]))
# grid receptive field
if receptor_type == 'grid':
for x in np.linspace(-range_max, +range_max, 5):
for y in np.linspace(-range_max, +range_max, 5):
dx.append(np.array([x,y]))
return dx
# vectorized wrapper for a batch of multi-agent environments
# assumes all environments have the same observation and action space
class BatchMultiAgentEnv(gym.Env):
metadata = {
'runtime.vectorized': True,
'render.modes' : ['human', 'rgb_array']
}
def __init__(self, env_batch):
self.env_batch = env_batch
@property
def n(self):
return np.sum([env.n for env in self.env_batch])
@property
def action_space(self):
return self.env_batch[0].action_space
@property
def observation_space(self):
return self.env_batch[0].observation_space
def step(self, action_n, time):
obs_n = []
reward_n = []
done_n = []
info_n = {'n': []}
i = 0
for env in self.env_batch:
obs, reward, done, _ = env.step(action_n[i:(i+env.n)], time)
i += env.n
obs_n += obs
# reward = [r / len(self.env_batch) for r in reward]
reward_n += reward
done_n += done
return obs_n, reward_n, done_n, info_n
def reset(self):
obs_n = []
for env in self.env_batch:
obs_n += env.reset()
return obs_n
# render environment
def render(self, mode='human', close=True):
results_n = []
for env in self.env_batch:
results_n += env.render(mode, close)
return results_n
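# --- Editor's usage sketch (illustrative, not part of the upstream module) ---
# A minimal smoke test for ParticleEnv built via make_particle_env. It assumes
# the `multiagent` package provides a "simple_spread" scenario; the random
# 5-dimensional actions match the Box(low=0., high=1., shape=(world.dim_p * 2 + 1,))
# action spaces constructed in ParticleEnv.__init__ for dim_p == 2.
if __name__ == "__main__":
    env = make_particle_env("simple_spread")
    obs_n = env.reset()
    for _ in range(25):
        # one random 5-dim action per policy agent
        action_n = [np.random.uniform(0.0, 1.0, size=5) for _ in range(env.n)]
        obs_n, reward_n, done_n, info_n = env.step(action_n)
    print("final rewards:", reward_n)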
| 38.989189
| 126
| 0.584362
|
581215e6f0601d9890d84d166d10eea43e64b7cb
| 483
|
py
|
Python
|
python/hackerrank/weightedmean/main.py
|
imjoseangel/sandbox
|
bc4ff74981faf91eb1a1f777d01fcfd13d6f5147
|
[
"MIT"
] | null | null | null |
python/hackerrank/weightedmean/main.py
|
imjoseangel/sandbox
|
bc4ff74981faf91eb1a1f777d01fcfd13d6f5147
|
[
"MIT"
] | null | null | null |
python/hackerrank/weightedmean/main.py
|
imjoseangel/sandbox
|
bc4ff74981faf91eb1a1f777d01fcfd13d6f5147
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, absolute_import, print_function,
unicode_literals)
def weightedMean(X, W):
return round(sum([i[0] * i[1] for i in zip(X, W)]) / sum(W), 1)
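# Worked example (editor's note, arithmetic only): for X = [10, 40, 30, 50, 20]
# and W = [1, 2, 3, 4, 5] the weighted sum is 10*1 + 40*2 + 30*3 + 50*4 + 20*5
# = 480 and sum(W) = 15, so weightedMean(X, W) == round(480 / 15, 1) == 32.0.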
def main():
n = int(input().strip())
vals = list(map(int, input().rstrip().split()))
weights = list(map(int, input().rstrip().split()))
print(weightedMean(vals, weights))
if __name__ == '__main__':
main()
| 19.32
| 67
| 0.585921
|
9f739d401b75c8e36e4db122582e8965d1db3f8a
| 839
|
py
|
Python
|
evan/wsgi.py
|
eillarra/evan
|
befe0f8daedd1b1f629097110d92e68534e43da1
|
[
"MIT"
] | null | null | null |
evan/wsgi.py
|
eillarra/evan
|
befe0f8daedd1b1f629097110d92e68534e43da1
|
[
"MIT"
] | 20
|
2021-03-31T20:10:46.000Z
|
2022-02-15T09:58:13.000Z
|
evan/wsgi.py
|
eillarra/evan
|
befe0f8daedd1b1f629097110d92e68534e43da1
|
[
"MIT"
] | null | null | null |
# https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
import os
import sentry_sdk
from django.conf import settings
from django.core.wsgi import get_wsgi_application
from sentry_sdk.integrations.django import DjangoIntegration
from whitenoise import WhiteNoise
env = os.environ.get("DJANGO_ENV", None)
if env in ["production"]:
sentry_sdk.init(
dsn=os.environ.get("SENTRY_DSN", "SENTRY_DSN"),
release=os.environ.get("GIT_REV", None),
environment=env,
integrations=[DjangoIntegration()],
ignore_errors=["django.security.DisallowedHost"],
send_default_pii=True,
)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "evan.settings")
os.environ["HTTPS"] = "on"
app = get_wsgi_application()
app = WhiteNoise(app, root=os.path.join(settings.SITE_ROOT, "www"), max_age=31536000)
| 29.964286
| 85
| 0.734207
|
00470f41d238170292ab8534c0e5a58a9bbb5899
| 3,941
|
py
|
Python
|
functorch/__init__.py
|
LaudateCorpus1/functorch
|
b07eea9d29fb44f0a9be3c35c042b3f6124bfcf2
|
[
"BSD-3-Clause"
] | null | null | null |
functorch/__init__.py
|
LaudateCorpus1/functorch
|
b07eea9d29fb44f0a9be3c35c042b3f6124bfcf2
|
[
"BSD-3-Clause"
] | null | null | null |
functorch/__init__.py
|
LaudateCorpus1/functorch
|
b07eea9d29fb44f0a9be3c35c042b3f6124bfcf2
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
import functools
import textwrap
from . import _C
# Top-level APIs. Please think carefully before adding something to the
# top-level namespace:
# - private helper functions should go into functorch._src
# - very experimental things should go into functorch.experimental
# - compilation related things should go into functorch.compile
# functorch transforms
from ._src.vmap import vmap
from ._src.eager_transforms import (
grad, grad_and_value, vjp, jacrev, jvp, jacfwd, hessian,
)
from ._src.python_key import make_fx
# utilities. Maybe these should go in their own namespace in the future?
from ._src.make_functional import (
make_functional_with_buffers,
make_functional,
combine_state_for_ensemble,
FunctionalModule,
FunctionalModuleWithBuffers,
)
try:
from .version import __version__ # noqa: F401
except ImportError:
pass
# Monkeypatching lol
_old_cross_entropy = torch.nn.functional.cross_entropy
# **kwargs to handle the new label_smoothing arg
def cross_entropy(input, target, weight=None, size_average=None,
ignore_index=-100, reduce=None, reduction='mean', **kwargs):
if input.dim() == 1 and target.dim() == 0:
input = input.unsqueeze(0)
target = target.unsqueeze(0)
result = _old_cross_entropy(
input, target, weight, size_average,
ignore_index, reduce, reduction, **kwargs)
if reduction == 'none':
return result.squeeze(0)
return result
torch.nn.functional.cross_entropy = cross_entropy
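# --- Editor's note (illustrative sketch, not part of the upstream module) ---
# The wrapper above presumably exists so that per-example calls, e.g. made
# under vmap where `input` is 1-d and `target` is 0-d, still go through
# torch.nn.functional.cross_entropy. A rough sketch of that use (the names
# below are hypothetical):
#   def per_sample_loss(weight, x, y):
#       return torch.nn.functional.cross_entropy(x @ weight, y)
#   per_sample_losses = vmap(per_sample_loss, in_dims=(None, 0, 0))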
# Monkeypatch .backward() to error out if any transforms are active.
# TODO: remove the monkeypatching and add an extension point into PyTorch core
_old_backward = torch.Tensor.backward
@functools.wraps(_old_backward)
def _backward(*args, **kwargs):
if _C.are_transforms_active():
raise RuntimeError(
"backward() called inside a functorch transform. This is not "
"supported, please use functorch.grad or functorch.vjp instead "
"or call backward() outside of functorch transforms.")
return _old_backward(*args, **kwargs)
setattr(torch.Tensor, 'backward', _backward)
# Monkeypatch tensor printing in pytorch
_old_str = torch._tensor_str._str
def prep_value(text, indent=4):
first_line_txt = ''
lines = text.split('\n')
lines[0] = lines[0]
lines[0] = ' ' * indent + first_line_txt + lines[0]
for i in range(1, len(lines)):
lines[i] = ' ' * (indent + len(first_line_txt)) + lines[i]
return '\n'.join(lines)
@functools.wraps(_old_str)
def _functorch_str(tensor):
level = _C.maybe_get_level(tensor)
if level == -1:
return _old_str(tensor)
value = _C.get_unwrapped(tensor)
dl_enabled = _C.tls_set_is_included()
try:
# Disable temporarily kDynamicLayerFrontModeKey/kDynamicLayerBackModeKey as included dispatch keys
if (dl_enabled):
_C._set_dynamic_layer_keys_included(False)
value_repr = repr(value)
finally:
# Reenable kDynamicLayerFrontModeKey/kDynamicLayerBackModeKey as included dispatch keys
if (dl_enabled):
_C._set_dynamic_layer_keys_included(True)
if _C.is_batchedtensor(tensor):
bdim = _C.maybe_get_bdim(tensor)
assert bdim != -1
return (
f'BatchedTensor(lvl={level}, bdim={bdim}, value=\n'
f'{prep_value(value_repr)}\n'
f')'
)
if _C.is_gradtrackingtensor(tensor):
return (
f'GradTrackingTensor(lvl={level}, value=\n'
f'{prep_value(value_repr)}\n'
f')'
)
raise ValueError("We don't know how to print this, please file us an issue")
torch._tensor_str._str = _functorch_str
| 30.550388
| 106
| 0.691956
|
e5a88fdfdcf46c6240445dd0967e191203d897c2
| 304
|
py
|
Python
|
minos/tasks/vcf_cluster.py
|
oxfordfun/minos
|
e7165f1a398b1003e82a8aa00480ef5cd65fa834
|
[
"MIT"
] | 14
|
2018-01-25T15:20:42.000Z
|
2022-03-25T07:57:19.000Z
|
minos/tasks/vcf_cluster.py
|
oxfordfun/minos
|
e7165f1a398b1003e82a8aa00480ef5cd65fa834
|
[
"MIT"
] | 41
|
2018-01-25T15:47:13.000Z
|
2021-11-04T10:30:21.000Z
|
minos/tasks/vcf_cluster.py
|
oxfordfun/minos
|
e7165f1a398b1003e82a8aa00480ef5cd65fa834
|
[
"MIT"
] | 11
|
2018-01-25T15:11:32.000Z
|
2021-11-04T08:59:55.000Z
|
from cluster_vcf_records import variant_tracking
def run(options):
tracker = variant_tracking.VariantTracker(options.merge_dir, options.ref_fasta)
tracker.cluster(
options.outprefix,
options.max_ref_len,
max_alleles=options.max_alleles,
cpus=options.cpus,
)
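# --- Editor's usage sketch (illustrative, not part of the upstream module) ---
# run() only reads attributes from `options`, so for ad-hoc use any namespace
# exposing the six fields below works; the paths and limits are placeholders.
#   from types import SimpleNamespace
#   run(SimpleNamespace(
#       merge_dir="merge_out/", ref_fasta="ref.fa", outprefix="clustered",
#       max_ref_len=50, max_alleles=500, cpus=1,
#   ))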
| 25.333333
| 83
| 0.720395
|
015ddabab3c3ec618e508b8a53c897beb5b227d7
| 860
|
py
|
Python
|
tests/test_cli.py
|
nmurtagh/rpmvenv
|
38784c4500bfa6233cd8ef1ef8c5fd7c9602fd32
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
nmurtagh/rpmvenv
|
38784c4500bfa6233cd8ef1ef8c5fd7c9602fd32
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
nmurtagh/rpmvenv
|
38784c4500bfa6233cd8ef1ef8c5fd7c9602fd32
|
[
"MIT"
] | null | null | null |
"""Test suites for the primary entry point of the CLI."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import pytest
from rpmvenv import cli
@pytest.mark.skipif(
not pytest.config.getvalue("python_git_url"),
reason="No --python-git-url option was given",
)
def test_python_cmd_build(python_source_code, python_config_file, tmpdir):
"""Test that a default build works without exception."""
with pytest.raises(SystemExit) as exc_info:
cli.main(
(
python_config_file,
'--source', python_source_code,
'--destination', str(tmpdir),
)
)
rc = exc_info.value.code if type(exc_info.value) == SystemExit else \
exc_info.value
assert rc == 0
| 28.666667
| 74
| 0.674419
|
103edc859e3d2f4bae7b1c5fc418e8d67187355d
| 1,848
|
py
|
Python
|
pkg_usage.py
|
akaneko1019/yolact_edge
|
a9a00281b33b3ac90253a4939773308a8f95e21d
|
[
"MIT"
] | 1,036
|
2020-12-23T01:48:56.000Z
|
2022-03-29T11:55:42.000Z
|
pkg_usage.py
|
akaneko1019/yolact_edge
|
a9a00281b33b3ac90253a4939773308a8f95e21d
|
[
"MIT"
] | 184
|
2020-12-23T05:10:16.000Z
|
2022-03-30T05:41:25.000Z
|
pkg_usage.py
|
akaneko1019/yolact_edge
|
a9a00281b33b3ac90253a4939773308a8f95e21d
|
[
"MIT"
] | 221
|
2020-12-23T03:35:35.000Z
|
2022-03-29T06:26:23.000Z
|
import numpy as np
import urllib.request
import time
import cv2
from yolact_edge.inference import YOLACTEdgeInference
weights = "yolact_edge_resnet50_54_800000.pth"
# All available model configs; which one applies depends on the weights
# you use. More info can be found in data/config.py.
model_configs = [
'yolact_edge_mobilenetv2_config',
'yolact_edge_vid_config',
'yolact_edge_vid_minimal_config',
'yolact_edge_vid_trainflow_config',
'yolact_edge_youtubevis_config',
'yolact_resnet50_config',
'yolact_resnet152_config',
'yolact_edge_resnet50_config',
'yolact_edge_vid_resnet50_config',
'yolact_edge_vid_trainflow_resnet50_config',
'yolact_edge_youtubevis_resnet50_config',
]
config = model_configs[5]
# All available model datasets; which one applies depends on the weights
# you use. More info can be found in data/config.py.
datasets = [
'coco2014_dataset',
'coco2017_dataset',
'coco2017_testdev_dataset',
'flying_chairs_dataset',
'youtube_vis_dataset',
]
dataset = datasets[1]
# Path to images used for TensorRT calibration
calib_images = "./data/calib_images"
# Override some default configuration
config_ovr = {
'use_fast_nms': True, # Does not work with regular nms
'mask_proto_debug': False
}
model_inference = YOLACTEdgeInference(
weights, config, dataset, calib_images, config_ovr)
img = None
try:
with urllib.request.urlopen("http://images.cocodataset.org/val2017/000000439715.jpg") as f:
img = np.asarray(bytearray(f.read()), dtype="uint8")
img = cv2.imdecode(img, cv2.IMREAD_COLOR)
except Exception:
pass
if img is None:
print("Couldn't retrieve image for benchmark...")
exit(1)
print("Benchmarking performance...")
start = time.time()
samples = 200
for i in range(samples):
p = model_inference.predict(img, False)
print(f"Average {1 / ( (time.time() - start) / samples )} FPS")
| 29.333333
| 95
| 0.739177
|
a1aa1c7f8a6461f1ef9a0f3b461b524d7b273420
| 505
|
py
|
Python
|
kol/request/OpenChatRequest.py
|
DamianDominoDavis/cwbot-ndy
|
53b826232eadb7ef558f568872a945d04d8d4252
|
[
"BSD-3-Clause"
] | null | null | null |
kol/request/OpenChatRequest.py
|
DamianDominoDavis/cwbot-ndy
|
53b826232eadb7ef558f568872a945d04d8d4252
|
[
"BSD-3-Clause"
] | null | null | null |
kol/request/OpenChatRequest.py
|
DamianDominoDavis/cwbot-ndy
|
53b826232eadb7ef558f568872a945d04d8d4252
|
[
"BSD-3-Clause"
] | null | null | null |
from .GenericRequest import GenericRequest
from kol.manager import PatternManager
class OpenChatRequest(GenericRequest):
def __init__(self, session):
super(OpenChatRequest, self).__init__(session)
self.url = session.serverURL + "lchat.php"
def parseResponse(self):
currentChannelPattern = PatternManager.getOrCompilePattern("currentChatChannel")
match = currentChannelPattern.search(self.responseText)
self.responseData["currentChannel"] = match.group(1)
| 38.846154
| 88
| 0.750495
|
a5d19a763832911d86c140daea11df751a0d9fa5
| 1,055
|
py
|
Python
|
plenum/test/input_validation/message_validation/test_consistencyproof_message.py
|
andkononykhin/plenum
|
28dc1719f4b7e80d31dafbadb38cfec4da949886
|
[
"Apache-2.0"
] | 148
|
2017-07-11T19:05:25.000Z
|
2022-03-16T21:31:20.000Z
|
plenum/test/input_validation/message_validation/test_consistencyproof_message.py
|
andkononykhin/plenum
|
28dc1719f4b7e80d31dafbadb38cfec4da949886
|
[
"Apache-2.0"
] | 561
|
2017-06-29T17:59:56.000Z
|
2022-03-09T15:47:14.000Z
|
plenum/test/input_validation/message_validation/test_consistencyproof_message.py
|
andkononykhin/plenum
|
28dc1719f4b7e80d31dafbadb38cfec4da949886
|
[
"Apache-2.0"
] | 378
|
2017-06-29T17:45:27.000Z
|
2022-03-26T07:27:59.000Z
|
import pytest
from plenum.common.messages.node_messages import ConsistencyProof
from collections import OrderedDict
from plenum.common.messages.fields import NonNegativeNumberField, \
LedgerIdField, MerkleRootField, IterableField
EXPECTED_ORDERED_FIELDS = OrderedDict([
("ledgerId", LedgerIdField),
("seqNoStart", NonNegativeNumberField),
("seqNoEnd", NonNegativeNumberField),
("viewNo", NonNegativeNumberField),
("ppSeqNo", NonNegativeNumberField),
("oldMerkleRoot", MerkleRootField),
("newMerkleRoot", MerkleRootField),
("hashes", IterableField),
])
def test_hash_expected_type():
assert ConsistencyProof.typename == "CONSISTENCY_PROOF"
def test_has_expected_fields():
actual_field_names = OrderedDict(ConsistencyProof.schema).keys()
assert list(actual_field_names) == list(EXPECTED_ORDERED_FIELDS.keys())
def test_has_expected_validators():
schema = dict(ConsistencyProof.schema)
for field, validator in EXPECTED_ORDERED_FIELDS.items():
assert isinstance(schema[field], validator)
| 32.96875
| 75
| 0.76872
|
d59caa9d9568e7231fa52af8017cb636b5294ee0
| 4,523
|
py
|
Python
|
libs/RedisProtocol.py
|
mgronhol/h3-tritium
|
815b863cb0a4fc438273129dcff537ac2633aa82
|
[
"Apache-2.0"
] | 2
|
2020-09-05T01:34:58.000Z
|
2020-09-05T01:54:25.000Z
|
libs/RedisProtocol.py
|
mgronhol/h3-tritium
|
815b863cb0a4fc438273129dcff537ac2633aa82
|
[
"Apache-2.0"
] | null | null | null |
libs/RedisProtocol.py
|
mgronhol/h3-tritium
|
815b863cb0a4fc438273129dcff537ac2633aa82
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2013 Markus Gronholm <markus@alshain.fi> / Alshain Oy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RedisProtocol(object):
def __init__( self, conn ):
self.conn = conn
self.buf = ""
def _pack_list( self, data ):
#print "packing list", data
out = "*%i\r\n" % len( data )
for entry in data:
if isinstance( entry, int ):
out += ":%i\r\n" % entry
elif isinstance( entry, list ):
out += self._pack_list( entry )
elif isinstance( entry, dict ):
out += self._pack_dict( entry )
else:
out += "$%i\r\n" % len( str(entry) )
out += "%s\r\n" % str( entry )
return out
def _pack_dict( self, data ):
#print "packing dict", data
out = "*%i\r\n" % (len( data ) * 2)
for (key, value) in data.items():
out += "$%i\r\n" % len( key )
out += "%s\r\n" % key
if isinstance( value, int ):
out += ":%i\r\n" % value
elif isinstance( value, list ):
out += self._pack_list( value )
elif isinstance( value, dict ):
out += self._pack_dict( value )
else:
out += "$%i\r\n" % len( value )
out += "%s\r\n" % value
return out
def send_response( self, message ):
if isinstance( message, str ):
self.conn.send( "+%s\r\n" % repr(message)[1:-1])
elif isinstance( message, list ):
self.conn.send( self._pack_list( message ) )
elif isinstance( message, dict ):
self.conn.send( self._pack_dict( message ) )
elif isinstance( message, int ):
self.conn.send(":%i\r\n" % message )
def send_error( self, message ):
self.conn.send( '-%s\r\n' % repr(message)[1:-1] )
def recv_bulk( self ):
#print "bulk"
while '\r\n' not in self.buf:
self._recv()
if not self.buf.startswith( "$" ):
return False
(line, rest) = self.buf.split( "\r\n", 1 )
next_bytes = int( line[1:] )
self.buf = rest
while len( self.buf ) < next_bytes + 2:
self._recv()
(line, rest) = self.buf.split( "\r\n", 1 )
self.buf = rest
return line
def recv_string( self ):
#print "string"
while '\r\n' not in self.buf:
self._recv()
if not self.buf.startswith( "+" ):
return False
(line, rest) = self.buf.split( "\r\n", 1 )
self.buf = rest
return line[1:]
def recv_error( self ):
while '\r\n' not in self.buf:
self._recv()
if not self.buf.startswith( "-" ):
return False
(line, rest) = self.buf.split( "\r\n", 1 )
self.buf = rest
return line
def recv_integer( self ):
#print "integer"
while '\r\n' not in self.buf:
self._recv()
if not self.buf.startswith( ":" ):
return False
(line, rest) = self.buf.split( "\r\n", 1 )
self.buf = rest
return int(line[1:])
def recv_multibulk( self ):
#print "multibulk"
while '\r\n' not in self.buf:
self._recv()
if not self.buf.startswith( "*" ):
return False
(line, rest) = self.buf.split( "\r\n", 1 )
self.buf = rest
N = int( line[1:] )
#print "Nparams", N
out = []
for i in range( N ):
while len( self.buf ) < 1:
self._recv()
#print "multibulk selector", self.buf[0]
if self.buf.startswith( "+" ):
out.append( self.recv_string() )
elif self.buf.startswith( "-" ):
out.append( self.recv_error() )
elif self.buf.startswith( ":" ):
out.append( self.recv_integer() )
elif self.buf.startswith( "$" ):
out.append( self.recv_bulk() )
else:
out.append( self.recv_multibulk() )
return out
def _recv( self ):
chunk = self.conn.recv(1024)
if len( chunk ) < 1:
raise IOError
self.buf += chunk
#print self.buf
def receive(self):
try:
while len( self.buf ) < 1:
self._recv()
if self.buf.startswith( "+" ):
return self.recv_string()
elif self.buf.startswith( "-" ):
return self.recv_error()
elif self.buf.startswith( ":" ):
return self.recv_integer()
elif self.buf.startswith( "$" ):
return self.recv_bulk()
elif self.buf.startswith( "*" ):
return self.recv_multibulk()
except IOError:
return False
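# --- Editor's usage sketch (illustrative, not part of the upstream module) ---
# A minimal request/response loop built on RedisProtocol. It assumes `conn` is
# an already-accepted blocking socket whose recv()/send() exchange str objects
# (the class targets Python 2 sockets), and that clients speak multibulk
# commands such as ['PING'] or ['SET', 'key', 'value'].
def _example_handle_connection(conn):
    proto = RedisProtocol(conn)
    while True:
        command = proto.receive()
        if command is False:
            # connection closed (IOError in _recv) or unparsable input
            break
        if isinstance(command, list) and command and command[0].upper() == "PING":
            proto.send_response("PONG")
        else:
            proto.send_error("ERR unsupported command")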
| 23.805263
| 76
| 0.604024
|
f4a28fac4b39c7798cb7cb3d8e11f3d681cf8859
| 1,246
|
py
|
Python
|
msg_handler/__init__.py
|
OpenUpSA/odac-ford-housing
|
010d9a0361606b1041e2b64140825f7bff60935a
|
[
"Apache-2.0"
] | 1
|
2015-04-24T23:47:26.000Z
|
2015-04-24T23:47:26.000Z
|
msg_handler/__init__.py
|
Code4SA/odac-ford-housing
|
010d9a0361606b1041e2b64140825f7bff60935a
|
[
"Apache-2.0"
] | null | null | null |
msg_handler/__init__.py
|
Code4SA/odac-ford-housing
|
010d9a0361606b1041e2b64140825f7bff60935a
|
[
"Apache-2.0"
] | null | null | null |
import logging
from flask import Flask
from redis import Redis
from flask.ext.sqlalchemy import SQLAlchemy
import sys
redis = Redis()
app = Flask(__name__, instance_relative_config=True)
app.config.from_pyfile('config.py', silent=True)
app.config.from_pyfile('config_private.py', silent=True)
db = SQLAlchemy(app)
# load log level from config
LOG_LEVEL = app.config['LOG_LEVEL']
LOGGER_NAME = app.config['LOGGER_NAME']
# create logger for this application
logger = logging.getLogger(LOGGER_NAME)
logger.setLevel(LOG_LEVEL)
# declare format for logging to file
file_formatter = logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'
)
# add file handler to application logger
from logging.handlers import RotatingFileHandler
log_path = app.instance_path[0:app.instance_path.index('instance')]
file_handler = RotatingFileHandler(log_path + 'debug.log')
file_handler.setLevel(LOG_LEVEL)
file_handler.setFormatter(file_formatter)
logger.addHandler(file_handler)
# also log to stdout
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(LOG_LEVEL)
stream_handler.setFormatter(file_formatter)
logger.addHandler(stream_handler)
import msg_handler.admin
import msg_handler.views
| 28.318182
| 67
| 0.800963
|
65dc618a41383981ef84d1caa7785a13385dd89b
| 3,496
|
py
|
Python
|
DXFImporter/lib/ezdxf/math/eulerspiral.py
|
tapnair/DXFImporter
|
c86cde0b4420ca7d0c5e3569675acd2d4426667f
|
[
"MIT"
] | 2
|
2021-07-28T03:52:02.000Z
|
2021-07-31T05:08:11.000Z
|
DXFImporter/lib/ezdxf/math/eulerspiral.py
|
tapnair/DXFImporter
|
c86cde0b4420ca7d0c5e3569675acd2d4426667f
|
[
"MIT"
] | 1
|
2020-04-28T17:52:26.000Z
|
2020-10-07T01:28:56.000Z
|
DXFImporter/lib/ezdxf/math/eulerspiral.py
|
tapnair/DXFImporter
|
c86cde0b4420ca7d0c5e3569675acd2d4426667f
|
[
"MIT"
] | 1
|
2021-07-31T05:08:12.000Z
|
2021-07-31T05:08:12.000Z
|
# Created: 26.03.2010
# License: MIT License
from typing import Dict, Iterable
from ezdxf.math import Vector
from ezdxf.math.bspline import bspline_control_frame, BSpline
class EulerSpiral:
"""
    This class represents an Euler spiral (clothoid) for `curvature` (radius of curvature).
This is a parametric curve, which always starts at the origin = ``(0, 0)``.
Args:
curvature: radius of curvature
"""
def __init__(self, curvature: float = 1.0):
self.curvature = curvature # Radius of curvature
self.curvature_powers = [curvature ** power for power in range(19)]
self._cache = {} # type: Dict[float, Vector] # coordinates cache
def radius(self, t: float) -> float:
"""
Get radius of circle at distance `t`.
"""
if t > 0.:
return self.curvature_powers[2] / t
else:
return 0. # radius = infinite
def tangent(self, t: float) -> Vector:
"""
        Get tangent at distance `t` as :class:`Vector` object.
"""
angle = t ** 2 / (2. * self.curvature_powers[2])
return Vector.from_angle(angle)
def distance(self, radius: float) -> float:
"""
Get distance L from origin for `radius`.
"""
return self.curvature_powers[2] / float(radius)
def point(self, t: float) -> Vector:
"""
        Get point at distance `t` as :class:`Vector`.
"""
def term(length_power, curvature_power, const):
return t ** length_power / (const * self.curvature_powers[curvature_power])
if t not in self._cache:
y = term(3, 2, 6.) - term(7, 6, 336.) + term(11, 10, 42240.) - \
term(15, 14, 9676800.) + term(19, 18, 3530096640.)
x = t - term(5, 4, 40.) + term(9, 8, 3456.) - term(13, 12, 599040.) + \
term(17, 16, 175472640.)
self._cache[t] = Vector(x, y)
return self._cache[t]
def approximate(self, length: float, segments: int) -> Iterable[Vector]:
"""
Approximate curve of length with line segments.
Generates segments+1 vertices as :class:`Vector` objects.
"""
delta_l = float(length) / float(segments)
yield Vector(0, 0)
for index in range(1, segments + 1):
yield self.point(delta_l * index)
def circle_center(self, t: float) -> Vector:
"""
Get circle center at distance `t`.
.. versionchanged:: 0.10
renamed from `circle_midpoint`
"""
p = self.point(t)
r = self.radius(t)
return p + self.tangent(t).normalize(r).orthogonal()
def bspline(self, length: float, segments: int = 10, degree: int = 3, method: str = 'uniform') -> BSpline:
"""
Approximate euler spiral as B-spline.
Args:
length: length of euler spiral
segments: count of fit points for B-spline calculation
degree: degree of BSpline
method: calculation method for parameter vector t
Returns:
:class:`BSpline`
"""
fit_points = list(self.approximate(length, segments=segments))
spline = bspline_control_frame(fit_points, degree, method=method)
knots = [v * length for v in spline.knot_values()] # scale knot values to length
spline.basis.knots = knots
return spline
# backward compatibility
circle_midpoint = circle_center
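# --- Editor's usage sketch (illustrative, not part of the upstream module) ---
# Sampling a clothoid and approximating it with a B-spline; the curvature,
# length and segment counts below are arbitrary example values.
if __name__ == "__main__":
    spiral = EulerSpiral(curvature=2.0)
    for vertex in spiral.approximate(length=5.0, segments=10):
        print(vertex)
    print("radius at t=1.0:", spiral.radius(1.0))
    spline = spiral.bspline(length=5.0, segments=10)  # B-spline approximation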
| 30.938053
| 110
| 0.579805
|
3eb69bd7f3d42f5cd8d6cc6d2d32cc9eb808d9a4
| 38,110
|
py
|
Python
|
tensorflow/python/keras/engine/topology_test.py
|
tianyapiaozi/tensorflow
|
fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a
|
[
"Apache-2.0"
] | 71
|
2017-05-25T16:02:15.000Z
|
2021-06-09T16:08:08.000Z
|
tensorflow/python/keras/engine/topology_test.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 133
|
2017-04-26T16:49:49.000Z
|
2019-10-15T11:39:26.000Z
|
tensorflow/python/keras/engine/topology_test.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 26
|
2017-04-12T16:25:44.000Z
|
2018-10-30T10:10:15.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for layer graphs construction & handling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import input_layer as input_layer_lib
from tensorflow.python.keras.engine import network as network_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import test
try:
import yaml # pylint:disable=g-import-not-at-top
except ImportError:
yaml = None
class TopologyConstructionTest(test.TestCase):
def test_get_updates(self):
class MyLayer(keras.layers.Layer):
def build(self, input_shape):
self.a = self.add_variable('a',
(1, 1),
'float32',
trainable=False)
self.b = self.add_variable('b',
(1, 1),
'float32',
trainable=False)
self.add_update(state_ops.assign_add(self.a, [[1.]],
name='unconditional_update'))
self.built = True
def call(self, inputs):
self.add_update(state_ops.assign_add(self.b, inputs,
name='conditional_update'),
inputs=True)
return inputs + 1
x1 = input_layer_lib.Input(shape=(1,))
layer = MyLayer()
_ = layer.apply(x1)
self.assertEqual(len(layer.updates), 2)
self.assertEqual(len(layer.get_updates_for(x1)), 1)
self.assertEqual(len(layer.get_updates_for(None)), 1)
x2 = input_layer_lib.Input(shape=(1,))
y2 = layer.apply(x2)
self.assertEqual(len(layer.updates), 3)
self.assertEqual(len(layer.get_updates_for(x1)), 1)
self.assertEqual(len(layer.get_updates_for(x2)), 1)
self.assertEqual(len(layer.get_updates_for(None)), 1)
network = network_lib.Network(x2, y2)
self.assertEqual(len(network.updates), 2)
self.assertEqual(len(network.get_updates_for(x1)), 0)
self.assertEqual(len(network.get_updates_for(x2)), 1)
self.assertEqual(len(network.get_updates_for(None)), 1)
x3 = input_layer_lib.Input(shape=(1,))
_ = layer.apply(x3)
self.assertEqual(len(network.updates), 2)
x4 = input_layer_lib.Input(shape=(1,))
_ = network(x4)
self.assertEqual(len(network.updates), 3)
self.assertEqual(len(network.get_updates_for(x2)), 1)
self.assertEqual(len(network.get_updates_for(x4)), 1)
self.assertEqual(len(network.get_updates_for(None)), 1)
network.add_update(state_ops.assign_add(layer.a, [[1]]))
self.assertEqual(len(network.updates), 4)
self.assertEqual(len(network.get_updates_for(None)), 2)
network.add_update(state_ops.assign_add(layer.b, x4), inputs=True)
self.assertEqual(len(network.updates), 5)
self.assertEqual(len(network.get_updates_for(x4)), 2)
def test_get_updates_bn(self):
x1 = input_layer_lib.Input(shape=(1,))
layer = keras.layers.BatchNormalization()
_ = layer.apply(x1)
print('BN updates', layer._updates)
self.assertEqual(len(layer.updates), 2)
self.assertEqual(len(layer.get_updates_for(x1)), 2)
self.assertEqual(len(layer.get_updates_for(None)), 0)
def test_get_losses(self):
class MyLayer(keras.layers.Layer):
def build(self, input_shape):
self.a = self.add_variable('a',
(1, 1),
'float32',
trainable=False)
self.b = self.add_variable('b',
(1, 1),
'float32',
trainable=False)
self.add_loss(math_ops.reduce_sum(self.a))
self.built = True
def call(self, inputs):
self.add_loss(math_ops.reduce_sum(inputs),
inputs=True)
return inputs + 1
x1 = input_layer_lib.Input(shape=(1,))
layer = MyLayer()
_ = layer.apply(x1)
self.assertEqual(len(layer.losses), 2)
self.assertEqual(len(layer.get_losses_for(x1)), 1)
self.assertEqual(len(layer.get_losses_for(None)), 1)
x2 = input_layer_lib.Input(shape=(1,))
y2 = layer.apply(x2)
self.assertEqual(len(layer.losses), 3)
self.assertEqual(len(layer.get_losses_for(x1)), 1)
self.assertEqual(len(layer.get_losses_for(x2)), 1)
self.assertEqual(len(layer.get_losses_for(None)), 1)
network = network_lib.Network(x2, y2)
self.assertEqual(len(network.losses), 2)
self.assertEqual(len(network.get_losses_for(x1)), 0)
self.assertEqual(len(network.get_losses_for(x2)), 1)
self.assertEqual(len(network.get_losses_for(None)), 1)
x3 = input_layer_lib.Input(shape=(1,))
_ = layer.apply(x3)
self.assertEqual(len(network.losses), 2)
x4 = input_layer_lib.Input(shape=(1,))
_ = network(x4)
self.assertEqual(len(network.losses), 3)
self.assertEqual(len(network.get_losses_for(x2)), 1)
self.assertEqual(len(network.get_losses_for(x4)), 1)
self.assertEqual(len(network.get_losses_for(None)), 1)
network.add_loss(math_ops.reduce_sum(layer.a))
self.assertEqual(len(network.losses), 4)
self.assertEqual(len(network.get_losses_for(None)), 2)
network.add_loss(math_ops.reduce_sum(x4), inputs=True)
self.assertEqual(len(network.losses), 5)
self.assertEqual(len(network.get_losses_for(x4)), 2)
def testTopologicalAttributes(self):
# test layer attributes / methods related to cross-layer connectivity.
a = input_layer_lib.Input(shape=(32,), name='input_a')
b = input_layer_lib.Input(shape=(32,), name='input_b')
# test input, output, input_shape, output_shape
test_layer = keras.layers.Dense(16, name='test_layer')
a_test = test_layer(a)
self.assertEqual(test_layer.input, a)
self.assertEqual(test_layer.output, a_test)
self.assertEqual(test_layer.input_shape, (None, 32))
self.assertEqual(test_layer.output_shape, (None, 16))
# test `get_*_at` methods
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
self.assertEqual(dense.get_input_at(0), a)
self.assertEqual(dense.get_input_at(1), b)
self.assertEqual(dense.get_output_at(0), a_2)
self.assertEqual(dense.get_output_at(1), b_2)
self.assertEqual(dense.get_input_shape_at(0), (None, 32))
self.assertEqual(dense.get_input_shape_at(1), (None, 32))
self.assertEqual(dense.get_output_shape_at(0), (None, 16))
self.assertEqual(dense.get_output_shape_at(1), (None, 16))
# Test invalid value for attribute retrieval.
with self.assertRaises(ValueError):
dense.get_input_at(2)
with self.assertRaises(AttributeError):
new_dense = keras.layers.Dense(16)
_ = new_dense.input
with self.assertRaises(AttributeError):
new_dense = keras.layers.Dense(16)
_ = new_dense.output
with self.assertRaises(AttributeError):
new_dense = keras.layers.Dense(16)
_ = new_dense.output_shape
with self.assertRaises(AttributeError):
new_dense = keras.layers.Dense(16)
_ = new_dense.input_shape
with self.assertRaises(AttributeError):
new_dense = keras.layers.Dense(16)
a = input_layer_lib.Input(shape=(3, 32))
a = input_layer_lib.Input(shape=(5, 32))
a_2 = dense(a)
b_2 = dense(b)
_ = new_dense.input_shape
with self.assertRaises(AttributeError):
new_dense = keras.layers.Dense(16)
a = input_layer_lib.Input(shape=(3, 32))
a = input_layer_lib.Input(shape=(5, 32))
a_2 = dense(a)
b_2 = dense(b)
_ = new_dense.output_shape
def testTopologicalAttributesMultiOutputLayer(self):
class PowersLayer(keras.layers.Layer):
def call(self, inputs):
return [inputs**2, inputs**3]
x = input_layer_lib.Input(shape=(32,))
test_layer = PowersLayer()
p1, p2 = test_layer(x) # pylint: disable=not-callable
self.assertEqual(test_layer.input, x)
self.assertEqual(test_layer.output, [p1, p2])
self.assertEqual(test_layer.input_shape, (None, 32))
self.assertEqual(test_layer.output_shape, [(None, 32), (None, 32)])
def testTopologicalAttributesMultiInputLayer(self):
class AddLayer(keras.layers.Layer):
def call(self, inputs):
assert len(inputs) == 2
return inputs[0] + inputs[1]
a = input_layer_lib.Input(shape=(32,))
b = input_layer_lib.Input(shape=(32,))
test_layer = AddLayer()
y = test_layer([a, b]) # pylint: disable=not-callable
self.assertEqual(test_layer.input, [a, b])
self.assertEqual(test_layer.output, y)
self.assertEqual(test_layer.input_shape, [(None, 32), (None, 32)])
self.assertEqual(test_layer.output_shape, (None, 32))
def testBasicNetwork(self):
# minimum viable network
x = input_layer_lib.Input(shape=(32,))
dense = keras.layers.Dense(2)
y = dense(x)
network = network_lib.Network(x, y, name='dense_network')
# test basic attributes
self.assertEqual(network.name, 'dense_network')
self.assertEqual(len(network.layers), 2) # InputLayer + Dense
self.assertEqual(network.layers[1], dense)
self.assertEqual(network.weights, dense.weights)
self.assertEqual(network.trainable_weights, dense.trainable_weights)
self.assertEqual(network.non_trainable_weights, dense.non_trainable_weights)
# test callability on Input
x_2 = input_layer_lib.Input(shape=(32,))
y_2 = network(x_2)
self.assertEqual(y_2.get_shape().as_list(), [None, 2])
# test callability on regular tensor
x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32))
y_2 = network(x_2)
self.assertEqual(y_2.get_shape().as_list(), [None, 2])
# test network `trainable` attribute
network.trainable = False
self.assertEqual(network.weights, dense.weights)
self.assertEqual(network.trainable_weights, [])
self.assertEqual(network.non_trainable_weights,
dense.trainable_weights + dense.non_trainable_weights)
def test_trainable_weights(self):
a = keras.layers.Input(shape=(2,))
b = keras.layers.Dense(1)(a)
model = keras.models.Model(a, b)
weights = model.weights
self.assertListEqual(model.trainable_weights, weights)
self.assertListEqual(model.non_trainable_weights, [])
model.trainable = False
self.assertListEqual(model.trainable_weights, [])
self.assertListEqual(model.non_trainable_weights, weights)
model.trainable = True
self.assertListEqual(model.trainable_weights, weights)
self.assertListEqual(model.non_trainable_weights, [])
model.layers[1].trainable = False
self.assertListEqual(model.trainable_weights, [])
self.assertListEqual(model.non_trainable_weights, weights)
# sequential model
model = keras.models.Sequential()
model.add(keras.layers.Dense(1, input_dim=2))
weights = model.weights
self.assertListEqual(model.trainable_weights, weights)
self.assertListEqual(model.non_trainable_weights, [])
model.trainable = False
self.assertListEqual(model.trainable_weights, [])
self.assertListEqual(model.non_trainable_weights, weights)
model.trainable = True
self.assertListEqual(model.trainable_weights, weights)
self.assertListEqual(model.non_trainable_weights, [])
model.layers[0].trainable = False
self.assertListEqual(model.trainable_weights, [])
self.assertListEqual(model.non_trainable_weights, weights)
def test_learning_phase(self):
with self.test_session():
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
a_2 = keras.layers.Dense(16, name='dense_1')(a)
dp = keras.layers.Dropout(0.5, name='dropout')
b_2 = dp(b)
self.assertFalse(a_2._uses_learning_phase)
self.assertTrue(b_2._uses_learning_phase)
# test merge
m = keras.layers.concatenate([a_2, b_2])
self.assertTrue(m._uses_learning_phase)
# Test recursion
model = keras.models.Model([a, b], [a_2, b_2])
self.assertTrue(model.uses_learning_phase)
c = keras.layers.Input(shape=(32,), name='input_c')
d = keras.layers.Input(shape=(32,), name='input_d')
c_2, b_2 = model([c, d])
self.assertTrue(c_2._uses_learning_phase)
self.assertTrue(b_2._uses_learning_phase)
# try actually running graph
fn = keras.backend.function(
model.inputs + [keras.backend.learning_phase()], model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs_no_dp = fn([input_a_np, input_b_np, 0])
fn_outputs_dp = fn([input_a_np, input_b_np, 1])
# output a: nothing changes
self.assertEqual(fn_outputs_no_dp[0].sum(), fn_outputs_dp[0].sum())
# output b: dropout applied
self.assertNotEqual(fn_outputs_no_dp[1].sum(), fn_outputs_dp[1].sum())
def test_layer_call_arguments(self):
# Test the ability to pass and serialize arguments to `call`.
inp = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3)(inp)
x = keras.layers.Dropout(0.5)(x, training=True)
model = keras.models.Model(inp, x)
self.assertFalse(model.uses_learning_phase)
# Test that argument is kept when applying the model
inp2 = keras.layers.Input(shape=(2,))
out2 = model(inp2)
self.assertFalse(out2._uses_learning_phase)
# Test that argument is kept after loading a model
config = model.get_config()
model = keras.models.Model.from_config(config)
self.assertFalse(model.uses_learning_phase)
def test_node_construction(self):
# test basics
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
with self.assertRaises(ValueError):
_ = keras.layers.Input(shape=(32,), batch_shape=(10, 32))
with self.assertRaises(ValueError):
_ = keras.layers.Input(shape=(32,), unknown_kwarg=None)
self.assertListEqual(a.get_shape().as_list(), [None, 32])
a_layer, a_node_index, a_tensor_index = a._keras_history
b_layer, _, _ = b._keras_history
self.assertEqual(len(a_layer._inbound_nodes), 1)
self.assertEqual(a_tensor_index, 0)
node = a_layer._inbound_nodes[a_node_index]
self.assertEqual(node.outbound_layer, a_layer)
self.assertListEqual(node.inbound_layers, [])
self.assertListEqual(node.input_tensors, [a])
self.assertListEqual(node.input_shapes, [(None, 32)])
self.assertListEqual(node.output_tensors, [a])
self.assertListEqual(node.output_shapes, [(None, 32)])
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
self.assertEqual(len(dense._inbound_nodes), 2)
self.assertEqual(len(dense._outbound_nodes), 0)
self.assertListEqual(dense._inbound_nodes[0].inbound_layers, [a_layer])
self.assertEqual(dense._inbound_nodes[0].outbound_layer, dense)
self.assertListEqual(dense._inbound_nodes[1].inbound_layers, [b_layer])
self.assertEqual(dense._inbound_nodes[1].outbound_layer, dense)
self.assertListEqual(dense._inbound_nodes[0].input_tensors, [a])
self.assertListEqual(dense._inbound_nodes[1].input_tensors, [b])
# test layer properties
test_layer = keras.layers.Dense(16, name='test_layer')
a_test = test_layer(a)
self.assertListEqual(test_layer.kernel.get_shape().as_list(), [32, 16])
self.assertEqual(test_layer.input, a)
self.assertEqual(test_layer.output, a_test)
self.assertEqual(test_layer.input_shape, (None, 32))
self.assertEqual(test_layer.output_shape, (None, 16))
self.assertEqual(dense.get_input_at(0), a)
self.assertEqual(dense.get_input_at(1), b)
self.assertEqual(dense.get_output_at(0), a_2)
self.assertEqual(dense.get_output_at(1), b_2)
self.assertEqual(dense.get_input_shape_at(0), (None, 32))
self.assertEqual(dense.get_input_shape_at(1), (None, 32))
self.assertEqual(dense.get_output_shape_at(0), (None, 16))
self.assertEqual(dense.get_output_shape_at(1), (None, 16))
self.assertEqual(dense.get_input_mask_at(0), None)
self.assertEqual(dense.get_input_mask_at(1), None)
self.assertEqual(dense.get_output_mask_at(0), None)
self.assertEqual(dense.get_output_mask_at(1), None)
def test_multi_input_layer(self):
with self.test_session():
# test multi-input layer
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = keras.layers.concatenate([a_2, b_2], name='merge')
self.assertListEqual(merged.get_shape().as_list(), [None, 16 * 2])
merge_layer, merge_node_index, merge_tensor_index = merged._keras_history
self.assertEqual(merge_node_index, 0)
self.assertEqual(merge_tensor_index, 0)
self.assertEqual(len(merge_layer._inbound_nodes), 1)
self.assertEqual(len(merge_layer._outbound_nodes), 0)
self.assertEqual(len(merge_layer._inbound_nodes[0].input_tensors), 2)
self.assertEqual(len(merge_layer._inbound_nodes[0].inbound_layers), 2)
c = keras.layers.Dense(64, name='dense_2')(merged)
d = keras.layers.Dense(5, name='dense_3')(c)
model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
self.assertEqual(len(model.layers), 6)
output_shapes = model.compute_output_shape([(None, 32), (None, 32)])
self.assertListEqual(output_shapes[0].as_list(), [None, 64])
self.assertListEqual(output_shapes[1].as_list(), [None, 5])
self.assertListEqual(
model.compute_mask([a, b], [None, None]), [None, None])
# we don't check names of first 2 layers (inputs) because
# ordering of same-level layers is not fixed
self.assertListEqual([l.name for l in model.layers][2:],
['dense_1', 'merge', 'dense_2', 'dense_3'])
self.assertListEqual([l.name for l in model._input_layers],
['input_a', 'input_b'])
self.assertListEqual([l.name for l in model._output_layers],
['dense_2', 'dense_3'])
# actually run model
fn = keras.backend.function(model.inputs, model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)])
# test get_source_inputs
self.assertListEqual(keras.engine.get_source_inputs(c), [a, b])
# serialization / deserialization
json_config = model.to_json()
recreated_model = keras.models.model_from_json(json_config)
recreated_model.compile('rmsprop', 'mse')
self.assertListEqual([l.name for l in recreated_model.layers][2:],
['dense_1', 'merge', 'dense_2', 'dense_3'])
self.assertListEqual([l.name for l in recreated_model._input_layers],
['input_a', 'input_b'])
self.assertListEqual([l.name for l in recreated_model._output_layers],
['dense_2', 'dense_3'])
fn = keras.backend.function(recreated_model.inputs,
recreated_model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 64), (10, 5)])
def test_recursion(self):
with self.test_session():
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = keras.layers.concatenate([a_2, b_2], name='merge')
c = keras.layers.Dense(64, name='dense_2')(merged)
d = keras.layers.Dense(5, name='dense_3')(c)
model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
e = keras.layers.Input(shape=(32,), name='input_e')
f = keras.layers.Input(shape=(32,), name='input_f')
self.assertEqual(len(model.inputs), 2)
g, h = model([e, f])
self.assertEqual(len(model.inputs), 2)
self.assertEqual(g.name, 'model/dense_2/BiasAdd:0')
self.assertListEqual(g.get_shape().as_list(), c.get_shape().as_list())
self.assertListEqual(h.get_shape().as_list(), d.get_shape().as_list())
# test separate manipulation of different layer outputs
i = keras.layers.Dense(7, name='dense_4')(h)
final_model = keras.models.Model(
inputs=[e, f], outputs=[i, g], name='final')
self.assertEqual(len(final_model.inputs), 2)
self.assertEqual(len(final_model.outputs), 2)
self.assertEqual(len(final_model.layers), 4)
# we don't check names of first 2 layers (inputs) because
# ordering of same-level layers is not fixed
self.assertListEqual([layer.name for layer in final_model.layers][2:],
['model', 'dense_4'])
self.assertListEqual(
model.compute_mask([e, f], [None, None]), [None, None])
self.assertListEqual(
final_model.compute_output_shape([(10, 32), (10, 32)]), [(10, 7),
(10, 64)])
# run recursive model
fn = keras.backend.function(final_model.inputs, final_model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)])
# test serialization
model_config = final_model.get_config()
recreated_model = keras.models.Model.from_config(model_config)
fn = keras.backend.function(recreated_model.inputs,
recreated_model.outputs)
input_a_np = np.random.random((10, 32))
input_b_np = np.random.random((10, 32))
fn_outputs = fn([input_a_np, input_b_np])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 7), (10, 64)])
def test_multi_input_multi_output_recursion(self):
with self.test_session():
# test multi-input multi-output
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = keras.layers.concatenate([a_2, b_2], name='merge')
c = keras.layers.Dense(64, name='dense_2')(merged)
d = keras.layers.Dense(5, name='dense_3')(c)
model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
j = keras.layers.Input(shape=(32,), name='input_j')
k = keras.layers.Input(shape=(32,), name='input_k')
_, n = model([j, k])
o = keras.layers.Input(shape=(32,), name='input_o')
p = keras.layers.Input(shape=(32,), name='input_p')
q, _ = model([o, p])
self.assertListEqual(n.get_shape().as_list(), [None, 5])
self.assertListEqual(q.get_shape().as_list(), [None, 64])
s = keras.layers.concatenate([n, q], name='merge_nq')
self.assertListEqual(s.get_shape().as_list(), [None, 64 + 5])
# test with single output as 1-elem list
multi_io_model = keras.models.Model([j, k, o, p], [s])
fn = keras.backend.function(multi_io_model.inputs, multi_io_model.outputs)
fn_outputs = fn([
np.random.random((10, 32)), np.random.random((10, 32)),
np.random.random((10, 32)), np.random.random((10, 32))
])
self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])
# test with single output as tensor
multi_io_model = keras.models.Model([j, k, o, p], s)
fn = keras.backend.function(multi_io_model.inputs, multi_io_model.outputs)
fn_outputs = fn([
np.random.random((10, 32)), np.random.random((10, 32)),
np.random.random((10, 32)), np.random.random((10, 32))
])
# note that the output of the function will still be a 1-elem list
self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])
# test serialization
model_config = multi_io_model.get_config()
recreated_model = keras.models.Model.from_config(model_config)
fn = keras.backend.function(recreated_model.inputs,
recreated_model.outputs)
fn_outputs = fn([
np.random.random((10, 32)), np.random.random((10, 32)),
np.random.random((10, 32)), np.random.random((10, 32))
])
# note that the output of the function will still be a 1-elem list
self.assertListEqual([x.shape for x in fn_outputs], [(10, 69)])
config = model.get_config()
keras.models.Model.from_config(config)
model.summary()
json_str = model.to_json()
keras.models.model_from_json(json_str)
if yaml is not None:
yaml_str = model.to_yaml()
keras.models.model_from_yaml(yaml_str)
def test_invalid_graphs(self):
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = keras.layers.concatenate([a_2, b_2], name='merge')
c = keras.layers.Dense(64, name='dense_2')(merged)
d = keras.layers.Dense(5, name='dense_3')(c)
model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
# input is not an Input tensor
j = keras.layers.Input(shape=(32,), name='input_j')
j = keras.layers.Dense(32)(j)
k = keras.layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
with self.assertRaises(Exception):
keras.models.Model([j, k], [m, n])
# disconnected graph
j = keras.layers.Input(shape=(32,), name='input_j')
k = keras.layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
with self.assertRaises(Exception):
keras.models.Model([j], [m, n])
# redundant outputs
j = keras.layers.Input(shape=(32,), name='input_j')
k = keras.layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
keras.models.Model([j, k], [m, n, n])
# redundant inputs
j = keras.layers.Input(shape=(32,), name='input_j')
k = keras.layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
with self.assertRaises(Exception):
keras.models.Model([j, k, j], [m, n])
    # I have no idea what I'm doing: garbage as inputs/outputs
j = keras.layers.Input(shape=(32,), name='input_j')
k = keras.layers.Input(shape=(32,), name='input_k')
m, n = model([j, k])
with self.assertRaises(Exception):
keras.models.Model([j, k], [m, n, 0])
def test_raw_tf_compatibility(self):
# test calling layers/models on TF tensors
a = keras.layers.Input(shape=(32,), name='input_a')
b = keras.layers.Input(shape=(32,), name='input_b')
dense = keras.layers.Dense(16, name='dense_1')
a_2 = dense(a)
b_2 = dense(b)
merged = keras.layers.concatenate([a_2, b_2], name='merge')
c = keras.layers.Dense(64, name='dense_2')(merged)
d = keras.layers.Dense(5, name='dense_3')(c)
model = keras.models.Model(inputs=[a, b], outputs=[c, d], name='model')
j = keras.layers.Input(shape=(32,), name='input_j')
k = keras.layers.Input(shape=(32,), name='input_k')
self.assertEqual(len(model.inputs), 2)
m, n = model([j, k])
self.assertEqual(len(model.inputs), 2)
tf_model = keras.models.Model([j, k], [m, n])
j_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32))
k_tf = array_ops.placeholder(dtype=dtypes.float32, shape=(None, 32))
m_tf, n_tf = tf_model([j_tf, k_tf])
self.assertListEqual(m_tf.get_shape().as_list(), [None, 64])
self.assertListEqual(n_tf.get_shape().as_list(), [None, 5])
# test merge
keras.layers.concatenate([j_tf, k_tf], axis=1)
keras.layers.add([j_tf, k_tf])
# test tensor input
x = array_ops.placeholder(shape=(None, 2), dtype=dtypes.float32)
keras.layers.InputLayer(input_tensor=x)
x = keras.layers.Input(tensor=x)
keras.layers.Dense(2)(x)
def test_basic_masking(self):
a = keras.layers.Input(shape=(10, 32), name='input_a')
b = keras.layers.Masking()(a)
model = keras.models.Model(a, b)
self.assertEqual(model.output_mask.get_shape().as_list(), [None, 10])
def testMaskingSingleInput(self):
class MaskedLayer(keras.layers.Layer):
def call(self, inputs, mask=None):
if mask is not None:
return inputs * mask
return inputs
def compute_mask(self, inputs, mask=None):
return array_ops.ones_like(inputs)
if context.executing_eagerly():
a = constant_op.constant([2] * 32)
mask = constant_op.constant([0, 1] * 16)
a._keras_mask = mask
b = MaskedLayer().apply(a)
self.assertTrue(hasattr(b, '_keras_mask'))
self.assertAllEqual(
self.evaluate(array_ops.ones_like(mask)),
self.evaluate(getattr(b, '_keras_mask')))
self.assertAllEqual(self.evaluate(a * mask), self.evaluate(b))
else:
x = input_layer_lib.Input(shape=(32,))
y = MaskedLayer()(x) # pylint: disable=not-callable
network = network_lib.Network(x, y)
# test callability on Input
x_2 = input_layer_lib.Input(shape=(32,))
y_2 = network(x_2)
self.assertEqual(y_2.get_shape().as_list(), [None, 32])
# test callability on regular tensor
x_2 = array_ops.placeholder(dtype='float32', shape=(None, 32))
y_2 = network(x_2)
self.assertEqual(y_2.get_shape().as_list(), [None, 32])
def test_activity_regularization_with_model_composition(self):
def reg(x):
return math_ops.reduce_sum(x)
net_a_input = input_layer_lib.Input((2,))
net_a = net_a_input
net_a = keras.layers.Dense(2, kernel_initializer='ones',
use_bias=False,
activity_regularizer=reg)(net_a)
model_a = keras.Model([net_a_input], [net_a])
net_b_input = input_layer_lib.Input((2,))
net_b = model_a(net_b_input)
model_b = keras.Model([net_b_input], [net_b])
model_b.compile(optimizer='sgd', loss=None)
x = np.ones((1, 2))
loss = model_b.evaluate(x)
self.assertEqual(loss, 4.)
def test_layer_sharing_at_heterogenous_depth(self):
with self.test_session():
x_val = np.random.random((10, 5))
x = input_layer_lib.Input(shape=(5,))
a = keras.layers.Dense(5, name='A')
b = keras.layers.Dense(5, name='B')
output = a(b(a(b(x))))
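      # The two Dense layers are each applied twice at different depths; the round-trip below
      # checks that this layer sharing survives get_config()/from_config().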
m = keras.models.Model(x, output)
output_val = m.predict(x_val)
config = m.get_config()
weights = m.get_weights()
m2 = keras.models.Model.from_config(config)
m2.set_weights(weights)
output_val_2 = m2.predict(x_val)
self.assertAllClose(output_val, output_val_2, atol=1e-6)
def test_layer_sharing_at_heterogenous_depth_with_concat(self):
with self.test_session():
input_shape = (16, 9, 3)
input_layer = input_layer_lib.Input(shape=input_shape)
a = keras.layers.Dense(3, name='dense_A')
b = keras.layers.Dense(3, name='dense_B')
c = keras.layers.Dense(3, name='dense_C')
x1 = b(a(input_layer))
x2 = a(c(input_layer))
output = keras.layers.concatenate([x1, x2])
m = keras.models.Model(inputs=input_layer, outputs=output)
x_val = np.random.random((10, 16, 9, 3))
output_val = m.predict(x_val)
config = m.get_config()
weights = m.get_weights()
m2 = keras.models.Model.from_config(config)
m2.set_weights(weights)
output_val_2 = m2.predict(x_val)
self.assertAllClose(output_val, output_val_2, atol=1e-6)
def test_explicit_training_argument(self):
with self.test_session():
a = keras.layers.Input(shape=(2,))
b = keras.layers.Dropout(0.5)(a)
base_model = keras.models.Model(a, b)
a = keras.layers.Input(shape=(2,))
b = base_model(a, training=False)
model = keras.models.Model(a, b)
x = np.ones((100, 2))
y = np.ones((100, 2))
model.compile(optimizer='sgd', loss='mse')
loss = model.train_on_batch(x, y)
self.assertEqual(loss, 0) # In inference mode, output is equal to input.
a = keras.layers.Input(shape=(2,))
b = base_model(a, training=True)
model = keras.models.Model(a, b)
preds = model.predict(x)
self.assertEqual(np.min(preds), 0.) # At least one unit was dropped.
def test_multi_output_model_with_none_masking(self):
with self.test_session():
def func(x):
return [x * 0.2, x * 0.3]
def output_shape(input_shape):
return [input_shape, input_shape]
i = keras.layers.Input(shape=(3, 2, 1))
o = keras.layers.Lambda(function=func, output_shape=output_shape)(i)
self.assertEqual(keras.backend.int_shape(o[0]), (None, 3, 2, 1))
self.assertEqual(keras.backend.int_shape(o[1]), (None, 3, 2, 1))
o = keras.layers.add(o)
model = keras.Model(i, o)
i2 = keras.layers.Input(shape=(3, 2, 1))
o2 = model(i2)
model2 = keras.Model(i2, o2)
x = np.random.random((4, 3, 2, 1))
out = model2.predict(x)
assert out.shape == (4, 3, 2, 1)
self.assertAllClose(out, x * 0.2 + x * 0.3, atol=1e-4)
class DeferredModeTest(test.TestCase):
def testDeferredTensorAttributes(self):
x = base_layer.DeferredTensor(shape=(None, 2),
dtype='float32',
name='x')
self.assertEqual(str(x),
'DeferredTensor(\'x\', shape=(?, 2), dtype=float32)')
self.assertEqual(repr(x),
'<DeferredTensor \'x\' shape=(?, 2) dtype=float32>')
@test_util.run_in_graph_and_eager_modes()
def testSimpleNetworkBuilding(self):
inputs = input_layer_lib.Input(shape=(32,))
if context.executing_eagerly():
self.assertIsInstance(inputs, base_layer.DeferredTensor)
self.assertEqual(inputs.dtype.name, 'float32')
self.assertEqual(inputs.shape.as_list(), [None, 32])
x = keras.layers.Dense(2)(inputs)
if context.executing_eagerly():
self.assertIsInstance(x, base_layer.DeferredTensor)
self.assertEqual(x.dtype.name, 'float32')
self.assertEqual(x.shape.as_list(), [None, 2])
outputs = keras.layers.Dense(4)(x)
network = network_lib.Network(inputs, outputs)
self.assertIsInstance(network, network_lib.Network)
if context.executing_eagerly():
# It should be possible to call such a network on EagerTensors.
inputs = constant_op.constant(
np.random.random((10, 32)).astype('float32'))
outputs = network(inputs)
self.assertEqual(outputs.shape.as_list(), [10, 4])
@test_util.run_in_graph_and_eager_modes()
def testMultiIONetworkbuilding(self):
input_a = input_layer_lib.Input(shape=(32,))
input_b = input_layer_lib.Input(shape=(16,))
a = keras.layers.Dense(16)(input_a)
class AddLayer(keras.layers.Layer):
def call(self, inputs):
return inputs[0] + inputs[1]
def compute_output_shape(self, input_shape):
return input_shape[0]
c = AddLayer()([a, input_b]) # pylint: disable=not-callable
c = keras.layers.Dense(2)(c)
network = network_lib.Network([input_a, input_b], [a, c])
if context.executing_eagerly():
a_val = constant_op.constant(
np.random.random((10, 32)).astype('float32'))
b_val = constant_op.constant(
np.random.random((10, 16)).astype('float32'))
outputs = network([a_val, b_val])
self.assertEqual(len(outputs), 2)
self.assertEqual(outputs[0].shape.as_list(), [10, 16])
self.assertEqual(outputs[1].shape.as_list(), [10, 2])
class GraphUtilsTest(test.TestCase):
def testGetReachableFromInputs(self):
with self.test_session():
pl_1 = array_ops.placeholder(shape=None, dtype='float32')
pl_2 = array_ops.placeholder(shape=None, dtype='float32')
pl_3 = array_ops.placeholder(shape=None, dtype='float32')
x_1 = pl_1 + pl_2
x_2 = pl_2 * 2
x_3 = pl_3 + 1
x_4 = x_1 + x_2
x_5 = x_3 * pl_1
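      # Reachability includes the ops that produce each reachable tensor, not just the tensors.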
self.assertEqual(
keras.utils.tf_utils.get_reachable_from_inputs([pl_1]),
{pl_1, x_1, x_4, x_5, x_1.op, x_4.op, x_5.op})
self.assertEqual(
keras.utils.tf_utils.get_reachable_from_inputs([pl_1, pl_2]),
{pl_1, pl_2, x_1, x_2, x_4, x_5, x_1.op, x_2.op, x_4.op, x_5.op})
self.assertEqual(
keras.utils.tf_utils.get_reachable_from_inputs([pl_3]),
{pl_3, x_3, x_5, x_3.op, x_5.op})
self.assertEqual(
keras.utils.tf_utils.get_reachable_from_inputs([x_3]),
{x_3, x_5, x_5.op})
if __name__ == '__main__':
test.main()
| 37.695351
| 80
| 0.652611
|
66757a9498d7bb876cfd2f548886a2ace8af3d09
| 2,143
|
py
|
Python
|
cogs/userinfo_ja.py
|
PLM912/Keter
|
0dc24d33732fccf36406010e2f8e1519ecdfa4b1
|
[
"MIT"
] | 6
|
2020-09-01T08:20:49.000Z
|
2021-01-21T15:23:33.000Z
|
cogs/userinfo_ja.py
|
PLM912/Keter
|
0dc24d33732fccf36406010e2f8e1519ecdfa4b1
|
[
"MIT"
] | 3
|
2020-11-04T23:25:38.000Z
|
2021-01-21T17:01:20.000Z
|
cogs/userinfo_ja.py
|
PLM912/Keter
|
0dc24d33732fccf36406010e2f8e1519ecdfa4b1
|
[
"MIT"
] | 11
|
2020-09-01T08:04:17.000Z
|
2021-02-07T13:12:16.000Z
|
import discord
from discord.ext import commands
from evs import default
class Userinfo_Ja(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.config = default.get("config.json")
# Commands
@commands.command(aliases=['ユーザー情報', 'ユーザ情報', 'ユザー情報', 'ユザ情報'])
async def __userinfo(self, ctx):
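        # If any users are mentioned, send a profile embed for each of them;
        # otherwise fall back to the command author's own profile.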
        if ctx.message.mentions:
for user in ctx.message.mentions:
embed = discord.Embed(title="**" + user.name + "さんのプロフィール", description="",
color=0xeff0f1)
embed.add_field(name="**ID**",
value=user.id,
inline=True)
embed.add_field(name="**ニックネーム**",
value=user.display_name,
inline=True)
embed.add_field(name="**ステイタス**",
value=user.status,
inline=True)
embed.add_field(name="**メンション**",
value="<@" + str(user.id) + ">",
inline=True)
embed.set_thumbnail(url=user.avatar_url)
await ctx.send(embed=embed)
else:
embed = discord.Embed(title=ctx.author.name + "さんのプロフィール", description="",
color=0xeff0f1)
embed.add_field(name="**ID**",
value=ctx.author.id,
inline=True)
embed.add_field(name="**ニックネーム**",
value=ctx.author.display_name,
inline=True)
embed.add_field(name="**ステイタス**",
value=ctx.author.status,
inline=True)
embed.add_field(name="**メンション**",
value="<@" + str(ctx.author.id) + ">",
inline=True)
embed.set_thumbnail(url=ctx.author.avatar_url)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Userinfo_Ja(bot))
| 42.019608
| 91
| 0.451237
|
6d7e630f7dba7a532391d6d2371dea495b65b56e
| 2,410
|
py
|
Python
|
sharpy-sc2/sharpy/plans/acts/protoss/chrono_unit.py
|
etzhang416/sharpy-bot-eco
|
badc68ad1aa903dfa1bbc33f6225608e433ff353
|
[
"Unlicense"
] | null | null | null |
sharpy-sc2/sharpy/plans/acts/protoss/chrono_unit.py
|
etzhang416/sharpy-bot-eco
|
badc68ad1aa903dfa1bbc33f6225608e433ff353
|
[
"Unlicense"
] | null | null | null |
sharpy-sc2/sharpy/plans/acts/protoss/chrono_unit.py
|
etzhang416/sharpy-bot-eco
|
badc68ad1aa903dfa1bbc33f6225608e433ff353
|
[
"Unlicense"
] | null | null | null |
import warnings
from sc2 import UnitTypeId, AbilityId
from sc2.ids.buff_id import BuffId
from sc2.unit import Unit, UnitOrder
from sharpy.knowledges import Knowledge
from sharpy.plans.acts.act_base import ActBase
class ChronoUnit(ActBase):
# Use Chronoboost on unit production
def __init__(self, name: UnitTypeId, from_building: UnitTypeId, count: int = 0):
"""
Chrono boosts unit production.
@param name: Unit type for which to chronoboost
@param from_building: Which building to chrono
@param count: Amount of times to cast chronoboost, use 0 for infinite
"""
assert name is not None and isinstance(name, UnitTypeId)
assert from_building is not None and isinstance(from_building, UnitTypeId)
self.unit_type = name
self.from_building = from_building
self.count = count
self.casted = 0
super().__init__()
async def start(self, knowledge: "Knowledge"):
await super().start(knowledge)
unit = self.ai._game_data.units[self.unit_type.value]
self.creation_ability = unit.creation_ability.id
async def execute(self) -> bool:
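        # Look for a production structure of the requested type that is currently training
        # the requested unit, then boost it from any Nexus with chronoboost available.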
        if self.count and self.casted >= self.count:
            # The requested number of chronoboosts has been cast (count == 0 means unlimited).
            return True
for target in self.cache.own(self.from_building).ready: # type: Unit
if target.orders and target.orders[0].ability.id == self.creation_ability:
# boost here!
if not target.has_buff(BuffId.CHRONOBOOSTENERGYCOST):
for nexus in self.cache.own(UnitTypeId.NEXUS):
if self.cd_manager.is_ready(
nexus.tag, AbilityId.EFFECT_CHRONOBOOSTENERGYCOST
) and self.allow_new_action(nexus):
self.do(nexus(AbilityId.EFFECT_CHRONOBOOSTENERGYCOST, target))
self.print(f"Chrono {self.creation_ability.name}")
self.casted += 1
return True # TODO: better solution for real time, to prevent multiple duplicate chronos
return True # Never block
class ChronoUnitProduction(ChronoUnit):
def __init__(self, name: UnitTypeId, from_building: UnitTypeId):
warnings.warn("'ChronoUnitProduction' is deprecated, use 'ChronoUnit' instead", DeprecationWarning, 2)
super().__init__(name, from_building)
| 43.035714
| 117
| 0.645643
|
cec860a2787b03b552074aabd4aba8f77808e8a4
| 380
|
py
|
Python
|
Dependencies/gyp-master/test/actions/src/subdir3/generate_main.py
|
knight666/exlibris
|
b21b46e0c84e5c4f81f8048022cda88e7bb3dca2
|
[
"MIT"
] | null | null | null |
Dependencies/gyp-master/test/actions/src/subdir3/generate_main.py
|
knight666/exlibris
|
b21b46e0c84e5c4f81f8048022cda88e7bb3dca2
|
[
"MIT"
] | null | null | null |
Dependencies/gyp-master/test/actions/src/subdir3/generate_main.py
|
knight666/exlibris
|
b21b46e0c84e5c4f81f8048022cda88e7bb3dca2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
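# Writes a minimal "hello" C program to the path passed as the first command-line argument.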
contents = """
#include <stdio.h>
int main(void)
{
printf("Hello from generate_main.py\\n");
return 0;
}
"""
open(sys.argv[1], 'w').write(contents)
sys.exit(0)
| 17.272727
| 73
| 0.644737
|
f27d22249289a0544d33e9458cb99727c6602acf
| 315
|
py
|
Python
|
Programas basicos/ex005.py
|
RuanSampaio-code/Python-Journey
|
678bfc5e3df22c3082b00e2b6e1e29d4814d470b
|
[
"MIT"
] | null | null | null |
Programas basicos/ex005.py
|
RuanSampaio-code/Python-Journey
|
678bfc5e3df22c3082b00e2b6e1e29d4814d470b
|
[
"MIT"
] | null | null | null |
Programas basicos/ex005.py
|
RuanSampaio-code/Python-Journey
|
678bfc5e3df22c3082b00e2b6e1e29d4814d470b
|
[
"MIT"
] | null | null | null |
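# Beginner exercise: read an integer and print its predecessor and successor (user-facing text is in Portuguese).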
print('==== PROGRAMA PARA DESCOBRIR ANTECESSOR E SUCESSOR DE UM NUMERO ====')
num = int(input('\033[30;7m Digite um número:\033[m'))
anter = num - 1
suces = num + 1
print(' O número \033[1m{}\033[m tem como o antecessor '
'o numero \033[1m{}\033[m e sucessor o número \033[1;3m{}.'.format(num, anter,suces,))
| 45
| 92
| 0.650794
|
839e837fd4b5d0faddbb2bd057ed8dd5d1436de2
| 960
|
py
|
Python
|
config.py
|
mellonixie/Cartoonization-Style-Transfer
|
3f8720df23bb9d846133a4d523c8dbd37abc0446
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
mellonixie/Cartoonization-Style-Transfer
|
3f8720df23bb9d846133a4d523c8dbd37abc0446
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
mellonixie/Cartoonization-Style-Transfer
|
3f8720df23bb9d846133a4d523c8dbd37abc0446
|
[
"Apache-2.0"
] | 1
|
2021-09-01T13:31:10.000Z
|
2021-09-01T13:31:10.000Z
|
import cv2
from torchvision import transforms, models
style_dict = {}
style_dict['dog'] = "images/anime3.png"
real_img = "images/golder-retriever-puppy.jpeg"
net = cv2.dnn.readNet("YOLO/yolov3.weights", "YOLO/yolov3.cfg")
classes = []
with open("YOLO/coco.names", "r") as f:
classes = [line.strip() for line in f.readlines()]
print("YOLO donee")
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
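# Names of the network's unconnected output layers (the YOLOv3 detection heads); the i[0]
# indexing assumes the nested index format returned by older OpenCV versions.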
## GET WEIGHTS
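# Per-layer weights for the style loss (typically Gram matrices of VGG feature maps);
# earlier layers are weighted more heavily than deeper ones.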
style_weights = {'conv1_1': 1.,
'conv2_1': 0.75,
'conv3_1': 0.2,
'conv4_1': 0.2,
'conv5_1': 0.2}
content_weight = 1 # alpha
style_weight = 1e9 # beta
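# Input preprocessing: resize to 256x256 and normalize with the ImageNet mean/std used by
# torchvision's pretrained models.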
in_transform = transforms.Compose([transforms.Resize((256,256)),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406),
(0.229, 0.224, 0.225))])
| 30
| 80
| 0.577083
|
da7e399e420e194e82b38bec5a1cfdac66800fb7
| 35,937
|
py
|
Python
|
absl/testing/tests/xml_reporter_test.py
|
yuhc/abseil-py
|
1fe0726fa42bc687c36cb6eba3a6d5dfd5173013
|
[
"Apache-2.0"
] | null | null | null |
absl/testing/tests/xml_reporter_test.py
|
yuhc/abseil-py
|
1fe0726fa42bc687c36cb6eba3a6d5dfd5173013
|
[
"Apache-2.0"
] | null | null | null |
absl/testing/tests/xml_reporter_test.py
|
yuhc/abseil-py
|
1fe0726fa42bc687c36cb6eba3a6d5dfd5173013
|
[
"Apache-2.0"
] | 1
|
2019-09-09T21:20:17.000Z
|
2019-09-09T21:20:17.000Z
|
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import subprocess
import sys
import tempfile
import threading
import time
import unittest
from xml.etree import ElementTree
from xml.parsers import expat
from absl import logging
from absl.testing import _bazelize_command
from absl.testing import absltest
from absl.testing import parameterized
from absl.testing import xml_reporter
from absl.third_party import unittest3_backport
import mock
import six
class StringIOWriteLn(six.StringIO):
def writeln(self, line):
self.write(line + '\n')
class MockTest(absltest.TestCase):
failureException = AssertionError
def __init__(self, name):
super(MockTest, self).__init__()
self.name = name
def id(self):
return self.name
def runTest(self):
return
def shortDescription(self):
return "This is this test's description."
# str(exception_type) is different between Python 2 and 3.
def xml_escaped_exception_type(exception_type):
return xml_reporter._escape_xml_attr(str(exception_type))
OUTPUT_STRING = '\n'.join([
r'<\?xml version="1.0"\?>',
'<testsuites name="" tests="%(tests)d" failures="%(failures)d"'
' errors="%(errors)d" time="%(run_time).1f">',
'<testsuite name="%(suite_name)s" tests="%(tests)d"'
' failures="%(failures)d" errors="%(errors)d" time="%(run_time).1f">',
' <testcase name="%(test_name)s" status="%(status)s" result="%(result)s"'
' time="%(run_time).1f" classname="%(classname)s">%(message)s',
' </testcase>',
'</testsuite>',
'</testsuites>'])
FAILURE_MESSAGE = r"""
<failure message="e" type="{}"><!\[CDATA\[Traceback \(most recent call last\):
File ".*xml_reporter_test\.py", line \d+, in get_sample_failure
raise AssertionError\(\'e\'\)
AssertionError: e
\]\]></failure>""".format(xml_escaped_exception_type(AssertionError))
ERROR_MESSAGE = r"""
<error message="invalid literal for int\(\) with base 10: (')?a(')?" type="{}"><!\[CDATA\[Traceback \(most recent call last\):
File ".*xml_reporter_test\.py", line \d+, in get_sample_error
int\('a'\)
ValueError: invalid literal for int\(\) with base 10: '?a'?
\]\]></error>""".format(xml_escaped_exception_type(ValueError))
UNICODE_MESSAGE = r"""
<%s message="{0}" type="{1}"><!\[CDATA\[Traceback \(most recent call last\):
File ".*xml_reporter_test\.py", line \d+, in get_unicode_sample_failure
raise AssertionError\(u'\\xe9'\)
AssertionError: {0}
\]\]></%s>""".format(
r'\\xe9' if six.PY2 else r'\xe9',
xml_escaped_exception_type(AssertionError))
NEWLINE_MESSAGE = r"""
<%s message="{0}" type="{1}"><!\[CDATA\[Traceback \(most recent call last\):
File ".*xml_reporter_test\.py", line \d+, in get_newline_message_sample_failure
raise AssertionError\(\'{2}'\)
AssertionError: {3}
\]\]></%s>""".format(
    'new&#xA;line',
xml_escaped_exception_type(AssertionError),
r'new\\nline',
'new\nline')
UNEXPECTED_SUCCESS_MESSAGE = '\n'.join([
'',
r' <error message="" type=""><!\[CDATA\[Test case '
r'__main__.MockTest.unexpectedly_passing_test should have failed, '
r'but passed.\]\]></error>'])
UNICODE_ERROR_MESSAGE = UNICODE_MESSAGE % ('error', 'error')
NEWLINE_ERROR_MESSAGE = NEWLINE_MESSAGE % ('error', 'error')
class TextAndXMLTestResultTest(absltest.TestCase):
def setUp(self):
self.stream = StringIOWriteLn()
self.xml_stream = six.StringIO()
def _make_result(self, times):
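    # Builds a result whose timer is stubbed with the given sequence of timestamps,
    # making the reported test durations deterministic.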
timer = mock.Mock()
timer.side_effect = times
return xml_reporter._TextAndXMLTestResult(self.xml_stream, self.stream,
'foo', 0, timer)
def _assert_match(self, regex, output):
self.assertRegex(output, regex)
def _assert_valid_xml(self, xml_output):
try:
expat.ParserCreate().Parse(xml_output)
except expat.ExpatError as e:
raise AssertionError('Bad XML output: {}\n{}'.format(e, xml_output))
def _simulate_error_test(self, test, result):
result.startTest(test)
result.addError(test, self.get_sample_error())
result.stopTest(test)
def _simulate_failing_test(self, test, result):
result.startTest(test)
result.addFailure(test, self.get_sample_failure())
result.stopTest(test)
def _simulate_passing_test(self, test, result):
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
def test_with_passing_test(self):
start_time = 0
end_time = 2
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.passing_test')
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
'test_name': 'passing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ''}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_passing_subtest(self):
start_time = 0
end_time = 2
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.passing_test')
if six.PY3:
subtest = unittest.case._SubTest(test, 'msg', None)
else:
subtest = unittest3_backport.case._SubTest(test, 'msg', None)
result.startTest(test)
result.addSubTest(test, subtest, None)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
'test_name': r'passing_test \[msg\]',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ''}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_passing_subtest_with_dots_in_parameter_name(self):
start_time = 0
end_time = 2
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.passing_test')
if six.PY3:
subtest = unittest.case._SubTest(test, 'msg', {'case': 'a.b.c'})
else:
# In Python 3 subTest uses a ChainMap to hold the parameters, but ChainMap
# does not exist in Python 2, so a list of dict is used to simulate the
# behavior of a ChainMap. This is why a list is provided as a parameter
# here.
subtest = unittest3_backport.case._SubTest(test, 'msg',
[{'case': 'a.b.c'}])
result.startTest(test)
result.addSubTest(test, subtest, None)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
'test_name':
r'passing_test \[msg\] \(case='a.b.c'\)',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ''}
self._assert_match(expected_re, self.xml_stream.getvalue())
def get_sample_error(self):
try:
int('a')
except ValueError:
error_values = sys.exc_info()
return error_values
def get_sample_failure(self):
try:
raise AssertionError('e')
except AssertionError:
error_values = sys.exc_info()
return error_values
def get_newline_message_sample_failure(self):
try:
raise AssertionError('new\nline')
except AssertionError:
error_values = sys.exc_info()
return error_values
def get_unicode_sample_failure(self):
try:
raise AssertionError(u'\xe9')
except AssertionError:
error_values = sys.exc_info()
return error_values
def get_terminal_escape_sample_failure(self):
try:
raise AssertionError('\x1b')
except AssertionError:
error_values = sys.exc_info()
return error_values
def test_with_failing_test(self):
start_time = 10
end_time = 20
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTest(test)
result.addFailure(test, self.get_sample_failure())
result.stopTest(test)
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 1,
'errors': 0,
'run_time': run_time,
'test_name': 'failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': FAILURE_MESSAGE}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_failing_subtest(self):
start_time = 10
end_time = 20
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
if six.PY3:
subtest = unittest.case._SubTest(test, 'msg', None)
else:
subtest = unittest3_backport.case._SubTest(test, 'msg', None)
result.startTest(test)
result.addSubTest(test, subtest, self.get_sample_failure())
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 1,
'errors': 0,
'run_time': run_time,
'test_name': r'failing_test \[msg\]',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': FAILURE_MESSAGE}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_error_test(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTest(test)
result.addError(test, self.get_sample_error())
result.stopTest(test)
result.printErrors()
xml = self.xml_stream.getvalue()
self._assert_valid_xml(xml)
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 1,
'run_time': run_time,
'test_name': 'failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ERROR_MESSAGE}
self._assert_match(expected_re, xml)
def test_with_error_subtest(self):
start_time = 10
end_time = 20
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.error_test')
if six.PY3:
subtest = unittest.case._SubTest(test, 'msg', None)
else:
subtest = unittest3_backport.case._SubTest(test, 'msg', None)
result.startTest(test)
result.addSubTest(test, subtest, self.get_sample_error())
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 1,
'run_time': run_time,
'test_name': r'error_test \[msg\]',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ERROR_MESSAGE}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_fail_and_error_test(self):
"""Tests a failure and subsequent error within a single result."""
start_time = 123
end_time = 456
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTest(test)
result.addFailure(test, self.get_sample_failure())
# This could happen in tearDown
result.addError(test, self.get_sample_error())
result.stopTest(test)
result.printErrors()
xml = self.xml_stream.getvalue()
self._assert_valid_xml(xml)
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 1, # Only the failure is tallied (because it was first).
'errors': 0,
'run_time': run_time,
'test_name': 'failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
# Messages from failure and error should be concatenated in order.
'message': FAILURE_MESSAGE+ERROR_MESSAGE}
self._assert_match(expected_re, xml)
def test_with_error_and_fail_test(self):
"""Tests an error and subsequent failure within a single result."""
start_time = 123
end_time = 456
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTest(test)
result.addError(test, self.get_sample_error())
result.addFailure(test, self.get_sample_failure())
result.stopTest(test)
result.printErrors()
xml = self.xml_stream.getvalue()
self._assert_valid_xml(xml)
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 1, # Only the error is tallied (because it was first).
'run_time': run_time,
'test_name': 'failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
# Messages from error and failure should be concatenated in order.
'message': ERROR_MESSAGE+FAILURE_MESSAGE}
self._assert_match(expected_re, xml)
def test_with_newline_error_test(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTest(test)
result.addError(test, self.get_newline_message_sample_failure())
result.stopTest(test)
result.printErrors()
xml = self.xml_stream.getvalue()
self._assert_valid_xml(xml)
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 1,
'run_time': run_time,
'test_name': 'failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': NEWLINE_ERROR_MESSAGE} + '\n'
self._assert_match(expected_re, xml)
def test_with_unicode_error_test(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTest(test)
result.addError(test, self.get_unicode_sample_failure())
result.stopTest(test)
result.printErrors()
xml = self.xml_stream.getvalue()
self._assert_valid_xml(xml)
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 1,
'run_time': run_time,
'test_name': 'failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': UNICODE_ERROR_MESSAGE}
self._assert_match(expected_re, xml)
def test_with_terminal_escape_error(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTest(test)
result.addError(test, self.get_terminal_escape_sample_failure())
result.stopTest(test)
result.printErrors()
self._assert_valid_xml(self.xml_stream.getvalue())
def test_with_expected_failure_test(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, end_time))
error_values = ''
try:
raise RuntimeError('Test expectedFailure')
except RuntimeError:
error_values = sys.exc_info()
test = MockTest('__main__.MockTest.expected_failing_test')
result.startTest(test)
result.addExpectedFailure(test, error_values)
result.stopTest(test)
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
'test_name': 'expected_failing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ''}
self._assert_match(re.compile(expected_re, re.DOTALL),
self.xml_stream.getvalue())
def test_with_unexpected_success_error_test(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.unexpectedly_passing_test')
result.startTest(test)
result.addUnexpectedSuccess(test)
result.stopTest(test)
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 1,
'run_time': run_time,
'test_name': 'unexpectedly_passing_test',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': UNEXPECTED_SUCCESS_MESSAGE}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_skipped_test(self):
start_time = 100
end_time = 100
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.skipped_test_with_reason')
result.startTest(test)
result.addSkip(test, 'b"r')
result.stopTest(test)
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
'test_name': 'skipped_test_with_reason',
'classname': '__main__.MockTest',
'status': 'notrun',
'result': 'suppressed',
'message': ''}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_suite_time(self):
start_time1 = 100
end_time1 = 200
start_time2 = 400
end_time2 = 700
name = '__main__.MockTest.failing_test'
result = self._make_result((start_time1, end_time1, start_time2, end_time2))
test = MockTest('%s1' % name)
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
test = MockTest('%s2' % name)
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
result.printErrors()
run_time1 = end_time1 - start_time1
run_time2 = end_time2 - start_time2
expected_prefix = """<?xml version="1.0"?>
<testsuites name="" tests="2" failures="0" errors="0" time="%.1f">
<testsuite name="MockTest" tests="2" failures="0" errors="0" time="%.1f">
""" % (run_time1 + run_time2, run_time1 + run_time2)
self.failUnless(self.xml_stream.getvalue().startswith(expected_prefix))
def test_with_no_suite_name(self):
start_time = 1000
end_time = 1200
result = self._make_result((start_time, end_time))
test = MockTest('__main__.MockTest.bad_name')
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name': 'MockTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
'test_name': 'bad_name',
'classname': '__main__.MockTest',
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ''}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_unnamed_parameterized_testcase(self):
"""Test unnamed parameterized test cases.
Unnamed parameterized test cases might have non-alphanumeric characters in
their test method names. This test ensures xml_reporter handles them
correctly.
"""
class ParameterizedTest(parameterized.TestCase):
@parameterized.parameters(('a (b.c)',))
def test_prefix(self, case):
self.assertTrue(case.startswith('a'))
start_time = 1000
end_time = 1200
result = self._make_result((start_time, end_time))
test = ParameterizedTest(methodName='test_prefix0')
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
result.printErrors()
run_time = end_time - start_time
classname = xml_reporter._escape_xml_attr(
unittest.util.strclass(test.__class__))
expected_re = OUTPUT_STRING % {
'suite_name': 'ParameterizedTest',
'tests': 1,
'failures': 0,
'errors': 0,
'run_time': run_time,
'test_name': re.escape('test_prefix('a (b.c)')'),
'classname': classname,
'status': 'run',
'result': 'completed',
'attributes': '',
'message': ''}
self._assert_match(expected_re, self.xml_stream.getvalue())
def teststop_test_without_pending_test(self):
end_time = 1200
result = self._make_result((end_time,))
test = MockTest('__main__.MockTest.bad_name')
result.stopTest(test)
# Just verify that this doesn't crash
def test_text_and_xmltest_runner(self):
runner = xml_reporter.TextAndXMLTestRunner(self.xml_stream, self.stream,
'foo', 1)
result1 = runner._makeResult()
result2 = xml_reporter._TextAndXMLTestResult(None, None, None, 0, None)
self.failUnless(type(result1) is type(result2))
def test_timing_with_time_stub(self):
"""Make sure that timing is correct even if time.time is stubbed out."""
try:
saved_time = time.time
time.time = lambda: -1
reporter = xml_reporter._TextAndXMLTestResult(self.xml_stream,
self.stream,
'foo', 0)
test = MockTest('bar')
reporter.startTest(test)
self.failIf(reporter.start_time == -1)
finally:
time.time = saved_time
def test_concurrent_add_and_delete_pending_test_case_result(self):
"""Make sure adding/deleting pending test case results are thread safe."""
result = xml_reporter._TextAndXMLTestResult(None, self.stream, None, 0,
None)
def add_and_delete_pending_test_case_result(test_name):
test = MockTest(test_name)
result.addSuccess(test)
result.delete_pending_test_case_result(test)
for i in range(50):
add_and_delete_pending_test_case_result('add_and_delete_test%s' % i)
self.assertEqual(result.pending_test_case_results, {})
def test_concurrent_test_runs(self):
"""Make sure concurrent test runs do not race each other."""
num_passing_tests = 20
num_failing_tests = 20
num_error_tests = 20
total_num_tests = num_passing_tests + num_failing_tests + num_error_tests
times = [i for i in range(2*total_num_tests)]
result = self._make_result(times)
threads = []
names = []
for i in range(num_passing_tests):
name = 'passing_concurrent_test_%s' % i
names.append(name)
test_name = '__main__.MockTest.%s' % name
# xml_reporter uses id(test) as the test identifier.
# In a real testing scenario, all the test instances are created before
# running them. So all ids will be unique.
# We must do the same here: create test instance beforehand.
test = MockTest(test_name)
threads.append(threading.Thread(
target=self._simulate_passing_test, args=(test, result)))
for i in range(num_failing_tests):
name = 'failing_concurrent_test_%s' % i
names.append(name)
test_name = '__main__.MockTest.%s' % name
test = MockTest(test_name)
threads.append(threading.Thread(
target=self._simulate_failing_test, args=(test, result)))
for i in range(num_error_tests):
name = 'error_concurrent_test_%s' % i
names.append(name)
test_name = '__main__.MockTest.%s' % name
test = MockTest(test_name)
threads.append(threading.Thread(
target=self._simulate_error_test, args=(test, result)))
for t in threads:
t.start()
for t in threads:
t.join()
result.printErrors()
tests_not_in_xml = []
for tn in names:
if tn not in self.xml_stream.getvalue():
tests_not_in_xml.append(tn)
msg = ('Expected xml_stream to contain all test %s results, but %s tests '
'are missing. List of missing tests: %s' % (
total_num_tests, len(tests_not_in_xml), tests_not_in_xml))
self.assertEqual([], tests_not_in_xml, msg)
def test_add_failure_during_stop_test(self):
"""Tests an addFailure() call from within a stopTest() call stack."""
result = self._make_result((0, 2))
test = MockTest('__main__.MockTest.failing_test')
result.startTest(test)
# Replace parent stopTest method from unittest3_backport.TextTestResult with
# a version that calls self.addFailure().
with mock.patch.object(
unittest3_backport.TextTestResult,
'stopTest',
side_effect=lambda t: result.addFailure(t, self.get_sample_failure())):
# Run stopTest in a separate thread since we are looking to verify that
# it does not deadlock, and would otherwise prevent the test from
# completing.
stop_test_thread = threading.Thread(target=result.stopTest, args=(test,))
stop_test_thread.daemon = True
stop_test_thread.start()
stop_test_thread.join(10.0)
self.assertFalse(stop_test_thread.is_alive(),
'result.stopTest(test) call failed to complete')
class XMLTest(absltest.TestCase):
def test_escape_xml(self):
self.assertEqual(xml_reporter._escape_xml_attr('"Hi" <\'>\t\r\n'),
'"Hi" <'>	
')
class XmlReporterFixtureTest(absltest.TestCase):
def _run_test_and_get_xml(self, flag):
"""Runs xml_reporter_helper_test and returns an Element instance.
Runs xml_reporter_helper_test in a new process so that it can
exercise the entire test infrastructure, and easily test issues in
the test fixture.
Args:
flag: flag to pass to xml_reporter_helper_test
Returns:
The Element instance of the XML output.
"""
xml_fhandle, xml_fname = tempfile.mkstemp()
os.close(xml_fhandle)
try:
binary_name = 'absl/testing/tests/xml_reporter_helper_test'
args = [_bazelize_command.get_executable_path(binary_name),
flag, '--xml_output_file=%s' % xml_fname]
ret = subprocess.call(args)
self.assertNotEqual(ret, 0)
xml = ElementTree.parse(xml_fname).getroot()
finally:
os.remove(xml_fname)
return xml
def _run_test(self, flag, num_errors, num_failures, suites):
xml_fhandle, xml_fname = tempfile.mkstemp()
os.close(xml_fhandle)
try:
binary_name = 'absl/testing/tests/xml_reporter_helper_test'
args = [_bazelize_command.get_executable_path(binary_name),
flag, '--xml_output_file=%s' % xml_fname]
ret = subprocess.call(args)
self.assertNotEqual(ret, 0)
xml = ElementTree.parse(xml_fname).getroot()
logging.info('xml output is:\n%s', ElementTree.tostring(xml))
finally:
os.remove(xml_fname)
self.assertEqual(int(xml.attrib['errors']), num_errors)
self.assertEqual(int(xml.attrib['failures']), num_failures)
self.assertLen(xml, len(suites))
actual_suites = sorted(
xml.findall('testsuite'), key=lambda x: x.attrib['name'])
suites = sorted(suites, key=lambda x: x['name'])
for actual_suite, expected_suite in zip(actual_suites, suites):
self.assertEqual(actual_suite.attrib['name'], expected_suite['name'])
self.assertLen(actual_suite, len(expected_suite['cases']))
actual_cases = sorted(actual_suite.findall('testcase'),
key=lambda x: x.attrib['name'])
expected_cases = sorted(expected_suite['cases'], key=lambda x: x['name'])
for actual_case, expected_case in zip(actual_cases, expected_cases):
self.assertEqual(actual_case.attrib['name'], expected_case['name'])
self.assertEqual(actual_case.attrib['classname'],
expected_case['classname'])
if 'error' in expected_case:
actual_error = actual_case.find('error')
self.assertEqual(actual_error.attrib['message'],
expected_case['error'])
if 'failure' in expected_case:
actual_failure = actual_case.find('failure')
self.assertEqual(actual_failure.attrib['message'],
expected_case['failure'])
return xml
def _test_for_error(self, flag, message):
"""Run the test and look for an Error with the specified message."""
    # _run_test_and_get_xml runs the helper test in a subprocess and already asserts
    # a non-zero exit code.
    xml = self._run_test_and_get_xml(flag)
self.assertEqual(int(xml.attrib['errors']), 1)
self.assertEqual(int(xml.attrib['failures']), 0)
for msg in xml.iter('error'):
if msg.attrib['message'] == message:
break
else:
self.fail(msg='Did not find message: "%s" in xml\n%s' % (
message, ElementTree.tostring(xml)))
def _test_for_failure(self, flag, message):
"""Run the test and look for a Failure with the specified message."""
    # _run_test_and_get_xml runs the helper test in a subprocess and already asserts
    # a non-zero exit code.
    xml = self._run_test_and_get_xml(flag)
self.assertEqual(int(xml.attrib['errors']), 0)
self.assertEqual(int(xml.attrib['failures']), 1)
for msg in xml.iter('failure'):
if msg.attrib['message'] == message:
break
else:
self.fail(msg='Did not find message: "%s"' % message)
def test_set_up_module_error(self):
self._run_test(
flag='--set_up_module_error',
num_errors=1,
num_failures=0,
suites=[{'name': '__main__',
'cases': [{'name': 'setUpModule',
'classname': '__main__',
'error': 'setUpModule Errored!'}]}])
def test_tear_down_module_error(self):
self._run_test(
flag='--tear_down_module_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest'}]},
{'name': '__main__',
'cases': [{'name': 'tearDownModule',
'classname': '__main__',
'error': 'tearDownModule Errored!'}]}])
def test_set_up_class_error(self):
self._run_test(
flag='--set_up_class_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'setUpClass',
'classname': '__main__.FailableTest',
'error': 'setUpClass Errored!'}]}])
def test_tear_down_class_error(self):
self._run_test(
flag='--tear_down_class_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest'},
{'name': 'tearDownClass',
'classname': '__main__.FailableTest',
'error': 'tearDownClass Errored!'}]}])
def test_set_up_error(self):
self._run_test(
flag='--set_up_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'error': 'setUp Errored!'}]}])
def test_tear_down_error(self):
self._run_test(
flag='--tear_down_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'error': 'tearDown Errored!'}]}])
def test_test_error(self):
self._run_test(
flag='--test_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'error': 'test Errored!'}]}])
def test_set_up_failure(self):
if six.PY2:
# A failure in setUp() produces an error (not a failure), which is
# inconsistent with the Python unittest documentation. In Python
# 2.7, the bug appears to be in unittest.TestCase.run() method.
# Although it correctly checks for a SkipTest exception, it does
# not check for a failureException.
self._run_test(
flag='--set_up_fail',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'error': 'setUp Failed!'}]}])
else:
self._run_test(
flag='--set_up_fail',
num_errors=0,
num_failures=1,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'failure': 'setUp Failed!'}]}])
def test_tear_down_failure(self):
if six.PY2:
# See comment in test_set_up_failure().
self._run_test(
flag='--tear_down_fail',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'error': 'tearDown Failed!'}]}])
else:
self._run_test(
flag='--tear_down_fail',
num_errors=0,
num_failures=1,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'failure': 'tearDown Failed!'}]}])
def test_test_fail(self):
self._run_test(
flag='--test_fail',
num_errors=0,
num_failures=1,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'failure': 'test Failed!'}]}])
if __name__ == '__main__':
absltest.main()
| 33.492078
| 173
| 0.629045
|
3c9863dbafde39a229c9be910c165c625cd7a4d2
| 14,578
|
py
|
Python
|
sql/views.py
|
gaotuan/Archery
|
5b218521e1538e265306855484af9bbfee744c6b
|
[
"Apache-2.0"
] | 1
|
2020-06-05T06:17:52.000Z
|
2020-06-05T06:17:52.000Z
|
sql/views.py
|
gaotuan/Archery
|
5b218521e1538e265306855484af9bbfee744c6b
|
[
"Apache-2.0"
] | null | null | null |
sql/views.py
|
gaotuan/Archery
|
5b218521e1538e265306855484af9bbfee744c6b
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: UTF-8 -*-
import os
import traceback
import simplejson as json
from django.conf import settings
from django.contrib.auth.decorators import permission_required
from django.contrib.auth.models import Group
from django.core.exceptions import PermissionDenied
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect, FileResponse
from django.urls import reverse
from archery import settings
from common.config import SysConfig
from sql.engines import get_engine
from common.utils.permission import superuser_required
from sql.engines.models import ReviewResult, ReviewSet
from sql.utils.tasks import task_info
from .models import Users, SqlWorkflow, QueryPrivileges, ResourceGroup, \
QueryPrivilegesApply, Config, SQL_WORKFLOW_CHOICES, InstanceTag, Instance, QueryLog
from sql.utils.workflow_audit import Audit
from sql.utils.sql_review import can_execute, can_timingtask, can_cancel, can_view, can_rollback
from common.utils.const import Const, WorkflowDict
from sql.utils.resource_group import user_groups
import logging
logger = logging.getLogger('default')
def index(request):
index_path_url = SysConfig().get('index_path_url', 'sqlworkflow')
return HttpResponseRedirect(f"/{index_path_url.strip('/')}/")
def login(request):
"""登录页面"""
if request.user and request.user.is_authenticated:
return HttpResponseRedirect('/')
return render(request, 'login.html', context={'sign_up_enabled': SysConfig().get('sign_up_enabled')})
@permission_required('sql.menu_dashboard', raise_exception=True)
def dashboard(request):
"""dashboard页面"""
return render(request, 'dashboard.html')
def sqlworkflow(request):
"""SQL上线工单列表页面"""
user = request.user
    # Data used to populate the filter options
filter_dict = dict()
    # Superusers can view all workflows
if user.is_superuser:
pass
    # Non-superusers with review permission or resource-group-level execute permission can view all workflows in their groups
elif user.has_perm('sql.sql_review') or user.has_perm('sql.sql_execute_for_resource_group'):
        # First get the list of resource groups the user belongs to
group_list = user_groups(user)
group_ids = [group.group_id for group in group_list]
filter_dict['group_id__in'] = group_ids
    # Everyone else can only view workflows they submitted themselves
else:
filter_dict['engineer'] = user.username
instance_id = SqlWorkflow.objects.filter(**filter_dict).values('instance_id').distinct()
instance = Instance.objects.filter(pk__in=instance_id)
resource_group_id = SqlWorkflow.objects.filter(**filter_dict).values('group_id').distinct()
resource_group = ResourceGroup.objects.filter(group_id__in=resource_group_id)
return render(request, 'sqlworkflow.html',
{'status_list': SQL_WORKFLOW_CHOICES,
'instance': instance, 'resource_group': resource_group})
@permission_required('sql.sql_submit', raise_exception=True)
def submit_sql(request):
"""提交SQL的页面"""
user = request.user
    # Get the user's resource groups
group_list = user_groups(user)
    # Get all active users (candidates for notification)
active_user = Users.objects.filter(is_active=1)
    # Get the system configuration
archer_config = SysConfig()
    # Create the instance tag up front if it does not exist yet
InstanceTag.objects.get_or_create(tag_code='can_write', defaults={'tag_name': '支持上线', 'active': True})
context = {'active_user': active_user, 'group_list': group_list,
'enable_backup_switch': archer_config.get('enable_backup_switch')}
return render(request, 'sqlsubmit.html', context)
def detail(request, workflow_id):
"""展示SQL工单详细页面"""
workflow_detail = get_object_or_404(SqlWorkflow, pk=workflow_id)
if not can_view(request.user, workflow_id):
raise PermissionDenied
if workflow_detail.status in ['workflow_finish', 'workflow_exception']:
rows = workflow_detail.sqlworkflowcontent.execute_result
else:
rows = workflow_detail.sqlworkflowcontent.review_content
    # Workflows rejected by auto-review do not need the information below
if workflow_detail.status != 'workflow_autoreviewwrong':
        # Get the current approval step and the full approval flow
audit_auth_group, current_audit_auth_group = Audit.review_info(workflow_id, 2)
        # Whether the current user can review
is_can_review = Audit.can_review(request.user, workflow_id, 2)
        # Whether the current user can execute
is_can_execute = can_execute(request.user, workflow_id)
        # Whether the current user can schedule timed execution
is_can_timingtask = can_timingtask(request.user, workflow_id)
        # Whether the current user can cancel
is_can_cancel = can_cancel(request.user, workflow_id)
        # Whether the current user can view rollback information
is_can_rollback = can_rollback(request.user, workflow_id)
# 获取审核日志
try:
audit_id = Audit.detail_by_workflow_id(workflow_id=workflow_id,
workflow_type=WorkflowDict.workflow_type['sqlreview']).audit_id
last_operation_info = Audit.logs(audit_id=audit_id).latest('id').operation_info
except Exception as e:
logger.debug(f'无审核日志记录,错误信息{e}')
last_operation_info = ''
else:
audit_auth_group = '系统自动驳回'
current_audit_auth_group = '系统自动驳回'
is_can_review = False
is_can_execute = False
is_can_timingtask = False
is_can_cancel = False
is_can_rollback = False
last_operation_info = None
    # Get scheduled-execution task info
if workflow_detail.status == 'workflow_timingtask':
job_id = Const.workflowJobprefix['sqlreview'] + '-' + str(workflow_id)
job = task_info(job_id)
if job:
run_date = job.next_run
else:
run_date = ''
else:
run_date = ''
    # Check whether manual execution confirmation is enabled
manual = SysConfig().get('manual')
review_result = ReviewSet()
if rows:
try:
            # Check whether rows can be parsed
            loaded_rows = json.loads(rows)
            # Stay compatible with old '[[]]'-format data by converting it to the new [{}] format
if isinstance(loaded_rows[-1], list):
for r in loaded_rows:
review_result.rows += [ReviewResult(inception_result=r)]
rows = review_result.json()
except IndexError:
review_result.rows += [ReviewResult(
id=1,
sql=workflow_detail.sqlworkflowcontent.sql_content,
errormessage="Json decode failed."
"执行结果Json解析失败, 请联系管理员"
)]
rows = review_result.json()
except json.decoder.JSONDecodeError:
review_result.rows += [ReviewResult(
id=1,
sql=workflow_detail.sqlworkflowcontent.sql_content,
                # The English error message is included here because this branch cannot be unit-tested
errormessage="Json decode failed."
"执行结果Json解析失败, 请联系管理员"
)]
rows = review_result.json()
else:
rows = workflow_detail.sqlworkflowcontent.review_content
context = {'workflow_detail': workflow_detail, 'rows': rows, 'last_operation_info': last_operation_info,
'is_can_review': is_can_review, 'is_can_execute': is_can_execute, 'is_can_timingtask': is_can_timingtask,
'is_can_cancel': is_can_cancel, 'is_can_rollback': is_can_rollback, 'audit_auth_group': audit_auth_group,
'manual': manual, 'current_audit_auth_group': current_audit_auth_group, 'run_date': run_date}
return render(request, 'detail.html', context)
def rollback(request):
"""展示回滚的SQL页面"""
workflow_id = request.GET.get('workflow_id')
if not can_rollback(request.user, workflow_id):
raise PermissionDenied
download = request.GET.get('download')
if workflow_id == '' or workflow_id is None:
context = {'errMsg': 'workflow_id参数为空.'}
return render(request, 'error.html', context)
workflow = SqlWorkflow.objects.get(id=int(workflow_id))
try:
query_engine = get_engine(instance=workflow.instance)
list_backup_sql = query_engine.get_rollback(workflow=workflow)
except Exception as msg:
logger.error(traceback.format_exc())
context = {'errMsg': msg}
return render(request, 'error.html', context)
    # Fetch the data and write it to a file under the downloads directory
path = os.path.join(settings.BASE_DIR, 'downloads/rollback')
os.makedirs(path, exist_ok=True)
file_name = f'{path}/rollback_{workflow_id}.sql'
with open(file_name, 'w') as f:
for sql in list_backup_sql:
f.write(f'/*{sql[0]}*/\n{sql[1]}\n')
    # Rollback statements larger than 4 MB are forced to download; the frontend cannot auto-fill them
if os.path.getsize(file_name) > 4194304 or download:
response = FileResponse(open(file_name, 'rb'))
response['Content-Type'] = 'application/octet-stream'
response['Content-Disposition'] = f'attachment;filename="rollback_{workflow_id}.sql"'
return response
    # Delete the file if it is smaller than 4 MB
else:
os.remove(file_name)
rollback_workflow_name = f"【回滚工单】原工单Id:{workflow_id} ,{workflow.workflow_name}"
context = {'list_backup_sql': list_backup_sql, 'workflow_detail': workflow,
'rollback_workflow_name': rollback_workflow_name}
return render(request, 'rollback.html', context)
@permission_required('sql.menu_sqlanalyze', raise_exception=True)
def sqlanalyze(request):
"""SQL分析页面"""
return render(request, 'sqlanalyze.html')
@permission_required('sql.menu_query', raise_exception=True)
def sqlquery(request):
"""SQL在线查询页面"""
# 主动创建标签
InstanceTag.objects.get_or_create(tag_code='can_read', defaults={'tag_name': '支持查询', 'active': True})
# 收藏语句
user = request.user
favorites = QueryLog.objects.filter(username=user.username, favorite=True).values('id', 'alias')
return render(request, 'sqlquery.html', {'favorites': favorites})
@permission_required('sql.menu_queryapplylist', raise_exception=True)
def queryapplylist(request):
"""查询权限申请列表页面"""
user = request.user
# 获取资源组
group_list = user_groups(user)
context = {'group_list': group_list}
return render(request, 'queryapplylist.html', context)
def queryapplydetail(request, apply_id):
"""查询权限申请详情页面"""
workflow_detail = QueryPrivilegesApply.objects.get(apply_id=apply_id)
# 获取当前审批和审批流程
audit_auth_group, current_audit_auth_group = Audit.review_info(apply_id, 1)
# 是否可审核
is_can_review = Audit.can_review(request.user, apply_id, 1)
# 获取审核日志
if workflow_detail.status == 2:
try:
audit_id = Audit.detail_by_workflow_id(workflow_id=apply_id, workflow_type=1).audit_id
last_operation_info = Audit.logs(audit_id=audit_id).latest('id').operation_info
except Exception as e:
logger.debug(f'无审核日志记录,错误信息{e}')
last_operation_info = ''
else:
last_operation_info = ''
context = {'workflow_detail': workflow_detail, 'audit_auth_group': audit_auth_group,
'last_operation_info': last_operation_info, 'current_audit_auth_group': current_audit_auth_group,
'is_can_review': is_can_review}
return render(request, 'queryapplydetail.html', context)
def queryuserprivileges(request):
"""查询权限管理页面"""
# 获取所有用户
user_list = QueryPrivileges.objects.filter(is_deleted=0).values('user_display').distinct()
context = {'user_list': user_list}
return render(request, 'queryuserprivileges.html', context)
@permission_required('sql.menu_sqladvisor', raise_exception=True)
def sqladvisor(request):
"""SQL优化工具页面"""
return render(request, 'sqladvisor.html')
@permission_required('sql.menu_slowquery', raise_exception=True)
def slowquery(request):
"""SQL慢日志页面"""
return render(request, 'slowquery.html')
@permission_required('sql.menu_instance', raise_exception=True)
def instance(request):
"""实例管理页面"""
# 获取实例标签
tags = InstanceTag.objects.filter(active=True)
return render(request, 'instance.html', {'tags': tags})
@permission_required('sql.menu_instance_account', raise_exception=True)
def instanceaccount(request):
"""实例账号管理页面"""
return render(request, 'instanceaccount.html')
@permission_required('sql.menu_database', raise_exception=True)
def database(request):
"""实例数据库管理页面"""
# 获取所有有效用户,通知对象
active_user = Users.objects.filter(is_active=1)
return render(request, 'database.html', {"active_user": active_user})
@permission_required('sql.menu_dbdiagnostic', raise_exception=True)
def dbdiagnostic(request):
"""会话管理页面"""
return render(request, 'dbdiagnostic.html')
@permission_required('sql.menu_data_dictionary', raise_exception=True)
def data_dictionary(request):
"""数据字典页面"""
return render(request, 'data_dictionary.html', locals())
@permission_required('sql.menu_param', raise_exception=True)
def instance_param(request):
"""实例参数管理页面"""
return render(request, 'param.html')
@permission_required('sql.menu_binlog2sql', raise_exception=True)
def binlog2sql(request):
"""binlog2sql页面"""
return render(request, 'binlog2sql.html')
@permission_required('sql.menu_schemasync', raise_exception=True)
def schemasync(request):
"""数据库差异对比页面"""
return render(request, 'schemasync.html')
@superuser_required
def config(request):
"""配置管理页面"""
# 获取所有资源组名称
group_list = ResourceGroup.objects.all()
# 获取所有权限组
auth_group_list = Group.objects.all()
# 获取所有配置项
all_config = Config.objects.all().values('item', 'value')
sys_config = {}
for items in all_config:
sys_config[items['item']] = items['value']
context = {'group_list': group_list, 'auth_group_list': auth_group_list,
'config': sys_config, 'WorkflowDict': WorkflowDict}
return render(request, 'config.html', context)
@superuser_required
def group(request):
"""资源组管理页面"""
return render(request, 'group.html')
@superuser_required
def groupmgmt(request, group_id):
"""资源组组关系管理页面"""
group = ResourceGroup.objects.get(group_id=group_id)
return render(request, 'groupmgmt.html', {'group': group})
def workflows(request):
"""待办列表页面"""
return render(request, "workflow.html")
def workflowsdetail(request, audit_id):
"""待办详情"""
# 按照不同的workflow_type返回不同的详情
audit_detail = Audit.detail(audit_id)
if audit_detail.workflow_type == WorkflowDict.workflow_type['query']:
return HttpResponseRedirect(reverse('sql:queryapplydetail', args=(audit_detail.workflow_id,)))
elif audit_detail.workflow_type == WorkflowDict.workflow_type['sqlreview']:
return HttpResponseRedirect(reverse('sql:detail', args=(audit_detail.workflow_id,)))
@permission_required('sql.menu_document', raise_exception=True)
def dbaprinciples(request, pname):
    """SQL documentation page"""
    # Read the Markdown file
file = os.path.join(settings.BASE_DIR, 'docs/'+pname)
with open(file, 'r') as f:
md = f.read().replace('\n', '\\n')
return render(request, 'dbaprinciples.html', {'md': md})
| 34.792363
| 120
| 0.688023
|
2089115a7928bd6eb2f6a45f87a48d43f528c7bb
| 148
|
py
|
Python
|
SphereNet_PyTorch/spherenet/__init__.py
|
ustundag/2D-3D-Semantics
|
6f79be0082e2bfd6b7940c2314972a603e55f201
|
[
"Apache-2.0"
] | null | null | null |
SphereNet_PyTorch/spherenet/__init__.py
|
ustundag/2D-3D-Semantics
|
6f79be0082e2bfd6b7940c2314972a603e55f201
|
[
"Apache-2.0"
] | null | null | null |
SphereNet_PyTorch/spherenet/__init__.py
|
ustundag/2D-3D-Semantics
|
6f79be0082e2bfd6b7940c2314972a603e55f201
|
[
"Apache-2.0"
] | null | null | null |
from .sphere_cnn import SphereConv2D
from .sphere_cnn import SphereMaxPool2D
#from .dataset import OmniMNIST
#from .dataset import OmniFashionMNIST
| 29.6
| 39
| 0.851351
|
3598a42aba799cbc6f2b60fae4ce22058661a7a0
| 1,960
|
py
|
Python
|
cwbot/modules/messages/CashoutModule.py
|
DamianDominoDavis/cwbot-ndy
|
53b826232eadb7ef558f568872a945d04d8d4252
|
[
"BSD-3-Clause"
] | null | null | null |
cwbot/modules/messages/CashoutModule.py
|
DamianDominoDavis/cwbot-ndy
|
53b826232eadb7ef558f568872a945d04d8d4252
|
[
"BSD-3-Clause"
] | null | null | null |
cwbot/modules/messages/CashoutModule.py
|
DamianDominoDavis/cwbot-ndy
|
53b826232eadb7ef558f568872a945d04d8d4252
|
[
"BSD-3-Clause"
] | null | null | null |
from cwbot.modules.BaseKmailModule import BaseKmailModule
from kol.database.ItemDatabase import getItemFromId
class CashoutModule(BaseKmailModule):
"""
A module that "cashes out" items withheld due to ronin/HC status.
No configuration options.
"""
requiredCapabilities = ['kmail']
_name = "cashout"
def __init__(self, manager, identity, config):
super(CashoutModule, self).__init__(manager, identity, config)
def _processKmail(self, message):
if message.text.strip().lower() == "cashout":
self.log("Cashout message: {}".format(message))
try:
self.parent.director.cashout(message.uid)
return self.newMessage(-1)
except IndexError:
return self.newMessage(message.uid, "I don't have any items "
"stored for you.")
elif message.text.strip().lower() == "balance":
(meat, items) = self.parent.director.balance(message.uid)
if meat == 0 and len(items) == 0:
return self.newMessage(message.uid, "I don't have any items "
"stored for you.")
text = "Your balance: \n"
for iid, qty in list(items.items()):
text += ("\n{}: {}".format(qty, getItemFromId(iid).get(
'name', "item ID {}".format(iid))))
if meat > 0:
text += "\n{} meat".format(meat)
return self.newMessage(message.uid, text)
def _kmailDescription(self):
return ("CASHING OUT: If I am holding any items for you, send a kmail "
"with the text \"cashout\" to get your stuff back. You can "
"send a kmail with the text \"balance\" to get a list of what "
"I am holding for you.")
| 43.555556
| 80
| 0.529592
|
f8462435a451b6e3fce07109be4f6ab9090e657f
| 3,506
|
py
|
Python
|
homeassistant/components/thinkingcleaner/sensor.py
|
shanbs/home-assistant
|
818776d2b4f11e4f51992dc88bc0a6f9055833b2
|
[
"Apache-2.0"
] | 2
|
2017-10-26T19:43:55.000Z
|
2017-12-30T23:29:00.000Z
|
homeassistant/components/thinkingcleaner/sensor.py
|
shanbs/home-assistant
|
818776d2b4f11e4f51992dc88bc0a6f9055833b2
|
[
"Apache-2.0"
] | 3
|
2021-09-08T03:29:36.000Z
|
2022-03-12T00:59:48.000Z
|
homeassistant/components/thinkingcleaner/sensor.py
|
shanbs/home-assistant
|
818776d2b4f11e4f51992dc88bc0a6f9055833b2
|
[
"Apache-2.0"
] | 1
|
2019-09-28T07:06:08.000Z
|
2019-09-28T07:06:08.000Z
|
"""Support for ThinkingCleaner sensors."""
import logging
from datetime import timedelta
from homeassistant import util
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['pythinkingcleaner==0.0.3']
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(milliseconds=100)
SENSOR_TYPES = {
'battery': ['Battery', '%', 'mdi:battery'],
'state': ['State', None, None],
'capacity': ['Capacity', None, None],
}
STATES = {
'st_base': 'On homebase: Not Charging',
'st_base_recon': 'On homebase: Reconditioning Charging',
'st_base_full': 'On homebase: Full Charging',
'st_base_trickle': 'On homebase: Trickle Charging',
'st_base_wait': 'On homebase: Waiting',
'st_plug': 'Plugged in: Not Charging',
'st_plug_recon': 'Plugged in: Reconditioning Charging',
'st_plug_full': 'Plugged in: Full Charging',
'st_plug_trickle': 'Plugged in: Trickle Charging',
'st_plug_wait': 'Plugged in: Waiting',
'st_stopped': 'Stopped',
'st_clean': 'Cleaning',
'st_cleanstop': 'Stopped with cleaning',
'st_clean_spot': 'Spot cleaning',
'st_clean_max': 'Max cleaning',
'st_delayed': 'Delayed cleaning will start soon',
'st_dock': 'Searching Homebase',
'st_pickup': 'Roomba picked up',
'st_remote': 'Remote control driving',
'st_wait': 'Waiting for command',
'st_off': 'Off',
'st_error': 'Error',
'st_locate': 'Find me!',
'st_unknown': 'Unknown state',
}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the ThinkingCleaner platform."""
from pythinkingcleaner import Discovery
discovery = Discovery()
devices = discovery.discover()
@util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
def update_devices():
"""Update all devices."""
for device_object in devices:
device_object.update()
dev = []
for device in devices:
for type_name in SENSOR_TYPES:
dev.append(ThinkingCleanerSensor(device, type_name,
update_devices))
add_entities(dev)
class ThinkingCleanerSensor(Entity):
"""Representation of a ThinkingCleaner Sensor."""
def __init__(self, tc_object, sensor_type, update_devices):
"""Initialize the ThinkingCleaner."""
self.type = sensor_type
self._tc_object = tc_object
self._update_devices = update_devices
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self._tc_object.name, SENSOR_TYPES[self.type][0])
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return SENSOR_TYPES[self.type][2]
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def update(self):
"""Update the sensor."""
self._update_devices()
if self.type == 'battery':
self._state = self._tc_object.battery
elif self.type == 'state':
self._state = STATES[self._tc_object.status]
elif self.type == 'capacity':
self._state = self._tc_object.capacity
| 31.026549
| 79
| 0.651169
|
2b86c65d5052be9e8fce8307fa13ad5fa2c3f11d
| 64
|
py
|
Python
|
software/models/common_components/multisense_sl/mit_modifications/jointNameMap.py
|
liangfok/oh-distro
|
eeee1d832164adce667e56667dafc64a8d7b8cee
|
[
"BSD-3-Clause"
] | 92
|
2016-01-14T21:03:50.000Z
|
2021-12-01T17:57:46.000Z
|
software/models/common_components/multisense_sl/mit_modifications/jointNameMap.py
|
liangfok/oh-distro
|
eeee1d832164adce667e56667dafc64a8d7b8cee
|
[
"BSD-3-Clause"
] | 62
|
2016-01-16T18:08:14.000Z
|
2016-03-24T15:16:28.000Z
|
software/models/common_components/multisense_sl/mit_modifications/jointNameMap.py
|
liangfok/oh-distro
|
eeee1d832164adce667e56667dafc64a8d7b8cee
|
[
"BSD-3-Clause"
] | 41
|
2016-01-14T21:26:58.000Z
|
2022-03-28T03:10:39.000Z
|
jointNameMap = {"head_hokuyo_joint": "head_hokuyo_fixed_joint"}
| 32
| 63
| 0.8125
|
f2528d3216364331761e553855635e84849018d7
| 2,566
|
py
|
Python
|
pythonScripts/images.py
|
FuckBrains/videoAutoProduction
|
e6988c1d2debd511b63968a55e29c4d2373c5ab6
|
[
"MIT"
] | 18
|
2019-10-02T13:04:25.000Z
|
2022-03-01T03:24:10.000Z
|
pythonScripts/images.py
|
JDuggal760/videoAutoProduction
|
e6988c1d2debd511b63968a55e29c4d2373c5ab6
|
[
"MIT"
] | 1
|
2021-04-28T14:33:36.000Z
|
2022-01-28T09:43:21.000Z
|
pythonScripts/images.py
|
JDuggal760/videoAutoProduction
|
e6988c1d2debd511b63968a55e29c4d2373c5ab6
|
[
"MIT"
] | 7
|
2020-01-02T14:48:51.000Z
|
2021-11-15T17:05:50.000Z
|
import nltk
from datetime import datetime
from os import listdir, mkdir, remove
import extractImages
from subprocess import run
from PIL import Image
date = datetime.now().strftime('%Y-%m-%d')
path = f'/home/sr1/Projects/Others/videoAUTO/News/{date}'
mkdir(f'{path}/Images')
files = []
for f in listdir(f'{path}/Articles'):
    if f.endswith('.txt') and not f.startswith('HEADLINE'):
files.append(f)
for filename in files:
filename = filename.replace('.txt', '')
filename = filename.replace(';', ' ')
filename = filename.replace("'", " ")
tokens = nltk.word_tokenize(filename)
# Article's headline is less than 6 words
if len(tokens) < 6:
images_path = f'{path}/Images/{filename}'
mkdir(images_path)
extractImages.run(filename, images_path, 10)
# Removes corrupt image files
for image in listdir(images_path):
try:
img = Image.open(f'{images_path}/{image}')
img.verify()
except Exception as e:
print(e)
remove(f'{images_path}/{image}')
# print(images_path)
# print(filename)
# Article's headline more or equal to 6 words
else:
tagged_sent = nltk.pos_tag(tokens)
# scope to further experiment
proper_noun = [word for word, pos in tagged_sent if pos == 'NNP' or pos == 'NN' or pos == 'NNS' or pos == 'JJ' or pos == 'CD']
nnp_string = ' '.join(list(dict.fromkeys(proper_noun)))
images_path = f'{path}/Images/{filename}'
mkdir(images_path)
extractImages.run(nnp_string, images_path, 10)
# Remove any corrupted image that might have downloaded
for image in listdir(images_path):
try:
img = Image.open(f'{images_path}/{image}')
img.verify()
except Exception as e:
print(e)
remove(f'{images_path}/{image}')
# print(images_path)
# print(nnp_string)
# Renaming images with numbers in extension (ex: photo.jpg1)
try:
run('../shellScripts/nameCleanup.sh')
except Exception as e:
print(e)
print('nameCleanup.sh script did not work!')
# To manually delete non-related images
print(f'\nCheck all your images before you continue.')
num = 1
for image_folders in listdir(f'{path}/Images'):
print(f'\n\n{num}. {image_folders}')
# using sxiv to view the images, you can replace it with your program
run(f'sxiv {path}/Images/"{image_folders}"/*', shell=True)
num += 1
| 28.831461
| 134
| 0.613016
|
d5bd47319134e86e5d34fd69633d5336a7376e6e
| 5,142
|
py
|
Python
|
tools/accuracy_checker/openvino/tools/accuracy_checker/annotation_converters/wider.py
|
TolyaTalamanov/open_model_zoo
|
1697e60712df4ca72635a2080a197b9d3bc24129
|
[
"Apache-2.0"
] | 2,201
|
2018-10-15T14:37:19.000Z
|
2020-07-16T02:05:51.000Z
|
tools/accuracy_checker/openvino/tools/accuracy_checker/annotation_converters/wider.py
|
Pandinosaurus/open_model_zoo
|
2543996541346418919c5cddfb71e33e2cdef080
|
[
"Apache-2.0"
] | 759
|
2018-10-18T07:43:55.000Z
|
2020-07-16T01:23:12.000Z
|
tools/accuracy_checker/openvino/tools/accuracy_checker/annotation_converters/wider.py
|
Pandinosaurus/open_model_zoo
|
2543996541346418919c5cddfb71e33e2cdef080
|
[
"Apache-2.0"
] | 808
|
2018-10-16T14:03:49.000Z
|
2020-07-15T11:41:45.000Z
|
"""
Copyright (c) 2018-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ..config import NumberField, PathField
from ..representation import DetectionAnnotation
from ..utils import convert_bboxes_xywh_to_x1y1x2y2, read_txt, check_file_existence, read_json
from .format_converter import BaseFormatConverter, ConverterReturn, verify_label_map
class WiderFormatConverter(BaseFormatConverter):
__provider__ = 'wider'
annotation_types = (DetectionAnnotation, )
@classmethod
def parameters(cls):
configuration_parameters = super().parameters()
configuration_parameters.update({
'annotation_file': PathField(
description="Path to xml file, which contains ground truth data in WiderFace dataset format."
),
'label_start': NumberField(
value_type=int, optional=True, default=1,
description="Specifies face label index in label map. Default value is 1. "
"You can provide another value, if you want to use this"
),
'images_dir': PathField(
is_directory=True, optional=True,
description='path to dataset images, used only for content existence check'
),
'dataset_meta_file': PathField(
description='path to json file with dataset meta (e.g. label_map, color_encoding)', optional=True
)
})
return configuration_parameters
def configure(self):
self.annotation_file = self.get_value_from_config('annotation_file')
self.label_start = self.get_value_from_config('label_start')
self.images_dir = self.get_value_from_config('images_dir') or self.annotation_file.parent
self.dataset_meta = self.get_value_from_config('dataset_meta_file')
def convert(self, check_content=False, progress_callback=None, progress_interval=100, **kwargs):
image_annotations = read_txt(self.annotation_file)
content_errors = None if not check_content else []
image_ids = [image_id for image_id, line in enumerate(image_annotations) if '.jpg' in line]
meta = self.get_meta()
annotations = []
num_iterations = len(image_ids)
for index, image_id in enumerate(image_ids):
identifier = image_annotations[image_id]
if check_content:
if not check_file_existence(self.images_dir / identifier):
content_errors.append('{}: does not exist'.format(self.images_dir / identifier))
bbox_count = image_annotations[image_id + 1]
bbox_lines = image_annotations[image_id + 2:image_id + 2 + int(bbox_count)]
x_mins, y_mins, x_maxs, y_maxs = [], [], [], []
for bbox in bbox_lines:
x_min, y_min, x_max, y_max = convert_bboxes_xywh_to_x1y1x2y2(*(map(float, (bbox.split(' ')[0:4]))))
x_mins.append(x_min)
y_mins.append(y_min)
x_maxs.append(x_max)
y_maxs.append(y_max)
annotations.append(DetectionAnnotation(
identifier, [self.label_start] * len(x_mins),
x_mins, y_mins, x_maxs, y_maxs
))
if progress_callback and index % progress_interval == 0:
progress_callback(index * 100 / num_iterations)
return ConverterReturn(annotations, meta, content_errors)
def get_meta(self):
if not self.dataset_meta:
if self.label_start != 0:
return {'label_map': {0: '__background__', self.label_start: 'face'}, 'background_label': 0}
return {'label_map': {self.label_start: 'face'}}
dataset_meta = read_json(self.dataset_meta)
background_label = dataset_meta.get('background_label', -1)
labels = dataset_meta.get('labels')
label_map = {0: '__background__', self.label_start: 'face'}
if labels:
label_map = {
label_id + self.label_start if label not in ('background', '__background__') else 0: label
for label_id, label in enumerate(labels)
}
label_map[background_label] = '__background__'
label_map = verify_label_map(dataset_meta.get('label_map', label_map))
valid_labels = [key for key in label_map if key != background_label]
self.background_label = background_label
self.label_start = sorted(valid_labels)[0]
meta = {'label_map': label_map}
if background_label != -1:
meta['background_label'] = background_label
return meta
| 44.327586
| 115
| 0.653637
|
a7a528b0142fd2a9df1bc75b3518dc509b76e286
| 5,453
|
py
|
Python
|
domain/analysis.py
|
sirsean/espn-fantasy-football-analyzer
|
f7565821d4a558a692cd7c3a07d56a14d5c47faf
|
[
"MIT"
] | 1
|
2016-05-08T22:44:08.000Z
|
2016-05-08T22:44:08.000Z
|
domain/analysis.py
|
sirsean/espn-fantasy-football-analyzer
|
f7565821d4a558a692cd7c3a07d56a14d5c47faf
|
[
"MIT"
] | 1
|
2015-12-08T17:09:19.000Z
|
2015-12-08T22:01:56.000Z
|
domain/analysis.py
|
sirsean/espn-fantasy-football-analyzer
|
f7565821d4a558a692cd7c3a07d56a14d5c47faf
|
[
"MIT"
] | null | null | null |
"""
Represents a team in the league.
Keep track of the team's actual record and optimum record, as well as the players who scored above their average against this team.
"""
class Team:
def __init__(self, name):
self.name = name
self.actualWins = 0
self.actualLosses = 0
self.actualTies = 0
self.optimumWins = 0
self.optimumLosses = 0
self.optimumTies = 0
self.actualPointsFor = 0
self.actualPointsAgainst = 0
self.optimumPointsFor = 0
self.optimumPointsAgainst = 0
self.aboveAverageOpposingPlayerPointsLines = []
self.highScoringBenchPlayers = []
self.lowScoringStarters = []
"""
Add a player points line to this team, to keep track of those players who scored above their average against this team.
"""
def addAboveAverageOpposingPlayerPointsLine(self, line):
self.aboveAverageOpposingPlayerPointsLines.append(line)
self.aboveAverageOpposingPlayerPointsLines.sort(PlayerPointsLine.sortByDifferenceFromAverage)
"""
Add a player points line to the high scoring bench players list.
"""
def addHighScoringBenchPlayerPointsLine(self, line):
self.highScoringBenchPlayers.append(line)
self.highScoringBenchPlayers.sort(PlayerPointsLine.sortByName)
"""
Add a player points line to the low scoring starters list.
"""
def addLowScoringStarterPlayerPointsLine(self, line):
self.lowScoringStarters.append(line)
self.lowScoringStarters.sort(PlayerPointsLine.sortByName)
"""
Get the total number of points scored against this team, over the course of the whole season, by players who scored more than their average in the game in which they faced this team.
"""
def getTotalOpposingPlayersPointsAboveAverage(self):
total = 0
for line in self.aboveAverageOpposingPlayerPointsLines:
total += line.weekPoints - line.averagePoints
return total
"""
Sorting function to help sort teams by optimum points, in descending order.
"""
def sortByOptimumPointsForDescending(team1, team2):
return cmp(team2.optimumPointsFor, team1.optimumPointsFor)
"""
Sorting function to help sort teams by optimum wins, in descending order.
"""
def sortByOptimumWinsDescending(team1, team2):
return cmp(team2.optimumWins, team1.optimumWins)
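# Illustrative note, not part of the original file: the comparators above are
# Python 2 cmp-style sort functions. A hypothetical list of Team objects could
# be ordered with teams.sort(Team.sortByOptimumWinsDescending); the Python 3
# equivalent would be sorted(teams, key=lambda t: t.optimumWins, reverse=True).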
"""
Represents a single player.
This is a definitive datasource; there should only be one instance of this object per player.
Contains a list of the player's weekly scoring lines, and calculates total/average points.
"""
class Player:
def __init__(self, playerId, name):
self.scoreLines = []
self.playerId = playerId
self.name = name
self.totalPoints = 0
self.averagePoints = 0
self.linesAboveAverage = []
self.linesBelowAverage = []
"""
Add a PlayerScoreLine to this player's record.
"""
def addScoreLine(self, scoreLine):
self.scoreLines.append(scoreLine)
"""
Analyze the scores for this player over the course of the season.
Compiles the total points and the average weekly points, then calculates which games were above and below average.
"""
def analyzeScores(self):
# first sum the points
self.totalPoints = 0
for scoreLine in self.scoreLines:
self.totalPoints += scoreLine.points
# calculate the average
self.averagePoints = (self.totalPoints * 1.0) / len(self.scoreLines)
# determine which lines are above and below average
self.linesAboveAverage = []
self.linesBelowAverage = []
for scoreLine in self.scoreLines:
if scoreLine.points > self.averagePoints:
self.linesAboveAverage.append(PlayerPointsLine(self, scoreLine))
elif scoreLine.points < self.averagePoints:
self.linesBelowAverage.append(PlayerPointsLine(self, scoreLine))
"""
Get this player's points line for the given week.
"""
def getAboveAverageWeeklyPointsLine(self, week):
for line in self.linesAboveAverage:
if line.playerId == self.playerId and line.week == week:
return line
else:
return None
"""
A simple class to represent the points a player scored in a given week, compared to their average points.
There will be many of these for each player, depending on how often they scored above/below average against various teams.
This is not a definitive data source for anything; it's created on the fly from the actual score line.
"""
class PlayerPointsLine:
def __init__(self, player, playerScoreLine):
self.scoreLine = playerScoreLine
self.playerId = player.playerId
self.name = player.name
self.averagePoints = player.averagePoints
self.week = playerScoreLine.week
self.weekPoints = playerScoreLine.points
"""
Determine if this line represents a high scoring bench player.
"""
def isHighScoringBenchPlayer(self):
return (self.scoreLine.slot == 'Bench' and self.weekPoints > 12)
#return (self.scoreLine.slot == 'Bench' and self.weekPoints > self.averagePoints)
"""
Determine if this line represents a low scoring starter.
"""
def isLowScoringStarter(self):
return (self.scoreLine.slot != 'Bench' and self.scoreLine.slot != 'IR' and self.weekPoints < 10)
#return (self.scoreLine.slot != 'Bench' and self.scoreLine.slot != 'IR' and self.weekPoints < self.averagePoints)
"""
Sorting function to sort by the difference between the week's points and the player's average, in descending order.
"""
def sortByDifferenceFromAverage(lineA, lineB):
return cmp(lineB.weekPoints - lineB.averagePoints, lineA.weekPoints - lineA.averagePoints)
"""
Sorting function to sort by the player's name.
"""
def sortByName(lineA, lineB):
return cmp(lineA.name, lineB.name)
| 32.652695
| 183
| 0.758115
|
09f88b781864f7bd704de61823aea3ff5b26b934
| 8,446
|
py
|
Python
|
pinax/stripe/actions/charges.py
|
code-kitchen/pinax-stripe
|
8fb33bba6f7b23ea589094f3c76456ad8e73f909
|
[
"MIT"
] | null | null | null |
pinax/stripe/actions/charges.py
|
code-kitchen/pinax-stripe
|
8fb33bba6f7b23ea589094f3c76456ad8e73f909
|
[
"MIT"
] | null | null | null |
pinax/stripe/actions/charges.py
|
code-kitchen/pinax-stripe
|
8fb33bba6f7b23ea589094f3c76456ad8e73f909
|
[
"MIT"
] | 1
|
2020-03-23T10:21:07.000Z
|
2020-03-23T10:21:07.000Z
|
import decimal
from django.conf import settings
from django.db.models import Q
import stripe
from six import string_types
from .. import hooks, models, utils
def calculate_refund_amount(charge, amount=None):
"""
Calculate refund amount given a charge and optional amount.
Args:
charge: a pinax.stripe.models.Charge object
amount: optionally, the decimal.Decimal amount you wish to refund
"""
eligible_to_refund = charge.amount - (charge.amount_refunded or 0)
if amount:
return min(eligible_to_refund, amount)
return eligible_to_refund
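# Illustrative example, not part of the original pinax-stripe source: for a
# hypothetical charge with amount=Decimal("50.00") and
# amount_refunded=Decimal("20.00"), eligible_to_refund is 30.00, so
# calculate_refund_amount(charge, amount=Decimal("40.00")) returns
# Decimal("30.00") (capped at the remaining balance), and
# calculate_refund_amount(charge) with no amount also returns Decimal("30.00").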
def capture(charge, amount=None, idempotency_key=None):
"""
Capture the payment of an existing, uncaptured, charge.
Args:
charge: a pinax.stripe.models.Charge object
amount: the decimal.Decimal amount of the charge to capture
idempotency_key: Any string that allows retries to be performed safely.
"""
amount = utils.convert_amount_for_api(
amount if amount else charge.amount,
charge.currency
)
stripe_charge = stripe.Charge(
charge.stripe_id,
stripe_account=charge.stripe_account_stripe_id,
).capture(
amount=amount,
idempotency_key=idempotency_key,
expand=["balance_transaction"],
)
sync_charge_from_stripe_data(stripe_charge)
def _validate_create_params(customer, source, amount, application_fee, destination_account, destination_amount, on_behalf_of):
if not customer and not source:
raise ValueError("Must provide `customer` or `source`.")
if not isinstance(amount, decimal.Decimal):
raise ValueError(
"You must supply a decimal value for `amount`."
)
if application_fee and not isinstance(application_fee, decimal.Decimal):
raise ValueError(
"You must supply a decimal value for `application_fee`."
)
if application_fee and not destination_account:
raise ValueError(
"You can only specify `application_fee` with `destination_account`"
)
if application_fee and destination_account and destination_amount:
raise ValueError(
"You can't specify `application_fee` with `destination_amount`"
)
if destination_account and on_behalf_of:
raise ValueError(
"`destination_account` and `on_behalf_of` are mutualy exclusive")
def create(
amount, customer=None, source=None, currency="usd", description=None,
send_receipt=settings.PINAX_STRIPE_SEND_EMAIL_RECEIPTS, capture=True,
email=None, destination_account=None, destination_amount=None,
application_fee=None, on_behalf_of=None, idempotency_key=None,
):
"""
Create a charge for the given customer or source.
If both customer and source are provided, the source must belong to the
customer.
See https://stripe.com/docs/api#create_charge-customer.
Args:
amount: should be a decimal.Decimal amount
customer: the Customer object to charge
source: the Stripe id of the source to charge
currency: the currency with which to charge the amount in
description: a description of the charge
send_receipt: send a receipt upon successful charge
capture: immediately capture the charge instead of doing a pre-authorization
destination_account: stripe_id of a connected account
destination_amount: amount to transfer to the `destination_account` without creating an application fee
application_fee: used with `destination_account` to add a fee destined for the platform account
on_behalf_of: Stripe account ID that these funds are intended for. Automatically set if you use the destination parameter.
idempotency_key: Any string that allows retries to be performed safely.
Returns:
a pinax.stripe.models.Charge object
"""
# Handle customer as stripe_id for backward compatibility.
if customer and not isinstance(customer, models.Customer):
customer, _ = models.Customer.objects.get_or_create(stripe_id=customer)
_validate_create_params(customer, source, amount, application_fee, destination_account, destination_amount, on_behalf_of)
kwargs = dict(
amount=utils.convert_amount_for_api(amount, currency), # find the final amount
currency=currency,
source=source,
customer=customer.stripe_id,
stripe_account=customer.stripe_account_stripe_id,
description=description,
capture=capture,
idempotency_key=idempotency_key,
)
if destination_account:
kwargs["destination"] = {"account": destination_account}
if destination_amount:
kwargs["destination"]["amount"] = utils.convert_amount_for_api(
destination_amount,
currency
)
if application_fee:
kwargs["application_fee"] = utils.convert_amount_for_api(
application_fee, currency
)
elif on_behalf_of:
kwargs["on_behalf_of"] = on_behalf_of
stripe_charge = stripe.Charge.create(
**kwargs
)
charge = sync_charge_from_stripe_data(stripe_charge)
if send_receipt:
hooks.hookset.send_receipt(charge, email)
return charge
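# Illustrative usage sketch, not part of the original module; the customer and
# the values below are hypothetical:
#
#     charge = create(
#         amount=decimal.Decimal("9.99"),
#         customer=customer,  # a pinax.stripe.models.Customer instance
#         description="Monthly subscription",
#     )
#
# Passing a non-Decimal amount raises ValueError in _validate_create_params.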
def retrieve(stripe_id, stripe_account=None):
"""Retrieve a Charge plus its balance info."""
return stripe.Charge.retrieve(
stripe_id,
stripe_account=stripe_account,
expand=["balance_transaction"]
)
def sync_charges_for_customer(customer):
"""
Populate database with all the charges for a customer.
Args:
customer: a pinax.stripe.models.Customer object
"""
for charge in customer.stripe_customer.charges().data:
sync_charge_from_stripe_data(charge)
def sync_charge(stripe_id, stripe_account=None):
"""Sync a charge given a Stripe charge ID."""
return sync_charge_from_stripe_data(
retrieve(stripe_id, stripe_account=stripe_account)
)
def sync_charge_from_stripe_data(data):
"""
Create or update the charge represented by the data from a Stripe API query.
Args:
data: the data representing a charge object in the Stripe API
Returns:
a pinax.stripe.models.Charge object
"""
source = data.get('source', {})
source_id = source.get('id') if source is not None else str(data.get('payment_method', ''))
obj, _ = models.Charge.objects.get_or_create(stripe_id=data["id"])
obj.customer = models.Customer.objects.filter(stripe_id=data["customer"]).first()
obj.source = source_id
obj.currency = data["currency"]
obj.invoice = models.Invoice.objects.filter(stripe_id=data["invoice"]).first()
obj.amount = utils.convert_amount_for_db(data["amount"], obj.currency)
obj.paid = data["paid"]
obj.refunded = data["refunded"]
obj.captured = data["captured"]
obj.disputed = data["dispute"] is not None
obj.charge_created = utils.convert_tstamp(data, "created")
if data.get("description"):
obj.description = data["description"]
if data.get("amount_refunded"):
obj.amount_refunded = utils.convert_amount_for_db(data["amount_refunded"], obj.currency)
if data["refunded"]:
obj.amount_refunded = obj.amount
balance_transaction = data.get("balance_transaction")
if balance_transaction and not isinstance(balance_transaction, string_types):
obj.available = balance_transaction["status"] == "available"
obj.available_on = utils.convert_tstamp(
balance_transaction, "available_on"
)
obj.fee = utils.convert_amount_for_db(
balance_transaction["fee"], balance_transaction["currency"]
)
obj.fee_currency = balance_transaction["currency"]
obj.transfer_group = data.get("transfer_group")
obj.outcome = data.get("outcome")
obj.save()
return obj
def update_charge_availability():
"""
Update `available` and `available_on` attributes of Charges.
We only bother checking those Charges that can become available.
"""
charges = models.Charge.objects.filter(
paid=True,
captured=True
).exclude(
Q(available=True) | Q(refunded=True)
).select_related(
"customer"
)
for c in charges.iterator():
sync_charge(
c.stripe_id,
stripe_account=c.customer.stripe_account
)
| 36.248927
| 130
| 0.692044
|
5f0095e9ec6cff31d46cccf0356605b24d06f9e3
| 2,488
|
py
|
Python
|
update-sha1sums.py
|
devices-ng/android_device_gigaset_mt6763-common
|
2bc486ae6efaecdf144556910ad309b4fbee62ea
|
[
"FTL"
] | 2
|
2021-08-12T10:59:55.000Z
|
2021-11-15T11:16:05.000Z
|
update-sha1sums.py
|
devices-ng/android_device_gigaset_mt6763-common
|
2bc486ae6efaecdf144556910ad309b4fbee62ea
|
[
"FTL"
] | null | null | null |
update-sha1sums.py
|
devices-ng/android_device_gigaset_mt6763-common
|
2bc486ae6efaecdf144556910ad309b4fbee62ea
|
[
"FTL"
] | 4
|
2021-08-07T05:15:14.000Z
|
2022-03-02T22:42:24.000Z
|
#!/usr/bin/env python
#
# Copyright (C) 2016 The CyanogenMod Project
# Copyright (C) 2017-2020 The LineageOS Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
from hashlib import sha1
DEVICE = 'mt6763-common'
VENDOR = 'gigaset'
VENDOR_PATH = os.path.join(
*['..', '..', '..', 'vendor', VENDOR, DEVICE, 'proprietary'])
class Updater:
def __init__(self, filename):
self.filename = filename
with open(self.filename, 'r') as f:
self.lines = f.read().splitlines()
def write(self):
with open(self.filename, 'w') as f:
f.write('\n'.join(self.lines) + '\n')
def cleanup(self):
for index, line in enumerate(self.lines):
# Skip empty or commented lines
if len(line) == 0 or line[0] == '#' or '|' not in line:
continue
# Drop SHA1 hash, if existing
self.lines[index] = line.split('|')[0]
self.write()
def update(self):
need_sha1 = False
for index, line in enumerate(self.lines):
# Skip empty lines
if len(line) == 0:
continue
# Check if we need to set SHA1 hash for the next files
if line[0] == '#':
need_sha1 = (' - from' in line)
continue
if need_sha1:
# Remove existing SHA1 hash
line = line.split('|')[0]
file_path = line.split(';')[0].split(':')[-1]
if file_path[0] == '-':
file_path = file_path[1:]
with open(os.path.join(VENDOR_PATH, file_path), 'rb') as f:
hash = sha1(f.read()).hexdigest()
self.lines[index] = '{}|{}'.format(line, hash)
self.write()
for file in ['proprietary-files.txt']:
updater = Updater(file)
if len(sys.argv) == 2 and sys.argv[1] == '-c':
updater.cleanup()
else:
updater.update()
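# Illustrative example, not from the original repo: under a section comment that
# contains " - from", a hypothetical entry such as
#     vendor/lib64/libexample.so
# would be rewritten by update() as
#     vendor/lib64/libexample.so|<sha1 of proprietary/vendor/lib64/libexample.so>
# while cleanup() strips everything after the '|' again.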
| 29.619048
| 75
| 0.569534
|
c1be7ee6b5622befaafe13de6f606183a20c5e1b
| 174
|
py
|
Python
|
getBMR.py
|
fixture94/super-duper-sortmabob
|
b3c2eabe4ff2cccbcafae8d1fb7d88263705168c
|
[
"MIT"
] | null | null | null |
getBMR.py
|
fixture94/super-duper-sortmabob
|
b3c2eabe4ff2cccbcafae8d1fb7d88263705168c
|
[
"MIT"
] | null | null | null |
getBMR.py
|
fixture94/super-duper-sortmabob
|
b3c2eabe4ff2cccbcafae8d1fb7d88263705168c
|
[
"MIT"
] | null | null | null |
def getBMR(weightLBS, heightINCHES, age):
    # Convert imperial inputs to metric
    weightKG = weightLBS*.453592
    heightCM = heightINCHES*2.54
    # Mifflin-St Jeor equation (male offset +5)
    BMR = 10*weightKG + 6.25*heightCM - 5*age + 5
    return BMR
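# Worked example (illustrative, not part of the original file): getBMR(154, 69, 30)
# converts to about 69.85 kg and 175.26 cm, giving
# 10*69.85 + 6.25*175.26 - 5*30 + 5 ≈ 1649 kcal/day.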
| 29
| 45
| 0.666667
|
24d66635b23b08e74fcf1f0cbf820c8b3c57a56c
| 223
|
py
|
Python
|
twitter_media_tweets/twitter/forms.py
|
rmotr-group-projects/wdd-w2-twitter-media-tweets
|
970889ea23a5a692a841be0e2f0e097f74b30f71
|
[
"MIT"
] | null | null | null |
twitter_media_tweets/twitter/forms.py
|
rmotr-group-projects/wdd-w2-twitter-media-tweets
|
970889ea23a5a692a841be0e2f0e097f74b30f71
|
[
"MIT"
] | 6
|
2020-06-05T22:14:42.000Z
|
2022-01-13T01:30:01.000Z
|
twitter_media_tweets/twitter/forms.py
|
ine-rmotr-projects/wdd-w3-twitter-media-tweets
|
970889ea23a5a692a841be0e2f0e097f74b30f71
|
[
"MIT"
] | null | null | null |
from django import forms
from .models import Tweet
class TweetForm(forms.Form):
content = forms.CharField(max_length=140)
image_url = forms.URLField(required=False)
video_url = forms.URLField(required=False)
| 22.3
| 46
| 0.757848
|
068140e07ff22ec0e1788a3a70252b008c329b69
| 716
|
py
|
Python
|
run_postpred.py
|
deapplegate/wtgpipeline
|
9693e8562022cc97bf5a96427e22965e1a5e8497
|
[
"MIT"
] | 1
|
2019-03-15T04:01:19.000Z
|
2019-03-15T04:01:19.000Z
|
run_postpred.py
|
deapplegate/wtgpipeline
|
9693e8562022cc97bf5a96427e22965e1a5e8497
|
[
"MIT"
] | 5
|
2017-12-11T00:11:39.000Z
|
2021-07-09T17:05:16.000Z
|
run_postpred.py
|
deapplegate/wtgpipeline
|
9693e8562022cc97bf5a96427e22965e1a5e8497
|
[
"MIT"
] | 2
|
2017-08-15T21:19:11.000Z
|
2017-10-12T00:36:35.000Z
|
#!/usr/bin/env python
#####################
import sys, glob
import pymc, numpy as np
import shapedistro, shapedistro_residuals as sdr, ldac
#####################
trainingfile = sys.argv[1]
testingfile = sys.argv[2]
resfile = sys.argv[3]
modelname = sys.argv[4]
burn = int(sys.argv[5])
outfile = sys.argv[6]
training_cat = ldac.openObjectFile(trainingfile)
testing_cat = ldac.openObjectFile(testingfile)
shapemodel = getattr(shapedistro, modelname)
mcmc = sdr.loadResult(training_cat, resfile, shapemodel)
postpred_dist = mcmc.trace('postpred_g')[burn:]
logp = sdr.calcPostPredLogProb(testing_cat, mcmc.unique_g_true, postpred_dist)
output = open(outfile, 'w')
output.write('%f\n' % logp)
output.close()
| 21.058824
| 78
| 0.710894
|
ec31bfa84bf7174a714c67eceac31a2f7f8442a8
| 1,059
|
py
|
Python
|
demoweb/urls.py
|
biljiang/demoweb
|
4eb2654f7a8099460339a16dba3d0f78e8cc4115
|
[
"MIT"
] | null | null | null |
demoweb/urls.py
|
biljiang/demoweb
|
4eb2654f7a8099460339a16dba3d0f78e8cc4115
|
[
"MIT"
] | null | null | null |
demoweb/urls.py
|
biljiang/demoweb
|
4eb2654f7a8099460339a16dba3d0f78e8cc4115
|
[
"MIT"
] | null | null | null |
"""demoweb URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from . import views
from django.contrib.auth import views as auth_views
urlpatterns = [
path('',views.HomePageView.as_view(), name = "root_index"),
path('', include('django.contrib.auth.urls')),
path('shdky/', include('shdky.urls')),
path('admin/', admin.site.urls),
# path('accounts/logout/', views.logout_view, name ="logout_view"),
]
| 36.517241
| 77
| 0.703494
|
b82dee324a722219487888d21d231e9dba03a31e
| 5,107
|
py
|
Python
|
scrapper.py
|
jonboboeva/2021-2-level-ctlr
|
4a97fd28a416d39c8d36463df781e8c291c334b5
|
[
"MIT"
] | null | null | null |
scrapper.py
|
jonboboeva/2021-2-level-ctlr
|
4a97fd28a416d39c8d36463df781e8c291c334b5
|
[
"MIT"
] | null | null | null |
scrapper.py
|
jonboboeva/2021-2-level-ctlr
|
4a97fd28a416d39c8d36463df781e8c291c334b5
|
[
"MIT"
] | null | null | null |
"""
Scrapper implementation
"""
from datetime import datetime
import json
from pathlib import Path
import random
import shutil
from time import sleep
from bs4 import BeautifulSoup
import requests
from core_utils.article import Article
from constants import ASSETS_PATH, CRAWLER_CONFIG_PATH, HTTP_PATTERN
class IncorrectURLError(Exception):
"""
Seed URL does not match standard pattern
"""
class NumberOfArticlesOutOfRangeError(Exception):
"""
Total number of articles to parse is too big
"""
class IncorrectNumberOfArticlesError(Exception):
"""
Total number of articles to parse in not integer
"""
class Crawler:
"""
Crawler implementation
"""
def __init__(self, seed_urls, total_max_articles):
self.seed_urls = seed_urls
self.total_max_articles = total_max_articles
self.urls = []
def _extract_url(self, article_bs):
articles_blocks = article_bs.find_all('div', class_='card-article__content')
not_full_urls = []
for article_block in articles_blocks:
try:
not_full_url = article_block.find_parent()['href']
not_full_urls.append(not_full_url)
except TypeError:
continue
for url in not_full_urls:
if len(self.urls) < self.total_max_articles and url:
full_url = HTTP_PATTERN + url
self.urls.append(full_url)
def find_articles(self):
"""
Finds articles
"""
for seed_url in self.seed_urls:
sleep(random.randint(1, 7))
response = requests.get(url=seed_url)
response.encoding = 'utf-8'
if not response.ok:
continue
soup = BeautifulSoup(response.text, 'lxml')
self._extract_url(soup)
def get_search_urls(self):
"""
Returns seed_urls param
"""
        return self.seed_urls
class HTMLParser:
def __init__(self, article_url, article_id):
self.article_url = article_url
self.article_id = article_id
self.article = Article(self.article_url, self.article_id)
def _fill_article_with_meta_information(self, article_bs):
try:
self.article.author = article_bs.find('div', class_='article-authors__info').text.strip()
except AttributeError:
self.article.author = 'NOT FOUND'
if self.article.author == 'NOT FOUND':
try:
self.article.author = article_bs.find('div', class_='article-authors__author').text.strip()
except AttributeError:
self.article.author = 'NOT FOUND'
self.article.topics = article_bs.find('span', class_='tags').text.strip().split(' / ')
raw_date = article_bs.find('time')['datetime'][:-10]
self.article.date = datetime.strptime(raw_date, '%Y-%m-%dT%H:%M:%S')
self.article.title = article_bs.find('h1', class_='article-headline__title').text.strip()
def _fill_article_with_text(self, article_bs):
self.article.text = article_bs.find('div', class_='article-boxes-list article__boxes').text
def parse(self):
response = requests.get(url=self.article_url)
response.encoding = 'utf-8'
article_bs = BeautifulSoup(response.text, 'lxml')
self._fill_article_with_text(article_bs)
self._fill_article_with_meta_information(article_bs)
return self.article
def prepare_environment(base_path):
"""
Creates ASSETS_PATH folder if not created and removes existing folder
"""
path_for_environment = Path(base_path)
if path_for_environment.exists():
shutil.rmtree(base_path)
path_for_environment.mkdir(parents=True)
def validate_config(crawler_path):
"""
Validates given config
"""
with open(crawler_path) as file:
configuration = json.load(file)
for url in configuration["seed_urls"]:
if HTTP_PATTERN not in url:
raise IncorrectURLError
seed_urls = configuration["seed_urls"]
total_articles_to_find_and_parse = configuration["total_articles_to_find_and_parse"]
if not seed_urls:
raise IncorrectURLError
if not isinstance(total_articles_to_find_and_parse, int) or total_articles_to_find_and_parse <= 0:
raise IncorrectNumberOfArticlesError
if total_articles_to_find_and_parse > 200:
raise NumberOfArticlesOutOfRangeError
return seed_urls, total_articles_to_find_and_parse
if __name__ == '__main__':
seed_urls_list, max_articles = validate_config(CRAWLER_CONFIG_PATH)
prepare_environment(ASSETS_PATH)
crawler = Crawler(seed_urls_list, max_articles)
crawler.find_articles()
ID = 1
for article_url_main in crawler.urls:
article_parser = HTMLParser(article_url=article_url_main, article_id=ID)
article = article_parser.parse()
article.save_raw()
ID += 1
| 31.331288
| 108
| 0.645976
|
9a2dc1499736f0d32bf8dfb13564b60fae29fe16
| 766
|
py
|
Python
|
.aux/venv/lib/python3.9/site-packages/auxilium/add_arguments/__init__.py
|
sonntagsgesicht/regtest
|
160ef1089f797fbade48160efb0e1a386adbada7
|
[
"Apache-2.0"
] | null | null | null |
.aux/venv/lib/python3.9/site-packages/auxilium/add_arguments/__init__.py
|
sonntagsgesicht/regtest
|
160ef1089f797fbade48160efb0e1a386adbada7
|
[
"Apache-2.0"
] | null | null | null |
.aux/venv/lib/python3.9/site-packages/auxilium/add_arguments/__init__.py
|
sonntagsgesicht/regtest
|
160ef1089f797fbade48160efb0e1a386adbada7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# auxilium
# --------
# Python project for an automated test and deploy toolkit.
#
# Author: sonntagsgesicht
# Version: 0.1.8, copyright Saturday, 02 October 2021
# Website: https://github.com/sonntagsgesicht/auxilium
# License: Apache License 2.0 (see LICENSE file)
from .root import add_arguments as root # noqa: F401
from .create import add_arguments as create # noqa: F401
from .update import add_arguments as update # noqa: F401
from .test import add_arguments as test # noqa: F401
from .doc import add_arguments as doc # noqa: F401
from .build import add_arguments as build # noqa: F401
from .python import add_arguments as python # noqa: F401
from .formatter import ArgumentDefaultsAndConstsHelpFormatter # noqa: F401
| 34.818182
| 75
| 0.744125
|
d99ca988d41fe491ac3bf89aeaa4176f9c1987c1
| 1,185
|
py
|
Python
|
release/stubs.min/System/Windows/Forms/__init___parts/MdiLayout.py
|
tranconbv/ironpython-stubs
|
a601759e6c6819beff8e6b639d18a24b7e351851
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Windows/Forms/__init___parts/MdiLayout.py
|
tranconbv/ironpython-stubs
|
a601759e6c6819beff8e6b639d18a24b7e351851
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/Windows/Forms/__init___parts/MdiLayout.py
|
tranconbv/ironpython-stubs
|
a601759e6c6819beff8e6b639d18a24b7e351851
|
[
"MIT"
] | null | null | null |
class MdiLayout(Enum,IComparable,IFormattable,IConvertible):
"""
Specifies the layout of multiple document interface (MDI) child windows in an MDI parent window.
enum MdiLayout,values: ArrangeIcons (3),Cascade (0),TileHorizontal (1),TileVertical (2)
"""
def Instance(self):
""" This function has been arbitrarily put into the stubs"""
return MdiLayout()
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
ArrangeIcons=None
Cascade=None
TileHorizontal=None
TileVertical=None
value__=None
| 29.625
| 215
| 0.679325
|
4bb99519c29a4befe9c56b20af27c13f7ed28f63
| 1,740
|
py
|
Python
|
mock_sensors/lock/lock.py
|
NGI-Trust-CASSIOPEIA/demonstrator-backend
|
fd627b393357f799e7203480cd955f04066b684b
|
[
"MIT"
] | null | null | null |
mock_sensors/lock/lock.py
|
NGI-Trust-CASSIOPEIA/demonstrator-backend
|
fd627b393357f799e7203480cd955f04066b684b
|
[
"MIT"
] | null | null | null |
mock_sensors/lock/lock.py
|
NGI-Trust-CASSIOPEIA/demonstrator-backend
|
fd627b393357f799e7203480cd955f04066b684b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# encoding=utf-8
import paho.mqtt.client as mqtt
import logging
import json
import time
import argparse
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
lock = True
def on_connect(client, userdata, flags, rc):
logger.debug('Connected with result code {0}'.format(str(rc)))
client.subscribe('home-assistant/frontdoor/set', qos=1)
client.publish('home-assistant/frontdoor/', 'LOCK', qos=1, retain=True)
def on_message(client, userdata, msg):
global lock
incoming = msg.payload.decode('utf-8')
logger.info('Incoming topic (%s): %s', msg.topic, incoming)
logger.info('Lock state (before): %s', lock)
if incoming == 'LOCK':
lock = True
elif incoming == 'UNLOCK':
lock = False
else:
        logger.warning('Unknown payload: %s', incoming)
logger.info('Lock state (after): %s', lock)
reply_topic = 'home-assistant/frontdoor/'
if lock:
msg = 'LOCK'
else:
msg = 'UNLOCK'
logger.info('Reply message: %s', msg)
client.publish(reply_topic, msg, qos=1, retain=True)
def main(args):
    client = mqtt.Client('lock')
client.on_connect = on_connect
client.on_message = on_message
client.enable_logger()
logger.debug('Connect to %s', args.u)
client.connect(args.u, port=args.p)
client.loop_start()
done = False
while not done:
time.sleep(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Virtual Lock')
parser.add_argument('-u', type=str, help='MQTT URL', default='192.168.94.98')
parser.add_argument('-p', type=int, help='MQTT Port', default=1883)
args = parser.parse_args()
main(args)
| 24.507042
| 81
| 0.645402
|
b6c4e6a2b7c1e1f85c2efd5006a00e8ae67ca9ac
| 896
|
py
|
Python
|
benchmarks/operator_benchmark/common/tests/random_sample_test.py
|
jsun94/nimble
|
e5c899a69677818b1becc58100577441e15ede13
|
[
"BSD-3-Clause"
] | 206
|
2020-11-28T22:56:38.000Z
|
2022-03-27T02:33:04.000Z
|
benchmarks/operator_benchmark/common/tests/random_sample_test.py
|
jsun94/nimble
|
e5c899a69677818b1becc58100577441e15ede13
|
[
"BSD-3-Clause"
] | 19
|
2020-12-09T23:13:14.000Z
|
2022-01-24T23:24:08.000Z
|
benchmarks/operator_benchmark/common/tests/random_sample_test.py
|
jsun94/nimble
|
e5c899a69677818b1becc58100577441e15ede13
|
[
"BSD-3-Clause"
] | 28
|
2020-11-29T15:25:12.000Z
|
2022-01-20T02:16:27.000Z
|
import operator_benchmark as op_bench
import torch
configs = op_bench.random_sample_configs(
M=[1, 2, 3, 4, 5, 6],
N=[7, 8, 9, 10, 11, 12],
K=[13, 14, 15, 16, 17, 18],
# probs saves the weights of each value
probs=op_bench.attr_probs(
M=[0.5, 0.2, 0.1, 0.05, 0.03, 0.1],
N=[0.1, 0.3, 0.4, 0.02, 0.03, 0.04],
K=[0.03, 0.6, 0.04, 0.02, 0.03, 0.01],
),
# this is the number of returned inputs
total_samples=10,
tags=["short"],
)
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K):
self.input_one = torch.rand(M, N, K)
self.input_two = torch.rand(M, N, K)
self.set_module_name("add")
def forward(self):
return torch.add(self.input_one, self.input_two)
op_bench.generate_pt_test(configs, AddBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| 24.888889
| 56
| 0.603795
|
6094c459ce0be0b0f6631e4a607202c4bc015b94
| 6,352
|
py
|
Python
|
src/S1Processor.py
|
Thetaspace/TS_Sen12Mosaicker
|
69c479b2fab6acb988552dec93d6b08eafb622c6
|
[
"MIT"
] | 1
|
2022-01-24T22:54:08.000Z
|
2022-01-24T22:54:08.000Z
|
src/S1Processor.py
|
Thetaspace/TS_Sen12Mosaicker
|
69c479b2fab6acb988552dec93d6b08eafb622c6
|
[
"MIT"
] | null | null | null |
src/S1Processor.py
|
Thetaspace/TS_Sen12Mosaicker
|
69c479b2fab6acb988552dec93d6b08eafb622c6
|
[
"MIT"
] | null | null | null |
# extended from https://github.com/wajuqi/Sentinel-1-preprocessing-using-Snappy
import snappy
from snappy import ProductIO
from snappy import HashMap
from snappy import GPF
import os
import zipfile
import logging
import glob
from src.Processor import Processor
import numpy as np
logger = logging.getLogger('S1ProcessorLogger')
logging.basicConfig(level=logging.INFO)
class S1Processor(Processor):
def __init__(self, zips_path, footprint):
super(S1Processor, self).__init__(zips_path, footprint)
        logger.info('Instantiating S1 processor for S1 files in {0}'.format(self.zips_path))
self.suffix = 'S1'
self.dtype = np.float32
self.safe_folders = []
self.basenames = []
self.pols = []
self.polarizations = []
def unzip(self):
for zip_file in glob.glob(self.zips_path + '/S1*.zip'):
basename = os.path.basename(zip_file)[:-4]
self.basenames.append(basename)
self.safe_folders.append(os.path.join(self.zips_path, basename) + '.SAFE')
with zipfile.ZipFile(zip_file, 'r') as f:
f.extractall(self.zips_path)
def apply_orbit_file(self, source):
logger.info('\tApplying orbit file')
parameters = HashMap()
parameters.put('Apply-Orbit-File', True)
output = GPF.createProduct('Apply-Orbit-File', parameters, source)
return output
def get_meta(self):
for i in range(len(self.safe_folders)):
only_safe_folder = os.path.basename(self.safe_folders[i])
modestamp = only_safe_folder.split("_")[1]
productstamp = only_safe_folder.split("_")[2]
polstamp = only_safe_folder.split("_")[3]
polarization = polstamp[2:4]
self.polarizations.append(polarization)
if polarization == 'DV':
self.pols.append('VH,VV')
elif polarization == 'DH':
self.pols.append('HH,HV')
elif polarization == 'SH' or polarization == 'HH':
self.pols.append('HH')
elif polarization == 'SV':
self.pols.append('VV')
else:
self.pols.append('NaN')
logger.info("Polarization error!")
def remove_thermal_noise(self, source):
logger.info('\tThermal noise removal')
parameters = HashMap()
parameters.put('removeThermalNoise', True)
output = GPF.createProduct('ThermalNoiseRemoval', parameters, source)
return output
def calibrate(self, source, pol, polarization):
logger.info('\tCalibration')
parameters = HashMap()
parameters.put('outputSigmaBand', True)
if polarization == 'DH':
parameters.put('sourceBands', 'Intensity_HH,Intensity_HV')
elif polarization == 'DV':
parameters.put('sourceBands', 'Intensity_VH,Intensity_VV')
elif polarization == 'SH' or polarization == 'HH':
parameters.put('sourceBands', 'Intensity_HH')
elif polarization == 'SV':
parameters.put('sourceBands', 'Intensity_VV')
else:
logger.info("Unknown polarization")
parameters.put('selectedPolarisations', pol)
parameters.put('outputImageScaleInDb', False)
parameters.put('auxFile', 'Product Auxiliary File')
parameters.put('outputImageInComplex', False)
parameters.put('outputGammaBand', False)
parameters.put('outputBetaBand', False)
output = GPF.createProduct("Calibration", parameters, source)
return output
def multi_temporal_despeckling(self):
    # Placeholder: multi-temporal despeckling is not implemented yet.
    return
def terrain_correction(self, source):
logger.info('\tTerrain correction...')
parameters = HashMap()
parameters.put('demName', 'SRTM 3Sec')
parameters.put('imgResamplingMethod', 'BILINEAR_INTERPOLATION')
#parameters.put('mapProjection', 'AUTO:42001') # comment this line if no need to convert to UTM/WGS84, default is WGS84
parameters.put('saveProjectedLocalIncidenceAngle', True)
parameters.put('saveSelectedSourceBand', True)
#parameters.put('pixelSpacingInMeter', 10.0)
#parameters.put('pixelSpacingInDegree', 8.983152841195215E-5)
#parameters.put('alignToStandardGrid', False)
#parameters.put('standardGridOriginX', 0.0)
#parameters.put('standardGridOriginXY', 0.0)
parameters.put('nodataValueAtSea', True)
parameters.put('saveDEM', False)
parameters.put('saveSelectedSourceBand', True)
parameters.put('incidenceAngleForSigma0', 'Use projected local incidence angle from DEM')
parameters.put('auxFile', 'Latest Auxiliary File')
output = GPF.createProduct('Terrain-Correction', parameters, source)
return output
def subset(self, source):
logger.info('\tClipping to AOI')
parameters = HashMap()
parameters.put('geoRegion', self.footprint)
output = GPF.createProduct('Subset', parameters, source)
return output
def scale_db(self, source):
logger.info('\tScaling to dB')
parameters = HashMap()
parameters.put('sourceBands', 'Sigma0_VV,Sigma0_VH')
output = GPF.createProduct("LinearToFromdB", parameters, source)
return output
def process(self):
self.unzip()
self.get_meta()
for i, safe_folder in enumerate(self.safe_folders):
scene = ProductIO.readProduct(safe_folder + '/manifest.safe')
applyorbit = self.apply_orbit_file(scene)
thermal_removed = self.remove_thermal_noise(applyorbit)
calibrated = self.calibrate(thermal_removed, self.pols[i], self.polarizations[i])
tercorrected = self.terrain_correction(calibrated)
# subset here
if self.footprint:
tercorrected = self.subset(tercorrected)
scaled_db = self.scale_db(tercorrected)
output_path = os.path.join(self.zips_path, self.basenames[i]) + '_VV_VH_dB.tif'
ProductIO.writeProduct(scaled_db, output_path, 'GeoTIFF-BigTIFF')
scene.dispose()
scene.closeIO()
self.paths_to_merge.append(output_path)
self.merge()
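# Hedged usage sketch (added for illustration, not part of the original module):
# the zips path and WKT footprint below are assumptions chosen only to show the
# call sequence; real values come from the surrounding pipeline configuration.
if __name__ == '__main__':
    example_footprint = 'POLYGON((10.0 45.0, 10.5 45.0, 10.5 45.5, 10.0 45.5, 10.0 45.0))'
    processor = S1Processor('/data/s1_zips', example_footprint)
    processor.process()  # unzip, calibrate, terrain-correct, scale to dB, then merge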
| 36.930233
| 133
| 0.631612
|
2aa92d455b45534bf9f79cd71d3f212403f0f7e2
| 857
|
py
|
Python
|
scripts/test_script.py
|
abraker95/ultimate_osu_analyzer
|
bea58c997d13c3f461ccbe682f52799f0f88fdea
|
[
"MIT"
] | 23
|
2019-02-27T06:20:15.000Z
|
2022-03-31T22:54:11.000Z
|
scripts/test_script.py
|
abraker95/ultimate_osu_analyzer
|
bea58c997d13c3f461ccbe682f52799f0f88fdea
|
[
"MIT"
] | 38
|
2019-03-03T17:35:39.000Z
|
2021-08-23T20:43:34.000Z
|
scripts/test_script.py
|
abraker95/ultimate_osu_analyzer
|
bea58c997d13c3f461ccbe682f52799f0f88fdea
|
[
"MIT"
] | 4
|
2020-03-30T20:43:14.000Z
|
2022-03-06T19:40:15.000Z
|
'''
Such scripts allow you to execute code that is too complex to manage in the embedded console
To start enter the following in the embedded console:
CmdUtils.run_script('scripts/test_script.py', globals(), locals())
Now try:
ret = TestScript().run('hello', 'world')
print(ret)
All variables accessible in the embedded console are accessible here, even ones you declared
right before loading the script!
It is recommended to write your script as a class to reduce the chance of variable-name conflicts.
If a name does collide, you may accidentally overwrite or read an unintended variable.
'''
class TestScript():
def __init__(self):
# Initializing script stuff go here
print('hello world')
def run(self, param1, param2):
# Use run function to execute the script once loaded
return param1 + param2
| 34.28
| 95
| 0.725788
|
c53e6042d6d46f815020d147c70048103d763cfe
| 11,180
|
py
|
Python
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/a10/a10_virtual_server.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 17
|
2017-06-07T23:15:01.000Z
|
2021-08-30T14:32:36.000Z
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/a10/a10_virtual_server.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 9
|
2017-06-25T03:31:52.000Z
|
2021-05-17T23:43:12.000Z
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/a10/a10_virtual_server.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 3
|
2018-05-26T21:31:22.000Z
|
2019-09-28T17:00:45.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Mischa Peters <mpeters@a10networks.com>,
# Eric Chou <ericc@a10networks.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: a10_virtual_server
version_added: 1.8
short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices' virtual servers.
description:
- Manage SLB (Server Load Balancing) virtual server objects on A10 Networks devices via aXAPIv2.
author:
- Eric Chou (@ericchou1)
- Mischa Peters (@mischapeters)
notes:
- Requires A10 Networks aXAPI 2.1.
extends_documentation_fragment:
- a10
- url
options:
state:
description:
- If the specified virtual server should exist.
choices: ['present', 'absent']
default: present
partition:
version_added: "2.3"
description:
- Set the active partition on the device.
virtual_server:
description:
- The SLB (Server Load Balancing) virtual server name.
required: true
aliases: ['vip', 'virtual']
virtual_server_ip:
description:
- The SLB virtual server IPv4 address.
aliases: ['ip', 'address']
virtual_server_status:
description:
- The SLB virtual server status, such as enabled or disabled.
default: enabled
aliases: ['status']
choices: ['enabled', 'disabled']
virtual_server_ports:
description:
- A list of ports to create for the virtual server. Each list item should be a
dictionary which specifies the C(port:) and C(type:), but can also optionally
specify the C(service_group:) as well as the C(status:). See the examples
below for details. This parameter is required when C(state) is C(present).
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
type: bool
default: 'yes'
'''
EXAMPLES = '''
# Create a new virtual server
- a10_virtual_server:
host: a10.mydomain.com
username: myadmin
password: mypassword
partition: mypartition
virtual_server: vserver1
virtual_server_ip: 1.1.1.1
virtual_server_ports:
- port: 80
protocol: TCP
service_group: sg-80-tcp
- port: 443
protocol: HTTPS
service_group: sg-443-https
- port: 8080
protocol: http
status: disabled
'''
RETURN = '''
content:
description: the full info regarding the slb_virtual
returned: success
type: str
sample: "mynewvirtualserver"
'''
import json
from ansible.module_utils.network.a10.a10 import (axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure,
axapi_enabled_disabled, axapi_get_vport_protocol, AXAPI_VPORT_PROTOCOLS)
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import url_argument_spec
VALID_PORT_FIELDS = ['port', 'protocol', 'service_group', 'status']
def validate_ports(module, ports):
for item in ports:
for key in item:
if key not in VALID_PORT_FIELDS:
module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS)))
# validate the port number is present and an integer
if 'port' in item:
try:
item['port'] = int(item['port'])
except Exception:
module.fail_json(msg="port definitions must be integers")
else:
module.fail_json(msg="port definitions must define the port field")
# validate the port protocol is present, and convert it to
# the internal API integer value (and validate it)
if 'protocol' in item:
protocol = axapi_get_vport_protocol(item['protocol'])
if not protocol:
module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_VPORT_PROTOCOLS))
else:
item['protocol'] = protocol
else:
module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_VPORT_PROTOCOLS))
# convert the status to the internal API integer value
if 'status' in item:
item['status'] = axapi_enabled_disabled(item['status'])
else:
item['status'] = 1
# ensure the service_group field is at least present
if 'service_group' not in item:
item['service_group'] = ''
def main():
argument_spec = a10_argument_spec()
argument_spec.update(url_argument_spec())
argument_spec.update(
dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
virtual_server=dict(type='str', aliases=['vip', 'virtual'], required=True),
virtual_server_ip=dict(type='str', aliases=['ip', 'address'], required=True),
virtual_server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']),
virtual_server_ports=dict(type='list', required=True),
partition=dict(type='str', default=[]),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False
)
host = module.params['host']
username = module.params['username']
password = module.params['password']
partition = module.params['partition']
state = module.params['state']
write_config = module.params['write_config']
slb_virtual = module.params['virtual_server']
slb_virtual_ip = module.params['virtual_server_ip']
slb_virtual_status = module.params['virtual_server_status']
slb_virtual_ports = module.params['virtual_server_ports']
if slb_virtual is None:
module.fail_json(msg='virtual_server is required')
validate_ports(module, slb_virtual_ports)
axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host
session_url = axapi_authenticate(module, axapi_base_url, username, password)
axapi_call(module, session_url + '&method=system.partition.active', json.dumps({'name': partition}))
slb_virtual_data = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
slb_virtual_exists = not axapi_failure(slb_virtual_data)
changed = False
if state == 'present':
json_post = {
'virtual_server': {
'name': slb_virtual,
'address': slb_virtual_ip,
'status': axapi_enabled_disabled(slb_virtual_status),
'vport_list': slb_virtual_ports,
}
}
# before creating/updating we need to validate that any
# service groups defined in the ports list exist since
# since the API will still create port definitions for
# them while indicating a failure occurred
checked_service_groups = []
for port in slb_virtual_ports:
if 'service_group' in port and port['service_group'] not in checked_service_groups:
# skip blank service group entries
if port['service_group'] == '':
continue
result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': port['service_group']}))
if axapi_failure(result):
module.fail_json(msg="the service group %s specified in the ports list does not exist" % port['service_group'])
checked_service_groups.append(port['service_group'])
if not slb_virtual_exists:
result = axapi_call(module, session_url + '&method=slb.virtual_server.create', json.dumps(json_post))
if axapi_failure(result):
module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg'])
changed = True
else:
def needs_update(src_ports, dst_ports):
'''
Checks whether every port definition in src_ports is present in
dst_ports with identical fields. Returns True if any port is
missing or differs, otherwise False.
'''
for src_port in src_ports:
found = False
different = False
for dst_port in dst_ports:
if src_port['port'] == dst_port['port']:
found = True
for valid_field in VALID_PORT_FIELDS:
if src_port[valid_field] != dst_port[valid_field]:
different = True
break
if found or different:
break
if not found or different:
return True
# every port from the src exists in the dst, and none of them were different
return False
defined_ports = slb_virtual_data.get('virtual_server', {}).get('vport_list', [])
# we check for a needed update both ways, in case ports
# are missing from either the ones specified by the user
# or from those on the device
if needs_update(defined_ports, slb_virtual_ports) or needs_update(slb_virtual_ports, defined_ports):
result = axapi_call(module, session_url + '&method=slb.virtual_server.update', json.dumps(json_post))
if axapi_failure(result):
module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg'])
changed = True
# if we changed things, get the full info regarding
# the service group for the return data below
if changed:
result = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
else:
result = slb_virtual_data
elif state == 'absent':
if slb_virtual_exists:
result = axapi_call(module, session_url + '&method=slb.virtual_server.delete', json.dumps({'name': slb_virtual}))
changed = True
else:
result = dict(msg="the virtual server was not present")
# if the config has changed, save the config unless otherwise requested
if changed and write_config:
write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
if axapi_failure(write_result):
module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])
# log out of the session nicely and exit
axapi_call(module, session_url + '&method=session.close')
module.exit_json(changed=changed, content=result)
if __name__ == '__main__':
main()
| 39.22807
| 138
| 0.627818
|
d3db60b7177b5f34d9ad3d9143746c4ae2adbf81
| 4,437
|
py
|
Python
|
biobb_dna/test/unitests/test_correlation/test_correlation.py
|
bioexcel/biobb_dna
|
7b61937d1683629949ffd7e1abb55831dcd25060
|
[
"Apache-2.0"
] | null | null | null |
biobb_dna/test/unitests/test_correlation/test_correlation.py
|
bioexcel/biobb_dna
|
7b61937d1683629949ffd7e1abb55831dcd25060
|
[
"Apache-2.0"
] | 1
|
2021-10-30T07:25:54.000Z
|
2021-10-30T07:25:54.000Z
|
biobb_dna/test/unitests/test_correlation/test_correlation.py
|
bioexcel/biobb_dna
|
7b61937d1683629949ffd7e1abb55831dcd25060
|
[
"Apache-2.0"
] | null | null | null |
from biobb_common.tools import test_fixtures as fx
from biobb_dna.interbp_correlations.interhpcorr import interhpcorr
from biobb_dna.interbp_correlations.interseqcorr import interseqcorr
from biobb_dna.interbp_correlations.interbpcorr import interbpcorr
from biobb_dna.intrabp_correlations.intrahpcorr import intrahpcorr
from biobb_dna.intrabp_correlations.intraseqcorr import intraseqcorr
from biobb_dna.intrabp_correlations.intrabpcorr import intrabpcorr
class TestInterHelparCorrelation():
def setUp(self):
fx.test_setup(self, 'interhpcorr')
def tearDown(self):
fx.test_teardown(self)
def test_helparcorrelation(self):
returncode = interhpcorr(**self.paths)
assert fx.not_empty(self.paths['output_csv_path'])
assert fx.not_empty(self.paths['output_jpg_path'])
assert fx.exe_success(returncode)
assert fx.equal(
self.paths['output_csv_path'],
self.paths['ref_csv_output'])
assert fx.equal(
self.paths['output_jpg_path'],
self.paths['ref_jpg_output'])
class TestInterSequenceCorrelation():
def setUp(self):
fx.test_setup(self, 'interseqcorr')
def tearDown(self):
fx.test_teardown(self)
def test_sequencecorrelation(self):
returncode = interseqcorr(
properties=self.properties,
**self.paths)
assert fx.not_empty(self.paths['output_csv_path'])
assert fx.not_empty(self.paths['output_jpg_path'])
assert fx.exe_success(returncode)
assert fx.equal(
self.paths['output_csv_path'],
self.paths['ref_csv_output'])
assert fx.equal(
self.paths['output_jpg_path'],
self.paths['ref_jpg_output'])
class TestInterBasepairCorrelation():
def setUp(self):
fx.test_setup(self, 'interbpcorr')
def tearDown(self):
fx.test_teardown(self)
def test_basepaircorrelation(self):
returncode = interbpcorr(
properties=self.properties,
**self.paths)
assert fx.not_empty(self.paths['output_csv_path'])
assert fx.not_empty(self.paths['output_jpg_path'])
assert fx.exe_success(returncode)
assert fx.equal(
self.paths['output_csv_path'],
self.paths['ref_csv_output'])
assert fx.equal(
self.paths['output_jpg_path'],
self.paths['ref_jpg_output'])
class TestIntraHelparCorrelation():
def setUp(self):
fx.test_setup(self, 'intrahpcorr')
def tearDown(self):
fx.test_teardown(self)
def test_helparcorrelation(self):
returncode = intrahpcorr(**self.paths)
assert fx.not_empty(self.paths['output_csv_path'])
assert fx.not_empty(self.paths['output_jpg_path'])
assert fx.exe_success(returncode)
assert fx.equal(
self.paths['output_csv_path'],
self.paths['ref_csv_output'])
assert fx.equal(
self.paths['output_jpg_path'],
self.paths['ref_jpg_output'])
class TestIntraSequenceCorrelation():
def setUp(self):
fx.test_setup(self, 'intraseqcorr')
# def tearDown(self):
# fx.test_teardown(self)
def test_sequencecorrelation(self):
returncode = intraseqcorr(
properties=self.properties,
**self.paths)
assert fx.not_empty(self.paths['output_csv_path'])
assert fx.not_empty(self.paths['output_jpg_path'])
assert fx.exe_success(returncode)
assert fx.equal(
self.paths['output_csv_path'],
self.paths['ref_csv_output'])
assert fx.equal(
self.paths['output_jpg_path'],
self.paths['ref_jpg_output'])
class TestIntraBasepairCorrelation():
def setUp(self):
fx.test_setup(self, 'intrabpcorr')
def tearDown(self):
fx.test_teardown(self)
def test_basepaircorrelation(self):
returncode = intrabpcorr(
properties=self.properties,
**self.paths)
assert fx.not_empty(self.paths['output_csv_path'])
assert fx.not_empty(self.paths['output_jpg_path'])
assert fx.exe_success(returncode)
assert fx.equal(
self.paths['output_csv_path'],
self.paths['ref_csv_output'])
assert fx.equal(
self.paths['output_jpg_path'],
self.paths['ref_jpg_output'])
| 32.625
| 68
| 0.652243
|
b4bdd0372fd308a902c6210e987f9f290f9b9fed
| 16,180
|
py
|
Python
|
git-hound.py
|
killvxk/git-hound
|
4cf9be3340f48ff94f94afab8b695199e05acefc
|
[
"MIT"
] | 1
|
2020-01-07T13:19:08.000Z
|
2020-01-07T13:19:08.000Z
|
git-hound.py
|
killvxk/git-hound
|
4cf9be3340f48ff94f94afab8b695199e05acefc
|
[
"MIT"
] | null | null | null |
git-hound.py
|
killvxk/git-hound
|
4cf9be3340f48ff94f94afab8b695199e05acefc
|
[
"MIT"
] | null | null | null |
import requests
import sys
import re
import urllib.parse
import time
import hashlib
import random
import json
import yaml
import argparse
import entropy
import fileinput
parser = argparse.ArgumentParser(
description='Git Hound')
parser.add_argument(
'--subdomain-file', type=str,
help='The file with the subdomains (or other queries).')
parser.add_argument(
'--output',
help='The output file.')
parser.add_argument(
'--output-type', type=str,
help='The output type. [default, json]')
parser.add_argument(
'--all',
default=False,
action='store_true',
help='Print all URLs, including ones with no pattern match. Otherwise, the scoring system will do the work.')
parser.add_argument(
'--api-keys',
default=False,
action='store_true',
help='Search for API keys')
parser.add_argument(
'--regex-file',
help='Supply your own regex list')
parser.add_argument(
'--search-files',
help='Supply your own list of files to check (*.env, .htpasswd)')
parser.add_argument(
'--language-file',
help='Supply your own list of file types to check (java, python)')
parser.add_argument(
'--config-file',
help='Custom config file location (default is config.yml)')
parser.add_argument(
'--results-only',
default=False,
action='store_true',
help='ONLY print the regexed search results in stdout (useful for piping to things)')
parser.add_argument(
'--pages',
type=int,
help='Max number of pages to search.')
parser.add_argument(
'--silent',
action='store_true',
default=False,
help='Don\'t print results to stdout (most reasonably used with --output).')
parser.add_argument(
'--no-antikeywords',
action='store_true',
default=False,
help='Don\'t attempt to filter out known mass scan databases')
parser.add_argument(
'--only-filtered',
default=False,
action='store_true',
help='Only search filtered queries (languages and files)')
parser.add_argument(
'--gist-only',
action='store_true',
default=False,
help='Only search Gists (default searches both repos and gists)')
parser.add_argument(
'--no-repeated-matches',
action='store_true',
default=False,
help='Don\'t print repeated matches')
parser.add_argument(
'--debug',
default=False,
action='store_true',
help='Print debug messages')
parser.add_argument(
'--many-results',
default=False,
action='store_true',
help='Attempt to retrieve additional pages of search results (slower)')
args = parser.parse_args()
with open((args.config_file if args.config_file else "config.yml"), 'r') as ymlfile:
config = yaml.load(ymlfile, Loader=yaml.SafeLoader)
GH_USERNAME = config['github_username']
GH_PASSWORD = config['github_password']
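# Hedged illustration (added): config.yml (or the file passed via --config-file)
# is expected to be YAML along the lines of
#   github_username: your-username
#   github_password: your-password
# These are the only two keys read in this script; anything beyond that is an
# assumption.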
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def debug_log(data):
print(bcolors.OKBLUE + '[debug] ' + bcolors.ENDC + data)
def grab_csrf_token(url, session):
response = session.get(url)
text = response.text
csrf = re.search(r"authenticity_token.*\"\s", text).group()[27:]
csrf = csrf[:len(csrf) - 2]
return csrf
def login_to_github(session):
csrf = grab_csrf_token('https://github.com/login', session)
session.post('https://github.com/session',
data = {
'authenticity_token': csrf,
'login': GH_USERNAME,
'password': GH_PASSWORD
}
)
def search_code(query, sessions, language=None, fileName=None):
query = urllib.parse.quote(query.replace("-", "+") + " fork:false")
if fileName != None:
query += " filename:" + fileName
paths = []
path_set = set()
delay_time = 5
maximum_pages = args.pages if args.pages else 100
page = 1
if args.debug:
debug_log('Querying GitHub projects: `' + query + '`')
order = ['asc']
order_index = 0
search_type = ['indexed']
search_type_index = 0
while search_type_index < len(search_type):
order_index = 0
while order_index < len(order):
if search_type[search_type_index] == '':
order_index += 1
continue
page = 0
while page < maximum_pages + 1:
session = random.choice(sessions)
url_string = 'https://github.com/search?o=' + order[order_index] \
+ '&p=' + str(page) + '&q=' + query + '&s=' + search_type[search_type_index] + '&type=Code'
if language:
url_string += '&l=' + language
if args.debug:
debug_log(url_string)
response = session.get(url_string)
if response.status_code == 429:
delay_time += 5
if not args.results_only:
print(bcolors.WARNING + '[!] Rate limited by GitHub. Delaying ' + str(delay_time) + 's...' + bcolors.ENDC)
time.sleep(delay_time)
continue
if delay_time > 10:
delay_time -= 1
if page == 1 and order[order_index] == 'asc' and search_type[search_type_index] == 'indexed':
match = re.search(r"\bdata\-total\-pages\=\"(\d+)\"", response.text)
if match != None:
if args.many_results and int(match.group(1)) > maximum_pages - 1:
if not args.results_only:
print(bcolors.OKBLUE + '[*] Searching ' + str(match.group(1)) + '+ pages of results...' + bcolors.ENDC)
order.append('desc')
search_type.append('')
else:
if not args.results_only:
print(bcolors.OKBLUE + '[*] Searching ' + str(match.group(1)) + ' pages of results...' + bcolors.ENDC)
else:
if not args.results_only:
print(bcolors.OKBLUE + '[*] Searching 1 page of results...' + bcolors.ENDC)
page += 1
if args.debug and page % 20 == 0:
debug_log(' Page ' + str(page))
if response.status_code != 200 and response.status_code != 400:
break
results = re.findall(r"href=\"\/.*blob.*\">", response.text)
if len(results) == 0:
break
for result in results:
result = result[7:len(result) - 2]
if result in path_set:
continue
path_set.add(result)
if re.match(r"(h1domains|bounty\-targets|url_short|url_list|\.csv|alexa)", result):
continue
raw_path = result.replace('blob/', '').split('#')[0]
paths.append({ 'source': 'github_repo', 'url': 'https://github.com/' + result, 'data_url': 'https://raw.githubusercontent.com/' + raw_path })
time.sleep(delay_time)
order_index += 1
search_type_index += 1
return paths
def search_gist(query, sessions, language=None, fileName=None):
query = urllib.parse.quote(query.replace("-", "+") + " stars:<5 fork:false")
if fileName != None:
query += " filename:" + fileName
paths = []
delay_time = 5
maximum_pages = args.pages if args.pages else 100
page = 1
while page < maximum_pages + 1:
session = random.choice(sessions)
url_string = 'https://gist.github.com/search?o=asc&p=' + str(page) + '&q=' + query + '&s=indexed'
if args.debug:
debug_log('Querying Gist: `' + query + '`')
if args.debug and page % 20 == 0:
debug_log(' . Page ' + str(page))
if language:
url_string += '&l=' + language
response = session.get(url_string)
if response.status_code == 429:
delay_time += 5
if not args.results_only:
print(bcolors.WARNING + '[!] Rate limited by GitHub. Delaying ' + str(delay_time) + 's...' + bcolors.ENDC)
time.sleep(delay_time)
continue
if delay_time > 10:
delay_time -= 1
page += 1
if response.status_code != 200 and response.status_code != 400:
break
results = re.findall(r"href=\"\/(\w+\/[0-9a-z]{5,})\">", response.text)
if len(results) == 0:
break
result_set = set()
for result in results:
if result in result_set:
continue
result_set.add(result)
project_page = session.get('https://gist.github.com/' + result)
escaped_path = re.escape(result)
match = re.search(r"href\=\"(\/" + escaped_path + r"\/raw\/[0-9a-z]{40}\/[\w_\-\.\/\%]{1,255})\"\>", project_page.text)
if match != None:
paths.append({ 'source': 'gist', 'url': 'https://gist.github.com/' + result, 'data_url': 'https://gist.githubusercontent.com' + match.group(1) })
time.sleep(delay_time)
return paths
def regex_array(array):
regex = r"("
for elm in array:
if elm == "":
continue
regex += elm + r"|"
if '.*' in elm:
if not args.results_only:
print(bcolors.WARNING + "[!] The regex wildcard match .* can be slow if used improperly and may slow down Git Hound." + bcolors.ENDC)
regex = regex[:-1] + r")"
return re.compile(regex)
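# Hedged illustration (added comment): regex_array combines all non-empty entries
# into a single alternation, e.g. regex_array(['id_rsa', 'xox[abp]-\w+']) compiles
# to (id_rsa|xox[abp]-\w+), so one re.finditer pass checks every custom pattern.
# It assumes at least one non-empty entry; an all-empty list would produce a
# broken pattern.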
interesting = {}
visited = set()
visited_hashes = set()
match_string_set = set()
def print_paths_highlighted(subdomain, paths, sessions, output_file, regex=None):
if not args.results_only:
print(bcolors.OKGREEN + subdomain + bcolors.ENDC)
if len(paths) == 0:
if not args.silent and not args.results_only:
print('No results.')
custom_regex = regex != None
for result in paths:
if result['data_url'] in visited:
continue
visited.add(result['data_url'])
session = random.choice(sessions)
response = session.get(result['data_url'])
checksum = hashlib.md5(response.text.encode('utf-8'))
if checksum in visited_hashes:
continue
visited_hashes.add(checksum)
score = 0
domain = '.'.join(subdomain.split(".")[-2:])
if not custom_regex:
regex = re.compile(r"\b(sf_username" \
+ r"|(stage|staging|atlassian|jira|conflence|zendesk)\." + re.escape(domain) + r"|db_username|db_password" \
+ r"|hooks\.slack\.com|pt_token|full_resolution_time_in_minutes" \
+ r"|xox[a-zA-Z]-[a-zA-Z0-9-]+" \
+ r"|s3\.console\.aws\.amazon\.com\/s3\/buckets|" \
+ r"|id_rsa|pg_pass|[\w\.=-]+@" + re.escape(domain) + r")\b", flags=re.IGNORECASE)
s_time = 0
if args.debug:
s_time = time.time()
matches = re.finditer(
regex,
response.text
)
match_set = set()
match_text_set = set()
for match in matches:
if match.start(0) == match.end(0):
continue
if not match.group(0) in match_text_set:
match_set.add(match.group(0))
match_text_set.add(match.group(0))
if custom_regex:
score += 2
else:
score += 1
if args.debug:
debug_log(result['data_url'])
debug_log("Time to check definite regexes: " + str(time.time() - s_time) + ".")
if args.api_keys:
if args.debug:
s_time = time.time()
generic_api_keys = re.finditer(
re.compile(r"(ACCESS|SECRET|LICENSE|CRYPT|PASS|KEY|ADMIn|TOKEN|PWD|Authorization|Bearer)[\w\s:=\"']{0,20}[=:\s'\"]([\w\-+=]{32,})\b", flags=re.IGNORECASE),
response.text
)
for match in generic_api_keys:
if not match.group(2) in match_text_set:
if entropy.entropy(match.group(2)) > 3.25:
match_set.add(match.group(2))
match_text_set.add(match.group(2))
score += 2
if args.debug:
debug_log("Time to find API key regexes: " + str(time.time() - s_time) + ".")
if not custom_regex:
keywords = re.findall(r"(.sql|.sublime_session|.env|.yml|.ipynb)$", result['data_url'].lower())
if keywords:
score += len(keywords) * 2
if not args.no_antikeywords:
if re.search(r"(\.html|\.csv|hosts\.txt|host\.txt|registry\.json|readme\.md|" + re.escape('.'.join(subdomain.split(".")[-2:])) + r".txt)$", result['data_url'].lower()):
score -= 1
anti_keywords = re.findall(r"(alexa|urls|adblock|domain|dns|top1000|top\-1000|httparchive"
+ r"|blacklist|hosts|ads|whitelist|crunchbase|tweets|tld|hosts\.txt"
+ r"|host\.txt|aquatone|recon\-ng|hackerone|bugcrowd|xtreme|list|tracking|malicious|ipv(4|6)|host\.txt)", result['data_url'].lower())
if anti_keywords:
score -= 2 ** len(anti_keywords)
if score > 0:
if args.no_repeated_matches:
unique_matches = len(match_set)
for match in match_set:
if match in match_string_set:
unique_matches -= 1
else:
match_string_set.add(match)
if unique_matches == 0:
continue
if score > 1:
if not args.silent and not args.results_only:
print(bcolors.FAIL + result['url'] + bcolors.ENDC)
else:
if not args.silent and not args.results_only:
print(bcolors.WARNING + result['url'] + bcolors.ENDC)
interesting[result['url']] = {
'url': result['url'],
'results': []
}
if output_file != None:
output_file.write(result['url'] + "\n")
for match in match_set:
truncated = match
interesting[result['url']]['results'].append(match)
if len(match) == 0:
continue
if not args.silent:
if args.results_only:
print(truncated)
if output_file != None:
output_file.write(match + "\n")
else:
print(' > ' + truncated)
if output_file != None:
output_file.write(' > ' + match + "\n")
else:
if args.all:
interesting[result['url']] = {
'url': result['url'],
'results': []
}
if not args.silent:
print(result['url'])
if output_file != None:
output_file.write(result['url'] + "\n")
if args.output and args.output_type == "json":
out_file = open(args.output, 'w+')
out_file.write(json.dumps(interesting))
out_file.close()
###
subdomains = []
if not sys.stdin.isatty():
for line in fileinput.input(files=('-')):
stripped = line.rstrip()
if len(stripped) > 0:
subdomains.append(stripped)
else:
if args.subdomain_file:
subdomain_file = args.subdomain_file
subdomains = open(subdomain_file).read().split("\n")
if len(subdomains) == 0:
print(bcolors.FAIL + "[!] Please specify some queries (either with stdin or the --subdomain-file flag)." + bcolors.ENDC)
exit(1)
regex_string = None
if args.regex_file:
regex_file_array = open(args.regex_file).read().split("\n")
regex_string = regex_array(regex_file_array)
files = []
if args.search_files:
ext_filetypes = open(args.search_files).read().split("\n")
for filetype in ext_filetypes:
files.append(filetype)
languages = []
if args.language_file:
ext_languages = open(args.language_file).read().split("\n")
for filetype in ext_languages:
languages.append(filetype)
sessions = []
session = requests.Session()
login_to_github(session)
sessions.append(session)
if not args.results_only:
print(bcolors.OKBLUE + '[*] Logged into GitHub.com as ' + GH_USERNAME + bcolors.ENDC)
output_file = None
if args.output and args.output_type != "json":
output_file = open(args.output, 'w+')
for subdomain in subdomains:
paths = []
# results = []
github_results = 0
if not args.only_filtered:
if not args.gist_only:
for path in search_code('"' + subdomain + '"', sessions):
paths.append(path)
github_results = len(paths)
for path in search_gist('"' + subdomain + '"', sessions):
paths.append(path)
for file_type in languages:
if not args.gist_only:
for path in search_code('"' + subdomain + '"', sessions, language=file_type):
paths.append(path)
for path in search_gist('"' + subdomain + '"', sessions, language=file_type):
paths.append(path)
for filename in files:
for path in search_code('"' + subdomain + '"', sessions, fileName=filename):
paths.append(path)
for path in search_gist('"' + subdomain + '"', sessions, fileName=filename):
paths.append(path)
if args.debug:
debug_log('Finished scraping GitHub search results. Will now search for secrets in ' + str(len(paths)) + ' files.')
print_paths_highlighted(subdomain, paths, sessions, output_file, regex=regex_string)
time.sleep(5)
if output_file != None:
output_file.close()
| 34.063158
| 174
| 0.618727
|
45e32d616be78ed0da58476e782d2127a3904f92
| 165
|
py
|
Python
|
e/mail-relay/web_customer/router_local.py
|
zhouli121018/nodejsgm
|
0ccbc8acf61badc812f684dd39253d55c99f08eb
|
[
"MIT"
] | null | null | null |
e/mail-relay/web_customer/router_local.py
|
zhouli121018/nodejsgm
|
0ccbc8acf61badc812f684dd39253d55c99f08eb
|
[
"MIT"
] | 18
|
2020-06-05T18:17:40.000Z
|
2022-03-11T23:25:21.000Z
|
e/mail-relay/web_customer/router_local.py
|
zhouli121018/nodejsgm
|
0ccbc8acf61badc812f684dd39253d55c99f08eb
|
[
"MIT"
] | null | null | null |
class MyRouter(object):
def db_for_read(self, model, **hints):
return 'localhost'
def db_for_write(self, model, **hints):
return 'localhost'
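# Hedged usage note (added): a router like this only takes effect once registered
# in the Django settings, e.g. roughly
#   DATABASE_ROUTERS = ['web_customer.router_local.MyRouter']
# together with a 'localhost' alias defined in DATABASES. The dotted path above is
# an assumption based on this file's location, not taken from the project settings.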
| 27.5
| 43
| 0.642424
|
991e9cb1c68f5845134145529ab921a4db65c19b
| 7,774
|
py
|
Python
|
docker/loos/board-meters-app/app.py
|
sourceperl/tk-dashboard
|
015ececc670902b02284749ac59f354db4304e48
|
[
"MIT"
] | null | null | null |
docker/loos/board-meters-app/app.py
|
sourceperl/tk-dashboard
|
015ececc670902b02284749ac59f354db4304e48
|
[
"MIT"
] | null | null | null |
docker/loos/board-meters-app/app.py
|
sourceperl/tk-dashboard
|
015ececc670902b02284749ac59f354db4304e48
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from board_lib import catch_log_except
from configparser import ConfigParser
import logging
import time
from pyHMI.DS_ModbusTCP import ModbusTCPDevice
from pyHMI.DS_Redis import RedisDevice
from pyHMI.Tag import Tag
import requests
import schedule
# some const
LTX_IP = '192.168.0.62'
# modbus address for IEM 3155 and 2155
AD_3155_LIVE_PWR = 3059
AD_2155_LIVE_PWR = 3053
AD_3155_INDEX_PWR = 3205
AD_2155_INDEX_PWR = 3205
# read config
cnf = ConfigParser()
cnf.read('/data/conf/board.conf')
# redis
redis_user = cnf.get('redis', 'user')
redis_pass = cnf.get('redis', 'pass')
# thingspeak api key
ts_pwr_api_key = cnf.get('electric_meter', 'tspeak_pwr_w_key')
ts_idx_api_key = cnf.get('electric_meter', 'tspeak_idx_w_key')
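# Hedged illustration (added): /data/conf/board.conf is an INI file read with
# ConfigParser; based on the keys fetched above it looks roughly like
#   [redis]
#   user = ...
#   pass = ...
#   [electric_meter]
#   tspeak_pwr_w_key = ...
#   tspeak_idx_w_key = ...
# The section and key names are taken from the cnf.get() calls; the values are
# deployment secrets and are not shown here.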
# some class
class Devices(object):
# redis datasource
rd = RedisDevice(host='board-redis-srv', client_adv_args=dict(username=redis_user, password=redis_pass))
# modbus datasource
# meter 'garage'
meter_garage = ModbusTCPDevice(LTX_IP, timeout=2.0, refresh=2.0, unit_id=1)
meter_garage.add_floats_table(AD_3155_LIVE_PWR)
meter_garage.add_longs_table(AD_3155_INDEX_PWR)
# meter 'cold water'
meter_cold_water = ModbusTCPDevice(LTX_IP, timeout=2.0, refresh=2.0, unit_id=2)
meter_cold_water.add_floats_table(AD_3155_LIVE_PWR)
meter_cold_water.add_longs_table(AD_3155_INDEX_PWR)
# meter 'light'
meter_light = ModbusTCPDevice(LTX_IP, timeout=2.0, refresh=2.0, unit_id=3)
meter_light.add_floats_table(AD_3155_LIVE_PWR)
meter_light.add_longs_table(AD_3155_INDEX_PWR)
# meter 'tech'
meter_tech = ModbusTCPDevice(LTX_IP, timeout=2.0, refresh=2.0, unit_id=4)
meter_tech.add_floats_table(AD_3155_LIVE_PWR)
meter_tech.add_longs_table(AD_3155_INDEX_PWR)
# meter 'CTA' (air process)
meter_cta = ModbusTCPDevice(LTX_IP, timeout=2.0, refresh=2.0, unit_id=5)
meter_cta.add_floats_table(AD_3155_LIVE_PWR)
meter_cta.add_longs_table(AD_3155_INDEX_PWR)
# meter 'heater room'
meter_heat = ModbusTCPDevice(LTX_IP, timeout=2.0, refresh=2.0, unit_id=6)
meter_heat.add_floats_table(AD_2155_LIVE_PWR)
meter_heat.add_longs_table(AD_2155_INDEX_PWR)
class Tags(object):
# redis tags
RD_TOTAL_PWR = Tag(0, src=Devices.rd, ref={'type': 'int',
'key': 'int:loos_elec:pwr_act',
'ttl': 60})
RD_TODAY_WH = Tag(0.0, src=Devices.rd, ref={'type': 'float',
'key': 'float:loos_elec:today_wh',
'ttl': 86400})
RD_YESTERDAY_WH = Tag(0.0, src=Devices.rd, ref={'type': 'float',
'key': 'float:loos_elec:yesterday_wh',
'ttl': 172800})
RD_TIMESTAMP_WH = Tag(0.0, src=Devices.rd, ref={'type': 'float',
'key': 'float:loos_elec:timestamp_wh',
'ttl': 172800})
# modbus tags
GARAGE_PWR = Tag(0.0, src=Devices.meter_garage, ref={'type': 'float', 'addr': AD_3155_LIVE_PWR, 'span': 1000})
GARAGE_I_PWR = Tag(0, src=Devices.meter_garage, ref={'type': 'long', 'addr': AD_3155_INDEX_PWR, 'span': 1 / 1000})
COLD_WATER_PWR = Tag(0.0, src=Devices.meter_cold_water, ref={'type': 'float',
'addr': AD_3155_LIVE_PWR,
'span': 1000})
COLD_WATER_I_PWR = Tag(0, src=Devices.meter_cold_water, ref={'type': 'long',
'addr': AD_3155_INDEX_PWR,
'span': 1 / 1000})
LIGHT_PWR = Tag(0.0, src=Devices.meter_light, ref={'type': 'float', 'addr': AD_3155_LIVE_PWR, 'span': 1000})
LIGHT_I_PWR = Tag(0, src=Devices.meter_light, ref={'type': 'long', 'addr': AD_3155_INDEX_PWR, 'span': 1 / 1000})
TECH_PWR = Tag(0.0, src=Devices.meter_tech, ref={'type': 'float', 'addr': AD_3155_LIVE_PWR, 'span': 1000})
TECH_I_PWR = Tag(0, src=Devices.meter_tech, ref={'type': 'long', 'addr': AD_3155_INDEX_PWR, 'span': 1 / 1000})
CTA_PWR = Tag(0.0, src=Devices.meter_cta, ref={'type': 'float', 'addr': AD_3155_LIVE_PWR, 'span': 1000})
CTA_I_PWR = Tag(0, src=Devices.meter_cta, ref={'type': 'long', 'addr': AD_3155_INDEX_PWR, 'span': 1 / 1000})
HEAT_PWR = Tag(0.0, src=Devices.meter_heat, ref={'type': 'float', 'addr': AD_2155_LIVE_PWR, 'span': 1000})
HEAT_I_PWR = Tag(0.0, src=Devices.meter_heat, ref={'type': 'long', 'addr': AD_2155_INDEX_PWR, 'span': 1 / 1000})
# virtual tags
# total power consumption
TOTAL_PWR = Tag(0.0, get_cmd=lambda: Tags.GARAGE_PWR.val + Tags.COLD_WATER_PWR.val + Tags.LIGHT_PWR.val +
Tags.TECH_PWR.val + Tags.CTA_PWR.val + Tags.HEAT_PWR.val)
def thingspeak_send(api_key, l_values):
""" upload data to thingspeak platform
:param api_key: thingspeak write API Key
:type api_key: str
:param l_values: value to update as a list (map to field1, field2...)
:type l_values: list or tuple
:return: True if update is a success, False otherwise
:rtype: bool
"""
# format data for request
d_data = {'api_key': api_key}
for i, value in enumerate(l_values):
i += 1
d_data['field%i' % i] = value
# do http POST request
logging.debug('thingspeak_send: POST data %s' % d_data)
r = requests.post('https://api.thingspeak.com/update', data=d_data, timeout=10.0)
is_ok = ((r.status_code == 200) and (int(r.text) != 0))
return is_ok
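# Hedged usage sketch (added): l_values maps positionally onto field1..fieldN, so
# a call such as
#   thingspeak_send(api_key='REPLACE_WITH_WRITE_KEY', l_values=(1234, 56))
# posts field1=1234 and field2=56 to the channel and returns True only when the
# HTTP status is 200 and the response body is a non-zero entry id.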
@catch_log_except()
def db_refresh_job():
since_last_integrate = time.time() - Tags.RD_TIMESTAMP_WH.val
Tags.RD_TIMESTAMP_WH.val += since_last_integrate
# integrate active power for daily index (if time since last integrate is regular)
if 0 < since_last_integrate < 7200:
Tags.RD_TODAY_WH.val += Tags.TOTAL_PWR.val * since_last_integrate / 3600
# publish active power
Tags.RD_TOTAL_PWR.val = Tags.TOTAL_PWR.e_val
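# Worked example (added comment, assumed values): with the 5 s schedule below, a
# steady total power of 7200 W adds 7200 * 5 / 3600 = 10 Wh to RD_TODAY_WH per
# pass. The 0 < since_last_integrate < 7200 guard skips the integration after a
# gap longer than two hours instead of crediting the missed interval as energy.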
@catch_log_except()
def db_midnight_job():
# backup daily value to yesterday then reset it for new day start
Tags.RD_YESTERDAY_WH.val = Tags.RD_TODAY_WH.val
Tags.RD_TODAY_WH.val = 0
@catch_log_except()
def web_publish_pwr_job():
l_fields = (
round(Tags.TOTAL_PWR.val),
round(Tags.GARAGE_PWR.val),
round(Tags.COLD_WATER_PWR.val),
round(Tags.LIGHT_PWR.val),
round(Tags.TECH_PWR.val),
round(Tags.CTA_PWR.val),
round(Tags.HEAT_PWR.val),
)
thingspeak_send(api_key=ts_pwr_api_key, l_values=l_fields)
@catch_log_except()
def web_publish_index_job():
l_fields = (
round(Tags.GARAGE_I_PWR.val),
round(Tags.COLD_WATER_I_PWR.val),
round(Tags.LIGHT_I_PWR.val),
round(Tags.TECH_I_PWR.val),
round(Tags.CTA_I_PWR.val),
round(Tags.HEAT_I_PWR.val),
)
thingspeak_send(api_key=ts_idx_api_key, l_values=l_fields)
if __name__ == '__main__':
# logging setup
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
logging.info('board-meters-app started')
# wait DS_ModbusTCP thread start
time.sleep(1.0)
# init scheduler
schedule.every(5).seconds.do(db_refresh_job)
schedule.every().day.at('00:00').do(db_midnight_job)
schedule.every(2).minutes.do(web_publish_pwr_job)
schedule.every().day.at('06:00').do(web_publish_index_job)
# first call
db_refresh_job()
web_publish_pwr_job()
# main loop
while True:
schedule.run_pending()
time.sleep(1.0)
| 41.572193
| 118
| 0.638153
|
4d9fe8abe42098b93dc0d40f7371aa03cc3a0b77
| 1,692
|
py
|
Python
|
aliyun-python-sdk-actiontrail/aliyunsdkactiontrail/request/v20171204/DeleteTrailRequest.py
|
DataDog/aliyun-openapi-python-sdk
|
5cbee29bce6416dd62f61f0c3786b1af6ea0d84f
|
[
"Apache-2.0"
] | 1
|
2019-12-23T12:36:43.000Z
|
2019-12-23T12:36:43.000Z
|
aliyun-python-sdk-actiontrail/aliyunsdkactiontrail/request/v20171204/DeleteTrailRequest.py
|
liusc27/aliyun-openapi-python-sdk
|
5e3db3535dd21de987dc5981e71151327d5a884f
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-actiontrail/aliyunsdkactiontrail/request/v20171204/DeleteTrailRequest.py
|
liusc27/aliyun-openapi-python-sdk
|
5e3db3535dd21de987dc5981e71151327d5a884f
|
[
"Apache-2.0"
] | 1
|
2021-02-23T11:27:54.000Z
|
2021-02-23T11:27:54.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DeleteTrailRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Actiontrail', '2017-12-04', 'DeleteTrail','actiontrail')
def get_Help(self):
return self.get_query_params().get('Help')
def set_Help(self,Help):
self.add_query_param('Help',Help)
def get_Format(self):
return self.get_query_params().get('Format')
def set_Format(self,Format):
self.add_query_param('Format',Format)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_Type(self):
return self.get_query_params().get('Type')
def set_Type(self,Type):
self.add_query_param('Type',Type)
def get_Version(self):
return self.get_query_params().get('Version')
def set_Version(self,Version):
self.add_query_param('Version',Version)
| 31.333333
| 86
| 0.743499
|
7dfda37ed9fe7023323d90423724239c06aefa1c
| 1,285
|
py
|
Python
|
run_linters.py
|
stevenpelley/gloomhaven-monster-ai
|
6b9422662e7e2554dc01edb55911b7519d38e01c
|
[
"MIT"
] | 1
|
2020-09-29T23:12:46.000Z
|
2020-09-29T23:12:46.000Z
|
run_linters.py
|
stevenpelley/gloomhaven-monster-ai
|
6b9422662e7e2554dc01edb55911b7519d38e01c
|
[
"MIT"
] | null | null | null |
run_linters.py
|
stevenpelley/gloomhaven-monster-ai
|
6b9422662e7e2554dc01edb55911b7519d38e01c
|
[
"MIT"
] | null | null | null |
"""Run script for all linters."""
import subprocess
import sys
import typing
def main() -> None:
"""Run all linters.
Linter output will be sent to stdout.
This function exits the script with return code 0 on success and a nonzero
value on failure.
"""
is_success: bool = True
for linter_input in [
['flake8', '--max-complexity', '8', '.'],
['mypy', '--strict', '.'],
['pydocstyle', '.'],
['autopep8', '-r', '-d', '-a', '-a', '--exit-code', '.']
]:
this_success = run_single_linter(linter_input)
is_success = is_success and this_success
if is_success:
print("all linters pass")
sys.exit(0)
else:
print("linter failure")
sys.exit(-1)
def run_single_linter(args: typing.List[str]) -> bool:
"""Return true if the linter passes, and false if it fails."""
p = subprocess.run(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, text=True)
if p.returncode != 0:
print("{} failure:".format(args[0]))
print(p.stdout)
return False
else:
print("{} success".format(args[0]))
return True
if __name__ == "__main__":
"""Run the main function as a script."""
main()
| 26.770833
| 79
| 0.565759
|
43a4a8d4928d4254e2bfa367bba52c37c52db4e1
| 1,174
|
py
|
Python
|
projectq/setups/decompositions/globalphase.py
|
VirtueQuantumCloud/projectqX
|
fa484fe037a3a1772127bbd00fe4628ddba34611
|
[
"Apache-2.0"
] | null | null | null |
projectq/setups/decompositions/globalphase.py
|
VirtueQuantumCloud/projectqX
|
fa484fe037a3a1772127bbd00fe4628ddba34611
|
[
"Apache-2.0"
] | null | null | null |
projectq/setups/decompositions/globalphase.py
|
VirtueQuantumCloud/projectqX
|
fa484fe037a3a1772127bbd00fe4628ddba34611
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Registers a decomposition rule for global phases.
Deletes global phase gates (which can be ignored).
"""
from projectq.cengines import DecompositionRule
from projectq.meta import get_control_count
from projectq.ops import Ph
def _decompose_PhNoCtrl(cmd):
""" Throw out global phases (no controls). """
pass
def _recognize_PhNoCtrl(cmd):
""" Recognize global phases (no controls). """
return get_control_count(cmd) == 0
all_defined_decomposition_rules = [
DecompositionRule(Ph, _decompose_PhNoCtrl, _recognize_PhNoCtrl)
]
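# Hedged usage sketch (added): these rules are normally consumed through a
# DecompositionRuleSet, e.g. roughly
#   from projectq.cengines import DecompositionRuleSet
#   from projectq.setups.decompositions import globalphase
#   rule_set = DecompositionRuleSet(modules=[globalphase])
# The exact engine wiring is an assumption about typical ProjectQ usage, not
# something stated in this module.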
| 30.102564
| 76
| 0.746167
|
3680268a54f56718e3a6d93fe472b716742356d6
| 554
|
py
|
Python
|
dbt/adapters/sqlite/relation.py
|
edgarrmondragon/dbt-sqlite
|
dc578b8e7f7707cb0e9fb8f2bb90b3340d95f5f6
|
[
"Apache-2.0"
] | 43
|
2020-11-06T20:30:56.000Z
|
2022-03-12T21:57:37.000Z
|
dbt/adapters/sqlite/relation.py
|
edgarrmondragon/dbt-sqlite
|
dc578b8e7f7707cb0e9fb8f2bb90b3340d95f5f6
|
[
"Apache-2.0"
] | 12
|
2020-11-07T07:46:01.000Z
|
2021-12-09T06:07:15.000Z
|
dbt/adapters/sqlite/relation.py
|
edgarrmondragon/dbt-sqlite
|
dc578b8e7f7707cb0e9fb8f2bb90b3340d95f5f6
|
[
"Apache-2.0"
] | 3
|
2020-11-15T01:41:43.000Z
|
2021-08-09T20:34:27.000Z
|
from dataclasses import dataclass
from dbt.adapters.base.relation import BaseRelation, Policy
@dataclass
class SQLiteQuotePolicy(Policy):
database: bool = False
schema: bool = False
identifier: bool = True
@dataclass
class SQLiteIncludePolicy(Policy):
database: bool = False
schema: bool = True
identifier: bool = True
@dataclass(frozen=True, eq=False, repr=False)
class SQLiteRelation(BaseRelation):
quote_policy: SQLiteQuotePolicy = SQLiteQuotePolicy()
include_policy: SQLiteIncludePolicy = SQLiteIncludePolicy()
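# Hedged illustration (added): with these policies a relation built through dbt's
# BaseRelation.create(...) omits the database, leaves the schema unquoted and
# quotes only the identifier, so schema "main" with table "my_table" renders as
# main."my_table". This describes expected dbt behaviour under the policies above
# and is not taken from the project's own documentation.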
| 23.083333
| 63
| 0.758123
|
475a9dd1b954f4c955fce272de2eea64141cb7df
| 3,009
|
py
|
Python
|
examples/ad_manager/v201911/adjustment_service/update_traffic_adjustments.py
|
MattCardoso/googleads-python-lib
|
62f0db9fdb78a1bcdb1e61c82c609d9f47cb48d8
|
[
"Apache-2.0"
] | null | null | null |
examples/ad_manager/v201911/adjustment_service/update_traffic_adjustments.py
|
MattCardoso/googleads-python-lib
|
62f0db9fdb78a1bcdb1e61c82c609d9f47cb48d8
|
[
"Apache-2.0"
] | null | null | null |
examples/ad_manager/v201911/adjustment_service/update_traffic_adjustments.py
|
MattCardoso/googleads-python-lib
|
62f0db9fdb78a1bcdb1e61c82c609d9f47cb48d8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a historical adjustment of 110% for New Years Day traffic.
"""
from __future__ import print_function
import datetime
# Import appropriate modules from the client library.
from googleads import ad_manager
ADJUSTMENT_ID = 'INSERT_ADJUSTMENT_ID_HERE'
def main(client, adjustment_id):
# Initialize the adjustment service.
adjustment_service = client.GetService('AdjustmentService', version='v201911')
# Create a statement to select a single traffic forecast adjustment by id.
statement = (
ad_manager.StatementBuilder(
version='v201911').Where('id = :id').WithBindVariable(
'id', adjustment_id))
# Get the forecast traffic adjustment.
response = adjustment_service.getTrafficAdjustmentsByStatement(
statement.ToStatement())
# Create a new historical adjustment segment for New Year's Day.
this_new_years = datetime.date(datetime.date.today().year, 12, 31)
next_new_years = datetime.date(datetime.date.today().year + 1, 12, 31)
new_years_segment = {
'basisType': 'HISTORICAL',
'historicalAdjustment': {
'targetDateRange': {
'startDate': next_new_years,
'endDate': next_new_years
},
'referenceDateRange': {
'startDate': this_new_years,
'endDate': this_new_years
},
'milliPercentMultiplier': 110000
}
}
if 'results' in response and len(response['results']):
# Update each local traffic adjustment.
updated_adjustments = []
for adjustment in response['results']:
adjustment['forecastAdjustmentSegments'].append(new_years_segment)
updated_adjustments.append(adjustment)
# Update traffic adjustments remotely.
adjustments = adjustment_service.updateTrafficAdjustments(
updated_adjustments)
# Display the results.
if adjustments:
for adjustment in adjustments:
print('Traffic forecast adjustment with id %d and %d segments was '
'created.' % (adjustment['id'],
len(adjustment['forecastAdjustmentSegments'])))
else:
print('No traffic adjustments were updated.')
else:
print('No traffic adjustments found to update.')
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, ADJUSTMENT_ID)
| 33.808989
| 80
| 0.70123
|
efb70e6d199971b96fef25ce6491558bc17c5105
| 697
|
py
|
Python
|
setup.py
|
techlift-tech/membership_program
|
265a84345c26670f037beb7363b05acb3f9586f2
|
[
"MIT"
] | null | null | null |
setup.py
|
techlift-tech/membership_program
|
265a84345c26670f037beb7363b05acb3f9586f2
|
[
"MIT"
] | null | null | null |
setup.py
|
techlift-tech/membership_program
|
265a84345c26670f037beb7363b05acb3f9586f2
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import re, ast
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
# get version from __version__ variable in membership_program/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('membership_program/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
setup(
name='membership_program',
version=version,
description='Membership Program',
author='Techlift',
author_email='suraj@techlift.in',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
| 26.807692
| 73
| 0.743185
|
20f0182720d90cb43e9b7dfc4501b099805c4b05
| 3,676
|
py
|
Python
|
lib/networks/VGGnet_test.py
|
Chrisvb95/Claran-repo
|
a1218faa58fcf148fb352e8674439dceca072ef5
|
[
"MIT"
] | 39
|
2018-04-09T09:32:46.000Z
|
2022-03-09T12:05:44.000Z
|
lib/networks/VGGnet_test.py
|
Chrisvb95/Claran-repo
|
a1218faa58fcf148fb352e8674439dceca072ef5
|
[
"MIT"
] | 10
|
2018-06-29T00:29:50.000Z
|
2021-10-12T22:52:30.000Z
|
lib/networks/VGGnet_test.py
|
Chrisvb95/Claran-repo
|
a1218faa58fcf148fb352e8674439dceca072ef5
|
[
"MIT"
] | 15
|
2018-08-28T06:55:48.000Z
|
2021-08-10T13:05:33.000Z
|
import tensorflow as tf
from networks.network import Network
from fast_rcnn.config import cfg
n_classes = 7
class VGGnet_test(Network):
def __init__(self, trainable=True, anchor_scales=[8, 16, 32],
feat_stride=[16, ], low_level_trainable=False,
anchor_ratios=[0.5, 1, 2], transform_img=False):
self.inputs = []
self._anchor_scales = anchor_scales
self._feat_stride = feat_stride
self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3])
#target_size = cfg.TEST.SCALES[0]
#self.data = tf.placeholder(tf.float32, shape=[1, target_size, target_size, 3])
self.im_info = tf.placeholder(tf.float32, shape=[None, 3])
self.keep_prob = tf.placeholder(tf.float32)
self.layers = dict({'data': self.data, 'im_info': self.im_info})
self.trainable = trainable
self.low_level_trainable = low_level_trainable
self.anchor_ratio_size = len(anchor_ratios)
self.anchor_ratios = anchor_ratios
self.transform_img = transform_img
self.setup()
def setup(self):
(self.feed('data')
#.spatial_transform(name='spt_trans', do_transform=self.transform_img, keep_prob=1)
.conv(3, 3, 64, 1, 1, name='conv1_1', trainable=self.low_level_trainable)
.conv(3, 3, 64, 1, 1, name='conv1_2', trainable=self.low_level_trainable)
.max_pool(2, 2, 2, 2, padding='VALID', name='pool1')
.conv(3, 3, 128, 1, 1, name='conv2_1', trainable=self.low_level_trainable)
.conv(3, 3, 128, 1, 1, name='conv2_2', trainable=self.low_level_trainable)
.max_pool(2, 2, 2, 2, padding='VALID', name='pool2')
.conv(3, 3, 256, 1, 1, name='conv3_1')
.conv(3, 3, 256, 1, 1, name='conv3_2')
.conv(3, 3, 256, 1, 1, name='conv3_3')
.max_pool(2, 2, 2, 2, padding='VALID', name='pool3')
.conv(3, 3, 512, 1, 1, name='conv4_1')
.conv(3, 3, 512, 1, 1, name='conv4_2')
.conv(3, 3, 512, 1, 1, name='conv4_3')
.max_pool(2, 2, 2, 2, padding='VALID', name='pool4')
.conv(3, 3, 512, 1, 1, name='conv5_1')
.conv(3, 3, 512, 1, 1, name='conv5_2')
.conv(3, 3, 512, 1, 1, name='conv5_3'))
(self.feed('conv5_3')
.conv(3, 3, 512, 1, 1, name='rpn_conv/3x3')
.conv(1, 1, len(self._anchor_scales) * self.anchor_ratio_size * 2, 1, 1,
padding='VALID', relu=False, name='rpn_cls_score'))
(self.feed('rpn_conv/3x3')
.conv(1, 1, len(self._anchor_scales) * self.anchor_ratio_size * 4, 1, 1,
padding='VALID', relu=False, name='rpn_bbox_pred'))
(self.feed('rpn_cls_score')
.reshape_layer(2, name='rpn_cls_score_reshape')
.softmax(name='rpn_cls_prob'))
(self.feed('rpn_cls_prob')
.reshape_layer(len(self._anchor_scales) * self.anchor_ratio_size * 2, name='rpn_cls_prob_reshape'))
(self.feed('rpn_cls_prob_reshape', 'rpn_bbox_pred', 'im_info')
.proposal_layer(self._feat_stride, self._anchor_scales, self.anchor_ratios, 'TEST', name='rois'))
(self.feed('conv5_3', 'rois')
#.roi_pool(7, 7, 1.0/16, name='pool_5')
.st_pool(7, 7, 1.0 / 16, name='pool_5', phase='TEST')
.fc(4096, name='fc6')
.fc(4096, name='fc7')
.fc(n_classes, relu=False, name='cls_score')
.softmax(name='cls_prob'))
(self.feed('fc7')
.fc(n_classes * 4, relu=False, name='bbox_pred'))
| 46.531646
| 112
| 0.572361
|
24cad57cea41dbca1005be334b039e62cd3b8789
| 240
|
py
|
Python
|
Business/bustemplate/models.py
|
Kshashank9/Business-Topology-Website
|
17d0c5ccabb17c52b6162e147640275de32e5169
|
[
"MIT"
] | null | null | null |
Business/bustemplate/models.py
|
Kshashank9/Business-Topology-Website
|
17d0c5ccabb17c52b6162e147640275de32e5169
|
[
"MIT"
] | null | null | null |
Business/bustemplate/models.py
|
Kshashank9/Business-Topology-Website
|
17d0c5ccabb17c52b6162e147640275de32e5169
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class courses(models.Model):
name = models.CharField(max_length=100)
desc = models.TextField()
img = models.ImageField(upload_to='pics')
price = models.IntegerField()
| 26.666667
| 45
| 0.720833
|
22f415c45c26822a487e38b9ce086234fc867caf
| 894
|
py
|
Python
|
LeetCode.Test/_0001_0050/Test_020_ValidParentheses.py
|
BigEggStudy/LeetCode-Py
|
735e782742fab15bdb046eb6d5fc7b03502cc92d
|
[
"MIT"
] | 1
|
2021-01-30T04:07:26.000Z
|
2021-01-30T04:07:26.000Z
|
LeetCode.Test/_0001_0050/Test_020_ValidParentheses.py
|
BigEggStudy/LeetCode-Py
|
735e782742fab15bdb046eb6d5fc7b03502cc92d
|
[
"MIT"
] | null | null | null |
LeetCode.Test/_0001_0050/Test_020_ValidParentheses.py
|
BigEggStudy/LeetCode-Py
|
735e782742fab15bdb046eb6d5fc7b03502cc92d
|
[
"MIT"
] | 1
|
2021-09-09T20:12:25.000Z
|
2021-09-09T20:12:25.000Z
|
import unittest
import sys
sys.path.append('LeetCode/_0001_0050')
from _020_ValidParentheses import Solution
class Test_020_ValidParentheses(unittest.TestCase):
def test_isValid_oneType(self):
solution = Solution()
self.assertTrue(solution.isValid('()'))
def test_isValid_multipleType(self):
solution = Solution()
self.assertTrue(solution.isValid('()[]{}'))
def test_isValid_mismatch(self):
solution = Solution()
self.assertFalse(solution.isValid('(]'))
    def test_isValid_earlyExit(self):
solution = Solution()
self.assertFalse(solution.isValid('([)]'))
def test_isValid_multipleType_2(self):
solution = Solution()
self.assertTrue(solution.isValid('{[]}'))
def test_isValid_withOtherCharacter(self):
solution = Solution()
self.assertFalse(solution.isValid('(123)'))
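The `Solution` under test is imported from `LeetCode/_0001_0050/_020_ValidParentheses.py`, which is not included here; purely as a hedged illustration, a stack-based implementation that would satisfy the six assertions above could look like the following sketch (an assumption, not the repository's actual code).
class Solution:
    def isValid(self, s: str) -> bool:
        # Map each closing bracket to the opening bracket it must match.
        pairs = {')': '(', ']': '[', '}': '{'}
        stack = []
        for ch in s:
            if ch in '([{':
                stack.append(ch)
            elif ch in pairs:
                # A closer must match the most recently opened bracket.
                if not stack or stack.pop() != pairs[ch]:
                    return False
            else:
                # Non-bracket characters are rejected, per test_isValid_withOtherCharacter.
                return False
        return not stack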
| 27.9375
| 51
| 0.675615
|
92c9a9c21debb3de20ccc25eb3d65b19705bf5e9
| 3,215
|
py
|
Python
|
build.py
|
Kataiser/radeline
|
da1fc490556b393623e0c8ced20cbcdb8de68229
|
[
"MIT"
] | 3
|
2021-03-18T20:00:21.000Z
|
2022-03-01T04:11:13.000Z
|
build.py
|
Kataiser/radeline
|
da1fc490556b393623e0c8ced20cbcdb8de68229
|
[
"MIT"
] | 1
|
2021-12-21T23:21:41.000Z
|
2021-12-21T23:21:41.000Z
|
build.py
|
Kataiser/radeline
|
da1fc490556b393623e0c8ced20cbcdb8de68229
|
[
"MIT"
] | null | null | null |
import os
import shutil
import site
import subprocess
import sys
import zipfile
from io import BytesIO
import requests
try:
import requests_cache
except ImportError:
requests_cache = None
def main():
print("Building Radeline release package\n")
if os.path.isdir('Radeline'):
print("Deleting old build folder")
shutil.rmtree('Radeline')
else:
print("No old build folder to delete")
print('Creating new build folder')
os.mkdir('Radeline')
os.mkdir('Radeline\\Optimizer')
os.mkdir('Radeline\\Optimizer\\Backups')
os.mkdir('Radeline\\Simulator')
compile_command = f'{sys.executable} "movement sim\\setup.py" build_ext --inplace'
subprocess.run(compile_command)
print("Copied", shutil.copy('movement sim\\sim_compiled.cp310-win32.pyd', 'Radeline\\Simulator\\'))
if requests_cache:
print("Using DL cache for interpreter")
requests_cache.install_cache('interpreter_dl_cache')
else:
print("Not using DL cache for interpreter")
interpreter_url = 'https://www.python.org/ftp/python/3.10.1/python-3.10.1-embed-win32.zip'
print(f"Downloading Python interpreter from {interpreter_url}...")
interpreter_data = requests.get(interpreter_url, timeout=30).content
with zipfile.ZipFile(BytesIO(interpreter_data), 'r') as interpreter_zip:
interpreter_zip.extractall(path=f"Radeline\\{interpreter_url.split('/')[-1][:-4]}\\")
packages_dir = site.getsitepackages()[1]
needed_packages = ['beautifulsoup4', 'bs4', 'certifi', 'charset_normalizer', 'idna', 'keyboard', 'lxml', 'psutil', 'pyperclip', 'requests', 'soupsieve', 'tqdm', 'urllib3', 'yaml']
for site_package in os.listdir(packages_dir):
for needed_package in needed_packages:
if needed_package in site_package and os.path.isdir(f'{packages_dir}\\{site_package}'):
shutil.copytree(f'{packages_dir}\\{site_package}', f'Radeline\\packages\\{site_package}')
break
print(f"Copied {len(needed_packages)} packages from {packages_dir} to Radeline\\packages")
shutil.rmtree('Radeline\\packages\\psutil\\tests')
print("Deleted psutil and bs4 tests")
print("Copied", shutil.copy('README.md', 'Radeline'))
print("Copied", shutil.copy('LICENSE', 'Radeline'))
print("Copied", shutil.copy('optimizer\\main.py', 'Radeline\\Optimizer'))
print("Copied", shutil.copy('optimizer\\run.py', 'Radeline\\Optimizer'))
print("Copied", shutil.copy('optimizer\\run.bat', 'Radeline\\Optimizer'))
print("Copied", shutil.copy('optimizer\\settings.yaml', 'Radeline\\Optimizer'))
print("Copied", shutil.copy('movement sim\\run.py', 'Radeline\\Simulator'))
print("Copied", shutil.copy('movement sim\\run.bat', 'Radeline\\Simulator'))
print("Copied", shutil.copy('movement sim\\config.yaml', 'Radeline\\Simulator'))
print("Copied", shutil.copy('movement sim\\input_formatter.py', 'Radeline\\Simulator'))
print("Copied", shutil.copy('movement sim\\run_formatter.py', 'Radeline\\Simulator'))
print("Copied", shutil.copy('movement sim\\run formatter.bat', 'Radeline\\Simulator'))
print("\nBuild finished")
if __name__ == '__main__':
main()
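The optional `requests_cache` import above transparently caches `requests.get` once `install_cache` is called; a minimal standalone sketch of that pattern follows (the cache name mirrors the script, the URL and printed flag are illustrative assumptions).
import requests
try:
    import requests_cache
    requests_cache.install_cache('interpreter_dl_cache')  # later GETs are served from the cache
except ImportError:
    pass  # uncached fallback, mirroring build.py
resp = requests.get('https://www.python.org/ftp/python/', timeout=30)
# requests-cache adds a from_cache attribute to responses when a cache is installed
print(resp.status_code, getattr(resp, 'from_cache', False))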
| 41.753247
| 183
| 0.689891
|
2adcde52a5b3ef32ba6e7b7d35b0ab1ca66dc5c8
| 1,913
|
py
|
Python
|
openapi_core/schema/schemas/exceptions.py
|
plambein/openapi-core
|
91c346b1846cde66909ffd8b7fab91be525d5e17
|
[
"BSD-3-Clause"
] | null | null | null |
openapi_core/schema/schemas/exceptions.py
|
plambein/openapi-core
|
91c346b1846cde66909ffd8b7fab91be525d5e17
|
[
"BSD-3-Clause"
] | null | null | null |
openapi_core/schema/schemas/exceptions.py
|
plambein/openapi-core
|
91c346b1846cde66909ffd8b7fab91be525d5e17
|
[
"BSD-3-Clause"
] | null | null | null |
import attr
from openapi_core.schema.exceptions import OpenAPIMappingError
class OpenAPISchemaError(OpenAPIMappingError):
pass
@attr.s(hash=True)
class NoValidSchema(OpenAPISchemaError):
value = attr.ib()
def __str__(self):
return "No valid schema found for value: {0}".format(self.value)
@attr.s(hash=True)
class UndefinedItemsSchema(OpenAPISchemaError):
type = attr.ib()
def __str__(self):
return "Null value for schema type {0}".format(self.type)
@attr.s(hash=True)
class InvalidSchemaValue(OpenAPISchemaError):
msg = attr.ib()
value = attr.ib()
type = attr.ib()
def __str__(self):
return self.msg.format(value=self.value, type=self.type)
@attr.s(hash=True)
class InvalidCustomFormatSchemaValue(InvalidSchemaValue):
original_exception = attr.ib()
def __str__(self):
return self.msg.format(value=self.value, type=self.type, exception=self.original_exception)
@attr.s(hash=True)
class UndefinedSchemaProperty(OpenAPISchemaError):
extra_props = attr.ib()
def __str__(self):
return "Extra unexpected properties found in schema: {0}".format(self.extra_props)
@attr.s(hash=True)
class InvalidSchemaProperty(OpenAPISchemaError):
property_name = attr.ib()
original_exception = attr.ib()
def __str__(self):
return "Invalid schema property {0}: {1}".format(self.property_name, self.original_exception)
@attr.s(hash=True)
class MissingSchemaProperty(OpenAPISchemaError):
property_name = attr.ib()
def __str__(self):
return "Missing schema property: {0}".format(self.property_name)
class UnmarshallerError(OpenAPIMappingError):
pass
@attr.s(hash=True)
class UnmarshallerStrictTypeError(UnmarshallerError):
    value = attr.ib()
    types = attr.ib()
    def __str__(self):
        return "Value {value} is not one of types {types}".format(
            value=self.value, types=self.types)
| 23.9125
| 101
| 0.708834
|
70d25f71ee328024cf0d09a599912dd1203f8f54
| 619
|
py
|
Python
|
pcbs/ornament2020/fw/gentable.py
|
Matir/hacks
|
232cdb5af8a1e8338d9406dd9ee63252bf589b2b
|
[
"BSD-2-Clause"
] | 6
|
2020-10-21T13:41:16.000Z
|
2021-12-21T22:48:40.000Z
|
pcbs/ornament2020/fw/gentable.py
|
Matir/hacks
|
232cdb5af8a1e8338d9406dd9ee63252bf589b2b
|
[
"BSD-2-Clause"
] | 2
|
2021-08-28T19:40:41.000Z
|
2021-08-29T17:46:27.000Z
|
pcbs/ornament2020/fw/gentable.py
|
Matir/hacks
|
232cdb5af8a1e8338d9406dd9ee63252bf589b2b
|
[
"BSD-2-Clause"
] | 3
|
2020-05-08T11:48:07.000Z
|
2021-10-17T21:06:05.000Z
|
import math
import sys
gamma = 2.5
template = '''
#include <stdint.h>
#include <avr/pgmspace.h>
const uint8_t consts_num_steps = %(nsteps)d;
const uint8_t gamma_table[] PROGMEM = {%(gamma_table)s};
'''
nsteps = int(sys.argv[1], 0)
maxval = int(sys.argv[2], 0)
outfile = sys.argv[3]
# Gamma adjustment table per Adafruit
gamma_vals = [int(math.pow(i/float(nsteps), gamma) * float(maxval) + 0.75)
for i in range(nsteps)]
# Render them
templ = {}
templ['nsteps'] = nsteps - 1
templ['gamma_table'] = ', '.join('%d' % x for x in gamma_vals)
with open(outfile, 'w') as fp:
fp.write(template % templ)
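A hedged usage sketch for the generator above: it reads `nsteps`, `maxval` and an output path from the command line, so an invocation along the lines of `python gentable.py 32 255 gamma_table.c` (these values are assumptions, not taken from the build files) writes the PROGMEM table; the same gamma curve can be checked in-process.
import math
# Assumed example parameters; the real build passes its own nsteps/maxval.
nsteps, maxval, gamma = 32, 255, 2.5
table = [int(math.pow(i / float(nsteps), gamma) * float(maxval) + 0.75)
         for i in range(nsteps)]
assert len(table) == nsteps
assert table[0] == 0 and table[-1] <= maxval  # monotone ramp ending near maxval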
| 21.344828
| 74
| 0.654281
|
748b056976fbad5b727fb6b4ce0ed0b3234c5ce9
| 1,227
|
py
|
Python
|
src/pyuwds3/reasoning/estimation/object_pose_estimator.py
|
LAAS-HRI/uwds3
|
42390f62ed5701a32710341b01faa10efc448078
|
[
"MIT"
] | 2
|
2020-08-19T06:15:14.000Z
|
2021-05-23T09:55:18.000Z
|
src/pyuwds3/reasoning/estimation/object_pose_estimator.py
|
LAAS-HRI/uwds3
|
42390f62ed5701a32710341b01faa10efc448078
|
[
"MIT"
] | 5
|
2021-01-06T09:00:35.000Z
|
2021-01-20T13:22:19.000Z
|
src/pyuwds3/reasoning/estimation/object_pose_estimator.py
|
LAAS-HRI/uwds3
|
42390f62ed5701a32710341b01faa10efc448078
|
[
"MIT"
] | 2
|
2020-11-18T17:34:43.000Z
|
2021-05-23T16:14:17.000Z
|
import numpy as np
from ...types.vector.vector6d import Vector6D
from ...types.vector.vector3d import Vector3D
class ObjectPoseEstimator(object):
""" Allow to compute the 6D pose in global frame
"""
def estimate(self, tracks, view_pose, camera):
""" Estimate the 6D pose in global frame from bbox (assume same orientation than map)
"""
view_matrix = view_pose.transform()
camera_matrix = camera.camera_matrix()
for o in tracks:
if o.bbox.depth is not None:
if o.is_confirmed():
fx = camera_matrix[0][0]
fy = camera_matrix[1][1]
cx = camera_matrix[0][2]
cy = camera_matrix[1][2]
c = o.bbox.center()
z = o.bbox.depth
x = (c.x - cx) * z / fx
y = (c.y - cy) * z / fy
sensor_transform = Vector6D(x=x, y=y, z=z).transform()
world_pose = Vector6D().from_transform(np.dot(view_matrix, sensor_transform))
position = world_pose.position()
rotation = Vector3D()
o.update_pose(position, rotation)
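The loop above is a standard pinhole back-projection: with intrinsics fx, fy, cx, cy and depth z, a pixel centre (u, v) maps to x = (u - cx) * z / fx and y = (v - cy) * z / fy in the camera frame. A self-contained numeric sketch of just that step, with made-up intrinsics and measurements:
import numpy as np
# Assumed intrinsics and measurement, purely for illustration.
fx, fy, cx, cy = 525.0, 525.0, 319.5, 239.5
u, v, z = 400.0, 300.0, 2.0  # pixel centre and depth in metres
# Pinhole back-projection, matching the body of estimate() above.
x = (u - cx) * z / fx
y = (v - cy) * z / fy
point_in_camera_frame = np.array([x, y, z])
print(point_in_camera_frame)  # approx [0.3067, 0.2305, 2.0]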
| 40.9
| 97
| 0.526487
|
741c86c9b8924e5c71490d4769650fb7cd3c8517
| 6,252
|
py
|
Python
|
budgetportal/guide_data.py
|
d3ft0uch/datamanager
|
60f2f9d5278d20ae553bb063dcedaf206bb3ab29
|
[
"MIT"
] | null | null | null |
budgetportal/guide_data.py
|
d3ft0uch/datamanager
|
60f2f9d5278d20ae553bb063dcedaf206bb3ab29
|
[
"MIT"
] | null | null | null |
budgetportal/guide_data.py
|
d3ft0uch/datamanager
|
60f2f9d5278d20ae553bb063dcedaf206bb3ab29
|
[
"MIT"
] | null | null | null |
category_guides = {
"adjusted-estimates-of-national-expenditure": "adjusted-estimates-of-national-expenditure",
"estimates-of-provincial-expenditure": "estimates-of-provincial-expenditure",
"frameworks-for-conditional-grants-to-provinces": "frameworks-for-conditional-grants",
"frameworks-for-conditional-grants-to-municipalities": "frameworks-for-conditional-grants",
"estimates-of-national-expenditure": "estimates-of-national-expenditure",
"performance-and-expenditure-reviews": "performance-and-expenditure-reviews",
"in-year-spending": "in-year-spending",
}
guides = {
"adjusted-estimates-of-national-expenditure": {
"description": u"This guide relates to the structured adjusted estimates of expenditure data in CSV form the AENE PDF documents on each department page, and the accompanying Excel file with the document\u2019s tables",
"name": "Adjusted Estimates of National Expenditure",
"selected_sidebar": "guides",
"selected_tab": "learning-centre",
"title": "Adjusted Estimates of National Expenditure (AENE) - vulekamali",
},
"estimates-of-national-expenditure": {
"description": "The Estimates of National Expenditure (ENE) publications describe in detail the planned spending in all national government votes over the three-year medium-term expenditure framework (MTEF) period.",
"name": "Estimates of National Expenditure",
"selected_sidebar": "guides",
"selected_tab": "learning-centre",
"title": "Estimates of National Expenditure (ENE) - vulekamali",
},
"estimates-of-provincial-expenditure": {
"description": "The Estimates of Provincial Revenue and Expenditure (EPRE) is considered a summary of the departmental Strategic and Performance Plan to a level at which the legislature and the public can engage the provincial departments.",
"name": "Estimates of Provincial Revenue and Expenditure",
"selected_sidebar": "guides",
"selected_tab": "learning-centre",
"title": "Estimates of Provincial Revenue and Expenditure (EPRE) - vulekamali",
},
"frameworks-for-conditional-grants": {
"description": "Learn where to find the rules for how conditional grants may be spent and how much has been allocated to each municipality and province",
"name": "Frameworks for Conditional Grants",
"selected_sidebar": "guides",
"selected_tab": "learning-centre",
"title": "Frameworks for Conditional Grants - vulekamali",
},
"in-year-spending": {
"description": "The in-year spending dataset provides monthly totals from the transactions of each department.",
"name": "In-year spending data",
"selected_sidebar": "guides",
"selected_tab": "learning-centre",
"title": "In-year spending data - vulekamali",
},
"index": {
"description": "South Africa's National and Provincial budget data from National Treasury in partnership with IMALI YETHU.",
"items": [
{
"description": u"This guide relates to the structured adjusted estimates of expenditure data in CSV form the AENE PDF documents on each department page, and the accompanying Excel file with the document\u2019s tables",
"name": "Adjusted Estimates of National Expenditure",
"url_path": "/guides/adjusted-estimates-of-national-expenditure",
},
{
"description": "The Estimates of National Expenditure (ENE) publications describe in detail the planned spending in all national government votes over the three-year medium-term expenditure framework (MTEF) period.",
"name": "Estimates of National Expenditure (ENE)",
"url_path": "/guides/estimates-of-national-expenditure",
},
{
"description": "The Estimates of Provincial Revenue and Expenditure (EPRE) is considered a summary of the departmental Strategic and Performance Plan to a level at which the legislature and the public can engage the provincial departments.",
"name": "Estimates of Provincial Revenue and Expenditure",
"url_path": "/guides/estimates-of-provincial-expenditure",
},
{
"description": "Learn where to find the rules for how conditional grants may be spent and how much has been allocated to each municipality and province",
"name": "Conditional Grant Frameworks and Allocations",
"url_path": "/guides/frameworks-for-conditional-grants",
},
{
"description": "The in-year spending dataset provides monthly totals from the transactions of each department.",
"name": "In-year spending data",
"url_path": "/guides/in-year-spending",
},
{
"description": "A Performance and Expenditure Review (PER) is a process of reviewing government spending on a particular service, and how effective this spending is.",
"name": "Performance and Expenditure Reviews (PER)",
"url_path": "/guides/performance-and-expenditure-reviews",
},
{
"description": "Procurement is the process where government buys goods and services using public money. Government buys goods and services in order to deliver services according to its mandate.",
"name": "Procurement",
"url_path": "https://procurement.vulekamali.gov.za/",
},
],
"selected_sidebar": "guides",
"selected_tab": "learning-centre",
"title": "Guides - vulekamali",
},
"performance-and-expenditure-reviews": {
"description": "South Africa's National and Provincial budget data from National Treasury in partnership with IMALI YETHU.",
"name": "Performance and Expenditure Reviews",
"selected_sidebar": "guides",
"selected_tab": "learning-centre",
"title": "Performance and Expenditure Reviews (PER) - vulekamali",
},
}
for slug, guide in guides.items():
guide["slug"] = slug
guide["url"] = "/guides/%s" % slug
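A small sanity-check sketch of what the loop at the end produces for one entry; the import path is an assumption based on the file's location under budgetportal/.
# Assumed import path; adjust to the actual package layout.
from budgetportal.guide_data import guides
guide = guides["in-year-spending"]
assert guide["slug"] == "in-year-spending"
assert guide["url"] == "/guides/in-year-spending"
assert guide["name"] == "In-year spending data"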
| 61.294118
| 257
| 0.662828
|
c74dc376ba27219e61387502cc6b223f113a7594
| 134
|
py
|
Python
|
daxboard/home/urls.py
|
anderson-joyle/daxboard
|
39b97ea85d771187a9c8600ee1dd8faa9567c455
|
[
"MIT"
] | 1
|
2017-12-29T09:44:58.000Z
|
2017-12-29T09:44:58.000Z
|
daxboard/home/urls.py
|
anderson-joyle/Daxboard
|
39b97ea85d771187a9c8600ee1dd8faa9567c455
|
[
"MIT"
] | 2
|
2020-02-11T22:42:26.000Z
|
2020-06-05T18:19:35.000Z
|
daxboard/home/urls.py
|
anderson-joyle/Daxboard
|
39b97ea85d771187a9c8600ee1dd8faa9567c455
|
[
"MIT"
] | 2
|
2018-11-13T07:33:35.000Z
|
2020-01-29T07:52:19.000Z
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index),
path('logout', views.logout),
]
| 19.142857
| 34
| 0.634328
|
c4f53ba27932cd262bccdb7b5fa79c53230ce629
| 1,986
|
py
|
Python
|
src/modelmanager.py
|
HimanshuMittal01/openSeqNet
|
b52ae66a6adaeaeaded5c65e8517d95e487bff6c
|
[
"MIT"
] | null | null | null |
src/modelmanager.py
|
HimanshuMittal01/openSeqNet
|
b52ae66a6adaeaeaded5c65e8517d95e487bff6c
|
[
"MIT"
] | null | null | null |
src/modelmanager.py
|
HimanshuMittal01/openSeqNet
|
b52ae66a6adaeaeaded5c65e8517d95e487bff6c
|
[
"MIT"
] | null | null | null |
import torch
from .layer import Layer
from .edge import EdgeMatrix
class ModelManager:
def __init__(self, filepath, canvas):
self.filepath = filepath
self.canvas = canvas
self.model = self._load_torch_model(filepath)
self.layers = []
self.weights = []
# Instantiates graph structure
self.num_layers = self.get_num_layers()
self._create_graph()
def _load_torch_model(self, filepath):
'''Loads pytorch model
'''
# TODO : Handle errors
return torch.load(filepath)
def _create_graph(self):
'''Creates necessary data structures for graph
'''
width = self.canvas.winfo_width()
height = self.canvas.winfo_height()
# TODO: This is not the right thing, collisions may happen
gap = width/self.num_layers
offset = gap/2
num_layer = 0
for name,param in self.model.named_parameters():
if (name.endswith(".bias")):
continue
if (num_layer==0):
self.layers.append(Layer(param.size()[1], offset+gap*num_layer, self.canvas))
num_layer += 1
self.layers.append(Layer(param.size()[0], offset+gap*num_layer, self.canvas))
self.weights.append(EdgeMatrix(param, self.layers[num_layer-1], self.layers[num_layer], self.canvas))
def get_num_layers(self):
'''Returns number of layers
'''
num_layers = 1 # For input layer
for name,param in self.model.named_parameters():
if (name.endswith(".bias")):
continue
num_layers+=1
return num_layers
def update(self):
'''Updates the x and y location of the layers
'''
pass
def show(self):
'''Shows the model on canvas
'''
for layer in self.layers:
layer.show()
for edges in self.weights:
edges.show()
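get_num_layers() and _create_graph() both key off named_parameters() and skip bias tensors; a standalone illustration with an arbitrary toy model (the architecture below is an assumption, any torch model would do) shows what gets counted.
import torch.n n as nn if False else None  # placeholder removed below
import torch.nn as nn
# Arbitrary toy model, assumed only for illustration.
model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
num_layers = 1  # input layer, mirroring get_num_layers() above
for name, param in model.named_parameters():
    if name.endswith(".bias"):
        continue
    # Each remaining parameter is a weight matrix of shape (out, in).
    print(name, tuple(param.size()))
    num_layers += 1
print(num_layers)  # 3: the input layer plus the two Linear weight matrices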
| 31.52381
| 113
| 0.576032
|
1d9a63a6bb7d5cc3321a075576e558bb702050c0
| 801
|
py
|
Python
|
tensorflow_datasets/import_test.py
|
shubhamkumaR630/datasets
|
fe9ee91849cefed0953141ea3588f73b7def78fd
|
[
"Apache-2.0"
] | 2
|
2022-02-14T09:51:39.000Z
|
2022-02-14T13:27:49.000Z
|
tensorflow_datasets/import_test.py
|
shubhamkumaR630/datasets
|
fe9ee91849cefed0953141ea3588f73b7def78fd
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/import_test.py
|
shubhamkumaR630/datasets
|
fe9ee91849cefed0953141ea3588f73b7def78fd
|
[
"Apache-2.0"
] | 1
|
2020-12-13T22:11:33.000Z
|
2020-12-13T22:11:33.000Z
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test import."""
import tensorflow_datasets as tfds
class ImportTest(tfds.testing.TestCase):
def test_import(self):
pass
if __name__ == '__main__':
tfds.testing.test_main()
| 27.62069
| 74
| 0.751561
|
de6cb054b10e128b5fa04a23dcae33cf6944c0b1
| 16,258
|
py
|
Python
|
functions/visualizations.py
|
CIMAC-CIDC/cidc-cloud-functions
|
cf6f37d699497a09b68dd5a90000ef5a96f1dd7c
|
[
"MIT"
] | null | null | null |
functions/visualizations.py
|
CIMAC-CIDC/cidc-cloud-functions
|
cf6f37d699497a09b68dd5a90000ef5a96f1dd7c
|
[
"MIT"
] | 20
|
2019-08-05T20:20:32.000Z
|
2022-02-04T16:14:55.000Z
|
functions/visualizations.py
|
CIMAC-CIDC/cidc-cloud-functions
|
cf6f37d699497a09b68dd5a90000ef5a96f1dd7c
|
[
"MIT"
] | 1
|
2022-02-15T17:51:36.000Z
|
2022-02-15T17:51:36.000Z
|
import json
from io import BytesIO
from typing import Optional
# clustergrammer2 via sklearn uses np.float which is deprecated as of numpy==1.20
import warnings
warnings.filterwarnings(
    action="ignore", category=DeprecationWarning, module="sklearn"
)
import pandas as pd
from clustergrammer2 import Network as CGNetwork
from deepdiff import DeepSearch
from openpyxl import load_workbook
from cidc_api.models import DownloadableFiles, prism, TrialMetadata
from .settings import GOOGLE_DATA_BUCKET
from .util import (
BackgroundContext,
extract_pubsub_data,
sqlalchemy_session,
get_blob_as_stream,
)
# sets the maximum number of divisions within a category
## that is shown on the top of the clustergrammer
CLUSTERGRAMMER_MAX_CATEGORY_CARDINALITY = 5
def vis_preprocessing(event: dict, context: BackgroundContext):
with sqlalchemy_session() as session:
object_url = extract_pubsub_data(event)
file_record: DownloadableFiles = DownloadableFiles.get_by_object_url(
object_url, session=session
)
if not file_record:
raise Exception(f"No downloadable file with object URL {object_url} found.")
metadata_df = _get_metadata_df(file_record.trial_id)
# Apply the transformations and get derivative data for visualization.
for transform_name, transform in _get_transforms().items():
vis_json = transform(file_record, metadata_df)
if vis_json:
# Add the vis config to the file_record
setattr(file_record, transform_name, vis_json)
# Save the derivative data additions to the database.
session.commit()
def _get_metadata_df(trial_id: str) -> pd.DataFrame:
"""
Build a dataframe containing the participant/sample metadata for this trial,
joined on CIMAC ID and indexed on CIMAC ID.
"""
participants_blob = get_blob_as_stream(
f"{trial_id}/participants.csv", as_string=True
)
samples_blob = get_blob_as_stream(f"{trial_id}/samples.csv", as_string=True)
participants_df = pd.read_csv(participants_blob)
samples_df = pd.read_csv(samples_blob)
metadata_df = pd.merge(
participants_df,
samples_df,
left_on="cimac_participant_id",
right_on="participants.cimac_participant_id",
how="outer",
)
metadata_df.set_index("cimac_id", inplace=True)
return metadata_df
def _get_transforms() -> dict:
"""
Get a list of functions taking an open file and
that file's downloadable file record as arguments, returning
a JSON blob that the frontend will use for visualization.
"""
return {
"clustergrammer": _ClustergrammerTransform(),
"ihc_combined_plot": _ihc_combined_transform,
"additional_metadata": _add_antibody_metadata,
}
def _add_antibody_metadata(
file_record: DownloadableFiles, metadata_df: pd.DataFrame
) -> Optional[dict]:
"""
Pseudo transformation to add antibody data to the DownloadableFiles.additional_metadata JSON
Only for upload_type in [cytof, elisa, ihc, micsss, and mif]
"""
transforms = {
"cytof": _cytof_antibody_md,
"elisa": _elisa_antibody_md,
"ihc": _ihc_antibody_md,
"micsss": _micsss_antibody_md,
"mif": _mif_antibody_md,
}
upload_type = file_record.upload_type.lower()
if upload_type not in transforms.keys():
return None
with sqlalchemy_session() as session:
ct_md = TrialMetadata.find_by_trial_id(
file_record.trial_id, session=session
).metadata_json
assay_instances = ct_md.get("assays", {}).get(upload_type, [])
# asserting that this will return a list, which is not necessarily true
# check cidc-schemas/schemas/assays/components/available_assays.json
if isinstance(assay_instances, dict):
# only exception to list, eg olink
assay_md = assay_instances
elif isinstance(assay_instances, list):
ds = DeepSearch(assay_instances, file_record.object_url)
if "matched_values" in ds:
if len(ds["matched_values"]) != 1:
raise Exception(
f"Issue loading antibodies for {file_record.object_url} in {file_record.trial_id}: {file_record.object_url} is not unique in ct['assays'][{upload_type}]"
)
# matched_value = ["root[path][to][matching]"]
matching_path = list(ds["matched_values"])[0]
index = matching_path.split("[")[1].split("]")[0]
if index.isdigit(): # not technically needed, see below
assay_md = assay_instances[int(index)]
else:
# technically can't get here because DeepSearch on assay_instances: list has return bounded to "root[ int ]..."
# if some error occurs, need to error or need assay_md defined
# testing this doesn't seem necessary, but would likely need patching DeepSearch
try:
assay_md = assay_instances[index] # should work for all root[...]
except:
# add a bit of actual context, as any IndexError thrown would not be useful
raise Exception(
f"Issue loading antibodies for {file_record.object_url} in {file_record.trial_id}: unable to search ct['assays']['{upload_type}']"
)
else:
raise TypeError(
f"Issue loading antibodies for {file_record.object_url} in {file_record.trial_id}: ct['assays']['{upload_type}'] is {type(assay_instances).__name__} not list, dict"
)
md = transforms[upload_type](assay_md)
if md is None: # no antibody metadata on the assay
return None
file_md = file_record.additional_metadata
if upload_type == "ihc":
# for ihc, is only a single antibody
file_md["ihc.antibody"] = md
else:
file_md[f"{upload_type}.antibodies"] = md
return file_md
def _cytof_antibody_md(assay_md: dict) -> Optional[str]:
antibody_md = assay_md.get("cytof_antibodies")
if not antibody_md:
return None
antibodies = []
for ab in antibody_md:
if ab["usage"] != "Ignored":
entry = f"{ab['stain_type'].lower().split()[0]} {ab['isotope']}-{ab['antibody']}"
if ab.get("clone"):
entry += f" ({ab['clone']})"
antibodies.append(entry)
return ", ".join(antibodies)
def _elisa_antibody_md(assay_md: dict) -> Optional[str]:
antibody_md = assay_md.get("antibodies")
if not antibody_md:
return None
antibodies = []
for ab in antibody_md:
if ab["usage"] != "Ignored":
entry = f"{ab['stain_type'].lower().split()[0]} {ab['isotope']}-{ab['antibody']}"
if ab.get("clone"):
entry += f" ({ab['clone']})"
antibodies.append(entry)
return ", ".join(antibodies)
def _ihc_antibody_md(assay_md: dict) -> Optional[str]:
antibody_md = assay_md.get("antibody")
if not antibody_md:
return None
antibody = antibody_md["antibody"]
if antibody_md.get("clone"):
antibody += f" ({antibody_md['clone']})"
return antibody
def _micsss_antibody_md(assay_md: dict) -> Optional[str]:
antibody_md = assay_md.get("antibody")
if not antibody_md:
return None
antibodies = []
for ab in antibody_md:
entry = ab["antibody"]
if ab.get("clone"):
entry += f" ({ab['clone']})"
antibodies.append(entry)
return ", ".join(antibodies)
def _mif_antibody_md(assay_md: dict) -> Optional[str]:
antibody_md = assay_md.get("antibodies")
if not antibody_md:
return None
antibodies = []
for ab in antibody_md:
if ab.get("export_name"):
entry = ab["export_name"]
else:
entry = ab["antibody"] + " ("
if ab.get("clone"):
entry += ab["clone"] + " - "
entry += str(ab["fluor_wavelength"]) + ")"
antibodies.append(entry)
return ", ".join(antibodies)
def _ihc_combined_transform(
file_record: DownloadableFiles, metadata_df: pd.DataFrame
) -> Optional[dict]:
"""
Prepare an IHC combined file for visualization by joining it with relevant metadata
"""
if file_record.upload_type.lower() != "ihc marker combined":
return None
print(f"Generating IHC combined visualization config for file {file_record.id}")
data_file = get_blob_as_stream(file_record.object_url)
data_df = pd.read_csv(data_file)
full_df = data_df.join(metadata_df, on="cimac_id", how="inner")
return json.loads(full_df.to_json(orient="records"))
class _ClustergrammerTransform:
def __call__(
self, file_record: DownloadableFiles, metadata_df: pd.DataFrame
) -> Optional[dict]:
"""
Prepare the data file for visualization in clustergrammer.
NOTE: `metadata_df` should contain data from the participants and samples CSVs
for this file's trial, joined on CIMAC ID and indexed on CIMAC ID.
"""
if file_record.object_url.endswith("npx.xlsx"):
data_file = get_blob_as_stream(file_record.object_url)
return self.npx(data_file, metadata_df)
elif file_record.upload_type.lower() in (
"cell counts compartment",
"cell counts assignment",
"cell counts profiling",
):
data_file = get_blob_as_stream(file_record.object_url)
return self.cytof_summary(data_file, metadata_df)
return None
def npx(self, data_file, metadata_df: pd.DataFrame) -> dict:
"""Prepare an NPX file for visualization in clustergrammer"""
# Load the NPX data into a dataframe.
npx_df = _npx_to_dataframe(data_file)
return self._clustergrammerify(npx_df, metadata_df)
def cytof_summary(self, data_file, metadata_df: pd.DataFrame) -> dict:
"""Prepare CyTOF summary csv for visualization in clustergrammer"""
# Load the CyTOF summary data into a dataframe
cytof_df = _cytof_summary_to_dataframe(data_file)
return self._clustergrammerify(cytof_df, metadata_df)
def _clustergrammerify(
self, data_df: pd.DataFrame, metadata_df: pd.DataFrame
) -> dict:
"""
Produce the clustergrammer config for the given data and metadata dfs.
`data_df` must be a dataframe with CIMAC ID column headers.
"""
assert (
data_df.shape[1] > 1
), "Cannot generate clustergrammer visualization for data with only one sample."
data_df.columns = _metadata_to_categories(metadata_df.loc[data_df.columns])
# TODO: find a better way to handle missing values
data_df.fillna(0, inplace=True)
# Produce a clustergrammer JSON blob for this dataframe.
net = CGNetwork()
net.load_df(data_df)
net.normalize()
net.cluster()
return net.viz
def _metadata_to_categories(metadata_df: pd.DataFrame) -> list:
"""
Add category information to `data_df`'s column headers in the format that Clustergrammer expects:
"([Category 1]: [Value 1], [Category 2]: [Value 2], ...)"
"""
metadata_df = metadata_df.copy() # so don't modify original
CLINICAL_FIELD_PREFIX = "arbitrary_trial_specific_clinical_annotations."
columns = []
for c in metadata_df.columns:
# go through and check cardinality = # unique
# also rename the columns to pretty things
cardinality = len(metadata_df[c].unique())
if (
cardinality > CLUSTERGRAMMER_MAX_CATEGORY_CARDINALITY
or cardinality <= 1
or cardinality == metadata_df.shape[0]
):
# only want if not all the same, not too many, and not each unique to sample
if c not in [
"cimac_participant_id",
"cohort_name",
"collection_event_name",
]:
# we want to keep the above no matter what
metadata_df.pop(c)
continue
if "(1=Yes,0=No)" in c:
# these are boolean! let's treat them that way
metadata_df[c] = metadata_df[c].astype(bool)
if c.startswith(CLINICAL_FIELD_PREFIX):
# for 10021 participants.csv:
## remove the prefix
## remove any parentheses
cat = c[len(CLINICAL_FIELD_PREFIX) :]
if "(" in cat and ")" in cat and cat.index(")") > cat.index("("):
cat = cat.split("(", 1)[0] + cat.rsplit(")", 1)[1]
else:
# otherwise
## break up underscores
## title case
## drop 'CIDC' / 'CIMAC' anywhere
## drop trailing 'Name'
cat = c.replace("_", " ").title().replace("Cidc", "").replace("Cimac", "")
if cat.endswith("Name") and not cat == "Name":
cat = cat[:-4]
# strip so it's pretty!
if cat.strip() not in columns:
columns.append(cat.strip())
else:
# if it's a repeated name, pop it
metadata_df.pop(c)
metadata_df.columns = columns
print("CG Category options:", ", ".join(columns))
# cut down to only the categories we want
columns = [
c
for c in [
"Participant Id",
"Collection Event",
"Cohort",
"Treatment",
"Disease progression",
"RECIST clinical benefit status",
]
if c in metadata_df.columns
]
columns = sorted(columns, key=lambda c: len(metadata_df[c].unique()))
metadata_df = metadata_df[columns]
if "Disease progression" in columns:
columns[columns.index("Disease progression")] = "Disease prog"
if "RECIST clinical benefit status" in columns:
columns[columns.index("RECIST clinical benefit status")] = "Clin benefit"
metadata_df.columns = columns
# build the output str in ClusterGrammer compatible format
categories = []
for idx, row in metadata_df.iterrows():
temp = [f"CIMAC Id: {idx}"]
for cat, val in row.items():
temp.append(f"{cat}: {val}")
categories.append(tuple(temp))
return categories
def _npx_to_dataframe(fname, sheet_name="NPX Data") -> pd.DataFrame:
"""Load raw data from an NPX file into a pandas dataframe."""
wb = load_workbook(fname)
if sheet_name not in wb.sheetnames:
raise ValueError(f"Couldn't locate expected worksheet '{sheet_name}'.")
ws = wb[sheet_name]
extract_values = lambda xlsx_row: [cell.value for cell in xlsx_row]
# Assay labels (row 5 of the spreadsheet)
assay_labels = extract_values(ws[4][1:-2])
# Raw data (row 8 of the spreadsheet onwards)
rows = []
num_cols = len(assay_labels) + 1
for row in ws.iter_rows(min_row=8):
sample_id = row[0].value
# If we hit a blank line, there's no more data to read.
if not sample_id:
break
# Only include rows pertaining to CIMAC ids
if prism.cimac_id_regex.match(sample_id):
new_row = extract_values(row[0:num_cols])
rows.append(new_row)
raw = pd.DataFrame(rows).set_index(0)
raw.index.name = "cimac_id"
raw.columns = assay_labels
# Drop columns that don't have raw data
raw.drop(columns=["Plate ID", "QC Warning"], inplace=True)
# Data is later z-scored, so remove data that would introduce NaN's
raw.drop(columns=raw.columns[raw.std() == 0], inplace=True)
return raw.T
def _cytof_summary_to_dataframe(csv: BytesIO) -> pd.DataFrame:
"""Load a CyTOF summary CSV into a dataframe with CIMAC IDs as column headers"""
raw_df = pd.read_csv(csv)
# Index on CIMAC ID column
indexed_df = raw_df.set_index("cimac_id")
# Drop unused metadata columns (we should get these from the metadata df)
for col in ["cimac_participant_id", "protocol_identifier"]:
try:
indexed_df.drop(col, axis=1, inplace=True)
except KeyError:
pass
return indexed_df.T
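_metadata_to_categories ultimately returns one tuple of 'Category: value' strings per sample, which is the column-label format Clustergrammer expects; a toy illustration of that output shape, with invented CIMAC IDs and metadata values:
import pandas as pd
# Invented metadata, indexed on CIMAC ID like the real metadata_df.
metadata_df = pd.DataFrame(
    {"Cohort": ["Arm A", "Arm B"], "Collection Event": ["Baseline", "On Treatment"]},
    index=["CTTTP01A1.00", "CTTTP02A1.00"],
)
categories = []
for cimac_id, row in metadata_df.iterrows():
    labels = [f"CIMAC Id: {cimac_id}"]
    labels.extend(f"{category}: {value}" for category, value in row.items())
    categories.append(tuple(labels))
# e.g. ('CIMAC Id: CTTTP01A1.00', 'Cohort: Arm A', 'Collection Event: Baseline')
print(categories[0])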
| 34.444915
| 176
| 0.632489
|
0e7045f517d44e8d73b08bac7ce499f79d2bf80e
| 9,498
|
py
|
Python
|
tensorflow/python/keras/_impl/keras/preprocessing/sequence_test.py
|
ashuven63/tf_audio
|
bc561b81069001da01a1c7df4c16f6b9ba9a400b
|
[
"Apache-2.0"
] | 1
|
2018-05-30T00:34:05.000Z
|
2018-05-30T00:34:05.000Z
|
tensorflow/python/keras/_impl/keras/preprocessing/sequence_test.py
|
timctho/tensorflow
|
015c72eac3f4e448dd8ab852843e902771496532
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/keras/_impl/keras/preprocessing/sequence_test.py
|
timctho/tensorflow
|
015c72eac3f4e448dd8ab852843e902771496532
|
[
"Apache-2.0"
] | 1
|
2021-11-16T19:59:48.000Z
|
2021-11-16T19:59:48.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sequence data preprocessing utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from math import ceil
import numpy as np
from tensorflow.python.keras._impl import keras
from tensorflow.python.platform import test
class TestSequence(test.TestCase):
def test_pad_sequences(self):
a = [[1], [1, 2], [1, 2, 3]]
# test padding
b = keras.preprocessing.sequence.pad_sequences(a, maxlen=3, padding='pre')
self.assertAllClose(b, [[0, 0, 1], [0, 1, 2], [1, 2, 3]])
b = keras.preprocessing.sequence.pad_sequences(a, maxlen=3, padding='post')
self.assertAllClose(b, [[1, 0, 0], [1, 2, 0], [1, 2, 3]])
# test truncating
b = keras.preprocessing.sequence.pad_sequences(
a, maxlen=2, truncating='pre')
self.assertAllClose(b, [[0, 1], [1, 2], [2, 3]])
b = keras.preprocessing.sequence.pad_sequences(
a, maxlen=2, truncating='post')
self.assertAllClose(b, [[0, 1], [1, 2], [1, 2]])
# test value
b = keras.preprocessing.sequence.pad_sequences(a, maxlen=3, value=1)
self.assertAllClose(b, [[1, 1, 1], [1, 1, 2], [1, 2, 3]])
def test_pad_sequences_vector(self):
a = [[[1, 1]], [[2, 1], [2, 2]], [[3, 1], [3, 2], [3, 3]]]
# test padding
b = keras.preprocessing.sequence.pad_sequences(a, maxlen=3, padding='pre')
self.assertAllClose(b, [[[0, 0], [0, 0], [1, 1]], [[0, 0], [2, 1], [2, 2]],
[[3, 1], [3, 2], [3, 3]]])
b = keras.preprocessing.sequence.pad_sequences(a, maxlen=3, padding='post')
self.assertAllClose(b, [[[1, 1], [0, 0], [0, 0]], [[2, 1], [2, 2], [0, 0]],
[[3, 1], [3, 2], [3, 3]]])
# test truncating
b = keras.preprocessing.sequence.pad_sequences(
a, maxlen=2, truncating='pre')
self.assertAllClose(b, [[[0, 0], [1, 1]], [[2, 1], [2, 2]], [[3, 2], [3,
3]]])
b = keras.preprocessing.sequence.pad_sequences(
a, maxlen=2, truncating='post')
self.assertAllClose(b, [[[0, 0], [1, 1]], [[2, 1], [2, 2]], [[3, 1], [3,
2]]])
# test value
b = keras.preprocessing.sequence.pad_sequences(a, maxlen=3, value=1)
self.assertAllClose(b, [[[1, 1], [1, 1], [1, 1]], [[1, 1], [2, 1], [2, 2]],
[[3, 1], [3, 2], [3, 3]]])
def test_make_sampling_table(self):
a = keras.preprocessing.sequence.make_sampling_table(3)
self.assertAllClose(
a, np.asarray([0.00315225, 0.00315225, 0.00547597]), rtol=.1)
def test_skipgrams(self):
# test with no window size and binary labels
couples, labels = keras.preprocessing.sequence.skipgrams(
np.arange(3), vocabulary_size=3)
for couple in couples:
self.assertIn(couple[0], [0, 1, 2])
self.assertIn(couple[1], [0, 1, 2])
# test window size and categorical labels
couples, labels = keras.preprocessing.sequence.skipgrams(
np.arange(5), vocabulary_size=5, window_size=1, categorical=True)
for couple in couples:
self.assertLessEqual(couple[0] - couple[1], 3)
for l in labels:
self.assertEqual(len(l), 2)
def test_TimeseriesGenerator(self):
data = np.array([[i] for i in range(50)])
targets = np.array([[i] for i in range(50)])
data_gen = keras.preprocessing.sequence.TimeseriesGenerator(
data, targets, length=10, sampling_rate=2, batch_size=2)
self.assertEqual(len(data_gen), 20)
self.assertAllClose(data_gen[0][0],
np.array([[[0], [2], [4], [6], [8]], [[1], [3], [5],
[7], [9]]]))
self.assertAllClose(data_gen[0][1], np.array([[10], [11]]))
self.assertAllClose(data_gen[1][0],
np.array([[[2], [4], [6], [8], [10]], [[3], [5], [7],
[9], [11]]]))
self.assertAllClose(data_gen[1][1], np.array([[12], [13]]))
data_gen = keras.preprocessing.sequence.TimeseriesGenerator(
data, targets, length=10, sampling_rate=2, reverse=True, batch_size=2)
self.assertEqual(len(data_gen), 20)
self.assertAllClose(data_gen[0][0],
np.array([[[8], [6], [4], [2], [0]], [[9], [7], [5],
[3], [1]]]))
self.assertAllClose(data_gen[0][1], np.array([[10], [11]]))
data_gen = keras.preprocessing.sequence.TimeseriesGenerator(
data, targets, length=10, sampling_rate=2, shuffle=True, batch_size=1)
batch = data_gen[0]
r = batch[1][0][0]
self.assertAllClose(batch[0],
np.array([[[r - 10], [r - 8], [r - 6], [r - 4],
[r - 2]]]))
self.assertAllClose(batch[1], np.array([
[r],
]))
data_gen = keras.preprocessing.sequence.TimeseriesGenerator(
data, targets, length=10, sampling_rate=2, stride=2, batch_size=2)
self.assertEqual(len(data_gen), 10)
self.assertAllClose(data_gen[1][0],
np.array([[[4], [6], [8], [10], [12]], [[6], [8], [10],
[12], [14]]]))
self.assertAllClose(data_gen[1][1], np.array([[14], [16]]))
data_gen = keras.preprocessing.sequence.TimeseriesGenerator(
data,
targets,
length=10,
sampling_rate=2,
start_index=10,
end_index=30,
batch_size=2)
self.assertEqual(len(data_gen), 6)
self.assertAllClose(data_gen[0][0],
np.array([[[10], [12], [14], [16], [18]],
[[11], [13], [15], [17], [19]]]))
self.assertAllClose(data_gen[0][1], np.array([[20], [21]]))
data = np.array([np.random.random_sample((1, 2, 3, 4)) for i in range(50)])
targets = np.array([np.random.random_sample((3, 2, 1)) for i in range(50)])
data_gen = keras.preprocessing.sequence.TimeseriesGenerator(
data,
targets,
length=10,
sampling_rate=2,
start_index=10,
end_index=30,
batch_size=2)
self.assertEqual(len(data_gen), 6)
self.assertAllClose(data_gen[0][0],
np.array(
[np.array(data[10:19:2]),
np.array(data[11:20:2])]))
self.assertAllClose(data_gen[0][1], np.array([targets[20], targets[21]]))
with self.assertRaises(ValueError) as context:
keras.preprocessing.sequence.TimeseriesGenerator(data, targets, length=50)
error = str(context.exception)
self.assertIn('`start_index+length=50 > end_index=49` is disallowed', error)
def test_TimeSeriesGenerator_doesnt_miss_any_sample(self):
x = np.array([[i] for i in range(10)])
for length in range(3, 10):
g = keras.preprocessing.sequence.TimeseriesGenerator(
x, x, length=length, batch_size=1)
expected = max(0, len(x) - length)
actual = len(g)
self.assertEqual(expected, actual)
if actual > 0:
# All elements in range(length, 10) should be used as current step
expected = np.arange(length, 10).reshape(-1, 1)
y = np.concatenate([g[ix][1] for ix in range(len(g))], axis=0)
self.assertAllClose(y, expected)
x = np.array([[i] for i in range(23)])
strides = (1, 1, 5, 7, 3, 5, 3)
lengths = (3, 3, 4, 3, 1, 3, 7)
batch_sizes = (6, 6, 6, 5, 6, 6, 6)
shuffles = (False, True, True, False, False, False, False)
for stride, length, batch_size, shuffle in zip(strides, lengths,
batch_sizes, shuffles):
g = keras.preprocessing.sequence.TimeseriesGenerator(
x,
x,
length=length,
sampling_rate=1,
stride=stride,
start_index=0,
end_index=None,
shuffle=shuffle,
reverse=False,
batch_size=batch_size)
if shuffle:
# all batches have the same size when shuffle is True.
expected_sequences = ceil(
(23 - length) / float(batch_size * stride)) * batch_size
else:
# last batch will be different if `(samples - length) / stride`
# is not a multiple of `batch_size`.
expected_sequences = ceil((23 - length) / float(stride))
expected_batches = ceil(expected_sequences / float(batch_size))
y = [g[ix][1] for ix in range(len(g))]
actual_sequences = sum(len(iy) for iy in y)
actual_batches = len(y)
self.assertEqual(expected_sequences, actual_sequences)
self.assertEqual(expected_batches, actual_batches)
if __name__ == '__main__':
test.main()
| 39.740586
| 80
| 0.562645
|
f6d8a8122ca736019f0c9ffba9b833faf23203ae
| 3,433
|
py
|
Python
|
myfirst/settings.py
|
fyunka/django-blog
|
3cb44c3311ef4bc2a920ec1c4f34eff738b60eea
|
[
"MIT"
] | null | null | null |
myfirst/settings.py
|
fyunka/django-blog
|
3cb44c3311ef4bc2a920ec1c4f34eff738b60eea
|
[
"MIT"
] | null | null | null |
myfirst/settings.py
|
fyunka/django-blog
|
3cb44c3311ef4bc2a920ec1c4f34eff738b60eea
|
[
"MIT"
] | null | null | null |
"""
Django settings for myfirst project.
Generated by 'django-admin startproject' using Django 1.11.23.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os, sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(PROJECT_ROOT, 'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_46r4imw54-znik&a)60fthi!f4&i8n4r*xy1_%p3%5cl2f@4u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'articles.apps.ArticlesConfig',
# 'grappelli',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myfirst.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_ROOT, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
],
},
},
]
WSGI_APPLICATION = 'myfirst.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
| 26.206107
| 91
| 0.694728
|
b6df8118746a90fac3bcbb541b9310b08e0fe13b
| 937
|
py
|
Python
|
ble_uart/process_unit.py
|
yjlou/blue_uart
|
442dd2bd7d66efbdca995b071ed0a85765152a10
|
[
"MIT"
] | null | null | null |
ble_uart/process_unit.py
|
yjlou/blue_uart
|
442dd2bd7d66efbdca995b071ed0a85765152a10
|
[
"MIT"
] | null | null | null |
ble_uart/process_unit.py
|
yjlou/blue_uart
|
442dd2bd7d66efbdca995b071ed0a85765152a10
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""A Process Unit is a component that handles the data flows. It contains 2 flows:
flow0: data flow from the BLE to the server side.
flow1: data flow from the server side to the BLE.
+--------------+
| Process Unit | S
| | e
B ====> flow 0 ====> r
L | | v
E <==== flow 1 <==== e
| | r
+--------------+
"""
class Flow(object):
def __init__(self):
self._egress = None
def set_egress(self, egress:callable):
self._egress = egress
def ingress(self, data:bytearray):
"""This inputs data to the entrance of the flow.
The default behavior is pass-thru. The inheritor can overwrite this.
"""
if self._egress:
self._egress(data)
class ProcessUnit(object):
def __init__(self):
self._flows = [Flow(), Flow()]
def flow(self, idx:int):
return self._flows[idx]
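A minimal wiring sketch for the two flows in the diagram above, with invented print-based endpoints standing in for the BLE and server sides (assuming the module is importable as ble_uart.process_unit):
from ble_uart.process_unit import ProcessUnit
def send_to_server(data: bytearray):
    print('to server:', bytes(data))
def send_to_ble(data: bytearray):
    print('to BLE:', bytes(data))
unit = ProcessUnit()
unit.flow(0).set_egress(send_to_server)  # flow 0: BLE -> server
unit.flow(1).set_egress(send_to_ble)     # flow 1: server -> BLE
unit.flow(0).ingress(bytearray(b'hello'))  # default Flow is pass-through
unit.flow(1).ingress(bytearray(b'world'))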
| 23.425
| 82
| 0.550694
|
034fe9ef7d5abc690c5de710e383a980cb9eea29
| 14,734
|
py
|
Python
|
youtube_audio_matcher/database/database.py
|
nrsyed/youtube-audio-matcher
|
d6a65d2e824f4e8f363c49341896612978b7318c
|
[
"MIT"
] | 1
|
2020-11-26T00:55:55.000Z
|
2020-11-26T00:55:55.000Z
|
youtube_audio_matcher/database/database.py
|
nrsyed/youtube-audio-matcher
|
d6a65d2e824f4e8f363c49341896612978b7318c
|
[
"MIT"
] | null | null | null |
youtube_audio_matcher/database/database.py
|
nrsyed/youtube-audio-matcher
|
d6a65d2e824f4e8f363c49341896612978b7318c
|
[
"MIT"
] | null | null | null |
import asyncio
import logging
import time
import sqlalchemy
from .schema import Base, Fingerprint, Song
def database_obj_to_py(obj, fingerprints_in_song=False):
"""
Recursively convert Fingerprint and Song sqlalchemy objects to native
Python types (lists and dicts).
Args:
obj (database.schema.Fingerprint|database.schema.Song): ``audio``
module Fingerprint or Song object.
fingerprints_in_song (bool): Include each song's fingerprints as a
list within the song dict.
Returns:
py_obj: list|dict
"""
if isinstance(obj, list):
return [
database_obj_to_py(elem, fingerprints_in_song=fingerprints_in_song)
for elem in obj
]
elif isinstance(obj, Song):
song = {
"id": obj.id,
"duration": obj.duration,
"filehash": obj.filehash,
"filepath": obj.filepath,
"title": obj.title,
"youtube_id": obj.youtube_id,
}
if fingerprints_in_song:
song["fingerprints"] = [
database_obj_to_py(fp) for fp in obj.fingerprints
]
song["num_fingerprints"] = len(obj.fingerprints)
return song
elif isinstance(obj, Fingerprint):
return {
"song_id": obj.song_id,
"hash": obj.hash,
"offset": obj.offset,
}
else:
raise ValueError("Unsupported object")
def _threadsafe_add_fingerprints(db_kwargs, song):
logging.info(f"Adding {song['path']} to database...")
db = Database(**db_kwargs)
song_id = db.add_song(
duration=song.get("duration"), filepath=song.get("path"),
filehash=song.get("filehash"), title=song.get("title"),
youtube_id=song.get("youtube_id")
)
db.add_fingerprints(song_id, song["fingerprints"])
del db
del song["fingerprints"]
# TODO: delete files after fingerprinting
async def _update_database(song, loop, executor, db_kwargs):
start_t = time.time()
delete_file = False
try:
if "delete" in song:
delete_file = True
await loop.run_in_executor(
executor, _threadsafe_add_fingerprints, db_kwargs, song
)
elapsed = time.time() - start_t
logging.info(f"Added {song['path']} to database ({elapsed:.2f} s)")
except Exception as e:
logging.error(f"Error adding {song['path']} to database ({str(e)})")
finally:
if delete_file:
logging.info(f"Deleting file {song['path']}")
# TODO delete song
# TODO: handle song already existing in database
async def update_database(loop, executor, db_kwargs, in_queue):
"""
Consume fingerprinted songs from an async input queue and add the songs
and their fingerprints to the database either concurrently (via a thread
pool) or in parallel (via a process pool).
Args:
loop (asyncio.BaseEventLoop): asyncio EventLoop.
executor (concurrent.futures.Executor): `concurrent.futures`
ThreadPoolExecutor or ProcessPoolExecutor in which the database
connection will be created to update the database.
db_kwargs (dict): Dict containing keyword args for instantiating a
:class:`Database` object.
in_queue (asyncio.queues.Queue): Database queue containing song dicts
representing songs (and their fingerprints) to be added to the
database.
"""
start_t = time.time()
tasks = []
while True:
song = await in_queue.get()
if song is None:
break
task = loop.create_task(
_update_database(song, loop, executor, db_kwargs)
)
tasks.append(task)
# Wrap asyncio.wait() in if statement to avoid ValueError if no tasks.
if tasks:
await asyncio.wait(tasks)
elapsed = time.time() - start_t
logging.info(f"All songs added to database ({elapsed:.2f} s)")
# TODO: try/except, db rollbacks
class Database:
"""
Class that holds a database connection to perform read/update/delete
operations.
"""
def __init__(
self, user, password, db_name, host="localhost", port=None,
dialect="postgresql", driver=None
):
"""
Constructs a sqlalchemy database URL of the form
`dialect+driver://username:password@host:port/db_name` to create a
sqlalchemy database engine. Refer to `SQLAlchemy Database URLs`_.
Args:
user (str): User name.
password (str): User password.
db_name (str): Database name.
host (str): Database hostname.
port (int): Port on ``host``.
dialect (str): SQL database dialect to use. See
`SQLAlchemy Dialects`_.
driver (str): SQL database driver to use. See
`SQLAlchemy Database URLs`_.
.. _`SQLAlchemy Dialects`:
https://docs.sqlalchemy.org/en/13/dialects/
.. _`SQLAlchemy Database URLs`:
https://docs.sqlalchemy.org/en/13/core/engines.html#database-urls
"""
# TODO: check/add sqlite support.
# Construct database URL.
driver = f"+{driver}" if driver else ""
port = f":{port}" if port is not None else ""
# MySQL requires localhost to be specified as 127.0.0.1 to correctly
# use TCP.
if dialect == "mysql" and host == "localhost":
host = "127.0.0.1"
url = f"{dialect}{driver}://{user}:{password}@{host}{port}/{db_name}"
logging.debug(f"Connecting to database URL {url}")
engine = sqlalchemy.create_engine(url)
Session = sqlalchemy.orm.sessionmaker(engine)
self.session = Session()
Base.metadata.create_all(engine)
self.base = Base
self.engine = engine
def __del__(self):
self.session.close()
def add_song(
self, duration=None, filepath=None, filehash=None, title=None,
youtube_id=None
):
"""
Args:
duration (float): Song duration.
filepath (str): Path to file on local machine.
filehash (str): File hash.
title (str): Song title.
youtube_id (str): YouTube ID, i.e., watch?v=<youtube_id>.
Returns:
int: id of the inserted song.
"""
new_song = Song(
duration=duration, filepath=filepath, filehash=filehash,
title=title, youtube_id=youtube_id
)
self.session.add(new_song)
self.session.commit()
return new_song.id
def add_fingerprint(self, song_id, hash_, offset):
"""
Args:
song_id (int): Song id corresponding to song in the Song table.
hash_ (str): Fingerprint hash.
offset (float): Fingerprint offset.
Returns:
int: id of the inserted fingerprint.
"""
new_fingerprint = Fingerprint(
song_id=song_id, hash=hash_, offset=offset
)
self.session.add(new_fingerprint)
self.session.commit()
return new_fingerprint.id
def add_fingerprints(self, song_id, fingerprints):
"""
Args:
song_id (int): Song table song id the fingerprints correspond to.
fingerprints (List[tuple]): A list of (hash, offset) fingerprints.
"""
new_fingerprints = [
Fingerprint(song_id=song_id, hash=hash_, offset=offset)
for hash_, offset in fingerprints
]
self.session.bulk_save_objects(new_fingerprints)
self.session.commit()
def as_dict(self, combine_tables=False):
"""
Return the database as a Python dictionary. See
:func:`database_obj_to_py`.
Args:
combine_tables (bool): If True, the returned dict will have a
single ``songs`` field containing a list of songs, and each
song will have a ``fingerprints`` field containing the list
of fingerprints belonging to it. If False, the returned dict
will contain a ``songs`` field and a ``fingerprints`` field.
Returns:
dict: tables
Dict containing database Fingerprint and Song tables::
{
"songs": list,
"fingerprints": list
}
If ``combine_tables=True``, the returned dict will not contain
a ``fingerprints`` key.
"""
songs_table = self.session.query(Song).all()
fingerprints_table = self.session.query(Fingerprint).all()
if combine_tables:
return {
"songs":
database_obj_to_py(songs_table, fingerprints_in_song=True),
}
else:
return {
"songs": database_obj_to_py(songs_table),
"fingerprints": database_obj_to_py(fingerprints_table),
}
def delete_all(self):
"""
Delete all rows in the Fingerprint and Song tables.
"""
self.session.query(Fingerprint).delete()
self.session.query(Song).delete()
self.session.commit()
def _drop_tables(self, tables):
self.base.metadata.drop_all(bind=self.engine, tables=tables)
self.session.commit()
def drop_all_tables(self):
"""
Drop Fingerprint and Song tables.
"""
self._drop_tables([Fingerprint.__table__, Song.__table__])
def drop_song_table(self):
"""
Drop Song table.
"""
self._drop_tables([Song.__table__])
def drop_fingerprint_table(self):
"""
Drop Fingerprint table.
"""
self._drop_tables([Fingerprint.__table__])
def query_fingerprints(self, hashes):
"""
Query the database for a list of matching hashes.
Args:
hashes (str|List[str]): Hash or list of hashes from a
fingerprinted audio signal.
Returns:
fingerprints: list
A list of fingerprints whose hashes match the input hashes.
Each fingerprint is a dict containing the hash, song id, and
offset (in seconds)::
{
"song_id": int,
"hash": str,
"offset": float
}
"""
# Perform query; this is the equivalent of
# SELECT * FROM fingerprint WHERE fingerprint.hash IN (`hashes`)
query = self.session.query(Fingerprint).filter(
Fingerprint.hash.in_(hashes)
)
return database_obj_to_py(query.all())
def query_songs(
self, id_=None, duration=None, duration_greater_than=None,
duration_less_than=None, filehash=None, filepath=None, title=None,
youtube_id=None, include_fingerprints=False
):
"""
Query the database for songs matching the specified criteria.
Args:
id_ (list|int): Int or list of ints corresponding to the id
primary key field of the database Song table.
duration (list|float): Float or list of floats corresponding to
the duration field of the database Song table.
duration_greater_than (float): Only return songs with a duration
greater than this value, if one is specified.
duration_less_than (float): Only return songs with a duration less
than this value, if one is specified.
filehash (list|str): A filehash or list of filehashes corresponding
to the filehash field of the database Song table.
filepath (list|str): A filepath or list of filepaths corresponding
to the filepath field of the database Song table.
title (list|str): A title or list of titles corresponding to the
title field of the database Song table.
youtube_id (list|str): A YouTube id or list of YouTube ids
                corresponding to the youtube_id field of the database Song
table.
include_fingerprints (bool): Include the fingerprints of each
song as a list within the dict corresponding to the song; see
:meth:`query_fingerprints`.
Returns:
results: List[dict]
A list of dicts where each dict represents a song and contains
the following keys::
{
"id": int,
"duration": float,
"filehash": str,
"filepath": str,
"title": str,
"youtube_id" str,
"fingerprints": list[dict],
"num_fingerprints": int
}
The ``fingerprints`` and ``num_fingerprints`` keys are only
included if ``include_fingerprints=True``.
Raises:
ValueError: if more than one of ``duration``,
``duration_greater_than``, or ``duration_less_than`` are supplied.
"""
duration_args_bool = [
duration is not None, duration_greater_than is not None,
duration_less_than is not None
]
if sum(duration_args_bool) > 1:
raise ValueError(
"Can only choose one of duration, duration_greater_than, "
"or duration_less_than"
)
query = self.session.query(Song)
if duration is not None:
query = query.filter(Song.duration == duration)
elif duration_greater_than is not None:
query = query.filter(Song.duration > duration_greater_than)
elif duration_less_than is not None:
query = query.filter(Song.duration < duration_less_than)
# Handle remaining non-duration args separately.
other_args = {
"id": id_,
"filehash": filehash,
"filepath": filepath,
"title": title,
"youtube_id": youtube_id,
}
# Make the Song sqlalchemy DeclarativeMeta a dict so we can iterate
# over its attributes.
Song_ = vars(Song)
for arg, val in other_args.items():
if val is not None:
if not isinstance(val, (list, tuple)):
val = [val]
query = query.filter(Song_[arg].in_(val))
return database_obj_to_py(
query.all(), fingerprints_in_song=include_fingerprints
)
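# Usage sketch (illustrative only; ``db`` stands for an instance of the database
# class defined above, constructed elsewhere in the project; all literal values
# below are made up):
#     song_id = db.add_song(duration=215.0, filepath="track.mp3",
#                           filehash="deadbeef", title="Example", youtube_id=None)
#     db.add_fingerprints(song_id, [("a1b2c3d4", 0.0), ("e5f6a7b8", 1.48)])
#     db.query_fingerprints(["a1b2c3d4"])        # -> matching fingerprint dicts
#     db.query_songs(title="Example", include_fingerprints=True)
# Note that query_songs() accepts at most one of duration, duration_greater_than,
# and duration_less_than per call.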
| 34.75
| 79
| 0.579272
|
a55a8cb055373766c801a93cbe22b800648dbe11
| 1,143
|
py
|
Python
|
Docker-Swarm-deployment/single-node/databroker/backup/backup-app/backup-watch.py
|
abhilashvenkatesh/iudx-deployment
|
d57a31709f196764da6cf1da2c7ae93fc19b6f11
|
[
"MIT"
] | 4
|
2021-12-24T12:34:15.000Z
|
2022-02-24T12:59:50.000Z
|
Docker-Swarm-deployment/single-node/databroker/backup/backup-app/backup-watch.py
|
abhilashvenkatesh/iudx-deployment
|
d57a31709f196764da6cf1da2c7ae93fc19b6f11
|
[
"MIT"
] | null | null | null |
Docker-Swarm-deployment/single-node/databroker/backup/backup-app/backup-watch.py
|
abhilashvenkatesh/iudx-deployment
|
d57a31709f196764da6cf1da2c7ae93fc19b6f11
|
[
"MIT"
] | null | null | null |
# This Python script watches for changes to the LATEST.LOG file and backs up the
# definitions by calling backup.sh.
import time
import os
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class OnMyWatch:
# Set the directory to watch
watchDirectory = "/var/lib/rabbitmq/"
def __init__(self):
self.observer = Observer()
def run(self):
event_handler = Handler()
#recursively watch files in watchDirectory
self.observer.schedule(event_handler, self.watchDirectory, recursive = True)
self.observer.start()
try:
while True:
time.sleep(5)
except:
self.observer.stop()
print("Observer Stopped")
self.observer.join()
class Handler(FileSystemEventHandler):
    exec_string = '/usr/share/app/backup.sh'
def on_any_event(self,event):
        file_name = event.src_path.split('/')[-1]
if event.is_directory:
return None
elif event.event_type == 'modified' and file_name == 'LATEST.LOG':
            # Execute the backup script in a shell whenever LATEST.LOG changes
os.system(self.exec_string)
if __name__ == '__main__':
watch = OnMyWatch()
watch.run()
| 26.581395
| 88
| 0.725284
|
4af03f237452b0e1578b371813b59067f8547cef
| 10,941
|
py
|
Python
|
data_generator.py
|
KayqueTeles/MAML-work-in-progress
|
7a7c793a994a5f93d7a0b259adfc22918ce93a79
|
[
"MIT"
] | null | null | null |
data_generator.py
|
KayqueTeles/MAML-work-in-progress
|
7a7c793a994a5f93d7a0b259adfc22918ce93a79
|
[
"MIT"
] | null | null | null |
data_generator.py
|
KayqueTeles/MAML-work-in-progress
|
7a7c793a994a5f93d7a0b259adfc22918ce93a79
|
[
"MIT"
] | null | null | null |
""" Code for loading data. """
import numpy as np
import os
import random
import tensorflow as tf
import sys
from tensorflow.python.platform import flags
from utils import get_images
FLAGS = flags.FLAGS
class DataGenerator(object):
"""
Data Generator capable of generating batches of sinusoid or Omniglot data.
A "class" is considered a class of omniglot digits or a particular sinusoid function.
"""
def __init__(self, num_samples_per_class, batch_size, config={}):
"""
Args:
num_samples_per_class: num samples to generate per class in one batch
batch_size: size of meta batch size (e.g. number of functions)
"""
print('\n ** Initializing data_generator at data_generator.py ...')
self.batch_size = batch_size
self.num_samples_per_class = num_samples_per_class
self.num_classes = 1 # by default 1 (only relevant for classification problems)
if FLAGS.datasource == 'sinusoid':
self.generate = self.generate_sinusoid_batch
self.amp_range = config.get('amp_range', [0.1, 5.0])
self.phase_range = config.get('phase_range', [0, np.pi])
self.input_range = config.get('input_range', [-5.0, 5.0])
self.dim_input = 1
self.dim_output = 1
elif 'omniglot' in FLAGS.datasource:
self.num_classes = config.get('num_classes', FLAGS.num_classes)
            self.img_size = config.get('img_size', (28, 28))
self.dim_input = np.prod(self.img_size)
self.dim_output = self.num_classes
# data that is pre-resized using PIL with lanczos filter
data_folder = config.get('data_folder', './data/omniglot_resized')
character_folders = [os.path.join(data_folder, family, character) \
for family in os.listdir(data_folder) \
if os.path.isdir(os.path.join(data_folder, family)) \
for character in os.listdir(os.path.join(data_folder, family))]
random.seed(1)
random.shuffle(character_folders)
num_val = 100
num_train = config.get('num_train', 1200) - num_val
self.metatrain_character_folders = character_folders[:num_train]
if FLAGS.test_set:
self.metaval_character_folders = character_folders[num_train+num_val:]
else:
self.metaval_character_folders = character_folders[num_train:num_train+num_val]
self.rotations = config.get('rotations', [0, 90, 180, 270])
elif FLAGS.datasource == 'miniimagenet':
print(' ** Mini-Imagenet predefinitions selected.')
self.num_classes = config.get('num_classes', FLAGS.num_classes)
print(' ** Defining image data...')
self.img_size = config.get('img_size', (84, 84))
#self.img_size = config.get('img_size', (101, 101))
self.dim_input = np.prod(self.img_size)*3
self.dim_output = self.num_classes
print(' ** Choosing folders...')
metatrain_folder = config.get('metatrain_folder', './data/miniImagenet/train')
if FLAGS.test_set:
metaval_folder = config.get('metaval_folder', './data/miniImagenet/test')
else:
metaval_folder = config.get('metaval_folder', './data/miniImagenet/val')
print(' ** Defining folder labels...')
metatrain_folders = [os.path.join(metatrain_folder, label) \
for label in os.listdir(metatrain_folder) \
if os.path.isdir(os.path.join(metatrain_folder, label)) \
]
metaval_folders = [os.path.join(metaval_folder, label) \
for label in os.listdir(metaval_folder) \
if os.path.isdir(os.path.join(metaval_folder, label)) \
]
print(' -- metaval_folders: ', metaval_folders)
print(' ** Checking if data source is reckognized...')
self.metatrain_character_folders = metatrain_folders
self.metaval_character_folders = metaval_folders
self.rotations = config.get('rotations', [0])
else:
raise ValueError('Unrecognized data source')
def make_data_tensor(self, train=True):
print(' ** Creating data tensor...')
if train:
folders = self.metatrain_character_folders
# number of tasks, not number of meta-iterations. (divide by metabatch size to measure)
num_total_batches = 200000
else:
folders = self.metaval_character_folders
print(' -- folders: ', folders)
####################################
#testefol = self.metaval_character_folders
#print(' -- testelos: ', testelos)
num_total_batches = 600
# make list of files
print(' ** Generating filenames....')
all_filenames = []
print(' -- folders: ', folders)
for _ in range(num_total_batches):
sampled_character_folders = random.sample(folders, self.num_classes)
random.shuffle(sampled_character_folders)
labels_and_images = get_images(sampled_character_folders, range(self.num_classes), nb_samples=self.num_samples_per_class, shuffle=False)
            #print(' -- labels_and_images:', labels_and_images)  # list of (label, image path) tuples, e.g.: labels_and_images: [(0, './data/miniImagenet/test/n02/img_Y_369.jpg'), (0, './data/miniImagenet/test/n02/img_Y_730.jpg'), ...
# make sure the above isn't randomized order
labels = [li[0] for li in labels_and_images]
#print(' -- labels:', labels)
filenames = [li[1] for li in labels_and_images]
all_filenames.extend(filenames)
#print(' -- allfilenames: ', all_filenames)
print(' ** Generating filename queue....')
# make queue for tensorflow to read from
filename_queue = tf.train.string_input_producer(tf.convert_to_tensor(all_filenames), shuffle=False)
print(' ** Generating image processing ops')
image_reader = tf.WholeFileReader()
_, image_file = image_reader.read(filename_queue)
print(' -- image_reader = ', image_reader)
if FLAGS.datasource == 'miniimagenet':
image = tf.image.decode_jpeg(image_file, channels=3)
print(' -- image = ', image)
image.set_shape((self.img_size[0],self.img_size[1],3))
print(' -- image = ', image)
image = tf.reshape(image, [self.dim_input])
print(' -- image = ', image)
image = tf.cast(image, tf.float32) / 255.0
print(' -- image = ', image)
else:
image = tf.image.decode_png(image_file)
image.set_shape((self.img_size[0],self.img_size[1],1))
image = tf.reshape(image, [self.dim_input])
image = tf.cast(image, tf.float32) / 255.0
image = 1.0 - image # invert
num_preprocess_threads = 1 # TODO - enable this to be set to >1
min_queue_examples = 256
examples_per_batch = self.num_classes * self.num_samples_per_class
batch_image_size = self.batch_size * examples_per_batch
print(' ** Batching images')
images = tf.train.batch(
[image],
batch_size = batch_image_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * batch_image_size,
)
print(' -- images = ', images)
all_image_batches, all_label_batches = [], []
print(' ** Manipulating image data to be right shape')
for i in range(self.batch_size):
image_batch = images[i*examples_per_batch:(i+1)*examples_per_batch]
if FLAGS.datasource == 'omniglot':
# omniglot augments the dataset by rotating digits to create new classes
# get rotation per class (e.g. 0,1,2,0,0 if there are 5 classes)
rotations = tf.multinomial(tf.log([[1., 1.,1.,1.]]), self.num_classes)
label_batch = tf.convert_to_tensor(labels)
new_list, new_label_list = [], []
for k in range(self.num_samples_per_class):
class_idxs = tf.range(0, self.num_classes)
class_idxs = tf.random_shuffle(class_idxs)
true_idxs = class_idxs*self.num_samples_per_class + k
new_list.append(tf.gather(image_batch,true_idxs))
if FLAGS.datasource == 'omniglot': # and FLAGS.train:
new_list[-1] = tf.stack([tf.reshape(tf.image.rot90(
tf.reshape(new_list[-1][ind], [self.img_size[0],self.img_size[1],1]),
k=tf.cast(rotations[0,class_idxs[ind]], tf.int32)), (self.dim_input,))
for ind in range(self.num_classes)])
new_label_list.append(tf.gather(label_batch, true_idxs))
new_list = tf.concat(new_list, 0) # has shape [self.num_classes*self.num_samples_per_class, self.dim_input]
new_label_list = tf.concat(new_label_list, 0)
all_image_batches.append(new_list)
all_label_batches.append(new_label_list)
all_image_batches = tf.stack(all_image_batches)
all_label_batches = tf.stack(all_label_batches)
all_label_batches = tf.one_hot(all_label_batches, self.num_classes)
print(' -- all_image_batches', all_image_batches)
part = all_image_batches[0,0:40,]
tf.print(' -- part: ', part, output_stream=sys.stdout)
print(' ** Data tensor generation COMPLETE!')
return all_image_batches, all_label_batches
def generate_sinusoid_batch(self, train=True, input_idx=None):
print(' ** GENERATING DEF BEING USED!')
        # Note: the train arg is not used here (it is used by the omniglot method).
        # input_idx is used during qualitative testing; it is the number of examples used for the grad update.
amp = np.random.uniform(self.amp_range[0], self.amp_range[1], [self.batch_size])
phase = np.random.uniform(self.phase_range[0], self.phase_range[1], [self.batch_size])
outputs = np.zeros([self.batch_size, self.num_samples_per_class, self.dim_output])
init_inputs = np.zeros([self.batch_size, self.num_samples_per_class, self.dim_input])
for func in range(self.batch_size):
init_inputs[func] = np.random.uniform(self.input_range[0], self.input_range[1], [self.num_samples_per_class, 1])
if input_idx is not None:
init_inputs[:,input_idx:,0] = np.linspace(self.input_range[0], self.input_range[1], num=self.num_samples_per_class-input_idx, retstep=False)
outputs[func] = amp[func] * np.sin(init_inputs[func]-phase[func])
return init_inputs, outputs, amp, phase
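# Usage sketch for the sinusoid branch (illustrative; requires the FLAGS defined
# by the surrounding project, with FLAGS.datasource == 'sinusoid'):
#     dg = DataGenerator(num_samples_per_class=10, batch_size=25)
#     init_inputs, outputs, amp, phase = dg.generate()
#     # init_inputs and outputs have shape (25, 10, 1); amp and phase have shape (25,)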
| 53.370732
| 238
| 0.618591
|
9654e0d4b319cc304bdde3e9bf2583401f96b590
| 2,090
|
py
|
Python
|
mpl_format/utils/color_utils.py
|
vahndi/mpl-format
|
b03f97c37968e55a35c7181d93616eb44fc55f05
|
[
"MIT"
] | null | null | null |
mpl_format/utils/color_utils.py
|
vahndi/mpl-format
|
b03f97c37968e55a35c7181d93616eb44fc55f05
|
[
"MIT"
] | 51
|
2020-05-18T04:18:11.000Z
|
2022-02-01T02:35:59.000Z
|
mpl_format/utils/color_utils.py
|
vahndi/mpl-format
|
b03f97c37968e55a35c7181d93616eb44fc55f05
|
[
"MIT"
] | null | null | null |
from typing import Iterable, Union, List
from matplotlib.colors import to_rgb, to_rgba
from compound_types.built_ins import FloatOrFloatIterable
from mpl_format.compound_types import Color
def cross_fade(
from_color: Color, to_color: Color,
amount: FloatOrFloatIterable,
) -> Union[Color, List[Color]]:
"""
Return a new color which fades amount proportion of the way between the 2
colors.
:param from_color: The color to fade from.
:param to_color: The color to fade to.
:param amount: The amount to fade by, from 0.0 to 1.0
"""
if isinstance(amount, Iterable):
return [
cross_fade(from_color, to_color, amt)
for amt in amount
]
if isinstance(from_color, str):
from_color = to_rgb(from_color)
if isinstance(to_color, str):
to_color = to_rgb(to_color)
return tuple([from_value + amount * (to_value - from_value)
for from_value, to_value in zip(from_color, to_color)])
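# Example (values follow from linear interpolation of the RGB components):
#     cross_fade('red', 'blue', 0.5)            -> (0.5, 0.0, 0.5)
#     cross_fade('black', 'white', [0.25, 0.5]) -> [(0.25, 0.25, 0.25), (0.5, 0.5, 0.5)]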
def blacken(
color: Color, amount: FloatOrFloatIterable
) -> Union[Color, List[Color]]:
"""
Return a color or colors amount fraction or fractions of the way from
`color` to `black`.
:param color: The existing color.
:param amount: The proportion to blacken by.
"""
return cross_fade(from_color=color, to_color='black',
amount=amount)
def whiten(
color: Color, amount: FloatOrFloatIterable
) -> Union[Color, List[Color]]:
"""
Return a color or colors amount fraction or fractions of the way from
`color` to `white`.
:param color: The existing color.
    :param amount: The proportion to whiten by.
"""
return cross_fade(from_color=color, to_color='white',
amount=amount)
def set_alpha(color: Color, alpha: float) -> Color:
"""
Set the alpha level of a color to a given value.
:param color: The existing color.
:param alpha: The new alpha level
"""
color = to_rgba(color)
color = (color[0], color[1], color[2], alpha)
return color
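# Examples: whiten('blue', 0.5) -> (0.5, 0.5, 1.0), blacken('white', 0.5) -> (0.5, 0.5, 0.5),
# and set_alpha('red', 0.3) -> (1.0, 0.0, 0.0, 0.3).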
| 28.630137
| 77
| 0.651196
|
1c76efbc0e58ecb3302fbdd372538a8293a9f49a
| 40,862
|
py
|
Python
|
control/tests/statesp_test.py
|
josephcslater/python-control
|
e080cef44e718c7c0e3a286fcc3adae405936b14
|
[
"BSD-3-Clause"
] | null | null | null |
control/tests/statesp_test.py
|
josephcslater/python-control
|
e080cef44e718c7c0e3a286fcc3adae405936b14
|
[
"BSD-3-Clause"
] | null | null | null |
control/tests/statesp_test.py
|
josephcslater/python-control
|
e080cef44e718c7c0e3a286fcc3adae405936b14
|
[
"BSD-3-Clause"
] | null | null | null |
"""statesp_test.py - test state space class
RMM, 30 Mar 2011 based on TestStateSp from v0.4a)
RMM, 14 Jun 2019 statesp_array_test.py coverted from statesp_test.py to test
with use_numpy_matrix(False)
BG, 26 Jul 2020 merge statesp_array_test.py differences into statesp_test.py
convert to pytest
"""
import numpy as np
import pytest
import operator
from numpy.linalg import solve
from scipy.linalg import block_diag, eigvals
import control as ct
from control.config import defaults
from control.dtime import sample_system
from control.lti import evalfr
from control.statesp import (StateSpace, _convert_to_statespace, drss,
rss, ss, tf2ss, _statesp_defaults)
from control.tests.conftest import ismatarrayout, slycotonly
from control.xferfcn import TransferFunction, ss2tf
from .conftest import editsdefaults
class TestStateSpace:
"""Tests for the StateSpace class."""
@pytest.fixture
def sys322ABCD(self):
"""Matrices for sys322"""
A322 = [[-3., 4., 2.],
[-1., -3., 0.],
[2., 5., 3.]]
B322 = [[1., 4.],
[-3., -3.],
[-2., 1.]]
C322 = [[4., 2., -3.],
[1., 4., 3.]]
D322 = [[-2., 4.],
[0., 1.]]
return (A322, B322, C322, D322)
@pytest.fixture
def sys322(self, sys322ABCD):
"""3-states square system (2 inputs x 2 outputs)"""
return StateSpace(*sys322ABCD)
@pytest.fixture
def sys222(self):
"""2-states square system (2 inputs x 2 outputs)"""
A222 = [[4., 1.],
[2., -3]]
B222 = [[5., 2.],
[-3., -3.]]
C222 = [[2., -4],
[0., 1.]]
D222 = [[3., 2.],
[1., -1.]]
return StateSpace(A222, B222, C222, D222)
@pytest.fixture
def sys623(self):
"""sys3: 6 states non square system (2 inputs x 3 outputs)"""
A623 = np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 3, 0, 0, 0],
[0, 0, 0, -4, 0, 0],
[0, 0, 0, 0, -1, 0],
[0, 0, 0, 0, 0, 3]])
B623 = np.array([[0, -1],
[-1, 0],
[1, -1],
[0, 0],
[0, 1],
[-1, -1]])
C623 = np.array([[1, 0, 0, 1, 0, 0],
[0, 1, 0, 1, 0, 1],
[0, 0, 1, 0, 0, 1]])
D623 = np.zeros((3, 2))
return StateSpace(A623, B623, C623, D623)
@pytest.mark.parametrize(
"dt",
[(), (None, ), (0, ), (1, ), (0.1, ), (True, )],
ids=lambda i: "dt " + ("unspec" if len(i) == 0 else str(i[0])))
@pytest.mark.parametrize(
"argfun",
[pytest.param(
lambda ABCDdt: (ABCDdt, {}),
id="A, B, C, D[, dt]"),
pytest.param(
lambda ABCDdt: (ABCDdt[:4], {'dt': dt_ for dt_ in ABCDdt[4:]}),
id="A, B, C, D[, dt=dt]"),
pytest.param(
lambda ABCDdt: ((StateSpace(*ABCDdt), ), {}),
id="sys")
])
def test_constructor(self, sys322ABCD, dt, argfun):
"""Test different ways to call the StateSpace() constructor"""
args, kwargs = argfun(sys322ABCD + dt)
sys = StateSpace(*args, **kwargs)
dtref = defaults['control.default_dt'] if len(dt) == 0 else dt[0]
np.testing.assert_almost_equal(sys.A, sys322ABCD[0])
np.testing.assert_almost_equal(sys.B, sys322ABCD[1])
np.testing.assert_almost_equal(sys.C, sys322ABCD[2])
np.testing.assert_almost_equal(sys.D, sys322ABCD[3])
assert sys.dt == dtref
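    # The parametrization above exercises the three equivalent construction forms:
    # StateSpace(A, B, C, D), StateSpace(A, B, C, D, dt) (positionally or via dt=),
    # and StateSpace(other_statespace_system).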
@pytest.mark.parametrize("args, exc, errmsg",
[((True, ), TypeError,
"(can only take in|sys must be) a StateSpace"),
((1, 2), ValueError, "1, 4, or 5 arguments"),
((np.ones((3, 2)), np.ones((3, 2)),
np.ones((2, 2)), np.ones((2, 2))),
ValueError, "A must be square"),
((np.ones((3, 3)), np.ones((2, 2)),
np.ones((2, 3)), np.ones((2, 2))),
ValueError, "A and B"),
((np.ones((3, 3)), np.ones((3, 2)),
np.ones((2, 2)), np.ones((2, 2))),
ValueError, "A and C"),
((np.ones((3, 3)), np.ones((3, 2)),
np.ones((2, 3)), np.ones((2, 3))),
ValueError, "B and D"),
((np.ones((3, 3)), np.ones((3, 2)),
np.ones((2, 3)), np.ones((3, 2))),
ValueError, "C and D"),
])
def test_constructor_invalid(self, args, exc, errmsg):
"""Test invalid input to StateSpace() constructor"""
with pytest.raises(exc, match=errmsg):
StateSpace(*args)
with pytest.raises(exc, match=errmsg):
ss(*args)
def test_constructor_warns(self, sys322ABCD):
"""Test ambiguos input to StateSpace() constructor"""
with pytest.warns(UserWarning, match="received multiple dt"):
sys = StateSpace(*(sys322ABCD + (0.1, )), dt=0.2)
np.testing.assert_almost_equal(sys.A, sys322ABCD[0])
np.testing.assert_almost_equal(sys.B, sys322ABCD[1])
np.testing.assert_almost_equal(sys.C, sys322ABCD[2])
np.testing.assert_almost_equal(sys.D, sys322ABCD[3])
assert sys.dt == 0.1
def test_copy_constructor(self):
"""Test the copy constructor"""
# Create a set of matrices for a simple linear system
A = np.array([[-1]])
B = np.array([[1]])
C = np.array([[1]])
D = np.array([[0]])
# Create the first linear system and a copy
linsys = StateSpace(A, B, C, D)
cpysys = StateSpace(linsys)
# Change the original A matrix
A[0, 0] = -2
np.testing.assert_array_equal(linsys.A, [[-1]]) # original value
np.testing.assert_array_equal(cpysys.A, [[-1]]) # original value
# Change the A matrix for the original system
linsys.A[0, 0] = -3
np.testing.assert_array_equal(cpysys.A, [[-1]]) # original value
def test_copy_constructor_nodt(self, sys322):
"""Test the copy constructor when an object without dt is passed"""
sysin = sample_system(sys322, 1.)
del sysin.dt
sys = StateSpace(sysin)
assert sys.dt == defaults['control.default_dt']
# test for static gain
sysin = StateSpace([], [], [], [[1, 2], [3, 4]], 1.)
del sysin.dt
sys = StateSpace(sysin)
assert sys.dt is None
def test_matlab_style_constructor(self):
"""Use (deprecated) matrix-style construction string"""
with pytest.deprecated_call():
sys = StateSpace("-1 1; 0 2", "0; 1", "1, 0", "0")
assert sys.A.shape == (2, 2)
assert sys.B.shape == (2, 1)
assert sys.C.shape == (1, 2)
assert sys.D.shape == (1, 1)
for X in [sys.A, sys.B, sys.C, sys.D]:
assert ismatarrayout(X)
def test_D_broadcast(self, sys623):
"""Test broadcast of D=0 to the right shape"""
# Giving D as a scalar 0 should broadcast to the right shape
sys = StateSpace(sys623.A, sys623.B, sys623.C, 0)
np.testing.assert_array_equal(sys623.D, sys.D)
# Giving D as a matrix of the wrong size should generate an error
with pytest.raises(ValueError):
sys = StateSpace(sys.A, sys.B, sys.C, np.array([[0]]))
# Make sure that empty systems still work
sys = StateSpace([], [], [], 1)
np.testing.assert_array_equal(sys.D, [[1]])
sys = StateSpace([], [], [], [[0]])
np.testing.assert_array_equal(sys.D, [[0]])
sys = StateSpace([], [], [], [0])
np.testing.assert_array_equal(sys.D, [[0]])
sys = StateSpace([], [], [], 0)
np.testing.assert_array_equal(sys.D, [[0]])
def test_pole(self, sys322):
"""Evaluate the poles of a MIMO system."""
p = np.sort(sys322.pole())
true_p = np.sort([3.34747678408874,
-3.17373839204437 + 1.47492908003839j,
-3.17373839204437 - 1.47492908003839j])
np.testing.assert_array_almost_equal(p, true_p)
def test_zero_empty(self):
"""Test to make sure zero() works with no zeros in system."""
sys = _convert_to_statespace(TransferFunction([1], [1, 2, 1]))
np.testing.assert_array_equal(sys.zero(), np.array([]))
@slycotonly
def test_zero_siso(self, sys222):
"""Evaluate the zeros of a SISO system."""
# extract only first input / first output system of sys222. This system is denoted sys111
# or tf111
tf111 = ss2tf(sys222)
sys111 = tf2ss(tf111[0, 0])
# compute zeros as root of the characteristic polynomial at the numerator of tf111
# this method is simple and assumed as valid in this test
true_z = np.sort(tf111[0, 0].zero())
# Compute the zeros through ab08nd, which is tested here
z = np.sort(sys111.zero())
np.testing.assert_almost_equal(true_z, z)
@slycotonly
def test_zero_mimo_sys322_square(self, sys322):
"""Evaluate the zeros of a square MIMO system."""
z = np.sort(sys322.zero())
true_z = np.sort([44.41465, -0.490252, -5.924398])
np.testing.assert_array_almost_equal(z, true_z)
@slycotonly
def test_zero_mimo_sys222_square(self, sys222):
"""Evaluate the zeros of a square MIMO system."""
z = np.sort(sys222.zero())
true_z = np.sort([-10.568501, 3.368501])
np.testing.assert_array_almost_equal(z, true_z)
@slycotonly
def test_zero_mimo_sys623_non_square(self, sys623):
"""Evaluate the zeros of a non square MIMO system."""
z = np.sort(sys623.zero())
true_z = np.sort([2., -1.])
np.testing.assert_array_almost_equal(z, true_z)
def test_add_ss(self, sys222, sys322):
"""Add two MIMO systems."""
A = [[-3., 4., 2., 0., 0.], [-1., -3., 0., 0., 0.],
[2., 5., 3., 0., 0.], [0., 0., 0., 4., 1.], [0., 0., 0., 2., -3.]]
B = [[1., 4.], [-3., -3.], [-2., 1.], [5., 2.], [-3., -3.]]
C = [[4., 2., -3., 2., -4.], [1., 4., 3., 0., 1.]]
D = [[1., 6.], [1., 0.]]
sys = sys322 + sys222
np.testing.assert_array_almost_equal(sys.A, A)
np.testing.assert_array_almost_equal(sys.B, B)
np.testing.assert_array_almost_equal(sys.C, C)
np.testing.assert_array_almost_equal(sys.D, D)
def test_subtract_ss(self, sys222, sys322):
"""Subtract two MIMO systems."""
A = [[-3., 4., 2., 0., 0.], [-1., -3., 0., 0., 0.],
[2., 5., 3., 0., 0.], [0., 0., 0., 4., 1.], [0., 0., 0., 2., -3.]]
B = [[1., 4.], [-3., -3.], [-2., 1.], [5., 2.], [-3., -3.]]
C = [[4., 2., -3., -2., 4.], [1., 4., 3., 0., -1.]]
D = [[-5., 2.], [-1., 2.]]
sys = sys322 - sys222
np.testing.assert_array_almost_equal(sys.A, A)
np.testing.assert_array_almost_equal(sys.B, B)
np.testing.assert_array_almost_equal(sys.C, C)
np.testing.assert_array_almost_equal(sys.D, D)
def test_multiply_ss(self, sys222, sys322):
"""Multiply two MIMO systems."""
A = [[4., 1., 0., 0., 0.], [2., -3., 0., 0., 0.], [2., 0., -3., 4., 2.],
[-6., 9., -1., -3., 0.], [-4., 9., 2., 5., 3.]]
B = [[5., 2.], [-3., -3.], [7., -2.], [-12., -3.], [-5., -5.]]
C = [[-4., 12., 4., 2., -3.], [0., 1., 1., 4., 3.]]
D = [[-2., -8.], [1., -1.]]
sys = sys322 * sys222
np.testing.assert_array_almost_equal(sys.A, A)
np.testing.assert_array_almost_equal(sys.B, B)
np.testing.assert_array_almost_equal(sys.C, C)
np.testing.assert_array_almost_equal(sys.D, D)
@pytest.mark.parametrize("omega, resp",
[(1.,
np.array([[ 4.37636761e-05-0.01522976j,
-7.92603939e-01+0.02617068j],
[-3.31544858e-01+0.0576105j,
1.28919037e-01-0.14382495j]])),
(32,
np.array([[-1.16548243e-05-3.13444825e-04j,
-7.99936828e-01+4.54201816e-06j],
[-3.00137118e-01+3.42881660e-03j,
6.32015038e-04-1.21462255e-02j]]))])
@pytest.mark.parametrize("dt", [None, 0, 1e-3])
def test_call(self, dt, omega, resp):
"""Evaluate the frequency response at single frequencies"""
A = [[-2, 0.5], [0.5, -0.3]]
B = [[0.3, -1.3], [0.1, 0.]]
C = [[0., 0.1], [-0.3, -0.2]]
D = [[0., -0.8], [-0.3, 0.]]
sys = StateSpace(A, B, C, D)
if dt:
sys = sample_system(sys, dt)
s = np.exp(omega * 1j * dt)
else:
s = omega * 1j
# Correct versions of the call
np.testing.assert_allclose(evalfr(sys, s), resp, atol=1e-3)
np.testing.assert_allclose(sys(s), resp, atol=1e-3)
# Deprecated name of the call (should generate error)
with pytest.raises(AttributeError):
sys.evalfr(omega)
@slycotonly
def test_freq_resp(self):
"""Evaluate the frequency response at multiple frequencies."""
A = [[-2, 0.5], [0.5, -0.3]]
B = [[0.3, -1.3], [0.1, 0.]]
C = [[0., 0.1], [-0.3, -0.2]]
D = [[0., -0.8], [-0.3, 0.]]
sys = StateSpace(A, B, C, D)
true_mag = [[[0.0852992637230322, 0.00103596611395218],
[0.935374692849736, 0.799380720864549]],
[[0.55656854563842, 0.301542699860857],
[0.609178071542849, 0.0382108097985257]]]
true_phase = [[[-0.566195599644593, -1.68063565332582],
[3.0465958317514, 3.14141384339534]],
[[2.90457947657161, 3.10601268291914],
[-0.438157380501337, -1.40720969147217]]]
true_omega = [0.1, 10.]
mag, phase, omega = sys.frequency_response(true_omega)
np.testing.assert_almost_equal(mag, true_mag)
np.testing.assert_almost_equal(phase, true_phase)
np.testing.assert_equal(omega, true_omega)
# Deprecated version of the call (should return warning)
with pytest.warns(DeprecationWarning, match="will be removed"):
mag, phase, omega = sys.freqresp(true_omega)
np.testing.assert_almost_equal(mag, true_mag)
def test__isstatic(self):
A0 = np.zeros((2,2))
A1 = A0.copy()
A1[0,1] = 1.1
B0 = np.zeros((2,1))
B1 = B0.copy()
B1[0,0] = 1.3
C0 = A0
C1 = np.eye(2)
D0 = 0
D1 = np.ones((2,1))
assert StateSpace(A0, B0, C1, D1)._isstatic()
assert not StateSpace(A1, B0, C1, D1)._isstatic()
assert not StateSpace(A0, B1, C1, D1)._isstatic()
assert not StateSpace(A1, B1, C1, D1)._isstatic()
assert StateSpace(A0, B0, C0, D0)._isstatic()
assert StateSpace(A0, B0, C0, D1)._isstatic()
assert StateSpace(A0, B0, C1, D0)._isstatic()
@slycotonly
def test_minreal(self):
"""Test a minreal model reduction."""
# A = [-2, 0.5, 0; 0.5, -0.3, 0; 0, 0, -0.1]
A = [[-2, 0.5, 0], [0.5, -0.3, 0], [0, 0, -0.1]]
# B = [0.3, -1.3; 0.1, 0; 1, 0]
B = [[0.3, -1.3], [0.1, 0.], [1.0, 0.0]]
# C = [0, 0.1, 0; -0.3, -0.2, 0]
C = [[0., 0.1, 0.0], [-0.3, -0.2, 0.0]]
# D = [0 -0.8; -0.3 0]
D = [[0., -0.8], [-0.3, 0.]]
# sys = ss(A, B, C, D)
sys = StateSpace(A, B, C, D)
sysr = sys.minreal()
assert sysr.nstates == 2
assert sysr.ninputs == sys.ninputs
assert sysr.noutputs == sys.noutputs
np.testing.assert_array_almost_equal(
eigvals(sysr.A), [-2.136154, -0.1638459])
def test_append_ss(self):
"""Test appending two state-space systems."""
A1 = [[-2, 0.5, 0], [0.5, -0.3, 0], [0, 0, -0.1]]
B1 = [[0.3, -1.3], [0.1, 0.], [1.0, 0.0]]
C1 = [[0., 0.1, 0.0], [-0.3, -0.2, 0.0]]
D1 = [[0., -0.8], [-0.3, 0.]]
A2 = [[-1.]]
B2 = [[1.2]]
C2 = [[0.5]]
D2 = [[0.4]]
A3 = [[-2, 0.5, 0, 0], [0.5, -0.3, 0, 0], [0, 0, -0.1, 0],
[0, 0, 0., -1.]]
B3 = [[0.3, -1.3, 0], [0.1, 0., 0], [1.0, 0.0, 0], [0., 0, 1.2]]
C3 = [[0., 0.1, 0.0, 0.0], [-0.3, -0.2, 0.0, 0.0], [0., 0., 0., 0.5]]
D3 = [[0., -0.8, 0.], [-0.3, 0., 0.], [0., 0., 0.4]]
sys1 = StateSpace(A1, B1, C1, D1)
sys2 = StateSpace(A2, B2, C2, D2)
sys3 = StateSpace(A3, B3, C3, D3)
sys3c = sys1.append(sys2)
np.testing.assert_array_almost_equal(sys3.A, sys3c.A)
np.testing.assert_array_almost_equal(sys3.B, sys3c.B)
np.testing.assert_array_almost_equal(sys3.C, sys3c.C)
np.testing.assert_array_almost_equal(sys3.D, sys3c.D)
def test_append_tf(self):
"""Test appending a state-space system with a tf"""
A1 = [[-2, 0.5, 0], [0.5, -0.3, 0], [0, 0, -0.1]]
B1 = [[0.3, -1.3], [0.1, 0.], [1.0, 0.0]]
C1 = [[0., 0.1, 0.0], [-0.3, -0.2, 0.0]]
D1 = [[0., -0.8], [-0.3, 0.]]
s = TransferFunction([1, 0], [1])
h = 1 / (s + 1) / (s + 2)
sys1 = StateSpace(A1, B1, C1, D1)
sys2 = _convert_to_statespace(h)
sys3c = sys1.append(sys2)
np.testing.assert_array_almost_equal(sys1.A, sys3c.A[:3, :3])
np.testing.assert_array_almost_equal(sys1.B, sys3c.B[:3, :2])
np.testing.assert_array_almost_equal(sys1.C, sys3c.C[:2, :3])
np.testing.assert_array_almost_equal(sys1.D, sys3c.D[:2, :2])
np.testing.assert_array_almost_equal(sys2.A, sys3c.A[3:, 3:])
np.testing.assert_array_almost_equal(sys2.B, sys3c.B[3:, 2:])
np.testing.assert_array_almost_equal(sys2.C, sys3c.C[2:, 3:])
np.testing.assert_array_almost_equal(sys2.D, sys3c.D[2:, 2:])
np.testing.assert_array_almost_equal(sys3c.A[:3, 3:], np.zeros((3, 2)))
np.testing.assert_array_almost_equal(sys3c.A[3:, :3], np.zeros((2, 3)))
def test_array_access_ss(self):
sys1 = StateSpace([[1., 2.], [3., 4.]],
[[5., 6.], [6., 8.]],
[[9., 10.], [11., 12.]],
[[13., 14.], [15., 16.]], 1)
sys1_11 = sys1[0, 1]
np.testing.assert_array_almost_equal(sys1_11.A,
sys1.A)
np.testing.assert_array_almost_equal(sys1_11.B,
sys1.B[:, 1:2])
np.testing.assert_array_almost_equal(sys1_11.C,
sys1.C[0:1, :])
np.testing.assert_array_almost_equal(sys1_11.D,
sys1.D[0, 1])
assert sys1.dt == sys1_11.dt
def test_dc_gain_cont(self):
"""Test DC gain for continuous-time state-space systems."""
sys = StateSpace(-2., 6., 5., 0)
np.testing.assert_allclose(sys.dcgain(), 15.)
sys2 = StateSpace(-2, [6., 4.], [[5.], [7.], [11]], np.zeros((3, 2)))
expected = np.array([[15., 10.], [21., 14.], [33., 22.]])
np.testing.assert_allclose(sys2.dcgain(), expected)
sys3 = StateSpace(0., 1., 1., 0.)
np.testing.assert_equal(sys3.dcgain(), np.nan)
def test_dc_gain_discr(self):
"""Test DC gain for discrete-time state-space systems."""
# static gain
sys = StateSpace([], [], [], 2, True)
np.testing.assert_equal(sys.dcgain(), 2)
# averaging filter
sys = StateSpace(0.5, 0.5, 1, 0, True)
np.testing.assert_allclose(sys.dcgain(), 1)
# differencer
sys = StateSpace(0, 1, -1, 1, True)
np.testing.assert_equal(sys.dcgain(), 0)
# summer
sys = StateSpace(1, 1, 1, 0, True)
np.testing.assert_equal(sys.dcgain(), np.nan)
@pytest.mark.parametrize("outputs", range(1, 6))
@pytest.mark.parametrize("inputs", range(1, 6))
@pytest.mark.parametrize("dt", [None, 0, 1, True],
ids=["dtNone", "c", "dt1", "dtTrue"])
def test_dc_gain_integrator(self, outputs, inputs, dt):
"""DC gain when eigenvalue at DC returns appropriately sized array of nan.
the SISO case is also tested in test_dc_gain_{cont,discr}
time systems (dt=0)
"""
states = max(inputs, outputs)
# a matrix that is singular at DC, and has no "useless" states as in
# _remove_useless_states
a = np.triu(np.tile(2, (states, states)))
# eigenvalues all +2, except for ...
a[0, 0] = 0 if dt in [0, None] else 1
b = np.eye(max(inputs, states))[:states, :inputs]
c = np.eye(max(outputs, states))[:outputs, :states]
d = np.zeros((outputs, inputs))
sys = StateSpace(a, b, c, d, dt)
dc = np.squeeze(np.full_like(d, np.nan))
np.testing.assert_array_equal(dc, sys.dcgain())
def test_scalar_static_gain(self):
"""Regression: can we create a scalar static gain?
make sure StateSpace internals, specifically ABC matrix
sizes, are OK for LTI operations
"""
g1 = StateSpace([], [], [], [2])
g2 = StateSpace([], [], [], [3])
g3 = g1 * g2
assert 6 == g3.D[0, 0]
g4 = g1 + g2
assert 5 == g4.D[0, 0]
g5 = g1.feedback(g2)
np.testing.assert_allclose(2. / 7, g5.D[0, 0])
g6 = g1.append(g2)
np.testing.assert_allclose(np.diag([2, 3]), g6.D)
def test_matrix_static_gain(self):
"""Regression: can we create matrix static gains?"""
d1 = np.array([[1, 2, 3], [4, 5, 6]])
d2 = np.array([[7, 8], [9, 10], [11, 12]])
g1 = StateSpace([], [], [], d1)
# _remove_useless_states was making A = [[0]]
assert (0, 0) == g1.A.shape
g2 = StateSpace([], [], [], d2)
g3 = StateSpace([], [], [], d2.T)
h1 = g1 * g2
np.testing.assert_array_equal(np.dot(d1, d2), h1.D)
h2 = g1 + g3
np.testing.assert_array_equal(d1 + d2.T, h2.D)
h3 = g1.feedback(g2)
np.testing.assert_array_almost_equal(
solve(np.eye(2) + np.dot(d1, d2), d1), h3.D)
h4 = g1.append(g2)
np.testing.assert_array_equal(block_diag(d1, d2), h4.D)
def test_remove_useless_states(self):
"""Regression: _remove_useless_states gives correct ABC sizes."""
g1 = StateSpace(np.zeros((3, 3)), np.zeros((3, 4)),
np.zeros((5, 3)), np.zeros((5, 4)),
remove_useless_states=True)
assert (0, 0) == g1.A.shape
assert (0, 4) == g1.B.shape
assert (5, 0) == g1.C.shape
assert (5, 4) == g1.D.shape
assert 0 == g1.nstates
@pytest.mark.parametrize("A, B, C, D",
[([1], [], [], [1]),
([1], [1], [], [1]),
([1], [], [1], [1]),
([], [1], [], [1]),
([], [1], [1], [1]),
([], [], [1], [1]),
([1], [1], [1], [])])
def test_bad_empty_matrices(self, A, B, C, D):
"""Mismatched ABCD matrices when some are empty."""
with pytest.raises(ValueError):
StateSpace(A, B, C, D)
def test_minreal_static_gain(self):
"""Regression: minreal on static gain was failing."""
g1 = StateSpace([], [], [], [1])
g2 = g1.minreal()
np.testing.assert_array_equal(g1.A, g2.A)
np.testing.assert_array_equal(g1.B, g2.B)
np.testing.assert_array_equal(g1.C, g2.C)
np.testing.assert_array_equal(g1.D, g2.D)
def test_empty(self):
"""Regression: can we create an empty StateSpace object?"""
g1 = StateSpace([], [], [], [])
assert 0 == g1.nstates
assert 0 == g1.ninputs
assert 0 == g1.noutputs
def test_matrix_to_state_space(self):
"""_convert_to_statespace(matrix) gives ss([],[],[],D)"""
with pytest.deprecated_call():
D = np.matrix([[1, 2, 3], [4, 5, 6]])
g = _convert_to_statespace(D)
np.testing.assert_array_equal(np.empty((0, 0)), g.A)
np.testing.assert_array_equal(np.empty((0, D.shape[1])), g.B)
np.testing.assert_array_equal(np.empty((D.shape[0], 0)), g.C)
np.testing.assert_array_equal(D, g.D)
def test_lft(self):
""" test lft function with result obtained from matlab implementation"""
# test case
A = [[1, 2, 3],
[1, 4, 5],
[2, 3, 4]]
B = [[0, 2],
[5, 6],
[5, 2]]
C = [[1, 4, 5],
[2, 3, 0]]
D = [[0, 0],
[3, 0]]
P = StateSpace(A, B, C, D)
Ak = [[0, 2, 3],
[2, 3, 5],
[2, 1, 9]]
Bk = [[1, 1],
[2, 3],
[9, 4]]
Ck = [[1, 4, 5],
[2, 3, 6]]
Dk = [[0, 2],
[0, 0]]
K = StateSpace(Ak, Bk, Ck, Dk)
# case 1
pk = P.lft(K, 2, 1)
Amatlab = [1, 2, 3, 4, 6, 12, 1, 4, 5, 17, 38, 61, 2, 3, 4, 9, 26, 37,
2, 3, 0, 3, 14, 18, 4, 6, 0, 8, 27, 35, 18, 27, 0, 29, 109,
144]
Bmatlab = [0, 10, 10, 7, 15, 58]
Cmatlab = [1, 4, 5, 0, 0, 0]
Dmatlab = [0]
np.testing.assert_allclose(np.array(pk.A).reshape(-1), Amatlab)
np.testing.assert_allclose(np.array(pk.B).reshape(-1), Bmatlab)
np.testing.assert_allclose(np.array(pk.C).reshape(-1), Cmatlab)
np.testing.assert_allclose(np.array(pk.D).reshape(-1), Dmatlab)
# case 2
pk = P.lft(K)
Amatlab = [1, 2, 3, 4, 6, 12, -3, -2, 5, 11, 14, 31, -2, -3, 4, 3, 2,
7, 0.6, 3.4, 5, -0.6, -0.4, 0, 0.8, 6.2, 10, 0.2, -4.2,
-4, 7.4, 33.6, 45, -0.4, -8.6, -3]
Bmatlab = []
Cmatlab = []
Dmatlab = []
np.testing.assert_allclose(np.array(pk.A).reshape(-1), Amatlab)
np.testing.assert_allclose(np.array(pk.B).reshape(-1), Bmatlab)
np.testing.assert_allclose(np.array(pk.C).reshape(-1), Cmatlab)
np.testing.assert_allclose(np.array(pk.D).reshape(-1), Dmatlab)
def test_repr(self, sys322):
"""Test string representation"""
ref322 = "\n".join(["StateSpace(array([[-3., 4., 2.],",
" [-1., -3., 0.],",
" [ 2., 5., 3.]]), array([[ 1., 4.],",
" [-3., -3.],",
" [-2., 1.]]), array([[ 4., 2., -3.],",
" [ 1., 4., 3.]]), array([[-2., 4.],",
" [ 0., 1.]]){dt})"])
assert repr(sys322) == ref322.format(dt='')
sysd = StateSpace(sys322.A, sys322.B,
sys322.C, sys322.D, 0.4)
        assert repr(sysd) == ref322.format(dt=", 0.4")
array = np.array # noqa
sysd2 = eval(repr(sysd))
np.testing.assert_allclose(sysd.A, sysd2.A)
np.testing.assert_allclose(sysd.B, sysd2.B)
np.testing.assert_allclose(sysd.C, sysd2.C)
np.testing.assert_allclose(sysd.D, sysd2.D)
def test_str(self, sys322):
"""Test that printing the system works"""
tsys = sys322
tref = ("A = [[-3. 4. 2.]\n"
" [-1. -3. 0.]\n"
" [ 2. 5. 3.]]\n"
"\n"
"B = [[ 1. 4.]\n"
" [-3. -3.]\n"
" [-2. 1.]]\n"
"\n"
"C = [[ 4. 2. -3.]\n"
" [ 1. 4. 3.]]\n"
"\n"
"D = [[-2. 4.]\n"
" [ 0. 1.]]\n")
assert str(tsys) == tref
tsysdtunspec = StateSpace(tsys.A, tsys.B, tsys.C, tsys.D, True)
assert str(tsysdtunspec) == tref + "\ndt unspecified\n"
sysdt1 = StateSpace(tsys.A, tsys.B, tsys.C, tsys.D, 1.)
assert str(sysdt1) == tref + "\ndt = 1.0\n"
def test_pole_static(self):
"""Regression: pole() of static gain is empty array."""
np.testing.assert_array_equal(np.array([]),
StateSpace([], [], [], [[1]]).pole())
def test_horner(self, sys322):
"""Test horner() function"""
# Make sure we can compute the transfer function at a complex value
sys322.horner(1. + 1.j)
# Make sure result agrees with frequency response
mag, phase, omega = sys322.frequency_response([1])
np.testing.assert_array_almost_equal(
np.squeeze(sys322.horner(1.j)),
mag[:, :, 0] * np.exp(1.j * phase[:, :, 0]))
class TestRss:
"""These are tests for the proper functionality of statesp.rss."""
    # Maximum number of states to test + 1
maxStates = 10
# Maximum number of inputs and outputs to test + 1
maxIO = 5
@pytest.mark.parametrize('states', range(1, maxStates))
@pytest.mark.parametrize('outputs', range(1, maxIO))
@pytest.mark.parametrize('inputs', range(1, maxIO))
def test_shape(self, states, outputs, inputs):
"""Test that rss outputs have the right state, input, and output size."""
sys = rss(states, outputs, inputs)
assert sys.nstates == states
assert sys.ninputs == inputs
assert sys.noutputs == outputs
@pytest.mark.parametrize('states', range(1, maxStates))
@pytest.mark.parametrize('outputs', range(1, maxIO))
@pytest.mark.parametrize('inputs', range(1, maxIO))
def test_pole(self, states, outputs, inputs):
"""Test that the poles of rss outputs have a negative real part."""
sys = rss(states, outputs, inputs)
p = sys.pole()
for z in p:
assert z.real < 0
class TestDrss:
"""These are tests for the proper functionality of statesp.drss."""
# Maximum number of states to test + 1
maxStates = 10
# Maximum number of inputs and outputs to test + 1
maxIO = 5
@pytest.mark.parametrize('states', range(1, maxStates))
@pytest.mark.parametrize('outputs', range(1, maxIO))
@pytest.mark.parametrize('inputs', range(1, maxIO))
def test_shape(self, states, outputs, inputs):
"""Test that drss outputs have the right state, input, and output size."""
sys = drss(states, outputs, inputs)
assert sys.nstates == states
assert sys.ninputs == inputs
assert sys.noutputs == outputs
@pytest.mark.parametrize('states', range(1, maxStates))
@pytest.mark.parametrize('outputs', range(1, maxIO))
@pytest.mark.parametrize('inputs', range(1, maxIO))
def test_pole(self, states, outputs, inputs):
"""Test that the poles of drss outputs have less than unit magnitude."""
sys = drss(states, outputs, inputs)
p = sys.pole()
for z in p:
assert abs(z) < 1
class TestLTIConverter:
"""Test returnScipySignalLTI method"""
@pytest.fixture
def mimoss(self, request):
"""Test system with various dt values"""
n = 5
m = 3
p = 2
bx, bu = np.mgrid[1:n + 1, 1:m + 1]
cy, cx = np.mgrid[1:p + 1, 1:n + 1]
dy, du = np.mgrid[1:p + 1, 1:m + 1]
return StateSpace(np.eye(5) + np.eye(5, 5, 1),
bx * bu,
cy * cx,
dy * du,
request.param)
@pytest.mark.parametrize("mimoss",
[None,
0,
0.1,
1,
True],
indirect=True)
def test_returnScipySignalLTI(self, mimoss):
"""Test returnScipySignalLTI method with strict=False"""
sslti = mimoss.returnScipySignalLTI(strict=False)
for i in range(mimoss.noutputs):
for j in range(mimoss.ninputs):
np.testing.assert_allclose(sslti[i][j].A, mimoss.A)
np.testing.assert_allclose(sslti[i][j].B, mimoss.B[:,
j:j + 1])
np.testing.assert_allclose(sslti[i][j].C, mimoss.C[i:i + 1,
:])
np.testing.assert_allclose(sslti[i][j].D, mimoss.D[i:i + 1,
j:j + 1])
if mimoss.dt == 0:
assert sslti[i][j].dt is None
else:
assert sslti[i][j].dt == mimoss.dt
@pytest.mark.parametrize("mimoss", [None], indirect=True)
def test_returnScipySignalLTI_error(self, mimoss):
"""Test returnScipySignalLTI method with dt=None and strict=True"""
with pytest.raises(ValueError):
mimoss.returnScipySignalLTI()
with pytest.raises(ValueError):
mimoss.returnScipySignalLTI(strict=True)
class TestStateSpaceConfig:
"""Test the configuration of the StateSpace module"""
@pytest.fixture
def matarrayout(self):
"""Override autoused global fixture within this class"""
pass
def test_statespace_defaults(self, matarrayout):
"""Make sure the tests are run with the configured defaults"""
for k, v in _statesp_defaults.items():
assert defaults[k] == v, \
"{} is {} but expected {}".format(k, defaults[k], v)
# test data for test_latex_repr below
LTX_G1 = ([[np.pi, 1e100], [-1.23456789, 5e-23]],
[[0], [1]],
[[987654321, 0.001234]],
[[5]])
LTX_G2 = ([],
[],
[],
[[1.2345, -2e-200], [-1, 0]])
LTX_G1_REF = {
'p3_p' : '\\[\n\\left(\n\\begin{array}{rllrll|rll}\n3.&\\hspace{-1em}14&\\hspace{-1em}\\phantom{\\cdot}&1\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\cdot10^{100}&0\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\phantom{\\cdot}\\\\\n-1.&\\hspace{-1em}23&\\hspace{-1em}\\phantom{\\cdot}&5\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\cdot10^{-23}&1\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\phantom{\\cdot}\\\\\n\\hline\n9.&\\hspace{-1em}88&\\hspace{-1em}\\cdot10^{8}&0.&\\hspace{-1em}00123&\\hspace{-1em}\\phantom{\\cdot}&5\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\phantom{\\cdot}\\\\\n\\end{array}\\right)\n\\]',
'p5_p' : '\\[\n\\left(\n\\begin{array}{rllrll|rll}\n3.&\\hspace{-1em}1416&\\hspace{-1em}\\phantom{\\cdot}&1\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\cdot10^{100}&0\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\phantom{\\cdot}\\\\\n-1.&\\hspace{-1em}2346&\\hspace{-1em}\\phantom{\\cdot}&5\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\cdot10^{-23}&1\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\phantom{\\cdot}\\\\\n\\hline\n9.&\\hspace{-1em}8765&\\hspace{-1em}\\cdot10^{8}&0.&\\hspace{-1em}001234&\\hspace{-1em}\\phantom{\\cdot}&5\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\phantom{\\cdot}\\\\\n\\end{array}\\right)\n\\]',
'p3_s' : '\\[\n\\begin{array}{ll}\nA = \\left(\\begin{array}{rllrll}\n3.&\\hspace{-1em}14&\\hspace{-1em}\\phantom{\\cdot}&1\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\cdot10^{100}\\\\\n-1.&\\hspace{-1em}23&\\hspace{-1em}\\phantom{\\cdot}&5\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\cdot10^{-23}\\\\\n\\end{array}\\right)\n&\nB = \\left(\\begin{array}{rll}\n0\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\phantom{\\cdot}\\\\\n1\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\phantom{\\cdot}\\\\\n\\end{array}\\right)\n\\\\\nC = \\left(\\begin{array}{rllrll}\n9.&\\hspace{-1em}88&\\hspace{-1em}\\cdot10^{8}&0.&\\hspace{-1em}00123&\\hspace{-1em}\\phantom{\\cdot}\\\\\n\\end{array}\\right)\n&\nD = \\left(\\begin{array}{rll}\n5\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\phantom{\\cdot}\\\\\n\\end{array}\\right)\n\\end{array}\n\\]',
'p5_s' : '\\[\n\\begin{array}{ll}\nA = \\left(\\begin{array}{rllrll}\n3.&\\hspace{-1em}1416&\\hspace{-1em}\\phantom{\\cdot}&1\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\cdot10^{100}\\\\\n-1.&\\hspace{-1em}2346&\\hspace{-1em}\\phantom{\\cdot}&5\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\cdot10^{-23}\\\\\n\\end{array}\\right)\n&\nB = \\left(\\begin{array}{rll}\n0\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\phantom{\\cdot}\\\\\n1\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\phantom{\\cdot}\\\\\n\\end{array}\\right)\n\\\\\nC = \\left(\\begin{array}{rllrll}\n9.&\\hspace{-1em}8765&\\hspace{-1em}\\cdot10^{8}&0.&\\hspace{-1em}001234&\\hspace{-1em}\\phantom{\\cdot}\\\\\n\\end{array}\\right)\n&\nD = \\left(\\begin{array}{rll}\n5\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\phantom{\\cdot}\\\\\n\\end{array}\\right)\n\\end{array}\n\\]',
}
LTX_G2_REF = {
'p3_p' : '\\[\n\\left(\n\\begin{array}{rllrll}\n1.&\\hspace{-1em}23&\\hspace{-1em}\\phantom{\\cdot}&-2\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\cdot10^{-200}\\\\\n-1\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\phantom{\\cdot}&0\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\phantom{\\cdot}\\\\\n\\end{array}\\right)\n\\]',
'p5_p' : '\\[\n\\left(\n\\begin{array}{rllrll}\n1.&\\hspace{-1em}2345&\\hspace{-1em}\\phantom{\\cdot}&-2\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\cdot10^{-200}\\\\\n-1\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\phantom{\\cdot}&0\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\phantom{\\cdot}\\\\\n\\end{array}\\right)\n\\]',
'p3_s' : '\\[\n\\begin{array}{ll}\nD = \\left(\\begin{array}{rllrll}\n1.&\\hspace{-1em}23&\\hspace{-1em}\\phantom{\\cdot}&-2\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\cdot10^{-200}\\\\\n-1\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\phantom{\\cdot}&0\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\phantom{\\cdot}\\\\\n\\end{array}\\right)\n\\end{array}\n\\]',
'p5_s' : '\\[\n\\begin{array}{ll}\nD = \\left(\\begin{array}{rllrll}\n1.&\\hspace{-1em}2345&\\hspace{-1em}\\phantom{\\cdot}&-2\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\cdot10^{-200}\\\\\n-1\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\phantom{\\cdot}&0\\phantom{.}&\\hspace{-1em}&\\hspace{-1em}\\phantom{\\cdot}\\\\\n\\end{array}\\right)\n\\end{array}\n\\]',
}
refkey_n = {None: 'p3', '.3g': 'p3', '.5g': 'p5'}
refkey_r = {None: 'p', 'partitioned': 'p', 'separate': 's'}
@pytest.mark.parametrize(" gmats, ref",
[(LTX_G1, LTX_G1_REF),
(LTX_G2, LTX_G2_REF)])
@pytest.mark.parametrize("repr_type", [None, "partitioned", "separate"])
@pytest.mark.parametrize("num_format", [None, ".3g", ".5g"])
def test_latex_repr(gmats, ref, repr_type, num_format, editsdefaults):
"""Test `._latex_repr_` with different config values
This is a 'gold image' test, so if you change behaviour,
you'll need to regenerate the reference results.
Try something like:
control.reset_defaults()
print(f'p3_p : {g1._repr_latex_()!r}')
"""
from control import set_defaults
if num_format is not None:
set_defaults('statesp', latex_num_format=num_format)
if repr_type is not None:
set_defaults('statesp', latex_repr_type=repr_type)
g = StateSpace(*gmats)
refkey = "{}_{}".format(refkey_n[num_format], refkey_r[repr_type])
assert g._repr_latex_() == ref[refkey]
@pytest.mark.parametrize(
"op",
[pytest.param(getattr(operator, s), id=s) for s in ('add', 'sub', 'mul')])
@pytest.mark.parametrize(
"tf, arr",
[pytest.param(ct.tf([1], [0.5, 1]), np.array(2.), id="0D scalar"),
pytest.param(ct.tf([1], [0.5, 1]), np.array([2.]), id="1D scalar"),
pytest.param(ct.tf([1], [0.5, 1]), np.array([[2.]]), id="2D scalar")])
def test_xferfcn_ndarray_precedence(op, tf, arr):
# Apply the operator to the transfer function and array
ss = ct.tf2ss(tf)
result = op(ss, arr)
assert isinstance(result, ct.StateSpace)
# Apply the operator to the array and transfer function
ss = ct.tf2ss(tf)
result = op(arr, ss)
assert isinstance(result, ct.StateSpace)
| 43.012632
| 835
| 0.511062
|
2872f1a1dcba11fefa3f47ef3d354ccdce714a64
| 1,265
|
py
|
Python
|
three.py/TestTemplate.py
|
lukestanley/three.py
|
a3fa99cb3553aca8c74ceabb8203edeb55450803
|
[
"MIT"
] | 80
|
2019-04-04T13:41:32.000Z
|
2022-01-12T18:40:19.000Z
|
three.py/TestTemplate.py
|
lukestanley/three.py
|
a3fa99cb3553aca8c74ceabb8203edeb55450803
|
[
"MIT"
] | 9
|
2019-04-04T14:43:50.000Z
|
2020-03-29T04:50:53.000Z
|
three.py/TestTemplate.py
|
lukestanley/three.py
|
a3fa99cb3553aca8c74ceabb8203edeb55450803
|
[
"MIT"
] | 17
|
2019-04-04T14:20:42.000Z
|
2022-03-03T16:26:29.000Z
|
from core import *
from cameras import *
from geometry import *
from material import *
from helpers import *
class TestTemplate(Base):
def initialize(self):
self.setWindowTitle('Test')
self.setWindowSize(800,800)
self.renderer = Renderer()
self.renderer.setViewportSize(800,800)
self.renderer.setClearColor(0.25, 0.25, 0.25)
self.scene = Scene()
self.camera = PerspectiveCamera()
self.camera.transform.setPosition(0, 1, 5)
self.camera.transform.lookAt(0, 0, 0)
self.cameraControls = FirstPersonController(self.input, self.camera)
floorMesh = GridHelper(size=10, divisions=10, gridColor=[0,0,0], centerColor=[1,0,0])
floorMesh.transform.rotateX(-3.14/2, Matrix.LOCAL)
self.scene.add(floorMesh)
def update(self):
self.cameraControls.update()
if self.input.resize():
size = self.input.getWindowSize()
self.camera.setAspectRatio( size["width"]/size["height"] )
self.renderer.setViewportSize(size["width"], size["height"])
self.renderer.render(self.scene, self.camera)
# instantiate and run the program
TestTemplate().run()
| 28.75
| 93
| 0.620553
|
dc47aa0968e0db5a777b4723f5cd0104a26eefe2
| 22,128
|
py
|
Python
|
pymclevel/minecraft_server.py
|
gpmidi/MCEdit-Unified
|
60e1408899fa04113412b89616fd3486db5c8545
|
[
"0BSD"
] | null | null | null |
pymclevel/minecraft_server.py
|
gpmidi/MCEdit-Unified
|
60e1408899fa04113412b89616fd3486db5c8545
|
[
"0BSD"
] | null | null | null |
pymclevel/minecraft_server.py
|
gpmidi/MCEdit-Unified
|
60e1408899fa04113412b89616fd3486db5c8545
|
[
"0BSD"
] | null | null | null |
import atexit
import itertools
import logging
import os
from os.path import dirname, join, basename
import random
from pymclevel import PocketLeveldbWorld
import re
import shutil
import subprocess
import sys
import tempfile
import time
import urllib
import json
import urllib2
import infiniteworld
from directories import getCacheDir
from mclevelbase import exhaust, ChunkNotPresent
log = logging.getLogger(__name__)
__author__ = 'Rio'
# Thank you, Stackoverflow
# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def which(program):
def is_exe(f):
return os.path.exists(f) and os.access(f, os.X_OK)
fpath, _fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
if sys.platform == "win32":
if "SYSTEMROOT" in os.environ:
root = os.environ["SYSTEMROOT"]
exe_file = os.path.join(root, program)
if is_exe(exe_file):
return exe_file
if "PATH" in os.environ:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
def getVersions(doSnapshot):
JAR_VERSION_URL_TEMPLATE = "https://s3.amazonaws.com/Minecraft.Download/versions/{}/minecraft_server.{}.jar"
versionSite = urllib2.urlopen("http://s3.amazonaws.com/Minecraft.Download/versions/versions.json")
versionSiteResponse = versionSite.read()
versionJSON = json.loads(versionSiteResponse)
if doSnapshot:
version = versionJSON["latest"]["snapshot"]
else:
version = versionJSON["latest"]["release"]
print "Version: " + version
URL = JAR_VERSION_URL_TEMPLATE.format(version, version)
return URL
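# Note: versions.json, as consumed above, is expected to contain at least
# {"latest": {"release": "<version>", "snapshot": "<version>"}}; only the
# "latest" entry is read here.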
def sort_nicely(l):
""" Sort the given list in the way that humans expect.
"""
l.sort(key=alphanum_key)
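# Example: sort_nicely(["Beta 1.10", "Beta 1.2", "Beta 1.9"]) sorts numeric chunks
# as numbers, giving ["Beta 1.2", "Beta 1.9", "Beta 1.10"] instead of lexicographic order.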
class ServerJarStorage(object):
cacheDir = os.path.join(getCacheDir(), u"ServerJarStorage")
def __init__(self, cacheDir=None):
if not os.path.exists(self.cacheDir):
os.makedirs(self.cacheDir)
readme = os.path.join(self.cacheDir, "README.TXT")
if not os.path.exists(readme):
with file(readme, "w") as f:
f.write("""
About this folder:
This folder is used by MCEdit and pymclevel to store different versions of the
Minecraft Server to use for terrain generation. It should have one or more
subfolders, one for each version of the server. Each subfolder must hold at
least one file named minecraft_server.jar, and the subfolder's name should
have the server's version plus the names of any installed mods.
There may already be a subfolder here (for example, "Release 1.7.10") if you have
used the Chunk Create feature in MCEdit to create chunks using the server.
Version numbers can be automatically detected. If you place one or more
minecraft_server.jar files in this folder, they will be placed automatically
into well-named subfolders the next time you run MCEdit. If a file's name
begins with "minecraft_server" and ends with ".jar", it will be detected in
this way.
""")
self.reloadVersions()
def reloadVersions(self):
cacheDirList = os.listdir(self.cacheDir)
self.versions = list(
reversed(sorted([v for v in cacheDirList if os.path.exists(self.jarfileForVersion(v))], key=alphanum_key)))
if MCServerChunkGenerator.javaExe:
for f in cacheDirList:
p = os.path.join(self.cacheDir, f)
if f.startswith("minecraft_server") and f.endswith(".jar") and os.path.isfile(p):
print "Unclassified minecraft_server.jar found in cache dir. Discovering version number..."
self.cacheNewVersion(p)
os.remove(p)
print "Minecraft_Server.jar storage initialized."
print u"Each server is stored in a subdirectory of {0} named with the server's version number".format(
self.cacheDir)
print "Cached servers: ", self.versions
def downloadCurrentServer(self, getSnapshot):
self.snapshot = getSnapshot
print "Downloading the latest Minecraft Server..."
try:
(filename, headers) = urllib.urlretrieve(getVersions(getSnapshot))
except Exception, e:
print "Error downloading server: {0!r}".format(e)
return
self.cacheNewVersion(filename, allowDuplicate=False)
def cacheNewVersion(self, filename, allowDuplicate=True):
""" Finds the version number from the server jar at filename and copies
it into the proper subfolder of the server jar cache folder"""
version = MCServerChunkGenerator._serverVersionFromJarFile(filename)
print "Found version ", version
versionDir = os.path.join(self.cacheDir, version)
i = 1
newVersionDir = versionDir
while os.path.exists(newVersionDir):
if not allowDuplicate:
return
newVersionDir = versionDir + " (" + str(i) + ")"
i += 1
os.mkdir(newVersionDir)
shutil.copy2(filename, os.path.join(newVersionDir, "minecraft_server.jar"))
if version not in self.versions:
self.versions.append(version)
def jarfileForVersion(self, v):
return os.path.join(self.cacheDir, v, "minecraft_server.jar").encode(sys.getfilesystemencoding())
def checksumForVersion(self, v):
jf = self.jarfileForVersion(v)
with file(jf, "rb") as f:
import hashlib
return hashlib.md5(f.read()).hexdigest()
broken_versions = ["Beta 1.9 Prerelease {0}".format(i) for i in (1, 2, 3)]
@property
def latestVersion(self):
if len(self.versions) == 0:
return None
return max((v for v in self.versions if v not in self.broken_versions), key=alphanum_key)
def getJarfile(self, version=None):
if len(self.versions) == 0:
print "No servers found in cache."
self.downloadCurrentServer(False)
version = version or self.latestVersion
if version not in self.versions:
return None
return self.jarfileForVersion(version)
class JavaNotFound(RuntimeError):
pass
class VersionNotFound(RuntimeError):
pass
def readProperties(filename):
if not os.path.exists(filename):
return {}
with file(filename) as f:
properties = dict((line.split("=", 2) for line in (l.strip() for l in f) if not line.startswith("#")))
return properties
def saveProperties(filename, properties):
with file(filename, "w") as f:
for k, v in properties.iteritems():
f.write("{0}={1}\n".format(k, v))
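# Illustrative round trip (file name made up): saveProperties("server.properties",
# {"level-name": "world", "level-seed": "12345"}) writes one "key=value" line per
# entry, and readProperties() parses such a file back into a dict of strings.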
def findJava():
if sys.platform == "win32":
javaExe = which("java.exe")
if javaExe is None:
KEY_NAME = "HKLM\SOFTWARE\JavaSoft\Java Runtime Environment"
try:
p = subprocess.Popen(["REG", "QUERY", KEY_NAME, "/v", "CurrentVersion"], stdout=subprocess.PIPE,
universal_newlines=True)
o, e = p.communicate()
lines = o.split("\n")
for l in lines:
l = l.strip()
if l.startswith("CurrentVersion"):
words = l.split(None, 2)
version = words[-1]
p = subprocess.Popen(["REG", "QUERY", KEY_NAME + "\\" + version, "/v", "JavaHome"],
stdout=subprocess.PIPE, universal_newlines=True)
o, e = p.communicate()
lines = o.split("\n")
for l in lines:
l = l.strip()
if l.startswith("JavaHome"):
w = l.split(None, 2)
javaHome = w[-1]
javaExe = os.path.join(javaHome, "bin", "java.exe")
print "RegQuery: java.exe found at ", javaExe
break
except Exception, e:
print "Error while locating java.exe using the Registry: ", repr(e)
else:
javaExe = which("java")
return javaExe
class MCServerChunkGenerator(object):
"""Generates chunks using minecraft_server.jar. Uses a ServerJarStorage to
store different versions of minecraft_server.jar in an application support
folder.
    Example usage:
    from pymclevel import *
gen = MCServerChunkGenerator() # with no arguments, use the newest
# server version in the cache, or download
# the newest one automatically
level = loadWorldNamed("MyWorld")
gen.generateChunkInLevel(level, 12, 24)
Using an older version:
gen = MCServerChunkGenerator("Beta 1.6.5")
"""
defaultJarStorage = None
javaExe = findJava()
jarStorage = None
tempWorldCache = {}
def __init__(self, version=None, jarfile=None, jarStorage=None):
self.jarStorage = jarStorage or self.getDefaultJarStorage()
if self.javaExe is None:
raise JavaNotFound(
"Could not find java. Please check that java is installed correctly. (Could not find java in your PATH environment variable.)")
if jarfile is None:
jarfile = self.jarStorage.getJarfile(version)
if jarfile is None:
raise VersionNotFound(
"Could not find minecraft_server.jar for version {0}. Please make sure that a minecraft_server.jar is placed under {1} in a subfolder named after the server's version number.".format(
version or "(latest)", self.jarStorage.cacheDir))
self.serverJarFile = jarfile
self.serverVersion = version or self._serverVersion()
@classmethod
def getDefaultJarStorage(cls):
if cls.defaultJarStorage is None:
cls.defaultJarStorage = ServerJarStorage()
return cls.defaultJarStorage
@classmethod
def clearWorldCache(cls):
cls.tempWorldCache = {}
for tempDir in os.listdir(cls.worldCacheDir):
t = os.path.join(cls.worldCacheDir, tempDir)
if os.path.isdir(t):
shutil.rmtree(t)
def createReadme(self):
readme = os.path.join(self.worldCacheDir, "README.TXT")
if not os.path.exists(readme):
with file(readme, "w") as f:
f.write("""
About this folder:
This folder is used by MCEdit and pymclevel to cache levels during terrain
generation. Feel free to delete it for any reason.
""")
worldCacheDir = os.path.join(tempfile.gettempdir(), "pymclevel_MCServerChunkGenerator")
def tempWorldForLevel(self, level):
# tempDir = tempfile.mkdtemp("mclevel_servergen")
tempDir = os.path.join(self.worldCacheDir, self.jarStorage.checksumForVersion(self.serverVersion),
str(level.RandomSeed))
propsFile = os.path.join(tempDir, "server.properties")
properties = readProperties(propsFile)
tempWorld = self.tempWorldCache.get((self.serverVersion, level.RandomSeed))
if tempWorld is None:
if not os.path.exists(tempDir):
os.makedirs(tempDir)
self.createReadme()
worldName = "world"
worldName = properties.setdefault("level-name", worldName)
tempWorldDir = os.path.join(tempDir, worldName)
tempWorld = infiniteworld.MCInfdevOldLevel(tempWorldDir, create=True, random_seed=level.RandomSeed)
tempWorld.close()
tempWorldRO = infiniteworld.MCInfdevOldLevel(tempWorldDir, readonly=True)
self.tempWorldCache[self.serverVersion, level.RandomSeed] = tempWorldRO
if level.dimNo == 0:
properties["allow-nether"] = "false"
else:
tempWorld = tempWorld.getDimension(level.dimNo)
properties["allow-nether"] = "true"
properties["server-port"] = int(32767 + random.random() * 32700)
saveProperties(propsFile, properties)
return tempWorld, tempDir
def generateAtPosition(self, tempWorld, tempDir, cx, cz):
return exhaust(self.generateAtPositionIter(tempWorld, tempDir, cx, cz))
@staticmethod
def addEULA(tempDir):
eulaLines = [
"#By changing the setting below to TRUE you are indicating your agreement to our EULA (https://account.mojang.com/documents/minecraft_eula).\n",
"#Wed Jul 23 21:10:11 EDT 2014\n", "eula=true\n"]
with open(tempDir + "/" + "eula.txt", "w") as f:
f.writelines(eulaLines)
def generateAtPositionIter(self, tempWorld, tempDir, cx, cz, simulate=False):
tempWorldRW = infiniteworld.MCInfdevOldLevel(tempWorld.filename)
tempWorldRW.setPlayerSpawnPosition((cx * 16, 64, cz * 16))
tempWorldRW.saveInPlace()
tempWorldRW.close()
del tempWorldRW
tempWorld.unload()
self.addEULA(tempDir)
startTime = time.time()
proc = self.runServer(tempDir)
while proc.poll() is None:
line = proc.stdout.readline().strip()
log.info(line)
yield line
# Forge and FML change stderr output, causing MCServerChunkGenerator to wait endlessly.
#
# Vanilla:
# 2012-11-13 11:29:19 [INFO] Done (9.962s)!
#
# Forge/FML:
# 2012-11-13 11:47:13 [INFO] [Minecraft] Done (8.020s)!
if "INFO" in line and "Done" in line:
if simulate:
duration = time.time() - startTime
simSeconds = max(8, int(duration) + 1)
for i in range(simSeconds):
# process tile ticks
yield "%2d/%2d: Simulating the world for a little bit..." % (i, simSeconds)
time.sleep(1)
proc.stdin.write("stop\n")
proc.wait()
break
if "FAILED TO BIND" in line:
proc.kill()
proc.wait()
raise RuntimeError("Server failed to bind to port!")
stdout, _ = proc.communicate()
if "Could not reserve enough space" in stdout and not MCServerChunkGenerator.lowMemory:
MCServerChunkGenerator.lowMemory = True
for i in self.generateAtPositionIter(tempWorld, tempDir, cx, cz):
yield i
(tempWorld.parentWorld or tempWorld).loadLevelDat() # reload version number
def copyChunkAtPosition(self, tempWorld, level, cx, cz):
if level.containsChunk(cx, cz):
return
try:
tempChunkBytes = tempWorld._getChunkBytes(cx, cz)
except ChunkNotPresent, e:
raise ChunkNotPresent("While generating a world in {0} using server {1} ({2!r})".format(tempWorld,
self.serverJarFile,
e), sys.exc_info()[
2])
if isinstance(level, PocketLeveldbWorld):
level.saveGeneratedChunk(cx, cz, tempChunkBytes)
else:
level.worldFolder.saveChunk(cx, cz, tempChunkBytes)
level._allChunks = None
def generateChunkInLevel(self, level, cx, cz):
assert isinstance(level, infiniteworld.MCInfdevOldLevel)
tempWorld, tempDir = self.tempWorldForLevel(level)
self.generateAtPosition(tempWorld, tempDir, cx, cz)
self.copyChunkAtPosition(tempWorld, level, cx, cz)
minRadius = 5
maxRadius = 20
def createLevel(self, level, box, simulate=False, **kw):
return exhaust(self.createLevelIter(level, box, simulate, **kw))
def createLevelIter(self, level, box, simulate=False, worldType="DEFAULT", **kw):
if isinstance(level, basestring):
filename = level
level = infiniteworld.MCInfdevOldLevel(filename, create=True, **kw)
assert isinstance(level, infiniteworld.MCInfdevOldLevel)
minRadius = self.minRadius
genPositions = list(itertools.product(
xrange(box.mincx, box.maxcx, minRadius * 2),
xrange(box.mincz, box.maxcz, minRadius * 2)))
for i, (cx, cz) in enumerate(genPositions):
log.info("Generating at %s" % ((cx, cz),))
parentDir = dirname(os.path.abspath(level.worldFolder.filename))
propsFile = join(parentDir, "server.properties")
props = readProperties(join(dirname(self.serverJarFile), "server.properties"))
props["level-name"] = basename(level.worldFolder.filename)
props["server-port"] = int(32767 + random.random() * 32700)
props["level-type"] = worldType
saveProperties(propsFile, props)
for p in self.generateAtPositionIter(level, parentDir, cx, cz, simulate):
yield i, len(genPositions), p
level.close()
def generateChunksInLevel(self, level, chunks):
return exhaust(self.generateChunksInLevelIter(level, chunks))
def generateChunksInLevelIter(self, level, chunks, simulate=False):
tempWorld, tempDir = self.tempWorldForLevel(level)
startLength = len(chunks)
minRadius = self.minRadius
maxRadius = self.maxRadius
chunks = set(chunks)
while len(chunks):
length = len(chunks)
centercx, centercz = chunks.pop()
chunks.add((centercx, centercz))
# assume the generator always generates at least an 11x11 chunk square.
centercx += minRadius
centercz += minRadius
# boxedChunks = [cPos for cPos in chunks if inBox(cPos)]
print "Generating {0} chunks out of {1} starting from {2}".format("XXX", len(chunks), (centercx, centercz))
yield startLength - len(chunks), startLength
# chunks = [c for c in chunks if not inBox(c)]
for p in self.generateAtPositionIter(tempWorld, tempDir, centercx, centercz, simulate):
yield startLength - len(chunks), startLength, p
i = 0
for cx, cz in itertools.product(
xrange(centercx - maxRadius, centercx + maxRadius),
xrange(centercz - maxRadius, centercz + maxRadius)):
if level.containsChunk(cx, cz):
chunks.discard((cx, cz))
elif ((cx, cz) in chunks
and all(tempWorld.containsChunk(ncx, ncz) for ncx, ncz in
itertools.product(xrange(cx - 1, cx + 2), xrange(cz - 1, cz + 2)))
):
self.copyChunkAtPosition(tempWorld, level, cx, cz)
i += 1
chunks.discard((cx, cz))
yield startLength - len(chunks), startLength
if length == len(chunks):
print "No chunks were generated. Aborting."
break
level.saveInPlace()
def runServer(self, startingDir):
if isinstance(startingDir, unicode):
startingDir = startingDir.encode(sys.getfilesystemencoding())
return self._runServer(startingDir, self.serverJarFile)
lowMemory = False
@classmethod
def _runServer(cls, startingDir, jarfile):
log.info("Starting server %s in %s", jarfile, startingDir)
if cls.lowMemory:
memflags = []
else:
memflags = ["-Xmx1024M", "-Xms1024M", ]
proc = subprocess.Popen([cls.javaExe, "-Djava.awt.headless=true"] + memflags + ["-jar", jarfile],
executable=cls.javaExe,
cwd=startingDir,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
atexit.register(proc.terminate)
return proc
def _serverVersion(self):
return self._serverVersionFromJarFile(self.serverJarFile)
@classmethod
def _serverVersionFromJarFile(cls, jarfile):
tempdir = tempfile.mkdtemp("mclevel_servergen")
proc = cls._runServer(tempdir, jarfile)
version = "Unknown"
# out, err = proc.communicate()
# for line in err.split("\n"):
while proc.poll() is None:
line = proc.stdout.readline()
if "Preparing start region" in line:
break
if "Starting minecraft server version" in line:
version = line.split("Starting minecraft server version")[1].strip()
break
if proc.returncode is None:
try:
proc.kill()
            except OSError:
                pass  # access denied, or process already terminated (WindowsError is an OSError subclass)
proc.wait()
shutil.rmtree(tempdir)
if ";)" in version:
version = version.replace(";)", "") # Damnit, Jeb!
# Versions like "0.2.1" are alphas, and versions like "1.0.0" without "Beta" are releases
if version[0] == "0":
version = "Alpha " + version
elif 'w' in version or 'pre' in version:
version = "Snapshot " + version
else:
try:
if int(version[0]) > 0:
version = "Release " + version
except ValueError:
pass
return version
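# Illustrative sketch (not part of the original module): batch-generating a 4x4
# square of chunks with the newest cached server. `loadWorldNamed` comes from
# pymclevel, as in the class docstring above; the world name is a placeholder.
def _demo_generate_chunk_square():
    gen = MCServerChunkGenerator()
    level = loadWorldNamed("MyWorld")
    chunks = [(cx, cz) for cx in xrange(4) for cz in xrange(4)]
    gen.generateChunksInLevel(level, chunks)  # saves the level in place when done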
| 36.696517 | 199 | 0.592733 |
227e506ba3748da5e727d00c5e9c475c7051bbcc | 289 | py | Python
| outsource/outsource/pipelines.py | iamjoel/find-outsource-tool | dd799298ffebbc25bd416d54f255ecd8ee8b4fd2 | ["MIT"] | null | null | null
| outsource/outsource/pipelines.py | iamjoel/find-outsource-tool | dd799298ffebbc25bd416d54f255ecd8ee8b4fd2 | ["MIT"] | null | null | null
| outsource/outsource/pipelines.py | iamjoel/find-outsource-tool | dd799298ffebbc25bd416d54f255ecd8ee8b4fd2 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class OutsourcePipeline(object):
def process_item(self, item, spider):
return item
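# Hedged sketch: to activate this pipeline, it would be registered in the
# project's settings.py (module path assumed from this file's location; the
# priority value 300 is arbitrary):
#
# ITEM_PIPELINES = {
#     'outsource.pipelines.OutsourcePipeline': 300,
# }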
| 24.083333 | 65 | 0.712803 |
f2c9015e997e8b6b749031eb121f6184265fa6bd | 4,662 | py | Python
| cacheman/autosync.py | Jenyay/py_cache_manager | aa7fa33dcbc30dbd6b2e62cade5f371f6dbbe2cb | ["BSD-2-Clause"] | null | null | null
| cacheman/autosync.py | Jenyay/py_cache_manager | aa7fa33dcbc30dbd6b2e62cade5f371f6dbbe2cb | ["BSD-2-Clause"] | 5 | 2015-02-18T22:17:52.000Z | 2018-01-23T05:30:09.000Z
| cacheman/autosync.py | Jenyay/py_cache_manager | aa7fa33dcbc30dbd6b2e62cade5f371f6dbbe2cb | ["BSD-2-Clause"] | 2 | 2021-05-31T15:18:50.000Z | 2022-01-15T16:50:25.000Z |
from collections import namedtuple, deque
from datetime import datetime
from operator import attrgetter
from builtins import range
from .cachewrap import PersistentCache
TimeCount = namedtuple('TimeCount', ['time_length', 'count'])
MANY_WRITE_TIME_COUNTS = [TimeCount(60, 1000000), TimeCount(300, 10000), TimeCount(900, 1)]
class AutoSyncCacheBase(object):
def __init__(self, base_class, cache_name, time_checks=None, time_bucket_size=None, **kwargs):
# These are sorted from shortest time frame to longest
self.time_checks = sorted(time_checks or [TimeCount(60, 10000), TimeCount(300, 10), TimeCount(900, 1)],
key=attrgetter('time_length'))
self.time_bucket_size = time_bucket_size or 15 # Seconds
self.time_counts = deque(0 for _ in range(self.bucket_count()))
self.last_shift_time = datetime.now()
self.base_class = base_class
self.base_class.__init__(self, cache_name, **kwargs)
def bucket_count(self):
return self.time_checks[-1].time_length // self.time_bucket_size
def _delta_bucket_match(self, delta_shift_time):
return max(min(self.bucket_count(),
int(delta_shift_time.total_seconds() // self.time_bucket_size)), 0)
def find_bucket(self, edit_time):
'''
Raises IndexError on times outside bucket range.
'''
delta_shift_time = self.last_shift_time - edit_time
bucket = self.bucket_count() - 1 - int(delta_shift_time.total_seconds() // self.time_bucket_size)
if bucket < 0 or bucket >= self.bucket_count():
raise IndexError('Time of edit since last shift outside bucket bounds')
return bucket
def time_shift_buckets(self):
shift_time = datetime.now()
snapped_seconds = self.time_bucket_size * (shift_time.second // self.time_bucket_size)
shift_time = shift_time.replace(second=snapped_seconds)
delta_buckets = self._delta_bucket_match(shift_time - self.last_shift_time)
if delta_buckets:
self.time_counts.rotate(-delta_buckets)
for i in range(1, delta_buckets + 1):
self.time_counts[-i] = 0
self.last_shift_time = shift_time
return shift_time
def bucket_within_time(self, bucket, time_check):
return len(self.time_counts) - 1 - bucket < time_check.time_length // self.time_bucket_size
def clear_bucket_counts(self):
for i in range(self.bucket_count()):
self.time_counts[i] = 0
def check_save_conditions(self):
bucket = len(self.time_counts) - 1
for check in self.time_checks:
time_count = 0
while bucket >= 0 and self.bucket_within_time(bucket, check):
time_count += self.time_counts[bucket]
if time_count >= check.count:
self.save()
return True
bucket -= 1
return False
def track_edit(self, count=1, edit_time=None):
self.time_shift_buckets()
if edit_time is None:
edit_time = self.last_shift_time
try:
self.time_counts[self.find_bucket(edit_time)] += count
self.check_save_conditions()
except IndexError:
pass # Edit is too far back or in the future, skip it
def __setitem__(self, *args, **kwargs):
self._check_contents_present()
ret_val = self.contents.__setitem__(*args, **kwargs)
self.track_edit()
return ret_val
def __delitem__(self, *args, **kwargs):
self._check_contents_present()
ret_val = self.contents.__delitem__(*args, **kwargs)
self.track_edit()
return ret_val
def _build(self, *args, **kwargs):
self.clear_bucket_counts()
return self.base_class._build(self, *args, **kwargs)
def load(self, *args, **kwargs):
self.clear_bucket_counts()
return self.base_class.load(self, *args, **kwargs)
def save(self, *args, **kwargs):
self.clear_bucket_counts()
return self.base_class.save(self, *args, **kwargs)
def delete_saved_content(self, *args, **kwargs):
self.clear_bucket_counts()
return self.base_class.delete_saved_content(self, *args, **kwargs)
class AutoSyncCache(AutoSyncCacheBase, PersistentCache):
'''
AutoSyncCache defaults to a pickle basis over PersistentCache.
'''
def __init__(self, cache_name, **kwargs):
AutoSyncCacheBase.__init__(self, PersistentCache, cache_name, **kwargs)
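# Illustrative sketch (not part of the original module): an AutoSyncCache that
# saves after 100 edits within the last minute, or after at least one edit in
# the last five minutes. The cache name is a placeholder, and it is assumed here
# that PersistentCache can be constructed from just a cache name.
def _demo_autosync_cache():
    cache = AutoSyncCache('demo_cache',
                          time_checks=[TimeCount(60, 100), TimeCount(300, 1)])
    cache['answer'] = 42  # each __setitem__ is tracked and may trigger save()
    return cache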
| 39.508475 | 112 | 0.642428 |
21dc2d354a807c7e17fa2f111f5e9fc8df47d080 | 32,102 | py | Python
| esky/tests/test_esky.py | dmckeone/esky | 32b5c874b232390d1fe29ea8044cc27ceffba303 | ["BSD-3-Clause"] | null | null | null
| esky/tests/test_esky.py | dmckeone/esky | 32b5c874b232390d1fe29ea8044cc27ceffba303 | ["BSD-3-Clause"] | null | null | null
| esky/tests/test_esky.py | dmckeone/esky | 32b5c874b232390d1fe29ea8044cc27ceffba303 | ["BSD-3-Clause"] | null | null | null |
# Copyright (c) 2009-2010, Cloud Matrix Pty. Ltd.
# All rights reserved; available under the terms of the BSD License.
from __future__ import with_statement
import sys
import os
import unittest
from os.path import dirname
import subprocess
import shutil
import zipfile
import threading
import tempfile
import urllib2
import hashlib
import tarfile
import time
from contextlib import contextmanager
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from distutils.core import setup as dist_setup
from distutils import dir_util
import esky
import esky.patch
import esky.sudo
from esky import bdist_esky
from esky.bdist_esky import Executable
from esky.util import extract_zipfile, deep_extract_zipfile, get_platform, \
ESKY_CONTROL_DIR, files_differ, ESKY_APPDATA_DIR, \
really_rmtree
from esky.fstransact import FSTransaction
try:
import py2exe
except ImportError:
py2exe = None
try:
import py2app
except ImportError:
py2app = None
try:
import bbfreeze
except ImportError:
bbfreeze = None
try:
import cx_Freeze
except ImportError:
cx_Freeze = None
try:
import pypy
except ImportError:
pypy = None
sys.path.append(os.path.dirname(__file__))
def assert_freezedir_exists(dist):
assert os.path.exists(dist.freeze_dir)
if not hasattr(HTTPServer,"shutdown"):
import socket
def socketserver_shutdown(self):
try:
self.socket.close()
except socket.error:
pass
HTTPServer.shutdown = socketserver_shutdown
@contextmanager
def setenv(key,value):
oldval = os.environ.get(key,None)
os.environ[key] = value
yield
if oldval is not None:
os.environ[key] = oldval
else:
del os.environ[key]
class TestEsky(unittest.TestCase):
if py2exe is not None:
def test_esky_py2exe(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2exe"}})
def test_esky_py2exe_bundle1(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2exe",
"freezer_options": {
"bundle_files": 1}}})
def test_esky_py2exe_bundle2(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2exe",
"freezer_options": {
"bundle_files": 2}}})
def test_esky_py2exe_bundle3(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2exe",
"freezer_options": {
"bundle_files": 3}}})
def test_esky_py2exe_skiparchive(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2exe",
"freezer_options": {
"skip_archive": True}}})
def test_esky_py2exe_unbuffered(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2exe",
"freezer_options": {
"unbuffered": True}}})
def test_esky_py2exe_nocustomchainload(self):
with setenv("ESKY_NO_CUSTOM_CHAINLOAD","1"):
bscode = "_chainload = _orig_chainload\nbootstrap()"
self._run_eskytester({"bdist_esky":{"freezer_module":"py2exe",
"bootstrap_code":bscode}})
if esky.sudo.can_get_root():
def test_esky_py2exe_needsroot(self):
with setenv("ESKY_NEEDSROOT","1"):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2exe"}})
if pypy is not None:
def test_esky_py2exe_pypy(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2exe",
"compile_bootstrap_exes":1}})
def test_esky_py2exe_unbuffered_pypy(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2exe",
"compile_bootstrap_exes":1,
"freezer_options": {
"unbuffered": True}}})
if py2app is not None:
def test_esky_py2app(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2app"}})
if esky.sudo.can_get_root():
def test_esky_py2app_needsroot(self):
with setenv("ESKY_NEEDSROOT","1"):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2app"}})
if pypy is not None:
def test_esky_py2app_pypy(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"py2app",
"compile_bootstrap_exes":1}})
if bbfreeze is not None:
def test_esky_bbfreeze(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"bbfreeze"}})
if sys.platform == "win32":
def test_esky_bbfreeze_nocustomchainload(self):
with setenv("ESKY_NO_CUSTOM_CHAINLOAD","1"):
bscode = "_chainload = _orig_chainload\nbootstrap()"
self._run_eskytester({"bdist_esky":{"freezer_module":"bbfreeze",
"bootstrap_code":bscode}})
if esky.sudo.can_get_root():
def test_esky_bbfreeze_needsroot(self):
with setenv("ESKY_NEEDSROOT","1"):
self._run_eskytester({"bdist_esky":{"freezer_module":"bbfreeze"}})
if pypy is not None:
def test_esky_bbfreeze_pypy(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"bbfreeze",
"compile_bootstrap_exes":1}})
if cx_Freeze is not None:
def test_esky_cxfreeze(self):
self._run_eskytester({"bdist_esky":{"freezer_module":"cxfreeze"}})
if sys.platform == "win32":
def test_esky_cxfreeze_nocustomchainload(self):
with setenv("ESKY_NO_CUSTOM_CHAINLOAD","1"):
bscode = ["_chainload = _orig_chainload",None]
self._run_eskytester({"bdist_esky":{"freezer_module":"cxfreeze",
"bootstrap_code":bscode}})
if esky.sudo.can_get_root():
def test_esky_cxfreeze_needsroot(self):
with setenv("ESKY_NEEDSROOT","1"):
self._run_eskytester({"bdist_esky":{"freezer_module":"cxfreeze"}})
if pypy is not None:
def test_esky_cxfreeze_pypy(self):
with setenv("ESKY_NO_CUSTOM_CHAINLOAD","1"):
self._run_eskytester({"bdist_esky":{"freezer_module":"cxfreeze",
"compile_bootstrap_exes":1}})
def _run_eskytester(self,options):
"""Build and run the eskytester app using the given distutils options.
The "eskytester" application can be found next to this file, and the
sequence of tests performed range across "script1.py" to "script3.py".
"""
olddir = os.path.abspath(os.curdir)
# tdir = os.path.join(os.path.dirname(__file__),"DIST")
# if os.path.exists(tdir):
# really_rmtree(tdir)
# os.mkdir(tdir)
tdir = tempfile.mkdtemp()
server = None
script2 = None
try:
options.setdefault("build",{})["build_base"] = os.path.join(tdir,"build")
options.setdefault("bdist",{})["dist_dir"] = os.path.join(tdir,"dist")
# Set some callbacks to test that they work correctly
options.setdefault("bdist_esky",{}).setdefault("pre_freeze_callback","esky.tests.test_esky.assert_freezedir_exists")
options.setdefault("bdist_esky",{}).setdefault("pre_zip_callback",assert_freezedir_exists)
platform = get_platform()
deploydir = "deploy.%s" % (platform,)
esky_root = dirname(dirname(dirname(__file__)))
os.chdir(tdir)
shutil.copytree(os.path.join(esky_root,"esky","tests","eskytester"),"eskytester")
dir_util._path_created.clear()
# Build three increasing versions of the test package.
# Version 0.2 will include a bundled MSVCRT on win32.
# Version 0.3 will be distributed as a patch.
metadata = dict(name="eskytester",packages=["eskytester"],author="rfk",
description="the esky test package",
data_files=[("data",["eskytester/datafile.txt"])],
package_data={"eskytester":["pkgdata.txt"]},)
options2 = options.copy()
options2["bdist_esky"] = options["bdist_esky"].copy()
options2["bdist_esky"]["bundle_msvcrt"] = True
script1 = "eskytester/script1.py"
script2 = Executable([None,open("eskytester/script2.py")],name="script2")
script3 = "eskytester/script3.py"
dist_setup(version="0.1",scripts=[script1],options=options,script_args=["bdist_esky"],**metadata)
dist_setup(version="0.2",scripts=[script1,script2],options=options2,script_args=["bdist_esky"],**metadata)
dist_setup(version="0.3",scripts=[script2,script3],options=options,script_args=["bdist_esky_patch"],**metadata)
os.unlink(os.path.join(tdir,"dist","eskytester-0.3.%s.zip"%(platform,)))
# Check that the patches apply cleanly
uzdir = os.path.join(tdir,"unzip")
deep_extract_zipfile(os.path.join(tdir,"dist","eskytester-0.1.%s.zip"%(platform,)),uzdir)
with open(os.path.join(tdir,"dist","eskytester-0.3.%s.from-0.1.patch"%(platform,)),"rb") as f:
esky.patch.apply_patch(uzdir,f)
shutil.rmtree(uzdir)
deep_extract_zipfile(os.path.join(tdir,"dist","eskytester-0.2.%s.zip"%(platform,)),uzdir)
with open(os.path.join(tdir,"dist","eskytester-0.3.%s.from-0.2.patch"%(platform,)),"rb") as f:
esky.patch.apply_patch(uzdir,f)
shutil.rmtree(uzdir)
# Serve the updates at http://localhost:8000/dist/
print "running local update server"
server = HTTPServer(("localhost",8000),SimpleHTTPRequestHandler)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
# Set up the deployed esky environment for the initial version
zfname = os.path.join(tdir,"dist","eskytester-0.1.%s.zip"%(platform,))
os.mkdir(deploydir)
extract_zipfile(zfname,deploydir)
# Run the scripts in order.
if options["bdist_esky"]["freezer_module"] == "py2app":
appdir = os.path.join(deploydir,os.listdir(deploydir)[0])
cmd1 = os.path.join(appdir,"Contents","MacOS","script1")
cmd2 = os.path.join(appdir,"Contents","MacOS","script2")
cmd3 = os.path.join(appdir,"Contents","MacOS","script3")
else:
appdir = deploydir
if sys.platform == "win32":
cmd1 = os.path.join(deploydir,"script1.exe")
cmd2 = os.path.join(deploydir,"script2.exe")
cmd3 = os.path.join(deploydir,"script3.exe")
else:
cmd1 = os.path.join(deploydir,"script1")
cmd2 = os.path.join(deploydir,"script2")
cmd3 = os.path.join(deploydir,"script3")
print "spawning eskytester script1", options["bdist_esky"]["freezer_module"]
os.unlink(os.path.join(tdir,"dist","eskytester-0.1.%s.zip"%(platform,)))
p = subprocess.Popen(cmd1)
assert p.wait() == 0
os.unlink(os.path.join(appdir,"tests-completed"))
print "spawning eskytester script2"
os.unlink(os.path.join(tdir,"dist","eskytester-0.2.%s.zip"%(platform,)))
p = subprocess.Popen(cmd2)
assert p.wait() == 0
os.unlink(os.path.join(appdir,"tests-completed"))
print "spawning eskytester script3"
p = subprocess.Popen(cmd3)
assert p.wait() == 0
os.unlink(os.path.join(appdir,"tests-completed"))
finally:
if script2:
script2.script[1].close()
os.chdir(olddir)
if sys.platform == "win32":
                # wait for the cleanup-at-exit process to finish
time.sleep(4)
really_rmtree(tdir)
if server:
server.shutdown()
def test_esky_locking(self):
"""Test that locking an Esky works correctly."""
platform = get_platform()
appdir = tempfile.mkdtemp()
try:
vdir = os.path.join(appdir,ESKY_APPDATA_DIR,"testapp-0.1.%s" % (platform,))
os.makedirs(vdir)
os.mkdir(os.path.join(vdir,ESKY_CONTROL_DIR))
open(os.path.join(vdir,ESKY_CONTROL_DIR,"bootstrap-manifest.txt"),"wb").close()
e1 = esky.Esky(appdir,"http://example.com/downloads/")
assert e1.name == "testapp"
assert e1.version == "0.1"
assert e1.platform == platform
e2 = esky.Esky(appdir,"http://example.com/downloads/")
assert e2.name == "testapp"
assert e2.version == "0.1"
assert e2.platform == platform
locked = []; errors = [];
trigger1 = threading.Event(); trigger2 = threading.Event()
def runit(e,t1,t2):
def runme():
try:
e.lock()
except Exception, err:
errors.append(err)
else:
locked.append(e)
t1.set()
t2.wait()
return runme
t1 = threading.Thread(target=runit(e1,trigger1,trigger2))
t2 = threading.Thread(target=runit(e2,trigger2,trigger1))
t1.start()
t2.start()
t1.join()
t2.join()
assert len(locked) == 1
assert (e1 in locked or e2 in locked)
assert len(errors) == 1
assert isinstance(errors[0],esky.EskyLockedError)
finally:
shutil.rmtree(appdir)
def test_esky_lock_breaking(self):
"""Test that breaking the lock on an Esky works correctly."""
appdir = tempfile.mkdtemp()
try:
os.makedirs(os.path.join(appdir,ESKY_APPDATA_DIR,"testapp-0.1",ESKY_CONTROL_DIR))
open(os.path.join(appdir,ESKY_APPDATA_DIR,"testapp-0.1",ESKY_CONTROL_DIR,"bootstrap-manifest.txt"),"wb").close()
e1 = esky.Esky(appdir,"http://example.com/downloads/")
e2 = esky.Esky(appdir,"http://example.com/downloads/")
trigger1 = threading.Event(); trigger2 = threading.Event()
errors = []
def run1():
try:
e1.lock()
except Exception, err:
errors.append(err)
trigger1.set()
trigger2.wait()
def run2():
trigger1.wait()
try:
e2.lock()
except esky.EskyLockedError:
pass
except Exception, err:
errors.append(err)
else:
errors.append("locked when I shouldn't have")
e2.lock_timeout = 0.1
time.sleep(0.5)
try:
e2.lock()
except Exception, err:
errors.append(err)
trigger2.set()
t1 = threading.Thread(target=run1)
t2 = threading.Thread(target=run2)
t1.start()
t2.start()
t1.join()
t2.join()
assert len(errors) == 0, str(errors)
finally:
shutil.rmtree(appdir)
def test_README(self):
"""Ensure that the README is in sync with the docstring.
This test should always pass; if the README is out of sync it just updates
it with the contents of esky.__doc__.
"""
dirname = os.path.dirname
readme = os.path.join(dirname(dirname(dirname(__file__))),"README.rst")
if not os.path.isfile(readme):
f = open(readme,"wb")
f.write(esky.__doc__.encode())
f.close()
else:
f = open(readme,"rb")
if f.read() != esky.__doc__:
f.close()
f = open(readme,"wb")
f.write(esky.__doc__.encode())
f.close()
class TestFSTransact(unittest.TestCase):
"""Testcases for FSTransact."""
def setUp(self):
self.testdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.testdir)
def path(self,path):
return os.path.join(self.testdir,path)
def setContents(self,path,contents=""):
if not os.path.isdir(os.path.dirname(self.path(path))):
os.makedirs(os.path.dirname(self.path(path)))
with open(self.path(path),"wb") as f:
f.write(contents.encode())
def assertContents(self,path,contents):
with open(self.path(path),"rb") as f:
self.assertEquals(f.read().decode(),contents)
def test_no_move_outside_root(self):
self.setContents("file1","hello world")
trn = FSTransaction(self.testdir)
trn.move(self.path("file1"),"file2")
trn.commit()
self.assertContents("file2","hello world")
trn = FSTransaction(self.testdir)
self.assertRaises(ValueError,trn.move,self.path("file2"),"../file1")
trn.abort()
def test_move_file(self):
self.setContents("file1","hello world")
trn = FSTransaction()
trn.move(self.path("file1"),self.path("file2"))
self.assertContents("file1","hello world")
self.assertFalse(os.path.exists(self.path("file2")))
trn.commit()
self.assertContents("file2","hello world")
self.assertFalse(os.path.exists(self.path("file1")))
def test_move_file_with_unicode_name(self):
self.setContents(u"file\N{SNOWMAN}","hello world")
trn = FSTransaction()
trn.move(self.path(u"file\N{SNOWMAN}"),self.path("file2"))
self.assertContents(u"file\N{SNOWMAN}","hello world")
self.assertFalse(os.path.exists(self.path("file2")))
trn.commit()
self.assertContents("file2","hello world")
self.assertFalse(os.path.exists(self.path(u"file\N{SNOWMAN}")))
def test_copy_file(self):
self.setContents("file1","hello world")
trn = FSTransaction()
trn.copy(self.path("file1"),self.path("file2"))
self.assertContents("file1","hello world")
self.assertFalse(os.path.exists(self.path("file2")))
trn.commit()
self.assertContents("file1","hello world")
self.assertContents("file2","hello world")
def test_move_dir(self):
self.setContents("dir1/file1","hello world")
self.setContents("dir1/file2","how are you?")
self.setContents("dir1/subdir/file3","fine thanks")
trn = FSTransaction()
trn.move(self.path("dir1"),self.path("dir2"))
self.assertContents("dir1/file1","hello world")
self.assertFalse(os.path.exists(self.path("dir2")))
trn.commit()
self.assertContents("dir2/file1","hello world")
self.assertContents("dir2/file2","how are you?")
self.assertContents("dir2/subdir/file3","fine thanks")
self.assertFalse(os.path.exists(self.path("dir1")))
def test_copy_dir(self):
self.setContents("dir1/file1","hello world")
self.setContents("dir1/file2","how are you?")
self.setContents("dir1/subdir/file3","fine thanks")
trn = FSTransaction()
trn.copy(self.path("dir1"),self.path("dir2"))
self.assertContents("dir1/file1","hello world")
self.assertFalse(os.path.exists(self.path("dir2")))
trn.commit()
self.assertContents("dir2/file1","hello world")
self.assertContents("dir2/file2","how are you?")
self.assertContents("dir2/subdir/file3","fine thanks")
self.assertContents("dir1/file1","hello world")
self.assertContents("dir1/file2","how are you?")
self.assertContents("dir1/subdir/file3","fine thanks")
def test_remove(self):
self.setContents("dir1/file1","hello there world")
trn = FSTransaction()
trn.remove(self.path("dir1/file1"))
self.assertTrue(os.path.exists(self.path("dir1/file1")))
trn.commit()
self.assertFalse(os.path.exists(self.path("dir1/file1")))
self.assertTrue(os.path.exists(self.path("dir1")))
trn = FSTransaction()
trn.remove(self.path("dir1"))
trn.commit()
self.assertFalse(os.path.exists(self.path("dir1")))
def test_remove_abort(self):
self.setContents("dir1/file1","hello there world")
trn = FSTransaction()
trn.remove(self.path("dir1/file1"))
self.assertTrue(os.path.exists(self.path("dir1/file1")))
trn.abort()
self.assertTrue(os.path.exists(self.path("dir1/file1")))
trn = FSTransaction()
trn.remove(self.path("dir1"))
trn.abort()
self.assertTrue(os.path.exists(self.path("dir1/file1")))
trn = FSTransaction()
trn.remove(self.path("dir1"))
trn.commit()
self.assertFalse(os.path.exists(self.path("dir1")))
def test_move_dir_exists(self):
self.setContents("dir1/file0","zero zero zero")
self.setContents("dir1/file1","hello world")
self.setContents("dir1/file2","how are you?")
self.setContents("dir1/subdir/file3","fine thanks")
self.setContents("dir2/file1","different contents")
self.setContents("dir2/file3","a different file")
self.setContents("dir1/subdir/file3","fine thanks")
trn = FSTransaction()
trn.move(self.path("dir1"),self.path("dir2"))
self.assertContents("dir1/file1","hello world")
trn.commit()
self.assertContents("dir2/file0","zero zero zero")
self.assertContents("dir2/file1","hello world")
self.assertContents("dir2/file2","how are you?")
self.assertFalse(os.path.exists(self.path("dir2/file3")))
self.assertContents("dir2/subdir/file3","fine thanks")
self.assertFalse(os.path.exists(self.path("dir1")))
def test_copy_dir_exists(self):
self.setContents("dir1/file0","zero zero zero")
self.setContents("dir1/file1","hello world")
self.setContents("dir1/file2","how are you?")
self.setContents("dir1/subdir/file3","fine thanks")
self.setContents("dir2/file1","different contents")
self.setContents("dir2/file3","a different file")
self.setContents("dir1/subdir/file3","fine thanks")
trn = FSTransaction()
trn.copy(self.path("dir1"),self.path("dir2"))
self.assertContents("dir1/file1","hello world")
trn.commit()
self.assertContents("dir2/file0","zero zero zero")
self.assertContents("dir2/file1","hello world")
self.assertContents("dir2/file2","how are you?")
self.assertFalse(os.path.exists(self.path("dir2/file3")))
self.assertContents("dir2/subdir/file3","fine thanks")
self.assertContents("dir1/file0","zero zero zero")
self.assertContents("dir1/file1","hello world")
self.assertContents("dir1/file2","how are you?")
self.assertContents("dir1/subdir/file3","fine thanks")
def test_move_dir_over_file(self):
self.setContents("dir1/file0","zero zero zero")
self.setContents("dir2","actually a file")
trn = FSTransaction()
trn.move(self.path("dir1"),self.path("dir2"))
self.assertContents("dir1/file0","zero zero zero")
trn.commit()
self.assertContents("dir2/file0","zero zero zero")
self.assertFalse(os.path.exists(self.path("dir1")))
def test_copy_dir_over_file(self):
self.setContents("dir1/file0","zero zero zero")
self.setContents("dir2","actually a file")
trn = FSTransaction()
trn.copy(self.path("dir1"),self.path("dir2"))
self.assertContents("dir1/file0","zero zero zero")
trn.commit()
self.assertContents("dir2/file0","zero zero zero")
self.assertContents("dir1/file0","zero zero zero")
def test_move_file_over_dir(self):
self.setContents("file0","zero zero zero")
self.setContents("dir2/myfile","hahahahaha!")
trn = FSTransaction()
trn.move(self.path("file0"),self.path("dir2"))
self.assertContents("file0","zero zero zero")
self.assertContents("dir2/myfile","hahahahaha!")
trn.commit()
self.assertContents("dir2","zero zero zero")
self.assertFalse(os.path.exists(self.path("file0")))
def test_copy_file_over_dir(self):
self.setContents("file0","zero zero zero")
self.setContents("dir2/myfile","hahahahaha!")
trn = FSTransaction()
trn.copy(self.path("file0"),self.path("dir2"))
self.assertContents("file0","zero zero zero")
self.assertContents("dir2/myfile","hahahahaha!")
trn.commit()
self.assertContents("dir2","zero zero zero")
self.assertContents("file0","zero zero zero")
class TestPatch(unittest.TestCase):
"""Testcases for esky.patch."""
_TEST_FILES = (
("pyenchant-1.2.0.tar.gz","2fefef0868b110b1da7de89c08344dd2"),
("pyenchant-1.5.2.tar.gz","fa1e4f3f3c473edd98c7bb0e46eea352"),
("pyenchant-1.6.0.tar.gz","3fd7336989764d8d379a367236518439"),
)
_TEST_FILES_URL = "http://pypi.python.org/packages/source/p/pyenchant/"
def setUp(self):
self.tests_root = dirname(__file__)
platform = get_platform()
self.tfdir = tfdir = os.path.join(self.tests_root,"patch-test-files")
self.workdir = workdir = os.path.join(self.tests_root,"patch-test-temp."+platform)
if not os.path.isdir(tfdir):
os.makedirs(tfdir)
if not os.path.isdir(workdir):
os.makedirs(workdir)
# Ensure we have the expected test files.
# Download from PyPI if necessary.
for (tfname,hash) in self._TEST_FILES:
tfpath = os.path.join(tfdir,tfname)
if not os.path.exists(tfpath):
data = urllib2.urlopen(self._TEST_FILES_URL+tfname).read()
assert hashlib.md5(data).hexdigest() == hash
with open(tfpath,"wb") as f:
f.write(data)
def tearDown(self):
shutil.rmtree(self.workdir)
def test_patch_bigfile(self):
tdir = tempfile.mkdtemp()
try:
data = [os.urandom(100)*10 for i in xrange(6)]
for nm in ("source","target"):
with open(os.path.join(tdir,nm),"wb") as f:
for i in xrange(1000):
for chunk in data:
f.write(chunk)
data[2],data[3] = data[3],data[2]
with open(os.path.join(tdir,"patch"),"wb") as f:
esky.patch.write_patch(os.path.join(tdir,"source"),os.path.join(tdir,"target"),f)
dgst1 = esky.patch.calculate_digest(os.path.join(tdir,"target"))
dgst2 = esky.patch.calculate_digest(os.path.join(tdir,"source"))
self.assertNotEquals(dgst1,dgst2)
with open(os.path.join(tdir,"patch"),"rb") as f:
esky.patch.apply_patch(os.path.join(tdir,"source"),f)
dgst3 = esky.patch.calculate_digest(os.path.join(tdir,"source"))
self.assertEquals(dgst1,dgst3)
finally:
shutil.rmtree(tdir)
def test_diffing_back_and_forth(self):
for (tf1,_) in self._TEST_FILES:
for (tf2,_) in self._TEST_FILES:
path1 = self._extract(tf1,"source")
path2 = self._extract(tf2,"target")
with open(os.path.join(self.workdir,"patch"),"wb") as f:
esky.patch.write_patch(path1,path2,f)
if tf1 != tf2:
self.assertNotEquals(esky.patch.calculate_digest(path1),
esky.patch.calculate_digest(path2))
with open(os.path.join(self.workdir,"patch"),"rb") as f:
esky.patch.apply_patch(path1,f)
self.assertEquals(esky.patch.calculate_digest(path1),
esky.patch.calculate_digest(path2))
def test_apply_patch(self):
path1 = self._extract("pyenchant-1.2.0.tar.gz","source")
path2 = self._extract("pyenchant-1.6.0.tar.gz","target")
path1 = os.path.join(path1,"pyenchant-1.2.0")
path2 = os.path.join(path2,"pyenchant-1.6.0")
pf = os.path.join(self.tfdir,"v1.2.0_to_v1.6.0.patch")
if not os.path.exists(pf):
pf = os.path.join(dirname(esky.__file__),"tests","patch-test-files","v1.2.0_to_v1.6.0.patch")
with open(pf,"rb") as f:
esky.patch.apply_patch(path1,f)
self.assertEquals(esky.patch.calculate_digest(path1),
esky.patch.calculate_digest(path2))
def test_copying_multiple_targets_from_a_single_sibling(self):
join = os.path.join
        src_dir = join(self.workdir, "source")
        tgt_dir = join(self.workdir, "target")
for dirnm in src_dir, tgt_dir:
os.mkdir(dirnm)
zf = zipfile.ZipFile(join(self.tfdir, "movefrom-source.zip"), "r")
zf.extractall(src_dir)
zf = zipfile.ZipFile(join(self.tfdir, "movefrom-target.zip"), "r")
zf.extractall(tgt_dir)
        # The two directory structures should initially be different.
self.assertNotEquals(esky.patch.calculate_digest(src_dir),
esky.patch.calculate_digest(tgt_dir))
# Create patch from source to target.
patch_fname = join(self.workdir, "patch")
with open(patch_fname, "wb") as patchfile:
esky.patch.write_patch(src_dir, tgt_dir, patchfile)
# Try to apply the patch.
with open(patch_fname, "rb") as patchfile:
esky.patch.apply_patch(src_dir, patchfile)
# Then the two directory structures should be equal.
self.assertEquals(esky.patch.calculate_digest(src_dir),
esky.patch.calculate_digest(tgt_dir))
def _extract(self,filename,dest):
dest = os.path.join(self.workdir,dest)
if os.path.exists(dest):
really_rmtree(dest)
f = tarfile.open(os.path.join(self.tfdir,filename),"r:gz")
try:
f.extractall(dest)
finally:
f.close()
return dest
class TestPatch_cxbsdiff(TestPatch):
"""Test the patching code with cx-bsdiff rather than bsdiff4."""
def setUp(self):
self.__orig_bsdiff4 = esky.patch.bsdiff4
if esky.patch.bsdiff4_cx is not None:
esky.patch.bsdiff4 = esky.patch.bsdiff4_cx
return super(TestPatch_cxbsdiff,self).setUp()
def tearDown(self):
esky.patch.bsdiff4 = self.__orig_bsdiff4
return super(TestPatch_cxbsdiff,self).tearDown()
class TestPatch_pybsdiff(TestPatch):
"""Test the patching code with pure-python bsdiff4."""
def setUp(self):
self.__orig_bsdiff4 = esky.patch.bsdiff4
esky.patch.bsdiff4 = esky.patch.bsdiff4_py
return super(TestPatch_pybsdiff,self).setUp()
def tearDown(self):
esky.patch.bsdiff4 = self.__orig_bsdiff4
return super(TestPatch_pybsdiff,self).tearDown()
class TestFilesDiffer(unittest.TestCase):
def setUp(self):
self.tdir = tempfile.mkdtemp()
def _path(self,*names):
return os.path.join(self.tdir,*names)
def _differs(self,data1,data2,start=0,stop=None):
with open(self._path("file1"),"wb") as f:
f.write(data1.encode("ascii"))
with open(self._path("file2"),"wb") as f:
f.write(data2.encode("ascii"))
return files_differ(self._path("file1"),self._path("file2"),start,stop)
def test_files_differ(self):
assert self._differs("one","two")
assert self._differs("onethreetwo","twothreeone")
assert self._differs("onethreetwo","twothreeone",3)
assert not self._differs("onethreetwo","twothreeone",3,-3)
assert self._differs("onethreetwo","twothreeone",2,-3)
assert self._differs("onethreetwo","twothreeone",3,-2)
def tearDown(self):
shutil.rmtree(self.tdir)
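# Illustrative sketch (not part of the original test module): the basic
# FSTransaction flow exercised by TestFSTransact above, against a throwaway dir.
def _demo_fstransaction():
    workdir = tempfile.mkdtemp()
    try:
        with open(os.path.join(workdir, "file1"), "wb") as f:
            f.write(b"hello world")
        trn = FSTransaction()
        trn.move(os.path.join(workdir, "file1"), os.path.join(workdir, "file2"))
        trn.commit()  # nothing is touched until commit(); abort() would roll back
        assert os.path.exists(os.path.join(workdir, "file2"))
    finally:
        shutil.rmtree(workdir)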
| 40.379874 | 124 | 0.605383 |
4637a000c8cb12b0fa1adbae9a69262d87332964 | 5,793 | py | Python
| carla-aebs/aebs.py | kpandey008/carla-aebs | 649f230d35b1f161705c5f3511b552f2cc7debd8 | ["MIT"] | 12 | 2020-11-28T09:47:52.000Z | 2022-03-28T09:09:24.000Z
| carla-aebs/aebs.py | kpandey008/carla-aebs | 649f230d35b1f161705c5f3511b552f2cc7debd8 | ["MIT"] | 2 | 2021-01-31T08:32:23.000Z | 2021-02-01T06:38:33.000Z
| carla-aebs/aebs.py | kpandey008/carla-aebs | 649f230d35b1f161705c5f3511b552f2cc7debd8 | ["MIT"] | 3 | 2021-05-25T07:01:37.000Z | 2022-02-21T10:25:09.000Z |
# Python module to demonstrate AEBS
import click
import numpy as np
import os
from models.rl_agent.ddpg_agent import ddpgAgent
from models.rl_agent.input_preprocessor import InputPreprocessor
from utils.visualize import plot_metrics
from world import World
@click.command()
@click.option(
'--mode',
type=click.Choice(['out', 'in'], case_sensitive=False), default='in',
help='Mode to run the simulation in (Out of distribution / Normal)'
)
@click.option('--gui', is_flag=True, default=False, help='Run simulation with GUI')
@click.option('--testing', is_flag=True, default=False, help='Run simulation in testing mode')
@click.option('--save-path', help='Path to save checkpoints to. Only used during training mode')
@click.option('--agent-chkpt-path', help='Path to load checkpoint for the RL agent')
@click.option('--perception-chkpt-path', help='Path to load checkpoint for the Perception LEC')
@click.option('--calibration-scores', help='Path to pre-computed calibration scores for the in-distribution data')
@click.option('--vae-chkpt-path', help='Path to load checkpoint for the VAE')
@click.option('--num-episodes', type=int, default=1, help='Number of episodes to run tests for.')
@click.option('--generate-plots', is_flag=True, default=False, help='Generate plots after completing the simulation')
def aebs(
gui=False, testing=False, num_episodes=1,
save_path=os.getcwd(), mode='in', agent_chkpt_path=None,
perception_chkpt_path=None, vae_chkpt_path=None, generate_plots=False,
calibration_scores=None
):
"""Command to run simulation in train/test mode.
For collecting data please refer to the collect command.
Sample Usage: python aebs.py --save-path /home/lexent/carla_simulation/rl_agent/ \
--num-episodes 1 \
--agent-chkpt-path /home/lexent/carla_simulation/rl_agent/ \
--perception-chkpt-path /home/lexent/carla_simulation/perception_chkpt/chkpt_87.pt \
--vae-chkpt-path /home/lexent/carla_simulation/vae_chkpt/chkpt_92.pt \
--calibration-scores /home/lexent/carla_simulation/calibration.npy\
--gui --testing --generate-plots --mode in
Args:
gui (bool, optional): [Run simulation with GUI]. Defaults to False.\n
testing (bool, optional): [Run simulation in testing mode]. Defaults to False.\n
num_episodes (int, optional): [Number of episodes to run tests for.]. Defaults to 1.\n
save_path ([type], optional): [Path to save checkpoints to. Only used during training mode]. Defaults to os.getcwd().\n
mode (str, optional): [Mode to run the simulation in (Out of distribution / Normal)]. Defaults to 'in'.\n
agent_chkpt_path ([type], optional): [Path to load checkpoint for the RL agent]. Defaults to None.\n
"""
agent = ddpgAgent(testing=testing, load_path=agent_chkpt_path)
world = World(
gui=gui, collect=False, testing=testing,
perception_chkpt=perception_chkpt_path, vae_chkpt=vae_chkpt_path,
calibration_scores=calibration_scores
)
input_preprocessor = InputPreprocessor()
if mode == 'in':
ppt_lower_limit = 0
ppt_upper_limit = 20
elif mode == 'out':
ppt_lower_limit = 60
ppt_upper_limit = 100
best_reward = -1000 # Any large negative value will do
for episode in range(num_episodes):
print(f'Running episode:{episode + 1}')
# Sample random distance and velocity values
initial_distance = np.random.normal(100, 1)
initial_velocity = np.random.uniform(25, 28)
# Sample a random precipitation parameter
precipitation = np.random.uniform(ppt_lower_limit, ppt_upper_limit)
print(f'Precipitation: {precipitation}')
# Initialize the world with the sampled params
dist, vel, status = world.init(initial_velocity, initial_distance, precipitation)
if status == 'FAILED':
print(f'Reset failed. Stopping episode {episode + 1} and continuing!')
continue
# Setup the starting state based on the state returned by resetting the world
s = (dist, vel)
s = input_preprocessor(s)
epsilon = 1.0 - (episode+1) / num_episodes
actions = []
while True:
a = agent.getAction(s, epsilon)
actions.append(a[0][0])
dist, vel, reward, episode_status = world.step(brake=a[0][0])
s_ = (dist, vel)
s_ = input_preprocessor(s_)
if testing is False:
# Train the agent if testing is disabled
agent.storeTrajectory(s, a, reward, s_, episode_status)
agent.learn()
s = s_
if episode_status == "DONE":
if reward > best_reward and testing is False:
best_save_path = os.path.join(save_path, 'best')
os.makedirs(best_save_path, exist_ok=True)
agent.save_model(best_save_path)
best_reward = reward
if testing is False:
if np.mod(episode, 10) == 0:
agent.save_model(save_path)
print(f"Episode {episode + 1} is done, the reward is {reward}")
break
# Generate plots after the simulation ends for the last episode
if generate_plots:
comp_distances = np.array(world.computed_distances)
gt_distances = np.array(world.gt_distances)
p_values = np.array(world.p_values)
actions = np.array(actions)
plot_metrics(comp_distances, gt_distances, actions, p_values, mode=mode)
if __name__ == '__main__':
aebs()
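# Hedged sketch: the command can also be invoked programmatically with click's
# test runner; all paths below are placeholders (in practice the CLI is run as
# shown in the docstring, since a CARLA server must be available).
#
# from click.testing import CliRunner
# result = CliRunner().invoke(aebs, [
#     '--testing', '--num-episodes', '1',
#     '--agent-chkpt-path', '/path/to/rl_agent/',
#     '--perception-chkpt-path', '/path/to/perception.pt',
#     '--vae-chkpt-path', '/path/to/vae.pt',
#     '--calibration-scores', '/path/to/calibration.npy',
# ])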
| 47.097561 | 127 | 0.646125 |
969e95b609f47f9eb2e5f6fde7571e7d6d648ce2 | 1,165 | py | Python
| venmo-sim/leiden/run_leiden_on_sampled_venmo_dataset.py | akatsarakis/tx_benchmarking | f8233e58bba3f4fb54d82273d7ca8631bae36ebc | ["MIT"] | 3 | 2020-07-07T17:08:41.000Z | 2022-01-10T19:25:46.000Z
| venmo-sim/leiden/run_leiden_on_sampled_venmo_dataset.py | akatsarakis/tx_benchmarking | f8233e58bba3f4fb54d82273d7ca8631bae36ebc | ["MIT"] | null | null | null
| venmo-sim/leiden/run_leiden_on_sampled_venmo_dataset.py | akatsarakis/tx_benchmarking | f8233e58bba3f4fb54d82273d7ca8631bae36ebc | ["MIT"] | null | null | null |
import csv
import leidenalg
import igraph as ig
# build igraph from venmo dataset
fp = open("cleaned_normalized_txes_sampled.csv", "r") # , encoding='utf-8')
venmo_edges = csv.reader(fp)
G = ig.Graph(directed=True)  # TODO: make the graph weighted (edge weight = edge multiplicity)
# the data does not specify the amount of each transaction, since Venmo does not make this data public.
G.add_vertices(7178381) # For this given dataset, total number of vertices is already known
uid_dict = dict()
for venmo_edge in venmo_edges:
aid = venmo_edge[0]
if aid not in uid_dict:
uid_dict[aid] = len(uid_dict)
anode = uid_dict[aid]
tid = venmo_edge[1]
if tid not in uid_dict:
uid_dict[tid] = len(uid_dict)
tnode = uid_dict[tid]
try:
G.add_edge(anode, tnode)
    except Exception as err:
        print("Failed to add edge:", err, aid, tid, anode, tnode)
break
# print(G) # debug
fp.close()
# run Leiden algorithm
part = leidenalg.find_partition(G, leidenalg.ModularityVertexPartition)
# print(part) # debug
# save result into file
fp = open("cleaned_normalized_clustered_venmo_dataset.txt", "w")
print(part, file=fp)
fp.close()
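# Hedged post-processing sketch: leidenalg partitions behave like igraph
# VertexClustering objects, so the community count and sizes can be inspected
# directly, e.g.:
#
# print(len(part), "communities found")
# print(sorted(part.sizes(), reverse=True)[:10])  # ten largest community sizes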
| 28.414634 | 107 | 0.695279 |
40bea55f9d0220d34ba36a7b034d6613482bcf2d | 25,440 | py | Python
| samtranslator/open_api/open_api.py | ejafarli/serverless-application-model | 6355433d1f0dec8624417180b0c989afd2d2930a | ["Apache-2.0"] | 1 | 2020-10-27T14:27:14.000Z | 2020-10-27T14:27:14.000Z
| samtranslator/open_api/open_api.py | ejafarli/serverless-application-model | 6355433d1f0dec8624417180b0c989afd2d2930a | ["Apache-2.0"] | null | null | null
| samtranslator/open_api/open_api.py | ejafarli/serverless-application-model | 6355433d1f0dec8624417180b0c989afd2d2930a | ["Apache-2.0"] | null | null | null |
import copy
import re
from six import string_types
from samtranslator.model.intrinsics import ref
from samtranslator.model.intrinsics import make_conditional
from samtranslator.model.intrinsics import is_intrinsic
from samtranslator.model.exceptions import InvalidDocumentException, InvalidTemplateException
import json
class OpenApiEditor(object):
"""
    Wrapper class capable of parsing and generating OpenApi JSON. It implements only as much of the OpenApi
    spec as SAM cares about, and it is built to handle "partial Swagger", i.e. Swagger that is incomplete and
    would not pass spec validation. This is necessary because SAM iteratively builds the Swagger starting from
    an empty skeleton.
"""
_X_APIGW_INTEGRATION = "x-amazon-apigateway-integration"
_X_APIGW_TAG_VALUE = "x-amazon-apigateway-tag-value"
_X_APIGW_CORS = "x-amazon-apigateway-cors"
_CONDITIONAL_IF = "Fn::If"
_X_ANY_METHOD = "x-amazon-apigateway-any-method"
_ALL_HTTP_METHODS = ["OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "PATCH"]
_DEFAULT_PATH = "$default"
def __init__(self, doc):
"""
Initialize the class with a swagger dictionary. This class creates a copy of the Swagger and performs all
modifications on this copy.
:param dict doc: OpenApi document as a dictionary
:raises ValueError: If the input OpenApi document does not meet the basic OpenApi requirements.
"""
if not OpenApiEditor.is_valid(doc):
raise ValueError(
"Invalid OpenApi document. "
"Invalid values or missing keys for 'openapi' or 'paths' in 'DefinitionBody'."
)
self._doc = copy.deepcopy(doc)
self.paths = self._doc["paths"]
self.security_schemes = self._doc.get("components", {}).get("securitySchemes", {})
self.definitions = self._doc.get("definitions", {})
self.tags = self._doc.get("tags", [])
def get_path(self, path):
"""
Returns the contents of a path, extracting them out of a condition if necessary
:param path: path name
"""
path_dict = self.paths.get(path)
if isinstance(path_dict, dict) and self._CONDITIONAL_IF in path_dict:
path_dict = path_dict[self._CONDITIONAL_IF][1]
return path_dict
def has_path(self, path, method=None):
"""
Returns True if this OpenApi has the given path and optional method
:param string path: Path name
:param string method: HTTP method
:return: True, if this path/method is present in the document
"""
method = self._normalize_method_name(method)
path_dict = self.get_path(path)
path_dict_exists = path_dict is not None
if method:
return path_dict_exists and method in path_dict
return path_dict_exists
def get_integration_function_logical_id(self, path_name, method_name):
"""
Retrieves the function logical id in a lambda integration if it exists
If it doesn't exist, returns false
:param path_name: name of the path
:param method_name: name of the method
"""
if not self.has_integration(path_name, method_name):
return False
method_name = self._normalize_method_name(method_name)
# Get the path
path = self.get_path(path_name)
# Get the method contents
# We only want the first one in case there are multiple (in a conditional)
method = self.get_method_contents(path[method_name])[0]
integration = method.get(self._X_APIGW_INTEGRATION, {})
# Extract the integration uri out of a conditional if necessary
uri = integration.get("uri")
if not isinstance(uri, dict):
return ""
if self._CONDITIONAL_IF in uri:
arn = uri[self._CONDITIONAL_IF][1].get("Fn::Sub")
else:
arn = uri.get("Fn::Sub", "")
# Extract lambda integration (${LambdaName.Arn}) and split ".Arn" off from it
regex = "([A-Za-z0-9]+\.Arn)"
match = re.findall(regex, arn)[0].split(".Arn")[0]
return match
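    # Illustrative example: for an integration uri such as
    #   {"Fn::Sub": "arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${MyFunction.Arn}/invocations"}
    # the regex above captures "MyFunction.Arn" and this method returns "MyFunction".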
def method_has_integration(self, method):
"""
Returns true if the given method contains a valid method definition.
This uses the get_method_contents function to handle conditionals.
:param dict method: method dictionary
:return: true if method has one or multiple integrations
"""
for method_definition in self.get_method_contents(method):
if self.method_definition_has_integration(method_definition):
return True
return False
def method_definition_has_integration(self, method_definition):
"""
Checks a method definition to make sure it has an apigw integration
:param dict method_defintion: method definition dictionary
:return: True if an integration exists
"""
if method_definition.get(self._X_APIGW_INTEGRATION):
return True
return False
def get_method_contents(self, method):
"""
Returns the swagger contents of the given method. This checks to see if a conditional block
has been used inside of the method, and, if so, returns the method contents that are
inside of the conditional.
:param dict method: method dictionary
:return: list of swagger component dictionaries for the method
"""
if self._CONDITIONAL_IF in method:
return method[self._CONDITIONAL_IF][1:]
return [method]
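    # Illustrative example: a conditional method definition has the shape
    #   {"Fn::If": ["MyCondition", {<contents if true>}, {<contents if false>}]}
    # and this method returns both branch dictionaries so callers can edit each one.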
def has_integration(self, path, method):
"""
Checks if an API Gateway integration is already present at the given path/method
:param string path: Path name
:param string method: HTTP method
:return: True, if an API Gateway integration is already present
"""
method = self._normalize_method_name(method)
path_dict = self.get_path(path)
return (
self.has_path(path, method)
and isinstance(path_dict[method], dict)
and self.method_has_integration(path_dict[method])
) # Integration present and non-empty
def add_path(self, path, method=None):
"""
Adds the path/method combination to the Swagger, if not already present
:param string path: Path name
:param string method: HTTP method
:raises ValueError: If the value of `path` in Swagger is not a dictionary
"""
method = self._normalize_method_name(method)
path_dict = self.paths.setdefault(path, {})
if not isinstance(path_dict, dict):
            # Either the customer has provided invalid Swagger, or this class has corrupted it somehow
raise InvalidDocumentException(
[
InvalidTemplateException(
"Value of '{}' path must be a dictionary according to Swagger spec.".format(path)
)
]
)
if self._CONDITIONAL_IF in path_dict:
path_dict = path_dict[self._CONDITIONAL_IF][1]
path_dict.setdefault(method, {})
def add_lambda_integration(
self, path, method, integration_uri, method_auth_config=None, api_auth_config=None, condition=None
):
"""
Adds aws_proxy APIGW integration to the given path+method.
:param string path: Path name
:param string method: HTTP Method
:param string integration_uri: URI for the integration.
"""
method = self._normalize_method_name(method)
if self.has_integration(path, method):
# Not throwing an error- we will add lambda integrations to existing swagger if not present
return
self.add_path(path, method)
# Wrap the integration_uri in a Condition if one exists on that function
# This is necessary so CFN doesn't try to resolve the integration reference.
if condition:
integration_uri = make_conditional(condition, integration_uri)
path_dict = self.get_path(path)
path_dict[method][self._X_APIGW_INTEGRATION] = {
"type": "aws_proxy",
"httpMethod": "POST",
"payloadFormatVersion": "2.0",
"uri": integration_uri,
}
if path == self._DEFAULT_PATH and method == self._X_ANY_METHOD:
path_dict[method]["isDefaultRoute"] = True
# If 'responses' key is *not* present, add it with an empty dict as value
path_dict[method].setdefault("responses", {})
# If a condition is present, wrap all method contents up into the condition
if condition:
path_dict[method] = make_conditional(condition, path_dict[method])
def make_path_conditional(self, path, condition):
"""
Wrap entire API path definition in a CloudFormation if condition.
:param path: path name
:param condition: condition name
"""
self.paths[path] = make_conditional(condition, self.paths[path])
def iter_on_path(self):
"""
Yields all the paths available in the Swagger. As a caller, if you add new paths to Swagger while iterating,
they will not show up in this iterator
:yields string: Path name
"""
for path, value in self.paths.items():
yield path
def add_timeout_to_method(self, api, path, method_name, timeout):
"""
Adds a timeout to this path/method.
:param dict api: Reference to the related Api's properties as defined in the template.
:param string path: Path name
:param string method_name: Method name
:param int timeout: Timeout amount, in milliseconds
"""
normalized_method_name = self._normalize_method_name(method_name)
for method_definition in self.get_method_contents(self.get_path(path)[normalized_method_name]):
if self.method_definition_has_integration(method_definition):
method_definition[self._X_APIGW_INTEGRATION]["timeoutInMillis"] = timeout
def add_path_parameters_to_method(self, api, path, method_name, path_parameters):
"""
Adds path parameters to this path + method
:param dict api: Reference to the related Api's properties as defined in the template.
:param string path: Path name
:param string method_name: Method name
:param list path_parameters: list of strings of path parameters
"""
normalized_method_name = self._normalize_method_name(method_name)
for method_definition in self.get_method_contents(self.get_path(path)[normalized_method_name]):
# create path parameter list
# add it here if it doesn't exist, merge with existing otherwise.
method_definition.setdefault("parameters", [])
for param in path_parameters:
# find an existing parameter with this name if it exists
existing_parameter = next(
(
existing_parameter
for existing_parameter in method_definition.get("parameters", [])
if existing_parameter.get("name") == param
),
None,
)
if existing_parameter:
# overwrite parameter values for existing path parameter
existing_parameter["in"] = "path"
existing_parameter["required"] = True
else:
parameter = {"name": param, "in": "path", "required": True}
method_definition.get("parameters").append(parameter)
def add_payload_format_version_to_method(self, api, path, method_name, payload_format_version="2.0"):
"""
Adds a payload format version to this path/method.
:param dict api: Reference to the related Api's properties as defined in the template.
:param string path: Path name
:param string method_name: Method name
:param string payload_format_version: payload format version sent to the integration
"""
normalized_method_name = self._normalize_method_name(method_name)
for method_definition in self.get_method_contents(self.get_path(path)[normalized_method_name]):
if self.method_definition_has_integration(method_definition):
method_definition[self._X_APIGW_INTEGRATION]["payloadFormatVersion"] = payload_format_version
def add_authorizers_security_definitions(self, authorizers):
"""
Add Authorizer definitions to the securityDefinitions part of Swagger.
        :param dict authorizers: Dict of Authorizer configurations which get translated to securityDefinitions.
"""
self.security_schemes = self.security_schemes or {}
for authorizer_name, authorizer in authorizers.items():
self.security_schemes[authorizer_name] = authorizer.generate_openapi()
def set_path_default_authorizer(self, path, default_authorizer, authorizers, api_authorizers):
"""
Adds the default_authorizer to the security block for each method on this path unless an Authorizer
was defined at the Function/Path/Method level. This is intended to be used to set the
authorizer security restriction for all api methods based upon the default configured in the
Serverless API.
:param string path: Path name
:param string default_authorizer: Name of the authorizer to use as the default. Must be a key in the
authorizers param.
        :param dict authorizers: Dict of Authorizer configurations defined on the related Api.
        :param dict api_authorizers: Authorizer configurations used to resolve the default authorizer's scopes.
        """
for method_name, method in self.get_path(path).items():
normalized_method_name = self._normalize_method_name(method_name)
# Excluding parameters section
if normalized_method_name == "parameters":
continue
if normalized_method_name != "options":
normalized_method_name = self._normalize_method_name(method_name)
# It is possible that the method could have two definitions in a Fn::If block.
for method_definition in self.get_method_contents(self.get_path(path)[normalized_method_name]):
# If no integration given, then we don't need to process this definition (could be AWS::NoValue)
if not self.method_definition_has_integration(method_definition):
continue
existing_security = method_definition.get("security", [])
if existing_security:
return
authorizer_list = []
if authorizers:
authorizer_list.extend(authorizers.keys())
security_dict = dict()
security_dict[default_authorizer] = self._get_authorization_scopes(
api_authorizers, default_authorizer
)
authorizer_security = [security_dict]
security = authorizer_security
if security:
method_definition["security"] = security
def add_auth_to_method(self, path, method_name, auth, api):
"""
Adds auth settings for this path/method. Auth settings currently consist of Authorizers
but this method will eventually include setting other auth settings such as Resource Policy, etc.
This is used to configure the security for individual functions.
:param string path: Path name
:param string method_name: Method name
:param dict auth: Auth configuration such as Authorizers
:param dict api: Reference to the related Api's properties as defined in the template.
"""
method_authorizer = auth and auth.get("Authorizer")
authorization_scopes = auth.get("AuthorizationScopes", [])
api_auth = api and api.get("Auth")
authorizers = api_auth and api_auth.get("Authorizers")
if method_authorizer:
self._set_method_authorizer(path, method_name, method_authorizer, authorizers, authorization_scopes)
def _set_method_authorizer(self, path, method_name, authorizer_name, authorizers, authorization_scopes=[]):
"""
Adds the authorizer_name to the security block for each method on this path.
This is used to configure the authorizer for individual functions.
:param string path: Path name
:param string method_name: Method name
:param string authorizer_name: Name of the authorizer to use. Must be a key in the
authorizers param.
:param list authorization_scopes: list of strings that are the auth scopes for this method
"""
normalized_method_name = self._normalize_method_name(method_name)
# It is possible that the method could have two definitions in a Fn::If block.
for method_definition in self.get_method_contents(self.get_path(path)[normalized_method_name]):
# If no integration given, then we don't need to process this definition (could be AWS::NoValue)
if not self.method_definition_has_integration(method_definition):
continue
existing_security = method_definition.get("security", [])
security_dict = dict()
security_dict[authorizer_name] = []
if authorizer_name != "NONE":
method_authorization_scopes = authorizers[authorizer_name].get("AuthorizationScopes")
if authorization_scopes:
method_authorization_scopes = authorization_scopes
if authorizers[authorizer_name] and method_authorization_scopes:
security_dict[authorizer_name] = method_authorization_scopes
authorizer_security = [security_dict]
# This assumes there are no authorizers already configured in the existing security block
security = existing_security + authorizer_security
if security:
method_definition["security"] = security
def add_tags(self, tags):
"""
Adds tags to the OpenApi definition using an ApiGateway extension for tag values.
:param dict tags: dictionary of tagName:tagValue pairs.
"""
for name, value in tags.items():
# find an existing tag with this name if it exists
existing_tag = next((existing_tag for existing_tag in self.tags if existing_tag.get("name") == name), None)
if existing_tag:
# overwrite tag value for an existing tag
existing_tag[self._X_APIGW_TAG_VALUE] = value
else:
tag = {"name": name, self._X_APIGW_TAG_VALUE: value}
self.tags.append(tag)
def add_cors(
self,
allow_origins,
allow_headers=None,
allow_methods=None,
expose_headers=None,
max_age=None,
allow_credentials=None,
):
"""
        Add CORS configuration to this Api, stored under the _X_APIGW_CORS extension key in the open api definition
Following this guide:
https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-cors.html
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-apigatewayv2-api-cors.html
        :param list/dict allow_origins: Comma separated list of allowed origins.
            Value can also be an intrinsic function dict.
        :param list/dict allow_headers: Comma separated list of allowed headers.
            Value can also be an intrinsic function dict.
        :param list/dict allow_methods: Comma separated list of allowed methods.
            Value can also be an intrinsic function dict.
        :param list/dict expose_headers: Comma separated list of exposed headers.
            Value can also be an intrinsic function dict.
        :param integer/dict max_age: Maximum duration to cache the CORS Preflight request. Value is set on
            Access-Control-Max-Age header. Value can also be an intrinsic function dict.
        :param bool/None allow_credentials: Flags whether request is allowed to contain credentials.
"""
ALLOW_ORIGINS = "allowOrigins"
ALLOW_HEADERS = "allowHeaders"
ALLOW_METHODS = "allowMethods"
EXPOSE_HEADERS = "exposeHeaders"
MAX_AGE = "maxAge"
ALLOW_CREDENTIALS = "allowCredentials"
cors_headers = [ALLOW_ORIGINS, ALLOW_HEADERS, ALLOW_METHODS, EXPOSE_HEADERS, MAX_AGE, ALLOW_CREDENTIALS]
cors_configuration = self._doc.get(self._X_APIGW_CORS, dict())
# intrinsics will not work if cors configuration is defined in open api and as a property to the HttpApi
if allow_origins and is_intrinsic(allow_origins):
cors_configuration_string = json.dumps(allow_origins)
for header in cors_headers:
# example: allowOrigins to AllowOrigins
keyword = header[0].upper() + header[1:]
cors_configuration_string = cors_configuration_string.replace(keyword, header)
cors_configuration_dict = json.loads(cors_configuration_string)
cors_configuration.update(cors_configuration_dict)
else:
if allow_origins:
cors_configuration[ALLOW_ORIGINS] = allow_origins
if allow_headers:
cors_configuration[ALLOW_HEADERS] = allow_headers
if allow_methods:
cors_configuration[ALLOW_METHODS] = allow_methods
if expose_headers:
cors_configuration[EXPOSE_HEADERS] = expose_headers
if max_age is not None:
cors_configuration[MAX_AGE] = max_age
if allow_credentials is True:
cors_configuration[ALLOW_CREDENTIALS] = allow_credentials
self._doc[self._X_APIGW_CORS] = cors_configuration
def has_api_gateway_cors(self):
if self._doc.get(self._X_APIGW_CORS):
return True
return False
@property
def openapi(self):
"""
Returns a **copy** of the OpenApi specification as a dictionary.
:return dict: Dictionary containing the OpenApi specification
"""
# Make sure any changes to the paths are reflected back in output
self._doc["paths"] = self.paths
if self.tags:
self._doc["tags"] = self.tags
if self.security_schemes:
self._doc.setdefault("components", {})
self._doc["components"]["securitySchemes"] = self.security_schemes
return copy.deepcopy(self._doc)
@staticmethod
def is_valid(data):
"""
        Checks if the input data is an OpenApi document
:param dict data: Data to be validated
:return: True, if data is valid OpenApi
"""
if bool(data) and isinstance(data, dict) and isinstance(data.get("paths"), dict):
if bool(data.get("openapi")):
return OpenApiEditor.safe_compare_regex_with_string(
OpenApiEditor.get_openapi_version_3_regex(), data["openapi"]
)
return False
@staticmethod
def gen_skeleton():
"""
Method to make an empty swagger file, with just some basic structure. Just enough to pass validator.
:return dict: Dictionary of a skeleton swagger document
"""
return {"openapi": "3.0.1", "info": {"version": "1.0", "title": ref("AWS::StackName")}, "paths": {}}
@staticmethod
def _get_authorization_scopes(authorizers, default_authorizer):
"""
Returns auth scopes for an authorizer if present
:param authorizers: authorizer definitions
:param default_authorizer: name of the default authorizer
"""
if authorizers is not None:
if (
authorizers[default_authorizer]
and authorizers[default_authorizer].get("AuthorizationScopes") is not None
):
return authorizers[default_authorizer].get("AuthorizationScopes")
return []
@staticmethod
def _normalize_method_name(method):
"""
        Returns a lower case, normalized version of HTTP Method. It also knows how to handle API Gateway specific methods
like "ANY"
NOTE: Always normalize before using the `method` value passed in as input
:param string method: Name of the HTTP Method
:return string: Normalized method name
"""
if not method or not isinstance(method, string_types):
return method
method = method.lower()
if method == "any":
return OpenApiEditor._X_ANY_METHOD
else:
return method
@staticmethod
def get_openapi_version_3_regex():
openapi_version_3_regex = r"\A3(\.\d)(\.\d)?$"
return openapi_version_3_regex
@staticmethod
def safe_compare_regex_with_string(regex, data):
return re.match(regex, str(data)) is not None
@staticmethod
def get_path_without_trailing_slash(path):
return re.sub(r"{([a-zA-Z0-9._-]+|proxy\+)}", "*", path)
| 43.118644
| 120
| 0.64717
|
0bef6c108f2bed7fe48039d5b00316dca5dfc26e
| 2,485
|
py
|
Python
|
myDatasets.py
|
tanaka4463/bippa-detection
|
c18eece423e334351432dcbf022e3827d97bcfb3
|
[
"MIT"
] | null | null | null |
myDatasets.py
|
tanaka4463/bippa-detection
|
c18eece423e334351432dcbf022e3827d97bcfb3
|
[
"MIT"
] | null | null | null |
myDatasets.py
|
tanaka4463/bippa-detection
|
c18eece423e334351432dcbf022e3827d97bcfb3
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from torch.utils.data import Dataset
import os
import json
import pandas as pd
import numpy as np
from PIL import Image
class MyDatasets(Dataset):
def __init__(self, imgDirectory, transform = None):
self.imgDirectory = imgDirectory
self.df = self.createDataFrame()
self.image_ids = self.df['image_id'].unique()
self.transform = transform
def __len__(self):
return self.image_ids.shape[0]
def __getitem__(self, index):
image_id = self.image_ids[index]
img = Image.open(os.path.join(self.imgDirectory, image_id + '.jpg'))
if self.transform:
img = self.transform(img, key = 'train')
        # select the annotation rows belonging to this image
df = self.df[self.df['image_id'] == image_id]
boxes = torch.tensor(df[['xmin', 'ymin', 'xmax', 'ymax']].values, dtype = torch.float32)
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
area = torch.as_tensor(area, dtype = torch.float32)
labels = torch.tensor(df['class'].values.astype(np.int64), dtype = torch.int64)
iscrowd = torch.zeros((df.shape[0], ), dtype = torch.int64)
        # assemble the target dict expected by torchvision detection models
target = {}
target['boxes'] = boxes
target['labels'] = labels
target['image_id'] = torch.tensor([index])
target['area'] = area
target['iscrowd'] = iscrowd
return img, target
def createDataFrame(self):
labelClass = {'bippa': 1}
all_files = os.listdir(self.imgDirectory)
abs_files = [os.path.join(self.imgDirectory, f) for f in all_files]
jsonPaths = [f for f in abs_files if os.path.splitext(f)[1] == '.json']
df = pd.DataFrame(columns = ['image_id', 'xmin', 'ymin', 'xmax', 'ymax', 'class'])
for jsonPath in jsonPaths:
image_id = os.path.splitext(os.path.basename(jsonPath))[0]
            with open(jsonPath, 'r', encoding = 'utf-8') as jsonFile:
                jsonData = json.load(jsonFile)
points = jsonData['shapes']
for i in range(len(points)):
rect = np.array(points[i]['points']).flatten()
df_c = pd.DataFrame([rect], columns = ['xmin', 'ymin', 'xmax', 'ymax'])
df_c['image_id'] = image_id
df_c['class'] = labelClass['bippa']
df = pd.concat([df, df_c])
return df
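# --- Hedged usage sketch (added; not part of the original module) ---
# Minimal example of wiring MyDatasets into a torch DataLoader. Detection
# targets are dicts of varying size, so a custom collate_fn that keeps each
# sample separate is needed. The image directory below is a hypothetical path.
if __name__ == '__main__':
    from torch.utils.data import DataLoader

    def detection_collate(batch):
        # keep (image, target) pairs as tuples instead of stacking tensors
        return tuple(zip(*batch))

    dataset = MyDatasets('./data/train')
    loader = DataLoader(dataset, batch_size=2, shuffle=True, collate_fn=detection_collate)
    images, targets = next(iter(loader))
    print(len(images), targets[0]['boxes'].shape)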
| 37.089552
| 97
| 0.558149
|
2bb6fe560093bacaec289618d3406731ed1efdeb
| 1,211
|
py
|
Python
|
human/core/api/models/work_experience.py
|
domenicosolazzo/Human
|
272bbd939c90e0874a51022b9f716ed47e886fec
|
[
"MIT"
] | null | null | null |
human/core/api/models/work_experience.py
|
domenicosolazzo/Human
|
272bbd939c90e0874a51022b9f716ed47e886fec
|
[
"MIT"
] | null | null | null |
human/core/api/models/work_experience.py
|
domenicosolazzo/Human
|
272bbd939c90e0874a51022b9f716ed47e886fec
|
[
"MIT"
] | null | null | null |
from place import Place
class Employer(object):
"""
Employer model that represents a real work experience
"""
from_date = ""
to_date = ""
place = None
summary = "",
name = ""
title = ""
sector = "",
keywords = []
def __init__(self, data):
self.name = data.get('name', None)
self.title = data.get('title', None)
self.sector = data.get('sector', None)
self.from_date = data.get('from_date', None)
self.to_date = data.get('to_date', None)
self.summary = data.get('summary', None)
keywords = data.get('keywords', [])
if not isinstance(keywords, list):
keywords = []
self.keywords = keywords
place = data.get('place', None)
if place is not None:
self.place = Place(place)
def to_json(self):
return {
'name': self.name,
'title': self.title,
'sector': self.sector,
'from_date': self.from_date,
'to_date': self.to_date,
'summary': self.summary,
'keywords': self.keywords,
            'place': self.place.to_json() if self.place is not None else None
}
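# --- Hedged usage sketch (added; not part of the original module) ---
# The values below are made-up sample data; they only show the expected shape
# of the input dict.
if __name__ == '__main__':
    employer = Employer({
        'name': 'Acme Corp',
        'title': 'Software Engineer',
        'sector': 'IT',
        'from_date': '2019-01',
        'to_date': '2021-06',
        'summary': 'Backend development',
        'keywords': ['python', 'api'],
    })
    print(employer.to_json())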
| 28.833333
| 77
| 0.535095
|
131f6f2da3f2c69cf8ca88de131984155cc0cfa4
| 185
|
py
|
Python
|
accounts/forms.py
|
iidamakinen/OHSIHA2018
|
76c4f2d754045cc82d57062453e7248d63e5bf4d
|
[
"MIT"
] | null | null | null |
accounts/forms.py
|
iidamakinen/OHSIHA2018
|
76c4f2d754045cc82d57062453e7248d63e5bf4d
|
[
"MIT"
] | null | null | null |
accounts/forms.py
|
iidamakinen/OHSIHA2018
|
76c4f2d754045cc82d57062453e7248d63e5bf4d
|
[
"MIT"
] | null | null | null |
from django import forms
from .models import Tapahtuma
class TapahtumaForm(forms.ModelForm):
class Meta:
model = Tapahtuma
fields = ['name', 'description', 'date']
| 23.125
| 48
| 0.681081
|
02dc95abb5b6583696e6bdc49adc7c22a82993ce
| 10,868
|
py
|
Python
|
tests/regression_tests/test_aerostruct.py
|
kejacobson/mphys
|
72ff05a1c444461a9431470d32dcf2a7d51752eb
|
[
"Apache-2.0"
] | null | null | null |
tests/regression_tests/test_aerostruct.py
|
kejacobson/mphys
|
72ff05a1c444461a9431470d32dcf2a7d51752eb
|
[
"Apache-2.0"
] | null | null | null |
tests/regression_tests/test_aerostruct.py
|
kejacobson/mphys
|
72ff05a1c444461a9431470d32dcf2a7d51752eb
|
[
"Apache-2.0"
] | null | null | null |
# --- Python 3.8 ---
"""
@File : test_aerostruct.py
@Time : 2020/12/20
@Author : Josh Anibal
@Desc : Aerostructural regression tests used to test if the output produced
by MPHYS has changed
"""
# === Standard Python modules ===
from __future__ import print_function, division
import os
import unittest
# === External Python modules ===
import numpy as np
from mpi4py import MPI
from parameterized import parameterized, parameterized_class
# === Extension modules ===
import openmdao.api as om
from openmdao.utils.assert_utils import assert_near_equal
from mphys.multipoint import Multipoint
from mphys.scenario_aerostructural import ScenarioAeroStructural
# these imports will be from the respective codes' repos rather than mphys
from mphys.solver_builders.mphys_adflow import ADflowBuilder
from tacs.mphys import TacsBuilder
from mphys.solver_builders.mphys_meld import MeldBuilder
# from mphys.solver_builders.mphys_rlt import RltBuilder
from baseclasses import AeroProblem
from tacs import elements, constitutive, functions
# set these for convenience
comm = MPI.COMM_WORLD
rank = comm.rank
baseDir = os.path.dirname(os.path.abspath(__file__))
# Callback function used to setup TACS element objects and DVs
def element_callback(dvNum, compID, compDescript, elemDescripts, specialDVs, **kwargs):
rho = 2780.0 # density, kg/m^3
E = 73.1e9 # elastic modulus, Pa
nu = 0.33 # poisson's ratio
ys = 324.0e6 # yield stress, Pa
thickness = 0.003
min_thickness = 0.002
max_thickness = 0.05
# Setup (isotropic) property and constitutive objects
prop = constitutive.MaterialProperties(rho=rho, E=E, nu=nu, ys=ys)
# Set one thickness dv for every component
con = constitutive.IsoShellConstitutive(prop, t=thickness, tNum=dvNum, tlb=min_thickness, tub=max_thickness)
# For each element type in this component,
# pass back the appropriate tacs element object
transform = None
elem = elements.Quad4Shell(transform, con)
return elem
def problem_setup(scenario_name, fea_assembler, problem):
"""
Helper function to add fixed forces and eval functions
to structural problems used in tacs builder
"""
# Add TACS Functions
# Only include mass from elements that belong to pytacs components (i.e. skip concentrated masses)
problem.addFunction('mass', functions.StructuralMass)
problem.addFunction('ks_vmfailure', functions.KSFailure, safetyFactor=1.0, ksWeight=50.0)
# Add gravity load
g = np.array([0.0, 0.0, -9.81]) # m/s^2
problem.addInertialLoad(g)
class Top(Multipoint):
def setup(self):
################################################################################
# ADflow options
################################################################################
aero_options = {
# I/O Parameters
"gridFile": os.path.join(baseDir, "../input_files/wing_vol.cgns"),
"outputDirectory": ".",
"monitorvariables": ["resrho", "resturb", "cl", "cd"],
"writeTecplotSurfaceSolution": False,
# 'writevolumesolution':False,
# 'writesurfacesolution':False,
# Physics Parameters
"equationType": "RANS",
# Solver Parameters
"smoother": "DADI",
"CFL": 1.5,
"CFLCoarse": 1.25,
"MGCycle": "sg",
"MGStartLevel": -1,
"nCyclesCoarse": 250,
# ANK Solver Parameters
"useANKSolver": True,
"nsubiterturb": 5,
"anksecondordswitchtol": 1e-4,
"ankcoupledswitchtol": 1e-6,
"ankinnerpreconits": 2,
"ankouterpreconits": 2,
"anklinresmax": 0.1,
# Termination Criteria
"L2Convergence": 1e-14,
"L2ConvergenceCoarse": 1e-2,
"L2ConvergenceRel": 1e-4,
"nCycles": 10000,
# force integration
"forcesAsTractions": self.forcesAsTractions,
}
aero_builder = ADflowBuilder(aero_options, scenario="aerostructural")
aero_builder.initialize(self.comm)
self.add_subsystem("mesh_aero", aero_builder.get_mesh_coordinate_subsystem())
################################################################################
# TACS options
################################################################################
tacs_options = {'element_callback' : element_callback,
"problem_setup": problem_setup,
'mesh_file': '../input_files/wingbox.bdf'}
struct_builder = TacsBuilder(tacs_options, coupled=True)
struct_builder.initialize(self.comm)
self.add_subsystem("mesh_struct", struct_builder.get_mesh_coordinate_subsystem())
################################################################################
# Transfer scheme options
################################################################################
if self.xfer_builder_class == MeldBuilder:
xfer_builder = self.xfer_builder_class(aero_builder, struct_builder, isym=1, check_partials=True)
else:
xfer_builder = self.xfer_builder_class(self.xfer_options, aero_builder, struct_builder, check_partials=True)
xfer_builder.initialize(self.comm)
################################################################################
# MPHYS setup
################################################################################
# ivc to keep the top level DVs
dvs = self.add_subsystem("dvs", om.IndepVarComp(), promotes=["*"])
nonlinear_solver = om.NonlinearBlockGS(maxiter=25, iprint=2, use_aitken=True, rtol=1e-14, atol=1e-14)
linear_solver = om.LinearBlockGS(maxiter=25, iprint=2, use_aitken=True, rtol=1e-14, atol=1e-14)
self.mphys_add_scenario(
"cruise",
ScenarioAeroStructural(
aero_builder=aero_builder, struct_builder=struct_builder, ldxfer_builder=xfer_builder
),
nonlinear_solver,
linear_solver,
)
for discipline in ["aero", "struct"]:
self.mphys_connect_scenario_coordinate_source("mesh_%s" % discipline, "cruise", discipline)
# add the structural thickness DVs
ndv_struct = struct_builder.get_ndv()
dvs.add_output("dv_struct", np.array(ndv_struct * [0.01]))
self.connect("dv_struct", "cruise.dv_struct")
def configure(self):
super().configure()
        # create the aero problem for this analysis point.
# this is custom to the ADflow based approach we chose here.
# any solver can have their own custom approach here, and we don't
# need to use a common API. AND, if we wanted to define a common API,
# it can easily be defined on the mp group, or the aero group.
aoa = 1.5
ap0 = AeroProblem(
name="ap0",
mach=0.8,
altitude=10000,
alpha=aoa,
areaRef=45.5,
chordRef=3.25,
evalFuncs=["lift", "drag", "cl", "cd"],
)
ap0.addDV("alpha", value=aoa, name="aoa")
ap0.addDV("mach", value=0.8, name="mach")
# here we set the aero problems for every cruise case we have.
# this can also be called set_flow_conditions, we don't need to create and pass an AP,
# just flow conditions is probably a better general API
# this call automatically adds the DVs for the respective scenario
self.cruise.coupling.aero.mphys_set_ap(ap0)
self.cruise.aero_post.mphys_set_ap(ap0)
# define the aero DVs in the IVC
# s0
self.dvs.add_output("aoa0", val=aoa, units="deg")
self.dvs.add_output("mach0", val=0.8)
# connect to the aero for each scenario
self.connect("aoa0", ["cruise.coupling.aero.aoa", "cruise.aero_post.aoa"])
self.connect("mach0", ["cruise.coupling.aero.mach", "cruise.aero_post.mach"])
@parameterized_class(
[
{
"name": "meld",
"xfer_builder_class": MeldBuilder,
"xfer_options": {"isym": 1, "n": 200, "beta": 0.5},
"ref_vals": {
"xa": 5.44356782419053,
"cl": 0.3384087364751269,
"func_struct": 0.25177455023767636,
"cd": 0.029881839034169452,
},
},
# {
# "name": "rlt",
# # "xfer_builder_class": RltBuilder,
# "xfer_options": {"transfergaussorder": 2},
# "ref_vals": {"xa": 5.504999831790868, "func_struct": 0.31363742, "cl": 0.3047756, "cd": 0.0280476},
# },
]
)
class TestAeroStructSolve(unittest.TestCase):
N_PROCS = 1
def setUp(self):
################################################################################
# OpenMDAO setup
################################################################################
prob = om.Problem()
prob.model = Top()
prob.model.xfer_builder_class = self.xfer_builder_class
prob.model.xfer_options = self.xfer_options
if "meld" in self.name:
prob.model.forcesAsTractions = True
else:
prob.model.forcesAsTractions = False
prob.setup()
self.prob = prob
# om.n2(prob, show_browser=False, outfile='test_as.html')
def test_run_model(self):
self.prob.run_model()
# prob.model.list_outputs()
if MPI.COMM_WORLD.rank == 0:
print("Scenario 0")
print("xa =", np.mean(self.prob.get_val("cruise.coupling.geo_disp.x_aero", get_remote=True)))
print("cl =", self.prob.get_val("cruise.aero_post.cl", get_remote=True)[0])
print("cd =", self.prob.get_val("cruise.aero_post.cd", get_remote=True)[0])
print("ks_vmfailure =", self.prob.get_val("cruise.ks_vmfailure", get_remote=True)[0])
assert_near_equal(
np.mean(self.prob.get_val("cruise.coupling.geo_disp.x_aero", get_remote=True)),
self.ref_vals["xa"],
1e-6,
)
assert_near_equal(
np.mean(self.prob.get_val("cruise.aero_post.cl", get_remote=True)), self.ref_vals["cl"], 1e-6
)
assert_near_equal(
np.mean(self.prob.get_val("cruise.aero_post.cd", get_remote=True)), self.ref_vals["cd"], 1e-6
)
assert_near_equal(
np.mean(self.prob.get_val("cruise.ks_vmfailure", get_remote=True)),
self.ref_vals["func_struct"],
1e-6,
)
if __name__ == "__main__":
unittest.main()
| 37.347079
| 120
| 0.568642
|
3e1fe83d89e15bd049040214762d7a85054d0b95
| 7,551
|
py
|
Python
|
test_models.py
|
iqDF/tsn-pytorch
|
c734ee040fcb245542866f0a7812cdc6e417cb2c
|
[
"BSD-2-Clause"
] | 2
|
2019-08-15T10:02:17.000Z
|
2020-03-27T09:45:56.000Z
|
test_models.py
|
iqDF/tsn-pytorch
|
c734ee040fcb245542866f0a7812cdc6e417cb2c
|
[
"BSD-2-Clause"
] | null | null | null |
test_models.py
|
iqDF/tsn-pytorch
|
c734ee040fcb245542866f0a7812cdc6e417cb2c
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Usage:
python3 test_model.py <DATASET> <MODALITY> <VIDEO_FOR_TEST> <CHECKPOINTS_FILE> \
--arch <ARCH> --save_scores <SAVE_SCORES>
e.g:
python3 test_models.py saag01 ARP ./fighting.mkv checkpoints/SAAG01_BNI_ARP_3_model_best.pth.tar \
--arch BNInception --save_scores scores/ --num_segments 3 --consensus_type avg
"""
import argparse
import time
# Computation libs
import numpy as np
import cv2
import torch.nn.parallel
import torch.optim
from multiprocessing import Pool, current_process
from sklearn.metrics import confusion_matrix
from PIL import Image
from dataset import TSNDataSet
from models import TSN
from transforms import *
from ops import ConsensusModule
from convert_to_ARP import cvApproxRankPooling, Buffer
import torch
import torchsummary
def get_options():
parser = argparse.ArgumentParser(
description="Standard video-level testing")
parser.add_argument('dataset', type=str, choices=['ucf101', 'hmdb51', 'kinetics', 'saag01'])
parser.add_argument('modality', type=str, choices=['RGB', 'Flow', 'RGBDiff', 'ARP'])
parser.add_argument('video_path', type=str)
parser.add_argument('checkpoint', type=str)
parser.add_argument('--arch', type=str, default="resnet101")
parser.add_argument('--save_scores', type=str, default=None)
parser.add_argument('--num_segments', type=int, default=3)
parser.add_argument('--max_num', type=int, default=-1)
parser.add_argument('--test_crops', type=int, default=10)
parser.add_argument('--input_size', type=int, default=224)
parser.add_argument('--consensus_type', type=str, default='avg',
choices=['avg', 'max', 'topk'])
parser.add_argument('--k', type=int, default=3)
parser.add_argument('--dropout', type=float, default=0.7)
parser.add_argument('-j', '--workers', default=5, type=int, metavar='N',
help='number of data loading workers (default: 5)')
parser.add_argument('--print_freq', type=int, default=5)
return parser.parse_args()
def forward_pass_model(model, processed_input):
return model(processed_input)
def display_prediction():
pass
def run_video_appx_rank_pooling(
video_path,
num_segments
):
"""Approximated Rank Pooling (ARP) runner for video input
Outputs Rank pooled frames from a video.
"""
current = current_process()
cap = cv2.VideoCapture(video_path)
# Find OpenCV version
(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
    # The frame-count property constant was renamed between OpenCV 2.x and 3.x,
    # so pick the right name for the installed version.
    if int(major_ver) < 3:
        num_frames = cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)
    else:
        num_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
print(".. Number of Frames total in the video is {0}".format(num_frames))
    # Buffer roughly one third of the frames for each rank-pooled image
    buffer_size = int(num_frames + 1) // 3
    # Rolling buffer of frames waiting to be rank pooled
    buffer = Buffer(buffer_size)
success = True
rank_pooled_frames = []
while success:
success, frame = cap.read()
if buffer.isfull():
frames = buffer.clear()
rank_pooled = cvApproxRankPooling(frames)
rank_pooled = Image.fromarray(np.uint8(rank_pooled))
rank_pooled_frames.append(rank_pooled)
buffer.enqueue(frame)
cap.release()
return rank_pooled_frames
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def sample_frames(frames, sample_indices, new_length):
sampled_frames = list()
for idx in sample_indices:
p = int(idx)
for _ in range(new_length):
seg_imgs = frames[p]
sampled_frames.append(seg_imgs)
            if p < len(frames) - 1:  # stay within bounds of the 0-indexed frame list
                p += 1
return sampled_frames
def generate_sample_indices(num_frames, new_length, num_segments):
tick = (num_frames - new_length + 1) / float(num_segments)
offsets = np.array([int(tick / 2.0 + tick * x) for x in range(num_segments)])
return offsets + 1
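# Illustrative note (added): with num_frames=30, new_length=1, num_segments=3,
# tick = 10.0 and the returned 1-based indices are [6, 16, 26], i.e. roughly
# the midpoint of each of the three equal segments.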
def get_group_transforms(args, model):
input_mean = model.input_mean
input_std = model.input_std
length = (3 if args.modality in ('RGB', 'ARP', 'RGBDiff') else 2)
if isinstance(input_mean, int):
input_mean = [input_mean] * length
    if isinstance(input_std, int):
        input_std = [input_std] * length
transforms = torchvision.transforms.Compose([
model.get_augmentation(),
Stack(roll=True),
ToTorchFormatTensor(div=False),
GroupNormalize(input_mean, input_std),
])
return transforms
def load_model_from_checkpoint(
model,
checkpointfile,
was_data_paralleled=True
):
if was_data_paralleled:
model = torch.nn.DataParallel(model, device_ids=[0,]).cuda()
checkpoint = torch.load(checkpointfile)
start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
state_dict = checkpoint['state_dict']
model.load_state_dict(state_dict)
return model
def init_model(num_classes, new_length, args):
model = TSN(num_classes, args.num_segments, args.modality,
base_model=args.arch,
new_length=new_length,
consensus_type=args.consensus_type,
dropout=0.5,
partial_bn=False)
crop_size = model.crop_size
scale_size = model.scale_size
input_mean = model.input_mean
input_size = model.input_size
input_std = model.input_std
policies = model.get_optim_policies()
train_augmentation = model.get_augmentation()
cropping = torchvision.transforms.Compose([
GroupScale(scale_size),
GroupCenterCrop(input_size),
])
return model
def get_num_classes(dataset):
if dataset == 'ucf101':
num_class = 101
elif dataset == 'hmdb51':
num_class = 51
elif dataset == 'kinetics':
num_class = 400
elif dataset == 'saag01':
num_class = 2
else:
raise ValueError('Unknown dataset ' + args.dataset)
return num_class
def get_data_length(modality):
if modality in ['RGB', 'ARP']:
data_length = 1
elif modality in ['Flow', 'RGBDiff']:
data_length = 5
return data_length
if __name__ == '__main__':
args = get_options()
video_path = args.video_path
dataset = args.dataset
architecture = args.arch
checkpoint = args.checkpoint
num_segments = args.num_segments
modality = args.modality
num_classes = get_num_classes(dataset)
data_length = get_data_length(modality)
print("--------------------------------------------------------------------------")
print("> Model Init: %s" % args.arch)
model = init_model(num_classes, data_length, args)
print("--------------------------------------------------------------------------")
print("> Loading Video to Frames: %s" % video_path)
frames = run_video_appx_rank_pooling(video_path, num_segments)
num_frames = len(frames)
print(".. Frame shape: ", num_frames, frames[0].size)
print("--------------------------------------------------------------------------")
print("Transforming Frames to dataset:")
transformations = get_group_transforms(args, model)
processed_input = transformations(frames)
print(".. Transformed shape: ", processed_input.size())
print("--------------------------------------------------------------------------")
print("> Model Load Checkpoint: %s" % checkpoint)
model = load_model_from_checkpoint(model, checkpoint, was_data_paralleled=True)
torchsummary.summary(model, processed_input.size())
print("--------------------------------------------------------------------------")
print("> Prediction output: ")
output = model(processed_input)
_, pred = output.topk(1)
print(".. class ", pred.item())
| 28.280899
| 99
| 0.696596
|
fafeb2ee5267de29defe924520b0f17055090c39
| 2,201
|
py
|
Python
|
src/homescreen.py
|
Razdeep/BookManagementSystem
|
6b56e00c52ba57a8fb033a9ec7e6a288d075d2ac
|
[
"MIT"
] | null | null | null |
src/homescreen.py
|
Razdeep/BookManagementSystem
|
6b56e00c52ba57a8fb033a9ec7e6a288d075d2ac
|
[
"MIT"
] | 1
|
2018-11-27T03:52:22.000Z
|
2018-11-27T03:52:22.000Z
|
src/homescreen.py
|
Razdeep/BookManagementSystem
|
6b56e00c52ba57a8fb033a9ec7e6a288d075d2ac
|
[
"MIT"
] | null | null | null |
import tkinter
from core.config import *
from submit import Submit
class HomeScreen:
def __init__(self,master):
self.master=master
self.master.config(bg="yellow green")
navbar=tkinter.Frame(self.master)
fictn_nav=tkinter.Button(navbar,text='Fictional Books',relief=tkinter.RAISED,bg="gold",activebackground="white",fg="dark blue",height=2,font=('Comic Sans MS bold',10),command=self.showfictionHere,width=20)
fictn_nav.grid(row=0,column=0)
nonfictn_nav=tkinter.Button(navbar,text='Non-Fictional Books',relief=tkinter.RAISED,bg="gold",activebackground="white",fg="dark blue",height=2,font=('Comic Sans MS bold',10),command=self.shownonfictionHere,width=20)
nonfictn_nav.grid(row=1,column=0)
submit_nav=tkinter.Button(navbar,text='Submit/Request Books',relief=tkinter.RAISED,bg="gold",activebackground="white",fg="dark blue",height=2,font=('Comic Sans MS bold',10),command=self.showsubmitHere,width=20)
submit_nav.grid(row=2,column=0)
del_nav=tkinter.Button(navbar,text='Delivery of Books',relief=tkinter.RAISED,bg="gold",activebackground="white",fg="dark blue",height=2,font=('Comic Sans MS bold',10),command=self.showdeliveryHere,width=20)
del_nav.grid(row=3,column=0)
navbar.grid(row=0,column=0)
self.body=tkinter.Frame(self.master)
self.body.grid(row=0,column=1)
def showfictionHere(self):
self.body.destroy()
self.body=tkinter.Frame(self.master)
# @TODO
self.body.grid(row=0,column=1)
def shownonfictionHere(self):
self.body.destroy()
self.body=tkinter.Frame(self.master)
# @TODO
self.body.grid(row=0,column=1)
def showdeliveryHere(self):
self.body.destroy()
self.body=tkinter.Frame(self.master)
# @TODO
self.body.grid(row=0,column=1)
def showsubmitHere(self):
self.body.destroy()
self.body=tkinter.Frame(self.master)
Submit(self.body)
self.body.grid(row=0,column=1)
if __name__=='__main__':
root=tkinter.Tk()
HomeScreen(root)
root.mainloop()
| 42.326923
| 224
| 0.657428
|
a4ff08cd4d3117255a906809bf49c6fbd3d55961
| 3,148
|
py
|
Python
|
gym_anytrading/envs/stocks_env.py
|
tsb4/dayTradingEnv
|
16d1970a41c8933970152f1f41e504340d48cb08
|
[
"MIT"
] | null | null | null |
gym_anytrading/envs/stocks_env.py
|
tsb4/dayTradingEnv
|
16d1970a41c8933970152f1f41e504340d48cb08
|
[
"MIT"
] | null | null | null |
gym_anytrading/envs/stocks_env.py
|
tsb4/dayTradingEnv
|
16d1970a41c8933970152f1f41e504340d48cb08
|
[
"MIT"
] | null | null | null |
import numpy as np
from .trading_env import TradingEnv, Actions, Positions
class StocksEnv(TradingEnv):
def __init__(self, df, window_size, frame_bound):
assert len(frame_bound) == 2
self.frame_bound = frame_bound
super().__init__(df, window_size)
self.trade_fee_bid_percent = 0.01 # unit
self.trade_fee_ask_percent = 0.005 # unit
def _process_data(self):
prices = self.df.loc[:, 'Close'].to_numpy()
prices[self.frame_bound[0] - self.window_size] # validate index (TODO: Improve validation)
prices = prices[self.frame_bound[0]-self.window_size:self.frame_bound[1]]
diff = np.insert(np.diff(prices), 0, 0)
signal_features = np.column_stack((prices, diff))
return prices, signal_features
def _calculate_reward(self, action):
step_reward = 0
trade = False
if ((action == Actions.Buy.value and self._position == Positions.Short) or
(action == Actions.Sell.value and self._position == Positions.Long)):
trade = True
if trade:
current_price = self.prices[self._current_tick]
last_trade_price = self.prices[self._last_trade_tick]
price_diff = current_price - last_trade_price
if self._position == Positions.Long:
step_reward += price_diff
return step_reward
def _update_profit(self, action):
trade = False
if ((action == Actions.Buy.value and self._position == Positions.Short) or
(action == Actions.Sell.value and self._position == Positions.Long)):
trade = True
if trade or self._done:
current_price = self.prices[self._current_tick]
last_trade_price = self.prices[self._last_trade_tick]
if self._position == Positions.Long:
shares = (self._total_profit * (1 - self.trade_fee_ask_percent)) / last_trade_price
self._total_profit = (shares * (1 - self.trade_fee_bid_percent)) * current_price
def max_possible_profit(self):
current_tick = self._start_tick
last_trade_tick = current_tick - 1
profit = 1.
while current_tick <= self._end_tick:
position = None
if self.prices[current_tick] < self.prices[current_tick - 1]:
while (current_tick <= self._end_tick and
self.prices[current_tick] < self.prices[current_tick - 1]):
current_tick += 1
position = Positions.Short
else:
while (current_tick <= self._end_tick and
self.prices[current_tick] >= self.prices[current_tick - 1]):
current_tick += 1
position = Positions.Long
if position == Positions.Long:
current_price = self.prices[current_tick - 1]
last_trade_price = self.prices[last_trade_tick]
shares = profit / last_trade_price
profit = shares * current_price
last_trade_tick = current_tick - 1
return profit
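# --- Hedged usage sketch (added; not part of the original module) ---
# Rough illustration of stepping StocksEnv with a random policy. It assumes a
# DataFrame with a 'Close' column and the usual gym reset/step API inherited
# from TradingEnv; the synthetic price series and bounds are placeholders.
if __name__ == '__main__':
    import pandas as pd
    prices = pd.DataFrame({'Close': np.linspace(100.0, 110.0, 300)})
    env = StocksEnv(prices, window_size=10, frame_bound=(10, 300))
    observation = env.reset()
    done = False
    while not done:
        action = env.action_space.sample()
        observation, reward, done, info = env.step(action)
    print(info)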
| 34.977778
| 99
| 0.606099
|
b67d70a62d2c8d176090eefce22a1fe9d47379da
| 212
|
py
|
Python
|
common/errors.py
|
Hy-Oy/Swipter
|
fa0c2002816a64c3f00372f1e0f069bb03ff4458
|
[
"MIT"
] | null | null | null |
common/errors.py
|
Hy-Oy/Swipter
|
fa0c2002816a64c3f00372f1e0f069bb03ff4458
|
[
"MIT"
] | null | null | null |
common/errors.py
|
Hy-Oy/Swipter
|
fa0c2002816a64c3f00372f1e0f069bb03ff4458
|
[
"MIT"
] | null | null | null |
'''
Status codes and
business error codes
'''
ok = 0
# Status codes reserved for the system: 1000-1999
# User system: 2000-2999
PHONE_NUM_ERR = 2001 # invalid phone number
SMS_SEND_ERR = 2002 # SMS sending failed
VERIFY_CODE_ERR = 2003 # verification code check failed
LOGIN_REQUIRED_ERR = 2004 # not logged in
AVATAR_UPLOAD_ERR = 2005 # avatar upload failed
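# --- Hedged usage sketch (added; not part of the original module) ---
# One common way these codes are consumed: pairing a code with a message in an
# API response dict. The helper below is hypothetical, not part of the app.
if __name__ == '__main__':
    def make_error_response(code, message):
        # plain dict, ready for jsonify()-style serialization
        return {'code': code, 'message': message}

    print(make_error_response(PHONE_NUM_ERR, 'invalid phone number'))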
| 15.142857
| 32
| 0.726415
|
b204add0f8a02943d713dc15b0280e2f11174155
| 18,901
|
py
|
Python
|
surface-tool/statistics_scripts/generate_default_graph.py
|
sara-nl/SURFsara-Trace-Archive
|
fb053f241b761c7e6dd863f5b87c8e90428b1f6c
|
[
"Apache-2.0"
] | 1
|
2020-06-17T09:59:15.000Z
|
2020-06-17T09:59:15.000Z
|
surface-tool/statistics_scripts/generate_default_graph.py
|
sara-nl/SURFace
|
fb053f241b761c7e6dd863f5b87c8e90428b1f6c
|
[
"Apache-2.0"
] | null | null | null |
surface-tool/statistics_scripts/generate_default_graph.py
|
sara-nl/SURFace
|
fb053f241b761c7e6dd863f5b87c8e90428b1f6c
|
[
"Apache-2.0"
] | null | null | null |
from scipy.stats.stats import pearsonr, spearmanr, kendalltau
import matplotlib.pyplot as plt
import sys, os
from pathlib import Path
import matplotlib.pylab as pylab
import numpy as np
import pandas as pd
import scipy
import seaborn as sns
import matplotlib.patches as mpatches
sys.path.insert(1, '/home/cmt2002/surfsara-tool/statistics_scripts')
sys.path.insert(2, '/home/cmt2002/surfsara-tool/parser')
sys.path.insert(3, '/home/cmt2002/surfsara-tool/analysis')
from parse_metric import ParseMetric
DAY = 24
MID_DAY = int(DAY / 2)
WEEK = 7 * DAY
TOOL_PATH = Path(os.path.abspath(__file__)).parent.parent
MARKERS = ['s', '*', 'o', 'v', '<', 'p', '.', 'd']
COLORS = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
SHOW_PLOT = False
# Configure label sizes of graphs
params = {
'xtick.labelsize':12,
'ytick.labelsize':12,
'axes.labelsize':16,
'figure.figsize': (18, 8),
'savefig.format': 'pdf',
'axes.titlesize': 16,
'legend.loc': 'best',
'legend.fontsize': "large"
}
pylab.rcParams.update(params)
########### DEFAULT ANALYSIS: COVID VS NON-COVID + CPU vs GPU nodes ###########
class GenerateDefaultGraph:
def __init__(self, title, savefig_title, **kargs):
from diurnal_analysis import DiurnalAnalysis
self.diurnal_analysis = DiurnalAnalysis()
self.title = title
self.savefig_title = savefig_title
self.ylabel = kargs['ylabel']
def figure_daily_per_seasonal(
self, df_cpu_dic, df_gpu_dic
):
_, ((ax_cpu, ax_cpu_violin), (ax_gpu, ax_gpu_violin)) = plt.subplots(2, 2, figsize=(11, 10), constrained_layout=True, sharey=True)
ax_cpu = self.__axes_daily_seasonal_plot(
ax=ax_cpu,
df_covid=df_cpu_dic["covid"],
df_non_covid=df_cpu_dic["non_covid"],
ylabel=self.ylabel,
title=" Generic nodes"
)
ax_cpu_violin = self.__axes_daily_seasonal_violin(
ax=ax_cpu_violin,
df_covid=df_cpu_dic["covid"],
df_non_covid=df_cpu_dic["non_covid"]
)
ax_gpu = self.__axes_daily_seasonal_plot(
ax=ax_gpu,
df_covid=df_gpu_dic["covid"],
df_non_covid=df_gpu_dic["non_covid"],
ylabel=self.ylabel,
title=" ML nodes"
)
ax_gpu_violin = self.__axes_daily_seasonal_violin(
ax=ax_gpu_violin,
df_covid=df_gpu_dic["covid"],
df_non_covid=df_gpu_dic["non_covid"],
)
ax_cpu.set_xticks([tick for tick in range(MID_DAY-1, WEEK, DAY)])
ax_cpu.set_xticklabels(["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"], fontsize=14)
ax_gpu.set_xticks([tick for tick in range(MID_DAY-1, WEEK, DAY)])
ax_gpu.set_xticklabels(["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"], fontsize=14)
plt.savefig(os.path.join(str(TOOL_PATH) + "/plots/" + self.savefig_title + ".pdf"), dpi=100)
if SHOW_PLOT:
plt.show()
plt.pause(0.0001)
def figure_daily_per_monthly(self, df_cpu, df_gpu, month_dic):
fig, (ax_cpu, ax_gpu) = plt.subplots(2, 1, constrained_layout=True)
for name, value in month_dic.items():
df_cpu_month = self.diurnal_analysis.get_daily_month_df(df_cpu, value)
df_gpu_month = self.diurnal_analysis.get_daily_month_df(df_gpu, value)
ax_cpu.plot(df_cpu_month, marker=MARKERS[value], label=name, color=COLORS[value])
ax_gpu.plot(df_gpu_month, marker=MARKERS[value], label=name, color=COLORS[value])
# After plotting the lines, now construct the graph
self.__construct_daily_montly_plots(ax=ax_cpu, ylabel=self.ylabel, title = self.title + " | CPU nodes | aggregated per month")
self.__construct_daily_montly_plots(ax=ax_gpu, ylabel=self.ylabel, title = self.title + " | GPU nodes | aggregated per month")
ax_cpu.set_xticks([tick for tick in range(MID_DAY-1, WEEK, DAY)])
ax_cpu.set_xticklabels(["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"])
ax_gpu.set_xticks([tick for tick in range(MID_DAY-1, WEEK, DAY)])
ax_gpu.set_xticklabels(["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"])
plt.savefig(os.path.join(str(TOOL_PATH) + "/plots/" + self.savefig_title + ".pdf"), dpi=100)
if SHOW_PLOT:
plt.show()
plt.pause(0.0001)
def figure_hourly_monthly(self, df_cpu, df_gpu, month_dic):
fig, (ax_cpu, ax_gpu) = plt.subplots(2, 1, constrained_layout=True)
for name, value in month_dic.items():
df_cpu_month = self.diurnal_analysis.get_hourly_month_df(df_cpu, value)
df_gpu_month = self.diurnal_analysis.get_hourly_month_df(df_gpu, value)
ax_cpu.plot(df_cpu_month, marker=MARKERS[value], label=name, color=COLORS[value])
ax_gpu.plot(df_gpu_month, marker=MARKERS[value], label=name, color=COLORS[value])
# After plotting the lines, now construct the graph
self.__construct_hourly_montly_plots(ax=ax_cpu, ylabel=self.ylabel, title = self.title + " Generic nodes")
self.__construct_hourly_montly_plots(ax=ax_gpu, ylabel=self.ylabel, title = self.title + " ML nodes")
plt.savefig(os.path.join(str(TOOL_PATH) + "/plots/" + self.savefig_title + ".pdf"), dpi=100)
if SHOW_PLOT:
plt.show()
plt.pause(0.0001)
def figure_hourly_seasonal(
self, df_cpu_dic, df_gpu_dic
):
_, ((ax_cpu, ax_cpu_violin), (ax_gpu, ax_gpu_violin)) = plt.subplots(2, 2, figsize=(11, 10), sharey=True, constrained_layout=True)
self.__axes_hourly_plot(
ax=ax_cpu,
df_covid=df_cpu_dic["covid"],
df_non_covid=df_cpu_dic["non_covid"],
ylabel=self.ylabel,
title="Generic nodes",
xlabel="Time [hours]"
)
self.__axes_daily_seasonal_violin(
ax=ax_cpu_violin,
df_covid=df_cpu_dic["covid"],
df_non_covid=df_cpu_dic["non_covid"]
)
self.__axes_hourly_plot(
ax=ax_gpu,
df_covid=df_gpu_dic["covid"],
df_non_covid=df_gpu_dic["non_covid"],
ylabel=self.ylabel,
title="ML nodes",
xlabel="Time [hours]"
)
self.__axes_daily_seasonal_violin(
ax=ax_gpu_violin,
df_covid=df_gpu_dic["covid"],
df_non_covid=df_gpu_dic["non_covid"]
)
def set_ticks(ax):
ax.set_xticks([i for i in range(24)], minor=True)
ax.tick_params('x', length=12, width=2, which='major')
ax.tick_params('x', length=8, width=1, which='minor')
ax_cpu.set_xticklabels([hour for hour in range(-5, 24, 5)], fontsize=15)
ax_gpu.set_xticklabels([hour for hour in range(-5, 24, 5)], fontsize=15)
set_ticks(ax_cpu)
set_ticks(ax_gpu)
plt.savefig(os.path.join(str(TOOL_PATH) + "/plots/" + self.savefig_title + ".pdf"), dpi=100)
if SHOW_PLOT:
plt.show()
plt.pause(0.0001)
def figure_rack_analysis(self, df_cpu_dic, df_gpu_dic):
_, (ax_violin_cpu, ax_cpu, ax_violin_gpu, ax_gpu) = plt.subplots(4, 1, figsize=(24, 24), constrained_layout=True)
self.__axes_rack_barplot(
ax=ax_cpu,
df_covid=df_cpu_dic["covid"],
df_non_covid=df_cpu_dic["non_covid"],
subtitle= " Generic racks")
self.__axes_rack_violinplot(
ax=ax_violin_cpu,
df_covid=df_cpu_dic["covid"],
df_non_covid=df_cpu_dic["non_covid"],
subtitle=" Generic racks")
self.__axes_rack_barplot(ax_gpu,
df_covid=df_gpu_dic["covid"],
df_non_covid=df_gpu_dic["non_covid"],
subtitle=" ML racks")
self.__axes_rack_violinplot(
ax=ax_violin_gpu,
df_covid=df_gpu_dic["covid"],
df_non_covid=df_gpu_dic["non_covid"],
subtitle=" ML racks")
# Depict legend on top of the first plot
lightcoral_patch = mpatches.Patch(color='lightcoral', label='covid (left)')
steelblue_patch = mpatches.Patch(color='steelblue', label='non-covid (right)')
ax_violin_cpu.legend(handles=[lightcoral_patch, steelblue_patch], loc="center", bbox_to_anchor=(0.5, 1.17), fontsize=28, ncol=2)
plt.savefig(os.path.join(str(TOOL_PATH) + "/plots/" + self.savefig_title + ".pdf"), dpi=100)
if SHOW_PLOT:
plt.show()
plt.pause(0.0001)
def scatter_plot(self, title, x, y, savefig_title):
_, ax = plt.subplots(figsize=(10, 8))
ax.scatter(x=x, y=y, marker='*')
ax.set_xlabel("Read", fontsize=16)
ax.set_ylabel("Write", fontsize=16)
ax.set_title(title, fontsize=18)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
plt.savefig(os.path.join(str(TOOL_PATH) + "/plots/" + savefig_title + ".pdf"), dpi=100)
if SHOW_PLOT:
plt.show()
plt.pause(0.0001)
def get_pearsonr(self, x, y):
return scipy.stats.pearsonr(x=x, y=y)[0] # Return r which is pearson correlation coefficient
def CDF_plot(self, ax_cpu_dic, ax_gpu_dic):
def set_components(ax, subtitle):
ax.set_title(subtitle)
ax.set_ylabel("Density")
ax.set_xlabel(self.title + " " + self.ylabel)
ax.legend(loc='lower right')
fig, (ax_cpu, ax_gpu) = plt.subplots(2, 1)
fig.tight_layout(pad=5.0)
ax_cpu.hist(x=ax_cpu_dic['covid'], density=True, histtype='step', cumulative=True, color='lightcoral', label='covid', bins=100) # covid
ax_cpu.hist(x=ax_cpu_dic['non-covid'], density=True, histtype='step', cumulative=True, color='steelblue', label='non-covid', bins=100) # non-covid
ax_gpu.hist(x=ax_gpu_dic['covid'], density=True, histtype='step', cumulative=True, color='lightcoral', label='covid', bins=100) # covid
        ax_gpu.hist(x=ax_gpu_dic['non-covid'], density=True, histtype='step', cumulative=True, color='steelblue', label='non-covid', bins=100) # non-covid
set_components(ax_cpu, " Generic Nodes")
set_components(ax_gpu, " ML Nodes")
plt.savefig(os.path.join(str(TOOL_PATH) + "/plots/" + self.savefig_title + ".pdf"), dpi=100)
if SHOW_PLOT:
plt.show()
plt.pause(0.0001)
def entire_period_analysis(self, df_cpu, df_gpu):
def set_components(ax, df, subtitle, label, color):
ax.plot(df, label=label, color=color)
ax.set_ylim(0, )
ax.set_xlabel("2020")
ax.set_ylabel(self.ylabel)
ax.set_title(self.title + subtitle)
ax.legend(loc="upper right", fontsize=18)
ax.set_xticklabels(labels=self.__get_converted_xticks(ax))
# Convert index timestamps to utc datetime
df_cpu.index = pd.to_datetime(df_cpu.index, utc=True, unit="s")
df_gpu.index = pd.to_datetime(df_gpu.index, utc=True, unit="s")
# Get the sum and mean of all the nodes
df_cpu_sum = df_cpu.aggregate(func=sum, axis=1)
df_gpu_sum = df_gpu.aggregate(func=sum, axis=1)
df_cpu_mean = df_cpu.mean(axis=1)
df_gpu_mean = df_gpu.mean(axis=1)
fig, (ax_cpu_sum, ax_gpu_sum, ax_cpu_mean, ax_gpu_mean) = plt.subplots(4, 1, figsize=(11, 5*4), constrained_layout=True)
set_components(ax=ax_cpu_sum, df=df_cpu_sum, label="Generic", color=COLORS[0], subtitle=" aggregated values ")
set_components(ax=ax_gpu_sum, df=df_gpu_sum, label="ML", color=COLORS[1], subtitle=" aggregated values ")
set_components(ax=ax_cpu_mean, df=df_cpu_mean, label="Generic", color=COLORS[0], subtitle=" mean values ")
set_components(ax=ax_gpu_mean, df=df_gpu_mean, label="ML", color=COLORS[0], subtitle=" mean values ")
plt.savefig(os.path.join(str(TOOL_PATH) + "/plots/" + self.savefig_title + ".pdf"), dpi=100)
if SHOW_PLOT:
plt.show()
plt.pause(0.0001)
##### PRIVATE FUNCTIONS ######
def __get_custom_values(self, df):
values = np.array([])
for column in df.columns:
arr = df[column].values
mask = (np.isnan(arr) | (arr < 0))
arr = arr[~mask] # Filter out NaN values and less than 0
values = np.append(values, arr)
return values
def __get_max_pdf(self, df):
def normalize(df):
df = df.value_counts(sort=False, normalize=True).rename_axis('target').reset_index(name='pdf')
df["cdf"] = df["pdf"].cumsum()
return df
df = normalize(df)
index_max_pdf = df["pdf"].idxmax()
max_value = df.iloc[index_max_pdf]
return (max_value["pdf"], max_value["target"])
def __get_converted_xticks(self, ax):
"""
        :param ax: matplotlib Axes whose x-tick positions encode days
        :return: list of formatted day label strings
"""
return [pd.to_datetime(tick, unit='d').date().strftime("%d\n%b") for tick in ax.get_xticks()]
def __axes_hourly_plot(self, ax, df_covid, df_non_covid, title, ylabel, xlabel=None):
ax.plot(df_covid, marker=".", label="covid", color="lightcoral")
ax.plot(df_non_covid, marker="*", label="non-covid", color="steelblue")
ax.set_ylim(0, )
ax.set_title(title)
ax.set_ylabel(ylabel)
ax.set_xlabel("Time [hours]", fontsize=16)
ax.legend(loc='center')
return ax
def __axes_daily_seasonal_plot(self, ax, df_covid, df_non_covid, title, ylabel):
ax.plot(df_covid, marker=".", label="covid", color="lightcoral")
ax.plot(df_non_covid, marker="*", label="non-covid", color="steelblue")
ax.set_ylim(0, )
ax.set_title(title, fontsize=14)
ax.legend(loc='center')
ax.set_xlabel("Time [days]", fontsize=16)
ax.set_ylabel(ylabel)
xcoords = [0] + [xcoord for xcoord in range(23, WEEK, DAY)]
for xc in xcoords:
ax.axvline(x=xc, color="gray", lw=0.5)
return ax
def __axes_daily_seasonal_violin(self, ax, df_covid, df_non_covid):
sns.violinplot(data=[df_covid.values, df_non_covid.values], ax=ax, palette=['lightcoral', 'steelblue'])
ax.set_ylim(0, )
ax.tick_params(axis='both', which='major', labelsize=16)
ax.tick_params(axis='both', which='minor', labelsize=14)
ax.yaxis.tick_right()
ax.set_xticklabels([" ", " "])
ax.text(x=-0.48, y=self.__get_max_pdf(df_covid)[1] , s="{:.2f}".format(self.__get_max_pdf(df_covid)[0]), fontsize=13, color="black")
ax.text(x=1-0.55, y=self.__get_max_pdf(df_non_covid)[1], s="{:.2f}".format(self.__get_max_pdf(df_non_covid)[0]), fontsize=13, color="black")
return ax
# This function belongs to Laurens Versluis: https://github.com/lfdversluis
def __axes_rack_barplot(self, ax, df_covid, df_non_covid, subtitle):
rack_nodes = self.__get_rack_nodes(df_covid) # Get the rack nodes
index = 0
w = 0.4
ax1, ax2 = plt.axes, plt.axes
for rack, columns in rack_nodes.items():
arr_covid = self.__get_custom_values(df_covid[list(columns)])
arr_non_covid = self.__get_custom_values(df_non_covid[list(columns)])
ax1 = ax.bar(x=index - w/2, height=arr_covid.mean(), width=w, yerr=arr_covid.std(), color="lightcoral", capsize=5)
ax2 = ax.bar(x=index + w/2, height=arr_non_covid.mean(), width=w, yerr=arr_non_covid.std(), color="steelblue", capsize=5)
#if arr_covid.std() > 100:
#ax.text(x=index - w/2, y=102.2, s=str(round(arr_covid.std(), 1)), fontsize=22, color="black", va="center")
#if arr_non_covid.std() > 100:
#ax.text(x=index + w/2, y=102.2, s=str(round(arr_non_covid.std(), 1)), fontsize=22, color="black", va="center")
index += 1
ax.tick_params(axis='both', which='major', labelsize=32)
ax.tick_params(axis='both', which='minor', labelsize=32)
ax.set_ylabel(self.ylabel, fontsize=32)
#ax.set_ylim(0, 100)
ax.set_ylim(0, )
ax.set_xlabel(subtitle, fontsize=30)
ax.set_xticks(np.arange(len(rack_nodes.keys())))
ax.set_xticklabels(rack_nodes.keys(), fontsize=32)
def __axes_rack_violinplot(self, ax, df_covid, df_non_covid, subtitle, xlabel=None):
rack_nodes = self.__get_rack_nodes(df_covid) # To get the rack nodes
rack_values = list()
rack_names = list()
violin_width = 0.8
for rack, columns in rack_nodes.items():
arr_covid = self.__get_custom_values(df_covid[list(columns)])
arr_non_covid = self.__get_custom_values(df_non_covid[list(columns)])
rack_values.append(arr_covid)
rack_values.append(arr_non_covid)
rack_names.append(rack)
sns.violinplot(data=rack_values, ax=ax, cut=0, width=violin_width, palette=['lightcoral', 'steelblue'] * (int(len(rack_values)/2)))
ax.set_ylabel(self.ylabel, fontsize=32)
#ax.set_ylim(0, 100)
ax.set_ylim(0, )
ax.tick_params(axis='both', which='major', labelsize=32)
ax.tick_params(axis='both', which='minor', labelsize=32)
ax.set_xticks([i + 0.5 for i in range(0, len(rack_values), 2)])
ax.set_xlabel(subtitle, fontsize=30)
        # Depict the values that exceed 100 load
#for index, val in enumerate(rack_values):
#max_val = np.amax(val)
#if max_val > 100:
#ax.text(x=index-0.2, y=102.2, s=str(int(max_val)), fontsize=22, color="black", va="center")
ax.set_xticklabels(
rack_names,
ha='center', fontsize=32
)
for i in range(0, len(rack_values), 2):
ax.axvline(i + 1.5, lw=2, ls='dashed')
def __get_rack_nodes(self, df):
rack_nodes = {}
for node in df.columns:
rack = node.split("n")[0]
if rack not in rack_nodes:
rack_nodes[rack] = set()
rack_nodes[rack].add(node)
return rack_nodes
def __construct_daily_montly_plots(self, ax, title=None, ylabel=None):
ax.set_ylim(0, )
ax.set_title(title)
ax.set_ylabel(ylabel)
ax.legend(bbox_to_anchor=(1.01, 1), loc='upper left')
xcoords = [0] + [xcoord for xcoord in range(23, WEEK, DAY)]
for xc in xcoords:
ax.axvline(x=xc, color="gray", lw=0.5)
def __construct_hourly_montly_plots(self, ax, ylabel, title):
ax.set_ylim(0, )
ax.set_title(title)
ax.set_ylabel(ylabel)
ax.legend(bbox_to_anchor=(1.01, 1), loc='upper left')
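The private helpers above first group per-node columns by rack before aggregating. A minimal standalone sketch of that grouping follows; the column names such as "r10n1" are hypothetical stand-ins for whatever node identifiers the real monitoring frames carry.

import pandas as pd

df = pd.DataFrame(columns=["r10n1", "r10n2", "r31n5"])  # hypothetical node columns
rack_nodes = {}
for node in df.columns:
    rack = node.split("n")[0]              # "r10n1" -> rack id "r10"
    rack_nodes.setdefault(rack, set()).add(node)
print(rack_nodes)  # {'r10': {'r10n1', 'r10n2'}, 'r31': {'r31n5'}}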
| 41.178649
| 154
| 0.611978
|
99c78de79b9393fae7b2cbe7fd5a83e3f5da2a89
| 383
|
py
|
Python
|
Arase/HEP/DeleteDate.py
|
mattkjames7/Arase
|
996167be35a13bbb1fdddfbe75e3a06d124b1d25
|
[
"MIT"
] | null | null | null |
Arase/HEP/DeleteDate.py
|
mattkjames7/Arase
|
996167be35a13bbb1fdddfbe75e3a06d124b1d25
|
[
"MIT"
] | 1
|
2021-06-10T22:51:09.000Z
|
2021-06-10T22:51:09.000Z
|
Arase/HEP/DeleteDate.py
|
mattkjames7/Arase
|
996167be35a13bbb1fdddfbe75e3a06d124b1d25
|
[
"MIT"
] | null | null | null |
import numpy as np
from ..Tools.Downloading._DeleteDate import _DeleteDate
from .. import Globals
def DeleteDate(Date,L,prod,Confirm=True):
'''
Delete all of the files from a given date
'''
idxfname = Globals.DataPath + 'HEP/Index-L{:01d}-{:s}.dat'.format(L,prod)
datapath = Globals.DataPath + 'HEP/l{:01d}/{:s}/'.format(L,prod)
_DeleteDate(Date,idxfname,datapath,Confirm)
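A hedged usage sketch of the wrapper above: the yyyymmdd integer date, the level L=2 and the 'omniflux' product string are illustrative assumptions, and valid combinations depend on the rest of the Arase package and on which files have actually been downloaded.

from Arase.HEP.DeleteDate import DeleteDate

# Remove the HEP L2 'omniflux' files stored for 1 April 2017 (asks for confirmation first).
DeleteDate(20170401, 2, 'omniflux', Confirm=True)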
| 27.357143
| 74
| 0.720627
|
122e44a072af7e2008f795d9d7dc7fe94dc5608c
| 2,234
|
py
|
Python
|
Visualization-for-Company-Stakeholders/code.py
|
sdshilpadas/greyatom-python-for-data-science
|
10fdfd55b1879dfcde3a168132ba35e1d3fe2985
|
[
"MIT"
] | null | null | null |
Visualization-for-Company-Stakeholders/code.py
|
sdshilpadas/greyatom-python-for-data-science
|
10fdfd55b1879dfcde3a168132ba35e1d3fe2985
|
[
"MIT"
] | null | null | null |
Visualization-for-Company-Stakeholders/code.py
|
sdshilpadas/greyatom-python-for-data-science
|
10fdfd55b1879dfcde3a168132ba35e1d3fe2985
|
[
"MIT"
] | null | null | null |
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Reading the file
data=pd.read_csv(path)
#Code starts here
# Step 1
#Reading the file
data = pd.read_csv(path)
#Creating a new variable to store the value counts
loan_status = data['Loan_Status'].value_counts()
#Plotting bar plot
loan_status.plot(kind='bar')
plt.show()
# Step 2
#Plotting an unstacked bar plot
property_and_loan = data.groupby(['Property_Area', 'Loan_Status']).size().unstack()
property_and_loan.plot(kind='bar', stacked=False)
#Changing the x-axis label
plt.xlabel('Property Area')
#Changing the y-axis label
plt.ylabel('Loan Status')
#Rotating the ticks of X-axis
plt.xticks(rotation=45)
plt.show()
# Step 3
#Plotting a stacked bar plot
education_and_loan = data.groupby(['Education','Loan_Status']).size().unstack()
education_and_loan.plot(kind='bar', stacked=True)
#Changing the x-axis label
plt.xlabel('Education Status')
#Changing the y-axis label
plt.ylabel('Loan Status')
#Rotating the ticks of X-axis
plt.xticks(rotation=45)
plt.show()
# Step 4
#Subsetting the dataframe based on 'Education' column
graduate = pd.DataFrame(data[data['Education'] == 'Graduate'])
#Subsetting the dataframe based on 'Education' column
not_graduate = pd.DataFrame(data[data['Education'] == 'Not Graduate'])
#Plotting density plot for 'Graduate'
graduate.plot(kind='density',label='Graduate')
#Plotting density plot for 'Not Graduate'
not_graduate.plot(kind='density',label='Not Graduate')
#For automatic legend display
plt.legend()
# Step 5
#Setting up the subplots
fig, (ax_1, ax_2, ax_3) = plt.subplots(nrows= 3 , ncols=1, figsize=(10, 20))
#Plotting scatter plot
ax_1.scatter(data['ApplicantIncome'], data['LoanAmount'])
#setting the subplot axis title
ax_1.set_title('Applicant Income')
#plotting scatter plot
ax_2.scatter(data['CoapplicantIncome'], data['LoanAmount'])
#set subplot axis title
ax_2.set_title('Coapplicant Income')
#create a new column TotalIncome
data['TotalIncome'] = data['ApplicantIncome'] + data['CoapplicantIncome']
print(data)
#Plotting scatter plot for the newly created TotalIncome column
ax_3.scatter(data['TotalIncome'], data['LoanAmount'])
#Setting the subplot axis title
ax_3.set_title('Total Income')
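For reference, the groupby/size/unstack pattern used in Steps 2 and 3 produces a small count table indexed by the first column, with the second column's categories as columns. A toy illustration with made-up rows:

import pandas as pd

toy = pd.DataFrame({
    'Property_Area': ['Urban', 'Urban', 'Rural'],
    'Loan_Status':   ['Y', 'N', 'Y'],
})
print(toy.groupby(['Property_Area', 'Loan_Status']).size().unstack())
# Loan_Status      N    Y
# Property_Area
# Rural          NaN  1.0
# Urban          1.0  1.0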
| 24.282609
| 83
| 0.750671
|
3a249c929e37803984803aeac3d16866641edd11
| 1,127
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_dist_mnist_fp16_allreduce.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 11
|
2016-08-29T07:43:26.000Z
|
2016-08-29T07:51:24.000Z
|
python/paddle/fluid/tests/unittests/test_dist_mnist_fp16_allreduce.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/test_dist_mnist_fp16_allreduce.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 1
|
2021-12-09T08:59:17.000Z
|
2021-12-09T08:59:17.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
from test_dist_base import TestDistBase
class TestDistMnist2x2FP16AllReduce(TestDistBase):
def _setup_config(self):
self._sync_mode = True
self._use_reduce = False
self._nccl2_mode = True
def test_dist_train(self):
import paddle.fluid as fluid
if fluid.core.is_compiled_with_cuda():
self.check_with_place("dist_mnist_fp16_allreduce.py", delta=1e-5)
if __name__ == "__main__":
unittest.main()
| 32.2
| 77
| 0.741792
|
25a94ea0b987f4f7f4092899fab0f251a95b8ac1
| 1,568
|
py
|
Python
|
prototyping/water_flow.py
|
brenshanny/project_lob
|
2eb5aca40b5dfae84bdb0eeb19374c93ae0a7a1f
|
[
"MIT"
] | 1
|
2018-11-09T19:22:23.000Z
|
2018-11-09T19:22:23.000Z
|
prototyping/water_flow.py
|
brenshanny/project_lob
|
2eb5aca40b5dfae84bdb0eeb19374c93ae0a7a1f
|
[
"MIT"
] | null | null | null |
prototyping/water_flow.py
|
brenshanny/project_lob
|
2eb5aca40b5dfae84bdb0eeb19374c93ae0a7a1f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import RPi.GPIO as GPIO
import time, sys
class WaterFlowMonitor(object):
def __init__(self):
self.flow_sensor_pin = 17
self.total_count = 0
self.rate_count = 0
self.timer = 0
self.min_count = 0
self.constant = 0.10
# Setup GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.flow_sensor_pin, GPIO.IN, pull_up_down = GPIO.PUD_UP)
GPIO.add_event_detect(
self.flow_sensor_pin,
GPIO.FALLING,
callback=self.count_pulse
)
def count_pulse(self, channel):
self.total_count += 1
self.rate_count += 1
def calc_flow(self):
print('Liters/min -> ', round(self.rate_count * self.constant, 4))
print('Total Liters -> ', round(self.total_count * self.constant, 4))
def reset_rate_count(self):
self.rate_count = 0
def shut_down(self):
GPIO.cleanup()
def run(self):
self.timer = time.time() + 10
print("Running WaterFlowMonitor")
while True:
try:
if time.time() >= self.timer:
self.min_count += 1
self.calc_flow()
self.rate_count = 0
self.timer = time.time() + 10
time.sleep(1)
except KeyboardInterrupt:
print('Shutting down...')
self.shut_down()
sys.exit()
if __name__ == "__main__":
waterFlow = WaterFlowMonitor()
waterFlow.run()
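The conversion in calc_flow is a simple pulses-times-constant product. A short off-device sketch of that arithmetic: the 42-pulse count is made up, and 0.10 L per pulse is the class constant above, whose accuracy depends on the sensor's calibration.

rate_count = 42          # hypothetical pulses seen in one 10-second window
constant = 0.10          # litres per pulse, as in WaterFlowMonitor
litres_in_window = rate_count * constant
print('Litres this window ->', round(litres_in_window, 4))        # 4.2
# Note: calc_flow labels this figure 'Liters/min' even though the window is
# 10 s; if the constant really is litres per pulse, a per-minute rate would
# need a further factor of 6.
print('Litres/min (scaled) ->', round(litres_in_window * 6, 4))   # 25.2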
| 28
| 77
| 0.53699
|
21fdcf37050226b8c6b166cde5c607dd4d9157e3
| 3,540
|
py
|
Python
|
2016/11_RadioisotopeThermoelectricGenerators/test_solver.py
|
deanearlwright/AdventOfCode
|
ca4cf6315c0efa38bd7748fb6f4bc99e7934871d
|
[
"MIT"
] | 1
|
2021-01-03T23:09:28.000Z
|
2021-01-03T23:09:28.000Z
|
2016/11_RadioisotopeThermoelectricGenerators/test_solver.py
|
deanearlwright/AdventOfCode
|
ca4cf6315c0efa38bd7748fb6f4bc99e7934871d
|
[
"MIT"
] | 6
|
2020-12-26T21:02:42.000Z
|
2020-12-26T21:02:52.000Z
|
2016/11_RadioisotopeThermoelectricGenerators/test_solver.py
|
deanearlwright/AdventOfCode
|
ca4cf6315c0efa38bd7748fb6f4bc99e7934871d
|
[
"MIT"
] | null | null | null |
# ======================================================================
# Radioisotope Thermoelectric Generators
# Advent of Code 2016 Day 11 -- Eric Wastl -- https://adventofcode.com
#
# Python implementation by Dr. Dean Earl Wright III
# ======================================================================
# ======================================================================
# t e s t _ s o l v e r . p y
# ======================================================================
"Test solver for Advent of Code 2016 day 11, Radioisotope Thermoelectric Generators"
# ----------------------------------------------------------------------
# import
# ----------------------------------------------------------------------
import unittest
import aoc_11
import solver
# ----------------------------------------------------------------------
# constants
# ----------------------------------------------------------------------
EXAMPLE_TEXT = """
The first floor contains a hydrogen-compatible microchip and a lithium-compatible microchip.
The second floor contains a hydrogen generator.
The third floor contains a lithium generator.
The fourth floor contains nothing relevant.
"""
PART_ONE_TEXT = EXAMPLE_TEXT
PART_TWO_TEXT = ""
PART_ONE_RESULT = 11
PART_TWO_RESULT = None
# ======================================================================
# TestSolver
# ======================================================================
class TestSolver(unittest.TestCase): # pylint: disable=R0904
"Test Solver object"
def test_empty_init(self):
"Test the default Solver creation"
# 1. Create default Solver object
myobj = solver.Solver()
# 2. Make sure it has the default values
self.assertEqual(myobj.part2, False)
self.assertEqual(myobj.text, None)
def test_text_init(self):
"Test the Solver object creation from text"
# 1. Create Solver object from text
myobj = solver.Solver(text=aoc_11.from_text(EXAMPLE_TEXT))
# 2. Make sure it has the expected values
self.assertEqual(myobj.part2, False)
self.assertEqual(len(myobj.text), 4)
# 3. Check methods
self.assertEqual(myobj.number_moves(), 11)
def test_part_one(self):
"Test part one example of Solver object"
# 1. Create Solver object from text
myobj = solver.Solver(text=aoc_11.from_text(PART_ONE_TEXT))
# 2. Check the part one result
self.assertEqual(myobj.part_one(verbose=False), PART_ONE_RESULT)
def test_part_two(self):
"Test part two example of Solver object"
# 1. Create Solver object from text
myobj = solver.Solver(part2=True, text=aoc_11.from_text(PART_TWO_TEXT))
# 2. Check the part two result
self.assertEqual(myobj.part_two(verbose=False), PART_TWO_RESULT)
# ----------------------------------------------------------------------
# module initialization
# ----------------------------------------------------------------------
if __name__ == '__main__':
pass
# ======================================================================
# end t e s t _ s o l v e r . p y end
# ======================================================================
| 36.494845
| 92
| 0.436723
|
0f7a48752bb6045552a4995149ed985ba121ed9c
| 4,574
|
py
|
Python
|
grammar2lale/custom_optimizer.py
|
oudrea/grammar2pddl
|
f5936c5b8f831c99c7378d88e86c7dd6983c7b12
|
[
"Apache-2.0"
] | 11
|
2021-03-18T23:36:13.000Z
|
2022-02-27T11:06:15.000Z
|
grammar2lale/custom_optimizer.py
|
oudrea/grammar2pddl
|
f5936c5b8f831c99c7378d88e86c7dd6983c7b12
|
[
"Apache-2.0"
] | 3
|
2021-07-14T22:54:02.000Z
|
2022-02-22T05:12:28.000Z
|
grammar2lale/custom_optimizer.py
|
oudrea/grammar2pddl
|
f5936c5b8f831c99c7378d88e86c7dd6983c7b12
|
[
"Apache-2.0"
] | 1
|
2022-02-22T02:54:18.000Z
|
2022-02-22T02:54:18.000Z
|
from lale.lib.sklearn import *
from lale.lib.xgboost import *
from lale.lib.lightgbm import *
from lale.lib.lale import ConcatFeatures as Concat
from lale.lib.lale import NoOp
from lale.pretty_print import to_string
import lale.helpers
import lale.search.op2hp
import hyperopt
import statistics
import numpy as np
import time as time
import pandas as pd
from grammar2lale.abstract_optimizer import APipelineOptimizer
from sklearn.metrics import get_scorer
from sklearn.model_selection import train_test_split
import traceback
import sys
import warnings
import multiprocessing
class CustomOptimizer(APipelineOptimizer):
EVALS = 20
def __init__(
self, data, scorer='accuracy', evals=20, val_frac=0.2,
max_runtime=None
):
self.X, self.y = data
self.EVALS = evals
self.train_X, self.test_X, self.train_y, self.test_y = train_test_split(
self.X, self.y, test_size=val_frac, stratify=self.y, random_state=5489
)
self.scorer = get_scorer(scorer)
self.start_time = time.time()
self.eval_history = {
'loss' : [],
'score': [],
'time_from_start' : [],
}
self.max_runtime = max_runtime
print("Running optimization for " + str(self.max_runtime) + " seconds")
def evaluate(self, trained_model):
return self.scorer(trained_model, self.X, self.y)
def search(self, planned_pipeline):
# eval_idx = 0
def point_to_trained(search_point):
trainable = lale.helpers.create_instance_from_hyperopt_search_space(
planned_pipeline, search_point
)
trained = trainable.fit(self.train_X, self.train_y)
return trained
def objective(search_point):
current_time = time.time()
if (self.max_runtime != None) and ((current_time - self.start_time) > self.max_runtime) :
                # If the runtime budget is exhausted, skip evaluation; hyperopt has no clean early exit, so terminate the process
print('RAN OUT OF TIME')
sys.exit(0)
loss = None
score = None
try:
with warnings.catch_warnings(record=True) as w:
trained = point_to_trained(search_point)
score = self.evaluate(trained)
loss = 1.0 - score
except BaseException as e:
loss = 100
score = 0
eval_time = time.time()
# eval_idx += 1
# print('Eval %i: %g' % (eval_idx, loss))
self.eval_history['loss'].append(loss)
self.eval_history['score'].append(score)
self.eval_history['time_from_start'].append(eval_time - self.start_time)
return {'loss': loss, 'status': hyperopt.STATUS_OK }
search_space = lale.search.op2hp.hyperopt_search_space(planned_pipeline)
trials = hyperopt.Trials()
# If we want to do multiple runs and aggregate performance, this should be unset
rstate = np.random.RandomState(5489)
        hyperopt.fmin(objective, search_space, algo=hyperopt.tpe.suggest, max_evals=self.EVALS, trials=trials, rstate=rstate)
best_point = hyperopt.space_eval(search_space, trials.argmin)
result = lale.helpers.create_instance_from_hyperopt_search_space(
planned_pipeline, best_point
)
best_loss = np.min(trials.losses())
return result, 1.0 - best_loss
def evaluate_pipeline(self, pipeline):
if 'lale_pipeline' not in pipeline:
return pipeline
print("Starting to optimize " + pipeline['pipeline'])
start_time = time.time()
trainable_pipeline = None
best_score = 0.0
try:
trainable_pipeline, best_score = self.search(pipeline['lale_pipeline'])
except Exception as e:
print("EXCEPTION OCCURRED: " + str(e))
traceback.print_exc()
end_time = time.time()
print("Completed optimization for " + pipeline['pipeline'])
tlp = pipeline.copy()
tlp.update({
'trained_pipeline': trainable_pipeline,
'best_accuracy': best_score,
'opt_duration': (end_time-start_time)
})
return tlp
def get_eval_history(self) :
return pd.DataFrame.from_dict(self.eval_history)
def print_eval_history(self, out_filename) :
self.get_eval_history().to_csv(out_filename, header=True, index=False)
print('Evaluation history metrics + time saved in ' + out_filename)
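A hedged end-to-end sketch of driving the optimizer above, assuming it runs in the same module as CustomOptimizer: the iris dataset, the PCA >> LogisticRegression planned pipeline and the budget values are arbitrary stand-ins, while the dictionary keys mirror what evaluate_pipeline reads.

from sklearn.datasets import load_iris
from lale.lib.sklearn import PCA, LogisticRegression

X, y = load_iris(return_X_y=True)
opt = CustomOptimizer((X, y), scorer='accuracy', evals=5, max_runtime=60)

candidate = {
    'pipeline': 'PCA >> LogisticRegression',        # human-readable label
    'lale_pipeline': PCA >> LogisticRegression,     # planned (untrained) pipeline
}
result = opt.evaluate_pipeline(candidate)
print(result['best_accuracy'], result['opt_duration'])
opt.print_eval_history('eval_history.csv')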
| 37.491803
| 103
| 0.629427
|