Dataset schema (29 columns; ⌀ marks columns that may be null):

| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 – 1.02M |
| ext | string | 8 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 4 – 209 |
| max_stars_repo_name | string | length 5 – 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | 1 – 10 items |
| max_stars_count ⌀ | int64 | 1 – 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | length 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | length 24 |
| max_issues_repo_path | string | length 4 – 209 |
| max_issues_repo_name | string | length 5 – 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | 1 – 10 items |
| max_issues_count ⌀ | int64 | 1 – 67k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | length 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | length 24 |
| max_forks_repo_path | string | length 4 – 209 |
| max_forks_repo_name | string | length 5 – 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | 1 – 10 items |
| max_forks_count ⌀ | int64 | 1 – 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | length 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | length 24 |
| content | string | length 4 – 1.02M |
| avg_line_length | float64 | 1.07 – 66.1k |
| max_line_length | int64 | 4 – 266k |
| alphanum_fraction | float64 | 0.01 – 1 |
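Each row pairs one source file (the content column) with its repository, star/issue/fork, and line-statistics metadata, following the schema above. As a minimal sketch of how such a dataset can be inspected, the snippet below uses the Hugging Face `datasets` library; the dataset identifier is a placeholder, not the real name of this dataset.

```python
# Minimal sketch: stream a dataset with the schema above and inspect one row.
# "your-org/your-python-code-dataset" is a placeholder identifier (assumption).
from datasets import load_dataset

ds = load_dataset("your-org/your-python-code-dataset", split="train", streaming=True)

for row in ds:
    print(row["max_stars_repo_name"], row["max_stars_repo_path"])
    print("size:", row["size"], "avg_line_length:", row["avg_line_length"])
    print(row["content"][:200])  # first 200 characters of the file
    break
```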
Row 1

| field | value |
|---|---|
| hexsha | 89a47c8f2c81a8efae575f3c791d4a82dc5e30e3 |
| size | 62,360 |
| ext / lang | py / Python |
| repo path (stars/issues/forks) | dlkit/authz_adapter/learning/managers.py |
| repo name | UOC/dlkit |
| repo head hexsha | a9d265db67e81b9e0f405457464e762e2c03f769 |
| licenses | ["MIT"] |
| max_stars_count | 2 (2018-02-23T12:16:11.000Z to 2020-10-08T17:54:24.000Z) |
| max_issues_count | 87 (2017-04-21T18:57:15.000Z to 2021-12-13T19:43:57.000Z) |
| max_forks_count | 1 (2018-03-01T16:44:25.000Z to 2018-03-01T16:44:25.000Z) |

content:
"""AuthZ Adapter implementations of learning managers."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from . import sessions
from ..osid import managers as osid_managers
from ..osid.osid_errors import Unimplemented, OperationFailed, Unsupported
from ..primitives import Id
from ..utilities import raise_null_argument
from dlkit.manager_impls.learning import managers as learning_managers
class LearningProfile(osid_managers.OsidProfile, learning_managers.LearningProfile):
"""Adapts underlying LearningProfile methodswith authorization checks."""
def __init__(self):
osid_managers.OsidProfile.__init__(self)
def _get_hierarchy_session(self, proxy=None):
if proxy is not None:
try:
return self._provider_manager.get_objective_bank_hierarchy_session(proxy)
except Unimplemented:
return None
try:
return self._provider_manager.get_objective_bank_hierarchy_session()
except Unimplemented:
return None
def supports_objective_lookup(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_objective_lookup()
def supports_objective_query(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_objective_query()
def supports_objective_admin(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_objective_admin()
def supports_objective_hierarchy(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_objective_hierarchy()
def supports_objective_hierarchy_design(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_objective_hierarchy_design()
def supports_objective_sequencing(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_objective_sequencing()
def supports_objective_objective_bank(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_objective_objective_bank()
def supports_objective_objective_bank_assignment(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_objective_objective_bank_assignment()
def supports_objective_requisite(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_objective_requisite()
def supports_objective_requisite_assignment(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_objective_requisite_assignment()
def supports_activity_lookup(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_activity_lookup()
def supports_activity_query(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_activity_query()
def supports_activity_admin(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_activity_admin()
def supports_activity_objective_bank(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_activity_objective_bank()
def supports_activity_objective_bank_assignment(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_activity_objective_bank_assignment()
def supports_proficiency_lookup(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_proficiency_lookup()
def supports_proficiency_query(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_proficiency_query()
def supports_proficiency_admin(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_proficiency_admin()
def supports_proficiency_objective_bank_assignment(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_proficiency_objective_bank_assignment()
def supports_objective_bank_lookup(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_objective_bank_lookup()
def supports_objective_bank_admin(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_objective_bank_admin()
def supports_objective_bank_hierarchy(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_objective_bank_hierarchy()
def supports_objective_bank_hierarchy_design(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_objective_bank_hierarchy_design()
def get_objective_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_objective_record_types()
objective_record_types = property(fget=get_objective_record_types)
def get_objective_search_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_objective_search_record_types()
objective_search_record_types = property(fget=get_objective_search_record_types)
def get_activity_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_activity_record_types()
activity_record_types = property(fget=get_activity_record_types)
def get_activity_search_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_activity_search_record_types()
activity_search_record_types = property(fget=get_activity_search_record_types)
def get_proficiency_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_proficiency_record_types()
proficiency_record_types = property(fget=get_proficiency_record_types)
def get_proficiency_search_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_proficiency_search_record_types()
proficiency_search_record_types = property(fget=get_proficiency_search_record_types)
def get_objective_bank_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_objective_bank_record_types()
objective_bank_record_types = property(fget=get_objective_bank_record_types)
def get_objective_bank_search_record_types(self):
# Implemented from azosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_objective_bank_search_record_types()
objective_bank_search_record_types = property(fget=get_objective_bank_search_record_types)
class LearningManager(osid_managers.OsidManager, LearningProfile, learning_managers.LearningManager):
"""Adapts underlying LearningManager methodswith authorization checks."""
def __init__(self):
LearningProfile.__init__(self)
def initialize(self, runtime):
osid_managers.OsidManager.initialize(self, runtime)
config = self._my_runtime.get_configuration()
parameter_id = Id('parameter:learningProviderImpl@authz_adapter')
provider_impl = config.get_value_by_parameter(parameter_id).get_string_value()
self._provider_manager = runtime.get_manager('LEARNING', provider_impl)
# need to add version argument
def get_objective_lookup_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_objective_query_session()
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ObjectiveLookupSession')(
provider_session=self._provider_manager.get_objective_lookup_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
objective_lookup_session = property(fget=get_objective_lookup_session)
@raise_null_argument
def get_objective_lookup_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_objective_query_session_for_objective_bank(objective_bank_id)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ObjectiveLookupSession')(
provider_session=self._provider_manager.get_objective_lookup_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
def get_objective_query_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_objective_query_session()
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ObjectiveQuerySession')(
provider_session=self._provider_manager.get_objective_query_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
objective_query_session = property(fget=get_objective_query_session)
@raise_null_argument
def get_objective_query_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_objective_query_session_for_objective_bank(objective_bank_id)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ObjectiveQuerySession')(
provider_session=self._provider_manager.get_objective_query_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
def get_objective_admin_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveAdminSession')(
provider_session=self._provider_manager.get_objective_admin_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_admin_session = property(fget=get_objective_admin_session)
@raise_null_argument
def get_objective_admin_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveAdminSession')(
provider_session=self._provider_manager.get_objective_admin_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
def get_objective_hierarchy_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveHierarchySession')(
provider_session=self._provider_manager.get_objective_hierarchy_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_hierarchy_session = property(fget=get_objective_hierarchy_session)
@raise_null_argument
def get_objective_hierarchy_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveHierarchySession')(
provider_session=self._provider_manager.get_objective_hierarchy_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
def get_objective_hierarchy_design_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveHierarchyDesignSession')(
provider_session=self._provider_manager.get_objective_hierarchy_design_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_hierarchy_design_session = property(fget=get_objective_hierarchy_design_session)
@raise_null_argument
def get_objective_hierarchy_design_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveHierarchyDesignSession')(
provider_session=self._provider_manager.get_objective_hierarchy_design_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
def get_objective_sequencing_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveSequencingSession')(
provider_session=self._provider_manager.get_objective_sequencing_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_sequencing_session = property(fget=get_objective_sequencing_session)
@raise_null_argument
def get_objective_sequencing_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveSequencingSession')(
provider_session=self._provider_manager.get_objective_sequencing_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
def get_objective_objective_bank_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveObjectiveBankSession')(
provider_session=self._provider_manager.get_objective_objective_bank_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_objective_bank_session = property(fget=get_objective_objective_bank_session)
def get_objective_objective_bank_assignment_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveObjectiveBankAssignmentSession')(
provider_session=self._provider_manager.get_objective_objective_bank_assignment_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_objective_bank_assignment_session = property(fget=get_objective_objective_bank_assignment_session)
def get_objective_requisite_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveRequisiteSession')(
provider_session=self._provider_manager.get_objective_requisite_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_requisite_session = property(fget=get_objective_requisite_session)
@raise_null_argument
def get_objective_requisite_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveRequisiteSession')(
provider_session=self._provider_manager.get_objective_requisite_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
def get_objective_requisite_assignment_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveRequisiteAssignmentSession')(
provider_session=self._provider_manager.get_objective_requisite_assignment_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_requisite_assignment_session = property(fget=get_objective_requisite_assignment_session)
@raise_null_argument
def get_objective_requisite_assignment_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveRequisiteAssignmentSession')(
provider_session=self._provider_manager.get_objective_requisite_assignment_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
def get_activity_lookup_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_activity_query_session()
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ActivityLookupSession')(
provider_session=self._provider_manager.get_activity_lookup_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
activity_lookup_session = property(fget=get_activity_lookup_session)
@raise_null_argument
def get_activity_lookup_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_activity_query_session_for_objective_bank(objective_bank_id)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ActivityLookupSession')(
provider_session=self._provider_manager.get_activity_lookup_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
def get_activity_query_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_activity_query_session()
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ActivityQuerySession')(
provider_session=self._provider_manager.get_activity_query_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
activity_query_session = property(fget=get_activity_query_session)
@raise_null_argument
def get_activity_query_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_activity_query_session_for_objective_bank(objective_bank_id)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ActivityQuerySession')(
provider_session=self._provider_manager.get_activity_query_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
def get_activity_admin_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ActivityAdminSession')(
provider_session=self._provider_manager.get_activity_admin_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
activity_admin_session = property(fget=get_activity_admin_session)
@raise_null_argument
def get_activity_admin_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ActivityAdminSession')(
provider_session=self._provider_manager.get_activity_admin_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
def get_activity_objective_bank_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ActivityObjectiveBankSession')(
provider_session=self._provider_manager.get_activity_objective_bank_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
activity_objective_bank_session = property(fget=get_activity_objective_bank_session)
def get_activity_objective_bank_assignment_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ActivityObjectiveBankAssignmentSession')(
provider_session=self._provider_manager.get_activity_objective_bank_assignment_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
activity_objective_bank_assignment_session = property(fget=get_activity_objective_bank_assignment_session)
def get_proficiency_lookup_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_proficiency_query_session()
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ProficiencyLookupSession')(
provider_session=self._provider_manager.get_proficiency_lookup_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
proficiency_lookup_session = property(fget=get_proficiency_lookup_session)
@raise_null_argument
def get_proficiency_lookup_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_proficiency_query_session_for_objective_bank(objective_bank_id)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ProficiencyLookupSession')(
provider_session=self._provider_manager.get_proficiency_lookup_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
def get_proficiency_query_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_proficiency_query_session()
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ProficiencyQuerySession')(
provider_session=self._provider_manager.get_proficiency_query_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
proficiency_query_session = property(fget=get_proficiency_query_session)
@raise_null_argument
def get_proficiency_query_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_proficiency_query_session_for_objective_bank(objective_bank_id)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ProficiencyQuerySession')(
provider_session=self._provider_manager.get_proficiency_query_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
hierarchy_session=self._get_hierarchy_session(),
query_session=query_session)
def get_proficiency_admin_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ProficiencyAdminSession')(
provider_session=self._provider_manager.get_proficiency_admin_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
proficiency_admin_session = property(fget=get_proficiency_admin_session)
@raise_null_argument
def get_proficiency_admin_session_for_objective_bank(self, objective_bank_id):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ProficiencyAdminSession')(
provider_session=self._provider_manager.get_proficiency_admin_session_for_objective_bank(objective_bank_id),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
def get_proficiency_objective_bank_assignment_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ProficiencyObjectiveBankAssignmentSession')(
provider_session=self._provider_manager.get_proficiency_objective_bank_assignment_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
proficiency_objective_bank_assignment_session = property(fget=get_proficiency_objective_bank_assignment_session)
def get_objective_bank_lookup_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveBankLookupSession')(
provider_session=self._provider_manager.get_objective_bank_lookup_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_bank_lookup_session = property(fget=get_objective_bank_lookup_session)
def get_objective_bank_admin_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveBankAdminSession')(
provider_session=self._provider_manager.get_objective_bank_admin_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_bank_admin_session = property(fget=get_objective_bank_admin_session)
def get_objective_bank_hierarchy_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveBankHierarchySession')(
provider_session=self._provider_manager.get_objective_bank_hierarchy_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_bank_hierarchy_session = property(fget=get_objective_bank_hierarchy_session)
def get_objective_bank_hierarchy_design_session(self):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveBankHierarchyDesignSession')(
provider_session=self._provider_manager.get_objective_bank_hierarchy_design_session(),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager)
objective_bank_hierarchy_design_session = property(fget=get_objective_bank_hierarchy_design_session)
def get_learning_batch_manager(self):
raise Unimplemented()
learning_batch_manager = property(fget=get_learning_batch_manager)
class LearningProxyManager(osid_managers.OsidProxyManager, LearningProfile, learning_managers.LearningProxyManager):
"""Adapts underlying LearningProxyManager methodswith authorization checks."""
def __init__(self):
LearningProfile.__init__(self)
def initialize(self, runtime):
osid_managers.OsidProxyManager.initialize(self, runtime)
config = self._my_runtime.get_configuration()
parameter_id = Id('parameter:learningProviderImpl@authz_adapter')
provider_impl = config.get_value_by_parameter(parameter_id).get_string_value()
self._provider_manager = runtime.get_proxy_manager('LEARNING', provider_impl)
# need to add version argument
@raise_null_argument
def get_objective_lookup_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_objective_query_session(proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ObjectiveLookupSession')(
provider_session=self._provider_manager.get_objective_lookup_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_objective_lookup_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_objective_query_session_for_objective_bank(objective_bank_id, proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ObjectiveLookupSession')(
provider_session=self._provider_manager.get_objective_lookup_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_objective_query_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_objective_query_session(proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ObjectiveQuerySession')(
provider_session=self._provider_manager.get_objective_query_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_objective_query_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_objective_query_session_for_objective_bank(objective_bank_id, proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ObjectiveQuerySession')(
provider_session=self._provider_manager.get_objective_query_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_objective_admin_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveAdminSession')(
provider_session=self._provider_manager.get_objective_admin_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_admin_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveAdminSession')(
provider_session=self._provider_manager.get_objective_admin_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_hierarchy_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveHierarchySession')(
provider_session=self._provider_manager.get_objective_hierarchy_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_hierarchy_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveHierarchySession')(
provider_session=self._provider_manager.get_objective_hierarchy_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_hierarchy_design_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveHierarchyDesignSession')(
provider_session=self._provider_manager.get_objective_hierarchy_design_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_hierarchy_design_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveHierarchyDesignSession')(
provider_session=self._provider_manager.get_objective_hierarchy_design_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_sequencing_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveSequencingSession')(
provider_session=self._provider_manager.get_objective_sequencing_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_sequencing_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveSequencingSession')(
provider_session=self._provider_manager.get_objective_sequencing_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_objective_bank_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveObjectiveBankSession')(
provider_session=self._provider_manager.get_objective_objective_bank_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_objective_bank_assignment_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveObjectiveBankAssignmentSession')(
provider_session=self._provider_manager.get_objective_objective_bank_assignment_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_requisite_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveRequisiteSession')(
provider_session=self._provider_manager.get_objective_requisite_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_requisite_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveRequisiteSession')(
provider_session=self._provider_manager.get_objective_requisite_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_requisite_assignment_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveRequisiteAssignmentSession')(
provider_session=self._provider_manager.get_objective_requisite_assignment_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_requisite_assignment_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ObjectiveRequisiteAssignmentSession')(
provider_session=self._provider_manager.get_objective_requisite_assignment_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_activity_lookup_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_activity_query_session(proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ActivityLookupSession')(
provider_session=self._provider_manager.get_activity_lookup_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_activity_lookup_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_activity_query_session_for_objective_bank(objective_bank_id, proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ActivityLookupSession')(
provider_session=self._provider_manager.get_activity_lookup_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_activity_query_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_activity_query_session(proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ActivityQuerySession')(
provider_session=self._provider_manager.get_activity_query_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_activity_query_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_activity_query_session_for_objective_bank(objective_bank_id, proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ActivityQuerySession')(
provider_session=self._provider_manager.get_activity_query_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_activity_admin_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ActivityAdminSession')(
provider_session=self._provider_manager.get_activity_admin_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_activity_admin_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ActivityAdminSession')(
provider_session=self._provider_manager.get_activity_admin_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_activity_objective_bank_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ActivityObjectiveBankSession')(
provider_session=self._provider_manager.get_activity_objective_bank_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_activity_objective_bank_assignment_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ActivityObjectiveBankAssignmentSession')(
provider_session=self._provider_manager.get_activity_objective_bank_assignment_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_proficiency_lookup_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_proficiency_query_session(proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ProficiencyLookupSession')(
provider_session=self._provider_manager.get_proficiency_lookup_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_proficiency_lookup_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_proficiency_query_session_for_objective_bank(objective_bank_id, proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ProficiencyLookupSession')(
provider_session=self._provider_manager.get_proficiency_lookup_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_proficiency_query_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_template
try:
query_session = self._provider_manager.get_proficiency_query_session(proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ProficiencyQuerySession')(
provider_session=self._provider_manager.get_proficiency_query_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_proficiency_query_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
try:
query_session = self._provider_manager.get_proficiency_query_session_for_objective_bank(objective_bank_id, proxy)
query_session.use_federated_objective_bank_view()
except Unimplemented:
query_session = None
return getattr(sessions, 'ProficiencyQuerySession')(
provider_session=self._provider_manager.get_proficiency_query_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
proxy=proxy,
hierarchy_session=self._get_hierarchy_session(proxy),
query_session=query_session)
@raise_null_argument
def get_proficiency_admin_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ProficiencyAdminSession')(
provider_session=self._provider_manager.get_proficiency_admin_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_proficiency_admin_session_for_objective_bank(self, objective_bank_id, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_template
return getattr(sessions, 'ProficiencyAdminSession')(
provider_session=self._provider_manager.get_proficiency_admin_session_for_objective_bank(objective_bank_id, proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_proficiency_objective_bank_assignment_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ProficiencyObjectiveBankAssignmentSession')(
provider_session=self._provider_manager.get_proficiency_objective_bank_assignment_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_bank_lookup_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveBankLookupSession')(
provider_session=self._provider_manager.get_objective_bank_lookup_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_bank_admin_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveBankAdminSession')(
provider_session=self._provider_manager.get_objective_bank_admin_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_bank_hierarchy_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveBankHierarchySession')(
provider_session=self._provider_manager.get_objective_bank_hierarchy_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
@raise_null_argument
def get_objective_bank_hierarchy_design_session(self, proxy):
# Implemented from azosid template for -
# osid.resource.ResourceManager.get_resource_admin_session_template
return getattr(sessions, 'ObjectiveBankHierarchyDesignSession')(
provider_session=self._provider_manager.get_objective_bank_hierarchy_design_session(proxy),
authz_session=self._get_authz_session(),
override_lookup_session=self._get_override_lookup_session(),
provider_manager=self._provider_manager,
proxy=proxy)
def get_learning_batch_proxy_manager(self):
raise Unimplemented()
learning_batch_proxy_manager = property(fget=get_learning_batch_proxy_manager)
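The module above is a thin authorization layer: LearningProfile capability checks delegate directly to the underlying provider manager (selected at initialize() time through the parameter:learningProviderImpl@authz_adapter runtime parameter), while the session getters wrap the provider's sessions together with an authorization session, an override lookup session, and optional hierarchy and query sessions. The following is a minimal, self-contained sketch of that delegation pattern using hypothetical stub classes; it is not part of dlkit.

```python
# Illustrative sketch only (not dlkit code): hypothetical stand-in classes showing
# the delegation pattern used by the authz-adapter managers above.
class StubProviderManager:
    """Plays the role of self._provider_manager."""

    def supports_objective_lookup(self):
        return True

    def get_objective_lookup_session(self):
        return "provider objective lookup session"


class StubAuthzLearningManager:
    """Capability checks pass through; session getters wrap the provider session."""

    def __init__(self, provider_manager):
        self._provider_manager = provider_manager

    def supports_objective_lookup(self):
        # Same shape as LearningProfile.supports_objective_lookup above.
        return self._provider_manager.supports_objective_lookup()

    def get_objective_lookup_session(self):
        # dlkit wraps the provider session in an authz-aware session object;
        # a plain dict stands in for that wrapper here.
        return {
            "provider_session": self._provider_manager.get_objective_lookup_session(),
            "authz_session": "authz session (placeholder)",
            "override_lookup_session": "override lookup session (placeholder)",
        }


manager = StubAuthzLearningManager(StubProviderManager())
print(manager.supports_objective_lookup())     # True
print(manager.get_objective_lookup_session())  # dict standing in for the wrapped session
```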
Row 1 line statistics: avg_line_length 52.535805, max_line_length 140, alphanum_fraction 0.75085

Row 2

| field | value |
|---|---|
| hexsha | bcdb28ac31bb1112c9527d11c61cb086c9c0d795 |
| size | 978 |
| ext / lang | py / Python |
| repo path (stars/issues/forks) | HTL.tab/Rebar.panel/SelectSameScheduleMarkRebars.pushbutton/script.py |
| repo name | htlcnn/pyrevitscripts |
| repo head hexsha | b898a3a5e8d212570254772ae314f343498b1398 |
| licenses | ["MIT"] |
| max_stars_count | null |
| max_issues_count | null |
| max_forks_count | 2 (2020-02-15T14:39:46.000Z to 2020-03-11T18:24:47.000Z) |

content:
# -*- coding: utf-8 -*-
__title__ = 'Select Same\nSchedule Mark Rebar'
__author__ = 'htl'
import clr
clr.AddReference('RevitAPI')
import Autodesk
import rpw
from rpw import doc, uidoc
from System.Collections.Generic import List
class RebarFilter(Autodesk.Revit.UI.Selection.ISelectionFilter):
    """Selection filter that only lets the user pick Structural Rebar elements."""
    def AllowElement(self, element):
        return element.Category.Name == 'Structural Rebar'


# Ask the user to pick a single rebar in the active view.
selection = uidoc.Selection.PickObject(Autodesk.Revit.UI.Selection.ObjectType.Element,
                                       RebarFilter(), 'Pick Rebar')
rebar = rpw.db.Element.from_id(selection.ElementId).unwrap()

# Collect every rebar in the active view whose Schedule Mark matches the picked one.
same_schedule_mark = rpw.db.Collector(of_category='Rebar',
                                      view=doc.ActiveView,
                                      where=lambda x: x.ScheduleMark == rebar.ScheduleMark)

# Replace the current UI selection with the matching rebars.
rids = List[rpw.DB.ElementId](same_schedule_mark.element_ids)
uidoc.Selection.SetElementIds(rids)
Row 2 line statistics: avg_line_length 36.222222, max_line_length 89, alphanum_fraction 0.662577

Row 3

| field | value |
|---|---|
| hexsha | 60f4cf950f40aaaa2c4c4d1225e07a50822dd05f |
| size | 1,400 |
| ext / lang | py / Python |
| repo path (stars/issues/forks) | contentcuration/contentcuration/migrations/0028_auto_20160926_1527.py |
| repo name | Tlazypanda/studio |
| repo head hexsha | cd1c2f169c705027cdd808cbbcae907d0a9b21d2 |
| licenses | ["MIT"] |
| max_stars_count | 1 (2019-03-30T18:14:25.000Z to 2019-03-30T18:14:25.000Z) |
| max_issues_count | 2 (2019-04-06T07:06:08.000Z to 2019-04-08T23:33:53.000Z) |
| max_forks_count | 1 (2020-10-20T05:21:56.000Z to 2020-10-20T05:21:56.000Z) |

content:
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-26 22:27
from __future__ import unicode_literals
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0027_auto_20160926_0945'),
]
operations = [
migrations.AlterField(
model_name='exercise',
name='mastery_model',
field=models.CharField(choices=[('do_all', 'Do all'), ('num_correct_in_a_row_10', '10 in a row'), ('num_correct_in_a_row_3',
'3 in a row'), ('num_correct_in_a_row_5', '5 in a row'), ('skill_check', 'Skill check')], default='do_all', max_length=200),
),
migrations.AlterField(
model_name='formatpreset',
name='id',
field=models.CharField(choices=[('high_res_video', 'High resolution video'), ('low_res_video', 'Low resolution video'), ('vector_video', 'Vector video'), ('video_thumbnail', 'Thumbnail'), ('video_subtitle', 'Subtitle'), (
'audio', 'Audio'), ('audio_thumbnail', 'Thumbnail'), ('document', 'Document'), ('document_thumbnail', 'Thumbnail'), ('exercise', 'Exercise'), ('exercise_thumbnail', 'Thumbnail')], max_length=150, primary_key=True, serialize=False),
),
]
| 48.275862
| 247
| 0.588571
|
82569570e094a77f1bb6ee940f3ae08d57c0055f
| 38,896
|
py
|
Python
|
base/services/schema_differ/mysql.py
|
joeportela/tinyAPI
|
f2469c38a605b00519acd0b79af17d0041f5ae7b
|
[
"MIT"
] | 6
|
2016-11-18T22:32:44.000Z
|
2021-04-01T17:02:13.000Z
|
base/services/schema_differ/mysql.py
|
joeportela/tinyAPI
|
f2469c38a605b00519acd0b79af17d0041f5ae7b
|
[
"MIT"
] | 1
|
2018-12-20T23:07:52.000Z
|
2018-12-20T23:07:52.000Z
|
base/services/schema_differ/mysql.py
|
joeportela/tinyAPI
|
f2469c38a605b00519acd0b79af17d0041f5ae7b
|
[
"MIT"
] | 10
|
2018-02-23T00:08:21.000Z
|
2020-10-01T03:06:12.000Z
|
# ----- Info ------------------------------------------------------------------
__author__ = 'Michael Montero <mcmontero@gmail.com>'
# ----- Imports ---------------------------------------------------------------
from tinyAPI.base.config import ConfigManager
from tinyAPI.base.data_store.provider import DataStoreMySQL
from tinyAPI.base.services.mysql.index_check import MySQLIndexUsageParser
import os
import re
import subprocess
__all__ = [
'SchemaDiffer'
]
# ----- Public Classes --------------------------------------------------------
class SchemaDiffer(object):
'''Finds all of the schema differences between two MySQL databases.'''
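    # Hedged usage sketch (the connection and schema names below are placeholders,
    # not part of this module): diff two schemas and write the numbered upgrade
    # scripts into the current directory.
    #
    #     differ = SchemaDiffer('source_conn', 'source_db',
    #                           'target_conn', 'target_db')
    #     differ.execute()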
def __init__(self,
source_connection_name,
source_db_name,
target_connection_name,
target_db_name):
self.__cli = None
self.__source = None
self.__target = None
self.__source_db_name = None
self.__target_db_name = None
self.__ref_tables_to_create = None
self.__ref_tables_to_drop = None
self.__tables_to_create = None
self.__tables_to_drop = None
self.__table_create_drop_list = None
self.__ref_table_drop_list = None
self.__columns_to_create = None
self.__columns_to_drop = None
self.__columns_to_modify = None
self.__column_uniqueness_to_drop = None
self.__foreign_keys_to_create = None
self.__foreign_keys_to_drop = None
self.__ref_data_to_add = None
self.__ref_data_to_remove = None
self.__ref_data_to_modify = None
self.__indexes_to_create = None
self.__indexes_to_drop = None
self.__unique_keys_to_create = None
self.__unique_keys_to_drop = None
self.__index_usage_parser = None
self.__source = DataStoreMySQL()
self.__source.select_db(source_connection_name, 'information_schema')
self.__source_db_name = source_db_name
self.__target = DataStoreMySQL()
self.__target.select_db(target_connection_name, 'information_schema')
self.__target_db_name = target_db_name
self.__enable_write_upgrade_scripts = True
def __compute_column_differences(self):
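        # Columns are keyed as 'table.column'; set differences between the source
        # and target key sets yield the columns to create/drop, and a field-by-field
        # comparison of the shared keys yields the columns to modify.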
self.__notice('Computing column differences...')
query = \
"""select table_name,
column_name,
column_default,
is_nullable,
character_set_name,
collation_name,
column_type,
column_key,
extra
from columns
where table_schema = %s
and table_name not like '%%\_ref\_%%'"""
if self.__table_create_drop_list:
query += ' and table_name not in (' \
+ self.__table_create_drop_list \
+ ')'
source_columns = \
self.__query_source(
query,
[self.__source_db_name])
target_columns = \
self.__query_target(
query,
[self.__target_db_name])
source_names = []
source = {}
if source_columns:
for source_column in source_columns:
name = source_column['table_name'] \
+ '.' \
+ source_column['column_name']
source_names.append(name)
source[name] = source_column
target_names = []
target = {}
if target_columns:
for target_column in target_columns:
name = target_column['table_name'] \
+ '.' \
+ target_column['column_name']
target_names.append(name)
target[name] = target_column
self.__columns_to_create = \
list(set(source_names).difference(target_names))
for column in self.__columns_to_create:
self.__notice('(+) ' + column, 1)
self.__columns_to_drop = \
list(set(target_names).difference(source_names))
for column in self.__columns_to_drop:
self.__notice('(-) ' + column, 1)
self.__columns_to_modify = {}
self.__column_uniqueness_to_drop = []
for name, data in source.items():
if name in self.__columns_to_create or \
name in self.__columns_to_drop:
                continue
if name not in target.keys():
raise SchemaDifferException(
'could not find column "' + name + '" in the list of '
+ 'target columns')
if data['column_key'] != 'UNI' and \
target[name]['column_key'] == 'UNI':
self.__notice('(-) ' + name + ' (uniqueness)', 1)
self.__column_uniqueness_to_drop.append(name)
for key, value in data.items():
if target[name][key] != value and key != 'column_key':
self.__notice('(=) ' + name + ' (' + key + ')', 1)
self.__columns_to_modify[name] = data
break
def __compute_foreign_key_differences(self):
self.__notice('Computing foreign key differences...')
query = \
"""select k.table_name,
k.column_name,
k.constraint_name,
k.ordinal_position,
k.referenced_table_name,
k.referenced_column_name,
c.delete_rule
from key_column_usage k
left outer join referential_constraints c
on c.constraint_schema = k.constraint_schema
and c.constraint_name = k.constraint_name
where k.constraint_schema = %s
and k.constraint_name like '%%\_fk'"""
        if self.__table_create_drop_list:
            query += ' and k.table_name not in (' \
                     + self.__table_create_drop_list \
                     + ')'
source_fks = \
self.__process_fks(
self.__query_source(
query,
[self.__source_db_name]))
target_fks = \
self.__process_fks(
self.__query_target(
query,
[self.__target_db_name]))
source_fk_names = source_fks.keys()
target_fk_names = target_fks.keys()
foreign_keys_to_create = \
list(set(source_fk_names).difference(target_fk_names))
foreign_keys_to_drop = \
list(set(target_fk_names).difference(source_fk_names))
self.__foreign_keys_to_create = []
for name in foreign_keys_to_create:
self.__notice('(+) ' + name, 1)
self.__foreign_keys_to_create.append(source_fks[name])
self.__foreign_keys_to_drop = []
for name in foreign_keys_to_drop:
self.__notice('(-) ' + name, 1)
self.__foreign_keys_to_drop.append(target_fks[name])
for name, fk in source_fks.items():
if name in target_fks.keys() and \
name not in self.__foreign_keys_to_create and \
name not in self.__foreign_keys_to_drop:
if source_fks[name]['table_name'] != \
target_fks[name]['table_name'] or \
source_fks[name]['ref_table_name'] != \
target_fks[name]['ref_table_name'] or \
source_fks[name]['delete_rule'] != \
target_fks[name]['delete_rule'] or \
','.join(list(source_fks[name]['cols'].values())) != \
','.join(list(target_fks[name]['cols'].values())) or \
','.join(list(source_fks[name]['ref_cols'].values())) != \
','.join(list(target_fks[name]['ref_cols'].values())):
self.__notice('(=) ' + name, 1)
self.__foreign_keys_to_drop.append(source_fks[name])
self.__foreign_keys_to_create.append(source_fks[name])
def __compute_index_differences(self):
self.__notice('Computing index differences...')
query = \
"""select table_name,
index_name,
seq_in_index,
column_name
from statistics
where index_schema = %s
and index_name like '%%\_idx'"""
if self.__table_create_drop_list:
query += ' and table_name not in (' \
+ self.__table_create_drop_list \
+ ')'
source_indexes = \
self.__query_source(
query,
[self.__source_db_name])
target_indexes = \
self.__query_target(
query,
[self.__target_db_name])
source_names = []
source = {}
for index in source_indexes:
source_names.append(index['index_name'])
if index['index_name'] not in source:
source[index['index_name']] = {
'table_name': index['table_name'],
'cols': []
}
source[index['index_name']]['cols'] \
.insert(index['seq_in_index'], index['column_name'])
target_names = []
target = {}
for index in target_indexes:
target_names.append(index['index_name'])
if index['index_name'] not in target:
target[index['index_name']] = {
'table_name': index['table_name'],
'cols': []
}
target[index['index_name']]['cols'] \
.insert(index['seq_in_index'], index['column_name'])
indexes_to_create = \
list(set(source_names).difference(target_names))
indexes_to_drop = \
list(set(target_names).difference(source_names))
indexes_to_modify = \
[]
for name, data in source.items():
if name in target.keys() and \
','.join(data['cols']) != ','.join(target[name]['cols']):
indexes_to_modify.append(name)
self.__indexes_to_create = []
for name in indexes_to_create:
self.__notice('(+) ' + name, 1)
self.__indexes_to_create.append({
'table_name': source[name]['table_name'],
'index_name': name,
'cols': source[name]['cols']
})
self.__indexes_to_drop = []
for name in indexes_to_drop:
self.__notice('(-) ' + name, 1)
self.__indexes_to_drop.append({
'table_name': target[name]['table_name'],
'index_name': name,
'cols': target[name]['cols']
})
for name in indexes_to_modify:
self.__notice('(=) ' + name, 1)
self.__indexes_to_create.append({
'table_name': source[name]['table_name'],
'index_name': name,
'cols': source[name]['cols']
})
self.__indexes_to_drop.append({
'table_name': target[name]['table_name'],
'index_name': name,
'cols': target[name]['cols']
})
def __compute_ref_table_data_differences(self):
self.__notice('Computing reference table data differences...')
query = \
"""select table_name
from tables
where table_schema = %s
and table_name like '%%\_ref\_%%'"""
if self.__ref_table_drop_list:
query += ' and table_name not in (' \
+ self.__ref_table_drop_list \
+ ')'
source_tables = \
self.__flatten_tables(
self.__query_source(
query,
[self.__source_db_name]))
target_tables = \
self.__flatten_tables(
self.__query_target(
query,
[self.__target_db_name]))
source_data = {}
for table in source_tables:
source_data[table] = {}
records = self.__query_source(
'''select id,
value,
display_order
from ''' + self.__source_db_name + '.' + table + '''
order by id asc''')
for record in records:
source_data[table][record['id']] = [
str(record['value']),
str(record['display_order'])
]
target_data = {}
for table in target_tables:
target_data[table] = {}
records = self.__query_target(
'''select id,
value,
display_order
from ''' + self.__target_db_name + '.' + table + '''
order by id asc''')
for record in records:
target_data[table][record['id']] = [
str(record['value']),
str(record['display_order'])
]
self.__ref_data_to_add = []
self.__ref_data_to_modify = []
for table, data in source_data.items():
for id, values in data.items():
if table not in target_data or \
id not in target_data[table]:
self.__notice('(+) ' + table + ' #' + str(id), 1)
self.__ref_data_to_add.append([
table,
id,
values[0],
values[1]
])
else:
if ','.join(values) != ','.join(target_data[table][id]):
self.__notice('(=) ' + table + ' #' + str(id), 1)
self.__ref_data_to_modify.append([
table,
id,
values[0],
values[1]
])
self.__ref_data_to_remove = []
for table, data in target_data.items():
for id, values in data.items():
if table not in source_data or \
id not in source_data[table]:
                    self.__notice('(-) ' + table + ' #' + str(id), 1)
self.__ref_data_to_remove.append([
table,
id,
values[0],
values[1]
])
def __compute_ref_table_differences(self):
self.__notice('Computing reference table differences...')
query = \
"""select table_name
from tables
where table_schema = %s
and table_name like '%%\_ref\_%%'"""
source_tables = \
self.__flatten_tables(
self.__query_source(
query,
[self.__source_db_name]))
target_tables = \
self.__flatten_tables(
self.__query_target(
query,
[self.__target_db_name]))
self.__ref_tables_to_create = \
list(set(source_tables).difference(target_tables))
for table in self.__ref_tables_to_create:
self.__notice('(+) ' + table, 1)
drop_list = []
self.__ref_tables_to_drop = \
list(set(target_tables).difference(source_tables))
for table in self.__ref_tables_to_drop:
self.__notice('(-) ' + table, 1)
drop_list.append("'" + table + "'")
self.__ref_table_drop_list = ','.join(drop_list)
def __compute_table_differences(self):
self.__notice('Computing table differences...')
create_drop_list = []
query = \
"""select table_name
from tables
where table_schema = %s
and table_name not like '%%\_ref\_%%'"""
source_tables = \
self.__flatten_tables(
self.__query_source(
query,
[self.__source_db_name]))
target_tables = \
self.__flatten_tables(
self.__query_target(
query,
[self.__target_db_name]))
self.__tables_to_create = \
list(set(source_tables).difference(target_tables))
for table in self.__tables_to_create:
self.__notice('(+) ' + table, 1)
create_drop_list.append("'" + table + "'")
self.__tables_to_drop = \
list(set(target_tables).difference(source_tables))
for table in self.__tables_to_drop:
self.__notice('(-) ' + table, 1)
create_drop_list.append("'" + table + "'")
self.__table_create_drop_list = ','.join(create_drop_list)
def __compute_unique_key_differences(self):
self.__notice('Computing unique key differences...')
query = \
"""select table_name,
constraint_name,
column_name,
ordinal_position
from key_column_usage
where table_schema = %s
and constraint_name like '%%\_uk'"""
if self.__table_create_drop_list:
query += ' and table_name not in (' \
+ self.__table_create_drop_list \
+ ')'
source_uks = \
self.__process_uks(
self.__query_source(
query,
[self.__source_db_name]))
target_uks = \
self.__process_uks(
self.__query_target(
query,
[self.__target_db_name]))
source_uk_names = source_uks.keys()
target_uk_names = target_uks.keys()
unique_keys_to_create = \
list(set(source_uk_names).difference(target_uk_names))
unique_keys_to_drop = \
list(set(target_uk_names).difference(source_uk_names))
self.__unique_keys_to_create = []
for name in unique_keys_to_create:
self.__notice('(+) ' + name, 1)
self.__unique_keys_to_create.append(source_uks[name])
self.__unique_keys_to_drop = []
for name in unique_keys_to_drop:
self.__notice('(-) ' + name, 1)
self.__unique_keys_to_drop.append(target_uks[name])
for name, uk in source_uks.items():
if name in target_uks.keys() and \
name not in unique_keys_to_create and \
name not in unique_keys_to_drop:
if source_uks[name]['table_name'] != \
target_uks[name]['table_name'] or \
','.join(source_uks[name]['cols'].values()) != \
','.join(target_uks[name]['cols'].values()):
self.__notice('(=) ' + name, 1)
self.__unique_keys_to_drop.append(source_uks[name])
self.__unique_keys_to_create.append(source_uks[name])
def dont_write_upgrade_scripts(self):
self.__enable_write_upgrade_scripts = False
return self
def __error(self, message, indent=None):
if not self.__cli:
return
self.__cli.error(message, indent)
def execute(self):
self.__verify_schemas()
self.__compute_ref_table_differences()
self.__compute_table_differences()
self.__compute_column_differences()
self.__compute_foreign_key_differences()
self.__compute_ref_table_data_differences()
self.__compute_index_differences()
self.__compute_unique_key_differences()
self.__perform_index_check()
if not self.there_are_differences():
self.__notice('Both schemas are the same!')
exit(0)
self.__write_upgrade_scripts()
self.__target.close()
self.__source.close()
return self
def __flatten_tables(self, tables=tuple()):
if not tables:
return []
results = []
for table in tables:
results.append(table['table_name'])
return results
def __get_column_terms(self, column_data):
terms = []
if column_data['extra'] is not None and \
len(column_data['extra']) > 0:
terms.append(column_data['extra'])
if column_data['character_set_name']:
terms.append('character set ' + column_data['character_set_name'])
if column_data['collation_name']:
terms.append('collate ' + column_data['collation_name'])
if column_data['column_key'] == 'UNI':
terms.append('unique')
if column_data['column_default']:
terms.append('default '
+ ('current_timestamp'
if column_data['column_default'] ==
'current_timestamp'
else "'" + column_data['column_default'] + "'"))
if column_data['is_nullable'] == 'NO':
terms.append('not null')
return terms
def get_column_uniqueness_to_drop(self):
return self.__column_uniqueness_to_drop
def get_columns_to_create(self):
return self.__columns_to_create
def get_columns_to_drop(self):
return self.__columns_to_drop
def get_columns_to_modify(self):
return self.__columns_to_modify
def get_foreign_keys_to_create(self):
return self.__foreign_keys_to_create
def get_foreign_keys_to_drop(self):
return self.__foreign_keys_to_drop
def get_indexes_to_create(self):
return self.__indexes_to_create
def get_indexes_to_drop(self):
return self.__indexes_to_drop
def get_ref_data_to_add(self):
return self.__ref_data_to_add
def get_ref_data_to_modify(self):
return self.__ref_data_to_modify
def get_ref_data_to_remove(self):
return self.__ref_data_to_remove
def get_ref_tables_to_create(self):
return self.__ref_tables_to_create
def get_ref_tables_to_drop(self):
return self.__ref_tables_to_drop
def get_tables_to_create(self):
return self.__tables_to_create
def get_tables_to_drop(self):
return self.__tables_to_drop
def get_unique_keys_to_create(self):
return self.__unique_keys_to_create
def get_unique_keys_to_drop(self):
return self.__unique_keys_to_drop
def __ksort(self, data):
results = {}
for key in sorted(data.keys()):
results[key] = data[key]
return results
def __notice(self, message, indent=None):
if not self.__cli:
return
self.__cli.notice(message, indent)
def __perform_index_check(self):
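        # Optional step: if an 'index check' entry exists in the configuration,
        # run the external script it points to against the configured server and
        # database, then parse its output for clustered and redundant indexes.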
self.__notice('Performing index check...')
try:
index_check = ConfigManager.value('index check')
except:
self.__notice('not enabled; skipping', 1)
return False
if not os.path.isfile(index_check['path']):
raise RuntimeError(
'could not find script at "{}"'
.format(index_check['path'])
)
output = \
subprocess.check_output(
[index_check['path'],
'--server={}'.format(index_check['server']),
index_check['database']]
)
self.__index_usage_parser = \
MySQLIndexUsageParser() \
.execute(output)
if len(self.__index_usage_parser.clustered_indexes) > 0:
self.__notice('clustered indexes', 1)
for entry in self.__index_usage_parser.clustered_indexes:
self.__notice(
'(~) {}'
.format(
entry[0][:63] + '..'
if len(entry[0]) >= 66 else
entry[0]
),
2
)
if len(self.__index_usage_parser.redundant_indexes) > 0:
self.__notice('redundant indexes', 1)
for entry in self.__index_usage_parser.redundant_indexes:
self.__notice(
'(!) {}'
.format(
entry[0][:63] + '..'
if len(entry[0]) >= 66 else
entry[0]
),
2
)
def __process_fks(self, data=tuple()):
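        # Normalize the raw key_column_usage rows into one dict per constraint,
        # with 'cols' and 'ref_cols' keyed (and later sorted) by ordinal position
        # so that multi-column keys compare consistently between schemas.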
if not data:
return {}
fks = {}
for fk in data:
if fk['constraint_name'] not in fks.keys():
fks[fk['constraint_name']] = {
'name': fk['constraint_name'],
'table_name': fk['table_name'],
'ref_table_name': fk['referenced_table_name'],
'cols': {},
'ref_cols': {},
'delete_rule': fk['delete_rule']
}
fks[fk['constraint_name']] \
['cols'] \
[int(fk['ordinal_position'])] = \
fk['column_name']
fks[fk['constraint_name']] \
['ref_cols'] \
[int(fk['ordinal_position'])] = \
fk['referenced_column_name']
for constraint_name, fk in fks.items():
fks[constraint_name]['cols'] = \
self.__ksort(fks[constraint_name]['cols'])
fks[constraint_name]['ref_cols'] = \
self.__ksort(fks[constraint_name]['ref_cols'])
return fks
def __process_uks(self, data=tuple()):
uks = {}
for uk in data:
if uk['constraint_name'] not in uks.keys():
uks[uk['constraint_name']] = {
'name': uk['constraint_name'],
'table_name': uk['table_name'],
'cols': {}
}
uks[uk['constraint_name']] \
['cols'] \
[int(uk['ordinal_position'])] = \
uk['column_name']
for name, uk in uks.items():
uks[name]['cols'] = self.__ksort(uks[name]['cols'])
return uks
def __query_source(self, query, binds=tuple()):
return self.__source.query(query, binds)
def __query_target(self, query, binds=tuple()):
return self.__target.query(query, binds)
def set_cli(self, cli):
self.__cli = cli
return self
def there_are_differences(self):
return self.__ref_tables_to_create or \
self.__ref_tables_to_drop or \
self.__tables_to_create or \
self.__tables_to_drop or \
self.__columns_to_create or \
self.__columns_to_drop or \
self.__columns_to_modify or \
self.__column_uniqueness_to_drop or \
self.__foreign_keys_to_create or \
self.__foreign_keys_to_drop or \
self.__ref_data_to_add or \
self.__ref_data_to_remove or \
self.__ref_data_to_modify or \
self.__indexes_to_create or \
self.__indexes_to_drop or \
self.__unique_keys_to_create or \
self.__unique_keys_to_drop
def __verify_schemas(self):
self.__notice('Verifying schemas...')
query = \
'''select 1 as schema_exists
from schemata
where schema_name = %s'''
record = self.__source.query(query, [self.__source_db_name])
if not record:
self.__error('source schema "'
+ self.__source_db_name
+ '" does not exist',
1)
exit(1)
record = self.__target.query(query, [self.__target_db_name])
if not record:
self.__error('target schema "'
+ self.__target_db_name
+ '" does not exist',
1)
exit(1)
def __write_add_foreign_key_constraint_sql(self):
file_name = '55-foreign_keys.sql'
self.__notice(file_name, 1)
contents = ''
for fk in self.__foreign_keys_to_create:
contents += \
('alter table ' + fk['table_name'] + '\n'
+ ' add constraint ' + fk['name'] + '\n'
+ ' foreign key (' + ', '.join(fk['cols'].values()) + ')\n'
+ ' references ' + fk['ref_table_name'] + '\n'
+ ' (' + ', '.join(fk['ref_cols'].values()) + ')\n'
+ ' on delete ' + fk['delete_rule'].lower() + ';\n\n')
self.__write_file(file_name, contents)
def __write_add_indexes_sql(self):
file_name = '65-indexes.sql'
self.__notice(file_name, 1)
contents = ''
for index in self.__indexes_to_create:
contents += 'create index ' + index['index_name'] + '\n' \
+ ' on ' + index['table_name'] + '\n' \
+ ' (' + ', '.join(index['cols']) + ');\n\n'
self.__write_file(file_name, contents)
def __write_add_modify_columns_sql(self):
file_name = '35-columns.sql'
self.__notice(file_name, 1)
contents = ''
for name in self.__columns_to_create:
table_name, column_name = name.split('.')
column = self.__source.query(
'''select table_name,
column_name,
column_default,
is_nullable,
character_set_name,
collation_name,
column_type,
column_key,
extra
from information_schema.columns
where table_name = %s
and column_name = %s''',
[table_name, column_name])
contents += 'alter table ' + column[0]['table_name'] + "\n" \
+ ' add ' + column[0]['column_name'] + "\n" \
+ ' ' + column[0]['column_type']
terms = self.__get_column_terms(column[0])
if terms:
contents += "\n"
for index in range(len(terms)):
terms[index] = " " + terms[index]
contents += "\n".join(terms) + ";\n\n"
for column in self.__columns_to_modify.values():
contents += 'alter table ' + column['table_name'] + "\n" \
+ ' modify ' + column['column_name'] + "\n" \
+ ' ' + column['column_type']
terms = self.__get_column_terms(column)
if terms:
contents += "\n"
for index in range(len(terms)):
terms[index] = " " + terms[index]
contents += "\n".join(terms) + ";\n\n"
for column in self.__columns_to_drop:
table_name, column_name = column.split('.')
contents += 'alter table ' + table_name + "\n" \
+ ' drop ' + column_name + ";\n\n"
for column in self.__column_uniqueness_to_drop:
table_name, column_name = column.split('.')
index = self.__target.query(
'''show index
from ''' + self.__target_db_name + "." + table_name + '''
where column_name = %s''',
[column_name])
contents += 'alter table ' + index[0]['Table'] + "\n" \
+ ' drop index ' + index[0]['Key_name'] + ";\n\n"
self.__write_file(file_name, contents)
def __write_add_ref_tables_sql(self):
file_name = '10-ref_tables.sql'
self.__notice(file_name, 1)
contents = ''
for name in self.__ref_tables_to_create:
record = self.__source.query(
'show create table ' + self.__source_db_name + '.' + name)
contents += record[0]['Create Table'] + ";\n\n"
if contents:
contents += "\n"
self.__write_file(file_name, contents)
def __write_add_tables_sql(self):
file_name = '30-tables.sql'
self.__notice(file_name, 1)
contents = ''
for name in self.__tables_to_create:
record = self.__source.query(
'show create table ' + self.__source_db_name + '.' + name)
contents += record[0]['Create Table'] + ";\n\n\n"
self.__write_file(file_name, contents)
def __write_add_unique_key_constraint_sql(self):
file_name = '60-unique_keys.sql'
self.__notice(file_name, 1)
contents = ''
for uk in self.__unique_keys_to_create:
contents += \
'alter table ' + uk['table_name'] + '\n' \
+ ' add unique key ' + uk['name'] + '\n' \
+ ' (' + ', '.join(uk['cols'].values()) + ');\n\n'
self.__write_file(file_name, contents)
def __write_drop_foreign_key_constraint_sql(self):
file_name = '15-foreign_keys.sql'
self.__notice(file_name, 1)
contents = ''
for fk in self.__foreign_keys_to_drop:
contents += 'alter table ' + fk['table_name'] + "\n" \
+ ' drop foreign key ' + fk['name'] + ';\n\n'
self.__write_file(file_name, contents)
def __write_drop_indexes_sql(self):
file_name = '25-indexes.sql'
self.__notice(file_name, 1)
contents = ''
for index in self.__indexes_to_drop:
contents += 'alter table ' + index['table_name'] + "\n" \
+ ' drop index ' + index['index_name'] + ";\n\n"
self.__write_file(file_name, contents)
def __write_drop_ref_tables_sql(self):
file_name = '45-ref_tables.sql'
self.__notice(file_name, 1)
contents = ''
for name in self.__ref_tables_to_drop:
contents += 'drop table if exists ' + name + ';\n\n'
self.__write_file(file_name, contents)
def __write_drop_tables_sql(self):
file_name = '40-tables.sql'
self.__notice(file_name, 1)
contents = ''
for name in self.__tables_to_drop:
contents += 'drop table if exists ' + name + ';\n\n'
self.__write_file(file_name, contents)
def __write_drop_unique_key_constraint_sql(self):
file_name = '20-unique_keys.sql'
self.__notice(file_name, 1)
contents = ''
for uk in self.__unique_keys_to_drop:
contents += 'alter table ' + uk['table_name'] + "\n" \
+ ' drop key ' + uk['name'] + ";\n\n"
self.__write_file(file_name, contents)
def __write_index_check(self):
if self.__index_usage_parser is None:
return
file_name = '90-index_check.txt'
self.__notice(file_name, 1)
contents = ''
for data in self.__index_usage_parser.redundant_indexes:
contents += \
'{}\n {}\n is duplicate of \n{}\n {}\n\n' \
.format(
data[0],
data[1],
data[2],
data[3]
)
if len(self.__index_usage_parser.redundant_indexes) > 0 and \
len(self.__index_usage_parser.clustered_indexes) > 0:
contents += '-' * 78 + '\n\n'
for data in self.__index_usage_parser.clustered_indexes:
contents += \
'{}\n {}\n{}is clustered and potentially redundant\n\n' \
.format(
data[0],
data[1],
' ' * 8
)
self.__write_file(file_name, contents)
def __write_ref_table_data_sql(self):
file_name = '50-ref_data.sql'
self.__notice(file_name, 1)
contents = ''
for data in self.__ref_data_to_add:
contents += ('insert into ' + data[0] + "\n"
+ '(\n'
+ ' id,\n'
+ ' value,\n'
+ ' display_order\n'
+ ')\n'
+ 'values\n'
+ '(\n'
+ ' ' + str(data[1]) + ',\n'
+ " '" + re.sub("'", "''", data[2]) + "',\n"
+ ' ' + str(data[3]) + '\n'
+ ');\n'
+ 'commit;\n\n')
for data in self.__ref_data_to_modify:
contents += ('update ' + data[0] + '\n'
+ " set value = '" + data[2] + "',\n"
+ " display_order = " + str(data[3]) + "\n"
+ " where id = " + str(data[1]) + ";\n"
+ "commit;\n\n")
for data in self.__ref_data_to_remove:
contents += 'delete from ' + data[0] + '\n' \
+ ' where id = ' + str(data[1]) + ';\n' \
+ 'commit;\n\n'
self.__write_file(file_name, contents)
def __write_file(self, file_name, contents):
f = open(file_name, 'w')
f.write(contents)
f.close()
def __write_upgrade_scripts(self):
if not self.__enable_write_upgrade_scripts:
return
self.__notice('Writing upgrade scripts into current directory...')
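        # The numeric file-name prefixes (10-, 15-, 20-, ...) define the order in
        # which the scripts are meant to be applied: drop constraints and indexes
        # first, then add/alter tables and columns, then re-add keys and indexes.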
self.__write_add_ref_tables_sql()
self.__write_drop_foreign_key_constraint_sql()
self.__write_drop_unique_key_constraint_sql()
self.__write_drop_indexes_sql()
self.__write_add_tables_sql()
self.__write_add_modify_columns_sql()
self.__write_drop_tables_sql()
self.__write_drop_ref_tables_sql()
self.__write_ref_table_data_sql()
self.__write_add_foreign_key_constraint_sql()
self.__write_add_unique_key_constraint_sql()
self.__write_add_indexes_sql()
self.__write_index_check()
| 32.630872
| 80
| 0.505553
|
48f0dcecf22eabdb6e8fe0fd118b76126165015a
| 12,229
|
py
|
Python
|
coremltools/converters/caffe/_caffe_converter.py
|
dlminvestments/coremltools
|
cf6db67bab18346e132124783d46a32b8a7f52c6
|
[
"BSD-3-Clause"
] | null | null | null |
coremltools/converters/caffe/_caffe_converter.py
|
dlminvestments/coremltools
|
cf6db67bab18346e132124783d46a32b8a7f52c6
|
[
"BSD-3-Clause"
] | 75
|
2020-11-24T05:37:45.000Z
|
2022-02-25T15:14:23.000Z
|
coremltools/converters/caffe/_caffe_converter.py
|
dlminvestments/coremltools
|
cf6db67bab18346e132124783d46a32b8a7f52c6
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2017, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import os
import six as _six
from ...models import (
_MLMODEL_FULL_PRECISION,
_MLMODEL_HALF_PRECISION,
_VALID_MLMODEL_PRECISION_TYPES,
)
def convert(
model,
image_input_names=None,
is_bgr=False,
red_bias=0.0,
blue_bias=0.0,
green_bias=0.0,
gray_bias=0.0,
image_scale=1.0,
class_labels=None,
predicted_feature_name=None,
model_precision=_MLMODEL_FULL_PRECISION,
):
"""
Convert a Caffe model to Core ML format.
Parameters
----------
model: str | (str, str) | (str, str, str) | (str, str, dict)
A trained Caffe neural network model which can be represented as:
- Path on disk to a trained Caffe model (.caffemodel)
- A tuple of two paths, where the first path is the path to the .caffemodel
file while the second is the path to the deploy.prototxt.
- A tuple of three paths, where the first path is the path to the
trained .caffemodel file, the second is the path to the
deploy.prototxt while the third is a path to the mean image binary, data in
which is subtracted from the input image as a preprocessing step.
- A tuple of two paths to .caffemodel and .prototxt and a dict with image input names
            as keys and paths to mean image binaryprotos as values. The keys should be the same as
            the input names provided via the argument 'image_input_names'.
image_input_names: [str] | str
The name(s) of the input blob(s) in the Caffe model that can be treated
as images by Core ML. All other inputs are treated as MultiArrays (N-D
Arrays) by Core ML.
is_bgr: bool | dict()
Flag indicating the channel order the model internally uses to represent
color images. Set to True if the internal channel order is BGR,
otherwise it will be assumed RGB. This flag is applicable only if
image_input_names is specified. To specify a different value for each
image input, provide a dictionary with input names as keys.
        Note that this flag is about the model's internal channel order.
An input image can be passed to the model in any color pixel layout
containing red, green and blue values (e.g. 32BGRA or 32ARGB). This flag
determines how those pixel values get mapped to the internal multiarray
representation.
red_bias: float | dict()
Bias value to be added to the red channel of the input image.
Defaults to 0.0.
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
blue_bias: float | dict()
        Bias value to be added to the blue channel of the input image.
Defaults to 0.0.
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
green_bias: float | dict()
Bias value to be added to the green channel of the input image.
Defaults to 0.0.
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
gray_bias: float | dict()
Bias value to be added to the input image (in grayscale). Defaults to 0.0.
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
image_scale: float | dict()
Value by which the input images will be scaled before bias is added and
Core ML model makes a prediction. Defaults to 1.0.
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
class_labels: str
Filepath where classes are parsed as a list of newline separated
strings. Class labels map the index of the output of a neural network to labels in a classifier.
Provide this argument to get a model of type classifier.
predicted_feature_name: str
Name of the output feature for the class labels exposed in the Core ML
model (applies to classifiers only). Defaults to 'classLabel'
model_precision: str
Precision at which model will be saved. Currently full precision (float) and half precision
(float16) models are supported. Defaults to '_MLMODEL_FULL_PRECISION' (full precision).
Returns
-------
model: MLModel
Model in Core ML format.
Examples
--------
.. sourcecode:: python
# Convert it with default input and output names
>>> import coremltools
>>> coreml_model = coremltools.converters.caffe.convert('my_caffe_model.caffemodel')
# Saving the Core ML model to a file.
>>> coreml_model.save('my_model.mlmodel')
    Sometimes, critical information needed by the Caffe converter is missing from the
    .caffemodel file but is present in the deploy.prototxt file. In that case, provide
    both files to the conversion process.
.. sourcecode:: python
>>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_deploy.prototxt'))
Some models (like Resnet-50) also require a mean image file which is
subtracted from the input image before passing through the network. This
file can also be provided during conversion:
.. sourcecode:: python
>>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel',
... 'my_deploy.prototxt', 'mean_image.binaryproto'), image_input_names = 'image_input')
# Multiple mean images for preprocessing
>>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel',
... 'my_deploy.prototxt', {'image1': 'mean_image1.binaryproto', 'image2': 'mean_image2.binaryproto'}),
... image_input_names = ['image1', 'image2'])
# Multiple image inputs and bias/scale values
>>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_deploy.prototxt'),
... red_bias = {'image1': -100, 'image2': -110},
... green_bias = {'image1': -90, 'image2': -125},
... blue_bias = {'image1': -105, 'image2': -120},
... image_input_names = ['image1', 'image2'])
Input and output names used in the interface of the converted Core ML model are inferred from the .prototxt file,
which contains a description of the network architecture.
Input names are read from the input layer definition in the .prototxt. By default, they are of type MultiArray.
Argument "image_input_names" can be used to assign image type to specific inputs.
All the blobs that are "dangling", i.e.
which do not feed as input to any other layer are taken as outputs. The .prototxt file can be modified to specify
custom input and output names.
The converted Core ML model is of type classifier when the argument "class_labels" is specified.
Advanced usage with custom classifiers, and images:
.. sourcecode:: python
# Mark some inputs as Images
>>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_caffe_model.prototxt'),
... image_input_names = 'my_image_input')
# Export as a classifier with classes from a file
>>> coreml_model = coremltools.converters.caffe.convert(('my_caffe_model.caffemodel', 'my_caffe_model.prototxt'),
... image_input_names = 'my_image_input', class_labels = 'labels.txt')
    Sometimes the converter might report that it is not able to infer input data dimensions.
This happens when the input size information is absent from the deploy.prototxt file. This can be easily provided by editing
the .prototxt in a text editor. Simply add a snippet in the beginning, similar to the following, for each of the inputs to the model:
.. code-block:: bash
input: "my_image_input"
input_dim: 1
input_dim: 3
input_dim: 227
input_dim: 227
Here we have specified an input with dimensions (1,3,227,227), using Caffe's convention, in the order (batch, channel, height, width).
Input name string ("my_image_input") must also match the name of the input (or "bottom", as inputs are known in Caffe) of the first layer in the .prototxt.
"""
if image_input_names is None:
image_input_names = []
from ...models import MLModel
from ...models.utils import _convert_neural_network_weights_to_fp16
if model_precision not in _VALID_MLMODEL_PRECISION_TYPES:
raise RuntimeError("Model precision {} is not valid".format(model_precision))
import tempfile
model_path = tempfile.mktemp()
_export(
model_path,
model,
image_input_names,
is_bgr,
red_bias,
blue_bias,
green_bias,
gray_bias,
image_scale,
class_labels,
predicted_feature_name,
)
model = MLModel(model_path)
try:
os.remove(model_path)
except OSError:
pass
if model_precision == _MLMODEL_HALF_PRECISION and model is not None:
model = _convert_neural_network_weights_to_fp16(model)
return model
def _export(
filename,
model,
image_input_names=None,
is_bgr=False,
red_bias=0.0,
blue_bias=0.0,
green_bias=0.0,
gray_bias=0.0,
image_scale=1.0,
class_labels=None,
predicted_feature_name=None,
):
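    # Internal helper (behavior summarized from the code below): normalizes the
    # model/prototxt/mean-image arguments and expands the scalar per-image options
    # (is_bgr, biases, scale) into per-input dicts before delegating to
    # libcaffeconverter._convert_to_file.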
if image_input_names is None:
image_input_names = []
from ... import libcaffeconverter
if isinstance(model, _six.string_types):
src_model_path = model
prototxt_path = u""
binaryproto_path = dict()
elif isinstance(model, tuple):
if len(model) == 3:
src_model_path, prototxt_path, binaryproto_path = model
else:
src_model_path, prototxt_path = model
binaryproto_path = dict()
if isinstance(image_input_names, _six.string_types):
image_input_names = [image_input_names]
if predicted_feature_name is None:
predicted_feature_name = u"classLabel"
if class_labels is None:
class_labels = u""
if binaryproto_path:
if not image_input_names:
raise RuntimeError(
"'image_input_names' must be provided when a mean image binaryproto path is specified. "
)
if isinstance(binaryproto_path, _six.string_types):
binaryproto_paths = dict()
binaryproto_paths[image_input_names[0]] = binaryproto_path
elif isinstance(binaryproto_path, dict):
binaryproto_paths = binaryproto_path
else:
raise RuntimeError(
"Mean image binaryproto path must be a string or a dictionary of inputs names and paths. "
)
if not isinstance(is_bgr, dict):
is_bgr = dict.fromkeys(image_input_names, is_bgr)
if not isinstance(red_bias, dict):
red_bias = dict.fromkeys(image_input_names, red_bias)
if not isinstance(blue_bias, dict):
blue_bias = dict.fromkeys(image_input_names, blue_bias)
if not isinstance(green_bias, dict):
green_bias = dict.fromkeys(image_input_names, green_bias)
if not isinstance(gray_bias, dict):
gray_bias = dict.fromkeys(image_input_names, gray_bias)
if not isinstance(image_scale, dict):
image_scale = dict.fromkeys(image_input_names, image_scale)
libcaffeconverter._convert_to_file(
src_model_path,
filename,
binaryproto_paths,
set(image_input_names),
is_bgr,
red_bias,
blue_bias,
green_bias,
gray_bias,
image_scale,
prototxt_path,
class_labels,
predicted_feature_name,
)
| 39.704545
| 159
| 0.679859
|
ddf8776429ea9ba0bfac887c3d70d850f3fb1577
| 2,314
|
py
|
Python
|
Vision/AutoMLVisionClassifier/app.py
|
iii-PaulCridland/azure-search-power-skills
|
bbc5848c32b3bd6f2c8942693d854563e0cee708
|
[
"MIT"
] | 128
|
2019-06-12T19:24:34.000Z
|
2022-03-08T18:39:40.000Z
|
Vision/AutoMLVisionClassifier/app.py
|
iii-PaulCridland/azure-search-power-skills
|
bbc5848c32b3bd6f2c8942693d854563e0cee708
|
[
"MIT"
] | 47
|
2019-07-15T22:04:23.000Z
|
2022-03-04T18:35:57.000Z
|
Vision/AutoMLVisionClassifier/app.py
|
iii-PaulCridland/azure-search-power-skills
|
bbc5848c32b3bd6f2c8942693d854563e0cee708
|
[
"MIT"
] | 99
|
2019-06-28T20:56:21.000Z
|
2022-03-30T17:17:24.000Z
|
import os
from typing import Dict, List
import uvicorn
from dotenv import load_dotenv
from fastapi import FastAPI, Security, Depends, HTTPException
from fastapi.security.api_key import APIKeyHeader, APIKey
from pydantic import BaseModel
from starlette.status import HTTP_400_BAD_REQUEST, HTTP_403_FORBIDDEN
from powerskill import extractor, models
import logging
load_dotenv()
app = FastAPI()
class Values(BaseModel):
values: List = []
class Value(Values):
recordId: str
data: Dict[str, str] = None
API_KEY = os.environ['KEY']
API_KEY_NAME = "Ocp-Apim-Subscription-Key"
api_key_header = APIKeyHeader(name=API_KEY_NAME, auto_error=False)
class_model = models.Models(azureml_model_dir=None, classication_model=None)
experiment_name = os.environ['EXPERIMENT_NAME']
azureml_model_dir = os.environ['AZUREML_MODEL_DIR']
get_latest_model = os.environ['GET_LATEST_MODEL']
@app.on_event("startup")
async def startup_event():
try:
if get_latest_model.lower() == "true":
logging.info(f"Download latest model")
if class_model.get_latest_model(experiment_name):
class_model.load_classification_model('models/train_artifacts/') # The AML artifacts path
else:
class_model.load_classification_model(azureml_model_dir)
else:
class_model.load_classification_model(azureml_model_dir)
except Exception as NOMODELFOUND:
logging.error(f"No model could be loaded {NOMODELFOUND}")
async def get_api_key(
api_key_header: str = Security(api_key_header),
):
if api_key_header == API_KEY:
return api_key_header
else:
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="Key not present"
)
@app.get('/api/healthcheck', status_code=200)
async def healthcheck():
return 'Ready'
@app.post('/api/extraction')
def extract(values: Values, api_key: APIKey = Depends(get_api_key)):
body = values.dict()
if not body:
        raise HTTPException(
            status_code=HTTP_400_BAD_REQUEST,
            detail='Expected text within body of request. No text found.'
        )
else:
return extractor.go_extract(body, classification_model=class_model.classication_model)
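# Hedged request sketch (the payload shape follows the Values/Value models above;
# the exact 'data' keys depend on the indexer configuration and are assumptions):
#
#   POST /api/extraction  with header  Ocp-Apim-Subscription-Key: <KEY>
#   {"values": [{"recordId": "1", "data": {"images": "<base64 or image url>"}}]}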
# Remove these two lines below for non-debug/production mode
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=5000)
| 29.291139
| 107
| 0.725151
|
7848c9e740fd7159043a18cf5f4e0e1388944743
| 1,811
|
py
|
Python
|
setup.py
|
hammerlab/stanity
|
6c36abc207c4ce94f78968501dab839a56f35a41
|
[
"Apache-2.0"
] | 17
|
2016-07-15T15:59:29.000Z
|
2020-07-30T18:42:04.000Z
|
setup.py
|
hammerlab/stanity
|
6c36abc207c4ce94f78968501dab839a56f35a41
|
[
"Apache-2.0"
] | 18
|
2016-06-03T18:09:00.000Z
|
2019-01-14T20:04:01.000Z
|
setup.py
|
hammerlab/stanity
|
6c36abc207c4ce94f78968501dab839a56f35a41
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2016. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import versioneer
setup(
name="stanity",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
author="Jacki Buros and Tim O'Donnell",
author_email="timodonnell@gmail.com",
packages=["stanity"],
url="https://github.com/hammerlab/stanity",
license="Apache License",
description="Helper library for working with Stan models in Python",
long_description=open('README.rst').read(),
download_url='https://github.com/hammerlab/stanity/tarball/%s' % versioneer.get_version(),
entry_points={
},
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
],
install_requires=[
"Cython>=0.19",
"numpy",
"pystan",
"nose",
"typechecks",
"future>=0.14.3",
"pandas",
"seaborn",
],
)
| 32.927273
| 94
| 0.66317
|
8d1ab32590d0a5a24dcdf87712b21b812c0df9ac
| 4,824
|
py
|
Python
|
python/fastscore/v1/models/verify_info_slots.py
|
modelop/fastscore-sdk
|
2206a4b9294cd83b6b8c2470193070bdc35a9061
|
[
"Apache-2.0"
] | 2
|
2018-06-05T19:14:30.000Z
|
2019-02-06T17:15:10.000Z
|
python/fastscore/v1/models/verify_info_slots.py
|
modelop/fastscore-sdk
|
2206a4b9294cd83b6b8c2470193070bdc35a9061
|
[
"Apache-2.0"
] | 2
|
2018-02-20T21:58:43.000Z
|
2018-10-07T10:10:54.000Z
|
python/fastscore/v1/models/verify_info_slots.py
|
modelop/fastscore-sdk
|
2206a4b9294cd83b6b8c2470193070bdc35a9061
|
[
"Apache-2.0"
] | 1
|
2017-12-29T20:38:06.000Z
|
2017-12-29T20:38:06.000Z
|
# coding: utf-8
"""
FastScore API (proxy)
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 1.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class VerifyInfoSlots(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'slot': 'int',
'schema': 'str',
'recordsets': 'bool',
'action': 'str'
}
attribute_map = {
'slot': 'slot',
'schema': 'schema',
'recordsets': 'recordsets',
'action': 'action'
}
def __init__(self, slot=None, schema=None, recordsets=None, action=None):
"""
VerifyInfoSlots - a model defined in Swagger
"""
self._slot = None
self._schema = None
self._recordsets = None
self._action = None
if slot is not None:
self.slot = slot
if schema is not None:
self.schema = schema
if recordsets is not None:
self.recordsets = recordsets
if action is not None:
self.action = action
@property
def slot(self):
"""
Gets the slot of this VerifyInfoSlots.
:return: The slot of this VerifyInfoSlots.
:rtype: int
"""
return self._slot
@slot.setter
def slot(self, slot):
"""
Sets the slot of this VerifyInfoSlots.
:param slot: The slot of this VerifyInfoSlots.
:type: int
"""
self._slot = slot
@property
def schema(self):
"""
Gets the schema of this VerifyInfoSlots.
:return: The schema of this VerifyInfoSlots.
:rtype: str
"""
return self._schema
@schema.setter
def schema(self, schema):
"""
Sets the schema of this VerifyInfoSlots.
:param schema: The schema of this VerifyInfoSlots.
:type: str
"""
self._schema = schema
@property
def recordsets(self):
"""
Gets the recordsets of this VerifyInfoSlots.
:return: The recordsets of this VerifyInfoSlots.
:rtype: bool
"""
return self._recordsets
@recordsets.setter
def recordsets(self, recordsets):
"""
Sets the recordsets of this VerifyInfoSlots.
:param recordsets: The recordsets of this VerifyInfoSlots.
:type: bool
"""
self._recordsets = recordsets
@property
def action(self):
"""
Gets the action of this VerifyInfoSlots.
:return: The action of this VerifyInfoSlots.
:rtype: str
"""
return self._action
@action.setter
def action(self, action):
"""
Sets the action of this VerifyInfoSlots.
:param action: The action of this VerifyInfoSlots.
:type: str
"""
self._action = action
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, VerifyInfoSlots):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 23.881188
| 105
| 0.540008
|
ba6af36332b3cb5babafdfa2bdd507f83b931b3c
| 7,574
|
py
|
Python
|
addons/blender_mmd_tools-main/mmd_tools/properties/rigid_body.py
|
V-Sekai/V-Sekai-Blender-tools
|
3473ad4abb737756290a9007273519460742960d
|
[
"MIT"
] | 2
|
2021-12-21T16:38:58.000Z
|
2022-01-08T00:56:35.000Z
|
addons/blender_mmd_tools-main/mmd_tools/properties/rigid_body.py
|
V-Sekai/V-Sekai-Blender-game-tools
|
3473ad4abb737756290a9007273519460742960d
|
[
"MIT"
] | 1
|
2022-01-29T05:46:50.000Z
|
2022-01-29T05:46:50.000Z
|
addons/blender_mmd_tools-main/mmd_tools/properties/rigid_body.py
|
V-Sekai/V-Sekai-Blender-game-tools
|
3473ad4abb737756290a9007273519460742960d
|
[
"MIT"
] | 1
|
2021-11-07T19:41:34.000Z
|
2021-11-07T19:41:34.000Z
|
# -*- coding: utf-8 -*-
import bpy
from mmd_tools import bpyutils, register_wrap
from mmd_tools.core import rigid_body
from mmd_tools.core.model import FnModel, getRigidBodySize
def _updateCollisionGroup(prop, context):
obj = prop.id_data
materials = obj.data.materials
if len(materials) == 0:
materials.append(rigid_body.RigidBodyMaterial.getMaterial(prop.collision_group_number))
else:
obj.material_slots[0].material = rigid_body.RigidBodyMaterial.getMaterial(prop.collision_group_number)
def _updateType(prop, context):
obj = prop.id_data
rb = obj.rigid_body
if rb:
rb.kinematic = (int(prop.type) == rigid_body.MODE_STATIC)
def _updateShape(prop, context):
obj = prop.id_data
if len(obj.data.vertices) > 0:
size = prop.size
prop.size = size # update mesh
rb = obj.rigid_body
if rb:
rb.collision_shape = prop.shape
def _get_bone(prop):
obj = prop.id_data
relation = obj.constraints.get('mmd_tools_rigid_parent', None)
if relation:
arm = relation.target
bone_name = relation.subtarget
if arm is not None and bone_name in arm.data.bones:
return bone_name
return prop.get('bone', '')
def _set_bone(prop, value):
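    # The bone relation is stored as a muted 'CHILD_OF' constraint named
    # 'mmd_tools_rigid_parent'; the setter (re)creates it, points it at the
    # model's armature, and falls back to an empty subtarget if the bone is gone.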
bone_name = value
obj = prop.id_data
relation = obj.constraints.get('mmd_tools_rigid_parent', None)
if relation is None:
relation = obj.constraints.new('CHILD_OF')
relation.name = 'mmd_tools_rigid_parent'
relation.mute = True
arm = relation.target
if arm is None:
root = FnModel.find_root(obj)
if root:
arm = relation.target = FnModel.find_armature(root)
if arm is not None and bone_name in arm.data.bones:
relation.subtarget = bone_name
else:
relation.subtarget = bone_name = ''
prop['bone'] = bone_name
def _get_size(prop):
if prop.id_data.mmd_type != 'RIGID_BODY':
return (0, 0, 0)
return getRigidBodySize(prop.id_data)
def _set_size(prop, value):
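    # Two paths: if the mesh is empty or the rigid-body shape changed, rebuild the
    # primitive (sphere/box/capsule) from scratch; otherwise rescale the existing
    # vertices in place so a hand-edited mesh keeps its topology.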
obj = prop.id_data
    assert obj.mode == 'OBJECT'  # other modes are not supported yet
shape = prop.shape
mesh = obj.data
rb = obj.rigid_body
if len(mesh.vertices) == 0 or rb is None or rb.collision_shape != shape:
if shape == 'SPHERE':
bpyutils.makeSphere(
radius=value[0],
target_object=obj,
)
elif shape == 'BOX':
bpyutils.makeBox(
size=value,
target_object=obj,
)
elif shape == 'CAPSULE':
bpyutils.makeCapsule(
radius=value[0],
height=value[1],
target_object=obj,
)
mesh.update()
if rb:
rb.collision_shape = shape
else:
if shape == 'SPHERE':
radius = max(value[0], 1e-3)
for v in mesh.vertices:
vec = v.co.normalized()
v.co = vec * radius
elif shape == 'BOX':
x = max(value[0], 1e-3)
y = max(value[1], 1e-3)
z = max(value[2], 1e-3)
for v in mesh.vertices:
x0, y0, z0 = v.co
x0 = -x if x0 < 0 else x
y0 = -y if y0 < 0 else y
z0 = -z if z0 < 0 else z
v.co = [x0, y0, z0]
elif shape == 'CAPSULE':
r0, h0, xx = getRigidBodySize(prop.id_data)
h0 *= 0.5
radius = max(value[0], 1e-3)
height = max(value[1], 1e-3)*0.5
scale = radius/max(r0, 1e-3)
for v in mesh.vertices:
x0, y0, z0 = v.co
x0 *= scale
y0 *= scale
if z0 < 0:
z0 = (z0 + h0)*scale - height
else:
z0 = (z0 - h0)*scale + height
v.co = [x0, y0, z0]
mesh.update()
def _get_rigid_name(prop):
return prop.get('name', '')
def _set_rigid_name(prop, value):
prop['name'] = value
@register_wrap
class MMDRigidBody(bpy.types.PropertyGroup):
name_j: bpy.props.StringProperty(
name='Name',
description='Japanese Name',
default='',
get=_get_rigid_name,
set=_set_rigid_name,
)
name_e: bpy.props.StringProperty(
name='Name(Eng)',
description='English Name',
default='',
)
collision_group_number: bpy.props.IntProperty(
name='Collision Group',
description='The collision group of the object',
min=0,
max=15,
default=1,
update=_updateCollisionGroup,
)
collision_group_mask: bpy.props.BoolVectorProperty(
name='Collision Group Mask',
description='The groups the object can not collide with',
size=16,
subtype='LAYER',
)
type: bpy.props.EnumProperty(
name='Rigid Type',
description='Select rigid type',
items=[
(str(rigid_body.MODE_STATIC), 'Bone',
"Rigid body's orientation completely determined by attached bone", 1),
(str(rigid_body.MODE_DYNAMIC), 'Physics',
"Attached bone's orientation completely determined by rigid body", 2),
(str(rigid_body.MODE_DYNAMIC_BONE), 'Physics + Bone',
"Bone determined by combination of parent and attached rigid body", 3),
],
update=_updateType,
)
shape: bpy.props.EnumProperty(
name='Shape',
description='Select the collision shape',
items=[
('SPHERE', 'Sphere', '', 1),
('BOX', 'Box', '', 2),
('CAPSULE', 'Capsule', '', 3),
],
update=_updateShape,
)
bone: bpy.props.StringProperty(
name='Bone',
description='Target bone',
default='',
get=_get_bone,
set=_set_bone,
)
size: bpy.props.FloatVectorProperty(
name='Size',
description='Size of the object',
subtype='XYZ',
size=3,
min=0,
step=0.1,
get=_get_size,
set=_set_size,
)
def _updateSpringLinear(prop, context):
obj = prop.id_data
rbc = obj.rigid_body_constraint
if rbc:
rbc.spring_stiffness_x = prop.spring_linear[0]
rbc.spring_stiffness_y = prop.spring_linear[1]
rbc.spring_stiffness_z = prop.spring_linear[2]
def _updateSpringAngular(prop, context):
obj = prop.id_data
rbc = obj.rigid_body_constraint
if rbc and hasattr(rbc, 'use_spring_ang_x'):
rbc.spring_stiffness_ang_x = prop.spring_angular[0]
rbc.spring_stiffness_ang_y = prop.spring_angular[1]
rbc.spring_stiffness_ang_z = prop.spring_angular[2]
@register_wrap
class MMDJoint(bpy.types.PropertyGroup):
name_j: bpy.props.StringProperty(
name='Name',
description='Japanese Name',
default='',
)
name_e: bpy.props.StringProperty(
name='Name(Eng)',
description='English Name',
default='',
)
spring_linear: bpy.props.FloatVectorProperty(
name='Spring(Linear)',
description='Spring constant of movement',
subtype='XYZ',
size=3,
min=0,
step=0.1,
update=_updateSpringLinear,
)
spring_angular: bpy.props.FloatVectorProperty(
name='Spring(Angular)',
description='Spring constant of rotation',
subtype='XYZ',
size=3,
min=0,
step=0.1,
update=_updateSpringAngular,
)
| 27.541818
| 110
| 0.570108
|
c7650dd0eda65713e250ffb9c61f58edd7ea3da8
| 26,549
|
py
|
Python
|
dodo.py
|
deathbeds/jupyterlab-drawio
|
6cd7d29adc53d73a7a27580b59483205747a400e
|
[
"Apache-2.0"
] | 1
|
2020-11-04T21:31:28.000Z
|
2020-11-04T21:31:28.000Z
|
dodo.py
|
deathbeds/jupyterlab-drawio
|
6cd7d29adc53d73a7a27580b59483205747a400e
|
[
"Apache-2.0"
] | 6
|
2020-08-06T12:10:58.000Z
|
2020-12-27T15:32:58.000Z
|
dodo.py
|
deathbeds/jupyterlab-drawio
|
6cd7d29adc53d73a7a27580b59483205747a400e
|
[
"Apache-2.0"
] | null | null | null |
"""automation for ipydrawio
> see https://pydoit.org/tutorial_1.html#incremental-computation
see what you can do
doit list --status --all | sort
do basically everything to get ready for a release
doit all
maybe before you push
doit -n8 lint
"""
# Copyright 2021 ipydrawio contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import subprocess
import time
from hashlib import sha256
import doit
from doit.action import CmdAction
from doit.tools import PythonInteractiveAction, config_changed
import scripts.project as P
DOIT_CONFIG = dict(
backend="sqlite3",
verbosity=2,
par_type="thread",
default_tasks=["setup"],
)
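# Illustrative note (editor's addition; assumes standard doit behavior and is
# not verified against this repo's CI): with default_tasks=["setup"] a bare
# ``doit`` runs the setup task graph, par_type="thread" lets ``doit -n8 lint``
# parallelize with threads, and sub-tasks defined below can be addressed as
# ``task:name``, for example:
#
#     doit list --all          # show every task/sub-task defined in this file
#     doit setup:js            # run just the JS bootstrap sub-task
#     doit -n8 lint            # run the linters with 8 worker threads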
def task_all():
"""do _everything_ (except start long-running servers)"""
return dict(
uptodate=[lambda: False],
task_dep=["check"],
file_dep=[
*[P.OK_CONDA_TEST / f"{name}.ok" for name in P.CONDA_PKGS],
*P.OK_PYTEST.values(),
P.DOCS_BUILDINFO,
P.OK_ATEST,
P.OK_INTEGRITY,
P.OK_LINK_CHECK,
P.OK_PROVISION,
P.SHA256SUMS,
],
actions=[
(P._show, ["nothing left to do"], {"shasums": P.SHA256SUMS.read_text})
],
)
def task_fetch():
"""fetch local copies of key configuration documentation"""
for path, url in P.DIA_URLS.items():
yield P.fetch_one(url, path)
def task_dist():
"""create a minimum viable release product"""
return dict(
uptodate=[lambda: False],
file_dep=[P.OK_INTEGRITY, P.SHA256SUMS, P.OK_LINT],
actions=[lambda: print(P.SHA256SUMS.read_text())],
)
def task_env():
"""sync environments"""
if P.RTD:
return
for env, inherits in P.ENV_INHERITS.items():
yield dict(
name=f"""{env.relative_to(P.ROOT)}:{':'.join([str(inh.relative_to(P.ROOT)) for inh in inherits])}""",
file_dep=[*inherits, P.YARN_INTEGRITY],
actions=[(P.patch_one_env, [inh, env]) for inh in inherits]
+ [["jlpm", "prettier", "--list-different", "--write", env]],
targets=[env],
)
def task_submodules():
"""ensure submodules are available"""
subs = subprocess.check_output(["git", "submodule"]).decode("utf-8").splitlines()
def _clean():
"""clean drawio, as it gets patched in-place"""
if any([x.startswith("-") for x in subs]) and P.DRAWIO.exists():
shutil.rmtree(P.DRAWIO)
return P._ok(
dict(
uptodate=[config_changed({"subs": subs})],
actions=[_clean, ["git", "submodule", "update", "--init", "--recursive"]],
),
P.OK_SUBMODULES,
)
def task_setup():
"""perform general steps to get ready for development, testing, or releasing"""
if not P.TESTING_IN_CI:
yield dict(
name="js",
file_dep=[P.PACKAGE, P.OK_SUBMODULES]
+ ([P.YARN_LOCK] if P.YARN_LOCK.exists() else []),
actions=[
[*P.JLPM, "--ignore-optional", "--prefer-offline"],
[*P.LERNA, "bootstrap"],
],
targets=[P.YARN_INTEGRITY],
)
yield dict(
name="js:ipde",
file_dep=[P.IPDE_DIE2_PACKAGE_JSON, P.YARN_INTEGRITY],
targets=[P.IPDE_DIE2_YARN_LOCK],
actions=[
CmdAction(
[*P.JLPM, "--ignore-optional", "--prefer-offline"],
shell=False,
cwd=P.IPDE_DIE2,
),
],
)
for pkg, pkg_setup in P.PY_SETUP.items():
# TODO: refactor
ext_deps = [
(
P.JS_PKG_JSON[ext].parent
/ P.JS_PKG_DATA[ext]["jupyterlab"]["outputDir"]
).resolve()
/ "package.json"
for ext, mod in P.JS_LABEXT_PY_HOST.items()
if mod == pkg_setup.parent.name
]
if P.TESTING_IN_CI:
ci_af = {"wheel": P.PY_WHEEL[pkg], "sdist": P.PY_SDIST[pkg]}[P.CI_ARTIFACT]
dist_af = P.DIST / ci_af.name
yield P._ok(
dict(
name=f"py:{pkg}",
file_dep=[dist_af],
actions=[
[
*P.PIP,
"install",
"-vv",
"--ignore-installed",
"--no-deps",
dist_af,
]
],
),
P.OK_PYSETUP[pkg],
)
else:
extra_deps = []
if pkg != "ipydrawio":
extra_deps += [P.OK_PYSETUP["ipydrawio"]]
yield P._ok(
dict(
name=f"py:{pkg}",
file_dep=[pkg_setup, P.PY_SETUP_CFG[pkg], *ext_deps, *extra_deps],
actions=[
CmdAction(
[
*P.PIP,
"install",
"-e",
".",
"--no-deps",
"-vv",
],
shell=False,
cwd=pkg_setup.parent,
),
CmdAction(
[
*P.LAB_EXT,
"develop",
"--debug",
"--overwrite",
".",
],
shell=False,
cwd=pkg_setup.parent,
),
],
),
P.OK_PYSETUP[pkg],
)
yield P._ok(
dict(
name="pip:check",
file_dep=[*P.OK_PYSETUP.values()],
actions=[P.pip_check],
),
P.OK_PIP_CHECK,
)
base_ext_args = [
"jupyter",
"serverextension",
"enable",
"--sys-prefix",
"--py",
]
for ext, ext_py in P.SERVER_EXT.items():
enable_args = [*base_ext_args, ext_py.parent.name]
if P.TESTING_IN_CI:
enable_args = ["echo", "'(installed by pip)'"]
yield P._ok(
dict(
name=f"ext:{ext}",
doc=f"ensure {ext} is a serverextension",
file_dep=[ext_py, P.OK_PIP_CHECK],
actions=[
enable_args,
["jupyter", "serverextension", "list"],
],
),
P.OK_SERVEREXT[ext],
)
def task_lint():
"""format all source files"""
if P.TESTING_IN_CI:
return
yield P._ok(
dict(
name="isort",
file_dep=[*P.ALL_PY, P.SETUP_CFG],
actions=[["isort", *P.ALL_PY]],
),
P.OK_ISORT,
)
yield P._ok(
dict(
name="black",
file_dep=[*P.ALL_PY, P.OK_ISORT],
actions=[["black", "--quiet", *P.ALL_PY]],
),
P.OK_BLACK,
)
yield P._ok(
dict(
name="flake8",
file_dep=[*P.ALL_PY, P.OK_BLACK, P.SETUP_CFG],
actions=[["flake8", *P.ALL_PY]],
),
P.OK_FLAKE8,
)
yield P._ok(
dict(
name="pyflakes",
file_dep=[*P.ALL_PY, P.OK_BLACK],
actions=[["pyflakes", *P.ALL_PY]],
),
P.OK_PYFLAKES,
)
prettier_args = [
"jlpm",
"--silent",
"prettier",
"--list-different",
"--write",
]
if P.CI:
yield P._ok(
dict(
name="prettier",
file_dep=[
P.YARN_INTEGRITY,
                    *[p for p in P.ALL_PRETTIER if p != P.DEMO_CONFIG],
],
actions=[[*prettier_args, *P.ALL_PRETTIER]],
),
P.OK_PRETTIER,
)
else:
pretty_tasks = []
for path in P.ALL_PRETTIER:
name = f"prettier:{path.relative_to(P.ROOT)}"
pretty_tasks += [f"lint:{name}"]
yield dict(
name=name,
file_dep=[P.YARN_INTEGRITY, path],
actions=[[*prettier_args, path]],
)
yield P._ok(
dict(
name="prettier",
file_dep=[P.YARN_INTEGRITY, *P.ALL_PRETTIER],
task_dep=pretty_tasks,
actions=[["echo", "OK"]],
),
P.OK_PRETTIER,
)
yield P._ok(
dict(
name="eslint",
file_dep=[
P.YARN_INTEGRITY,
*P.ALL_TS,
P.OK_PRETTIER,
P.ESLINTRC,
P.TSCONFIGBASE,
],
actions=[["jlpm", "eslint"]],
),
P.OK_ESLINT,
)
dio_tasks = []
for dio_file in P.ALL_DIO:
name = f"dio:clean:{dio_file.relative_to(P.ROOT)}"
dio_tasks += [f"lint:{name}"]
yield dict(
name=name,
file_dep=[dio_file, *P.OK_PYSETUP.values()],
actions=[["jupyter", "ipydrawio", "clean", dio_file]],
)
yield P._ok(
dict(
name="dio:clean",
file_dep=[*P.ALL_DIO],
task_dep=dio_tasks,
actions=[["echo", "ok"]],
),
P.OK_DIOLINT,
)
yield P._ok(
dict(
name="all",
actions=[P._echo_ok("all ok")],
file_dep=[
P.OK_BLACK,
P.OK_ESLINT,
P.OK_FLAKE8,
P.OK_ISORT,
P.OK_PRETTIER,
P.OK_PYFLAKES,
],
),
P.OK_LINT,
)
yield P._ok(
dict(
name="robot:tidy",
file_dep=P.ALL_ROBOT,
actions=[[*P.PYM, "robot.tidy", "--inplace", *P.ALL_ROBOT]],
),
P.OK_ROBOTIDY,
)
yield P._ok(
dict(
name="robot:lint",
file_dep=[*P.ALL_ROBOT, P.OK_ROBOTIDY],
actions=[["rflint", *P.RFLINT_OPTS, *P.ALL_ROBOT]],
),
P.OK_RFLINT,
)
yield P._ok(
dict(
name="robot:dryrun",
file_dep=[*P.ALL_ROBOT, P.OK_RFLINT],
actions=[[*P.PYM, "scripts.atest", "--dryrun"]],
),
P.OK_ROBOT_DRYRUN,
)
def task_build():
"""build intermediates and release artifacts"""
if P.TESTING_IN_CI:
return
yield P._ok(
dict(
name="js:pre",
file_dep=[
P.YARN_INTEGRITY,
P.IPDW_IGNORE,
P.OK_SUBMODULES,
*sum(P.JS_PY_SCRIPTS.values(), []),
*sum(P.JS_SCHEMAS.values(), []),
],
actions=[[*P.LERNA, "run", "build:pre", "--stream"]],
targets=[P.IPDW_APP],
),
P.OK_JS_BUILD_PRE,
)
yield P._ok(
dict(
name="js",
file_dep=[P.YARN_INTEGRITY, P.OK_JS_BUILD_PRE, *P.ALL_TS, *P.ALL_CSS],
actions=[[*P.LERNA, "run", "build", "--stream"]],
targets=sorted(P.JS_TSBUILDINFO.values()),
),
P.OK_JS_BUILD,
)
yield dict(
name="readme:ipydrawio",
file_dep=[P.README],
targets=[P.IPD / "README.md"],
actions=[
lambda: [(P.IPD / "README.md").write_text(P.README.read_text()), None][-1]
],
)
for pkg, (file_dep, targets) in P.JS_PKG_PACK.items():
yield dict(
name=f"pack:{pkg}",
file_dep=file_dep,
actions=[
CmdAction([P.NPM, "pack", "."], cwd=str(targets[0].parent), shell=False)
],
targets=targets,
)
pkg_data = P.JS_PKG_DATA[pkg]
if "jupyterlab" not in pkg_data:
continue
out_dir = (
P.JS_PKG_JSON[pkg].parent / pkg_data["jupyterlab"]["outputDir"]
).resolve()
yield P._ok(
dict(
name=f"ext:build:{pkg}",
actions=[
CmdAction(
[*P.LAB_EXT, "build", "."],
shell=False,
cwd=P.JS_PKG_JSON[pkg].parent,
)
],
file_dep=targets,
targets=[out_dir / "package.json"],
),
P.OK_EXT_BUILD[pkg],
)
for py_pkg, py_setup in P.PY_SETUP.items():
ext_deps = [
(
P.JS_PKG_JSON[ext].parent
/ P.JS_PKG_DATA[ext]["jupyterlab"]["outputDir"]
).resolve()
/ "package.json"
for ext, mod in P.JS_LABEXT_PY_HOST.items()
if mod == py_setup.parent.name
]
file_dep = sorted(
set(
[
*ext_deps,
*P.PY_SRC[py_pkg],
P.OK_SUBMODULES,
py_setup,
py_setup.parent / "setup.cfg",
py_setup.parent / "MANIFEST.in",
py_setup.parent / "README.md",
py_setup.parent / "LICENSE.txt",
]
)
)
if py_setup.parent == P.IPDE:
file_dep += [P.IPDE_DIE2_YARN_LOCK]
yield dict(
name=f"sdist:{py_pkg}",
file_dep=file_dep,
actions=[
CmdAction(
["python", "setup.py", "sdist"],
shell=False,
cwd=str(py_setup.parent),
),
],
targets=[P.PY_SDIST[py_pkg]],
)
yield dict(
name=f"whl:{py_pkg}",
file_dep=file_dep,
actions=[
CmdAction(
["python", "setup.py", "bdist_wheel"],
shell=False,
cwd=str(py_setup.parent),
),
],
targets=[P.PY_WHEEL[py_pkg]],
)
def _make_hashfile():
# mimic sha256sum CLI
if P.SHA256SUMS.exists():
P.SHA256SUMS.unlink()
if not P.DIST.exists():
P.DIST.mkdir(parents=True)
[shutil.copy2(p, P.DIST / p.name) for p in P.HASH_DEPS]
lines = []
for p in P.HASH_DEPS:
lines += [" ".join([sha256(p.read_bytes()).hexdigest(), p.name])]
output = "\n".join(lines)
print(output)
P.SHA256SUMS.write_text(output)
yield dict(
name="hash",
file_dep=[*P.HASH_DEPS],
targets=[P.SHA256SUMS, *[P.DIST / d.name for d in P.HASH_DEPS]],
actions=[_make_hashfile],
)
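# Hedged example (editor's addition): _make_hashfile above copies each release
# artifact into dist/ and writes one "<sha256 hexdigest> <filename>" line per
# artifact into SHA256SUMS; a quick manual spot-check of one artifact could be:
#
#     python -c "import hashlib,sys; print(hashlib.sha256(open(sys.argv[1],'rb').read()).hexdigest())" dist/<artifact>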
def task_conda_build():
"""test building with conda-build"""
yield dict(
name="build",
file_dep=[
P.RECIPE,
*[P.DIST / p.name for p in P.PY_SDIST.values()],
],
actions=[
[
*P.CONDA_BUILD_ARGS,
"--no-test",
"--output-folder",
P.CONDA_BLD,
P.RECIPE.parent,
]
],
targets=[*P.CONDA_PKGS.values()],
)
def task_conda_test():
for name, pkg in P.CONDA_PKGS.items():
yield P._ok(
dict(
name=f"test:{name}",
file_dep=[pkg],
actions=[[*P.CONDA_BUILD_ARGS, "--test", pkg]],
),
P.OK_CONDA_TEST / f"{name}.ok",
)
def task_lab():
"""run JupyterLab "normally" (not watching sources)"""
if P.TESTING_IN_CI:
return
def lab():
proc = subprocess.Popen(P.CMD_LAB, stdin=subprocess.PIPE)
try:
proc.wait()
except KeyboardInterrupt:
print("attempting to stop lab, you may want to check your process monitor")
proc.terminate()
proc.communicate(b"y\n")
proc.wait()
return dict(
uptodate=[lambda: False],
file_dep=[*P.OK_SERVEREXT.values()],
actions=[PythonInteractiveAction(lab)],
)
def _make_lab(watch=False):
def _lab():
if watch:
print(">>> Starting typescript watcher...", flush=True)
ts = subprocess.Popen([*P.LERNA, "run", "watch"])
ext_watchers = [
subprocess.Popen([*P.LAB_EXT, "watch", "."], cwd=str(p.parent))
for p in P.JS_PKG_JSON_LABEXT.values()
]
print(">>> Waiting a bit to JupyterLab...", flush=True)
time.sleep(3)
print(">>> Starting JupyterLab...", flush=True)
lab = subprocess.Popen(
P.CMD_LAB,
stdin=subprocess.PIPE,
)
try:
print(">>> Waiting for JupyterLab to exit (Ctrl+C)...", flush=True)
lab.wait()
except KeyboardInterrupt:
print(
f""">>> {"Watch" if watch else "Run"} canceled by user!""",
flush=True,
)
finally:
print(">>> Stopping watchers...", flush=True)
if watch:
[x.terminate() for x in ext_watchers]
ts.terminate()
lab.terminate()
lab.communicate(b"y\n")
if watch:
ts.wait()
lab.wait()
[x.wait() for x in ext_watchers]
print(
">>> Stopped watchers! maybe check process monitor...",
flush=True,
)
return True
return _lab
def task_watch():
"""watch things"""
if P.TESTING_IN_CI:
return
yield dict(
name="lab",
doc="watch labextensions for changes, rebuilding",
uptodate=[lambda: False],
file_dep=[*P.OK_SERVEREXT.values(), P.OK_PIP_CHECK],
actions=[
P.CMD_LIST_EXTENSIONS,
PythonInteractiveAction(_make_lab(watch=True)),
],
)
yield dict(
name="docs",
doc="watch docs for changes, rebuilding",
uptodate=[lambda: False],
file_dep=[P.DOCS_BUILDINFO, P.OK_PIP_CHECK],
actions=[["sphinx-autobuild", *P.SPHINX_ARGS, "-j8", P.DOCS, P.DOCS_BUILD]],
)
def task_demo():
if not P.LITE_PREFIX:
print("jupyterlite not found, this might be okay", flush=True)
return
demo_dest = [*P.DEMO.glob("*.json")]
demo_tasks = []
final_dest = []
yield dict(
name="pyodide:packages",
doc="fetch the pyodide packages.json",
uptodate=[doit.tools.config_changed(P.PYODIDE_URL)],
targets=[P.PYODIDE_PACKAGES],
actions=[P.fetch_pyodide_packages],
)
demo_tasks += ["demo:wheels"]
yield dict(
name="wheels",
doc="fetch wheels",
file_dep=[P.PYODIDE_PACKAGES, P.DEMO_REQS],
actions=[P.fetch_wheels],
)
yield dict(
name="extensions",
doc="update jupyter-lite.json from the conda env",
file_dep=[P.ENV_BINDER, P.PY_WHEEL[P.IPD.name]],
targets=[P.DEMO_CONFIG],
actions=[
(
P._sync_lite_config,
[
P.ENV_BINDER,
P.DEMO_CONFIG,
P.FED_EXT_MARKER,
[P.PY_WHEEL[P.IPD.name]],
],
)
],
)
for path in P.ALL_DEMO_CONTENTS:
name = f"stage:{path.name}"
dest = P.DEMO_FILES / path.name.replace(" ", "_")
demo_dest += [dest]
demo_tasks += [f"demo:{name}"]
final_dest += [P.DEMO_BUILD / f"files/{path.name}"]
yield dict(
name=name,
file_dep=[path],
targets=[dest],
actions=[(P._copy_one, [path, dest])],
)
assert demo_dest
assert final_dest
assert demo_tasks
yield dict(
name="archive",
task_dep=demo_tasks,
file_dep=[*demo_dest, *P.DEMO_WHEELS.glob(f"*{P.NOARCH_WHL}")],
targets=[P.DEMO_ARCHIVE, P.DEMO_HASHES, *final_dest, P.DEMO_CONTENTS_API],
actions=[P._build_lite],
)
def task_docs():
"""build the docs"""
if P.TESTING_IN_CI:
return
if shutil.which("convert"):
yield dict(
name="favicon",
doc="regenerate the favicon",
file_dep=[P.DOCS_FAVICON_SVG],
actions=[
[
"convert",
"-density",
"256x256",
"-background",
"transparent",
P.DOCS_FAVICON_SVG,
"-define",
"icon:auto-resize",
"-colors",
"256",
P.DOCS_FAVICON_ICO,
]
],
targets=[P.DOCS_FAVICON_ICO],
)
yield dict(
name="typedoc:ensure",
file_dep=[*P.JS_PKG_JSON.values()],
actions=[P.typedoc_conf],
targets=[P.TYPEDOC_JSON, P.TSCONFIG_TYPEDOC],
)
yield dict(
name="typedoc:build",
doc="build the TS API documentation with typedoc",
file_dep=[*P.JS_TSBUILDINFO.values(), *P.TYPEDOC_CONF, P.YARN_INTEGRITY],
actions=[["jlpm", "typedoc", "--options", P.TYPEDOC_JSON]],
targets=[P.DOCS_RAW_TYPEDOC_README],
)
yield dict(
name="typedoc:mystify",
doc="transform raw typedoc into myst markdown",
file_dep=[P.DOCS_RAW_TYPEDOC_README],
targets=[P.DOCS_TS_MYST_INDEX, *P.DOCS_TS_MODULES],
actions=[
P.mystify,
[
"jlpm",
"prettier",
"--list-different",
"--write",
P.DOCS_TS_MYST_INDEX.parent,
],
],
)
sphinx_deps = [
P.DOCS_CONF,
P.DOCS_FAVICON_ICO,
P.OK_PIP_CHECK,
*P.DOCS_SRC,
]
sphinx_task_deps = []
if P.LITE_PREFIX:
sphinx_deps += [
P.DEMO_HASHES,
P.DEMO_ARCHIVE,
]
yield dict(
name="sphinx",
doc="build the documentation site with sphinx",
file_dep=sphinx_deps,
task_dep=sphinx_task_deps,
actions=[
["sphinx-build", *P.SPHINX_ARGS, "-j8", "-b", "html", P.DOCS, P.DOCS_BUILD]
],
targets=[P.DOCS_BUILDINFO],
)
@doit.create_after("docs")
def task_check():
"""check built artifacts"""
file_dep = [*P.DOCS_BUILD.rglob("*.html")]
yield P._ok(
dict(
name="links",
file_dep=[*file_dep, P.DOCS_BUILDINFO],
actions=[
[
"pytest-check-links",
"--check-anchors",
"--check-links-ignore",
"^https?://",
*[p for p in file_dep if p.name not in ["schema.html"]],
]
],
),
P.OK_LINK_CHECK,
)
def task_provision():
"""ensure the ipydrawio-export server has been provisioned with npm (ick)"""
return P._ok(
dict(
file_dep=[*P.OK_SERVEREXT.values()],
actions=[
["jupyter", "ipydrawio-export", "--version"],
["jupyter", "ipydrawio-export", "provision"],
],
),
P.OK_PROVISION,
)
def _pytest(setup_py, *pytest_args):
return CmdAction(
[*P.PYM, "pytest", *P.PYTEST_ARGS, *pytest_args],
shell=False,
cwd=str(setup_py.parent),
)
def task_test():
"""run tests"""
if not P.TESTING_IN_CI:
yield P._ok(
dict(
name="integrity",
file_dep=[
P.SCRIPTS / "integrity.py",
P.OK_LINT,
P.DEMO_CONFIG,
*[*P.OK_SERVEREXT.values()],
*[*P.PY_WHEEL.values()],
*[*P.PY_SDIST.values()],
],
actions=[
["python", "-m", "pytest", "--pyargs", "scripts.integrity", "-vv"]
],
),
P.OK_INTEGRITY,
)
for pkg, setup in P.PY_SETUP.items():
html = P.BUILD / f"pytest/{pkg}/test.html"
htmlcov = P.BUILD / f"pytest/{pkg}/htmlcov"
pytest_args = [
f"--cov-report=html:{htmlcov}",
f"--html={html}",
"--self-contained-html",
]
yield P._ok(
dict(
name=f"pytest:{pkg}",
uptodate=[config_changed(dict(PYTEST_ARGS=P.PYTEST_ARGS))],
file_dep=[
*P.PY_SRC[pkg],
P.PY_SETUP_CFG[pkg],
*P.PY_TEST_DEP.get(pkg, []),
P.OK_PROVISION,
P.OK_PIP_CHECK,
],
actions=[
(P.delete_some, [htmlcov, html]),
_pytest(setup, *pytest_args),
],
targets=[htmlcov / "status.json", html],
),
P.OK_PYTEST[pkg],
)
file_dep = [
*P.ALL_ROBOT,
P.OK_PROVISION,
*sum(P.PY_SRC.values(), []),
*sum(P.JS_TSSRC.values(), []),
P.SCRIPTS / "atest.py",
]
if not P.TESTING_IN_CI:
file_dep += [
P.OK_ROBOT_DRYRUN,
P.DEMO_HASHES,
*P.OK_SERVEREXT.values(),
]
yield P._ok(
dict(
name="robot",
uptodate=[config_changed(dict(ATEST_ARGS=P.ATEST_ARGS))],
file_dep=file_dep,
actions=[["python", "-m", "scripts.atest"]],
),
P.OK_ATEST,
)
| 27.341916 | 113 | 0.460206 |
d33c37a02c4881da198300ca4024f56fe7e52f16 | 1,344 | py | Python | app/core/tests/test_admin.py | MOFD308/recipe-app-api | efea5c28ae92a8c89b43921240855a0ca2b3f3a9 | ["MIT"] | null | null | null | app/core/tests/test_admin.py | MOFD308/recipe-app-api | efea5c28ae92a8c89b43921240855a0ca2b3f3a9 | ["MIT"] | null | null | null | app/core/tests/test_admin.py | MOFD308/recipe-app-api | efea5c28ae92a8c89b43921240855a0ca2b3f3a9 | ["MIT"] | null | null | null |
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email="admin@umich.edu",
password="123"
)
# we log the client in with admin information
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email="mofd@umich.edu",
password="123",
name="MOFD"
)
def test_users_listed(self):
"""Test that users are listed on the user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_page_change(self):
"""Test that the user test page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create user page works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
| 31.255814 | 68 | 0.634673 |
95625f7a2d89ef09b5713aa2010987c0aa83e54a | 2,735 | py | Python | tests/test_tokenization_phobert.py | WERimagin/transformers | cc7d14511c647f8147494df72f8b0575015e37ab | ["Apache-2.0"] | 47 | 2021-04-16T22:29:25.000Z | 2022-02-11T08:19:13.000Z | tests/test_tokenization_phobert.py | WERimagin/transformers | cc7d14511c647f8147494df72f8b0575015e37ab | ["Apache-2.0"] | 12 | 2021-04-28T19:45:02.000Z | 2021-08-31T13:56:02.000Z | tests/test_tokenization_phobert.py | WERimagin/transformers | cc7d14511c647f8147494df72f8b0575015e37ab | ["Apache-2.0"] | 5 | 2021-04-28T21:54:15.000Z | 2022-02-11T07:48:17.000Z |
# coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from transformers.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from .test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = PhobertTokenizer
def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "l à</w>"]
self.special_tokens_map = {"unk_token": "<unk>"}
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file, "w", encoding="utf-8") as fp:
for token in vocab_tokens:
fp.write("{} {}".format(token, vocab_tokens[token]) + "\n")
with open(self.merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
def get_tokenizer(self, **kwargs):
kwargs.update(self.special_tokens_map)
return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
def get_input_output_texts(self, tokenizer):
input_text = "Tôi là VinAI Research"
output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
return input_text, output_text
def test_full_tokenizer(self):
tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
text = "Tôi là VinAI Research"
bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
tokens = tokenizer.tokenize(text)
print(tokens)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 40.820896 | 103 | 0.668739 |
b98822a332ba2001e90ea72ef66e6fcfbecd4400 | 11,250 | py | Python | test/functional/mining_prioritisetransaction.py | qogecoin/qogecoin | fce42076f1a2746525374f50f35939392f37ca84 | ["MIT"] | 9 | 2021-10-30T01:01:50.000Z | 2022-02-10T02:20:44.000Z | test/functional/mining_prioritisetransaction.py | qogecoin/qogecoin | fce42076f1a2746525374f50f35939392f37ca84 | ["MIT"] | 4 | 2021-10-17T19:59:16.000Z | 2021-11-04T19:11:25.000Z | test/functional/mining_prioritisetransaction.py | qogecoin/qogecoin | fce42076f1a2746525374f50f35939392f37ca84 | ["MIT"] | 7 | 2021-11-01T09:09:41.000Z | 2022-03-23T02:47:30.000Z |
#!/usr/bin/env python3
# Copyright (c) 2015-2021 The Bitcoin and Qogecoin Core Authors
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the prioritisetransaction mining RPC."""
from decimal import Decimal
import time
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.messages import COIN, MAX_BLOCK_WEIGHT
from test_framework.test_framework import QogecoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, create_confirmed_utxos, create_lots_of_big_transactions, gen_return_txouts
from test_framework.wallet import MiniWallet
class PrioritiseTransactionTest(QogecoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [[
"-printpriority=1",
"-acceptnonstdtxn=1",
]] * self.num_nodes
self.supports_cli = False
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def test_diamond(self):
self.log.info("Test diamond-shape package with priority")
self.generate(self.wallet, COINBASE_MATURITY + 1)
mock_time = int(time.time())
self.nodes[0].setmocktime(mock_time)
# tx_a
# / \
# / \
# tx_b tx_c
# \ /
# \ /
# tx_d
tx_o_a = self.wallet.send_self_transfer_multi(
from_node=self.nodes[0],
num_outputs=2,
)
txid_a = tx_o_a["txid"]
tx_o_b, tx_o_c = [self.wallet.send_self_transfer(
from_node=self.nodes[0],
utxo_to_spend=u,
) for u in tx_o_a["new_utxos"]]
txid_b = tx_o_b["txid"]
txid_c = tx_o_c["txid"]
tx_o_d = self.wallet.send_self_transfer_multi(
from_node=self.nodes[0],
utxos_to_spend=[
self.wallet.get_utxo(txid=txid_b),
self.wallet.get_utxo(txid=txid_c),
],
)
txid_d = tx_o_d["txid"]
self.log.info("Test priority while txs are in mempool")
raw_before = self.nodes[0].getrawmempool(verbose=True)
fee_delta_b = Decimal(9999) / COIN
fee_delta_c_1 = Decimal(-1234) / COIN
fee_delta_c_2 = Decimal(8888) / COIN
self.nodes[0].prioritisetransaction(txid=txid_b, fee_delta=int(fee_delta_b * COIN))
self.nodes[0].prioritisetransaction(txid=txid_c, fee_delta=int(fee_delta_c_1 * COIN))
self.nodes[0].prioritisetransaction(txid=txid_c, fee_delta=int(fee_delta_c_2 * COIN))
raw_before[txid_a]["fees"]["descendant"] += fee_delta_b + fee_delta_c_1 + fee_delta_c_2
raw_before[txid_b]["fees"]["modified"] += fee_delta_b
raw_before[txid_b]["fees"]["ancestor"] += fee_delta_b
raw_before[txid_b]["fees"]["descendant"] += fee_delta_b
raw_before[txid_c]["fees"]["modified"] += fee_delta_c_1 + fee_delta_c_2
raw_before[txid_c]["fees"]["ancestor"] += fee_delta_c_1 + fee_delta_c_2
raw_before[txid_c]["fees"]["descendant"] += fee_delta_c_1 + fee_delta_c_2
raw_before[txid_d]["fees"]["ancestor"] += fee_delta_b + fee_delta_c_1 + fee_delta_c_2
raw_after = self.nodes[0].getrawmempool(verbose=True)
assert_equal(raw_before[txid_a], raw_after[txid_a])
assert_equal(raw_before, raw_after)
self.log.info("Test priority while txs are not in mempool")
self.restart_node(0, extra_args=["-nopersistmempool"])
self.nodes[0].setmocktime(mock_time)
assert_equal(self.nodes[0].getmempoolinfo()["size"], 0)
self.nodes[0].prioritisetransaction(txid=txid_b, fee_delta=int(fee_delta_b * COIN))
self.nodes[0].prioritisetransaction(txid=txid_c, fee_delta=int(fee_delta_c_1 * COIN))
self.nodes[0].prioritisetransaction(txid=txid_c, fee_delta=int(fee_delta_c_2 * COIN))
for t in [tx_o_a["hex"], tx_o_b["hex"], tx_o_c["hex"], tx_o_d["hex"]]:
self.nodes[0].sendrawtransaction(t)
raw_after = self.nodes[0].getrawmempool(verbose=True)
assert_equal(raw_before[txid_a], raw_after[txid_a])
assert_equal(raw_before, raw_after)
# Clear mempool
self.generate(self.nodes[0], 1)
# Use default extra_args
self.restart_node(0)
def run_test(self):
self.wallet = MiniWallet(self.nodes[0])
# Test `prioritisetransaction` required parameters
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction)
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '')
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0)
# Test `prioritisetransaction` invalid extra parameters
assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0, 0, 0)
# Test `prioritisetransaction` invalid `txid`
assert_raises_rpc_error(-8, "txid must be of length 64 (not 3, for 'foo')", self.nodes[0].prioritisetransaction, txid='foo', fee_delta=0)
assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'Zd1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000')", self.nodes[0].prioritisetransaction, txid='Zd1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000', fee_delta=0)
# Test `prioritisetransaction` invalid `dummy`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-1, "JSON value is not a number as expected", self.nodes[0].prioritisetransaction, txid, 'foo', 0)
assert_raises_rpc_error(-8, "Priority is no longer supported, dummy argument to prioritisetransaction must be 0.", self.nodes[0].prioritisetransaction, txid, 1, 0)
# Test `prioritisetransaction` invalid `fee_delta`
assert_raises_rpc_error(-1, "JSON value is not an integer as expected", self.nodes[0].prioritisetransaction, txid=txid, fee_delta='foo')
self.test_diamond()
self.txouts = gen_return_txouts()
self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
utxo_count = 90
utxos = create_confirmed_utxos(self, self.relayfee, self.nodes[0], utxo_count)
base_fee = self.relayfee*100 # our transactions are smaller than 100kb
txids = []
# Create 3 batches of transactions at 3 different fee rate levels
range_size = utxo_count // 3
for i in range(3):
txids.append([])
start_range = i * range_size
end_range = start_range + range_size
txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee)
# Make sure that the size of each group of transactions exceeds
# MAX_BLOCK_WEIGHT // 4 -- otherwise the test needs to be revised to
# create more transactions.
mempool = self.nodes[0].getrawmempool(True)
sizes = [0, 0, 0]
for i in range(3):
for j in txids[i]:
assert j in mempool
sizes[i] += mempool[j]['vsize']
assert sizes[i] > MAX_BLOCK_WEIGHT // 4 # Fail => raise utxo_count
# add a fee delta to something in the cheapest bucket and make sure it gets mined
# also check that a different entry in the cheapest bucket is NOT mined
self.nodes[0].prioritisetransaction(txid=txids[0][0], fee_delta=int(3*base_fee*COIN))
self.generate(self.nodes[0], 1)
mempool = self.nodes[0].getrawmempool()
self.log.info("Assert that prioritised transaction was mined")
assert txids[0][0] not in mempool
assert txids[0][1] in mempool
high_fee_tx = None
for x in txids[2]:
if x not in mempool:
high_fee_tx = x
# Something high-fee should have been mined!
assert high_fee_tx is not None
# Add a prioritisation before a tx is in the mempool (de-prioritising a
# high-fee transaction so that it's now low fee).
self.nodes[0].prioritisetransaction(txid=high_fee_tx, fee_delta=-int(2*base_fee*COIN))
# Add everything back to mempool
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Check to make sure our high fee rate tx is back in the mempool
mempool = self.nodes[0].getrawmempool()
assert high_fee_tx in mempool
# Now verify the modified-high feerate transaction isn't mined before
# the other high fee transactions. Keep mining until our mempool has
# decreased by all the high fee size that we calculated above.
while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]):
self.generate(self.nodes[0], 1, sync_fun=self.no_op)
# High fee transaction should not have been mined, but other high fee rate
# transactions should have been.
mempool = self.nodes[0].getrawmempool()
self.log.info("Assert that de-prioritised transaction is still in mempool")
assert high_fee_tx in mempool
for x in txids[2]:
if (x != high_fee_tx):
assert x not in mempool
# Create a free transaction. Should be rejected.
utxo_list = self.nodes[0].listunspent()
assert len(utxo_list) > 0
utxo = utxo_list[0]
inputs = []
outputs = {}
inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[0].getnewaddress()] = utxo["amount"]
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
tx_hex = self.nodes[0].signrawtransactionwithwallet(raw_tx)["hex"]
tx_id = self.nodes[0].decoderawtransaction(tx_hex)["txid"]
# This will raise an exception due to min relay fee not being met
assert_raises_rpc_error(-26, "min relay fee not met", self.nodes[0].sendrawtransaction, tx_hex)
assert tx_id not in self.nodes[0].getrawmempool()
# This is a less than 1000-byte transaction, so just set the fee
# to be the minimum for a 1000-byte transaction and check that it is
# accepted.
self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=int(self.relayfee*COIN))
self.log.info("Assert that prioritised free transaction is accepted to mempool")
assert_equal(self.nodes[0].sendrawtransaction(tx_hex), tx_id)
assert tx_id in self.nodes[0].getrawmempool()
# Test that calling prioritisetransaction is sufficient to trigger
# getblocktemplate to (eventually) return a new block.
mock_time = int(time.time())
self.nodes[0].setmocktime(mock_time)
template = self.nodes[0].getblocktemplate({'rules': ['segwit']})
self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=-int(self.relayfee*COIN))
self.nodes[0].setmocktime(mock_time+10)
new_template = self.nodes[0].getblocktemplate({'rules': ['segwit']})
assert template != new_template
if __name__ == '__main__':
PrioritiseTransactionTest().main()
| 46.680498 | 266 | 0.664978 |
aa059eb24e49ae91af4ab81d438cf9f140032381 | 5,078 | py | Python | credmon/CredentialMonitors/LocalCredmon.py | matyasselmeci/scitokens-credmon | 70b943c7caf90657c78ac7c52723bfa11dd5e0fd | ["Apache-2.0"] | 1 | 2019-01-10T22:25:11.000Z | 2019-01-10T22:25:11.000Z | credmon/CredentialMonitors/LocalCredmon.py | matyasselmeci/scitokens-credmon | 70b943c7caf90657c78ac7c52723bfa11dd5e0fd | ["Apache-2.0"] | 36 | 2019-01-10T21:43:15.000Z | 2020-09-24T12:36:34.000Z | credmon/CredentialMonitors/LocalCredmon.py | matyasselmeci/scitokens-credmon | 70b943c7caf90657c78ac7c52723bfa11dd5e0fd | ["Apache-2.0"] | 6 | 2019-02-08T02:25:20.000Z | 2020-12-02T21:34:36.000Z |
import os
import shutil
import glob
import scitokens
import htcondor
from credmon.CredentialMonitors.OAuthCredmon import OAuthCredmon
from credmon.utils import atomic_output_json
class LocalCredmon(OAuthCredmon):
"""
This credential monitor class provides the ability to self-sign SciTokens
without needing to access a remote service; only useful when the credd host has
a copy of the private signing key.
"""
use_token_metadata = False
def __init__(self, *args, **kwargs):
super(LocalCredmon, self).__init__(*args, **kwargs)
self.provider = "scitokens"
self.token_issuer = None
self.authz_template = "read:/user/{username} write:/user/{username}"
self.token_lifetime = 60*20
        if htcondor is not None:
self._private_key_location = htcondor.param.get('LOCAL_CREDMON_PRIVATE_KEY', "/etc/condor/scitokens-private.pem")
            if self._private_key_location is not None and os.path.exists(self._private_key_location):
with open(self._private_key_location, 'r') as private_key:
self._private_key = private_key.read()
self._private_key_id = htcondor.param.get('LOCAL_CREDMON_KEY_ID', "local")
else:
self.log.error("LOCAL_CREDMON_PRIVATE_KEY specified, but not key not found or not readable")
self.provider = htcondor.param.get("LOCAL_CREDMON_PROVIDER_NAME", "scitokens")
self.token_issuer = htcondor.param.get("LOCAL_CREDMON_ISSUER", self.token_issuer)
self.authz_template = htcondor.param.get("LOCAL_CREDMON_AUTHZ_TEMPLATE", self.authz_template)
self.token_lifetime = htcondor.param.get("LOCAL_CREDMON_TOKEN_LIFETIME", self.token_lifetime)
else:
self._private_key_location = None
if not self.token_issuer:
self.token_issuer = 'https://{}'.format(htcondor.param["FULL_HOSTNAME"])
def refresh_access_token(self, username, token_name):
"""
Create a SciToken at the specified path.
"""
token = scitokens.SciToken(algorithm="ES256", key=self._private_key, key_id=self._private_key_id)
token.update_claims({'sub': username})
user_authz = self.authz_template.format(username=username)
token.update_claims({'scope': user_authz})
# Serialize the token and write it to a file
serialized_token = token.serialize(issuer=self.token_issuer, lifetime=int(self.token_lifetime))
oauth_response = {"access_token": serialized_token.decode(),
"expires_in": int(self.token_lifetime)}
access_token_path = os.path.join(self.cred_dir, username, token_name + '.use')
try:
atomic_output_json(oauth_response, access_token_path)
except OSError as oe:
self.log.exception("Failure when writing out new access token to {}: {}.".format(
access_token_path, str(oe)))
return False
return True
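    # Hedged example (editor's illustration, not from upstream docs): with the
    # default authz_template and token_lifetime above,
    # refresh_access_token("alice", "scitokens") writes
    # <cred_dir>/alice/scitokens.use containing JSON like
    #     {"access_token": "<serialized SciToken>", "expires_in": 1200}
    # where the token's claims include sub="alice" and
    # scope="read:/user/alice write:/user/alice".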
def process_cred_file(self, cred_fname):
"""
Split out the file path to get username and base.
Pass that data to the SciToken acquiring function.
Format of cred_path should be:
<cred_dir> / <username> / <provider>.top
"""
# Take the cred_dir out of the cred_path
if htcondor.param.get("LOCAL_CREDMON_KRB_MODE", False):
base = os.path.splitext(cred_fname)[0]
if not os.path.isdir(base):
os.makedirs(base)
os.chmod(base, 0o2775)
else:
base, _ = os.path.split(cred_fname)
username = os.path.basename(base)
if self.should_renew(username, self.provider):
self.log.info('Found %s, acquiring SciToken and .use file', cred_fname)
success = self.refresh_access_token(username, self.provider)
if success:
if htcondor.param.get("LOCAL_CREDMON_KRB_MODE", False):
shutil.copy(os.path.join(base, self.provider + '.use'), base + '.cc')
self.log.info("Successfully renewed SciToken for user: %s", username)
else:
self.log.error("Failed to renew SciToken for user: %s", username)
def scan_tokens(self):
"""
Scan the credential directory for new credential requests.
The LocalCredmon will look for files of the form `<username>/<provider>.top`
and create the corresponding access token files, then invoke the parent OAuthCredmon
method.
"""
provider_glob = os.path.join(self.cred_dir, "*", "{}.top".format(self.provider))
if htcondor.param.get("LOCAL_CREDMON_KRB_MODE", False):
provider_glob = os.path.join(self.cred_dir, "*.cred")
self.log.info("Looking for *.cred files since LOCAL_CREDMON_KRB_MODE is set, found {} files".format(len(glob.glob(provider_glob))))
for file_name in glob.glob(provider_glob):
self.process_cred_file(file_name)
        super(LocalCredmon, self).scan_tokens()
| 43.775862 | 143 | 0.651241 |
2eb38c0f77ecd778e48b7d12cc89aef7b309e424 | 20,578 | py | Python | python/cudf/cudf/utils/dtypes.py | etseidl/cudf | 84073e8c3c9477c8afa974f14058f1208f63aba2 | ["Apache-2.0"] | 1 | 2021-12-12T20:13:05.000Z | 2021-12-12T20:13:05.000Z | python/cudf/cudf/utils/dtypes.py | etseidl/cudf | 84073e8c3c9477c8afa974f14058f1208f63aba2 | ["Apache-2.0"] | null | null | null | python/cudf/cudf/utils/dtypes.py | etseidl/cudf | 84073e8c3c9477c8afa974f14058f1208f63aba2 | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
import datetime as dt
from collections import namedtuple
from decimal import Decimal
import cupy as cp
import numpy as np
import pandas as pd
import pyarrow as pa
from pandas.core.dtypes.common import infer_dtype_from_object
import cudf
from cudf.core._compat import PANDAS_GE_120
_NA_REP = "<NA>"
"""Map numpy dtype to pyarrow types.
Note that np.bool_ bitwidth (8) is different from pa.bool_ (1). Special
handling is required when converting a Boolean column into arrow.
"""
_np_pa_dtypes = {
np.float64: pa.float64(),
np.float32: pa.float32(),
np.int64: pa.int64(),
np.longlong: pa.int64(),
np.int32: pa.int32(),
np.int16: pa.int16(),
np.int8: pa.int8(),
np.bool_: pa.bool_(),
np.uint64: pa.uint64(),
np.uint32: pa.uint32(),
np.uint16: pa.uint16(),
np.uint8: pa.uint8(),
np.datetime64: pa.date64(),
np.object_: pa.string(),
np.str_: pa.string(),
}
np_dtypes_to_pandas_dtypes = {
np.dtype("uint8"): pd.UInt8Dtype(),
np.dtype("uint16"): pd.UInt16Dtype(),
np.dtype("uint32"): pd.UInt32Dtype(),
np.dtype("uint64"): pd.UInt64Dtype(),
np.dtype("int8"): pd.Int8Dtype(),
np.dtype("int16"): pd.Int16Dtype(),
np.dtype("int32"): pd.Int32Dtype(),
np.dtype("int64"): pd.Int64Dtype(),
np.dtype("bool_"): pd.BooleanDtype(),
np.dtype("object"): pd.StringDtype(),
}
pyarrow_dtypes_to_pandas_dtypes = {
pa.uint8(): pd.UInt8Dtype(),
pa.uint16(): pd.UInt16Dtype(),
pa.uint32(): pd.UInt32Dtype(),
pa.uint64(): pd.UInt64Dtype(),
pa.int8(): pd.Int8Dtype(),
pa.int16(): pd.Int16Dtype(),
pa.int32(): pd.Int32Dtype(),
pa.int64(): pd.Int64Dtype(),
pa.bool_(): pd.BooleanDtype(),
pa.string(): pd.StringDtype(),
}
pandas_dtypes_to_np_dtypes = {
pd.UInt8Dtype(): np.dtype("uint8"),
pd.UInt16Dtype(): np.dtype("uint16"),
pd.UInt32Dtype(): np.dtype("uint32"),
pd.UInt64Dtype(): np.dtype("uint64"),
pd.Int8Dtype(): np.dtype("int8"),
pd.Int16Dtype(): np.dtype("int16"),
pd.Int32Dtype(): np.dtype("int32"),
pd.Int64Dtype(): np.dtype("int64"),
pd.BooleanDtype(): np.dtype("bool_"),
pd.StringDtype(): np.dtype("object"),
}
pandas_dtypes_alias_to_cudf_alias = {
"UInt8": "uint8",
"UInt16": "uint16",
"UInt32": "uint32",
"UInt64": "uint64",
"Int8": "int8",
"Int16": "int16",
"Int32": "int32",
"Int64": "int64",
"boolean": "bool",
}
if PANDAS_GE_120:
np_dtypes_to_pandas_dtypes[np.dtype("float32")] = pd.Float32Dtype()
np_dtypes_to_pandas_dtypes[np.dtype("float64")] = pd.Float64Dtype()
pandas_dtypes_to_np_dtypes[pd.Float32Dtype()] = np.dtype("float32")
pandas_dtypes_to_np_dtypes[pd.Float64Dtype()] = np.dtype("float64")
pandas_dtypes_alias_to_cudf_alias["Float32"] = "float32"
pandas_dtypes_alias_to_cudf_alias["Float64"] = "float64"
SIGNED_INTEGER_TYPES = {"int8", "int16", "int32", "int64"}
UNSIGNED_TYPES = {"uint8", "uint16", "uint32", "uint64"}
INTEGER_TYPES = SIGNED_INTEGER_TYPES | UNSIGNED_TYPES
FLOAT_TYPES = {"float32", "float64"}
SIGNED_TYPES = SIGNED_INTEGER_TYPES | FLOAT_TYPES
NUMERIC_TYPES = SIGNED_TYPES | UNSIGNED_TYPES
DATETIME_TYPES = {
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
}
TIMEDELTA_TYPES = {
"timedelta64[s]",
"timedelta64[ms]",
"timedelta64[us]",
"timedelta64[ns]",
}
OTHER_TYPES = {"bool", "category", "str"}
STRING_TYPES = {"object"}
BOOL_TYPES = {"bool"}
ALL_TYPES = NUMERIC_TYPES | DATETIME_TYPES | TIMEDELTA_TYPES | OTHER_TYPES
def np_to_pa_dtype(dtype):
"""Util to convert numpy dtype to PyArrow dtype."""
# special case when dtype is np.datetime64
if dtype.kind == "M":
time_unit, _ = np.datetime_data(dtype)
if time_unit in ("s", "ms", "us", "ns"):
# return a pa.Timestamp of the appropriate unit
return pa.timestamp(time_unit)
# default is int64_t UNIX ms
return pa.date64()
elif dtype.kind == "m":
time_unit, _ = np.datetime_data(dtype)
if time_unit in ("s", "ms", "us", "ns"):
# return a pa.Duration of the appropriate unit
return pa.duration(time_unit)
# default fallback unit is ns
return pa.duration("ns")
return _np_pa_dtypes[cudf.dtype(dtype).type]
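# Illustrative examples of np_to_pa_dtype (editor's sketch; results follow the
# branches above):
#     np_to_pa_dtype(np.dtype("datetime64[ms]"))   # -> pa.timestamp("ms")
#     np_to_pa_dtype(np.dtype("timedelta64[Y]"))   # -> pa.duration("ns") fallback
#     np_to_pa_dtype(np.dtype("int32"))            # -> pa.int32()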
def get_numeric_type_info(dtype):
_TypeMinMax = namedtuple("_TypeMinMax", "min,max")
if dtype.kind in {"i", "u"}:
info = np.iinfo(dtype)
return _TypeMinMax(info.min, info.max)
elif dtype.kind == "f":
return _TypeMinMax(dtype.type("-inf"), dtype.type("+inf"))
else:
raise TypeError(dtype)
def numeric_normalize_types(*args):
"""Cast all args to a common type using numpy promotion logic"""
dtype = np.result_type(*[a.dtype for a in args])
return [a.astype(dtype) for a in args]
def _find_common_type_decimal(dtypes):
# Find the largest scale and the largest difference between
# precision and scale of the columns to be concatenated
s = max([dtype.scale for dtype in dtypes])
lhs = max([dtype.precision - dtype.scale for dtype in dtypes])
# Combine to get the necessary precision and clip at the maximum
# precision
p = min(cudf.Decimal64Dtype.MAX_PRECISION, s + lhs)
return cudf.Decimal64Dtype(p, s)
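# Worked example for _find_common_type_decimal (editor's sketch): combining
# Decimal64Dtype(precision=5, scale=2) and Decimal64Dtype(precision=7, scale=3)
# gives s = 3 and lhs = max(3, 4) = 4, so the result is Decimal64Dtype(7, 3)
# (clipped at Decimal64Dtype.MAX_PRECISION if the sum were larger).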
def cudf_dtype_from_pydata_dtype(dtype):
"""Given a numpy or pandas dtype, converts it into the equivalent cuDF
Python dtype.
"""
if cudf.api.types.is_categorical_dtype(dtype):
return cudf.core.dtypes.CategoricalDtype
elif cudf.api.types.is_decimal32_dtype(dtype):
return cudf.core.dtypes.Decimal32Dtype
elif cudf.api.types.is_decimal64_dtype(dtype):
return cudf.core.dtypes.Decimal64Dtype
elif dtype in cudf._lib.types.SUPPORTED_NUMPY_TO_LIBCUDF_TYPES:
return dtype.type
return infer_dtype_from_object(dtype)
def cudf_dtype_to_pa_type(dtype):
"""Given a cudf pandas dtype, converts it into the equivalent cuDF
Python dtype.
"""
if cudf.api.types.is_categorical_dtype(dtype):
raise NotImplementedError()
elif (
cudf.api.types.is_list_dtype(dtype)
or cudf.api.types.is_struct_dtype(dtype)
or cudf.api.types.is_decimal_dtype(dtype)
):
return dtype.to_arrow()
else:
return np_to_pa_dtype(cudf.dtype(dtype))
def cudf_dtype_from_pa_type(typ):
"""Given a cuDF pyarrow dtype, converts it into the equivalent
cudf pandas dtype.
"""
if pa.types.is_list(typ):
return cudf.core.dtypes.ListDtype.from_arrow(typ)
elif pa.types.is_struct(typ):
return cudf.core.dtypes.StructDtype.from_arrow(typ)
elif pa.types.is_decimal(typ):
return cudf.core.dtypes.Decimal64Dtype.from_arrow(typ)
else:
return cudf.api.types.pandas_dtype(typ.to_pandas_dtype())
def to_cudf_compatible_scalar(val, dtype=None):
"""
Converts the value `val` to a numpy/Pandas scalar,
optionally casting to `dtype`.
If `val` is None, returns None.
"""
if cudf._lib.scalar._is_null_host_scalar(val) or isinstance(
val, cudf.Scalar
):
return val
if not cudf.api.types._is_scalar_or_zero_d_array(val):
raise ValueError(
f"Cannot convert value of type {type(val).__name__} "
"to cudf scalar"
)
if isinstance(val, Decimal):
return val
if isinstance(val, (np.ndarray, cp.ndarray)) and val.ndim == 0:
val = val.item()
if (
(dtype is None) and isinstance(val, str)
) or cudf.api.types.is_string_dtype(dtype):
dtype = "str"
if isinstance(val, dt.datetime):
val = np.datetime64(val)
elif isinstance(val, dt.timedelta):
val = np.timedelta64(val)
elif isinstance(val, pd.Timestamp):
val = val.to_datetime64()
elif isinstance(val, pd.Timedelta):
val = val.to_timedelta64()
val = cudf.api.types.pandas_dtype(type(val)).type(val)
if dtype is not None:
val = val.astype(dtype)
if val.dtype.type is np.datetime64:
time_unit, _ = np.datetime_data(val.dtype)
if time_unit in ("D", "W", "M", "Y"):
val = val.astype("datetime64[s]")
elif val.dtype.type is np.timedelta64:
time_unit, _ = np.datetime_data(val.dtype)
if time_unit in ("D", "W", "M", "Y"):
val = val.astype("timedelta64[ns]")
return val
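# Illustrative behavior of to_cudf_compatible_scalar (editor's sketch):
#     to_cudf_compatible_scalar(3, dtype="int8")   # -> numpy.int8(3)
#     to_cudf_compatible_scalar(None)              # -> None (nulls pass through)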
def is_column_like(obj):
"""
This function checks if the given `obj`
is a column-like (Series, Index...)
type or not.
Parameters
----------
obj : object of any type which needs to be validated.
Returns
-------
Boolean: True or False depending on whether the
input `obj` is column-like or not.
"""
return (
isinstance(
obj,
(
cudf.core.column.ColumnBase,
cudf.Series,
cudf.Index,
pd.Series,
pd.Index,
),
)
or (
hasattr(obj, "__cuda_array_interface__")
and len(obj.__cuda_array_interface__["shape"]) == 1
)
or (
hasattr(obj, "__array_interface__")
and len(obj.__array_interface__["shape"]) == 1
)
)
def can_convert_to_column(obj):
"""
This function checks if the given `obj`
can be used to create a column or not.
Parameters
----------
obj : object of any type which needs to be validated.
Returns
-------
Boolean: True or False depending on whether the
input `obj` is column-compatible or not.
"""
return is_column_like(obj) or cudf.api.types.is_list_like(obj)
def min_scalar_type(a, min_size=8):
return min_signed_type(a, min_size=min_size)
def min_signed_type(x, min_size=8):
"""
Return the smallest *signed* integer dtype
that can represent the integer ``x``
"""
for int_dtype in np.sctypes["int"]:
if (cudf.dtype(int_dtype).itemsize * 8) >= min_size:
if np.iinfo(int_dtype).min <= x <= np.iinfo(int_dtype).max:
return int_dtype
# resort to using `int64` and let numpy raise appropriate exception:
return np.int64(x).dtype
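# Illustrative results for min_signed_type (editor's sketch, default min_size=8):
#     min_signed_type(5)      # -> numpy.int8  (fits in 8 bits)
#     min_signed_type(300)    # -> numpy.int16 (too big for int8)
#     min_signed_type(-5)     # -> numpy.int8  (signed range includes it)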
def min_unsigned_type(x, min_size=8):
"""
Return the smallest *unsigned* integer dtype
that can represent the integer ``x``
"""
for int_dtype in np.sctypes["uint"]:
if (cudf.dtype(int_dtype).itemsize * 8) >= min_size:
if 0 <= x <= np.iinfo(int_dtype).max:
return int_dtype
# resort to using `uint64` and let numpy raise appropriate exception:
return np.uint64(x).dtype
def min_column_type(x, expected_type):
"""
Return the smallest dtype which can represent all
elements of the `NumericalColumn` `x`
If the column is not a subtype of `np.signedinteger` or `np.floating`
returns the same dtype as the dtype of `x` without modification
"""
if not isinstance(x, cudf.core.column.NumericalColumn):
raise TypeError("Argument x must be of type column.NumericalColumn")
if x.valid_count == 0:
return x.dtype
if np.issubdtype(x.dtype, np.floating):
return get_min_float_dtype(x)
elif np.issubdtype(expected_type, np.integer):
max_bound_dtype = np.min_scalar_type(x.max())
min_bound_dtype = np.min_scalar_type(x.min())
result_type = np.promote_types(max_bound_dtype, min_bound_dtype)
else:
result_type = x.dtype
return cudf.dtype(result_type)
def get_min_float_dtype(col):
max_bound_dtype = np.min_scalar_type(float(col.max()))
min_bound_dtype = np.min_scalar_type(float(col.min()))
result_type = np.promote_types(
"float32", np.promote_types(max_bound_dtype, min_bound_dtype)
)
return cudf.dtype(result_type)
def is_mixed_with_object_dtype(lhs, rhs):
return (lhs.dtype == "object" and rhs.dtype != "object") or (
rhs.dtype == "object" and lhs.dtype != "object"
)
def get_time_unit(obj):
if isinstance(
obj,
(
cudf.core.column.datetime.DatetimeColumn,
cudf.core.column.timedelta.TimeDeltaColumn,
),
):
return obj.time_unit
time_unit, _ = np.datetime_data(obj.dtype)
return time_unit
def _get_nan_for_dtype(dtype):
dtype = cudf.dtype(dtype)
if pd.api.types.is_datetime64_dtype(
dtype
) or pd.api.types.is_timedelta64_dtype(dtype):
time_unit, _ = np.datetime_data(dtype)
return dtype.type("nat", time_unit)
elif dtype.kind == "f":
return dtype.type("nan")
else:
return np.float64("nan")
def _decimal_to_int64(decimal: Decimal) -> int:
"""
Scale a Decimal such that the result is the integer
that would result from removing the decimal point.
Examples
--------
>>> _decimal_to_int64(Decimal('1.42'))
142
>>> _decimal_to_int64(Decimal('0.0042'))
42
>>> _decimal_to_int64(Decimal('-1.004201'))
-1004201
"""
return int(f"{decimal:0f}".replace(".", ""))
def get_allowed_combinations_for_operator(dtype_l, dtype_r, op):
error = TypeError(
f"{op} not supported between {dtype_l} and {dtype_r} scalars"
)
to_numpy_ops = {
"__add__": _ADD_TYPES,
"__sub__": _SUB_TYPES,
"__mul__": _MUL_TYPES,
"__floordiv__": _FLOORDIV_TYPES,
"__truediv__": _TRUEDIV_TYPES,
"__mod__": _MOD_TYPES,
"__pow__": _POW_TYPES,
}
allowed = to_numpy_ops.get(op, op)
# special rules for string
if dtype_l == "object" or dtype_r == "object":
if (dtype_l == dtype_r == "object") and op == "__add__":
return "str"
else:
raise error
# Check if we can directly operate
for valid_combo in allowed:
ltype, rtype, outtype = valid_combo
if np.can_cast(dtype_l.char, ltype) and np.can_cast(
dtype_r.char, rtype
):
return outtype
raise error
def find_common_type(dtypes):
"""
Wrapper over np.find_common_type to handle special cases
Corner cases:
1. "M8", "M8" -> "M8" | "m8", "m8" -> "m8"
Parameters
----------
dtypes : iterable, sequence of dtypes to find common types
Returns
-------
dtype : np.dtype optional, the result from np.find_common_type,
None if input is empty
"""
if len(dtypes) == 0:
return None
# Early exit for categoricals since they're not hashable and therefore
# can't be put in a set.
if any(cudf.api.types.is_categorical_dtype(dtype) for dtype in dtypes):
if all(
(
cudf.api.types.is_categorical_dtype(dtype)
and (not dtype.ordered if hasattr(dtype, "ordered") else True)
)
for dtype in dtypes
):
if len(set(dtype._categories.dtype for dtype in dtypes)) == 1:
return cudf.CategoricalDtype(
cudf.core.column.concat_columns(
[dtype._categories for dtype in dtypes]
).unique()
)
else:
raise ValueError(
"Only unordered categories of the same underlying type "
"may be coerced to a common type."
)
else:
# TODO: Should this be an error case (mixing categorical with other
# dtypes) or should this return object? Unclear if we have enough
# information to decide right now, may have to come back to this as
# usage of find_common_type increases.
return cudf.dtype("O")
# Aggregate same types
dtypes = set(dtypes)
if any(cudf.api.types.is_decimal_dtype(dtype) for dtype in dtypes):
if all(
cudf.api.types.is_decimal_dtype(dtype)
or cudf.api.types.is_numeric_dtype(dtype)
for dtype in dtypes
):
return _find_common_type_decimal(
[
dtype
for dtype in dtypes
if cudf.api.types.is_decimal_dtype(dtype)
]
)
else:
return cudf.dtype("O")
# Corner case 1:
# Resort to np.result_type to handle "M" and "m" types separately
dt_dtypes = set(
filter(lambda t: cudf.api.types.is_datetime_dtype(t), dtypes)
)
if len(dt_dtypes) > 0:
dtypes = dtypes - dt_dtypes
dtypes.add(np.result_type(*dt_dtypes))
td_dtypes = set(
filter(lambda t: pd.api.types.is_timedelta64_dtype(t), dtypes)
)
if len(td_dtypes) > 0:
dtypes = dtypes - td_dtypes
dtypes.add(np.result_type(*td_dtypes))
common_dtype = np.find_common_type(list(dtypes), [])
if common_dtype == np.dtype("float16"):
return cudf.dtype("float32")
return cudf.dtype(common_dtype)
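# Illustrative promotions through find_common_type (editor's sketch):
#     find_common_type([np.dtype("int32"), np.dtype("float32")])
#         # -> dtype("float64") via np.find_common_type
#     find_common_type([np.dtype("datetime64[ms]"), np.dtype("datetime64[ns]")])
#         # -> dtype("datetime64[ns]") via the datetime corner case
#     find_common_type([])    # -> None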
def _can_cast(from_dtype, to_dtype):
"""
Utility function to determine if we can cast
from `from_dtype` to `to_dtype`. This function primarily calls
`np.can_cast` but with some special handling around
cudf specific dtypes.
"""
if from_dtype in {None, cudf.NA}:
return True
if isinstance(from_dtype, type):
from_dtype = cudf.dtype(from_dtype)
if isinstance(to_dtype, type):
to_dtype = cudf.dtype(to_dtype)
# TODO : Add precision & scale checking for
# decimal types in future
if isinstance(from_dtype, cudf.core.dtypes.Decimal64Dtype):
if isinstance(to_dtype, cudf.core.dtypes.Decimal64Dtype):
return True
elif isinstance(to_dtype, np.dtype):
if to_dtype.kind in {"i", "f", "u", "U", "O"}:
return True
else:
return False
elif isinstance(from_dtype, np.dtype):
if isinstance(to_dtype, np.dtype):
return np.can_cast(from_dtype, to_dtype)
elif isinstance(to_dtype, cudf.core.dtypes.Decimal64Dtype):
if from_dtype.kind in {"i", "f", "u", "U", "O"}:
return True
else:
return False
elif isinstance(to_dtype, cudf.core.types.CategoricalDtype):
return True
else:
return False
elif isinstance(from_dtype, cudf.core.dtypes.ListDtype):
# TODO: Add level based checks too once casting of
# list columns is supported
if isinstance(to_dtype, cudf.core.dtypes.ListDtype):
return np.can_cast(from_dtype.leaf_type, to_dtype.leaf_type)
else:
return False
elif isinstance(from_dtype, cudf.core.dtypes.CategoricalDtype):
if isinstance(to_dtype, cudf.core.dtypes.CategoricalDtype):
return True
elif isinstance(to_dtype, np.dtype):
return np.can_cast(from_dtype._categories.dtype, to_dtype)
else:
return False
else:
return np.can_cast(from_dtype, to_dtype)
# Type dispatch loops similar to what are found in `np.add.types`
# In NumPy, whether or not an op can be performed between two
# operands is determined by checking to see if NumPy has a c/c++
# loop specifically for adding those two operands built in. If
# not it will search lists like these for a loop for types that
# the operands can be safely cast to. These are those lookups,
# modified slightly for cuDF's rules
_ADD_TYPES = [
"???",
"BBB",
"HHH",
"III",
"LLL",
"bbb",
"hhh",
"iii",
"lll",
"fff",
"ddd",
"mMM",
"MmM",
"mmm",
"LMM",
"MLM",
"Lmm",
"mLm",
]
_SUB_TYPES = [
"BBB",
"HHH",
"III",
"LLL",
"bbb",
"hhh",
"iii",
"lll",
"fff",
"ddd",
"???",
"MMm",
"mmm",
"MmM",
"MLM",
"mLm",
"Lmm",
]
_MUL_TYPES = [
"???",
"BBB",
"HHH",
"III",
"LLL",
"bbb",
"hhh",
"iii",
"lll",
"fff",
"ddd",
"mLm",
"Lmm",
"mlm",
"lmm",
]
_FLOORDIV_TYPES = [
"bbb",
"BBB",
"HHH",
"III",
"LLL",
"hhh",
"iii",
"lll",
"fff",
"ddd",
"???",
"mqm",
"mdm",
"mmq",
]
_TRUEDIV_TYPES = ["fff", "ddd", "mqm", "mmd", "mLm"]
_MOD_TYPES = [
"bbb",
"BBB",
"hhh",
"HHH",
"iii",
"III",
"lll",
"LLL",
"fff",
"ddd",
"mmm",
]
_POW_TYPES = [
"bbb",
"BBB",
"hhh",
"HHH",
"iii",
"III",
"lll",
"LLL",
"fff",
"ddd",
]
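# Hedged example of how these loops are consulted (editor's sketch; the type
# chars assume a typical 64-bit Linux NumPy build where "l" is int64 and "d"
# is float64): get_allowed_combinations_for_operator(np.dtype("int64"),
# np.dtype("int64"), "__add__") finds "lll" in _ADD_TYPES and returns "l",
# while any mix involving "object" other than str + str under "__add__"
# raises the TypeError built in that function.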
| 28.266484 | 79 | 0.610701 |
cfa90ad0739d44ee6e49cd16650612b9b0211440 | 6,350 | py | Python | log_plotter.py | fartashf/nuqsgd | 848b84e28b8df32d52e1f0a865adb9cf768b3221 | ["Apache-2.0"] | 5 | 2019-12-19T22:49:34.000Z | 2021-06-09T21:40:56.000Z | log_plotter.py | fartashf/nuqsgd | 848b84e28b8df32d52e1f0a865adb9cf768b3221 | ["Apache-2.0"] | null | null | null | log_plotter.py | fartashf/nuqsgd | 848b84e28b8df32d52e1f0a865adb9cf768b3221 | ["Apache-2.0"] | 2 | 2020-01-10T15:27:33.000Z | 2020-04-08T10:16:42.000Z |
from scipy.interpolate import spline
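# NOTE (editor's addition): scipy.interpolate.spline was deprecated and removed
# in newer SciPy releases (>= 1.3), so this module assumes an older SciPy.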
import numpy as np
import os
import re
import torch
import pylab as plt
import matplotlib.ticker as mtick
def get_run_names(logdir, patterns):
run_names = []
for pattern in patterns:
for root, subdirs, files in os.walk(logdir, followlinks=True):
if re.match(pattern, root):
run_names += [root]
# print(run_names)
run_names.sort()
return run_names
def get_data_pth(logdir, run_names, tag_names, batch_size=None):
data = []
for run_name in run_names:
d = {}
logdata = torch.load(run_name + '/log.pth.tar')
for tag_name in tag_names:
if tag_name not in logdata:
continue
js = logdata[tag_name]
d[tag_name] = np.array([[x[j] for x in js]
for j in range(1, 3)])
data += [d]
return data
def plot_smooth(x, y, npts=100, order=3, *args, **kwargs):
x_smooth = np.linspace(x.min(), x.max(), npts)
y_smooth = spline(x, y, x_smooth, order=order)
# x_smooth = x
# y_smooth = y
plt.plot(x_smooth, y_smooth, *args, **kwargs)
def plot_smooth_o1(x, y, *args, **kwargs):
plot_smooth(x, y, 100, 1, *args, **kwargs)
def get_legend(lg_tags, run_name, lg_replace=[]):
lg = ""
for lgt in lg_tags:
res = ".*?($|,)" if ',' not in lgt and '$' not in lgt else ''
mg = re.search(lgt + res, run_name)
if mg:
lg += mg.group(0)
lg = lg.replace('_,', ',')
lg = lg.strip(',')
for a, b in lg_replace:
lg = lg.replace(a, b)
return lg
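# Illustrative call (editor's sketch; the run directory name is made up):
#     get_legend(['lr'], 'runs/resnet32,lr_0.1,momentum_0.9')
#         # -> 'lr_0.1' (the regex grabs up to the next comma, then trims it)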
def plot_tag(data, plot_f, run_names, tag_name, lg_tags, ylim=None, color0=0,
ncolor=None, lg_replace=[], no_title=False):
xlabel = {}
ylabel = {'Tacc': 'Training Accuracy (%)', 'Terror': 'Training Error (%)',
'train/accuracy': 'Training Accuracy (%)',
'Vacc': 'Test Accuracy (%)', 'Verror': 'Test Error (%)',
'valid/accuracy': 'Test Accuracy (%)',
'loss': 'Loss',
'epoch': 'Epoch',
'Tloss': 'Loss', 'Vloss': 'Loss', 'lr': 'Learning rate',
'grad_bias': 'Gradient Diff norm',
'est_var': 'Mean variance',
'est_snr': 'Mean SNR',
'est_nvar': 'Mean Normalized Variance'}
titles = {'Tacc': 'Training Accuracy', 'Terror': 'Training Error',
'train/accuracy': 'Training Accuracy',
'Vacc': 'Test Accuracy', 'Verror': 'Test Error',
'loss': 'Loss',
'epoch': 'Epoch',
'Tloss': 'Loss on full training set', 'lr': 'Learning rate',
'Vloss': 'Loss on validation set',
'grad_bias': 'Optimization Step Bias',
'est_var': 'Optimization Step Variance (w/o learning rate)',
'est_snr': 'Optimization Step SNR',
'est_nvar': 'Optimization Step Normalized Variance (w/o lr)',
}
yscale_log = ['Tloss', 'Vloss'] # , 'est_var'
yscale_base = []
# yscale_sci = ['est_bias', 'est_var']
plot_fs = {'Tacc': plot_f, 'Vacc': plot_f,
'Terror': plot_f, 'Verror': plot_f,
'Tloss': plot_f, 'Vloss': plot_f,
}
for k in list(ylabel.keys()):
if k not in xlabel:
xlabel[k] = 'Training Iteration'
if k not in plot_fs:
plot_fs[k] = plot_f
if k not in plot_fs:
plot_fs[k] = plt.plot
if not isinstance(data, list):
data = [data]
run_names = [run_names]
color = ['blue', 'orangered', 'limegreen', 'darkkhaki', 'cyan', 'grey']
color = color[:ncolor]
style = ['-', '--', ':', '-.']
# plt.rcParams.update({'font.size': 12})
plt.grid(linewidth=1)
legends = []
for i in range(len(data)):
if tag_name not in data[i]:
continue
legends += [get_legend(lg_tags, run_names[i], lg_replace)]
plot_fs[tag_name](
data[i][tag_name][0], data[i][tag_name][1],
linestyle=style[(color0 + i) // len(color)],
color=color[(color0 + i) % len(color)], linewidth=2)
if not no_title:
plt.title(titles[tag_name])
if tag_name in yscale_log:
ax = plt.gca()
if tag_name in yscale_base:
ax.set_yscale('log', basey=np.e)
ax.yaxis.set_major_formatter(mtick.FuncFormatter(ticks))
else:
ax.set_yscale('log')
else:
ax = plt.gca()
ax.ticklabel_format(axis='y', style='sci', scilimits=(-3, 3))
if ylim is not None:
plt.ylim(ylim)
# plt.xlim([0, 25000])
plt.legend(legends)
plt.xlabel(xlabel[tag_name])
plt.ylabel(ylabel[tag_name])
def ticks(y, pos):
return r'$e^{{{:.0f}}}$'.format(np.log(y))
def plot_runs_and_tags(get_data_f, plot_f, logdir, patterns, tag_names,
fig_name, lg_tags, ylim, batch_size=None, sep_h=True,
ncolor=None, save_single=False, lg_replace=[],
no_title=False):
run_names = get_run_names(logdir, patterns)
data = get_data_f(logdir, run_names, tag_names, batch_size)
if len(data) == 0:
return data, run_names
num = len(tag_names)
height = (num + 1) // 2
width = 2 if num > 1 else 1
if not save_single:
fig = plt.figure(figsize=(7 * width, 4 * height))
fig.subplots(height, width)
else:
plt.figure(figsize=(7, 4))
plt.tight_layout(pad=1., w_pad=3., h_pad=3.0)
fi = 1
if save_single:
fig_dir = fig_name[:fig_name.rfind('.')]
try:
os.makedirs(fig_dir)
except os.error:
pass
for i in range(len(tag_names)):
yl = ylim[i]
if not isinstance(yl, list) and yl is not None:
yl = ylim
if not save_single:
plt.subplot(height, width, fi)
plot_tag(data, plot_f, run_names, tag_names[i], lg_tags, yl,
ncolor=ncolor, lg_replace=lg_replace, no_title=no_title)
if save_single:
plt.savefig('%s/%s.pdf' % (fig_dir, tag_names[i]),
dpi=100, bbox_inches='tight')
plt.figure(figsize=(7, 4))
fi += 1
plt.savefig(fig_name, dpi=100, bbox_inches='tight')
return data, run_names
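# --- Hypothetical usage sketch (not part of the original script). ---
# The log directory, run-name pattern, tags and output file below are placeholders;
# plot_runs_and_tags() walks `logdir`, loads each run's log.pth.tar via get_data_pth()
# and writes one figure with a subplot per tag.
if __name__ == '__main__':
    data, run_names = plot_runs_and_tags(
        get_data_pth, plot_smooth_o1, logdir='runs',
        patterns=['.*example_run.*'], tag_names=['Tloss', 'Vacc'],
        fig_name='example.pdf', lg_tags=['lr'], ylim=[None, None])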
| 34.51087
| 78
| 0.546929
|
90b741b8a0a7a5afc73a8f866e8965cecc185288
| 387
|
py
|
Python
|
vendor/cache/pygments.rb-2cada028da50/vendor/pygments-main/tests/examplefiles/linecontinuation.py
|
drnic/gitlabhq
|
778083f71e94fbf1c98f2f890b43d9ccc8faee03
|
[
"MIT"
] | 1
|
2019-06-27T12:45:36.000Z
|
2019-06-27T12:45:36.000Z
|
vendor/cache/pygments.rb-2cada028da50/vendor/pygments-main/tests/examplefiles/linecontinuation.py
|
drnic/gitlabhq
|
778083f71e94fbf1c98f2f890b43d9ccc8faee03
|
[
"MIT"
] | null | null | null |
vendor/cache/pygments.rb-2cada028da50/vendor/pygments-main/tests/examplefiles/linecontinuation.py
|
drnic/gitlabhq
|
778083f71e94fbf1c98f2f890b43d9ccc8faee03
|
[
"MIT"
] | null | null | null |
apple.filter(x, y)
apple.\
filter(x, y)
1 \
. \
__str__
from os import path
from \
os \
import \
path
import os.path as something
import \
os.path \
as \
something
class \
Spam:
pass
class Spam: pass
class Spam(object):
pass
class \
Spam \
(
object
) \
:
pass
def \
spam \
( \
) \
: \
pass
| 8.413043
| 27
| 0.483204
|
83f30aed88e651b286d4c2d6666ce20863cbc3a5
| 4,023
|
py
|
Python
|
pandas/tests/extension/test_period.py
|
Sanjay8874/pandas
|
353a0f9ebfbd87642d1dd7154f25be0286cdaf93
|
[
"BSD-3-Clause"
] | 4
|
2021-03-02T19:57:18.000Z
|
2021-06-20T19:23:57.000Z
|
pandas/tests/extension/test_period.py
|
16umm001/pandas
|
a2e599499667b256bc5b8b13a75f0601eccfd432
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/tests/extension/test_period.py
|
16umm001/pandas
|
a2e599499667b256bc5b8b13a75f0601eccfd432
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
from pandas.core.arrays import PeriodArray
from pandas.tests.extension import base
import pandas.util.testing as tm
@pytest.fixture
def dtype():
return PeriodDtype(freq='D')
@pytest.fixture
def data(dtype):
return PeriodArray(np.arange(1970, 2070), freq=dtype.freq)
@pytest.fixture
def data_for_sorting(dtype):
return PeriodArray([2018, 2019, 2017], freq=dtype.freq)
@pytest.fixture
def data_missing(dtype):
return PeriodArray([iNaT, 2017], freq=dtype.freq)
@pytest.fixture
def data_missing_for_sorting(dtype):
return PeriodArray([2018, iNaT, 2017], freq=dtype.freq)
@pytest.fixture
def data_for_grouping(dtype):
B = 2018
NA = iNaT
A = 2017
C = 2019
return PeriodArray([B, B, NA, NA, A, A, B, C], freq=dtype.freq)
@pytest.fixture
def na_value():
return pd.NaT
class BasePeriodTests(object):
pass
class TestPeriodDtype(BasePeriodTests, base.BaseDtypeTests):
pass
class TestConstructors(BasePeriodTests, base.BaseConstructorsTests):
pass
class TestGetitem(BasePeriodTests, base.BaseGetitemTests):
pass
class TestMethods(BasePeriodTests, base.BaseMethodsTests):
def test_combine_add(self, data_repeated):
# Period + Period is not defined.
pass
class TestInterface(BasePeriodTests, base.BaseInterfaceTests):
def test_no_values_attribute(self, data):
# We have a values attribute.
pass
class TestArithmeticOps(BasePeriodTests, base.BaseArithmeticOpsTests):
implements = {'__sub__', '__rsub__'}
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
        # we implement subtraction...
if all_arithmetic_operators in self.implements:
s = pd.Series(data)
self.check_opname(s, all_arithmetic_operators, s.iloc[0],
exc=None)
else:
# ... but not the rest.
super(TestArithmeticOps, self).test_arith_series_with_scalar(
data, all_arithmetic_operators
)
    def test_arith_series_with_array(self, data, all_arithmetic_operators):
        # we implement subtraction...
        if all_arithmetic_operators in self.implements:
            s = pd.Series(data)
            self.check_opname(s, all_arithmetic_operators, s.iloc[0],
                              exc=None)
        else:
            # ... but not the rest.
            super(TestArithmeticOps, self).test_arith_series_with_array(
                data, all_arithmetic_operators
            )
def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
super(TestArithmeticOps, self)._check_divmod_op(
s, op, other, exc=TypeError
)
def test_add_series_with_extension_array(self, data):
# we don't implement + for Period
s = pd.Series(data)
msg = (r"unsupported operand type\(s\) for \+: "
r"\'PeriodArray\' and \'PeriodArray\'")
with tm.assert_raises_regex(TypeError, msg):
s + data
def test_error(self):
pass
def test_direct_arith_with_series_returns_not_implemented(self, data):
# Override to use __sub__ instead of __add__
other = pd.Series(data)
result = data.__sub__(other)
assert result is NotImplemented
class TestCasting(BasePeriodTests, base.BaseCastingTests):
pass
class TestComparisonOps(BasePeriodTests, base.BaseComparisonOpsTests):
def _compare_other(self, s, data, op_name, other):
# the base test is not appropriate for us. We raise on comparison
# with (some) integers, depending on the value.
pass
class TestMissing(BasePeriodTests, base.BaseMissingTests):
pass
class TestReshaping(BasePeriodTests, base.BaseReshapingTests):
pass
class TestSetitem(BasePeriodTests, base.BaseSetitemTests):
pass
class TestGroupby(BasePeriodTests, base.BaseGroupbyTests):
pass
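# --- Illustrative sketch (not part of the test module): what the fixtures above produce. ---
# Mirrors the `data_missing_for_sorting` fixture: a PeriodArray built from integer
# ordinals (iNaT marking a missing value), wrapped in a Series as the tests do.
def _example_fixture_output():
    dtype = PeriodDtype(freq='D')
    arr = PeriodArray([2018, iNaT, 2017], freq=dtype.freq)
    return pd.Series(arr)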
| 25.462025
| 76
| 0.683072
|
1e81032c76e4feee1c6efac7e965387eaa9e5414
| 2,076
|
py
|
Python
|
tests/pytests/functional/conftest.py
|
Bacon-Unlimited/salt
|
9b1b791d212a6810c430dd15c63fbce3a4f7e1d6
|
[
"Apache-2.0"
] | 9,425
|
2015-01-01T05:59:24.000Z
|
2022-03-31T20:44:05.000Z
|
tests/pytests/functional/conftest.py
|
Bacon-Unlimited/salt
|
9b1b791d212a6810c430dd15c63fbce3a4f7e1d6
|
[
"Apache-2.0"
] | 33,507
|
2015-01-01T00:19:56.000Z
|
2022-03-31T23:48:20.000Z
|
tests/pytests/functional/conftest.py
|
Bacon-Unlimited/salt
|
9b1b791d212a6810c430dd15c63fbce3a4f7e1d6
|
[
"Apache-2.0"
] | 5,810
|
2015-01-01T19:11:45.000Z
|
2022-03-31T02:37:20.000Z
|
import logging
import shutil
import pytest
from saltfactories.utils.functional import Loaders
log = logging.getLogger(__name__)
@pytest.fixture(scope="package")
def minion_id():
return "func-tests-minion"
@pytest.fixture(scope="module")
def state_tree(tmp_path_factory):
state_tree_path = tmp_path_factory.mktemp("state-tree-base")
try:
yield state_tree_path
finally:
shutil.rmtree(str(state_tree_path), ignore_errors=True)
@pytest.fixture(scope="module")
def state_tree_prod(tmp_path_factory):
state_tree_path = tmp_path_factory.mktemp("state-tree-prod")
try:
yield state_tree_path
finally:
shutil.rmtree(str(state_tree_path), ignore_errors=True)
@pytest.fixture(scope="module")
def minion_config_defaults():
"""
Functional test modules can provide this fixture to tweak the default configuration dictionary
passed to the minion factory
"""
return {}
@pytest.fixture(scope="module")
def minion_config_overrides():
"""
Functional test modules can provide this fixture to tweak the configuration overrides dictionary
passed to the minion factory
"""
return {}
@pytest.fixture(scope="module")
def minion_opts(
salt_factories,
minion_id,
state_tree,
state_tree_prod,
minion_config_defaults,
minion_config_overrides,
):
minion_config_overrides.update(
{
"file_client": "local",
"file_roots": {"base": [str(state_tree)], "prod": [str(state_tree_prod)]},
"features": {"enable_slsvars_fixes": True},
}
)
factory = salt_factories.salt_minion_daemon(
minion_id,
defaults=minion_config_defaults or None,
overrides=minion_config_overrides,
)
return factory.config.copy()
@pytest.fixture(scope="module")
def loaders(minion_opts):
return Loaders(minion_opts)
@pytest.fixture(autouse=True)
def reset_loaders_state(loaders):
try:
# Run the tests
yield
finally:
# Reset the loaders state
loaders.reset_state()
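# --- Hypothetical consumer sketch (would normally live in a test module, not in this conftest). ---
# Test modules can override the module-scoped `minion_config_*` fixtures above and then drive
# Salt through the `loaders` fixture. The `loaders.modules.test.ping()` call below is an
# assumption about the saltfactories Loaders API, shown only as an illustration; the function
# is underscore-prefixed so pytest will not collect it from this file.
def _example_test_ping(loaders):
    # call an execution module through the functional loaders
    assert loaders.modules.test.ping() is True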
| 23.590909
| 100
| 0.69316
|
0e5cf2c6e190000bb8ec500ad435b987c7899e72
| 423
|
py
|
Python
|
Conditionals and Loops/Nth Fibbonacci Number.py
|
SaiPrasanth212/Coding-ninjas-Introduction-To-Python
|
f6aabc3b7b0f2ae82e2870c8f2bd1f37e3fe3005
|
[
"MIT"
] | 2
|
2021-12-13T19:28:40.000Z
|
2022-03-07T16:36:29.000Z
|
Conditionals and Loops/Nth Fibbonacci Number.py
|
SaiPrasanth212/Coding-ninjas-Introduction-To-Python
|
f6aabc3b7b0f2ae82e2870c8f2bd1f37e3fe3005
|
[
"MIT"
] | null | null | null |
Conditionals and Loops/Nth Fibbonacci Number.py
|
SaiPrasanth212/Coding-ninjas-Introduction-To-Python
|
f6aabc3b7b0f2ae82e2870c8f2bd1f37e3fe3005
|
[
"MIT"
] | null | null | null |
## Read input as specified in the question.
## Print output as specified in the question.
'''def fibo(n):
if n == 1 or n == 2:
return 1
return fibo(n-1) + fibo(n-2)'''
def fibo(n):
if n == 1 or n == 2:
return 1
x = 1
y = 1
while n > 2:
z = x+y
x = y
y = z
n = n-1
return z
#main
n = int(input())
print(fibo(n))
| 15.107143
| 45
| 0.432624
|
9df13205923bda5739bd76e4072919adaf1dc0f0
| 12,484
|
py
|
Python
|
mne/io/utils.py
|
achilleas-k/mne-python
|
0078e1af13a92ab47498dd167bc5ec73be864427
|
[
"BSD-3-Clause"
] | 1
|
2020-07-28T16:09:54.000Z
|
2020-07-28T16:09:54.000Z
|
mne/io/utils.py
|
achilleas-k/mne-python
|
0078e1af13a92ab47498dd167bc5ec73be864427
|
[
"BSD-3-Clause"
] | 4
|
2016-06-04T15:28:08.000Z
|
2016-12-22T14:23:13.000Z
|
mne/io/utils.py
|
achilleas-k/mne-python
|
0078e1af13a92ab47498dd167bc5ec73be864427
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Denis Engemann <denis.engemann@gmail.com>
# Teon Brooks <teon.brooks@gmail.com>
# Marijn van Vliet <w.m.vanvliet@gmail.com>
# Mainak Jas <mainak.jas@telecom-paristech.fr>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD (3-clause)
import numpy as np
import os
from .constants import FIFF
from .meas_info import _get_valid_units
from .. import __version__
from ..utils import warn
def _deprecate_stim_channel(stim_channel, removed_in='0.19'):
minor_current = int(__version__.split('.')[1])
minor_removed_in = int(removed_in.split('.')[1])
if minor_current == minor_removed_in - 2:
if stim_channel is None:
_MSG = (
'The parameter `stim_channel` controlling the stim channel'
' synthesis has not been specified. In 0.%s it defaults to'
' True but will change to False in 0.%s (when no stim channel'
' synthesis will be allowed) and be removed in %s; migrate'
' code to use `stim_channel=False` and'
' :func:`mne.events_from_annotations` or set'
' `stim_channel=True` to avoid this warning.'
% (minor_removed_in - 2, minor_removed_in - 1, removed_in))
warn(_MSG, FutureWarning)
elif minor_current == minor_removed_in - 1:
if stim_channel is not False:
_MSG = ('stim_channel must be False or omitted; it will be '
'removed in %s' % removed_in)
raise ValueError(_MSG, DeprecationWarning)
else:
raise RuntimeError('stim_channel was supposed to be removed in version'
' %s, and it is still present in %s' %
(removed_in, __version__))
def _check_orig_units(orig_units):
"""Check original units from a raw file.
Units that are close to a valid_unit but not equal can be remapped to fit
into the valid_units. All other units that are not valid will be replaced
with "n/a".
Parameters
----------
orig_units : dict
Dictionary mapping channel names to their units as specified in
the header file. Example: {'FC1': 'nV'}
Returns
-------
orig_units_remapped : dict
Dictionary mapping channel names to their VALID units as specified in
the header file. Invalid units are now labeled "n/a".
Example: {'FC1': 'nV', 'Hfp3erz': 'n/a'}
"""
if orig_units is None:
return
valid_units = _get_valid_units()
valid_units_lowered = [unit.lower() for unit in valid_units]
orig_units_remapped = dict(orig_units)
for ch_name, unit in orig_units.items():
# Be lenient: we ignore case for now.
if unit.lower() in valid_units_lowered:
continue
# Common "invalid units" can be remapped to their valid equivalent
remap_dict = dict()
remap_dict['uv'] = u'µV'
remap_dict[u'μv'] = u'µV' # greek letter mu vs micro sign. use micro
if unit.lower() in remap_dict:
orig_units_remapped[ch_name] = remap_dict[unit.lower()]
continue
# Some units cannot be saved, they are invalid: assign "n/a"
orig_units_remapped[ch_name] = 'n/a'
return orig_units_remapped
def _find_channels(ch_names, ch_type='EOG'):
"""Find EOG channel."""
substrings = (ch_type,)
substrings = [s.upper() for s in substrings]
if ch_type == 'EOG':
substrings = ('EOG', 'EYE')
eog_idx = [idx for idx, ch in enumerate(ch_names) if
any(substring in ch.upper() for substring in substrings)]
return eog_idx
def _mult_cal_one(data_view, one, idx, cals, mult):
"""Take a chunk of raw data, multiply by mult or cals, and store."""
one = np.asarray(one, dtype=data_view.dtype)
assert data_view.shape[1] == one.shape[1]
if mult is not None:
data_view[:] = np.dot(mult, one)
else:
if isinstance(idx, slice):
data_view[:] = one[idx]
else:
# faster than doing one = one[idx]
np.take(one, idx, axis=0, out=data_view)
if cals is not None:
data_view *= cals
def _blk_read_lims(start, stop, buf_len):
"""Deal with indexing in the middle of a data block.
Parameters
----------
start : int
Starting index.
stop : int
Ending index (exclusive).
buf_len : int
Buffer size in samples.
Returns
-------
block_start_idx : int
The first block to start reading from.
r_lims : list
The read limits.
d_lims : list
The write limits.
Notes
-----
Consider this example::
>>> start, stop, buf_len = 2, 27, 10
                        +---------+---------+---------
        File structure: |  buf0   |  buf1   |  buf2   |
                        +---------+---------+---------
        File time:      0         10        20        30
                        +---------+---------+---------
        Requested time:   2                        27
                          |                        |
                      blockstart               blockstop
                          |                        |
                        start                    stop
We need 27 - 2 = 25 samples (per channel) to store our data, and
we need to read from 3 buffers (30 samples) to get all of our data.
On all reads but the first, the data we read starts at
the first sample of the buffer. On all reads but the last,
the data we read ends on the last sample of the buffer.
We call ``this_data`` the variable that stores the current buffer's data,
and ``data`` the variable that stores the total output.
On the first read, we need to do this::
>>> data[0:buf_len-2] = this_data[2:buf_len] # doctest: +SKIP
On the second read, we need to do::
>>> data[1*buf_len-2:2*buf_len-2] = this_data[0:buf_len] # doctest: +SKIP
On the final read, we need to do::
>>> data[2*buf_len-2:3*buf_len-2-3] = this_data[0:buf_len-3] # doctest: +SKIP
This function encapsulates this logic to allow a loop over blocks, where
data is stored using the following limits::
>>> data[d_lims[ii, 0]:d_lims[ii, 1]] = this_data[r_lims[ii, 0]:r_lims[ii, 1]] # doctest: +SKIP
""" # noqa: E501
# this is used to deal with indexing in the middle of a sampling period
assert all(isinstance(x, int) for x in (start, stop, buf_len))
block_start_idx = (start // buf_len)
block_start = block_start_idx * buf_len
last_used_samp = stop - 1
block_stop = last_used_samp - last_used_samp % buf_len + buf_len
read_size = block_stop - block_start
n_blk = read_size // buf_len + (read_size % buf_len != 0)
start_offset = start - block_start
end_offset = block_stop - stop
d_lims = np.empty((n_blk, 2), int)
r_lims = np.empty((n_blk, 2), int)
for bi in range(n_blk):
# Triage start (sidx) and end (eidx) indices for
# data (d) and read (r)
if bi == 0:
d_sidx = 0
r_sidx = start_offset
else:
d_sidx = bi * buf_len - start_offset
r_sidx = 0
if bi == n_blk - 1:
d_eidx = stop - start
r_eidx = buf_len - end_offset
else:
d_eidx = (bi + 1) * buf_len - start_offset
r_eidx = buf_len
d_lims[bi] = [d_sidx, d_eidx]
r_lims[bi] = [r_sidx, r_eidx]
return block_start_idx, r_lims, d_lims
def _file_size(fname):
"""Get the file size in bytes."""
with open(fname, 'rb') as f:
f.seek(0, os.SEEK_END)
return f.tell()
def _read_segments_file(raw, data, idx, fi, start, stop, cals, mult,
dtype='<i2', n_channels=None, offset=0,
trigger_ch=None):
"""Read a chunk of raw data."""
n_channels = raw.info['nchan'] if n_channels is None else n_channels
n_bytes = np.dtype(dtype).itemsize
# data_offset and data_left count data samples (channels x time points),
# not bytes.
data_offset = n_channels * start * n_bytes + offset
data_left = (stop - start) * n_channels
# Read up to 100 MB of data at a time, block_size is in data samples
block_size = ((int(100e6) // n_bytes) // n_channels) * n_channels
block_size = min(data_left, block_size)
with open(raw._filenames[fi], 'rb', buffering=0) as fid:
fid.seek(data_offset)
# extract data in chunks
for sample_start in np.arange(0, data_left, block_size) // n_channels:
count = min(block_size, data_left - sample_start * n_channels)
block = np.fromfile(fid, dtype, count)
if block.size != count:
raise RuntimeError('Incorrect number of samples (%s != %s), '
'please report this error to MNE-Python '
'developers' % (block.size, count))
block = block.reshape(n_channels, -1, order='F')
n_samples = block.shape[1] # = count // n_channels
sample_stop = sample_start + n_samples
if trigger_ch is not None:
stim_ch = trigger_ch[start:stop][sample_start:sample_stop]
block = np.vstack((block, stim_ch))
data_view = data[:, sample_start:sample_stop]
_mult_cal_one(data_view, block, idx, cals, mult)
def read_str(fid, count=1):
"""Read string from a binary file in a python version compatible way."""
dtype = np.dtype('>S%i' % count)
string = fid.read(dtype.itemsize)
data = np.frombuffer(string, dtype=dtype)[0]
bytestr = b''.join([data[0:data.index(b'\x00') if
b'\x00' in data else count]])
return str(bytestr.decode('ascii')) # Return native str type for Py2/3
def _create_chs(ch_names, cals, ch_coil, ch_kind, eog, ecg, emg, misc):
"""Initialize info['chs'] for eeg channels."""
chs = list()
for idx, ch_name in enumerate(ch_names):
if ch_name in eog or idx in eog:
coil_type = FIFF.FIFFV_COIL_NONE
kind = FIFF.FIFFV_EOG_CH
elif ch_name in ecg or idx in ecg:
coil_type = FIFF.FIFFV_COIL_NONE
kind = FIFF.FIFFV_ECG_CH
elif ch_name in emg or idx in emg:
coil_type = FIFF.FIFFV_COIL_NONE
kind = FIFF.FIFFV_EMG_CH
elif ch_name in misc or idx in misc:
coil_type = FIFF.FIFFV_COIL_NONE
kind = FIFF.FIFFV_MISC_CH
else:
coil_type = ch_coil
kind = ch_kind
chan_info = {'cal': cals[idx], 'logno': idx + 1, 'scanno': idx + 1,
'range': 1.0, 'unit_mul': 0., 'ch_name': ch_name,
'unit': FIFF.FIFF_UNIT_V,
'coord_frame': FIFF.FIFFV_COORD_HEAD,
'coil_type': coil_type, 'kind': kind, 'loc': np.zeros(12)}
chs.append(chan_info)
return chs
def _synthesize_stim_channel(events, n_samples):
"""Synthesize a stim channel from events read from an event file.
Parameters
----------
events : array, shape (n_events, 3)
Each row representing an event.
n_samples : int
The number of samples.
Returns
-------
stim_channel : array, shape (n_samples,)
An array containing the whole recording's event marking.
"""
# select events overlapping buffer
events = events.copy()
events[events[:, 1] < 1, 1] = 1
# create output buffer
stim_channel = np.zeros(n_samples, int)
for onset, duration, trigger in events:
stim_channel[onset:onset + duration] = trigger
return stim_channel
def _construct_bids_filename(base, ext, part_idx):
"""Construct a BIDS compatible filename for split files."""
# insert index in filename
deconstructed_base = base.split('_')
bids_supported = ['meg', 'eeg', 'ieeg']
for mod in bids_supported:
if mod in deconstructed_base:
idx = deconstructed_base.index(mod)
modality = deconstructed_base.pop(idx)
base = '_'.join(deconstructed_base)
use_fname = '%s_part-%02d_%s%s' % (base, part_idx, modality, ext)
return use_fname
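# --- Worked example (illustration only, not part of the original module). ---
# Reproduces the numbers from the _blk_read_lims docstring: reading samples 2..27
# from a file stored in 10-sample buffers touches 3 blocks.
if __name__ == '__main__':
    block_start_idx, r_lims, d_lims = _blk_read_lims(2, 27, 10)
    # block_start_idx == 0
    # r_lims.tolist() == [[2, 10], [0, 10], [0, 7]]   per-block read limits
    # d_lims.tolist() == [[0, 8], [8, 18], [18, 25]]  per-block write limits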
| 36.717647
| 104
| 0.590436
|
a1d3dd5c494a6016fae745b4d9f5cf26a5360384
| 3,123
|
py
|
Python
|
reddit/settings.py
|
RaghuDalal/Django-Reddit-API
|
bdaba40f003da59be5ba4cf7ab6d83b6bae7b99b
|
[
"MIT"
] | null | null | null |
reddit/settings.py
|
RaghuDalal/Django-Reddit-API
|
bdaba40f003da59be5ba4cf7ab6d83b6bae7b99b
|
[
"MIT"
] | null | null | null |
reddit/settings.py
|
RaghuDalal/Django-Reddit-API
|
bdaba40f003da59be5ba4cf7ab6d83b6bae7b99b
|
[
"MIT"
] | null | null | null |
"""
Django settings for reddit project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'cb6jiv6-*^^#p(43u+w(+l%0^ejyuot=$(sx(kgchoc5^02b27'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'posts',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'reddit.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'reddit.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| 25.390244
| 91
| 0.692923
|
ad7f48b015718bb166c1e2248c1d3f81b466c6bf
| 2,678
|
py
|
Python
|
airflow/providers/google/cloud/example_dags/example_dataprep.py
|
daemon-demon/airflow
|
6f96e81f0123b30750fb68ec496246023bf63f35
|
[
"Apache-2.0"
] | 1
|
2020-12-23T05:03:17.000Z
|
2020-12-23T05:03:17.000Z
|
airflow/providers/google/cloud/example_dags/example_dataprep.py
|
daemon-demon/airflow
|
6f96e81f0123b30750fb68ec496246023bf63f35
|
[
"Apache-2.0"
] | 20
|
2021-01-23T12:33:08.000Z
|
2021-12-07T22:30:37.000Z
|
airflow/providers/google/cloud/example_dags/example_dataprep.py
|
daemon-demon/airflow
|
6f96e81f0123b30750fb68ec496246023bf63f35
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that shows how to use Google Dataprep.
"""
import os
from airflow import models
from airflow.providers.google.cloud.operators.dataprep import (
DataprepGetJobGroupOperator,
DataprepGetJobsForJobGroupOperator,
DataprepRunJobGroupOperator,
)
from airflow.utils import dates
DATAPREP_JOB_ID = int(os.environ.get('DATAPREP_JOB_ID', 12345677))
DATAPREP_JOB_RECIPE_ID = int(os.environ.get('DATAPREP_JOB_RECIPE_ID', 12345677))
DATAPREP_BUCKET = os.environ.get("DATAPREP_BUCKET", "gs://afl-sql/name@email.com")
DATA = {
"wrangledDataset": {"id": DATAPREP_JOB_RECIPE_ID},
"overrides": {
"execution": "dataflow",
"profiler": False,
"writesettings": [
{
"path": DATAPREP_BUCKET,
"action": "create",
"format": "csv",
"compression": "none",
"header": False,
"asSingleFile": False,
}
],
},
}
with models.DAG(
"example_dataprep", schedule_interval=None, start_date=dates.days_ago(1), # Override to match your needs
) as dag:
# [START how_to_dataprep_run_job_group_operator]
run_job_group = DataprepRunJobGroupOperator(task_id="run_job_group", body_request=DATA)
# [END how_to_dataprep_run_job_group_operator]
# [START how_to_dataprep_get_jobs_for_job_group_operator]
get_jobs_for_job_group = DataprepGetJobsForJobGroupOperator(
task_id="get_jobs_for_job_group", job_id=DATAPREP_JOB_ID
)
# [END how_to_dataprep_get_jobs_for_job_group_operator]
# [START how_to_dataprep_get_job_group_operator]
get_job_group = DataprepGetJobGroupOperator(
task_id="get_job_group", job_group_id=DATAPREP_JOB_ID, embed="", include_deleted=False,
)
# [END how_to_dataprep_get_job_group_operator]
run_job_group >> [get_jobs_for_job_group, get_job_group]
| 36.684932
| 109
| 0.720687
|
1c4c705af7ccdb33a8700f125f9ffa320bb1deb0
| 24,136
|
py
|
Python
|
include/TestController.py
|
DonaldTsang/hydrus
|
1ffd13469c0ea98ea78961ab377aff1c6325379b
|
[
"WTFPL"
] | null | null | null |
include/TestController.py
|
DonaldTsang/hydrus
|
1ffd13469c0ea98ea78961ab377aff1c6325379b
|
[
"WTFPL"
] | null | null | null |
include/TestController.py
|
DonaldTsang/hydrus
|
1ffd13469c0ea98ea78961ab377aff1c6325379b
|
[
"WTFPL"
] | null | null | null |
import collections
import os
import random
import threading
import shutil
import sys
import tempfile
import time
import traceback
import unittest
import wx
from . import HydrusConstants as HC
from . import ClientConstants as CC
from . import HydrusGlobals as HG
from . import ClientAPI
from . import ClientDefaults
from . import ClientFiles
from . import ClientNetworking
from . import ClientNetworkingBandwidth
from . import ClientNetworkingDomain
from . import ClientNetworkingLogin
from . import ClientNetworkingSessions
from . import ClientServices
from . import ClientTags
from . import ClientThreading
from . import HydrusDB
from . import HydrusExceptions
from . import HydrusPubSub
from . import HydrusSessions
from . import HydrusTags
from . import HydrusThreading
from . import TestClientAPI
from . import TestClientConstants
from . import TestClientDaemons
from . import TestClientData
from . import TestClientDB
from . import TestClientDBDuplicates
from . import TestClientImageHandling
from . import TestClientImportOptions
from . import TestClientImportSubscriptions
from . import TestClientListBoxes
from . import TestClientMigration
from . import TestClientNetworking
from . import TestClientTags
from . import TestClientThreading
from . import TestDialogs
from . import TestFunctions
from . import TestHydrusNATPunch
from . import TestHydrusNetworking
from . import TestHydrusSerialisable
from . import TestHydrusServer
from . import TestHydrusSessions
from . import TestServerDB
from twisted.internet import reactor
from . import ClientCaches
from . import ClientData
from . import ClientOptions
from . import HydrusData
from . import HydrusPaths
DB_DIR = None
tiniest_gif = b'\x47\x49\x46\x38\x39\x61\x01\x00\x01\x00\x00\xFF\x00\x2C\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02\x00\x3B'
LOCAL_RATING_LIKE_SERVICE_KEY = HydrusData.GenerateKey()
LOCAL_RATING_NUMERICAL_SERVICE_KEY = HydrusData.GenerateKey()
def ConvertServiceKeysToContentUpdatesToComparable( service_keys_to_content_updates ):
comparable_dict = {}
for ( service_key, content_updates ) in list(service_keys_to_content_updates.items()):
comparable_dict[ service_key ] = set( content_updates )
return comparable_dict
class MockController( object ):
def __init__( self ):
self.new_options = ClientOptions.ClientOptions()
def CallToThread( self, callable, *args, **kwargs ):
return HG.test_controller.CallToThread( callable, *args, **kwargs )
def JustWokeFromSleep( self ):
return False
def pub( self, *args, **kwargs ):
pass
def sub( self, *args, **kwargs ):
pass
class MockServicesManager( object ):
def __init__( self, services ):
self._service_keys_to_services = { service.GetServiceKey() : service for service in services }
def GetName( self, service_key ):
return self._service_keys_to_services[ service_key ].GetName()
def GetService( self, service_key ):
return self._service_keys_to_services[ service_key ]
def ServiceExists( self, service_key ):
return service_key in self._service_keys_to_services
class FakeWebSessionManager():
def EnsureLoggedIn( self, name ):
pass
def GetCookies( self, *args, **kwargs ):
return { 'session_cookie' : 'blah' }
class TestFrame( wx.Frame ):
def __init__( self ):
wx.Frame.__init__( self, None )
def SetPanel( self, panel ):
vbox = wx.BoxSizer( wx.VERTICAL )
vbox.Add( panel, CC.FLAGS_EXPAND_BOTH_WAYS )
self.SetSizer( vbox )
self.Fit()
self.Show()
only_run = None
class Controller( object ):
def __init__( self, win, only_run ):
self.win = win
self.only_run = only_run
self.db_dir = tempfile.mkdtemp()
global DB_DIR
DB_DIR = self.db_dir
self._server_files_dir = os.path.join( self.db_dir, 'server_files' )
self._updates_dir = os.path.join( self.db_dir, 'test_updates' )
client_files_default = os.path.join( self.db_dir, 'client_files' )
HydrusPaths.MakeSureDirectoryExists( self._server_files_dir )
HydrusPaths.MakeSureDirectoryExists( self._updates_dir )
HydrusPaths.MakeSureDirectoryExists( client_files_default )
HG.controller = self
HG.client_controller = self
HG.server_controller = self
HG.test_controller = self
self.db = self
self.gui = self
self._call_to_threads = []
self._pubsub = HydrusPubSub.HydrusPubSub( self )
self.new_options = ClientOptions.ClientOptions()
HC.options = ClientDefaults.GetClientDefaultOptions()
self.options = HC.options
def show_text( text ): pass
HydrusData.ShowText = show_text
self._reads = {}
self._reads[ 'local_booru_share_keys' ] = []
self._reads[ 'messaging_sessions' ] = []
self._reads[ 'options' ] = ClientDefaults.GetClientDefaultOptions()
self._reads[ 'file_system_predicates' ] = []
self._reads[ 'media_results' ] = []
self.example_tag_repo_service_key = HydrusData.GenerateKey()
services = []
services.append( ClientServices.GenerateService( CC.LOCAL_BOORU_SERVICE_KEY, HC.LOCAL_BOORU, 'local booru' ) )
services.append( ClientServices.GenerateService( CC.CLIENT_API_SERVICE_KEY, HC.CLIENT_API_SERVICE, 'client api' ) )
services.append( ClientServices.GenerateService( CC.COMBINED_LOCAL_FILE_SERVICE_KEY, HC.COMBINED_LOCAL_FILE, 'all local files' ) )
services.append( ClientServices.GenerateService( CC.LOCAL_FILE_SERVICE_KEY, HC.LOCAL_FILE_DOMAIN, 'my files' ) )
services.append( ClientServices.GenerateService( CC.TRASH_SERVICE_KEY, HC.LOCAL_FILE_TRASH_DOMAIN, 'trash' ) )
services.append( ClientServices.GenerateService( CC.DEFAULT_LOCAL_TAG_SERVICE_KEY, HC.LOCAL_TAG, 'my tags' ) )
services.append( ClientServices.GenerateService( self.example_tag_repo_service_key, HC.TAG_REPOSITORY, 'example tag repo' ) )
services.append( ClientServices.GenerateService( CC.COMBINED_TAG_SERVICE_KEY, HC.COMBINED_TAG, 'all known tags' ) )
services.append( ClientServices.GenerateService( LOCAL_RATING_LIKE_SERVICE_KEY, HC.LOCAL_RATING_LIKE, 'example local rating like service' ) )
services.append( ClientServices.GenerateService( LOCAL_RATING_NUMERICAL_SERVICE_KEY, HC.LOCAL_RATING_NUMERICAL, 'example local rating numerical service' ) )
self._reads[ 'services' ] = services
client_files_locations = {}
for prefix in HydrusData.IterateHexPrefixes():
for c in ( 'f', 't' ):
client_files_locations[ c + prefix ] = client_files_default
self._reads[ 'client_files_locations' ] = client_files_locations
self._reads[ 'sessions' ] = []
self._reads[ 'tag_parents' ] = {}
self._reads[ 'tag_siblings' ] = {}
self._reads[ 'in_inbox' ] = False
self._writes = collections.defaultdict( list )
self._managers = {}
self.services_manager = ClientCaches.ServicesManager( self )
self.client_files_manager = ClientFiles.ClientFilesManager( self )
self.parsing_cache = ClientCaches.ParsingCache()
bandwidth_manager = ClientNetworkingBandwidth.NetworkBandwidthManager()
session_manager = ClientNetworkingSessions.NetworkSessionManager()
domain_manager = ClientNetworkingDomain.NetworkDomainManager()
ClientDefaults.SetDefaultDomainManagerData( domain_manager )
login_manager = ClientNetworkingLogin.NetworkLoginManager()
self.network_engine = ClientNetworking.NetworkEngine( self, bandwidth_manager, session_manager, domain_manager, login_manager )
self.CallToThreadLongRunning( self.network_engine.MainLoop )
self.tag_display_manager = ClientTags.TagDisplayManager()
self.tag_siblings_manager = ClientCaches.TagSiblingsManager( self )
self.tag_parents_manager = ClientCaches.TagParentsManager( self )
self._managers[ 'undo' ] = ClientCaches.UndoManager( self )
self.server_session_manager = HydrusSessions.HydrusSessionManagerServer()
self.bitmap_manager = ClientCaches.BitmapManager( self )
self.local_booru_manager = ClientCaches.LocalBooruCache( self )
self.client_api_manager = ClientAPI.APIManager()
self._cookies = {}
self._job_scheduler = HydrusThreading.JobScheduler( self )
self._job_scheduler.start()
def _GetCallToThread( self ):
for call_to_thread in self._call_to_threads:
if not call_to_thread.CurrentlyWorking():
return call_to_thread
if len( self._call_to_threads ) > 100:
raise Exception( 'Too many call to threads!' )
call_to_thread = HydrusThreading.THREADCallToThread( self, 'CallToThread' )
self._call_to_threads.append( call_to_thread )
call_to_thread.start()
return call_to_thread
def _SetupWx( self ):
self.locale = wx.Locale( wx.LANGUAGE_DEFAULT ) # Very important to init this here and keep it non garbage collected
CC.GlobalBMPs.STATICInitialise()
self.frame_icon = wx.Icon( os.path.join( HC.STATIC_DIR, 'hydrus_32_non-transparent.png' ), wx.BITMAP_TYPE_PNG )
def pub( self, topic, *args, **kwargs ):
pass
def pubimmediate( self, topic, *args, **kwargs ):
self._pubsub.pubimmediate( topic, *args, **kwargs )
def sub( self, object, method_name, topic ):
self._pubsub.sub( object, method_name, topic )
def AcquirePageKey( self ):
return HydrusData.GenerateKey()
def CallBlockingToWX( self, win, func, *args, **kwargs ):
def wx_code( win, job_key ):
try:
if win is not None and not win:
raise HydrusExceptions.WXDeadWindowException( 'Parent Window was destroyed before wx command was called!' )
result = func( *args, **kwargs )
job_key.SetVariable( 'result', result )
except ( HydrusExceptions.WXDeadWindowException, HydrusExceptions.InsufficientCredentialsException, HydrusExceptions.ShutdownException ) as e:
job_key.SetVariable( 'error', e )
except Exception as e:
job_key.SetVariable( 'error', e )
HydrusData.Print( 'CallBlockingToWX just caught this error:' )
HydrusData.DebugPrint( traceback.format_exc() )
finally:
job_key.Finish()
job_key = ClientThreading.JobKey()
job_key.Begin()
wx.CallAfter( wx_code, win, job_key )
while not job_key.IsDone():
if HG.model_shutdown:
raise HydrusExceptions.ShutdownException( 'Application is shutting down!' )
time.sleep( 0.05 )
if job_key.HasVariable( 'result' ):
# result can be None, for wx_code that has no return variable
result = job_key.GetIfHasVariable( 'result' )
return result
error = job_key.GetIfHasVariable( 'error' )
if error is not None:
raise error
raise HydrusExceptions.ShutdownException()
def CallToThread( self, callable, *args, **kwargs ):
call_to_thread = self._GetCallToThread()
call_to_thread.put( callable, *args, **kwargs )
CallToThreadLongRunning = CallToThread
def CallLater( self, initial_delay, func, *args, **kwargs ):
call = HydrusData.Call( func, *args, **kwargs )
job = HydrusThreading.SchedulableJob( self, self._job_scheduler, initial_delay, call )
self._job_scheduler.AddJob( job )
return job
def CallLaterWXSafe( self, window, initial_delay, func, *args, **kwargs ):
call = HydrusData.Call( func, *args, **kwargs )
job = ClientThreading.WXAwareJob( self, self._job_scheduler, window, initial_delay, call )
self._job_scheduler.AddJob( job )
return job
def CallRepeating( self, initial_delay, period, func, *args, **kwargs ):
call = HydrusData.Call( func, *args, **kwargs )
job = HydrusThreading.RepeatingJob( self, self._job_scheduler, initial_delay, period, call )
self._job_scheduler.AddJob( job )
return job
def CallRepeatingWXSafe( self, window, initial_delay, period, func, *args, **kwargs ):
call = HydrusData.Call( func, *args, **kwargs )
job = ClientThreading.WXAwareRepeatingJob( self, self._job_scheduler, window, initial_delay, period, call )
self._job_scheduler.AddJob( job )
return job
def ClearWrites( self, name ):
if name in self._writes:
del self._writes[ name ]
def DBCurrentlyDoingJob( self ):
return False
def GetCurrentSessionPageAPIInfoDict( self ):
return {
"name" : "top pages notebook",
"page_key" : "3b28d8a59ec61834325eb6275d9df012860a1ecfd9e1246423059bc47fb6d5bd",
"page_type" : 10,
"selected" : True,
"pages" : [
{
"name" : "files",
"page_key" : "d436ff5109215199913705eb9a7669d8a6b67c52e41c3b42904db083255ca84d",
"page_type" : 6,
"selected" : False
},
{
"name" : "thread watcher",
"page_key" : "40887fa327edca01e1d69b533dddba4681b2c43e0b4ebee0576177852e8c32e7",
"page_type" : 9,
"selected" : False
},
{
"name" : "pages",
"page_key" : "2ee7fa4058e1e23f2bd9e915cdf9347ae90902a8622d6559ba019a83a785c4dc",
"page_type" : 10,
"selected" : True,
"pages" : [
{
"name" : "urls",
"page_key" : "9fe22cb760d9ee6de32575ed9f27b76b4c215179cf843d3f9044efeeca98411f",
"page_type" : 7,
"selected" : True
},
{
"name" : "files",
"page_key" : "2977d57fc9c588be783727bcd54225d577b44e8aa2f91e365a3eb3c3f580dc4e",
"page_type" : 6,
"selected" : False
}
]
}
]
}
def GetFilesDir( self ):
return self._server_files_dir
def GetNewOptions( self ):
return self.new_options
def GetManager( self, manager_type ):
return self._managers[ manager_type ]
def GetPageAPIInfoDict( self, page_key, simple ):
return {}
def GetWrite( self, name ):
write = self._writes[ name ]
del self._writes[ name ]
return write
def ImportURLFromAPI( self, url, service_keys_to_tags, destination_page_name, destination_page_key, show_destination_page ):
normalised_url = self.network_engine.domain_manager.NormaliseURL( url )
human_result_text = '"{}" URL added successfully.'.format( normalised_url )
self.Write( 'import_url_test', url, service_keys_to_tags, destination_page_name, destination_page_key, show_destination_page )
return ( normalised_url, human_result_text )
def IsBooted( self ):
return True
def IsCurrentPage( self, page_key ):
return False
def IsFirstStart( self ):
return True
def IShouldRegularlyUpdate( self, window ):
return True
def JustWokeFromSleep( self ):
return False
def PageAlive( self, page_key ):
return False
def PageClosedButNotDestroyed( self, page_key ):
return False
def PauseAndDisconnect( self, pause_and_disconnect ):
pass
def Read( self, name, *args, **kwargs ):
return self._reads[ name ]
def RegisterUIUpdateWindow( self, window ):
pass
def ReleasePageKey( self, page_key ):
pass
def ReportDataUsed( self, num_bytes ):
pass
def ReportRequestUsed( self ):
pass
def ResetIdleTimer( self ): pass
def Run( self, window ):
# we are in wx thread here, we can do this
self._SetupWx()
suites = []
if self.only_run is None:
run_all = True
else:
run_all = False
# the gui stuff runs fine on its own but crashes in the full test if it is not early, wew
# something to do with the delayed button clicking stuff
if run_all or self.only_run == 'gui':
suites.append( unittest.TestLoader().loadTestsFromModule( TestDialogs ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientListBoxes ) )
if run_all or self.only_run == 'client_api':
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientAPI ) )
if run_all or self.only_run == 'daemons':
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientDaemons ) )
if run_all or self.only_run == 'data':
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientConstants ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientData ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientImportOptions ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientTags ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientThreading ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestFunctions ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestHydrusSerialisable ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestHydrusSessions ) )
if run_all or self.only_run == 'db':
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientDB ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestServerDB ) )
if run_all or self.only_run in ( 'db', 'db_duplicates' ):
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientDBDuplicates ) )
if run_all or self.only_run == 'networking':
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientNetworking ) )
suites.append( unittest.TestLoader().loadTestsFromModule( TestHydrusNetworking ) )
if run_all or self.only_run == 'import':
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientImportSubscriptions ) )
if run_all or self.only_run == 'image':
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientImageHandling ) )
if run_all or self.only_run == 'migration':
suites.append( unittest.TestLoader().loadTestsFromModule( TestClientMigration ) )
if run_all or self.only_run == 'nat':
pass
#suites.append( unittest.TestLoader().loadTestsFromModule( TestHydrusNATPunch ) )
if run_all or self.only_run == 'server':
suites.append( unittest.TestLoader().loadTestsFromModule( TestHydrusServer ) )
suite = unittest.TestSuite( suites )
runner = unittest.TextTestRunner( verbosity = 2 )
def do_it():
try:
runner.run( suite )
finally:
wx.CallAfter( self.win.DestroyLater )
self.win.Show()
test_thread = threading.Thread( target = do_it )
test_thread.start()
def SetRead( self, name, value ):
self._reads[ name ] = value
def SetStatusBarDirty( self ):
pass
def SetWebCookies( self, name, value ):
self._cookies[ name ] = value
def ShouldStopThisWork( self, maintenance_mode, stop_time = None ):
return False
def ShowPage( self, page_key ):
self.Write( 'show_page', page_key )
def TidyUp( self ):
time.sleep( 2 )
HydrusPaths.DeletePath( self.db_dir )
def WaitUntilModelFree( self ):
return
def WaitUntilViewFree( self ):
return
def Write( self, name, *args, **kwargs ):
self._writes[ name ].append( ( args, kwargs ) )
def WriteSynchronous( self, name, *args, **kwargs ):
self._writes[ name ].append( ( args, kwargs ) )
if name == 'import_file':
( file_import_job, ) = args
if file_import_job.GetHash().hex() == 'a593942cb7ea9ffcd8ccf2f0fa23c338e23bfecd9a3e508dfc0bcf07501ead08': # 'blarg' in sha256 hex
raise Exception( 'File failed to import for some reason!' )
else:
return ( CC.STATUS_SUCCESSFUL_AND_NEW, 'test note' )
| 30.436318
| 164
| 0.568611
|
3eceb285cba16177f3c663eaa22775ccfc7ca53e
| 14,707
|
py
|
Python
|
pymessenger/bot.py
|
dranem03/messenger-chat-bot
|
2f5aaa034337c4ec116e9d1627e7fcb7305d011a
|
[
"MIT"
] | null | null | null |
pymessenger/bot.py
|
dranem03/messenger-chat-bot
|
2f5aaa034337c4ec116e9d1627e7fcb7305d011a
|
[
"MIT"
] | null | null | null |
pymessenger/bot.py
|
dranem03/messenger-chat-bot
|
2f5aaa034337c4ec116e9d1627e7fcb7305d011a
|
[
"MIT"
] | null | null | null |
import json
import os
from enum import Enum
import requests
from requests_toolbelt import MultipartEncoder
from pymessenger import utils
DEFAULT_API_VERSION = 2.6
class NotificationType(Enum):
regular = "REGULAR"
silent_push = "SILENT_PUSH"
no_push = "NO_PUSH"
class Bot:
def __init__(self, access_token, **kwargs):
"""
@required:
access_token
@optional:
api_version
app_secret
"""
self.api_version = kwargs.get('api_version') or DEFAULT_API_VERSION
self.app_secret = kwargs.get('app_secret')
self.graph_url = 'https://graph.facebook.com/v{0}'.format(self.api_version)
self.access_token = access_token
@property
def auth_args(self):
if not hasattr(self, '_auth_args'):
auth = {
'access_token': self.access_token
}
if self.app_secret is not None:
appsecret_proof = utils.generate_appsecret_proof(self.access_token, self.app_secret)
auth['appsecret_proof'] = appsecret_proof
self._auth_args = auth
return self._auth_args
def send_recipient(self, recipient_id, payload, notification_type=NotificationType.regular):
payload['recipient'] = {
'id': recipient_id
}
payload['notification_type'] = notification_type.value
return self.send_raw(payload)
def send_message(self, recipient_id, message, notification_type=NotificationType.regular):
return self.send_recipient(recipient_id, {
'message': message
}, notification_type)
def send_attachment(self, recipient_id, attachment_type, attachment_path,
notification_type=NotificationType.regular):
"""Send an attachment to the specified recipient using local path.
Input:
recipient_id: recipient id to send to
attachment_type: type of attachment (image, video, audio, file)
attachment_path: Path of attachment
Output:
Response from API as <dict>
"""
        payload = {
            # recipient/message must be JSON strings: MultipartEncoder only accepts
            # str, bytes or file-tuple field values (the nested-dict form fails).
            'recipient': json.dumps({
                'id': recipient_id
            }),
            'notification_type': notification_type.value,
            'message': json.dumps({
                'attachment': {
                    'type': attachment_type,
                    'payload': {}
                }
            }),
            'filedata': (os.path.basename(attachment_path), open(attachment_path, 'rb'))
        }
multipart_data = MultipartEncoder(payload)
multipart_header = {
'Content-Type': multipart_data.content_type
}
return requests.post(self.graph_url, data=multipart_data,
params=self.auth_args, headers=multipart_header).json()
def send_attachment_url(self, recipient_id, attachment_type, attachment_url,
notification_type=NotificationType.regular):
"""Send an attachment to the specified recipient using URL.
Input:
recipient_id: recipient id to send to
attachment_type: type of attachment (image, video, audio, file)
attachment_url: URL of attachment
Output:
Response from API as <dict>
"""
return self.send_message(recipient_id, {
'attachment': {
'type': attachment_type,
'payload': {
'url': attachment_url
}
}
}, notification_type)
def send_text_message(self, recipient_id, message, notification_type=NotificationType.regular):
"""Send text messages to the specified recipient.
https://developers.facebook.com/docs/messenger-platform/send-api-reference/text-message
Input:
recipient_id: recipient id to send to
message: message to send
Output:
Response from API as <dict>
"""
return self.send_message(recipient_id, {
'text': message
}, notification_type)
def send_generic_message(self, recipient_id, elements, notification_type=NotificationType.regular):
"""Send generic messages to the specified recipient.
https://developers.facebook.com/docs/messenger-platform/send-api-reference/generic-template
Input:
recipient_id: recipient id to send to
elements: generic message elements to send
Output:
Response from API as <dict>
"""
return self.send_message(recipient_id, {
"attachment": {
"type": "template",
"payload": {
"template_type": "generic",
"elements": elements
}
}
}, notification_type)
def send_button_message(self, recipient_id, text, buttons, notification_type=NotificationType.regular):
"""Send text messages to the specified recipient.
https://developers.facebook.com/docs/messenger-platform/send-api-reference/button-template
Input:
recipient_id: recipient id to send to
text: text of message to send
buttons: buttons to send
Output:
Response from API as <dict>
"""
return self.send_message(recipient_id, {
"attachment": {
"type": "template",
"payload": {
"template_type": "button",
"text": text,
"buttons": buttons
}
}
}, notification_type)
def send_action(self, recipient_id, action, notification_type=NotificationType.regular):
"""Send typing indicators or send read receipts to the specified recipient.
https://developers.facebook.com/docs/messenger-platform/send-api-reference/sender-actions
Input:
recipient_id: recipient id to send to
action: action type (mark_seen, typing_on, typing_off)
Output:
Response from API as <dict>
"""
return self.send_recipient(recipient_id, {
'sender_action': action
}, notification_type)
def send_image(self, recipient_id, image_path, notification_type=NotificationType.regular):
"""Send an image to the specified recipient.
Image must be PNG or JPEG or GIF (more might be supported).
https://developers.facebook.com/docs/messenger-platform/send-api-reference/image-attachment
Input:
recipient_id: recipient id to send to
image_path: path to image to be sent
Output:
Response from API as <dict>
"""
return self.send_attachment(recipient_id, "image", image_path, notification_type)
def send_image_url(self, recipient_id, image_url, notification_type=NotificationType.regular):
"""Send an image to specified recipient using URL.
Image must be PNG or JPEG or GIF (more might be supported).
https://developers.facebook.com/docs/messenger-platform/send-api-reference/image-attachment
Input:
recipient_id: recipient id to send to
image_url: url of image to be sent
Output:
Response from API as <dict>
"""
return self.send_attachment_url(recipient_id, "image", image_url, notification_type)
def send_audio(self, recipient_id, audio_path, notification_type=NotificationType.regular):
"""Send audio to the specified recipient.
Audio must be MP3 or WAV
https://developers.facebook.com/docs/messenger-platform/send-api-reference/audio-attachment
Input:
recipient_id: recipient id to send to
audio_path: path to audio to be sent
Output:
Response from API as <dict>
"""
return self.send_attachment(recipient_id, "audio", audio_path, notification_type)
def send_audio_url(self, recipient_id, audio_url, notification_type=NotificationType.regular):
"""Send audio to specified recipient using URL.
Audio must be MP3 or WAV
https://developers.facebook.com/docs/messenger-platform/send-api-reference/audio-attachment
Input:
recipient_id: recipient id to send to
audio_url: url of audio to be sent
Output:
Response from API as <dict>
"""
return self.send_attachment_url(recipient_id, "audio", audio_url, notification_type)
def send_video(self, recipient_id, video_path, notification_type=NotificationType.regular):
"""Send video to the specified recipient.
Video should be MP4 or MOV, but supports more (https://www.facebook.com/help/218673814818907).
https://developers.facebook.com/docs/messenger-platform/send-api-reference/video-attachment
Input:
recipient_id: recipient id to send to
video_path: path to video to be sent
Output:
Response from API as <dict>
"""
return self.send_attachment(recipient_id, "video", video_path, notification_type)
def send_video_url(self, recipient_id, video_url, notification_type=NotificationType.regular):
"""Send video to specified recipient using URL.
Video should be MP4 or MOV, but supports more (https://www.facebook.com/help/218673814818907).
https://developers.facebook.com/docs/messenger-platform/send-api-reference/video-attachment
Input:
recipient_id: recipient id to send to
video_url: url of video to be sent
Output:
Response from API as <dict>
"""
return self.send_attachment_url(recipient_id, "video", video_url, notification_type)
def send_file(self, recipient_id, file_path, notification_type=NotificationType.regular):
"""Send file to the specified recipient.
https://developers.facebook.com/docs/messenger-platform/send-api-reference/file-attachment
Input:
recipient_id: recipient id to send to
file_path: path to file to be sent
Output:
Response from API as <dict>
"""
return self.send_attachment(recipient_id, "file", file_path, notification_type)
def send_file_url(self, recipient_id, file_url, notification_type=NotificationType.regular):
"""Send file to the specified recipient.
https://developers.facebook.com/docs/messenger-platform/send-api-reference/file-attachment
Input:
recipient_id: recipient id to send to
file_url: url of file to be sent
Output:
Response from API as <dict>
"""
return self.send_attachment_url(recipient_id, "file", file_url, notification_type)
def get_user_info(self, recipient_id, fields=None):
"""Getting information about the user
https://developers.facebook.com/docs/messenger-platform/user-profile
Input:
recipient_id: recipient id to send to
Output:
Response from API as <dict>
"""
params = {}
if fields is not None and isinstance(fields, (list, tuple)):
params['fields'] = ",".join(fields)
params.update(self.auth_args)
request_endpoint = '{0}/{1}'.format(self.graph_url, recipient_id)
response = requests.get(request_endpoint, params=params)
if response.status_code == 200:
return response.json()
return None
def send_raw(self, payload):
request_endpoint = '{0}/me/messages'.format(self.graph_url)
response = requests.post(
request_endpoint,
params=self.auth_args,
json=payload
)
result = response.json()
return result
def _send_payload(self, payload):
""" Deprecated, use send_raw instead """
return self.send_raw(payload)
def set_get_started(self, gs_obj):
"""Set a get started button shown on welcome screen for first time users
https://developers.facebook.com/docs/messenger-platform/reference/messenger-profile-api/get-started-button
Input:
gs_obj: Your formatted get_started object as described by the API docs
Output:
Response from API as <dict>
"""
request_endpoint = '{0}/me/messenger_profile'.format(self.graph_url)
response = requests.post(
request_endpoint,
params = self.auth_args,
json = gs_obj
)
result = response.json()
return result
def set_persistent_menu(self, pm_obj):
"""Set a persistent_menu that stays same for every user. Before you can use this, make sure to have set a get started button.
https://developers.facebook.com/docs/messenger-platform/reference/messenger-profile-api/persistent-menu
Input:
pm_obj: Your formatted persistent menu object as described by the API docs
Output:
Response from API as <dict>
"""
request_endpoint = '{0}/me/messenger_profile'.format(self.graph_url)
response = requests.post(
request_endpoint,
            params=self.auth_args,
            json=pm_obj
)
result = response.json()
return result
def remove_get_started(self):
"""delete get started button.
https://developers.facebook.com/docs/messenger-platform/reference/messenger-profile-api/#delete
Output:
Response from API as <dict>
"""
delete_obj = {"fields": ["get_started"]}
request_endpoint = '{0}/me/messenger_profile'.format(self.graph_url)
response = requests.delete(
request_endpoint,
            params=self.auth_args,
            json=delete_obj
)
result = response.json()
return result
def remove_persistent_menu(self):
"""delete persistent menu.
https://developers.facebook.com/docs/messenger-platform/reference/messenger-profile-api/#delete
Output:
Response from API as <dict>
"""
delete_obj = {"fields": ["persistent_menu"]}
request_endpoint = '{0}/me/messenger_profile'.format(self.graph_url)
response = requests.delete(
request_endpoint,
            params=self.auth_args,
            json=delete_obj
)
result = response.json()
return result
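A hypothetical usage sketch for the helpers above; the wrapping class and its constructor are not shown in this excerpt, so Bot, ACCESS_TOKEN and PSID are assumed names, and the profile payloads follow the shapes described in the linked API docs.
bot = Bot(ACCESS_TOKEN)  # assumed constructor taking a page access token
bot.send_video_url(PSID, "https://example.com/clip.mp4")
bot.send_file(PSID, "/tmp/report.pdf")
bot.set_get_started({"get_started": {"payload": "GET_STARTED"}})
bot.set_persistent_menu({
    "persistent_menu": [{
        "locale": "default",
        "composer_input_disabled": False,
        "call_to_actions": [{"type": "postback", "title": "Help", "payload": "HELP"}],
    }]
})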
| 39.323529
| 133
| 0.613653
|
bb841156ada6a3600fd8652aa2eb9ac69f91208d
| 3,974
|
py
|
Python
|
dev/Gems/CloudGemFramework/v1/AWS/lambda-code/PlayerAccessTokenExchange/lwa_auth.py
|
brianherrera/lumberyard
|
f85344403c1c2e77ec8c75deb2c116e97b713217
|
[
"AML"
] | 1,738
|
2017-09-21T10:59:12.000Z
|
2022-03-31T21:05:46.000Z
|
dev/Gems/CloudGemFramework/v1/AWS/lambda-code/PlayerAccessTokenExchange/lwa_auth.py
|
olivier-be/lumberyard
|
3d688932f919dbf5821f0cb8a210ce24abe39e9e
|
[
"AML"
] | 427
|
2017-09-29T22:54:36.000Z
|
2022-02-15T19:26:50.000Z
|
dev/Gems/CloudGemFramework/v1/AWS/lambda-code/PlayerAccessTokenExchange/lwa_auth.py
|
olivier-be/lumberyard
|
3d688932f919dbf5821f0cb8a210ce24abe39e9e
|
[
"AML"
] | 671
|
2017-09-21T08:04:01.000Z
|
2022-03-29T14:30:07.000Z
|
#
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# $Revision: #1 $
import json
# Python 2.7/3.7 Compatibility
from six.moves import http_client
from auth_token_exception import AuthTokenException
def get_amazon_refresh_token(auth_code, auth_settings):
connection = http_client.HTTPSConnection('api.amazon.com', 443)
headers = {'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'}
body = 'grant_type=authorization_code&code={0}&client_id={1}&client_secret={2}&redirect_uri={3}'.format(auth_code, auth_settings['client_id'],
auth_settings['client_secret'],
auth_settings['redirect_uri'])
connection.request('POST', '/auth/o2/token', body, headers)
response = connection.getresponse()
if response.status == 200:
response_data = json.loads(response.read())
print('response_data {}'.format(response_data))
amzn_refresh_token_data = {
'access_token': response_data['access_token'],
'expires_in': response_data['expires_in'],
'refresh_token': response_data['refresh_token']
}
return amzn_refresh_token_data
else:
error_result = response.read()
print('error {}'.format(error_result))
        raise Exception(error_result)
def amazon_refresh_access_token(refresh_token, auth_settings):
connection = http_client.HTTPSConnection('api.amazon.com', 443)
headers = {'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'}
body = 'grant_type=refresh_token&refresh_token={0}&client_id={1}&client_secret={2}&redirect_uri={3}'.format(refresh_token, auth_settings['client_id'],
auth_settings['client_secret'],
auth_settings['redirect_uri'])
connection.request('POST', '/auth/o2/token', body, headers)
response = connection.getresponse()
if response.status == 200:
response_data = json.loads(response.read())
print('response_data {}'.format(response_data))
amzn_refresh_token_data = {
'access_token': response_data['access_token'],
'expires_in': response_data['expires_in'],
'refresh_token': refresh_token
}
return amzn_refresh_token_data
else:
error_result = response.read()
print('error {}'.format(error_result))
raise Exception(error_result)
def handler(event, context, auth_settings):
try:
if 'code' in event:
code = event['code']
print('code {}'.format(code))
return get_amazon_refresh_token(code, auth_settings['amazon'])
elif 'refresh_token' in event:
refresh_token = event['refresh_token']
print('refresh_token {}'.format(refresh_token))
return amazon_refresh_access_token(refresh_token, auth_settings['amazon'])
except Exception as e:
print(e)
raise AuthTokenException(event, context, str(e))
print('error {}'.format('code or refresh_token must be supplied as input'))
raise AuthTokenException(event, context, 'code or refresh_token must be supplied as input')
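A hypothetical local invocation of the handler above; the client id, secret and authorization code are placeholders, so against the real Login with Amazon endpoint this would fail and surface an AuthTokenException, but it shows the expected event shapes.
if __name__ == '__main__':
    settings = {'amazon': {'client_id': 'amzn1.application-oa2-client.PLACEHOLDER',
                           'client_secret': 'PLACEHOLDER',
                           'redirect_uri': 'https://example.com/callback'}}
    tokens = handler({'code': 'PLACEHOLDER_AUTH_CODE'}, None, settings)  # code -> refresh token
    handler({'refresh_token': tokens['refresh_token']}, None, settings)  # refresh -> new access token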
| 46.209302
| 154
| 0.618017
|
fb0df07f722a340801c47117bccb41dbc6af10b3
| 52
|
py
|
Python
|
pyteen/__init__.py
|
deeplook/tenliners
|
dc1b2e430d8ce0528544f1cf8dc8c6c1b1acc2ef
|
[
"MIT"
] | 1
|
2020-09-09T09:48:52.000Z
|
2020-09-09T09:48:52.000Z
|
pyteen/__init__.py
|
deeplook/tenliners
|
dc1b2e430d8ce0528544f1cf8dc8c6c1b1acc2ef
|
[
"MIT"
] | 22
|
2020-07-27T21:46:28.000Z
|
2020-08-09T21:18:36.000Z
|
pyteen/__init__.py
|
deeplook/tenliners
|
dc1b2e430d8ce0528544f1cf8dc8c6c1b1acc2ef
|
[
"MIT"
] | null | null | null |
from . import snippets
from .core import Collection
| 17.333333
| 28
| 0.807692
|
59dbaf44ce16bac742a80d69196f10b6698364b7
| 8,680
|
py
|
Python
|
melati/cmds/configure.py
|
Melati-Network/melati-blockchain
|
6b96f6a84b9c9abf49c6acc239795d4650ddfab2
|
[
"Apache-2.0"
] | 12
|
2021-07-13T15:39:57.000Z
|
2022-02-09T04:32:12.000Z
|
melati/cmds/configure.py
|
Melati-Network/melati-blockchain
|
6b96f6a84b9c9abf49c6acc239795d4650ddfab2
|
[
"Apache-2.0"
] | 1
|
2021-07-16T12:41:41.000Z
|
2021-07-16T12:42:48.000Z
|
melati/cmds/configure.py
|
Melati-Network/melati-blockchain
|
6b96f6a84b9c9abf49c6acc239795d4650ddfab2
|
[
"Apache-2.0"
] | 3
|
2021-07-13T05:35:30.000Z
|
2021-08-06T13:11:14.000Z
|
from pathlib import Path
from typing import Dict
import click
from melati.util.config import load_config, save_config, str2bool
from melati.util.default_root import DEFAULT_ROOT_PATH
def configure(
root_path: Path,
set_farmer_peer: str,
set_node_introducer: str,
set_fullnode_port: str,
set_harvester_port: str,
set_log_level: str,
enable_upnp: str,
set_outbound_peer_count: str,
set_peer_count: str,
testnet: str,
):
config: Dict = load_config(DEFAULT_ROOT_PATH, "config.yaml")
change_made = False
if set_node_introducer:
try:
if set_node_introducer.index(":"):
host, port = (
":".join(set_node_introducer.split(":")[:-1]),
set_node_introducer.split(":")[-1],
)
config["full_node"]["introducer_peer"]["host"] = host
config["full_node"]["introducer_peer"]["port"] = int(port)
config["introducer"]["port"] = int(port)
print("Node introducer updated")
change_made = True
except ValueError:
print("Node introducer address must be in format [IP:Port]")
if set_farmer_peer:
try:
if set_farmer_peer.index(":"):
host, port = (
":".join(set_farmer_peer.split(":")[:-1]),
set_farmer_peer.split(":")[-1],
)
config["full_node"]["farmer_peer"]["host"] = host
config["full_node"]["farmer_peer"]["port"] = int(port)
config["harvester"]["farmer_peer"]["host"] = host
config["harvester"]["farmer_peer"]["port"] = int(port)
print("Farmer peer updated, make sure your harvester has the proper cert installed")
change_made = True
except ValueError:
print("Farmer address must be in format [IP:Port]")
if set_fullnode_port:
config["full_node"]["port"] = int(set_fullnode_port)
config["full_node"]["introducer_peer"]["port"] = int(set_fullnode_port)
config["farmer"]["full_node_peer"]["port"] = int(set_fullnode_port)
config["timelord"]["full_node_peer"]["port"] = int(set_fullnode_port)
config["wallet"]["full_node_peer"]["port"] = int(set_fullnode_port)
config["wallet"]["introducer_peer"]["port"] = int(set_fullnode_port)
config["introducer"]["port"] = int(set_fullnode_port)
print("Default full node port updated")
change_made = True
if set_harvester_port:
config["harvester"]["port"] = int(set_harvester_port)
config["farmer"]["harvester_peer"]["port"] = int(set_harvester_port)
print("Default harvester port updated")
change_made = True
if set_log_level:
levels = ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"]
if set_log_level in levels:
config["logging"]["log_level"] = set_log_level
print(f"Logging level updated. Check {DEFAULT_ROOT_PATH}/log/debug.log")
change_made = True
else:
print(f"Logging level not updated. Use one of: {levels}")
if enable_upnp is not None:
config["full_node"]["enable_upnp"] = str2bool(enable_upnp)
if str2bool(enable_upnp):
print("uPnP enabled")
else:
print("uPnP disabled")
change_made = True
if set_outbound_peer_count is not None:
config["full_node"]["target_outbound_peer_count"] = int(set_outbound_peer_count)
print("Target outbound peer count updated")
change_made = True
if set_peer_count is not None:
config["full_node"]["target_peer_count"] = int(set_peer_count)
print("Target peer count updated")
change_made = True
if testnet is not None:
if testnet == "true" or testnet == "t":
print("Setting Testnet")
testnet_port = "57444"
testnet_introducer = "beta1_introducer.melatinetwork.net"
testnet = "testnet7"
config["full_node"]["port"] = int(testnet_port)
config["full_node"]["introducer_peer"]["port"] = int(testnet_port)
config["farmer"]["full_node_peer"]["port"] = int(testnet_port)
config["timelord"]["full_node_peer"]["port"] = int(testnet_port)
config["wallet"]["full_node_peer"]["port"] = int(testnet_port)
config["wallet"]["introducer_peer"]["port"] = int(testnet_port)
config["introducer"]["port"] = int(testnet_port)
config["full_node"]["introducer_peer"]["host"] = testnet_introducer
config["selected_network"] = testnet
config["harvester"]["selected_network"] = testnet
config["pool"]["selected_network"] = testnet
config["farmer"]["selected_network"] = testnet
config["timelord"]["selected_network"] = testnet
config["full_node"]["selected_network"] = testnet
config["ui"]["selected_network"] = testnet
config["introducer"]["selected_network"] = testnet
config["wallet"]["selected_network"] = testnet
print("Default full node port, introducer and network setting updated")
change_made = True
elif testnet == "false" or testnet == "f":
print("Setting Mainnet")
mainnet_port = "2444"
mainnet_introducer = "introducer.melatinetwork.net"
net = "mainnet"
config["full_node"]["port"] = int(mainnet_port)
config["full_node"]["introducer_peer"]["port"] = int(mainnet_port)
config["farmer"]["full_node_peer"]["port"] = int(mainnet_port)
config["timelord"]["full_node_peer"]["port"] = int(mainnet_port)
config["wallet"]["full_node_peer"]["port"] = int(mainnet_port)
config["wallet"]["introducer_peer"]["port"] = int(mainnet_port)
config["introducer"]["port"] = int(mainnet_port)
config["full_node"]["introducer_peer"]["host"] = mainnet_introducer
config["selected_network"] = net
config["harvester"]["selected_network"] = net
config["pool"]["selected_network"] = net
config["farmer"]["selected_network"] = net
config["timelord"]["selected_network"] = net
config["full_node"]["selected_network"] = net
config["ui"]["selected_network"] = net
config["introducer"]["selected_network"] = net
config["wallet"]["selected_network"] = net
print("Default full node port, introducer and network setting updated")
change_made = True
else:
print("Please choose True or False")
if change_made:
print("Restart any running melati services for changes to take effect")
save_config(root_path, "config.yaml", config)
return 0
@click.command("configure", short_help="Modify configuration")
@click.option(
"--testnet",
"-t",
help="configures for connection to testnet",
type=click.Choice(["true", "t", "false", "f"]),
)
@click.option("--set-node-introducer", help="Set the introducer for node - IP:Port", type=str)
@click.option("--set-farmer-peer", help="Set the farmer peer for harvester - IP:Port", type=str)
@click.option(
"--set-fullnode-port",
help="Set the port to use for the fullnode, useful for testing",
type=str,
)
@click.option(
"--set-harvester-port",
help="Set the port to use for the harvester, useful for testing",
type=str,
)
@click.option(
"--set-log-level",
"--log-level",
"-log-level",
help="Set the instance log level",
type=click.Choice(["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"]),
)
@click.option(
"--enable-upnp",
"--upnp",
"-upnp",
help="Enable or disable uPnP",
type=click.Choice(["true", "t", "false", "f"]),
)
@click.option(
"--set_outbound-peer-count",
help="Update the target outbound peer count (default 8)",
type=str,
)
@click.option("--set-peer-count", help="Update the target peer count (default 80)", type=str)
@click.pass_context
def configure_cmd(
ctx,
set_farmer_peer,
set_node_introducer,
set_fullnode_port,
set_harvester_port,
set_log_level,
enable_upnp,
set_outbound_peer_count,
set_peer_count,
testnet,
):
configure(
ctx.obj["root_path"],
set_farmer_peer,
set_node_introducer,
set_fullnode_port,
set_harvester_port,
set_log_level,
enable_upnp,
set_outbound_peer_count,
set_peer_count,
testnet,
)
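A sketch of exercising the command above through Click's test runner; the root path is a placeholder, and a valid config.yaml must already exist for load_config/save_config to succeed.
if __name__ == "__main__":
    from pathlib import Path
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(
        configure_cmd,
        ["--set-log-level", "INFO", "--set-fullnode-port", "2444"],
        obj={"root_path": Path.home() / ".melati" / "mainnet"},  # placeholder root path
    )
    print(result.output)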
| 40.560748
| 100
| 0.606452
|
d78ef4a6740a2914c6ae2da2189982d14eaa3adb
| 2,642
|
py
|
Python
|
isdbeads/inference.py
|
michaelhabeck/bayesian-random-tomography
|
9429a3688df368f0fe8fd7beaa8202386951164a
|
[
"MIT"
] | 2
|
2021-04-17T14:05:05.000Z
|
2022-02-24T16:03:29.000Z
|
isdbeads/inference.py
|
michaelhabeck/bayesian-random-tomography
|
9429a3688df368f0fe8fd7beaa8202386951164a
|
[
"MIT"
] | null | null | null |
isdbeads/inference.py
|
michaelhabeck/bayesian-random-tomography
|
9429a3688df368f0fe8fd7beaa8202386951164a
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy.special import logsumexp
from .mcmc import (
AdaptiveWalk as _AdaptiveWalk,
)
from .hmc import (
HamiltonianMonteCarlo as _HamiltonianMonteCarlo
)
class AdaptiveWalk(_AdaptiveWalk):
"""Adaptive Walk
keeping track of the stepsizes...
"""
stepsizes = []
@property
def stepsize(self):
return self._stepsize
@stepsize.setter
def stepsize(self, value):
self._stepsize = float(value)
self.stepsizes.append(self.stepsize)
class RotationSampler(AdaptiveWalk):
quaternions = np.load('../data/quaternions.npz')
quaternions = [quaternions['level{}'.format(level)]
for level in range(3)]
def __init__(self, image, stepsize=1e-2, level=-1):
rotation = image.mock._rotation
super(RotationSampler, self).__init__(
image, rotation, stepsize, adapt_until=int(1e2)
)
self.activate()
self.level = int(level)
assert self.level in [-1, 0, 1]
def sample_initial(self):
"""Systematic scan. """
if self.level < 0: return
# evaluate log probability for all quaternions in the 600-cell
log_prob = []
quaternions = RotationSampler.quaternions[self.level]
quaternions = np.vstack([self.parameter.get().copy().reshape(1, -1),
quaternions])
for q in quaternions:
self.parameter.set(q)
self.model.update()
log_prob.append(self.model.log_prob())
log_prob = np.array(log_prob) - logsumexp(log_prob)
prob = np.exp(log_prob)
i = np.random.choice(np.arange(len(prob)), p=prob)
q = quaternions[i]
self.parameter.set(q)
self.state = self.create_state()
def sample(self, n_steps=10, resample_frequency=0.):
"""Metropolis-Hastings with an occasional systematic scan. """
self.level = np.random.choice(
[0, -1],
p=[resample_frequency, 1-resample_frequency]
)
self.sample_initial()
samples = []
while len(samples) < n_steps:
samples.append(next(self))
return samples
class HamiltonianMonteCarlo(_HamiltonianMonteCarlo):
def next(self):
result = super(HamiltonianMonteCarlo, self).next()
# print some info
if len(self.history) and not len(self.history) % 20:
print('{0}, stepsize = {1:.3e}, -log_prob = {2:.3e}'.format(
self.history, self.stepsize, self.state.potential_energy)
)
return result
| 27.520833
| 76
| 0.596518
|
17f37673a7a1aa6730fefb8ad411cb51a2cd2fd8
| 68
|
py
|
Python
|
pycnv/__init__.py
|
martinhaagmans/pyCNV
|
af3ad803312aa8b2a7fe1547783f90e0d0d5ee83
|
[
"MIT"
] | null | null | null |
pycnv/__init__.py
|
martinhaagmans/pyCNV
|
af3ad803312aa8b2a7fe1547783f90e0d0d5ee83
|
[
"MIT"
] | null | null | null |
pycnv/__init__.py
|
martinhaagmans/pyCNV
|
af3ad803312aa8b2a7fe1547783f90e0d0d5ee83
|
[
"MIT"
] | null | null | null |
from .analysis import analyse
from .analysis import create_database
| 22.666667
| 37
| 0.852941
|
93ab9c25500ad7354aca27b8f04aa33dcc835a55
| 2,099
|
py
|
Python
|
main.py
|
parizad1188/py-sr25519-bindings
|
604f08ec15962c6695670df0647ac6818ffa356e
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
parizad1188/py-sr25519-bindings
|
604f08ec15962c6695670df0647ac6818ffa356e
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
parizad1188/py-sr25519-bindings
|
604f08ec15962c6695670df0647ac6818ffa356e
|
[
"Apache-2.0"
] | null | null | null |
import os
import sr25519
from substrateinterface import Keypair
x1_mnemonic = 'stone cereal magnet search zoo split dish leisure crouch uniform elite panic'
x2_mnemonic = 'diagram truck orient actress resource attitude you initial during slight actress cluster'
x1_kp = Keypair.create_from_mnemonic(x1_mnemonic)
x2_kp = Keypair.create_from_mnemonic(x2_mnemonic)
p = 2**255-19
l = 2**252 + 27742317777372353535851937790883648493
print('x1_priv = ', x1_kp.private_key[2:66])
print('x1_pub = ', x1_kp.public_key)
publickey_sum = sr25519.sum_public_points(bytes.fromhex(x1_kp.public_key[2:]), bytes.fromhex(x2_kp.public_key[2:]))
print(publickey_sum.hex())
x1 = int.from_bytes(bytes.fromhex(x1_kp.private_key[2:66]), 'little')
x2 = int.from_bytes(bytes.fromhex(x2_kp.private_key[2:66]), 'little')
k1 = int.from_bytes(os.urandom(32), 'big') % l
R1 = sr25519.public_from_secret_key(k1.to_bytes(32, 'little') + bytes.fromhex('44' * 32))
k2 = int.from_bytes(os.urandom(32), 'big') % l
R2 = sr25519.public_from_secret_key(k2.to_bytes(32, 'little') + bytes.fromhex('44' * 32))
R = sr25519.sum_public_points(R1, R2)
x_gold = (x1 + x2) % l
p_gold = sr25519.public_from_secret_key(x_gold.to_bytes(32, 'little') + bytes.fromhex('33' * 32))
print(p_gold.hex())
data = bytes.fromhex('33445566')
sig1 = sr25519.multi_sign((p_gold, bytes.fromhex(x1_kp.private_key[2:])),
data,
R,
k1.to_bytes(32, 'little') + bytes.fromhex('44' * 32))
sig2 = sr25519.multi_sign((p_gold, bytes.fromhex(x2_kp.private_key[2:])),
data,
R,
k2.to_bytes(32, 'little') + bytes.fromhex('44' * 32))
print('sig1', sig1.hex())
print('sig2', sig2.hex())
s_gold = (int.from_bytes(sig1[32:], 'little') + int.from_bytes(sig2[32:], 'little')) % l
s_gold_bytes = s_gold.to_bytes(32, 'little')
sig_gold = R + s_gold_bytes[:31] + (int(s_gold_bytes[31]) | 128).to_bytes(1,'little')
print('sigG', sig_gold.hex())
ver_gold = sr25519.verify(sig_gold, data, p_gold)
print(ver_gold)
exit()
| 36.824561
| 115
| 0.676036
|
5fd83f39504913f477db7f00cf8176c102c27de1
| 1,334
|
py
|
Python
|
BFS/69.py
|
wilbertgeng/LintCode_exercise
|
e7a343b746e98ca3b4bc7b36655af7291f3150db
|
[
"MIT"
] | null | null | null |
BFS/69.py
|
wilbertgeng/LintCode_exercise
|
e7a343b746e98ca3b4bc7b36655af7291f3150db
|
[
"MIT"
] | null | null | null |
BFS/69.py
|
wilbertgeng/LintCode_exercise
|
e7a343b746e98ca3b4bc7b36655af7291f3150db
|
[
"MIT"
] | null | null | null |
"""69. Binary Tree Level Order Traversal
"""
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
import collections
class Solution:
"""
@param root: A Tree
@return: Level order a list of lists of integer
"""
def levelOrder(self, root):
# write your code here
### Practice:
# single queue
if not root:
return []
res = []
queue = collections.deque([root])
while queue:
level = []
for _ in range(len(queue)):
node = queue.popleft()
level.append(node.val)
if node.left:
queue.append(node.left)
if node.right:
queue.append(node.right)
res.append(level)
return res
## double queues
if not root:
return []
queue = [root]
res = []
while queue:
next_queue = []
res.append([node.val for node in queue])
for node in queue:
if node.left:
next_queue.append(node.left)
if node.right:
next_queue.append(node.right)
queue = next_queue
return res
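A short, hypothetical driver for the solution above; TreeNode is redefined locally to match the docstring, and the expected output for a tree with root 1, children 2 and 3, and 4 under 2 is shown in the comment.
import collections
class TreeNode:
    def __init__(self, val):
        self.val = val
        self.left, self.right = None, None
if __name__ == "__main__":
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.left = TreeNode(4)
    print(Solution().levelOrder(root))  # [[1], [2, 3], [4]]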
| 23.403509
| 52
| 0.471514
|
711b9a24f0b193fe99b8eacd925158c8a35f72d7
| 1,162
|
py
|
Python
|
Transformer.py
|
kyle-gao/TF_Transformer
|
7029ccb88f6eb9a2ac2e10fbd820413b42f461a1
|
[
"Apache-2.0"
] | 6
|
2020-09-11T16:48:51.000Z
|
2021-09-30T19:23:16.000Z
|
Transformer.py
|
kyle-gao/TF_Transformer
|
7029ccb88f6eb9a2ac2e10fbd820413b42f461a1
|
[
"Apache-2.0"
] | 1
|
2021-08-30T08:58:54.000Z
|
2021-08-30T08:58:54.000Z
|
Transformer.py
|
kyle-gao/TF_Transformer
|
7029ccb88f6eb9a2ac2e10fbd820413b42f461a1
|
[
"Apache-2.0"
] | 1
|
2022-03-23T23:30:20.000Z
|
2022-03-23T23:30:20.000Z
|
import tensorflow as tf
from Encoder import *
from Decoder import *
class Transformer(tf.keras.Model):
def __init__(self, num_layers, num_heads, d_model, dense_dim, in_vocab_size, tar_vocab_size,
input_max_position, target_max_position, rate=0.1):
super().__init__()
        self.encoder = Encoder(num_layers, num_heads, d_model, dense_dim,
                               in_vocab_size, max_encoding_position=input_max_position, dropout=rate)
        self.decoder = Decoder(num_layers, num_heads, d_model, dense_dim,
                               tar_vocab_size, max_encoding_position=target_max_position, dropout=rate)
self.dense = tf.keras.layers.Dense(tar_vocab_size)
def call(self, input, target, training=False, enc_mask=None, dec_forward_mask=None, dec_padding_mask=None):
out_encoder = self.encoder(input, training=training, mask=enc_mask)
out_decoder = self.decoder(out_encoder, target, training=training, forward_mask=dec_forward_mask,
padding_mask=dec_padding_mask)
out = self.dense(out_decoder)
return out
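A hypothetical smoke test for the model above; the hyperparameters are arbitrary and the Encoder/Decoder behaviour (and therefore the output shape) is assumed from their constructor arguments, since those modules are not part of this file.
if __name__ == "__main__":
    model = Transformer(num_layers=2, num_heads=4, d_model=64, dense_dim=128,
                        in_vocab_size=1000, tar_vocab_size=1000,
                        input_max_position=128, target_max_position=128)
    src = tf.random.uniform((8, 20), minval=0, maxval=1000, dtype=tf.int32)  # token ids
    tgt = tf.random.uniform((8, 18), minval=0, maxval=1000, dtype=tf.int32)
    out = model(src, tgt, training=False)
    print(out.shape)  # expected (8, 18, 1000), i.e. logits over the target vocabulary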
| 40.068966
| 112
| 0.663511
|
d7eea412b9f9961bc640e93419a9d886280819ba
| 1,396
|
py
|
Python
|
jobsapp/documents.py
|
rishiparekh/Job-Portal
|
0e9f8da132896fdba2f8c051cf79bb2ab5b63de8
|
[
"MIT"
] | null | null | null |
jobsapp/documents.py
|
rishiparekh/Job-Portal
|
0e9f8da132896fdba2f8c051cf79bb2ab5b63de8
|
[
"MIT"
] | null | null | null |
jobsapp/documents.py
|
rishiparekh/Job-Portal
|
0e9f8da132896fdba2f8c051cf79bb2ab5b63de8
|
[
"MIT"
] | 5
|
2020-11-07T08:41:35.000Z
|
2021-06-09T23:15:26.000Z
|
from django_elasticsearch_dsl import Document
from django_elasticsearch_dsl.registries import registry
from .models import Job
# TODO disabled for testing purpose
# @registry.register_document
class JobDocument(Document):
class Index:
# Name of the Elasticsearch index
name = 'jobs'
# See Elasticsearch Indices API reference for available settings
settings = {'number_of_shards': 2}
# settings = {'number_of_shards': 1,
# 'number_of_replicas': 0}
class Django:
model = Job # The model associated with this Document
# The fields of the model you want to be indexed in Elasticsearch
fields = [
'title',
'location',
]
# Ignore auto updating of Elasticsearch when a model is saved
# or deleted:
# ignore_signals = True
# Don't perform an index refresh after every update (overrides global setting):
# auto_refresh = False
# Paginate the django queryset used to populate the index with the specified size
# (by default it uses the database driver's default setting)
# queryset_pagination = 5000
# def get_queryset(self):
# """Not mandatory but to improve performance we can select related in one sql request"""
# return super(JobDocument, self).get_queryset().select_related('study')
| 34.9
| 97
| 0.660458
|
4a903a273bfd11b47c25c41be746fb316dfc35af
| 3,573
|
py
|
Python
|
Figure_1_twoblock/figS1-simulations.py
|
MGarrod1/unobserved_spin_influence
|
7bc3a304e1e09cf6cfaddb50ec3203ecc8808cbe
|
[
"MIT"
] | null | null | null |
Figure_1_twoblock/figS1-simulations.py
|
MGarrod1/unobserved_spin_influence
|
7bc3a304e1e09cf6cfaddb50ec3203ecc8808cbe
|
[
"MIT"
] | null | null | null |
Figure_1_twoblock/figS1-simulations.py
|
MGarrod1/unobserved_spin_influence
|
7bc3a304e1e09cf6cfaddb50ec3203ecc8808cbe
|
[
"MIT"
] | null | null | null |
"""
Code used for Figure S1 in the paper. Investigates the impact of changing
the coupling matrix on the magnetisation markup.
M Garrod, Jan 2021.
"""
import random
import numpy as np
from ising_block_level_influence import N_Block_sbm_class as NBlock
#Seed the random number generators:
seed = 1
random.seed(seed)
np.random.seed(seed)
K_11_vals = np.arange(2.5,10.5,1.0)
for K_11 in K_11_vals :
N_Block=250
coupling_matrix = np.asarray([[K_11,2.5],[2.5,5.5]])
block_sizes=[N_Block,N_Block]
block_background_field=np.asarray([0.0,0.0])
block_system = NBlock.block_mf_ising_system(coupling_matrix,block_sizes,block_background_field)
sbm_graph=block_system.make_sbm() #Sample a particular SBM from the ensemble
ising_analysis = NBlock.ising_analysis(sbm_graph, coupling_matrix, block_sizes, block_background_field)
MC_Sims=15
spin_alignment=1.0 #Start at fully aligned metastable state.
H_vals = [2000]
save_file_prefix=f"as_kap_data/{K_11}_kappa_two_block".replace(".","-")
#The figure only compares block level and full IIM controls.
ising_analysis.controls_to_get = {'no control':True,
'uniform control':True,
'NC control':False,
'SV control':False,
'Block control':True,
'Full control':True,
'AOB control':False,
'Degree control':False}
for beta_factor in [0.5,1.2,1.5] :
#Block control optimization:
ising_analysis.gamma = 1.0
ising_analysis.tol = 1E-5
ising_analysis.max_mf_fp_iterations = 10000
ising_analysis.mf_fp_init_state = spin_alignment*np.ones(len(ising_analysis.block_sizes))
ising_analysis.mf_fp_noisy = False
ising_analysis.max_mf_iim_iterations = 3000
ising_analysis.mf_iim_tolerance = 1E-8
ising_analysis.mf_iim_step_size = 1.0
ising_analysis.mf_iim_init_control = 'uniform'
ising_analysis.mf_iim_noisy = True
# Full control optimization:
ising_analysis.full_mf_system.gamma = 1.0
ising_analysis.full_mf_system.tol = 1E-5
ising_analysis.full_mf_system.max_mf_fp_iterations = 10000
ising_analysis.full_mf_system.mf_fp_init_state = spin_alignment * np.ones(len(ising_analysis.full_graph))
ising_analysis.full_mf_system.mf_fp_noisy = False
ising_analysis.full_mf_system.max_mf_iim_iterations = 1000
ising_analysis.full_mf_system.mf_iim_step_size = 50.0
ising_analysis.full_mf_system.mf_iim_tolerance = 1E-6
ising_analysis.full_mf_system.mf_iim_init_control = 'uniform'
ising_analysis.full_mf_system.mf_iim_noisy = False
#MC Parameters
ising_analysis.T = 20000
ising_analysis.T_Burn = 10000
ising_analysis.MC_Runs = MC_Sims
ising_analysis.eval_initial_state = spin_alignment * np.ones(len(ising_analysis.full_graph))
ising_analysis.H_sweep_data_fname = "Data/{}_data_spins{}_bf_{}".format(save_file_prefix,
round(spin_alignment, 0),beta_factor).replace('.', '-')
ising_analysis.H_sweep_diagnostics_fname = "Data/{}_diagnostics_spins{}_bf_{}".format(save_file_prefix,
round(spin_alignment, 0),beta_factor).replace('.', '-')
ising_analysis.make_h_sweep_data(beta_factor, H_vals)
ising_analysis.save_iim_eval_parameters("Data/{}_params.csv".format(save_file_prefix))
| 38.836957
| 113
| 0.68206
|
bb34e6aa8c55fe88d7a167d173a8ca05a5e0478e
| 1,723
|
py
|
Python
|
src/util/binary.py
|
zrthxn/CryptoGAN
|
e8dcdff0af4362d969874097516063f6bd39f5a1
|
[
"MIT"
] | 4
|
2021-07-09T03:06:10.000Z
|
2022-01-20T14:41:49.000Z
|
src/util/binary.py
|
meisme123/CryptoGAN
|
e8dcdff0af4362d969874097516063f6bd39f5a1
|
[
"MIT"
] | 1
|
2021-05-13T08:29:38.000Z
|
2021-05-13T08:29:38.000Z
|
src/util/binary.py
|
meisme123/CryptoGAN
|
e8dcdff0af4362d969874097516063f6bd39f5a1
|
[
"MIT"
] | 1
|
2021-07-09T03:07:39.000Z
|
2021-07-09T03:07:39.000Z
|
from torch.tensor import Tensor
from config import defaults
def str_to_binlist(text: str, values: tuple = (-1, 1), encoding: str = "utf-8"):
binlen = defaults[defaults["model"]]["blocksize"] // 8
byte = list()
if len(text) % binlen != 0:
text += "~"
if encoding == "utf-8":
byte = [ pad(bin(b).lstrip("0b")) for b in bytearray(text, "utf8") ]
elif encoding == "hex":
byte = [ pad(bin(b).lstrip("0b")) for b in bytearray.fromhex(text) ]
for i in range(0, len(byte), binlen):
binlist = list()
string = "".join([ byte[i + j] for j in range(binlen) ])
for _, s in enumerate(string):
binlist.append(float(values[0] if s == "0" else values[1]))
yield binlist
def str_to_bintensor(text: str, **kwargs):
return [ Tensor(b).unsqueeze(dim=0) for b in str_to_binlist(text, **kwargs) ]
def binlist_to_str(binlist: list, digest: str = "ascii", decision_point: int = 0):
string = list()
for byte in binlist:
if len(byte) % 8 != 0:
raise IndexError("Binary list must be divisible into bytes.")
for token in binlist:
token = [token[i:i + 8] for i in range(0, len(token), 8)]
for byte in token:
string.append("".join([
("0" if bit < decision_point else "1")
for bit in byte]))
for i, char in enumerate(string):
_int = int(char, 2)
if digest == "ascii":
try:
string[i] = _int.to_bytes((_int.bit_length() + 7) // 8, "big").decode()
except:
string[i] = hex(_int).lstrip("0x").rstrip("L")
elif digest == "hex":
string[i] = hex(_int).lstrip("0x").rstrip("L")
return "".join(string).replace("~", " ")
def pad(s: str):
for _ in range(len(s), 8, 1):
s = '0' + s
return s
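A hypothetical round trip through the helpers above; it assumes config.defaults resolves a blocksize such as 16 bits (two characters per block) for the selected model, so the 12-character string divides evenly into blocks.
if __name__ == "__main__":
    plain = "hello world!"
    blocks = list(str_to_binlist(plain))   # each block is a flat list of -1.0 / 1.0 bits
    restored = binlist_to_str(blocks)      # decision_point=0 maps -1 -> "0" and 1 -> "1"
    assert restored.rstrip() == plain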
| 28.716667
| 82
| 0.587928
|
d5f5dc94d63275795be8faeeb4a752d45d885315
| 545
|
py
|
Python
|
tests/doc/message_default_conf/conf.py
|
yosuke/sphinxcontrib-ros
|
929f708f3c837b80e039d81a4be681e142a5784c
|
[
"BSD-2-Clause"
] | 9
|
2015-10-04T02:59:28.000Z
|
2018-12-15T19:16:18.000Z
|
tests/doc/message_default_conf/conf.py
|
yosuke/sphinxcontrib-ros
|
929f708f3c837b80e039d81a4be681e142a5784c
|
[
"BSD-2-Clause"
] | 4
|
2015-11-09T01:47:38.000Z
|
2020-01-28T08:27:30.000Z
|
tests/doc/message_default_conf/conf.py
|
yosuke/sphinxcontrib-ros
|
929f708f3c837b80e039d81a4be681e142a5784c
|
[
"BSD-2-Clause"
] | 2
|
2019-07-19T18:30:51.000Z
|
2020-01-24T00:27:42.000Z
|
import os, sys
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__) + '/../../../src'))
from imp import reload; import sphinxcontrib; reload(sphinxcontrib)
master_doc = 'index'
extensions = ['sphinxcontrib.ros', 'sphinx.ext.intersphinx']
ros_base_path = ['/opt/ros/indigo/share']
#intersphinx_mapping = {'ros': ('file:///home/tamaki/work/sphinxros/doc/indigo/_build/html', None)}
# https://otamachan.github.io/sphinxros
import sphinx_rtd_theme
#html_theme = 'sphinx_rtd_theme'
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
| 45.416667
| 99
| 0.754128
|
b52eb00cc7ef26a93429f4034a375787070d8ca2
| 486
|
py
|
Python
|
py/mount_passwords.py
|
christophfranke/continuous-integration-tools
|
eeb77a624303f5b5971ea7110a8352ff72feb312
|
[
"MIT"
] | null | null | null |
py/mount_passwords.py
|
christophfranke/continuous-integration-tools
|
eeb77a624303f5b5971ea7110a8352ff72feb312
|
[
"MIT"
] | null | null | null |
py/mount_passwords.py
|
christophfranke/continuous-integration-tools
|
eeb77a624303f5b5971ea7110a8352ff72feb312
|
[
"MIT"
] | null | null | null |
from modules import engine
from modules import out
from modules import run
@engine.prepare_and_clean
def execute():
out.log("Mounting passwords...")
run.local('mkdir -p ' + engine.PASSWORD_DIRECTORY)
run.local('chmod 700 ' + engine.PASSWORD_DIRECTORY)
run.local('sshfs macmini@Mac-minis-Mac-mini.local:Zugangsdaten ' + engine.PASSWORD_DIRECTORY + ' -o volname=Zugangsdaten')
def help():
out.log("Mount the passwords that are stored on a remote machine.", 'help')
| 32.4
| 126
| 0.73251
|
2e4a520e7bbcc1bd3ff2d0a94e7a7adc62702653
| 9,248
|
py
|
Python
|
Packs/FeedElasticsearch/Integrations/FeedElasticsearch/FeedElasticsearch_test.py
|
ddi-danielsantander/content
|
67e2edc404f50c332d928dbdbce00a447bb5532f
|
[
"MIT"
] | 1
|
2020-04-19T11:05:42.000Z
|
2020-04-19T11:05:42.000Z
|
Packs/FeedElasticsearch/Integrations/FeedElasticsearch/FeedElasticsearch_test.py
|
ddi-danielsantander/content
|
67e2edc404f50c332d928dbdbce00a447bb5532f
|
[
"MIT"
] | null | null | null |
Packs/FeedElasticsearch/Integrations/FeedElasticsearch/FeedElasticsearch_test.py
|
ddi-danielsantander/content
|
67e2edc404f50c332d928dbdbce00a447bb5532f
|
[
"MIT"
] | 1
|
2020-05-27T15:26:48.000Z
|
2020-05-27T15:26:48.000Z
|
class MockHit:
def __init__(self, hit_val):
self._hit_val = hit_val
def to_dict(self):
return self._hit_val
"""MOCKED RESPONSES"""
CUSTOM_VAL_KEY = 'indicatorValue'
CUSTOM_TYPE_KEY = 'indicatorType'
CUSTOM_HIT = {
CUSTOM_VAL_KEY: '5.5.5.5',
CUSTOM_TYPE_KEY: 'IP'
}
PARSED_CUSTOM_HIT = {
'indicatorValue': '5.5.5.5',
'indicatorType': 'IP',
'value': '5.5.5.5',
'rawJSON': {
'indicatorValue': '5.5.5.5',
'indicatorType': 'IP',
'value': '5.5.5.5'
},
'type': 'IP'
}
PARSED_INSIGHT_HIT = {
"id": "1d5920f4b44b27a802bd77c4f0536f5a",
"version": 3,
"modified": "2020-01-26T14:16:44.641927Z",
"sortValues": None,
"account": "acc1",
"type": "Domain",
"value": "google.com",
"rawName": "google.com",
"createdTime": "2020-01-26T16:16:18.801688+02:00",
"investigationIDs": [
"57ec1eb4-454e-4561-8059-a9beb3f830c0"
],
"investigationsCount": 1,
"sourceInstances": [
"VirusTotal"
],
"sourceBrands": [
"VirusTotal"
],
"isIoc": True,
"lastSeen": "2020-01-26T16:16:18.801508+02:00",
"firstSeen": "2020-01-26T16:16:18.801509+02:00",
"lastSeenEntryID": "4@57ec1eb4-454e-4561-8059-a9beb3f830c0",
"firstSeenEntryID": "4@57ec1eb4-454e-4561-8059-a9beb3f830c0",
"lastReputationRun": "2020-01-26T16:16:13.219824+02:00",
"isShared": True,
"calculatedTime": "2020-01-26T16:16:18.801508+02:00",
"score": 1,
"manualSetTime": "0001-01-01T00:00:00Z",
"context": [],
"comment": "",
"CustomFields": None,
"manuallyEditedFields": None,
"modifiedTime": "2020-01-26T16:16:09.855733+02:00",
"moduleToFeedMap": {
"VirusTotal.VirusTotal": {
"reliability": "A+ - 3rd party enrichment",
"rawJSON": None,
"fetchTime": "2020-01-26T16:16:09.855733+02:00",
"sourceBrand": "VirusTotal",
"sourceInstance": "VirusTotal",
"expirationPolicy": "indicatorType",
"expirationInterval": 0,
"expiration": "0001-01-01T00:00:00Z",
"ExpirationSource": None,
"bypassExclusionList": False,
"type": "domain",
"value": "google.com",
"score": 1,
"timestamp": "0001-01-01T00:00:00Z",
"lastSeen": "0001-01-01T00:00:00Z",
"firstSeen": "0001-01-01T00:00:00Z",
"CustomFields": None,
"modifiedTime": "0001-01-01T00:00:00Z",
"isEnrichment": True
},
"Whois.Whois": {
"reliability": "A+ - 3rd party enrichment",
"rawJSON": None,
"fetchTime": "2020-01-26T16:16:09.855733+02:00",
"sourceBrand": "VirusTotal",
"sourceInstance": "VirusTotal",
"expirationPolicy": "indicatorType",
"expirationInterval": 0,
"expiration": "0001-01-01T00:00:00Z",
"ExpirationSource": None,
"bypassExclusionList": False,
"type": "domain",
"value": "google.com",
"score": 1,
"timestamp": "0001-01-01T00:00:00Z",
"lastSeen": "0001-01-01T00:00:00Z",
"firstSeen": "0001-01-01T00:00:00Z",
"CustomFields": None,
"modifiedTime": "0001-01-01T00:00:00Z",
"isEnrichment": True
},
"Demisto.Demisto": {
"reliability": "A+ - 3rd party enrichment",
"rawJSON": None,
"fetchTime": "2020-01-26T16:16:09.855733+02:00",
"sourceBrand": "VirusTotal",
"sourceInstance": "VirusTotal",
"expirationPolicy": "indicatorType",
"expirationInterval": 0,
"expiration": "0001-01-01T00:00:00Z",
"ExpirationSource": None,
"bypassExclusionList": False,
"type": "domain",
"value": "google.com",
"score": 1,
"timestamp": "0001-01-01T00:00:00Z",
"lastSeen": "0001-01-01T00:00:00Z",
"firstSeen": "0001-01-01T00:00:00Z",
"CustomFields": None,
"modifiedTime": "0001-01-01T00:00:00Z",
"isEnrichment": False
}
},
"expiration": "0001-01-01T00:00:00Z",
"expirationStatus": "active",
"expirationSource": None
}
FEED_IOC_KEYS = (
'rawJSON',
'fetchTime',
'sourceBrand',
'sourceInstance',
'expirationPolicy',
'expirationInterval',
'expiration',
'ExpirationSource',
'bypassExclusionList',
'type',
'value',
'score',
'timestamp',
'lastSeen',
'firstSeen',
'CustomFields',
'modifiedTime',
'isEnrichment'
)
def test_hit_to_indicator():
import FeedElasticsearch as esf
ioc = esf.hit_to_indicator(MockHit(CUSTOM_HIT), CUSTOM_VAL_KEY, CUSTOM_TYPE_KEY, None)
assert ioc == PARSED_CUSTOM_HIT
no_type_hit = dict(CUSTOM_HIT)
no_type_hit[CUSTOM_TYPE_KEY] = ''
ioc = esf.hit_to_indicator(MockHit(no_type_hit), CUSTOM_VAL_KEY, CUSTOM_TYPE_KEY, 'IP')
assert ioc['type'] == 'IP'
assert ioc[CUSTOM_TYPE_KEY] == ''
def test_extract_indicators_from_insight_hit(mocker):
import FeedElasticsearch as esf
mocker.patch.object(esf, 'hit_to_indicator', return_value=dict(PARSED_INSIGHT_HIT))
ioc_lst, ioc_enrch_lst = esf.extract_indicators_from_insight_hit(PARSED_INSIGHT_HIT)
    # entries in moduleToFeedMap with isEnrichment: True go to ioc_enrch_lst, not ioc_lst
    assert len(ioc_lst) == 1
    assert len(ioc_enrch_lst[0]) == 2
    assert ioc_lst[0].get('value')
    # entries in moduleToFeedMap with isEnrichment: False should stay in ioc_lst
    assert ioc_lst[0].get('moduleToFeedMap').get('Demisto.Demisto')
    assert ioc_lst[0].get('moduleToFeedMap').get('VirusTotal.VirusTotal') is None
    assert set(FEED_IOC_KEYS).issubset(ioc_enrch_lst[0][0])
    assert set(FEED_IOC_KEYS).issubset(ioc_enrch_lst[0][1])
def test_extract_indicators_from_generic_hit(mocker):
import FeedElasticsearch as esf
mocker.patch.object(esf, 'hit_to_indicator', return_value=PARSED_CUSTOM_HIT)
ioc_lst = esf.extract_indicators_from_generic_hit(CUSTOM_HIT, CUSTOM_VAL_KEY, CUSTOM_TYPE_KEY, None)
assert ioc_lst == [PARSED_CUSTOM_HIT]
def test_create_enrichment_batches_one_indicator(mocker):
import FeedElasticsearch as esf
mocker.patch.object(esf, 'hit_to_indicator', return_value=PARSED_INSIGHT_HIT)
_, ioc_enrch_lst = esf.extract_indicators_from_insight_hit(PARSED_INSIGHT_HIT)
ioc_enrch_lst_of_lsts = esf.create_enrichment_batches(ioc_enrch_lst)
assert len(ioc_enrch_lst_of_lsts) == 2
assert ioc_enrch_lst_of_lsts[0][0] == ioc_enrch_lst[0][0]
assert ioc_enrch_lst_of_lsts[1][0] == ioc_enrch_lst[0][1]
def test_create_enrichment_batches_mult_indicators():
import FeedElasticsearch as esf
ioc_enrch_lst = [
[1, 2, 3],
[4, 5],
[6, 7, 8, 9]
]
ioc_enrch_lst_of_lsts = esf.create_enrichment_batches(ioc_enrch_lst)
assert len(ioc_enrch_lst_of_lsts) == 4
assert ioc_enrch_lst_of_lsts[0] == [1, 4, 6]
assert ioc_enrch_lst_of_lsts[1] == [2, 5, 7]
assert ioc_enrch_lst_of_lsts[2] == [3, 8]
assert ioc_enrch_lst_of_lsts[3] == [9]
def test_elasticsearch_builder_called_with_username_password(mocker):
from elasticsearch import Elasticsearch
import FeedElasticsearch as esf
es_mock = mocker.patch.object(Elasticsearch, '__init__', return_value=None)
username = 'demisto'
password = 'mock'
client = esf.ElasticsearchClient(username=username, password=password)
client._elasticsearch_builder()
assert es_mock.call_args[1].get('http_auth') == (username, password)
assert es_mock.call_args[1].get('api_key') is None
def test_elasticsearch_builder_called_with_api_key(mocker):
from elasticsearch import Elasticsearch
import FeedElasticsearch as esf
es_mock = mocker.patch.object(Elasticsearch, '__init__', return_value=None)
api_id = 'demisto'
api_key = 'mock'
client = esf.ElasticsearchClient(api_key=api_key, api_id=api_id)
client._elasticsearch_builder()
assert es_mock.call_args[1].get('http_auth') is None
assert es_mock.call_args[1].get('api_key') == (api_id, api_key)
def test_elasticsearch_builder_called_with_no_creds(mocker):
from elasticsearch import Elasticsearch
import FeedElasticsearch as esf
es_mock = mocker.patch.object(Elasticsearch, '__init__', return_value=None)
client = esf.ElasticsearchClient()
client._elasticsearch_builder()
assert es_mock.call_args[1].get('http_auth') is None
assert es_mock.call_args[1].get('api_key') is None
def test_extract_api_from_username_password_empty():
import FeedElasticsearch as esf
assert esf.extract_api_from_username_password(None, None) == (None, None)
def test_extract_api_from_username_password_username_username():
import FeedElasticsearch as esf
assert esf.extract_api_from_username_password('username', 'password') == (None, None)
def test_extract_api_from_username_password_username_api_key():
import FeedElasticsearch as esf
username = esf.API_KEY_PREFIX + 'api_id'
assert esf.extract_api_from_username_password(username, 'api_key') == ('api_id', 'api_key')
| 34.766917
| 104
| 0.648248
|
64ee1b69f879df17d80bc655ff5cff7faf8735c5
| 905
|
py
|
Python
|
man/genlist-from-docbooks.py
|
charles-rose/nvme-stas
|
7af1f489d98dad53d7bbb697eccc53e5c08dbfcc
|
[
"Apache-2.0"
] | null | null | null |
man/genlist-from-docbooks.py
|
charles-rose/nvme-stas
|
7af1f489d98dad53d7bbb697eccc53e5c08dbfcc
|
[
"Apache-2.0"
] | null | null | null |
man/genlist-from-docbooks.py
|
charles-rose/nvme-stas
|
7af1f489d98dad53d7bbb697eccc53e5c08dbfcc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import glob
from lxml import etree
exclude_list = list(glob.glob('standard-*.xml'))
PARSER = etree.XMLParser(remove_blank_text=True)
def extract_data(fname):
et = etree.parse(fname, PARSER)
manvolnum = et.find('./refmeta/manvolnum')
manvolnum = manvolnum.text if manvolnum is not None else 0
deps = set()
for elem in et.iter():
keys = elem.keys()
if 'href' in keys and 'xpointer' in keys:
dep = elem.values()[0]
if dep in exclude_list:
deps.add(dep)
return manvolnum, list(deps)
output = list()
file_list = glob.glob('*.xml')
for fname in file_list:
if fname not in exclude_list:
stem = fname[0:-4]
manvolnum, deps = extract_data(fname)
deps = ':'.join(deps) if deps else 'None'
output.append(','.join([stem, manvolnum, fname, deps]))
print(';'.join(output))
| 26.617647
| 63
| 0.623204
|
3c4764ea302b24cfff8763bf4846bea8de10fcd0
| 4,941
|
py
|
Python
|
src/dsfs/network.py
|
dbradf/dsfs
|
efcd08ca56b4e14b926cc824f15474b04a9d94cb
|
[
"Apache-2.0"
] | null | null | null |
src/dsfs/network.py
|
dbradf/dsfs
|
efcd08ca56b4e14b926cc824f15474b04a9d94cb
|
[
"Apache-2.0"
] | null | null | null |
src/dsfs/network.py
|
dbradf/dsfs
|
efcd08ca56b4e14b926cc824f15474b04a9d94cb
|
[
"Apache-2.0"
] | null | null | null |
from collections import deque, Counter
import random
from typing import NamedTuple, Dict, List, Tuple
import tqdm
from dsfs.linalg.matrix import Matrix, make_matrix, shape
from dsfs.linalg.vector import Vector, dot, magnitude, distance
class User(NamedTuple):
id: int
name: str
users = [
User(0, "Hero"),
User(1, "Dunn"),
User(2, "Sue"),
User(3, "Chi"),
User(4, "Thor"),
User(5, "Clive"),
User(6, "Hicks"),
User(7, "Devin"),
User(8, "Kate"),
User(9, "Klein"),
]
friend_pairs = [
(0, 1),
(0, 2),
(1, 2),
(1, 3),
(2, 3),
(3, 4),
(4, 5),
(5, 6),
(5, 7),
(6, 8),
(7, 8),
(8, 9),
]
endorsements = [
(0, 1),
(1, 0),
(0, 2),
(2, 0),
(1, 2),
(2, 1),
(1, 3),
(2, 3),
(3, 4),
(5, 4),
(5, 6),
(7, 5),
(6, 8),
(8, 7),
(8, 9),
]
Friendships = Dict[int, List[int]]
def build_friendships(users: List[User]) -> Friendships:
friendships: Friendships = {user.id: [] for user in users}
for i, j in friend_pairs:
friendships[i].append(j)
friendships[j].append(i)
return friendships
Path = List[int]
def shortest_path_from(from_user_id: int, friendships: Friendships) -> Dict[int, List[Path]]:
shortest_paths_to: Dict[int, List[Path]] = {from_user_id: [[]]}
frontier = deque((from_user_id, friend_id) for friend_id in friendships[from_user_id])
while frontier:
prev_user_id, user_id = frontier.popleft()
paths_to_prev_user = shortest_paths_to[prev_user_id]
new_paths_to_user = [path + [user_id] for path in paths_to_prev_user]
old_paths_to_user = shortest_paths_to.get(user_id, [])
if old_paths_to_user:
min_path_length = len(old_paths_to_user[0])
else:
min_path_length = float("inf")
new_paths_to_user = [
path
for path in new_paths_to_user
if len(path) <= min_path_length and path not in old_paths_to_user
]
shortest_paths_to[user_id] = old_paths_to_user + new_paths_to_user
frontier.extend(
(user_id, friend_id)
for friend_id in friendships[user_id]
if friend_id not in shortest_paths_to
)
return shortest_paths_to
def betweenness_centrality(users, friendships: Friendships):
shortest_paths = {user.id: shortest_path_from(user.id, friendships) for user in users}
betweenness = {user.id: 0.0 for user in users}
for source in users:
for target_id, paths in shortest_paths[source.id].items():
if source.id < target_id:
num_paths = len(paths)
contrib = 1 / num_paths
for path in paths:
for between_id in path:
if between_id not in [source.id, target_id]:
betweenness[between_id] += contrib
return betweenness
def farness(user_id: int, shortest_paths) -> float:
return sum(len(paths[0]) for paths in shortest_paths[user_id].values())
def closeness_centrality(users, friendships):
shortest_paths = {user.id: shortest_path_from(user.id, friendships) for user in users}
return {user.id: 1 / farness(user.id, shortest_paths) for user in users}
def matrix_times_matrix(m1: Matrix, m2: Matrix) -> Matrix:
nr1, nc1 = shape(m1)
nr2, nc2 = shape(m2)
assert nc1 == nr2
def entry_fn(i: int, j: int) -> float:
        return sum(m1[i][k] * m2[k][j] for k in range(nc1))
return make_matrix(nr1, nc2, entry_fn)
def matrix_times_vector(m: Matrix, v: Vector) -> Vector:
nr, nc = shape(m)
n = len(v)
assert nc == n
return [dot(row, v) for row in m]
def find_eigenvector(m: Matrix, tolerance: float = 0.00001) -> Tuple[Vector, float]:
guess = [random.random() for _ in m]
while True:
result = matrix_times_vector(m, guess)
norm = magnitude(result)
next_guess = [x / norm for x in result]
if distance(guess, next_guess) < tolerance:
return next_guess, norm
guess = next_guess
def make_adjacency_matrix(friend_pairs):
def entry_fn(i: int, j: int):
return 1 if (i, j) in friend_pairs or (j, i) in friend_pairs else 0
n = len(users)
return make_matrix(n, n, entry_fn)
def page_rank(
users: List[User],
endorsements: List[Tuple[int, int]],
damping: float = 0.85,
num_iters: int = 100,
) -> Dict[int, float]:
    outgoing_counts = Counter(source for source, target in endorsements)  # out-degree of each endorser
num_users = len(users)
pr = {user.id: 1 / num_users for user in users}
base_pr = (1 - damping) / num_users
    for _ in tqdm.trange(num_iters):
next_pr = {user.id: base_pr for user in users}
for source, target in endorsements:
next_pr[target] += damping * pr[source] / outgoing_counts[source]
pr = next_pr
return pr
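A small example run over the module-level toy network defined above; this is a sketch, not part of the original file, and page_rank will display a tqdm progress bar.
if __name__ == "__main__":
    friendships = build_friendships(users)
    print(betweenness_centrality(users, friendships))  # user id -> betweenness score
    print(closeness_centrality(users, friendships))    # user id -> closeness score
    print(page_rank(users, endorsements))              # user id -> PageRank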
| 25.209184
| 93
| 0.604938
|
f234e43e553b10b2559e610d6422d7847a4991f0
| 7,936
|
py
|
Python
|
google/cloud/datacatalog_v1/types/policytagmanagerserialization.py
|
LaudateCorpus1/python-datacatalog
|
7d8c3bc9bf540d3e5c0b0bd80a619792162c4fe2
|
[
"Apache-2.0"
] | 41
|
2020-05-12T08:00:04.000Z
|
2022-03-28T22:54:06.000Z
|
google/cloud/datacatalog_v1/types/policytagmanagerserialization.py
|
LaudateCorpus1/python-datacatalog
|
7d8c3bc9bf540d3e5c0b0bd80a619792162c4fe2
|
[
"Apache-2.0"
] | 114
|
2020-02-07T02:48:37.000Z
|
2022-03-23T00:46:01.000Z
|
google/cloud/datacatalog_v1/types/policytagmanagerserialization.py
|
LaudateCorpus1/python-datacatalog
|
7d8c3bc9bf540d3e5c0b0bd80a619792162c4fe2
|
[
"Apache-2.0"
] | 21
|
2020-01-31T21:14:59.000Z
|
2022-02-15T07:26:39.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.datacatalog_v1.types import policytagmanager
__protobuf__ = proto.module(
package="google.cloud.datacatalog.v1",
manifest={
"SerializedTaxonomy",
"SerializedPolicyTag",
"ReplaceTaxonomyRequest",
"ImportTaxonomiesRequest",
"InlineSource",
"CrossRegionalSource",
"ImportTaxonomiesResponse",
"ExportTaxonomiesRequest",
"ExportTaxonomiesResponse",
},
)
class SerializedTaxonomy(proto.Message):
r"""A nested protocol buffer that represents a taxonomy and the
hierarchy of its policy tags. Used for taxonomy replacement,
import, and export.
Attributes:
display_name (str):
Required. Display name of the taxonomy. At
most 200 bytes when encoded in UTF-8.
description (str):
Description of the serialized taxonomy. At
most 2000 bytes when encoded in UTF-8. If not
set, defaults to an empty description.
policy_tags (Sequence[google.cloud.datacatalog_v1.types.SerializedPolicyTag]):
Top level policy tags associated with the
taxonomy, if any.
activated_policy_types (Sequence[google.cloud.datacatalog_v1.types.Taxonomy.PolicyType]):
A list of policy types that are activated per
taxonomy.
"""
display_name = proto.Field(proto.STRING, number=1,)
description = proto.Field(proto.STRING, number=2,)
policy_tags = proto.RepeatedField(
proto.MESSAGE, number=3, message="SerializedPolicyTag",
)
activated_policy_types = proto.RepeatedField(
proto.ENUM, number=4, enum=policytagmanager.Taxonomy.PolicyType,
)
class SerializedPolicyTag(proto.Message):
r"""A nested protocol buffer that represents a policy tag and all
its descendants.
Attributes:
policy_tag (str):
Resource name of the policy tag.
This field is ignored when calling ``ImportTaxonomies``.
display_name (str):
Required. Display name of the policy tag. At
most 200 bytes when encoded in UTF-8.
description (str):
Description of the serialized policy tag. At
most 2000 bytes when encoded in UTF-8. If not
set, defaults to an empty description.
child_policy_tags (Sequence[google.cloud.datacatalog_v1.types.SerializedPolicyTag]):
Children of the policy tag, if any.
"""
policy_tag = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
description = proto.Field(proto.STRING, number=3,)
child_policy_tags = proto.RepeatedField(
proto.MESSAGE, number=4, message="SerializedPolicyTag",
)
class ReplaceTaxonomyRequest(proto.Message):
r"""Request message for
[ReplaceTaxonomy][google.cloud.datacatalog.v1.PolicyTagManagerSerialization.ReplaceTaxonomy].
Attributes:
name (str):
Required. Resource name of the taxonomy to
update.
serialized_taxonomy (google.cloud.datacatalog_v1.types.SerializedTaxonomy):
Required. Taxonomy to update along with its
child policy tags.
"""
name = proto.Field(proto.STRING, number=1,)
serialized_taxonomy = proto.Field(
proto.MESSAGE, number=2, message="SerializedTaxonomy",
)
class ImportTaxonomiesRequest(proto.Message):
r"""Request message for
[ImportTaxonomies][google.cloud.datacatalog.v1.PolicyTagManagerSerialization.ImportTaxonomies].
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
parent (str):
Required. Resource name of project that the
imported taxonomies will belong to.
inline_source (google.cloud.datacatalog_v1.types.InlineSource):
Inline source taxonomy to import.
This field is a member of `oneof`_ ``source``.
cross_regional_source (google.cloud.datacatalog_v1.types.CrossRegionalSource):
Cross-regional source taxonomy to import.
This field is a member of `oneof`_ ``source``.
"""
parent = proto.Field(proto.STRING, number=1,)
inline_source = proto.Field(
proto.MESSAGE, number=2, oneof="source", message="InlineSource",
)
cross_regional_source = proto.Field(
proto.MESSAGE, number=3, oneof="source", message="CrossRegionalSource",
)
class InlineSource(proto.Message):
r"""Inline source containing taxonomies to import.
Attributes:
taxonomies (Sequence[google.cloud.datacatalog_v1.types.SerializedTaxonomy]):
Required. Taxonomies to import.
"""
taxonomies = proto.RepeatedField(
proto.MESSAGE, number=1, message="SerializedTaxonomy",
)
class CrossRegionalSource(proto.Message):
r"""Cross-regional source used to import an existing taxonomy
into a different region.
Attributes:
taxonomy (str):
Required. The resource name of the source
taxonomy to import.
"""
taxonomy = proto.Field(proto.STRING, number=1,)
class ImportTaxonomiesResponse(proto.Message):
r"""Response message for
[ImportTaxonomies][google.cloud.datacatalog.v1.PolicyTagManagerSerialization.ImportTaxonomies].
Attributes:
taxonomies (Sequence[google.cloud.datacatalog_v1.types.Taxonomy]):
Imported taxonomies.
"""
taxonomies = proto.RepeatedField(
proto.MESSAGE, number=1, message=policytagmanager.Taxonomy,
)
class ExportTaxonomiesRequest(proto.Message):
r"""Request message for
[ExportTaxonomies][google.cloud.datacatalog.v1.PolicyTagManagerSerialization.ExportTaxonomies].
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
parent (str):
Required. Resource name of the project that
the exported taxonomies belong to.
taxonomies (Sequence[str]):
Required. Resource names of the taxonomies to
export.
serialized_taxonomies (bool):
Serialized export taxonomies that contain all
the policy tags as nested protocol buffers.
This field is a member of `oneof`_ ``destination``.
"""
parent = proto.Field(proto.STRING, number=1,)
taxonomies = proto.RepeatedField(proto.STRING, number=2,)
serialized_taxonomies = proto.Field(proto.BOOL, number=3, oneof="destination",)
class ExportTaxonomiesResponse(proto.Message):
r"""Response message for
[ExportTaxonomies][google.cloud.datacatalog.v1.PolicyTagManagerSerialization.ExportTaxonomies].
Attributes:
taxonomies (Sequence[google.cloud.datacatalog_v1.types.SerializedTaxonomy]):
List of taxonomies and policy tags as nested
protocol buffers.
"""
taxonomies = proto.RepeatedField(
proto.MESSAGE, number=1, message="SerializedTaxonomy",
)
__all__ = tuple(sorted(__protobuf__.manifest))
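A hypothetical construction of the messages above using proto-plus keyword initialization; the resource names are placeholders.
if __name__ == "__main__":
    taxonomy = SerializedTaxonomy(
        display_name="Sensitive data",
        description="Example taxonomy",
        policy_tags=[SerializedPolicyTag(display_name="PII")],
    )
    request = ReplaceTaxonomyRequest(
        name="projects/example-project/locations/us/taxonomies/123",
        serialized_taxonomy=taxonomy,
    )
    print(request)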
| 34.354978
| 110
| 0.686744
|
274bc3c9a286b1ede971d8084648e88a8f32d8e3
| 5,955
|
py
|
Python
|
conformer/models/conformer/modules.py
|
phanxuanphucnd/conformer
|
a14562ef118c7539ebeade469d0e164ffb5f57a1
|
[
"MIT"
] | 5
|
2021-11-05T17:15:42.000Z
|
2022-01-11T04:38:05.000Z
|
conformer/models/conformer/modules.py
|
phanxuanphucnd/conformer
|
a14562ef118c7539ebeade469d0e164ffb5f57a1
|
[
"MIT"
] | null | null | null |
conformer/models/conformer/modules.py
|
phanxuanphucnd/conformer
|
a14562ef118c7539ebeade469d0e164ffb5f57a1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
from torch import Tensor
from typing import Optional
from arizona_asr.models.activation import Swish, GLU
from arizona_asr.models.modules import Transpose, Linear
from arizona_asr.models.attention import RelativeMultiHeadAttention
from arizona_asr.models.transformer.embeddings import PositionalEncoding
from arizona_asr.models.convolutional import PointwiseConv1d, DepthwiseConv1d
class FeedForwardModule(nn.Module):
"""
    The Conformer feed-forward module follows the pre-norm residual scheme and applies layer normalization within the
    residual unit and on the input before the first linear layer. It also applies a Swish activation and dropout,
    which help regularize the network.
Args:
encoder_dim (int): Dimension of conformer encoder
expansion_factor (int): Expansion factor of feed forward module.
dropout_p (float): Ratio of dropout
device (torch.device): torch device (cuda or cpu)
Inputs: inputs
- **inputs** (batch, time, dim): Tensor contains input sequences
Outputs: outputs
        - **outputs** (batch, time, dim): Tensor produced by the feed forward module.
"""
def __init__(
self,
encoder_dim: int = 512,
expansion_factor: int = 4,
dropout_p: float = 0.1,
device: torch.device = 'cuda',
) -> None:
super(FeedForwardModule, self).__init__()
self.device = device
self.sequential = nn.Sequential(
nn.LayerNorm(encoder_dim),
Linear(encoder_dim, encoder_dim * expansion_factor, bias=True),
Swish(),
nn.Dropout(p=dropout_p),
Linear(encoder_dim * expansion_factor, encoder_dim, bias=True),
nn.Dropout(p=dropout_p),
)
def forward(self, inputs: Tensor) -> Tensor:
return self.sequential(inputs.to(self.device))
class ConformerConvModule(nn.Module):
"""
Conformer convolution module starts with a pointwise convolution and a gated linear unit (GLU).
This is followed by a single 1-D depthwise convolution layer. Batchnorm is deployed just after the convolution
to aid training deep models.
Args:
in_channels (int): Number of channels in the input
kernel_size (int or tuple, optional): Size of the convolving kernel Default: 31
dropout_p (float, optional): probability of dropout
device (torch.device): torch device (cuda or cpu)
Inputs: inputs
inputs (batch, time, dim): Tensor contains input sequences
Outputs: outputs
        outputs (batch, time, dim): Tensor produced by the conformer convolution module.
"""
def __init__(
self,
in_channels: int,
kernel_size: int = 31,
expansion_factor: int = 2,
dropout_p: float = 0.1,
device: torch.device = 'cuda',
) -> None:
super(ConformerConvModule, self).__init__()
        assert (kernel_size - 1) % 2 == 0, "kernel_size should be an odd number for 'SAME' padding"
assert expansion_factor == 2, "Currently, Only Supports expansion_factor 2"
self.device = device
self.sequential = nn.Sequential(
nn.LayerNorm(in_channels),
Transpose(shape=(1, 2)),
PointwiseConv1d(in_channels, in_channels * expansion_factor, stride=1, padding=0, bias=True),
GLU(dim=1),
DepthwiseConv1d(in_channels, in_channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2),
nn.BatchNorm1d(in_channels),
Swish(),
PointwiseConv1d(in_channels, in_channels, stride=1, padding=0, bias=True),
nn.Dropout(p=dropout_p),
)
def forward(self, inputs: Tensor) -> Tensor:
return self.sequential(inputs.to(self.device)).transpose(1, 2)
class MultiHeadedSelfAttentionModule(nn.Module):
"""
    Conformer employs multi-headed self-attention (MHSA) while integrating an important technique from Transformer-XL,
    the relative sinusoidal positional encoding scheme. The relative positional encoding allows the self-attention
    module to generalize better on different input lengths, and the resulting encoder is more robust to the variance of
    the utterance length. Conformer uses pre-norm residual units with dropout, which helps training
    and regularizing deeper models.
Args:
d_model (int): The dimension of model
num_heads (int): The number of attention heads.
dropout_p (float): probability of dropout
device (torch.device): torch device (cuda or cpu)
Inputs: inputs, mask
- **inputs** (batch, time, dim): Tensor containing input vector
- **mask** (batch, 1, time2) or (batch, time1, time2): Tensor containing indices to be masked
Returns:
        - **outputs** (batch, time, dim): Tensor produced by the relative multi-headed self-attention module.
"""
def __init__(
self,
d_model: int,
num_heads: int,
dropout_p: float = 0.1,
device: torch.device = 'cuda',
) -> None:
super(MultiHeadedSelfAttentionModule, self).__init__()
self.positional_encoding = PositionalEncoding(d_model)
self.layer_norm = nn.LayerNorm(d_model)
self.attention = RelativeMultiHeadAttention(d_model, num_heads, dropout_p)
self.dropout = nn.Dropout(p=dropout_p)
self.device = device
def forward(self, inputs: Tensor, mask: Optional[Tensor] = None) -> Tensor:
batch_size, seq_length, _ = inputs.size()
pos_embedding = self.positional_encoding(seq_length).to(self.device)
pos_embedding = pos_embedding.repeat(batch_size, 1, 1)
inputs = self.layer_norm(inputs)
outputs = self.attention(inputs, inputs, inputs, pos_embedding=pos_embedding, mask=mask)
return self.dropout(outputs)
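# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Minimal CPU smoke test for the three modules defined above, assuming the
# arizona_asr helpers imported at the top of this file are available. It only
# checks that each module preserves the (batch, time, dim) shape documented in
# the docstrings; the dimensions below are arbitrary example values.
if __name__ == '__main__':
    batch_size, seq_len, dim = 2, 16, 64
    dummy = torch.randn(batch_size, seq_len, dim)

    feed_forward = FeedForwardModule(encoder_dim=dim, device='cpu')
    conv_module = ConformerConvModule(in_channels=dim, device='cpu')
    attention_module = MultiHeadedSelfAttentionModule(d_model=dim, num_heads=4, device='cpu')

    print(feed_forward(dummy).shape)       # expected: torch.Size([2, 16, 64])
    print(conv_module(dummy).shape)        # expected: torch.Size([2, 16, 64])
    print(attention_module(dummy).shape)   # expected: torch.Size([2, 16, 64])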
| 42.535714
| 119
| 0.664652
|
3fa114358c99ff4f75ad6777b7e9291261cfa566
| 1,164
|
py
|
Python
|
nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py
|
nicholsn/nipype
|
6601b00aac39d17bb9fb3a6801f5a740a6ebb1e3
|
[
"BSD-3-Clause"
] | 1
|
2018-04-18T12:13:37.000Z
|
2018-04-18T12:13:37.000Z
|
nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py
|
ito-takuya/nipype
|
9099a5809487b55868cdec82a719030419cbd6ba
|
[
"BSD-3-Clause"
] | null | null | null |
nipype/interfaces/freesurfer/tests/test_auto_ExtractMainComponent.py
|
ito-takuya/nipype
|
9099a5809487b55868cdec82a719030419cbd6ba
|
[
"BSD-3-Clause"
] | 1
|
2021-09-08T14:31:47.000Z
|
2021-09-08T14:31:47.000Z
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.freesurfer.utils import ExtractMainComponent
def test_ExtractMainComponent_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
mandatory=True,
position=1,
),
out_file=dict(argstr='%s',
name_source='in_file',
name_template='%s.maincmp',
position=2,
),
terminal_output=dict(mandatory=True,
nohash=True,
),
)
inputs = ExtractMainComponent.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ExtractMainComponent_outputs():
output_map = dict(out_file=dict(),
)
outputs = ExtractMainComponent.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| 27.714286
| 78
| 0.676976
|
a76963548702aa0e7c6efde23e38741f722969da
| 1,181
|
py
|
Python
|
dcrnn_train.py
|
ScanLab-ossi/SDN-DCRNN
|
ed74497717fc7d0a6bcadc1b3d48e2e848a60175
|
[
"MIT"
] | null | null | null |
dcrnn_train.py
|
ScanLab-ossi/SDN-DCRNN
|
ed74497717fc7d0a6bcadc1b3d48e2e848a60175
|
[
"MIT"
] | null | null | null |
dcrnn_train.py
|
ScanLab-ossi/SDN-DCRNN
|
ed74497717fc7d0a6bcadc1b3d48e2e848a60175
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import tensorflow as tf
import yaml
from lib.utils import load_pickle
from model.dcrnn_supervisor import DCRNNSupervisor
def main(args):
with open(args.config_filename) as f:
        # Explicit Loader keeps this call working on newer PyYAML releases.
        supervisor_config = yaml.load(f, Loader=yaml.FullLoader)
graph_pkl_filename = supervisor_config['data'].get('graph_pkl_filename')
adj_mx = load_pickle(graph_pkl_filename)
tf_config = tf.ConfigProto()
if args.use_cpu_only:
tf_config = tf.ConfigProto(device_count={'GPU': 0})
tf_config.gpu_options.allow_growth = True
with tf.Session(config=tf_config) as sess:
supervisor = DCRNNSupervisor(adj_mx=adj_mx, **supervisor_config)
supervisor.train(sess=sess)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config_filename', type=str,
help='Configuration filename for the model.')
parser.add_argument('--use_cpu_only', default=False, type=bool, help='Set to true to only use cpu.')
args = parser.parse_args()
main(args)
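# --- Hedged usage note (added for illustration; not part of the original script) ---
# Example invocation with a hypothetical config path:
#   python dcrnn_train.py --config_filename=data/dcrnn_config.yaml
# The YAML file is expected to contain a 'data' section with a
# 'graph_pkl_filename' entry pointing at the pickled adjacency matrix; all
# top-level keys of the YAML are forwarded to DCRNNSupervisor as keyword
# arguments.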
| 31.918919
| 104
| 0.701948
|
f2679a50f274f52887565ed9aa0f47ebae43d55e
| 10,063
|
py
|
Python
|
pl_bolts/datamodules/experience_source.py
|
ahmedhshahin/pytorch-lightning-bolts
|
9298367968955095db756a4864eee21e0f902ac3
|
[
"Apache-2.0"
] | null | null | null |
pl_bolts/datamodules/experience_source.py
|
ahmedhshahin/pytorch-lightning-bolts
|
9298367968955095db756a4864eee21e0f902ac3
|
[
"Apache-2.0"
] | null | null | null |
pl_bolts/datamodules/experience_source.py
|
ahmedhshahin/pytorch-lightning-bolts
|
9298367968955095db756a4864eee21e0f902ac3
|
[
"Apache-2.0"
] | 1
|
2021-03-24T15:13:02.000Z
|
2021-03-24T15:13:02.000Z
|
"""
Datamodules for RL models that rely on experiences generated during training
Based on implementations found here: https://github.com/Shmuma/ptan/blob/master/ptan/experience.py
"""
from abc import ABC
from collections import deque, namedtuple
from typing import Callable, Iterable, List, Tuple
import torch
from gym import Env
from torch.utils.data import IterableDataset
# Datasets
Experience = namedtuple(
"Experience", field_names=["state", "action", "reward", "done", "new_state"]
)
class ExperienceSourceDataset(IterableDataset):
"""
Basic experience source dataset. Takes a generate_batch function that returns an iterator.
    The logic for the experience source and how the batch is generated is defined in the Lightning model itself
"""
def __init__(self, generate_batch: Callable):
self.generate_batch = generate_batch
def __iter__(self) -> Iterable:
iterator = self.generate_batch()
return iterator
# Experience Sources
class BaseExperienceSource(ABC):
"""
Simplest form of the experience source
"""
def __init__(self, env, agent) -> None:
"""
Args:
env: Environment that is being used
agent: Agent being used to make decisions
"""
self.env = env
self.agent = agent
def runner(self) -> Experience:
"""Iterable method that yields steps from the experience source"""
raise NotImplementedError("ExperienceSource has no stepper method implemented")
class ExperienceSource(BaseExperienceSource):
"""
Experience source class handling single and multiple environment steps
"""
def __init__(self, env, agent, n_steps: int = 1) -> None:
"""
Args:
env: Environment that is being used
agent: Agent being used to make decisions
n_steps: Number of steps to return from each environment at once
"""
super().__init__(env, agent)
self.pool = env if isinstance(env, (list, tuple)) else [env]
self.exp_history_queue = deque()
self.n_steps = n_steps
self.total_steps = []
self.states = []
self.histories = []
self.cur_rewards = []
self.cur_steps = []
self.iter_idx = 0
self._total_rewards = []
self.init_environments()
def runner(self, device: torch.device) -> Tuple[Experience]:
"""Experience Source iterator yielding Tuple of experiences for n_steps. These come from the pool
of environments provided by the user.
Args:
device: current device to be used for executing experience steps
Returns:
Tuple of Experiences
"""
while True:
# get actions for all envs
actions = self.env_actions(device)
# step through each env
for env_idx, (env, action) in enumerate(zip(self.pool, actions)):
exp = self.env_step(env_idx, env, action)
history = self.histories[env_idx]
history.append(exp)
self.states[env_idx] = exp.new_state
self.update_history_queue(env_idx, exp, history)
# Yield all accumulated history tuples to model
while self.exp_history_queue:
yield self.exp_history_queue.popleft()
self.iter_idx += 1
def update_history_queue(self, env_idx, exp, history) -> None:
"""
        Updates the experience history queue with the latest experiences. If an experience step is in
        the done state, the history will be incrementally appended to the queue, removing the tail of the history
        each time.
Args:
env_idx: index of the environment
exp: the current experience
history: history of experience steps for this environment
"""
        # If there is a full history of steps, append the history to the queue
if len(history) == self.n_steps:
self.exp_history_queue.append(tuple(history))
if exp.done:
if 0 < len(history) < self.n_steps:
self.exp_history_queue.append(tuple(history))
# generate tail of history, incrementally append history to queue
while len(history) > 2:
history.popleft()
self.exp_history_queue.append(tuple(history))
# when there are only 2 experiences left in the history,
# append to the queue then update the env stats and reset the environment
if len(history) > 1:
self.update_env_stats(env_idx)
history.popleft()
self.exp_history_queue.append(tuple(history))
# Clear that last tail in the history once all others have been added to the queue
history.clear()
def init_environments(self) -> None:
"""
        For each environment in the pool, sets up lists for tracking the history of size n, the state, the current
        reward and the current step
"""
for env in self.pool:
self.states.append(env.reset())
self.histories.append(deque(maxlen=self.n_steps))
self.cur_rewards.append(0.0)
self.cur_steps.append(0)
def env_actions(self, device) -> List[List[int]]:
"""
For each environment in the pool, get the correct action
Returns:
List of actions for each env, with size (num_envs, action_size)
"""
actions = []
states_actions = self.agent(self.states, device)
assert len(self.states) == len(states_actions)
for idx, action in enumerate(states_actions):
actions.append(action if isinstance(action, list) else [action])
return actions
def env_step(self, env_idx: int, env: Env, action: List[int]) -> Experience:
"""
Carries out a step through the given environment using the given action
Args:
env_idx: index of the current environment
env: env at index env_idx
action: action for this environment step
Returns:
Experience tuple
"""
next_state, r, is_done, _ = env.step(action[0])
self.cur_rewards[env_idx] += r
self.cur_steps[env_idx] += 1
exp = Experience(state=self.states[env_idx], action=action[0], reward=r, done=is_done, new_state=next_state)
return exp
def update_env_stats(self, env_idx: int) -> None:
"""
To be called at the end of the history tail generation during the termination state. Updates the stats
tracked for all environments
Args:
env_idx: index of the environment used to update stats
"""
self._total_rewards.append(self.cur_rewards[env_idx])
self.total_steps.append(self.cur_steps[env_idx])
self.cur_rewards[env_idx] = 0
self.cur_steps[env_idx] = 0
self.states[env_idx] = self.pool[env_idx].reset()
def pop_total_rewards(self) -> List[float]:
"""
Returns the list of the current total rewards collected
Returns:
list of total rewards for all completed episodes for each environment since last pop
"""
rewards = self._total_rewards
if rewards:
self._total_rewards = []
self.total_steps = []
return rewards
def pop_rewards_steps(self):
"""
Returns the list of the current total rewards and steps collected
Returns:
list of total rewards and steps for all completed episodes for each environment since last pop
"""
res = list(zip(self._total_rewards, self.total_steps))
if res:
self._total_rewards, self.total_steps = [], []
return res
class DiscountedExperienceSource(ExperienceSource):
"""Outputs experiences with a discounted reward over N steps"""
def __init__(self, env: Env, agent, n_steps: int = 1, gamma: float = 0.99):
super().__init__(env, agent, (n_steps + 1))
self.gamma = gamma
self.steps = n_steps
def runner(self, device: torch.device) -> Experience:
"""
Iterates through experience tuple and calculate discounted experience
Args:
device: current device to be used for executing experience steps
Yields:
Discounted Experience
"""
for experiences in super().runner(device):
last_exp_state, tail_experiences = self.split_head_tail_exp(experiences)
total_reward = self.discount_rewards(tail_experiences)
yield Experience(state=experiences[0].state, action=experiences[0].action,
reward=total_reward, done=experiences[0].done, new_state=last_exp_state)
def split_head_tail_exp(self, experiences: Tuple[Experience]) -> Tuple[List, Tuple[Experience]]:
"""
Takes in a tuple of experiences and returns the last state and tail experiences based on
        whether the last state is the end of an episode
Args:
experiences: Tuple of N Experience
Returns:
last state (Array or None) and remaining Experience
"""
if experiences[-1].done and len(experiences) <= self.steps:
last_exp_state = experiences[-1].new_state
tail_experiences = experiences
else:
last_exp_state = experiences[-1].state
tail_experiences = experiences[:-1]
return last_exp_state, tail_experiences
def discount_rewards(self, experiences: Tuple[Experience]) -> float:
"""
Calculates the discounted reward over N experiences
Args:
experiences: Tuple of Experience
Returns:
total discounted reward
"""
total_reward = 0.0
for exp in reversed(experiences):
total_reward = (self.gamma * total_reward) + exp.reward
return total_reward
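# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Toy check of the n-step discounting in DiscountedExperienceSource, assuming
# the gym/torch imports at the top of this file resolve. The stub environment
# only exists to satisfy the constructor (init_environments calls reset()); it
# is never actually stepped.
if __name__ == '__main__':
    class _StubEnv:
        def reset(self):
            return 0

    source = DiscountedExperienceSource(env=_StubEnv(), agent=None, n_steps=3, gamma=0.9)
    tail = tuple(
        Experience(state=0, action=0, reward=r, done=False, new_state=0)
        for r in (1.0, 2.0, 3.0)
    )
    # discount_rewards folds from the newest reward backwards:
    # 1.0 + 0.9 * 2.0 + 0.9 ** 2 * 3.0 = 5.23
    print(source.discount_rewards(tail))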
| 34.111864
| 116
| 0.625062
|
912135c69210676089259583b0907ceb2abd5bff
| 1,772
|
py
|
Python
|
platform/core/polyaxon/options/registry/cleaning.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
platform/core/polyaxon/options/registry/cleaning.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
platform/core/polyaxon/options/registry/cleaning.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
from options import option_namespaces, option_subjects
from options.cache import LONG_CACHE_TTL
from options.option import NAMESPACE_DB_OPTION_MARKER, Option, OptionStores
from options.types import CONF_TYPES
CLEANING_INTERVALS_ACTIVITY_LOGS = '{}{}{}'.format(option_namespaces.CLEANING_INTERVALS,
NAMESPACE_DB_OPTION_MARKER,
option_subjects.ACTIVITY_LOGS)
CLEANING_INTERVALS_NOTIFICATIONS = '{}{}{}'.format(option_namespaces.CLEANING_INTERVALS,
NAMESPACE_DB_OPTION_MARKER,
option_subjects.NOTIFICATIONS)
CLEANING_INTERVALS_ARCHIVES = '{}{}{}'.format(option_namespaces.CLEANING_INTERVALS,
NAMESPACE_DB_OPTION_MARKER,
option_subjects.ARCHIVES)
class CleaningIntervalsOption(Option):
is_global = True
is_secret = False
is_optional = True
is_list = False
typing = CONF_TYPES.INT
store = OptionStores.DB_OPTION
options = None
cache_ttl = LONG_CACHE_TTL
class CleaningIntervalsActivityLogs(CleaningIntervalsOption):
key = CLEANING_INTERVALS_ACTIVITY_LOGS
default = 30
description = 'A cleaning interval for activity logs in days'
class CleaningIntervalsNotifications(CleaningIntervalsOption):
key = CLEANING_INTERVALS_NOTIFICATIONS
default = 30
description = 'A cleaning interval for notifications in days'
class CleaningIntervalsArchives(CleaningIntervalsOption):
key = CLEANING_INTERVALS_ARCHIVES
store = OptionStores.DB_OPTION
default = 7
description = 'A cleaning interval for archives in days'
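# --- Hedged note (added for illustration; not part of the original module) ---
# Each key above is just the namespace, the DB option marker and the subject
# concatenated via '{}{}{}'.format(namespace, marker, subject); the concrete
# namespace, marker and subject strings are defined in the options package and
# are not shown in this file.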
| 38.521739
| 88
| 0.6693
|
4ffbc659b6171acc21abbc96f91af0753ae7e47c
| 15,193
|
py
|
Python
|
recipe_modules/file/resources/fileutil.py
|
Acidburn0zzz/luci
|
d8993f4684839b58f5f966dd6273d1d8fd001eae
|
[
"Apache-2.0"
] | 1
|
2021-04-24T04:03:01.000Z
|
2021-04-24T04:03:01.000Z
|
recipe_modules/file/resources/fileutil.py
|
Acidburn0zzz/luci
|
d8993f4684839b58f5f966dd6273d1d8fd001eae
|
[
"Apache-2.0"
] | null | null | null |
recipe_modules/file/resources/fileutil.py
|
Acidburn0zzz/luci
|
d8993f4684839b58f5f966dd6273d1d8fd001eae
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2017 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Utility exporting basic filesystem operations.
This file was cut from "scripts/common/chromium_utils.py" at:
91310531c31fa645256b4fb5d44b460c42b3e151
"""
from __future__ import print_function
import argparse
import errno
import fnmatch
import glob2
import hashlib
import itertools
import json
import os
import shutil
import subprocess
import sys
import tempfile
import time
def _RmGlob(file_wildcard, root, include_hidden):
"""Removes files matching 'file_wildcard' in root and its subdirectories, if
  any exist.
An exception is thrown if root doesn't exist."""
wildcard = os.path.join(os.path.realpath(root), file_wildcard)
for item in glob2.glob(wildcard, include_hidden=include_hidden):
try:
os.remove(item)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def _RmContents(path):
if os.path.exists(path):
os.chmod(path, 0o770)
for p in (os.path.join(path, x) for x in os.listdir(path)):
if os.path.isdir(p):
_RmTree(p)
else:
os.unlink(p)
def _RmTree(path):
"""Recursively removes a directory, even if it's marked read-only.
Remove the directory located at path, if it exists.
shutil.rmtree() doesn't work on Windows if any of the files or directories
are read-only, which svn repositories and some .svn files are. We need to
be able to force the files to be writable (i.e., deletable) as we traverse
the tree.
Even with all this, Windows still sometimes fails to delete a file, citing
a permission error (maybe something to do with antivirus scans or disk
indexing). The best suggestion any of the user forums had was to wait a
bit and try again, so we do that too. It's hand-waving, but sometimes it
works. :/
"""
if not os.path.exists(path):
print('WARNING: Failed to find %s during rmtree. Ignoring.\n' % path)
return
if sys.platform == 'win32':
# Give up and use cmd.exe's rd command.
cmd = ['cmd.exe', '/c', 'rd', '/q', '/s', os.path.normcase(path)]
for _ in xrange(3):
print('RemoveDirectory running %s' % (' '.join(cmd)))
if not subprocess.call(cmd):
break
print(' Failed')
time.sleep(3)
return
# If we call "rmtree" on a file, just delete it.
if not os.path.isdir(path):
os.remove(path)
return
def RemoveWithRetry_non_win(rmfunc, path):
if os.path.islink(path):
return os.remove(path)
return rmfunc(path)
remove_with_retry = RemoveWithRetry_non_win
def RmTreeOnError(function, path, excinfo):
r"""This works around a problem whereby python 2.x on Windows has no ability
to check for symbolic links. os.path.islink always returns False. But
shutil.rmtree will fail if invoked on a symbolic link whose target was
deleted before the link. E.g., reproduce like this:
> mkdir test
> mkdir test\1
> mklink /D test\current test\1
> python -c "import chromium_utils; chromium_utils.RemoveDirectory('test')"
To avoid this issue, we pass this error-handling function to rmtree. If
we see the exact sort of failure, we ignore it. All other failures we re-
raise.
"""
exception_type = excinfo[0]
exception_value = excinfo[1]
# If shutil.rmtree encounters a symbolic link on Windows, os.listdir will
# fail with a WindowsError exception with an ENOENT errno (i.e., file not
# found). We'll ignore that error. Note that WindowsError is not defined
# for non-Windows platforms, so we use OSError (of which it is a subclass)
# to avoid lint complaints about an undefined global on non-Windows
# platforms.
if (function is os.listdir) and issubclass(exception_type, OSError):
if exception_value.errno == errno.ENOENT:
# File does not exist, and we're trying to delete, so we can ignore the
# failure.
print('WARNING: Failed to list %s during rmtree. Ignoring.\n' % path)
else:
raise
else:
raise
for root, dirs, files in os.walk(path, topdown=False):
# For POSIX: making the directory writable guarantees removability.
# Windows will ignore the non-read-only bits in the chmod value.
os.chmod(root, 0o770)
for name in files:
remove_with_retry(os.remove, os.path.join(root, name))
for name in dirs:
remove_with_retry(lambda p: shutil.rmtree(p, onerror=RmTreeOnError),
os.path.join(root, name))
remove_with_retry(os.rmdir, path)
def _EnsureDir(mode, dest):
if not os.path.isdir(dest):
if os.path.exists(dest):
raise OSError(errno.EEXIST, os.strerror(errno.EEXIST))
os.makedirs(dest, mode)
def _Glob(base, pattern, include_hidden):
base = os.path.realpath(base)
hits = glob2.glob(os.path.join(base, pattern), include_hidden=include_hidden)
if hits:
print('\n'.join(sorted((os.path.relpath(hit, start=base) for hit in hits))))
def _ListDir(base, recursive):
if recursive:
out = []
for dirpath, _, files in os.walk(base):
out.extend(os.path.relpath(os.path.join(dirpath, f), base) for f in files)
else:
out = os.listdir(base)
print('\n'.join(sorted(out)), end='')
def _Remove(path):
try:
os.remove(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def _Truncate(path, size_mb):
with open(path, 'w') as f:
f.truncate(size_mb * 1024 * 1024)
def _FlattenSingleDirectories(path):
assert os.path.isabs(path), 'nonabs path: %r' % (path,)
assert os.path.isdir(path), 'nondir path: %r' % (path,)
first_single_dir = None
print('flattening single directories in %r' % (path,))
for root, dirs, files in os.walk(path):
# if it's a single dir, we keep walking
if len(dirs) == 1 and not files:
if not first_single_dir:
first_single_dir = os.path.join(path, dirs[0])
continue
# otherwise we found some stuff!
if not first_single_dir:
# if we didn't find a first_single_dir, we're still in the base directory
# and don't have anything to do.
print('contents appears already flattened')
return 0
print('found contents at: %r' % (os.path.relpath(root, path),))
# first move the first_single_dir out of the way, in case there's
# a file/folder we need to move that has a conflicting name.
tmpname = tempfile.mktemp(dir=path)
print('moving root folder out of the way: %r -> %r' % (first_single_dir, tmpname))
os.rename(first_single_dir, tmpname)
for name in itertools.chain(dirs, files):
fullname = os.path.join(root, name).replace(first_single_dir, tmpname)
to = os.path.join(path, name)
print('mv %r %r' % (fullname, to))
os.rename(fullname, to)
print('moved %d dirs and %d files' % (len(dirs), len(files)))
print('rm -rf %r' % (tmpname,))
shutil.rmtree(tmpname)
return 0
def _FileHash(sha, rel_path, base_path):
path = os.path.join(base_path, rel_path)
with open(path, 'rb') as f:
sha.update(str(len(rel_path)))
sha.update(rel_path)
while True:
f_stream = f.read(4096)
if not f_stream:
break
sha.update(str(len(f_stream)))
sha.update(f_stream)
def _ComputeHashPaths(base_path, *rel_paths):
sha = hashlib.sha256()
for rel_path in rel_paths:
path = os.path.join(base_path, rel_path)
if os.path.isfile(path):
_FileHash(sha, rel_path, base_path)
elif os.path.isdir(path):
for root, dirs, files in os.walk(path, topdown=True):
dirs.sort() # ensure we walk dirs in sorted order
files.sort()
for f_name in files:
rel_file_path = os.path.relpath(os.path.join(root, f_name), base_path)
_FileHash(sha, rel_file_path, base_path)
print(sha.hexdigest())
return 0
def _CalculateHash(path):
sha = hashlib.sha256()
with open(path, 'rb') as f:
while True:
f_stream = f.read(4096)
if not f_stream:
break
sha.update(f_stream)
print(sha.hexdigest())
return 0
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('--json-output', required=True,
type=argparse.FileType('w'),
help="path to JSON output file")
subparsers = parser.add_subparsers()
# Subcommand: rmtree
subparser = subparsers.add_parser('rmtree',
help='Recursively remove a directory.')
subparser.add_argument('source', help='A path to remove.')
subparser.set_defaults(func=lambda opts: _RmTree(opts.source))
# Subcommand: rmcontents
subparser = subparsers.add_parser('rmcontents',
help='Recursively remove the contents of a directory.')
subparser.add_argument('source', help='The target directory.')
subparser.set_defaults(func=lambda opts: _RmContents(opts.source))
  # Subcommand: rmglob
  subparser = subparsers.add_parser('rmglob',
      help='Recursively remove files matching a wildcard expression.')
subparser.add_argument('root', help='The directory to search through.')
subparser.add_argument('wildcard', help='The wildcard expression to remove.')
subparser.add_argument('--hidden', action='store_true',
help='Include hidden files.')
subparser.set_defaults(func=lambda opts:
_RmGlob(opts.wildcard, opts.root, opts.hidden))
# Subcommand: copy
subparser = subparsers.add_parser('copy',
help='Copy one file to another. Behaves like shutil.copy().')
subparser.add_argument('source', help='The file to copy.')
subparser.add_argument('dest', help='The destination to copy to.')
subparser.set_defaults(func=lambda opts: shutil.copy(opts.source, opts.dest))
# Subcommand: copytree
subparser = subparsers.add_parser('copytree',
help='Recursively copy a file tree. Behaves like shutil.copytree().')
subparser.add_argument('--symlinks', action='store_true',
help='Copy symlinks as symlinks.')
subparser.add_argument('source', help='The directory to copy.')
subparser.add_argument('dest', help='The destination directory to copy to.')
subparser.set_defaults(
func=lambda opts: shutil.copytree(opts.source, opts.dest, opts.symlinks))
# Subcommand: move
subparser = subparsers.add_parser('move',
help='Moves/renames a file. Behaves like shutil.move().')
subparser.add_argument('source', help='The item to move.')
subparser.add_argument('dest', help='The destination name.')
subparser.set_defaults(
func=lambda opts: shutil.move(opts.source, opts.dest))
# Subcommand: glob
subparser = subparsers.add_parser('glob',
      help='Prints a list of absolute paths which match the pattern.')
  subparser.add_argument('base', help='The directory to glob in.')
  subparser.add_argument('pattern', help='The glob pattern to expand.')
subparser.add_argument('--hidden', action='store_true',
help='Include hidden files.')
subparser.set_defaults(func=lambda opts:
_Glob(opts.base, opts.pattern, opts.hidden))
# Subcommand: remove
subparser = subparsers.add_parser('remove',
help='Remove a file')
subparser.add_argument('source', help='The file to remove.')
subparser.set_defaults(func=lambda opts: _Remove(opts.source))
# Subcommand: listdir
subparser = subparsers.add_parser('listdir',
help='Print all entries in the given folder to stdout.')
subparser.add_argument('source', help='The dir to list.')
subparser.add_argument('--recursive', action='store_true',
help='Recurse into subdirectories.')
subparser.set_defaults(
func=lambda opts: _ListDir(opts.source, opts.recursive))
# Subcommand: ensure-directory
subparser = subparsers.add_parser('ensure-directory',
help='Ensures that the given path is a directory.')
subparser.add_argument('--mode', help='The octal mode of the directory.',
type=lambda s: int(s, 8))
subparser.add_argument('dest', help='The dir to ensure.')
subparser.set_defaults(func=lambda opts: _EnsureDir(opts.mode, opts.dest))
# Subcommand: filesizes
subparser = subparsers.add_parser('filesizes',
help='Prints a list for sizes in bytes (1 per line) for each given file')
subparser.add_argument('file', nargs='+', help='Path to a file')
subparser.set_defaults(
func=lambda opts: print('\n'.join(str(os.stat(f).st_size)
for f in opts.file)))
  # Subcommand: symlink
subparser = subparsers.add_parser('symlink',
help='Creates a symlink. Behaves like os.symlink.')
subparser.add_argument('source', help='The thing to link to.')
subparser.add_argument('link', help='The link to create.')
subparser.set_defaults(
func=lambda opts: os.symlink(opts.source, opts.link))
# Subcommand: truncate
subparser = subparsers.add_parser(
'truncate', help='Creates an empty file with specified size.')
subparser.add_argument('path', help='The path to the file.')
subparser.add_argument('size_mb', help='The size of the file in megabytes.',
type=int)
subparser.set_defaults(func=lambda opts: _Truncate(opts.path, opts.size_mb))
# Subcommand: flatten_single_directories
subparser = subparsers.add_parser(
'flatten_single_directories',
help=('Moves contents of single/dir/with/contents to the top level '
'directory.'))
subparser.add_argument('path', help='The path to flatten from.')
subparser.set_defaults(func=lambda opts: _FlattenSingleDirectories(opts.path))
# Subcommand: compute_hash
subparser = subparsers.add_parser(
'compute_hash',
help='Computes hash of provided absolute directories and/or files.')
subparser.add_argument('base_path', help='Base path to normalize all files.')
subparser.add_argument('rel_paths', nargs='+',
help='List of relative paths of directories '
'and/or files.')
subparser.set_defaults(func=lambda opts: _ComputeHashPaths(opts.base_path,
*opts.rel_paths))
# Subcommand: file_hash
subparser = subparsers.add_parser(
'file_hash',
help='Computes hash of a file in provided absolute path.')
subparser.add_argument('file_path', help='Absolute path for the file.')
subparser.set_defaults(func=lambda opts: _CalculateHash(opts.file_path))
# Parse arguments.
opts = parser.parse_args(args)
# Actually do the thing.
data = {
'ok': False,
'errno_name': '',
'message': '',
}
try:
opts.func(opts)
data['ok'] = True
except OSError as e:
data['errno_name'] = errno.errorcode[e.errno]
data['message'] = str(e)
except shutil.Error as e:
# Note that shutil.Error's "message" field can sometimes be a tuple, just
# render the entire exception as a string to be safe.
data['message'] = str(e)
except Exception as e:
data['message'] = 'UNKNOWN: %s' % e
with opts.json_output:
json.dump(data, opts.json_output)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
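# --- Hedged usage note (added for illustration; not part of the original script) ---
# Example invocation with hypothetical paths:
#   python fileutil.py --json-output /tmp/result.json rmtree /tmp/some_dir
# On success the JSON output contains {"ok": true, "errno_name": "", "message": ""}
# (key order may vary); on an OSError, 'ok' stays false and 'errno_name' /
# 'message' carry errno.errorcode[e.errno] and str(e) respectively.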
| 35.917258
| 86
| 0.679787
|
001a4551d9d979a77f27ee213af1616a5bdb3dc9
| 4,539
|
py
|
Python
|
libs/groupdocs_conversion_cloud/models/webp_convert_options.py
|
rocketbot-cl/pdf2word
|
e46f6f574f69aa744e300baf4802e426b71bf9b2
|
[
"MIT"
] | null | null | null |
libs/groupdocs_conversion_cloud/models/webp_convert_options.py
|
rocketbot-cl/pdf2word
|
e46f6f574f69aa744e300baf4802e426b71bf9b2
|
[
"MIT"
] | null | null | null |
libs/groupdocs_conversion_cloud/models/webp_convert_options.py
|
rocketbot-cl/pdf2word
|
e46f6f574f69aa744e300baf4802e426b71bf9b2
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd" file="WebpConvertOptions.py">
# Copyright (c) 2003-2019 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
from groupdocs_conversion_cloud.models import ImageConvertOptions
class WebpConvertOptions(ImageConvertOptions):
"""
Webp convert options
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'lossless': 'bool'
}
attribute_map = {
'lossless': 'Lossless'
}
def __init__(self, lossless=None, **kwargs): # noqa: E501
"""Initializes new instance of WebpConvertOptions""" # noqa: E501
self._lossless = None
if lossless is not None:
self.lossless = lossless
base = super(WebpConvertOptions, self)
base.__init__(**kwargs)
self.swagger_types.update(base.swagger_types)
self.attribute_map.update(base.attribute_map)
@property
def lossless(self):
"""
Gets the lossless. # noqa: E501
Indicates if the compression of the converted file will be lossless # noqa: E501
:return: The lossless. # noqa: E501
:rtype: bool
"""
return self._lossless
@lossless.setter
def lossless(self, lossless):
"""
Sets the lossless.
Indicates if the compression of the converted file will be lossless # noqa: E501
:param lossless: The lossless. # noqa: E501
:type: bool
"""
if lossless is None:
raise ValueError("Invalid value for `lossless`, must not be `None`") # noqa: E501
self._lossless = lossless
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, WebpConvertOptions):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
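# --- Hedged usage sketch (added for illustration; not part of the generated module) ---
# Minimal example, assuming the parent ImageConvertOptions can be constructed
# without extra arguments (as the **kwargs pass-through above suggests).
if __name__ == '__main__':
    options = WebpConvertOptions(lossless=True)
    print(options.to_dict().get('lossless'))  # expected: True
    print(options)                            # pretty-printed via __repr__/to_str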
| 33.131387
| 94
| 0.593743
|
bc9f9176bc326a0e935338d4d11476312b79c433
| 21,608
|
py
|
Python
|
src/test/tinc/tincrepo/mpp/models/test/sql_related/test_sql_test_case.py
|
rodel-talampas/gpdb
|
9c955e350334abbd922102f289f782697eb52069
|
[
"PostgreSQL",
"Apache-2.0"
] | 9
|
2018-04-20T03:31:01.000Z
|
2020-05-13T14:10:53.000Z
|
src/test/tinc/tincrepo/mpp/models/test/sql_related/test_sql_test_case.py
|
rodel-talampas/gpdb
|
9c955e350334abbd922102f289f782697eb52069
|
[
"PostgreSQL",
"Apache-2.0"
] | 36
|
2017-09-21T09:12:27.000Z
|
2020-06-17T16:40:48.000Z
|
src/test/tinc/tincrepo/mpp/models/test/sql_related/test_sql_test_case.py
|
rodel-talampas/gpdb
|
9c955e350334abbd922102f289f782697eb52069
|
[
"PostgreSQL",
"Apache-2.0"
] | 32
|
2017-08-31T12:50:52.000Z
|
2022-03-01T07:34:53.000Z
|
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import shutil
from contextlib import closing
from datetime import datetime
from StringIO import StringIO
import unittest2 as unittest
from unittest2.runner import _WritelnDecorator
from tinctest import TINCTestLoader
from tinctest import TINCTextTestResult
from mpp.models.mpp_tc import _MPPMetaClassType
from mpp.models.mpp_tc import MPPDUT
from mpp.models import SQLTestCase, SQLTestCaseException
# Since we overwrite optimizer_mode depending on product/version, force the internal variables to gpdb/4.3
# This will ensure that optimizer_mode both works as designed, and all the tests written for that works.
# _MPPMetaClassType.DUT = MPPDUT('gpdb', '4.3')
@unittest.skip('mock')
class MockSQLTestCase(SQLTestCase):
"""
@description test case with metadata
@created 2012-07-05 12:00:00
@modified 2012-07-05 12:00:02
@tags orca hashagg
@gucs gp_optimizer=on;gp_log_optimizer=on
@optimizer_mode ON
"""
def setUp(self):
pass
def test_explicit_definition(self):
pass
@unittest.skip('mock')
class MockSQLTemplateTestCase(SQLTestCase):
template_dir = 'template_dir'
template_subs = {'%PERCENTAGE%' : 'my_percent',
'&&' : 'my_amp',
'@AT' : 'my_at'}
@unittest.skip('mock')
class MockSQLTemplateTestCaseExplicit(SQLTestCase):
template_dir = 'template_dir'
template_subs = {'%PERCENTAGE%' : 'my_percent',
'&&' : 'my_amp',
'@AT' : 'my_at'}
@unittest.skip('mock')
class MockSQLTemplateTestCaseRegular(SQLTestCase):
template_dir = 'template_dir'
template_subs = {'%PERCENTAGE%' : 'my_percent',
'&&' : 'my_amp',
'@AT' : 'my_at'}
class MockMPPMetaClassTypeGPDB43(_MPPMetaClassType):
_MPPMetaClassType.DUT = MPPDUT('gpdb', '4.3')
@unittest.skip('mock')
class MockSQLTestCaseForOptimizerMode(SQLTestCase):
"""
@description test case with metadata
@created 2012-07-05 12:00:00
@modified 2012-07-05 12:00:02
@tags orca hashagg
@gucs gp_optimizer=on;gp_log_optimizer=on
@optimizer_mode on
"""
__metaclass__ = MockMPPMetaClassTypeGPDB43
pass
@unittest.skip('mock')
class MockSQLTestCaseForOptimizerModeBoth(SQLTestCase):
"""
@optimizer_mode both
"""
__metaclass__ = MockMPPMetaClassTypeGPDB43
pass
@unittest.skip('mock')
class MockSQLTestCaseInvalidOptimizerMode(SQLTestCase):
"""
@optimizer_mode invalid_value
"""
__metaclass__ = MockMPPMetaClassTypeGPDB43
pass
class MockMPPMetaClassTypeHAWQ(_MPPMetaClassType):
_MPPMetaClassType.DUT = MPPDUT('hawq', '1.1.0.0')
@unittest.skip('mock')
class MockSQLTestCaseOptimizerModeHAWQ(SQLTestCase):
__metaclass__ = MockMPPMetaClassTypeHAWQ
def test_optimizer_mode_both(self):
"""
@optimizer_mode both
"""
pass
def test_optimizer_mode_on(self):
"""
@optimizer_mode on
"""
pass
def test_optimizer_mode_off(self):
"""
@optimizer_mode off
"""
pass
class SQLTestCaseTests(unittest.TestCase):
def test_infer_metadata(self):
test_loader = TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCase)
test_case = None
for case in test_suite._tests:
if case.name == "MockSQLTestCase.test_query02":
test_case = case
self.assertNotEqual(test_case, None)
self.assertEqual(test_case.name, "MockSQLTestCase.test_query02")
self.assertEqual(test_case.author, 'kumara64')
self.assertEqual(test_case.description, 'test sql test case')
self.assertEqual(test_case.created_datetime, datetime.strptime('2012-07-05 12:00:00', '%Y-%m-%d %H:%M:%S'))
self.assertEqual(test_case.modified_datetime, datetime.strptime('2012-07-08 12:00:02', '%Y-%m-%d %H:%M:%S'))
self.assertEqual(test_case.tags, set(['orca', 'hashagg', 'executor']))
self.assertEqual(test_case.gucs, set(['gp_optimizer=on', 'gp_log_optimizer=on']))
def test_optimizer_mode_from_sql_file(self):
test_case = MockSQLTestCaseForOptimizerMode('test_query02')
        # sql file query02.sql has an overridden optimizer_mode
self.assertEqual(test_case.optimizer_mode, 'off')
def test_optimizer_mode_from_class(self):
test_case = MockSQLTestCaseForOptimizerMode('test_query03')
self.assertEqual(test_case.optimizer_mode, 'on')
def test_optimizer_mode_invalid_value(self):
with self.assertRaises(SQLTestCaseException) as cm:
test_case = MockSQLTestCaseInvalidOptimizerMode('test_query01')
def test_direct_instantiation(self):
test_case = MockSQLTestCase('test_query02')
self.assertEqual(test_case.name, "MockSQLTestCase.test_query02")
self.assertEqual(test_case.author, 'kumara64')
self.assertEqual(test_case.description, 'test sql test case')
self.assertEqual(test_case.created_datetime, datetime.strptime('2012-07-05 12:00:00', '%Y-%m-%d %H:%M:%S'))
self.assertEqual(test_case.modified_datetime, datetime.strptime('2012-07-08 12:00:02', '%Y-%m-%d %H:%M:%S'))
self.assertEqual(test_case.tags, set(['orca', 'hashagg', 'executor']))
def test_explicit_test_fixtures(self):
test_case = MockSQLTestCase('test_explicit_definition')
self.assertEqual(test_case.name, "MockSQLTestCase.test_explicit_definition")
self.assertEqual(test_case.author, 'balasr3')
self.assertEqual(test_case.description, 'test case with metadata')
self.assertEqual(test_case.created_datetime, datetime.strptime('2012-07-05 12:00:00', '%Y-%m-%d %H:%M:%S'))
self.assertEqual(test_case.modified_datetime, datetime.strptime('2012-07-05 12:00:02', '%Y-%m-%d %H:%M:%S'))
self.assertEqual(test_case.tags, set(['orca', 'hashagg']))
def test_explicit_test_fixtures_through_loading(self):
test_loader = TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCase)
# 4 tests for 3 sqls in the directory and 1 explicit test method
self.assertEqual(test_suite.countTestCases(), 4)
def test_optimizer_mode_both(self):
test_loader = TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseForOptimizerModeBoth)
for test in test_suite._tests:
            # Data provider should exist for query01 and query03.
            # query02 shouldn't have it, since its optimizer mode is overwritten with value 'off'
if test.name == "MockSQLTestCaseForOptimizerModeBoth.test_query01" or test.name == "MockSQLTestCaseForOptimizerModeBoth.test_query03":
self.assertEqual(test.optimizer_mode, "both")
self.assertEqual(test.data_provider, "optimizer_handling")
else:
self.assertNotEqual(test.optimizer_mode, "both")
self.assertTrue(test.data_provider is None)
def test_optimizer_mode_hawq(self):
"""
        Test whether optimizer_mode both is overridden in hawq to None
"""
test_case = MockSQLTestCaseOptimizerModeHAWQ('test_optimizer_mode_both')
self.assertIsNone(test_case.optimizer_mode)
test_case = MockSQLTestCaseOptimizerModeHAWQ('test_optimizer_mode_on')
self.assertEquals(test_case.optimizer_mode, 'on')
test_case = MockSQLTestCaseOptimizerModeHAWQ('test_optimizer_mode_off')
self.assertEquals(test_case.optimizer_mode, 'off')
class MockSQLTestCaseForSkip(SQLTestCase):
"""
@description test case to test skip tag
@created 2012-08-07 12:00:00
@modified 2012-08-07 12:00:02
"""
class SQLTestCaseSkipTests(unittest.TestCase):
def test_skip_tag_in_sql_file(self):
test_case = MockSQLTestCaseForSkip('test_query01')
self.assertEqual(test_case.name, "MockSQLTestCaseForSkip.test_query01")
self.assertEqual(test_case.skip, 'demonstrating skipping')
def test_skip_when_tag_in_sql_file(self):
test_loader = TINCTestLoader()
test_suite = test_loader.loadTestsFromTestCase(MockSQLTestCaseForSkip)
test_case = None
for case in test_suite._tests:
if case.name == "MockSQLTestCaseForSkip.test_query01":
test_case = case
self.assertNotEqual(test_case, None)
self.assertEqual(test_case.name, "MockSQLTestCaseForSkip.test_query01")
with closing(_WritelnDecorator(StringIO())) as buffer:
test_result = TINCTextTestResult(buffer, True, 1)
test_case.run(test_result)
self.assertEqual(test_result.testsRun, 1)
self.assertEqual(len(test_result.failures), 0)
self.assertEqual(len(test_result.skipped), 1)
self.assertEqual(len(test_result.errors), 0)
@unittest.skip('mock')
class MockSQLTestCaseForLoader(SQLTestCase):
@classmethod
def setUpClass(cls):
pass
class SQLTestLoaderTests(unittest.TestCase):
def test_load_implicit_python_from_name(self):
"""Test loadTestsFromName for a dynamically generated sql test method"""
test_loader = TINCTestLoader()
test_suite = test_loader.loadTestsFromName('mpp.models.test.sql_related.test_sql_test_case.MockSQLTestCaseForLoader.test_query01')
test_case = test_suite._tests[0]
self.assertEqual(test_case.name, "MockSQLTestCaseForLoader.test_query01")
self.assertEqual(test_case.author, 'lammin')
self.assertEqual(test_case.description, 'test sql test case')
self.assertEqual(test_case.created_datetime, datetime.strptime('2012-07-20 12:00:00', '%Y-%m-%d %H:%M:%S'))
self.assertEqual(test_case.modified_datetime, datetime.strptime('2012-07-20 12:00:02', '%Y-%m-%d %H:%M:%S'))
self.assertEqual(test_case.tags, set(['orca', 'hashagg', 'executor']))
def test_load_test_from_class_name(self):
"""Test loadTestsFromName for a class name"""
test_loader = TINCTestLoader()
test_suite = test_loader.loadTestsFromName('mpp.models.test.sql_related.test_sql_test_case.MockSQLTestCaseForLoader')
test_case = None
for my_test_case in test_suite._tests:
if my_test_case.name == 'MockSQLTestCaseForLoader.test_query01':
test_case = my_test_case
break
self.assertTrue(test_case is not None)
self.assertEqual(test_case.name, "MockSQLTestCaseForLoader.test_query01")
self.assertEqual(test_case.author, 'lammin')
self.assertEqual(test_case.description, 'test sql test case')
self.assertEqual(test_case.created_datetime, datetime.strptime('2012-07-20 12:00:00', '%Y-%m-%d %H:%M:%S'))
self.assertEqual(test_case.modified_datetime, datetime.strptime('2012-07-20 12:00:02', '%Y-%m-%d %H:%M:%S'))
self.assertEqual(test_case.tags, set(['orca', 'hashagg', 'executor']))
def test_load_test_from_class_name_with_supplementary_sqls(self):
"""Test loadTestsFromName for a class name"""
test_loader = TINCTestLoader()
test_suite = test_loader.loadTestsFromName('mpp.models.test.sql_related.test_sql_test_case.MockSQLTestCaseForLoader')
# 3 tests for 3 sql tests in the current directory.
self.assertEquals(len(test_suite._tests), 3)
for test_case in test_suite._tests:
if test_case.name == 'MockSQLTestCaseForLoader.test_query03':
break
self.assertEqual(test_case.name, "MockSQLTestCaseForLoader.test_query03")
self.assertEqual(test_case.author, 'balasr3')
self.assertEqual(test_case.description, 'test sql test case sql')
self.assertEqual(test_case.created_datetime, datetime.strptime('2012-07-20 12:00:00', '%Y-%m-%d %H:%M:%S'))
self.assertEqual(test_case.modified_datetime, datetime.strptime('2012-07-20 12:00:02', '%Y-%m-%d %H:%M:%S'))
self.assertEqual(test_case.tags, set(['orca', 'hashagg', 'executor']))
class SQLTemplateTests(unittest.TestCase):
def test_templates_regular_sql(self):
"""Test loadTestsFromName for a dynamically generated sql test method."""
test_loader = TINCTestLoader()
test_suite = test_loader.loadTestsFromName('mpp.models.test.sql_related.test_sql_test_case.MockSQLTemplateTestCaseRegular.test_query01')
test_case = test_suite._tests[0]
# Non-template test case should work as is...
self.assertEqual(test_case.name, "MockSQLTemplateTestCaseRegular.test_query01")
self.assertEqual(test_case.author, 'lammin')
self.assertEqual(test_case.description, 'test sql test case')
self.assertEqual(test_case.created_datetime, datetime.strptime('2012-07-20 12:00:00', '%Y-%m-%d %H:%M:%S'))
self.assertEqual(test_case.modified_datetime, datetime.strptime('2012-07-20 12:00:02', '%Y-%m-%d %H:%M:%S'))
self.assertEqual(test_case.tags, set(['orca', 'hashagg', 'executor']))
def test_templates_template_sql_file(self):
"""Test loadTestsFromName for a dynamically generated sql template test method."""
test_loader = TINCTestLoader()
test_suite = test_loader.loadTestsFromName('mpp.models.test.sql_related.test_sql_test_case.MockSQLTemplateTestCaseExplicit.test_template_query04')
test_case = test_suite._tests[0]
# Template test case should work as if it is non-template test case...
self.assertEqual(test_case.name, "MockSQLTemplateTestCaseExplicit.test_template_query04")
self.assertEqual(test_case.author, 'shahn17')
self.assertEqual(test_case.description, 'template test case')
sql_file_path = os.path.join(test_case.get_out_dir(), "MockSQLTemplateTestCaseExplicit", "template_query04.sql")
ans_file_path = os.path.join(test_case.get_out_dir(), "MockSQLTemplateTestCaseExplicit", "template_query04.ans")
original_sql_file_path = os.path.join(os.path.dirname(sys.modules[test_case.__class__.__module__].__file__), test_case.__class__.sql_dir, test_case.__class__.template_dir, "query04.sql")
original_ans_file_path = os.path.join(os.path.dirname(sys.modules[test_case.__class__.__module__].__file__), test_case.__class__.ans_dir, test_case.__class__.template_dir, "query04.ans")
self.assertEqual(test_case.sql_file, sql_file_path)
self.assertEqual(test_case.ans_file, ans_file_path)
self.assertEqual(test_case._original_sql_file, original_sql_file_path)
self.assertEqual(test_case._original_ans_file, original_ans_file_path)
self.assertTrue(os.path.exists(test_case.sql_file))
self.assertTrue(os.path.exists(test_case.ans_file))
self.assertTrue(os.path.exists(test_case._original_sql_file))
self.assertTrue(os.path.exists(test_case._original_ans_file))
# Cleanup
dir_path = os.path.join(test_case.get_out_dir(), "MockSQLTemplateTestCaseExplicit")
self.assertTrue(os.path.exists(dir_path))
shutil.rmtree(dir_path)
def test_templates_all_files(self):
"""Test loadTestsFromName for a class name"""
test_loader = TINCTestLoader()
test_suite = test_loader.loadTestsFromName('mpp.models.test.sql_related.test_sql_test_case.MockSQLTemplateTestCase')
# 5 tests for 3 sql files in the current directory, and 2 sql files in the template directory
self.assertEquals(len(test_suite._tests), 5)
for test_case in test_suite._tests:
if test_case.name == 'MockSQLTemplateTestCase.test_template_query04':
break
self.assertEqual(test_case.name, "MockSQLTemplateTestCase.test_template_query04")
self.assertEqual(test_case.author, 'shahn17')
self.assertEqual(test_case.description, 'template test case')
sql_file_path = os.path.join(test_case.get_out_dir(), "MockSQLTemplateTestCase", "template_query04.sql")
ans_file_path = os.path.join(test_case.get_out_dir(), "MockSQLTemplateTestCase", "template_query04.ans")
original_sql_file_path = os.path.join(os.path.dirname(sys.modules[test_case.__class__.__module__].__file__), test_case.__class__.sql_dir, test_case.__class__.template_dir, "query04.sql")
original_ans_file_path = os.path.join(os.path.dirname(sys.modules[test_case.__class__.__module__].__file__), test_case.__class__.ans_dir, test_case.__class__.template_dir, "query04.ans")
self.assertEqual(test_case.sql_file, sql_file_path)
self.assertEqual(test_case.ans_file, ans_file_path)
self.assertEqual(test_case._original_sql_file, original_sql_file_path)
self.assertEqual(test_case._original_ans_file, original_ans_file_path)
self.assertTrue(os.path.exists(test_case.sql_file))
self.assertTrue(os.path.exists(test_case.ans_file))
self.assertTrue(os.path.exists(test_case._original_sql_file))
self.assertTrue(os.path.exists(test_case._original_ans_file))
        # Template test case sql file should exist
sql_file_path = os.path.join(test_case.get_out_dir(), "MockSQLTemplateTestCase", "template_query04.sql")
self.assertTrue(os.path.exists(sql_file_path))
sql_file_data = None
with open(sql_file_path, 'r') as sql_file_object:
sql_file_data = sql_file_object.read()
self.assertTrue(sql_file_data is not None)
# Correct substitution
self.assertTrue('my_percent' in sql_file_data)
# Error in python code
self.assertTrue('my_at@' in sql_file_data)
# Error in sql template
self.assertTrue('&' in sql_file_data)
        # Template test case ans file should exist
ans_file_path = os.path.join(test_case.get_out_dir(), "MockSQLTemplateTestCase", "template_query05.ans")
self.assertTrue(os.path.exists(ans_file_path))
ans_file_data = None
with open(ans_file_path, 'r') as sql_file_object:
ans_file_data = sql_file_object.read()
self.assertTrue(ans_file_data is not None)
# Correct substitution
self.assertTrue('my_percent' in ans_file_data)
# Error in python code
self.assertTrue('my_at@' in ans_file_data)
# Error in ans template
self.assertTrue('&' in ans_file_data)
# Cleanup
dir_path = os.path.join(test_case.get_out_dir(), "MockSQLTemplateTestCase")
self.assertTrue(os.path.exists(dir_path))
shutil.rmtree(dir_path)
@unittest.skip('mock')
class MockTINCTestCaseForLoaderDiscovery(SQLTestCase):
def test_lacking_product_version(self):
"""
@maintainer balasr3
@description test stuff
@created 2012-07-05 12:00:00
@modified 2012-07-05 12:00:02
@tags storage
"""
pass
def test_containing_product_version(self):
"""
@maintainer balasr3
@description test stuff
@created 2012-07-05 12:00:00
@modified 2012-07-05 12:00:02
@tags storage
@product_version gpdb: 4.2
"""
pass
def test_main_product_version(self):
"""
@maintainer balasr3
@description test stuff
@created 2012-07-05 12:00:00
@modified 2012-07-05 12:00:02
@tags storage
@product_version gpdb: main
"""
pass
def test_containing_product_version_exclusive_range(self):
"""
@maintainer balasr3
@description test stuff
@created 2012-07-05 12:00:00
@modified 2012-07-05 12:00:02
@tags storage
@product_version gpdb: (4.1.0.0-main)
"""
pass
def test_containing_product_version_inclusive_range(self):
"""
@maintainer balasr3
@description test stuff
@created 2012-07-05 12:00:00
@modified 2012-07-05 12:00:02
@tags storage
@product_version gpdb: [4.2.0.0-main]
"""
pass
class TINCTestLoaderDiscoveryTests(unittest.TestCase):
def test_matching_author(self):
test_case = MockTINCTestCaseForLoaderDiscovery('test_lacking_product_version')
self.assertTrue(test_case.match_metadata("author", "pedroc"))
self.assertFalse(test_case.match_metadata("author", "kumara64"))
def test_matching_maintainer(self):
test_case = MockTINCTestCaseForLoaderDiscovery('test_lacking_product_version')
self.assertTrue(test_case.match_metadata("maintainer", "balasr3"))
self.assertFalse(test_case.match_metadata("maintainer", "kumara64"))
def test_matching_tags(self):
test_case = MockTINCTestCaseForLoaderDiscovery('test_lacking_product_version')
self.assertTrue(test_case.match_metadata("tags", "storage"))
self.assertFalse(test_case.match_metadata("tags", "text_analytics"))
| 45.586498
| 194
| 0.698954
|
54abb65abf4b369c3bf41ea983057c782fd3bce7
| 9,305
|
py
|
Python
|
model.py
|
EricSchles/RNN-data-gen
|
02cc59c8c44fffe375f7c51e1cf8f48811f6cc2f
|
[
"MIT"
] | 1
|
2019-04-22T16:47:05.000Z
|
2019-04-22T16:47:05.000Z
|
model.py
|
afcarl/RNN-data-gen
|
02cc59c8c44fffe375f7c51e1cf8f48811f6cc2f
|
[
"MIT"
] | null | null | null |
model.py
|
afcarl/RNN-data-gen
|
02cc59c8c44fffe375f7c51e1cf8f48811f6cc2f
|
[
"MIT"
] | 1
|
2018-08-12T15:16:11.000Z
|
2018-08-12T15:16:11.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words"""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
words = read_data(filename)
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words):
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
# Backtrack a little bit to avoid skipping words in the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
print(batch.shape, labels.shape)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.initialize_all_variables()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print("Initialized")
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
plt.figure(figsize=(18, 18)) # in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i, :]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")
| 36.206226
| 81
| 0.693498
|
6256362f43e9730a133bed84987e0a9b8695470f
| 212
|
gyp
|
Python
|
socket/rc522/binding.gyp
|
tryy3/skolschema
|
50ff7db91f9a1f6d526341de920f595b7b6bf9d7
|
[
"BSD-3-Clause"
] | null | null | null |
socket/rc522/binding.gyp
|
tryy3/skolschema
|
50ff7db91f9a1f6d526341de920f595b7b6bf9d7
|
[
"BSD-3-Clause"
] | null | null | null |
socket/rc522/binding.gyp
|
tryy3/skolschema
|
50ff7db91f9a1f6d526341de920f595b7b6bf9d7
|
[
"BSD-3-Clause"
] | null | null | null |
{
"targets": [
{
"target_name": "rc522",
"sources": [
"src/rc522.c",
"src/rfid.c",
"src/accessor.cc"
],
"libraries": [
"-lbcm2835"
]
}
]
}
| 14.133333
| 29
| 0.363208
|
3d428117d51286a63d94e35461d561297ff8d9a1
| 4,878
|
py
|
Python
|
clients/python-flask/generated/openapi_server/models/computer_set.py
|
PankTrue/swaggy-jenkins
|
aca35a7cca6e1fcc08bd399e05148942ac2f514b
|
[
"MIT"
] | 23
|
2017-08-01T12:25:26.000Z
|
2022-01-25T03:44:11.000Z
|
clients/python-flask/generated/openapi_server/models/computer_set.py
|
PankTrue/swaggy-jenkins
|
aca35a7cca6e1fcc08bd399e05148942ac2f514b
|
[
"MIT"
] | 35
|
2017-06-14T03:28:15.000Z
|
2022-02-14T10:25:54.000Z
|
clients/python-flask/generated/openapi_server/models/computer_set.py
|
PankTrue/swaggy-jenkins
|
aca35a7cca6e1fcc08bd399e05148942ac2f514b
|
[
"MIT"
] | 11
|
2017-08-31T19:00:20.000Z
|
2021-12-19T12:04:12.000Z
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server.models.hudson_master_computer import HudsonMasterComputer # noqa: F401,E501
from openapi_server import util
class ComputerSet(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, _class: str=None, busy_executors: int=None, computer: List[HudsonMasterComputer]=None, display_name: str=None, total_executors: int=None): # noqa: E501
"""ComputerSet - a model defined in OpenAPI
:param _class: The _class of this ComputerSet. # noqa: E501
:type _class: str
:param busy_executors: The busy_executors of this ComputerSet. # noqa: E501
:type busy_executors: int
:param computer: The computer of this ComputerSet. # noqa: E501
:type computer: List[HudsonMasterComputer]
:param display_name: The display_name of this ComputerSet. # noqa: E501
:type display_name: str
:param total_executors: The total_executors of this ComputerSet. # noqa: E501
:type total_executors: int
"""
self.openapi_types = {
'_class': str,
'busy_executors': int,
'computer': List[HudsonMasterComputer],
'display_name': str,
'total_executors': int
}
self.attribute_map = {
'_class': '_class',
'busy_executors': 'busyExecutors',
'computer': 'computer',
'display_name': 'displayName',
'total_executors': 'totalExecutors'
}
self.__class = _class
self._busy_executors = busy_executors
self._computer = computer
self._display_name = display_name
self._total_executors = total_executors
@classmethod
def from_dict(cls, dikt) -> 'ComputerSet':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The ComputerSet of this ComputerSet. # noqa: E501
:rtype: ComputerSet
"""
return util.deserialize_model(dikt, cls)
@property
def _class(self) -> str:
"""Gets the _class of this ComputerSet.
:return: The _class of this ComputerSet.
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class: str):
"""Sets the _class of this ComputerSet.
:param _class: The _class of this ComputerSet.
:type _class: str
"""
self.__class = _class
@property
def busy_executors(self) -> int:
"""Gets the busy_executors of this ComputerSet.
:return: The busy_executors of this ComputerSet.
:rtype: int
"""
return self._busy_executors
@busy_executors.setter
def busy_executors(self, busy_executors: int):
"""Sets the busy_executors of this ComputerSet.
:param busy_executors: The busy_executors of this ComputerSet.
:type busy_executors: int
"""
self._busy_executors = busy_executors
@property
def computer(self) -> List[HudsonMasterComputer]:
"""Gets the computer of this ComputerSet.
:return: The computer of this ComputerSet.
:rtype: List[HudsonMasterComputer]
"""
return self._computer
@computer.setter
def computer(self, computer: List[HudsonMasterComputer]):
"""Sets the computer of this ComputerSet.
:param computer: The computer of this ComputerSet.
:type computer: List[HudsonMasterComputer]
"""
self._computer = computer
@property
def display_name(self) -> str:
"""Gets the display_name of this ComputerSet.
:return: The display_name of this ComputerSet.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name: str):
"""Sets the display_name of this ComputerSet.
:param display_name: The display_name of this ComputerSet.
:type display_name: str
"""
self._display_name = display_name
@property
def total_executors(self) -> int:
"""Gets the total_executors of this ComputerSet.
:return: The total_executors of this ComputerSet.
:rtype: int
"""
return self._total_executors
@total_executors.setter
def total_executors(self, total_executors: int):
"""Sets the total_executors of this ComputerSet.
:param total_executors: The total_executors of this ComputerSet.
:type total_executors: int
"""
self._total_executors = total_executors
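# A minimal usage sketch (not part of the generated module): deserialize a
# hypothetical Jenkins response into the model above and read a few fields.
# The payload keys follow attribute_map; the values are made up.
if __name__ == '__main__':
    example_payload = {
        '_class': 'hudson.model.ComputerSet',
        'busyExecutors': 1,
        'displayName': 'Nodes',
        'totalExecutors': 2
    }
    computer_set = ComputerSet.from_dict(example_payload)
    print(computer_set.display_name, computer_set.busy_executors)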
| 28.694118
| 175
| 0.637351
|
c55db49df1433faa66e5ecfeb6e8adaad9fafed8
| 8,126
|
py
|
Python
|
salt/states/saltmod.py
|
skrobul/salt
|
ef7fb71082cce7a9783e00b9c65062fefae09263
|
[
"Apache-2.0"
] | null | null | null |
salt/states/saltmod.py
|
skrobul/salt
|
ef7fb71082cce7a9783e00b9c65062fefae09263
|
[
"Apache-2.0"
] | null | null | null |
salt/states/saltmod.py
|
skrobul/salt
|
ef7fb71082cce7a9783e00b9c65062fefae09263
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Control the Salt command interface
==================================
The Salt state is used to control the salt command interface. This state is
intended for use primarily from the state runner from the master.
The salt.state declaration can call out a highstate or a list of sls:
webservers:
salt.state:
- tgt: 'web*'
- sls:
- apache
- django
- core
- saltenv: prod
databases:
salt.state:
- tgt: role:database
- tgt_type: grain
- highstate: True
'''
# Import python libs
import logging
# Import salt libs
import salt.utils
import salt._compat
import salt.output
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'salt'
def __virtual__():
'''
Named salt
'''
return __virtualname__
def state(
name,
tgt,
ssh=False,
tgt_type=None,
expr_form=None,
ret='',
highstate=None,
sls=None,
env=None,
test=False,
fail_minions=None,
allow_fail=0,
concurrent=False,
timeout=None):
'''
Invoke a state run on a given target
name
An arbitrary name used to track the state execution
tgt
The target specification for the state run.
tgt_type | expr_form
The target type to resolve, defaults to glob
ret
Optionally set a single or a list of returners to use
highstate
Defaults to None, if set to True the target systems will ignore any
sls references specified in the sls option and call state.highstate
on the targeted minions
sls
A group of sls files to execute. This can be defined as a single string
containing a single sls file, or a list of sls files
saltenv
The default salt environment to pull sls files from
ssh
        Set to `True` to use the ssh client instead of the standard salt client
roster
In the event of using salt-ssh, a roster system can be set
fail_minions
An optional list of targeted minions where failure is an option
concurrent
Allow multiple state runs to occur at once.
WARNING: This flag is potentially dangerous. It is designed
        for use when multiple state runs can safely be run at the same time.
Do not use this flag for performance optimization.
'''
cmd_kw = {'arg': [], 'kwarg': {}, 'ret': ret, 'timeout': timeout}
ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
if env is not None:
msg = (
'Passing a salt environment should be done using \'saltenv\' not '
'\'env\'. This warning will go away in Salt Boron and this '
'will be the default and expected behaviour. Please update your '
'state files.'
)
salt.utils.warn_until('Boron', msg)
ret.setdefault('warnings', []).append(msg)
# No need to set __env__ = env since that's done in the state machinery
if expr_form and tgt_type:
ret.setdefault('warnings', []).append(
'Please only use \'tgt_type\' or \'expr_form\' not both. '
'Preferring \'tgt_type\' over \'expr_form\''
)
expr_form = None
elif expr_form and not tgt_type:
tgt_type = expr_form
elif not tgt_type and not expr_form:
tgt_type = 'glob'
cmd_kw['expr_form'] = tgt_type
cmd_kw['ssh'] = ssh
if highstate:
fun = 'state.highstate'
elif sls:
fun = 'state.sls'
if isinstance(sls, list):
sls = ','.join(sls)
cmd_kw['arg'].append(sls)
else:
ret['comment'] = 'No highstate or sls specified, no execution made'
ret['result'] = False
return ret
if test:
cmd_kw['kwarg']['test'] = test
cmd_kw['kwarg']['saltenv'] = __env__
if isinstance(concurrent, bool):
cmd_kw['kwarg']['concurrent'] = concurrent
else:
ret['comment'] = ('Must pass in boolean for value of \'concurrent\'')
ret['result'] = False
return ret
if __opts__['test'] is True:
ret['comment'] = (
'State run to be executed on target {0} as test={1}'
).format(tgt, str(test))
ret['result'] = None
return ret
cmd_ret = __salt__['saltutil.cmd'](tgt, fun, **cmd_kw)
changes = {}
fail = set()
failures = {}
no_change = set()
if fail_minions is None:
fail_minions = ()
elif isinstance(fail_minions, salt._compat.string_types):
fail_minions = [minion.strip() for minion in fail_minions.split(',')]
elif not isinstance(fail_minions, list):
ret.setdefault('warnings', []).append(
'\'fail_minions\' needs to be a list or a comma separated '
'string. Ignored.'
)
fail_minions = ()
for minion, mdata in cmd_ret.iteritems():
if mdata['out'] != 'highstate':
log.warning("Output from salt state not highstate")
m_ret = mdata['ret']
m_state = salt.utils.check_state_result(m_ret)
if not m_state:
if minion not in fail_minions:
fail.add(minion)
failures[minion] = m_ret
continue
for state_item in m_ret.itervalues():
if state_item['changes']:
changes[minion] = m_ret
break
else:
no_change.add(minion)
if changes:
ret['changes'] = {'out': 'highstate', 'ret': changes}
if fail:
ret['result'] = False
ret['comment'] = 'Run failed on minions: {0}'.format(', '.join(fail))
else:
ret['comment'] = 'States ran successfully.'
if changes:
ret['comment'] += ' Updating {0}.'.format(', '.join(changes))
if no_change:
ret['comment'] += ' No changes made to {0}.'.format(', '.join(no_change))
if failures:
ret['comment'] += '\nFailures:\n'
for minion, failure in failures.iteritems():
ret['comment'] += '\n'.join(
(' ' * 4 + l)
for l in salt.output.out_format(
{minion: failure},
'highstate',
__opts__,
).splitlines()
)
ret['comment'] += '\n'
return ret
def function(
name,
tgt,
ssh=False,
tgt_type=None,
expr_form=None,
ret='',
arg=None,
kwarg=None,
timeout=None):
'''
Execute a single module function on a remote minion via salt or salt-ssh
name
The name of the function to run, aka cmd.run or pkg.install
tgt
The target specification, aka '*' for all minions
tgt_type | expr_form
The target type, defaults to glob
arg
The list of arguments to pass into the function
kwarg
The list of keyword arguments to pass into the function
ret
Optionally set a single or a list of returners to use
ssh
        Set to `True` to use the ssh client instead of the standard salt client
'''
if kwarg is None:
kwarg = {}
cmd_kw = {'arg': arg or [], 'kwarg': kwarg, 'ret': ret, 'timeout': timeout}
ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
if expr_form and tgt_type:
ret['warnings'] = [
'Please only use \'tgt_type\' or \'expr_form\' not both. '
'Preferring \'tgt_type\' over \'expr_form\''
]
expr_form = None
elif expr_form and not tgt_type:
tgt_type = expr_form
elif not tgt_type and not expr_form:
tgt_type = 'glob'
cmd_kw['expr_form'] = tgt_type
cmd_kw['ssh'] = ssh
fun = name
cmd_ret = __salt__['saltutil.cmd'](tgt, fun, **cmd_kw)
ret['changes'] = cmd_ret
    ret['comment'] = 'Function {0} ran successfully on {1}'.format(
        fun, ', '.join(cmd_ret))
return ret
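# A minimal orchestration sketch (illustrative only, not taken from the Salt
# docs): how the function state above is typically declared in an orchestrate
# SLS run from the master. Target and arguments are placeholders:
#
#     restart_web_service:
#       salt.function:
#         - name: service.restart
#         - tgt: 'web*'
#         - arg:
#           - apache2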
| 27.924399
| 85
| 0.562023
|
5b050bb123e179303959b3e0c251104489653e1d
| 5,519
|
py
|
Python
|
Atrial_LDRBM/LDRBM/Fiber_RA/ra_main.py
|
KIT-IBT/AugmentA
|
b44ea02a4c6e33fd6c14ce3cf180d5603ff660b1
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
Atrial_LDRBM/LDRBM/Fiber_RA/ra_main.py
|
KIT-IBT/AugmentA
|
b44ea02a4c6e33fd6c14ce3cf180d5603ff660b1
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2022-02-25T13:58:03.000Z
|
2022-03-01T14:53:42.000Z
|
Atrial_LDRBM/LDRBM/Fiber_RA/ra_main.py
|
KIT-IBT/AugmentA
|
b44ea02a4c6e33fd6c14ce3cf180d5603ff660b1
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 19 14:55:02 2021
@author: Luca Azzolin
Copyright 2021 Luca Azzolin
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import os
import numpy as np
import vtk
from vtk.util import numpy_support
import subprocess as sp
import datetime
from carputils import tools
from ra_laplace import ra_laplace
from ra_generate_fiber import ra_generate_fiber
def parser():
# Generate the standard command line parser
parser = tools.standard_parser()
# Add arguments
parser.add_argument('--mesh',
type=str,
default="",
help='path to meshname')
parser.add_argument('--ifmt',
type=str,
default="vtk",
help='input mesh format')
parser.add_argument('--mesh_type',
default='bilayer',
choices=['vol',
'bilayer'],
help='Mesh type')
parser.add_argument('--debug',
type=int,
default=0,
help='path to meshname')
parser.add_argument('--scale',
type=int,
default=1,
help='normal unit is mm, set scaling factor if different')
parser.add_argument('--ofmt',
default='vtu',
choices=['vtu','vtk'],
help='Output mesh format')
parser.add_argument('--normals_outside',
type=int,
default=1,
help='set to 1 if surface normals are pointing outside')
parser.add_argument('--add_bridges',
type=int,
default=1,
help='set to 1 to compute and add interatrial bridges, 0 otherwise')
return parser
def jobID(args):
ID = '{}_fibers'.format(args.mesh)
return ID
@tools.carpexample(parser, jobID)
def run(args, job):
RA_mesh = args.mesh+'_surf/RA'
if args.mesh_type == "bilayer":
reader = vtk.vtkPolyDataReader()
else:
reader = vtk.vtkUnstructuredGridReader()
reader.SetFileName(RA_mesh+'.vtk')
reader.Update()
RA = reader.GetOutput()
if args.normals_outside:
reverse = vtk.vtkReverseSense()
reverse.ReverseCellsOn()
reverse.ReverseNormalsOn()
reverse.SetInputConnection(reader.GetOutputPort())
reverse.Update()
RA = reverse.GetOutput()
pts = numpy_support.vtk_to_numpy(RA.GetPoints().GetData())
# cells = numpy_support.vtk_to_numpy(RA.GetPolys().GetData())
# cells = cells.reshape(int(len(cells)/4),4)[:,1:]
with open(RA_mesh+'.pts',"w") as f:
f.write("{}\n".format(len(pts)))
for i in range(len(pts)):
f.write("{} {} {}\n".format(pts[i][0], pts[i][1], pts[i][2]))
with open(RA_mesh+'.elem',"w") as f:
f.write("{}\n".format(RA.GetNumberOfCells()))
for i in range(RA.GetNumberOfCells()):
cell = RA.GetCell(i)
if cell.GetNumberOfPoints() == 2:
f.write("Ln {} {} {}\n".format(cell.GetPointIds().GetId(0), cell.GetPointIds().GetId(1), 1))
elif cell.GetNumberOfPoints() == 3:
f.write("Tr {} {} {} {}\n".format(cell.GetPointIds().GetId(0), cell.GetPointIds().GetId(1), cell.GetPointIds().GetId(2), 1))
elif cell.GetNumberOfPoints() == 4:
f.write("Tt {} {} {} {} {}\n".format(cell.GetPointIds().GetId(0), cell.GetPointIds().GetId(1), cell.GetPointIds().GetId(2), cell.GetPointIds().GetId(3), 1))
fibers = np.zeros((RA.GetNumberOfCells(),6))
fibers[:,0]=1
fibers[:,4]=1
with open(RA_mesh+'.lon',"w") as f:
f.write("2\n")
for i in range(len(fibers)):
f.write("{} {} {} {} {} {}\n".format(fibers[i][0], fibers[i][1], fibers[i][2], fibers[i][3],fibers[i][4],fibers[i][5]))
start_time = datetime.datetime.now()
    print('[Step 1] Solving laplace-dirichlet... ' + str(start_time))
output_laplace = ra_laplace(args, job, RA)
end_time = datetime.datetime.now()
running_time = end_time - start_time
    print('[Step 1] Solving laplace-dirichlet...done! ' + str(end_time) + '\nRunning time: ' + str(running_time) + '\n')
start_time = datetime.datetime.now()
print('[Step 2] Generating fibers... ' + str(start_time))
ra_generate_fiber(output_laplace, args, job)
end_time = datetime.datetime.now()
running_time = end_time - start_time
print('[Step 2] Generating fibers...done! ' + str(end_time) + '\nRunning time: ' + str(running_time) + '\n')
if __name__ == '__main__':
run()
| 37.544218
| 176
| 0.589237
|
dff3c720c16c4d01354dad494b2e9827e3857347
| 389
|
py
|
Python
|
Lektion_5/gps_project/main.py
|
tvotan/dhbw_python_kivy
|
41d363d41a79e1881128be54dc30b5d0c58afb70
|
[
"MIT"
] | 1
|
2020-10-27T15:27:06.000Z
|
2020-10-27T15:27:06.000Z
|
Lektion_5/gps_project/main.py
|
tvotan/dhbw_python_kivy
|
41d363d41a79e1881128be54dc30b5d0c58afb70
|
[
"MIT"
] | null | null | null |
Lektion_5/gps_project/main.py
|
tvotan/dhbw_python_kivy
|
41d363d41a79e1881128be54dc30b5d0c58afb70
|
[
"MIT"
] | null | null | null |
from kivymd.app import MDApp
from map_view import FarmersMapView
import sqlite3
class MainApp(MDApp):
connection = None
cursor = None
    # Initializes the GPS functionality
def on_start(self):
        # Connect to the database
self.connection = sqlite3.connect("markets.db")
self.cursor = self.connection.cursor()
if __name__ == "__main__":
MainApp().run()
| 18.52381
| 55
| 0.678663
|
9d8cf60b2bf8ee6c5b115f348a54eb9e242baedd
| 3,147
|
py
|
Python
|
tests/components/tradfri/test_init.py
|
FilHarr/core
|
c3a2eedf0beb0d1a66ff1a39705e715ded35e085
|
[
"Apache-2.0"
] | 1
|
2022-03-20T05:31:13.000Z
|
2022-03-20T05:31:13.000Z
|
tests/components/tradfri/test_init.py
|
FilHarr/core
|
c3a2eedf0beb0d1a66ff1a39705e715ded35e085
|
[
"Apache-2.0"
] | null | null | null |
tests/components/tradfri/test_init.py
|
FilHarr/core
|
c3a2eedf0beb0d1a66ff1a39705e715ded35e085
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for Tradfri setup."""
from unittest.mock import patch
from homeassistant.components import tradfri
from homeassistant.helpers import device_registry as dr
from . import GATEWAY_ID
from tests.common import MockConfigEntry
async def test_entry_setup_unload(hass, mock_api_factory):
"""Test config entry setup and unload."""
entry = MockConfigEntry(
domain=tradfri.DOMAIN,
data={
tradfri.CONF_HOST: "mock-host",
tradfri.CONF_IDENTITY: "mock-identity",
tradfri.CONF_KEY: "mock-key",
tradfri.CONF_GATEWAY_ID: GATEWAY_ID,
},
)
entry.add_to_hass(hass)
with patch.object(
hass.config_entries, "async_forward_entry_setup", return_value=True
) as setup:
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert setup.call_count == len(tradfri.PLATFORMS)
dev_reg = dr.async_get(hass)
dev_entries = dr.async_entries_for_config_entry(dev_reg, entry.entry_id)
assert dev_entries
dev_entry = dev_entries[0]
assert dev_entry.identifiers == {
(tradfri.DOMAIN, entry.data[tradfri.CONF_GATEWAY_ID])
}
assert dev_entry.manufacturer == tradfri.ATTR_TRADFRI_MANUFACTURER
assert dev_entry.name == tradfri.ATTR_TRADFRI_GATEWAY
assert dev_entry.model == tradfri.ATTR_TRADFRI_GATEWAY_MODEL
with patch.object(
hass.config_entries, "async_forward_entry_unload", return_value=True
) as unload:
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert unload.call_count == len(tradfri.PLATFORMS)
assert mock_api_factory.shutdown.call_count == 1
async def test_remove_stale_devices(hass, mock_api_factory):
"""Test remove stale device registry entries."""
entry = MockConfigEntry(
domain=tradfri.DOMAIN,
data={
tradfri.CONF_HOST: "mock-host",
tradfri.CONF_IDENTITY: "mock-identity",
tradfri.CONF_KEY: "mock-key",
tradfri.CONF_GATEWAY_ID: GATEWAY_ID,
},
)
entry.add_to_hass(hass)
dev_reg = dr.async_get(hass)
dev_reg.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers={(tradfri.DOMAIN, "stale_device_id")},
)
dev_entries = dr.async_entries_for_config_entry(dev_reg, entry.entry_id)
assert len(dev_entries) == 1
dev_entry = dev_entries[0]
assert dev_entry.identifiers == {(tradfri.DOMAIN, "stale_device_id")}
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
dev_entries = dr.async_entries_for_config_entry(dev_reg, entry.entry_id)
# Check that only the gateway device entry remains.
assert len(dev_entries) == 1
dev_entry = dev_entries[0]
assert dev_entry.identifiers == {
(tradfri.DOMAIN, entry.data[tradfri.CONF_GATEWAY_ID])
}
assert dev_entry.manufacturer == tradfri.ATTR_TRADFRI_MANUFACTURER
assert dev_entry.name == tradfri.ATTR_TRADFRI_GATEWAY
assert dev_entry.model == tradfri.ATTR_TRADFRI_GATEWAY_MODEL
| 34.582418
| 76
| 0.708611
|
fa1e76baa3066c61b6d477217677ff0640a695da
| 291
|
py
|
Python
|
examples/advanced/multiple_tasks_single_process.py
|
thepycoder/clearml
|
717edba8c2b39fb7486bd2aba9ca0294f309b4c3
|
[
"Apache-2.0"
] | 2,097
|
2019-06-11T14:36:25.000Z
|
2020-12-21T03:52:59.000Z
|
examples/advanced/multiple_tasks_single_process.py
|
thepycoder/clearml
|
717edba8c2b39fb7486bd2aba9ca0294f309b4c3
|
[
"Apache-2.0"
] | 247
|
2019-06-11T15:10:26.000Z
|
2020-12-21T17:34:32.000Z
|
examples/advanced/multiple_tasks_single_process.py
|
thepycoder/clearml
|
717edba8c2b39fb7486bd2aba9ca0294f309b4c3
|
[
"Apache-2.0"
] | 256
|
2019-06-11T14:36:28.000Z
|
2020-12-18T08:32:47.000Z
|
from clearml import Task
for i in range(3):
task = Task.init(project_name="examples", task_name="Same process, Multiple tasks, Task #{}".format(i))
# Doing Task processing here
print("Task #{} running".format(i))
#
print("Task #{} done :) ".format(i))
task.close()
| 26.454545
| 107
| 0.632302
|
3850cdc8424c8cb27ed59df67150eff2b0dcd2d6
| 5,693
|
py
|
Python
|
ocs_ci/ocs/must_gather/must_gather.py
|
nbalacha/ocs-ci
|
9c5a5474d62777e868b80894d6b0f3567a7b605d
|
[
"MIT"
] | null | null | null |
ocs_ci/ocs/must_gather/must_gather.py
|
nbalacha/ocs-ci
|
9c5a5474d62777e868b80894d6b0f3567a7b605d
|
[
"MIT"
] | null | null | null |
ocs_ci/ocs/must_gather/must_gather.py
|
nbalacha/ocs-ci
|
9c5a5474d62777e868b80894d6b0f3567a7b605d
|
[
"MIT"
] | null | null | null |
import os
import logging
import shutil
import tempfile
import re
from pathlib import Path
from ocs_ci.helpers.helpers import storagecluster_independent_check
from ocs_ci.ocs.resources.pod import get_all_pods
from ocs_ci.ocs.utils import collect_ocs_logs
from ocs_ci.ocs.must_gather.const_must_gather import GATHER_COMMANDS_VERSION
from ocs_ci.ocs.ocp import get_ocs_parsed_version
logger = logging.getLogger(__name__)
class MustGather(object):
"""
MustGather Class
"""
def __init__(self):
self.type_log = None
self.root = None
self.files_path = dict()
self.empty_files = list()
self.files_not_exist = list()
self.files_content_issue = list()
@property
def log_type(self):
return self.type_log
@log_type.setter
def log_type(self, type_log):
if not isinstance(type_log, str):
raise ValueError("log type arg must be a string")
self.type_log = type_log
def collect_must_gather(self):
"""
Collect ocs_must_gather and copy the logs to a temporary folder.
"""
temp_folder = tempfile.mkdtemp()
collect_ocs_logs(dir_name=temp_folder, ocp=False)
self.root = temp_folder + "_ocs_logs"
def search_file_path(self):
"""
Search File Path
"""
version = get_ocs_parsed_version()
if self.type_log == "OTHERS" and storagecluster_independent_check():
files = GATHER_COMMANDS_VERSION[version]["OTHERS_EXTERNAL"]
else:
files = GATHER_COMMANDS_VERSION[version][self.type_log]
for file in files:
self.files_not_exist.append(file)
for dir_name, subdir_list, files_list in os.walk(self.root):
if file in files_list:
self.files_path[file] = os.path.join(dir_name, file)
self.files_not_exist.remove(file)
break
def validate_file_size(self):
"""
Validate the file is not empty
"""
for path, subdirs, files in os.walk(self.root):
for file in files:
file_path = os.path.join(path, file)
if Path(file_path).stat().st_size == 0:
logger.error(f"log file {file} empty!")
self.empty_files.append(file)
def validate_expected_files(self):
"""
Make sure all the relevant files exist
"""
self.search_file_path()
self.verify_noobaa_diagnostics()
for file, file_path in self.files_path.items():
if not Path(file_path).is_file():
self.files_not_exist.append(file)
elif Path(file_path).stat().st_size == 0:
self.empty_files.append(file)
elif re.search(r"\.yaml$", file):
with open(file_path, "r") as f:
if "kind" not in f.read().lower():
self.files_content_issue.append(file)
def compare_running_pods(self):
"""
Compare running pods list to "/pods" subdirectories
"""
must_gather_helper = re.compile(r"must-gather-.*.-helper")
pod_objs = get_all_pods(namespace="openshift-storage")
pod_names = []
for pod in pod_objs:
if not must_gather_helper.match(pod.name):
pod_names.append(pod.name)
for dir_name, subdir_list, files_list in os.walk(self.root):
if re.search("openshift-storage/pods$", dir_name):
pod_path = dir_name
pod_files = []
for pod_file in os.listdir(pod_path):
if not must_gather_helper.match(pod_file):
pod_files.append(pod_file)
assert set(sorted(pod_files)) == set(sorted(pod_names)), (
f"List of openshift-storage pods are not equal to list of logs "
f"directories list of pods: {pod_names} list of log directories: {pod_files}"
)
def print_invalid_files(self):
"""
Print Invalid Files
"""
if any([self.empty_files, self.files_not_exist, self.files_content_issue]):
error = (
f"Files don't exist:\n{self.files_not_exist}\n"
f"Empty files:\n{self.empty_files}\n"
f"Content issues:\n{self.files_content_issue}"
)
self.empty_files = list()
self.files_not_exist = list()
self.files_content_issue = list()
raise Exception(error)
def verify_noobaa_diagnostics(self):
"""
Verify noobaa_diagnostics folder exist
"""
if self.type_log == "OTHERS" and get_ocs_parsed_version() >= 4.6:
flag = False
logger.info("Verify noobaa_diagnostics folder exist")
for path, subdirs, files in os.walk(self.root):
for file in files:
if re.search(r"noobaa_diagnostics_.*.tar.gz", file):
flag = True
if not flag:
logger.error("noobaa_diagnostics.tar.gz does not exist")
self.files_not_exist.append("noobaa_diagnostics.tar.gz")
def validate_must_gather(self):
"""
Validate must_gather
"""
self.validate_file_size()
self.validate_expected_files()
self.print_invalid_files()
self.compare_running_pods()
def cleanup(self):
"""
Delete temporary folder.
"""
logger.info(f"Delete must gather folder {self.root}")
if re.search("_ocs_logs$", self.root):
shutil.rmtree(path=self.root, ignore_errors=False, onerror=None)
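# A minimal driver sketch (not part of the original module): the intended call
# order for MustGather, assuming ocs-ci is already configured against a running
# OCS cluster and that "CEPH" is a valid log-type key for the installed version.
if __name__ == "__main__":
    must_gather = MustGather()
    must_gather.log_type = "CEPH"
    must_gather.collect_must_gather()
    try:
        must_gather.validate_must_gather()
    finally:
        must_gather.cleanup()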
| 32.718391
| 89
| 0.595468
|
d02fdfbff0cd41986ce36dd2bb29348f68cee7ba
| 6,804
|
py
|
Python
|
modules/tools/record_play/rtk_recorder.py
|
qwetqwe/simulation
|
1d6a87b7595db1d32c656ae783aa365de551c5bc
|
[
"Apache-2.0"
] | 2
|
2020-06-04T01:11:43.000Z
|
2020-06-04T05:32:15.000Z
|
modules/tools/record_play/rtk_recorder.py
|
LishaUnfoolish/simulation
|
1d6a87b7595db1d32c656ae783aa365de551c5bc
|
[
"Apache-2.0"
] | 7
|
2021-03-10T18:14:25.000Z
|
2022-02-27T04:46:46.000Z
|
modules/tools/record_play/rtk_recorder.py
|
LishaUnfoolish/simulation
|
1d6a87b7595db1d32c656ae783aa365de551c5bc
|
[
"Apache-2.0"
] | 2
|
2020-08-05T12:52:42.000Z
|
2021-10-19T13:07:49.000Z
|
#!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
Record GPS and IMU data
"""
import atexit
import logging
import math
import os
import sys
import time
from cyber_py import cyber
from gflags import FLAGS
from common.logger import Logger
from modules.canbus.proto import chassis_pb2
from modules.localization.proto import localization_pb2
class RtkRecord(object):
"""
rtk recording class
"""
def write(self, data):
"""Wrap file write function to flush data to disk"""
self.file_handler.write(data)
self.file_handler.flush()
def __init__(self, record_file):
self.firstvalid = False
self.logger = Logger.get_logger("RtkRecord")
self.record_file = record_file
self.logger.info("Record file to: " + record_file)
try:
self.file_handler = open(record_file, 'w')
except IOError:
self.logger.error("Open file %s failed" % (record_file))
self.file_handler.close()
sys.exit(1)
self.write("x,y,z,speed,acceleration,curvature,"
"curvature_change_rate,time,theta,gear,s,throttle,brake,steering\n")
self.localization = localization_pb2.LocalizationEstimate()
self.chassis = chassis_pb2.Chassis()
self.chassis_received = False
self.cars = 0.0
self.startmoving = False
self.terminating = False
self.carcurvature = 0.0
self.prev_carspeed = 0.0
def chassis_callback(self, data):
"""
New message received
"""
if self.terminating is True:
self.logger.info("terminating when receive chassis msg")
return
self.chassis.CopyFrom(data)
#self.chassis = data
if math.isnan(self.chassis.speed_mps):
self.logger.warning("find nan speed_mps: %s" % str(self.chassis))
if math.isnan(self.chassis.steering_percentage):
self.logger.warning(
"find nan steering_percentage: %s" % str(self.chassis))
self.chassis_received = True
def localization_callback(self, data):
"""
New message received
"""
if self.terminating is True:
self.logger.info("terminating when receive localization msg")
return
if not self.chassis_received:
self.logger.info(
"chassis not received when localization is received")
return
self.localization.CopyFrom(data)
#self.localization = data
carx = self.localization.pose.position.x
cary = self.localization.pose.position.y
carz = self.localization.pose.position.z
cartheta = self.localization.pose.heading
if math.isnan(self.chassis.speed_mps):
self.logger.warning("find nan speed_mps: %s" % str(self.chassis))
return
if math.isnan(self.chassis.steering_percentage):
self.logger.warning(
"find nan steering_percentage: %s" % str(self.chassis))
return
carspeed = self.chassis.speed_mps
caracceleration = self.localization.pose.linear_acceleration_vrf.y
speed_epsilon = 1e-9
if abs(self.prev_carspeed) < speed_epsilon \
and abs(carspeed) < speed_epsilon:
caracceleration = 0.0
carsteer = self.chassis.steering_percentage
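        # Bicycle-model curvature estimate (interpretation of the constants is
        # an assumption, not from the original comments): steering_percentage
        # is scaled to a steering-wheel angle over an assumed 490 degree range,
        # divided by an assumed 14:1 steering ratio to approximate the
        # front-wheel angle, and tan(wheel_angle) is divided by an assumed
        # 2.8448 m wheelbase to give curvature in 1/m.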
curvature = math.tan(math.radians(carsteer / 100 * 490) / 14) / 2.8448
if abs(carspeed) >= speed_epsilon:
carcurvature_change_rate = (curvature - self.carcurvature) / (
carspeed * 0.01)
else:
carcurvature_change_rate = 0.0
self.carcurvature = curvature
cartime = self.localization.header.timestamp_sec
cargear = self.chassis.gear_location
if abs(carspeed) >= speed_epsilon:
if self.startmoving is False:
self.logger.info(
"carspeed !=0 and startmoving is False, Start Recording")
self.startmoving = True
if self.startmoving:
self.cars += carspeed * 0.01
self.write(
"%s, %s, %s, %s, %s, %s, %s, %.4f, %s, %s, %s, %s, %s, %s\n" %
(carx, cary, carz, carspeed, caracceleration, self.carcurvature,
carcurvature_change_rate, cartime, cartheta, cargear,
self.cars, self.chassis.throttle_percentage,
self.chassis.brake_percentage,
self.chassis.steering_percentage))
self.logger.debug(
"started moving and write data at time %s" % cartime)
else:
self.logger.debug("not start moving, do not write data to file")
self.prev_carspeed = carspeed
def shutdown(self):
"""
shutdown node
"""
self.terminating = True
self.logger.info("Shutting Down...")
self.logger.info("File is written into %s" % self.record_file)
self.file_handler.close()
def main(argv):
"""
Main node
"""
node = cyber.Node("rtk_recorder")
argv = FLAGS(argv)
log_dir = os.path.dirname(os.path.abspath(
__file__)) + "/../../../data/log/"
if not os.path.exists(log_dir):
os.makedirs(log_dir)
Logger.config(
log_file=log_dir + "rtk_recorder.log",
use_stdout=True,
log_level=logging.DEBUG)
print("runtime log is in %s%s" % (log_dir, "rtk_recorder.log"))
record_file = log_dir + "/garage.csv"
recorder = RtkRecord(record_file)
atexit.register(recorder.shutdown)
node.create_reader('/apollo/canbus/chassis',
chassis_pb2.Chassis,
recorder.chassis_callback)
node.create_reader('/apollo/localization/pose',
localization_pb2.LocalizationEstimate,
recorder.localization_callback)
while not cyber.is_shutdown():
time.sleep(0.002)
if __name__ == '__main__':
cyber.init()
main(sys.argv)
cyber.shutdown()
| 33.517241
| 87
| 0.604203
|
480aade4bc5d376e3ba7cd9df7d768f02bd4b3fe
| 8,299
|
py
|
Python
|
python/ray/tune/experiment.py
|
hhbyyh/ray
|
e4565c9cc6caa15e0d32694d974d8db28841630e
|
[
"Apache-2.0"
] | 1
|
2020-10-25T22:51:38.000Z
|
2020-10-25T22:51:38.000Z
|
python/ray/tune/experiment.py
|
hhbyyh/ray
|
e4565c9cc6caa15e0d32694d974d8db28841630e
|
[
"Apache-2.0"
] | null | null | null |
python/ray/tune/experiment.py
|
hhbyyh/ray
|
e4565c9cc6caa15e0d32694d974d8db28841630e
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import inspect
import logging
import os
import six
import types
from ray.tune.error import TuneError
from ray.tune.registry import register_trainable
from ray.tune.result import DEFAULT_RESULTS_DIR
from ray.tune.sample import sample_from
logger = logging.getLogger(__name__)
def _raise_deprecation_note(deprecated, replacement, soft=False):
"""User notification for deprecated parameter.
Arguments:
deprecated (str): Deprecated parameter.
replacement (str): Replacement parameter to use instead.
        soft (bool): If True, only log a warning instead of raising.
"""
error_msg = ("`{deprecated}` is deprecated. Please use `{replacement}`. "
"`{deprecated}` will be removed in future versions of "
"Ray.".format(deprecated=deprecated, replacement=replacement))
if soft:
logger.warning(error_msg)
else:
raise DeprecationWarning(error_msg)
class Experiment(object):
"""Tracks experiment specifications.
Implicitly registers the Trainable if needed.
Examples:
>>> experiment_spec = Experiment(
>>> "my_experiment_name",
>>> my_func,
>>> stop={"mean_accuracy": 100},
>>> config={
>>> "alpha": tune.grid_search([0.2, 0.4, 0.6]),
>>> "beta": tune.grid_search([1, 2]),
>>> },
>>> resources_per_trial={
>>> "cpu": 1,
>>> "gpu": 0
>>> },
>>> num_samples=10,
>>> local_dir="~/ray_results",
>>> checkpoint_freq=10,
>>> max_failures=2)
"""
def __init__(self,
name,
run,
stop=None,
config=None,
resources_per_trial=None,
num_samples=1,
local_dir=None,
upload_dir=None,
trial_name_creator=None,
loggers=None,
sync_to_driver=None,
checkpoint_freq=0,
checkpoint_at_end=False,
keep_checkpoints_num=None,
checkpoint_score_attr=None,
export_formats=None,
max_failures=3,
restore=None,
repeat=None,
trial_resources=None,
sync_function=None):
if repeat:
_raise_deprecation_note("repeat", "num_samples", soft=False)
if trial_resources:
_raise_deprecation_note(
"trial_resources", "resources_per_trial", soft=False)
if sync_function:
_raise_deprecation_note(
"sync_function", "sync_to_driver", soft=False)
stop = stop or {}
if not isinstance(stop, dict) and not callable(stop):
raise ValueError("Invalid stop criteria: {}. Must be a callable "
"or dict".format(stop))
if callable(stop):
nargs = len(inspect.getargspec(stop).args)
is_method = isinstance(stop, types.MethodType)
if (is_method and nargs != 3) or (not is_method and nargs != 2):
raise ValueError(
"Invalid stop criteria: {}. Callable "
"criteria must take exactly 2 parameters.".format(stop))
config = config or {}
self._run_identifier = Experiment._register_if_needed(run)
spec = {
"run": self._run_identifier,
"stop": stop,
"config": config,
"resources_per_trial": resources_per_trial,
"num_samples": num_samples,
"local_dir": os.path.abspath(
os.path.expanduser(local_dir or DEFAULT_RESULTS_DIR)),
"upload_dir": upload_dir,
"trial_name_creator": trial_name_creator,
"loggers": loggers,
"sync_to_driver": sync_to_driver,
"checkpoint_freq": checkpoint_freq,
"checkpoint_at_end": checkpoint_at_end,
"keep_checkpoints_num": keep_checkpoints_num,
"checkpoint_score_attr": checkpoint_score_attr,
"export_formats": export_formats or [],
"max_failures": max_failures,
"restore": os.path.abspath(os.path.expanduser(restore))
if restore else None
}
self.name = name or self._run_identifier
self.spec = spec
@classmethod
def from_json(cls, name, spec):
"""Generates an Experiment object from JSON.
Args:
name (str): Name of Experiment.
spec (dict): JSON configuration of experiment.
"""
if "run" not in spec:
raise TuneError("No trainable specified!")
# Special case the `env` param for RLlib by automatically
# moving it into the `config` section.
if "env" in spec:
spec["config"] = spec.get("config", {})
spec["config"]["env"] = spec["env"]
del spec["env"]
spec = copy.deepcopy(spec)
run_value = spec.pop("run")
try:
exp = cls(name, run_value, **spec)
except TypeError:
raise TuneError("Improper argument from JSON: {}.".format(spec))
return exp
@classmethod
def _register_if_needed(cls, run_object):
"""Registers Trainable or Function at runtime.
Assumes already registered if run_object is a string.
Also, does not inspect interface of given run_object.
Arguments:
run_object (str|function|class): Trainable to run. If string,
assumes it is an ID and does not modify it. Otherwise,
returns a string corresponding to the run_object name.
Returns:
A string representing the trainable identifier.
"""
if isinstance(run_object, six.string_types):
return run_object
elif isinstance(run_object, sample_from):
logger.warning("Not registering trainable. Resolving as variant.")
return run_object
elif isinstance(run_object, type) or callable(run_object):
name = "DEFAULT"
if hasattr(run_object, "__name__"):
name = run_object.__name__
else:
logger.warning(
"No name detected on trainable. Using {}.".format(name))
register_trainable(name, run_object)
return name
else:
raise TuneError("Improper 'run' - not string nor trainable.")
@property
def local_dir(self):
return self.spec.get("local_dir")
@property
def checkpoint_dir(self):
if self.local_dir:
return os.path.join(self.local_dir, self.name)
@property
def remote_checkpoint_dir(self):
if self.spec["upload_dir"]:
return os.path.join(self.spec["upload_dir"], self.name)
@property
def run_identifier(self):
"""Returns a string representing the trainable identifier."""
return self._run_identifier
def convert_to_experiment_list(experiments):
"""Produces a list of Experiment objects.
Converts input from dict, single experiment, or list of
experiments to list of experiments. If input is None,
will return an empty list.
Arguments:
experiments (Experiment | list | dict): Experiments to run.
Returns:
List of experiments.
"""
exp_list = experiments
# Transform list if necessary
if experiments is None:
exp_list = []
elif isinstance(experiments, Experiment):
exp_list = [experiments]
elif type(experiments) is dict:
exp_list = [
Experiment.from_json(name, spec)
for name, spec in experiments.items()
]
# Validate exp_list
if (type(exp_list) is list
and all(isinstance(exp, Experiment) for exp in exp_list)):
if len(exp_list) > 1:
logger.warning("All experiments will be "
"using the same SearchAlgorithm.")
else:
raise TuneError("Invalid argument: {}".format(experiments))
return exp_list
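# A minimal usage sketch (not from the Ray sources): build a spec dict, let
# convert_to_experiment_list normalize it into Experiment objects, and inspect
# the result. The trainable below is a throwaway placeholder.
if __name__ == "__main__":
    def _toy_trainable(config, reporter):
        reporter(mean_accuracy=config["alpha"])
    toy_spec = {
        "run": _toy_trainable,
        "config": {"alpha": 0.5},
        "num_samples": 2,
    }
    experiments = convert_to_experiment_list({"toy_experiment": toy_spec})
    print([exp.name for exp in experiments])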
| 33.59919
| 79
| 0.581998
|
9b9516f6d0bdedb30e9ddcb419639920fe6e000f
| 10,340
|
py
|
Python
|
tensorflow/lite/python/interpreter.py
|
Sonata-Wang/tensorflow
|
8bbef0cd77879d05ed69bf30e76087847a8ca4a2
|
[
"Apache-2.0"
] | 36
|
2016-12-17T15:25:25.000Z
|
2022-01-29T21:50:53.000Z
|
tensorflow/lite/python/interpreter.py
|
shekharpalit/tensorflow
|
6aa83398ab03bfae822f36772757097bcb98b6ed
|
[
"Apache-2.0"
] | 30
|
2016-10-04T15:38:08.000Z
|
2020-07-16T12:09:33.000Z
|
tensorflow/lite/python/interpreter.py
|
shekharpalit/tensorflow
|
6aa83398ab03bfae822f36772757097bcb98b6ed
|
[
"Apache-2.0"
] | 36
|
2017-07-27T21:12:40.000Z
|
2022-02-03T16:45:56.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python TF-Lite interpreter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
# pylint: disable=g-import-not-at-top
try:
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export as _tf_export
# Lazy load since some of the performance benchmark skylark rules
# break dependencies. Must use double quotes to match code internal rewrite
# rule.
# pylint: disable=g-inconsistent-quotes
_interpreter_wrapper = LazyLoader(
"_interpreter_wrapper", globals(),
"tensorflow.lite.python.interpreter_wrapper."
"tensorflow_wrap_interpreter_wrapper")
# pylint: enable=g-inconsistent-quotes
del LazyLoader
except ImportError:
# When full Tensorflow Python PIP is not available do not use lazy load
  # and instead use the tflite_runtime path.
from tflite_runtime.lite.python import interpreter_wrapper as _interpreter_wrapper
def tf_export_dummy(*x, **kwargs):
del x, kwargs
return lambda x: x
_tf_export = tf_export_dummy
@_tf_export('lite.Interpreter')
class Interpreter(object):
"""Interpreter inferace for TF-Lite Models."""
def __init__(self, model_path=None, model_content=None):
"""Constructor.
Args:
model_path: Path to TF-Lite Flatbuffer file.
model_content: Content of model.
Raises:
ValueError: If the interpreter was unable to create.
"""
if model_path and not model_content:
self._interpreter = (
_interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromFile(
model_path))
if not self._interpreter:
raise ValueError('Failed to open {}'.format(model_path))
elif model_content and not model_path:
# Take a reference, so the pointer remains valid.
# Since python strings are immutable then PyString_XX functions
# will always return the same pointer.
self._model_content = model_content
self._interpreter = (
_interpreter_wrapper.InterpreterWrapper_CreateWrapperCPPFromBuffer(
model_content))
    elif not model_path and not model_content:
raise ValueError('`model_path` or `model_content` must be specified.')
else:
raise ValueError('Can\'t both provide `model_path` and `model_content`')
def allocate_tensors(self):
self._ensure_safe()
return self._interpreter.AllocateTensors()
def _safe_to_run(self):
"""Returns true if there exist no numpy array buffers.
This means it is safe to run tflite calls that may destroy internally
allocated memory. This works, because in the wrapper.cc we have made
the numpy base be the self._interpreter.
"""
# NOTE, our tensor() call in cpp will use _interpreter as a base pointer.
# If this environment is the only _interpreter, then the ref count should be
# 2 (1 in self and 1 in temporary of sys.getrefcount).
return sys.getrefcount(self._interpreter) == 2
def _ensure_safe(self):
"""Makes sure no numpy arrays pointing to internal buffers are active.
This should be called from any function that will call a function on
_interpreter that may reallocate memory e.g. invoke(), ...
Raises:
RuntimeError: If there exist numpy objects pointing to internal memory
then we throw.
"""
if not self._safe_to_run():
raise RuntimeError("""There is at least 1 reference to internal data
in the interpreter in the form of a numpy array or slice. Be sure to
only hold the function returned from tensor() if you are using raw
data access.""")
def _get_tensor_details(self, tensor_index):
"""Gets tensor details.
Args:
tensor_index: Tensor index of tensor to query.
Returns:
a dictionary containing the name, index, shape and type of the tensor.
Raises:
ValueError: If tensor_index is invalid.
"""
tensor_index = int(tensor_index)
tensor_name = self._interpreter.TensorName(tensor_index)
tensor_size = self._interpreter.TensorSize(tensor_index)
tensor_type = self._interpreter.TensorType(tensor_index)
tensor_quantization = self._interpreter.TensorQuantization(tensor_index)
if not tensor_name or not tensor_type:
raise ValueError('Could not get tensor details')
details = {
'name': tensor_name,
'index': tensor_index,
'shape': tensor_size,
'dtype': tensor_type,
'quantization': tensor_quantization,
}
return details
def get_tensor_details(self):
"""Gets tensor details for every tensor with valid tensor details.
Tensors where required information about the tensor is not found are not
added to the list. This includes temporary tensors without a name.
Returns:
A list of dictionaries containing tensor information.
"""
tensor_details = []
for idx in range(self._interpreter.NumTensors()):
try:
tensor_details.append(self._get_tensor_details(idx))
except ValueError:
pass
return tensor_details
def get_input_details(self):
"""Gets model input details.
Returns:
A list of input details.
"""
return [
self._get_tensor_details(i) for i in self._interpreter.InputIndices()
]
def set_tensor(self, tensor_index, value):
"""Sets the value of the input tensor. Note this copies data in `value`.
If you want to avoid copying, you can use the `tensor()` function to get a
numpy buffer pointing to the input buffer in the tflite interpreter.
Args:
tensor_index: Tensor index of tensor to set. This value can be gotten from
the 'index' field in get_input_details.
value: Value of tensor to set.
Raises:
ValueError: If the interpreter could not set the tensor.
"""
self._interpreter.SetTensor(tensor_index, value)
def resize_tensor_input(self, input_index, tensor_size):
"""Resizes an input tensor.
Args:
input_index: Tensor index of input to set. This value can be gotten from
the 'index' field in get_input_details.
tensor_size: The tensor_shape to resize the input to.
Raises:
ValueError: If the interpreter could not resize the input tensor.
"""
self._ensure_safe()
    # `ResizeInputTensor` now only accepts an int32 numpy array as the
    # `tensor_size` parameter.
tensor_size = np.array(tensor_size, dtype=np.int32)
self._interpreter.ResizeInputTensor(input_index, tensor_size)
def get_output_details(self):
"""Gets model output details.
Returns:
A list of output details.
"""
return [
self._get_tensor_details(i) for i in self._interpreter.OutputIndices()
]
def get_tensor(self, tensor_index):
"""Gets the value of the input tensor (get a copy).
If you wish to avoid the copy, use `tensor()`. This function cannot be used
to read intermediate results.
Args:
tensor_index: Tensor index of tensor to get. This value can be gotten from
the 'index' field in get_output_details.
Returns:
a numpy array.
"""
return self._interpreter.GetTensor(tensor_index)
def tensor(self, tensor_index):
"""Returns function that gives a numpy view of the current tensor buffer.
    This allows reading and writing to this tensor w/o copies. This more
closely mirrors the C++ Interpreter class interface's tensor() member, hence
the name. Be careful to not hold these output references through calls
to `allocate_tensors()` and `invoke()`. This function cannot be used to read
intermediate results.
Usage:
```
interpreter.allocate_tensors()
input = interpreter.tensor(interpreter.get_input_details()[0]["index"])
output = interpreter.tensor(interpreter.get_output_details()[0]["index"])
for i in range(10):
input().fill(3.)
interpreter.invoke()
print("inference %s" % output())
```
Notice how this function avoids making a numpy array directly. This is
because it is important to not hold actual numpy views to the data longer
than necessary. If you do, then the interpreter can no longer be invoked,
because it is possible the interpreter would resize and invalidate the
    referenced tensors. The NumPy API doesn't allow any mutability of
    the underlying buffers.
WRONG:
```
input = interpreter.tensor(interpreter.get_input_details()[0]["index"])()
output = interpreter.tensor(interpreter.get_output_details()[0]["index"])()
interpreter.allocate_tensors() # This will throw RuntimeError
for i in range(10):
input.fill(3.)
      interpreter.invoke()  # this will throw RuntimeError since input, output
      # still reference the interpreter's internal memory
```
Args:
tensor_index: Tensor index of tensor to get. This value can be gotten from
the 'index' field in get_output_details.
Returns:
A function that can return a new numpy array pointing to the internal
TFLite tensor state at any point. It is safe to hold the function forever,
but it is not safe to hold the numpy array forever.
"""
return lambda: self._interpreter.tensor(self._interpreter, tensor_index)
def invoke(self):
"""Invoke the interpreter.
Be sure to set the input sizes, allocate tensors and fill values before
calling this.
Raises:
ValueError: When the underlying interpreter fails raise ValueError.
"""
self._ensure_safe()
self._interpreter.Invoke()
def reset_all_variables(self):
return self._interpreter.ResetVariableTensors()
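# A minimal end-to-end sketch (not part of this module): run a single inference
# with the Interpreter defined above. The model path is a placeholder and a
# valid .tflite flatbuffer is assumed to exist there; the input is zero-filled
# purely for illustration.
if __name__ == '__main__':
  interpreter = Interpreter(model_path='/tmp/model.tflite')
  interpreter.allocate_tensors()
  input_details = interpreter.get_input_details()
  output_details = interpreter.get_output_details()
  dummy = np.zeros(input_details[0]['shape'], dtype=input_details[0]['dtype'])
  interpreter.set_tensor(input_details[0]['index'], dummy)
  interpreter.invoke()
  print(interpreter.get_tensor(output_details[0]['index']))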
| 35.050847
| 84
| 0.702708
|
c036218e7e416d328cbbce80d5e8e180fab5782d
| 10,134
|
py
|
Python
|
src/.history/Test/HiwinRT605_test_20190619124542.py
|
SamKaiYang/2019_Hiwin_Shaking
|
d599f8c87dc4da89eae266990d12eb3a8b0f3e16
|
[
"MIT"
] | null | null | null |
src/.history/Test/HiwinRT605_test_20190619124542.py
|
SamKaiYang/2019_Hiwin_Shaking
|
d599f8c87dc4da89eae266990d12eb3a8b0f3e16
|
[
"MIT"
] | null | null | null |
src/.history/Test/HiwinRT605_test_20190619124542.py
|
SamKaiYang/2019_Hiwin_Shaking
|
d599f8c87dc4da89eae266990d12eb3a8b0f3e16
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# license removed for brevity
# Strategy: move the robot arm back and forth between four points
import rospy
import os
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import math
import enum
import Hiwin_RT605_ROS as strategy
pos_feedback_times = 0
mode_feedback_times = 0
msg_feedback = 1
Arm_state_flag = 0
Strategy_flag = 0
arm_move_times = 1
##-----------server feedback arm state----------
def Arm_state(req):
    global CurrentMissionType, Arm_state_flag, Strategy_flag
    Arm_state_flag = int('%s' % req.Arm_state)
    if Arm_state_flag == 1:   # arm is busy
        Strategy_flag = 0
        return(1)
    if Arm_state_flag == 0:   # arm is idle
        Strategy_flag = 1
        return(0)
    if Arm_state_flag == 6:   # program interrupted
        Strategy_flag = 6
        return(6)
def strategy_server():
#rospy.init_node(NAME)
s = rospy.Service('arm_state',arm_state, Arm_state) ##server arm point data
#rospy.spin() #spinone
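# Illustrative sketch (added): a hypothetical client side of the 'arm_state'
# service registered above, using the standard rospy ServiceProxy pattern.
# The single request field mirrors the handler's use of req.Arm_state; the
# exact srv definition lives in ROS_Socket.srv and is assumed here.
#
# def report_arm_state(value):
#     rospy.wait_for_service('arm_state')
#     client = rospy.ServiceProxy('arm_state', arm_state)
#     return client(value)   # e.g. report_arm_state(0) -> arm idle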
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # Falling off the end stops iteration; raising StopIteration inside a
        # generator is a RuntimeError on Python 3.7+ (PEP 479).
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
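# Illustrative sketch (added): how the switch/case helper above is used. A
# matching case sets self.fall, so later case() calls fall through like a
# C-style switch until a break. The handler names are hypothetical.
#
# for case in switch(value):
#     if case(1):
#         handle_one()
#         break
#     if case(2, 3):
#         handle_two_or_three()
#         break
#     if case():            # default
#         handle_other()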
##------------class-------
class point():
def __init__(self,x,y,z,pitch,roll,yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
##-------------------------strategy---------------------
##----- Mission parameters
GetInfoFlag = False
ExecuteFlag = False
GetKeyFlag = False
MotionSerialKey = []
MissionType_Flag = 0
MotionStep = 0
##----- Arm motion / position info
angle_SubCue = 0
LinePtpFlag = False
MoveFlag = False
PushBallHeight = 6
ObjAboveHeight = 10
SpeedValue = 10
MissionEndFlag = False
CurrentMissionType = 0
##---------------Enum---------------##
class ArmMotionCommand(enum.IntEnum):
Arm_Stop = 0
Arm_MoveToTargetUpside = 1
Arm_MoveFowardDown = 2
Arm_MoveVision = 3
Arm_PushBall = 4
Arm_LineUp = 5
Arm_LineDown = 6
Arm_Angle = 7
Arm_StopPush = 8
class MissionType(enum.IntEnum):
Get_Img = 0
PushBall = 1
Pushback = 2
Mission_End = 3
##-----------position classes------------##
class pos():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = 0
self.y = 36.8
self.z = 11.35
self.pitch = -90
self.roll = 0
self.yaw = 0
class Target_pos():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = 0
self.y = 36.8
self.z = 11.35
self.pitch = -90
self.roll = 0
self.yaw = 0
class TargetPush_pos():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = 0
self.y = 36.8
self.z = 11.35
self.pitch = -90
self.roll = 0
self.yaw = 0
class Item():
def __init__(self,x,y,label):
self.x = x
self.y = y
self.label = label
def Mission_Trigger():
if GetInfoFlag == True and GetKeyFlag == False and ExecuteFlag == False:
GetInfo_Mission()
if GetInfoFlag == False and GetKeyFlag == True and ExecuteFlag == False:
GetKey_Mission()
if GetInfoFlag == False and GetKeyFlag == False and ExecuteFlag == True:
Execute_Mission()
def GetInfo_Mission():
global GetInfoFlag,GetKeyFlag,ExecuteFlag
#Billiards_Calculation()
GetInfoFlag = False
GetKeyFlag = True
ExecuteFlag = False
def GetKey_Mission():
global GetInfoFlag,GetKeyFlag,ExecuteFlag,MotionKey,MotionSerialKey
Mission = Get_MissionType()
MissionItem(Mission)
MotionSerialKey = MotionKey
GetInfoFlag = False
GetKeyFlag = False
ExecuteFlag = True
def Get_MissionType():
global MissionType_Flag,CurrentMissionType
    for case in switch(MissionType_Flag):  # send a command to the socket to select the arm motion
if case(0):
Type = MissionType.PushBall
MissionType_Flag +=1
break
if case(1):
Type = MissionType.Pushback
MissionType_Flag -=1
break
CurrentMissionType = Type
return Type
def MissionItem(ItemNo):
global MotionKey
Key_PushBallCommand = [\
ArmMotionCommand.Arm_MoveToTargetUpside,\
ArmMotionCommand.Arm_LineDown,\
ArmMotionCommand.Arm_PushBall,\
ArmMotionCommand.Arm_LineUp,\
ArmMotionCommand.Arm_Stop,\
]
Key_PushBackCommand = [\
ArmMotionCommand.Arm_MoveVision,\
ArmMotionCommand.Arm_Stop,\
ArmMotionCommand.Arm_StopPush,\
]
    for case in switch(ItemNo):  # send a command to the socket to select the arm motion
if case(MissionType.PushBall):
MotionKey = Key_PushBallCommand
break
if case(MissionType.Pushback):
MotionKey = Key_PushBackCommand
break
return MotionKey
def Execute_Mission():
global GetInfoFlag,GetKeyFlag,ExecuteFlag,MotionKey,MotionStep,MotionSerialKey,MissionEndFlag,CurrentMissionType
print("strategy :" ,state_flag.Strategy)
print("state :" ,state_flag.Arm)
if state_flag.Arm == 0 and state_flag.Strategy == 1:
state_flag.Strategy_feedback = 0
if MotionKey[MotionStep] == ArmMotionCommand.Arm_Stop:
if MissionEndFlag == True:
CurrentMissionType = MissionType.Mission_End
GetInfoFlag = False
GetKeyFlag = False
ExecuteFlag = False
print("Mission_End")
elif CurrentMissionType == MissionType.PushBall:
GetInfoFlag = False
GetKeyFlag = True
ExecuteFlag = False
MotionStep = 0
print("PushBall")
else:
GetInfoFlag = True
GetKeyFlag = False
ExecuteFlag = False
MotionStep = 0
else:
MotionItem(MotionSerialKey[MotionStep])
MotionStep += 1
def MotionItem(ItemNo):
global angle_SubCue,SpeedValue,PushFlag,LinePtpFlag,MissionEndFlag
SpeedValue = 5
    for case in switch(ItemNo):  # send a command to the socket to select the arm motion
if case(ArmMotionCommand.Arm_Stop):
MoveFlag = False
print("Arm_Stop")
break
if case(ArmMotionCommand.Arm_StopPush):
MoveFlag = False
            PushFlag = True  # rescan objects
print("Arm_StopPush")
break
if case(ArmMotionCommand.Arm_MoveToTargetUpside):
pos.x = 10
pos.y = 36.8
pos.z = 11.35
pos.pitch = -90
pos.roll = 0
pos.yaw = 10
MoveFlag = True
LinePtpFlag = False
SpeedValue = 10
print("Arm_MoveToTargetUpside")
break
if case(ArmMotionCommand.Arm_LineUp):
pos.z = ObjAboveHeight
MoveFlag = True
LinePtpFlag = True
SpeedValue = 5
print("Arm_LineUp")
break
if case(ArmMotionCommand.Arm_LineDown):
pos.z = PushBallHeight
MoveFlag = True
LinePtpFlag = True
SpeedValue = 5
print("Arm_LineDown")
break
if case(ArmMotionCommand.Arm_PushBall):
pos.x = -10
pos.y = 36.8
pos.z = 11.35
pos.pitch = -90
pos.roll = 0
pos.yaw = -10
            SpeedValue = 10  ## increased speed, still to be tested
MoveFlag = True
LinePtpFlag = False
print("Arm_PushBall")
break
if case(ArmMotionCommand.Arm_MoveVision):
pos.x = 0
pos.y = 36.8
pos.z = 11.35
pos.pitch = -90
pos.roll = 0
pos.yaw = 0
SpeedValue = 10
MoveFlag = True
LinePtpFlag = False
            ## mission end flag
MissionEndFlag = True
print("Arm_MoveVision")
break
if case(ArmMotionCommand.Arm_MoveFowardDown):
pos.x = 0
pos.y = 36.8
pos.z = 11.35
pos.pitch = -90
pos.roll = 0
pos.yaw = 0
MoveFlag = True
LinePtpFlag = False
print("Arm_MoveFowardDown")
break
if case(): # default, could also just omit condition or 'if True'
print ("something else!")
# No need to break here, it'll stop anyway
if MoveFlag == True:
if LinePtpFlag == False:
print('x: ',pos.x,' y: ',pos.y,' z: ',pos.z,' pitch: ',pos.pitch,' roll: ',pos.roll,' yaw: ',pos.yaw)
#strategy_client_Arm_Mode(0,1,0,30,2)#action,ra,grip,vel,both
strategy.strategy_client_Arm_Mode(2,1,0,SpeedValue,2)#action,ra,grip,vel,both
strategy.strategy_client_pos_move(pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw)
elif LinePtpFlag == True:
#strategy_client_Arm_Mode(0,1,0,40,2)#action,ra,grip,vel,both
print('x: ',pos.x,' y: ',pos.y,' z: ',pos.z,' pitch: ',pos.pitch,' roll: ',pos.roll,' yaw: ',pos.yaw)
strategy.strategy_client_Arm_Mode(3,1,0,SpeedValue,2)#action,ra,grip,vel,both
strategy.strategy_client_pos_move(pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw)
#action: ptp line
#ra : abs rel
    #grip : gripper
#vel speed
#both : Ctrl_Mode
##-------------strategy end ------------
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
argv = rospy.myargv()
rospy.init_node('strategy', anonymous=True)
GetInfoFlag = True #Test no data
strategy_server()
strategy.strategy_client_Arm_Mode(0,1,0,20,2)#action,ra,grip,vel,both
while 1:
Mission_Trigger()
if CurrentMissionType == MissionType.Mission_End:
strategy.rospy.on_shutdown(myhook)
strategy.rospy.spin()
rospy.spin()
| 30.616314
| 116
| 0.57687
|
6744366509a32f1853489155bb5d5acd13d44a4a
| 227
|
py
|
Python
|
sqlalchemy_utils/exceptions.py
|
jd/sqlalchemy-utils
|
fa78e45f9bd38b46d5aface41914dad022c0099b
|
[
"BSD-3-Clause"
] | 1
|
2015-07-06T11:19:11.000Z
|
2015-07-06T11:19:11.000Z
|
sqlalchemy_utils/exceptions.py
|
jd/sqlalchemy-utils
|
fa78e45f9bd38b46d5aface41914dad022c0099b
|
[
"BSD-3-Clause"
] | null | null | null |
sqlalchemy_utils/exceptions.py
|
jd/sqlalchemy-utils
|
fa78e45f9bd38b46d5aface41914dad022c0099b
|
[
"BSD-3-Clause"
] | null | null | null |
"""Global SQLAlchemy-Utils exception classes.
"""
class ImproperlyConfigured(Exception):
"""
SQLAlchemy-Utils is improperly configured; normally due to usage of
a utility that depends on a missing library.
"""
| 25.222222
| 71
| 0.722467
|
3df1044e69977585208295bb305ce79f3ee0b722
| 4,310
|
py
|
Python
|
packages/tools/lektor_tools.py
|
Richienb/terminalguide
|
5246927c4d1197909c0ae2c7440f9bbed45db3cd
|
[
"MIT"
] | null | null | null |
packages/tools/lektor_tools.py
|
Richienb/terminalguide
|
5246927c4d1197909c0ae2c7440f9bbed45db3cd
|
[
"MIT"
] | null | null | null |
packages/tools/lektor_tools.py
|
Richienb/terminalguide
|
5246927c4d1197909c0ae2c7440f9bbed45db3cd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os.path
from lektor.pluginsystem import Plugin
from lektor.context import get_ctx
def snip_resolver(name):
ctx = get_ctx()
if not ctx:
return 'DEVMODE?'
val = 'SNIPPET ' + name
env = ctx.env
filename = os.path.join(env.root_path, 'snippets', name + '.txt')
if os.path.exists(filename):
with open(filename, 'r') as f:
val = f.read()
ctx.record_dependency(filename)
else:
val = 'SNIPPET missing: ' + name
return val
def snipi_resolver(name):
val = snip_resolver(name)
return val.strip()
def mode_link(mode):
try:
ctx = get_ctx()
if not ctx:
return 'DEVMODE?'
pad = ctx.pad
record = pad.get('/mode/' + mode)
if record:
title = record['title']
if title.endswith(')'):
title = title[:title.rindex('(')]
title = title.lower()
else:
title = "mode not found: " + mode
title = title.strip()
return '<a href="{url}">{title}</a>'.format(title=title, url='/mode/' + mode)
except Exception as ex:
print('failed:', ex)
raise
def seq_link(seq):
try:
ctx = get_ctx()
if not ctx:
return 'DEVMODE?'
pad = ctx.pad
record = pad.get('/seq/' + seq)
if record:
title = record['title']
if title.endswith(')'):
title = title[:title.rindex('(')]
title = title.lower()
else:
title = "Sequence not found: " + seq
title = title.strip()
return '<a href="{url}">{title}</a>'.format(title=title, url='/seq/' + seq)
except Exception as ex:
print('failed:', ex)
raise
def sgr_link(seq):
try:
ctx = get_ctx()
if not ctx:
return 'DEVMODE?'
pad = ctx.pad
record = pad.get('/attr/' + seq)
if record:
title = record['title']
if title.endswith(')'):
title = title[:title.rindex('(')]
title = title.lower()
else:
title = "Attribute not found: " + seq
title = title.strip()
return '<a href="{url}">{title}</a>'.format(title=title, url='/attr/' + seq)
except Exception as ex:
print('failed:', ex)
raise
def seq_param(name, placeholder='Ⓝ'):
ret = "<span class='term-param'><ruby>" + placeholder
ret += "<rt>" + name + " </rt>"
ret += "</ruby></span>"
return ret
def seq(l):
ret = "<span class='term-literal'><ruby>"
for i in l:
v = i
if i == '\033':
v = 'ESC'
elif i == '\0':
v = 'NUL'
elif i == '\b':
v = 'BS'
elif i == '\n':
v = 'LF'
elif i == '\r':
v = 'CR'
elif i == '\021':
v = 'XON'
elif i == '\023':
v = 'XOFF'
elif i == '\026':
v = 'SYN'
elif i == '\030':
v = 'CAN'
elif i == '\032':
v = 'SUB'
elif i == ' ':
v = '␣'
elif i == '\177':
v = 'DEL'
elif i == '\x9c':
v = 'ST'
ret += v + "<rt>" + hex(ord(i))[2:].rjust(2,'0') + " </rt>"
ret += "</ruby></span>"
return ret
def jinja_hex(s):
r = []
for ch in s:
r.append(hex(ord(ch))[2:].rjust(2,'0'))
return " ".join(r)
force_new_line = '<span style="display:block"></span>'
jinja_miss = force_new_line + '🛆 '
jinja_info = force_new_line + '🛈 '
class ToolsPlugin(Plugin):
name = 'tools'
description = u'Internal tools.'
def on_setup_env(self, **extra):
self.env.jinja_env.filters['hex'] = jinja_hex
self.env.jinja_env.globals['seq'] = seq
self.env.jinja_env.globals['seq_param'] = seq_param
self.env.jinja_env.globals['snip'] = snip_resolver
self.env.jinja_env.globals['snipi'] = snipi_resolver
self.env.jinja_env.globals['mode_link'] = mode_link
self.env.jinja_env.globals['seq_link'] = seq_link
self.env.jinja_env.globals['sgr_link'] = sgr_link
self.env.jinja_env.globals['info'] = jinja_info
self.env.jinja_env.globals['miss'] = jinja_miss
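# Illustrative sketch (added): approximate output of the helpers registered
# above, traced by hand from the functions; values are for reference only.
#
# jinja_hex('AB')     -> '41 42'
# seq_param('count')  -> "<span class='term-param'><ruby>Ⓝ<rt>count </rt></ruby></span>"
# seq('\x1b[')        -> "<span class='term-literal'><ruby>ESC<rt>1b </rt>[<rt>5b </rt></ruby></span>"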
| 26.9375
| 85
| 0.494432
|
2a6da1c0108da50f479b2161b470ed87a09542c0
| 2,424
|
py
|
Python
|
application/physical/GIS/qgis3-tests/test_qgisaddlayercsv.py
|
cprior/sunlightmap
|
402809aaa48cd540d73b1954da353b6dfedb405d
|
[
"MIT"
] | 1
|
2018-01-05T11:18:57.000Z
|
2018-01-05T11:18:57.000Z
|
application/physical/GIS/qgis3-tests/test_qgisaddlayercsv.py
|
cprior/sunlightmap_app
|
402809aaa48cd540d73b1954da353b6dfedb405d
|
[
"MIT"
] | 8
|
2017-10-23T19:14:26.000Z
|
2017-11-16T18:01:23.000Z
|
application/physical/GIS/qgis3-tests/test_qgisaddlayercsv.py
|
cprior/sunlightmap_app
|
402809aaa48cd540d73b1954da353b6dfedb405d
|
[
"MIT"
] | null | null | null |
"""
This unittest for QGIS 3 checks adding layers.
"""
import os
# import sys # @todo: setUp with an environment variable for PYTHONPATH
import unittest
#from qgis import *
from qgis.core import *
class TestQgisAddLayer(unittest.TestCase):
def setUp(self):
try:
self.qgis_version = QGis.QGIS_VERSION_INT
except NameError:
self.qgis_version = Qgis.QGIS_VERSION_INT
except:
self.fail("cannot get QGIS_VERSION_INT")
if self.qgis_version < 29900:
self.fail("unittest for QGIS 3 and higher only.")
try:
os.environ["QT_QPA_PLATFORM"] = "offscreen"
QgsApplication.setPrefixPath("/usr", False)
self.qgs = QgsApplication([], False)
self.qgs.initQgis()
except:
self.fail("cannot init qgis application")
def tearDown(self):
self.qgs.quit()
def test_qgisnewprojecthasnolayers(self):
project = QgsProject()
self.assertFalse(project.mapLayers()) # an empty list [] is false
    def test_qgisnewprojectisdirty(self):
project = QgsProject()
project.setTitle("project with QGIS version " + str(self.qgis_version))
self.assertTrue(project.isDirty())
def test_qgisnewvectorlayer(self):
vlayer_land = QgsVectorLayer(
"./testdata/land/ne_10m_land.shp", 'land') # , 'memory')
self.assertTrue(vlayer_land.isValid()) # an empty list [] is false
def test_qgisaddvectorlayer(self):
project = QgsProject()
vlayer_land = QgsVectorLayer(
"./testdata/land/ne_10m_land.shp", 'land') # , 'memory')
project.addMapLayer(vlayer_land)
self.assertTrue(project.mapLayers()) # an empty list [] is false
def test_qgisaddtwovectorlayers(self):
project = QgsProject()
project.setTitle("foo")
project.setFileName("test.qgs")
vlayer_land = QgsVectorLayer("./testdata/land/ne_10m_land.shp", 'land')
# project.addMapLayer(vlayer_land)
vlayer_ocean = QgsVectorLayer(
"./testdata/ocean/ne_10m_ocean.shp", 'ocean')
# project.addMapLayer(vlayer_ocean)
project.addMapLayers([vlayer_land, vlayer_ocean])
self.assertTrue(len(project.mapLayers()) == 2)
self.assertTrue(project.isDirty())
if __name__ == '__main__':
unittest.main(exit=False, verbosity=1)
| 32.756757
| 79
| 0.640677
|
ebce82454a78d66791e52823f6e32759646fb597
| 1,252
|
py
|
Python
|
python/opscore/RO/Comm/BrowseURL.py
|
sdss/opscore
|
dd4f2b2ad525fe3dfe3565463de2c079a7e1232e
|
[
"BSD-3-Clause"
] | null | null | null |
python/opscore/RO/Comm/BrowseURL.py
|
sdss/opscore
|
dd4f2b2ad525fe3dfe3565463de2c079a7e1232e
|
[
"BSD-3-Clause"
] | 1
|
2021-08-17T21:08:14.000Z
|
2021-08-17T21:08:14.000Z
|
python/opscore/RO/Comm/BrowseURL.py
|
sdss/opscore
|
dd4f2b2ad525fe3dfe3565463de2c079a7e1232e
|
[
"BSD-3-Clause"
] | null | null | null |
"""Open a URL in the user's default browser.
The URL is opened in a background thread.
History:
2004-10-05 ROwen
2011-06-16 ROwen Ditched obsolete "except (SystemExit, KeyboardInterrupt): raise" code
"""
__all__ = ["browseURL"]
import threading
import six.moves.urllib.parse as parse
import webbrowser
class _BrowseURLThread(threading.Thread):
def __init__(self, url):
threading.Thread.__init__(self)
self.url = url
self.setDaemon(True)
def run(self):
url = self.url
try:
webbrowser.open(url)
return
except Exception as e:
pass
# failed! if this is a file URL with an anchor,
# try again without the anchor
urlTuple = parse.urlparse(url)
if urlTuple[0] == "file" and urlTuple[-1] != '':
urlTuple = urlTuple[0:-1] + ('',)
url = parse.urlunparse(urlTuple)
if not url:
return
        try:
            webbrowser.open(url)
            return
        except Exception as e:
            # failed!
            print("could not open URL %r: %s %r" % (url, e, e))
def browseURL(url):
newThread = _BrowseURLThread(url)
newThread.start()
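# Illustrative usage sketch (added): fire-and-forget open of a URL in the
# default browser. The call returns immediately because the work runs on a
# daemon thread; the URL below is just an example.
#
# browseURL("file:///tmp/docs/index.html#section-2")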
| 25.04
| 89
| 0.571885
|
e95fc9575a4fe693f4a307f8424c3edcbc9d196b
| 1,810
|
py
|
Python
|
test/parser/unit_operators/testcases/Log/Log_generator.py
|
AyishaR/deepC
|
1dc9707ef5ca9000fc13c3da7f1129685a83b494
|
[
"Apache-2.0"
] | 223
|
2020-04-15T20:34:33.000Z
|
2022-03-28T05:41:49.000Z
|
test/parser/unit_operators/testcases/Log/Log_generator.py
|
AyishaR/deepC
|
1dc9707ef5ca9000fc13c3da7f1129685a83b494
|
[
"Apache-2.0"
] | 42
|
2019-07-29T15:57:12.000Z
|
2020-04-08T15:12:48.000Z
|
test/parser/unit_operators/testcases/Log/Log_generator.py
|
AyishaR/deepC
|
1dc9707ef5ca9000fc13c3da7f1129685a83b494
|
[
"Apache-2.0"
] | 58
|
2019-07-22T11:46:19.000Z
|
2020-04-09T22:56:41.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
#
# This file is part of DNN compiler maintained at
# https://github.com/ai-techsystems/dnnCompiler
import os, sys
import numpy as np
separator = os.path.sep
import onnx
from onnx import *
sys.path.append(".."+separator+".."+separator+".."+separator+".."+separator+"python/parser")
from onnx_parser import *
op_name = 'Log'
inputs = [helper.make_tensor_value_info("x",TensorProto.FLOAT,(1,))]
outputs = [helper.make_tensor_value_info("y",TensorProto.FLOAT,(1,))]
nodes = []
nodes.append(helper.make_node("Log",["x"],["y"]))
graph = helper.make_graph(nodes, op_name+"_graph", inputs, outputs)
opset = (OperatorSetIdProto(version=11),)
model = helper.make_model(graph, opset_imports=opset)
onnx.checker.check_model(model)
t_prefix = ".." + separator + "testcases" + separator + op_name + separator + op_name
g_prefix = ".." + separator + "gold_files" + separator + op_name
onnx.save(model, t_prefix+".onnx")
parse(t_prefix+".onnx", g_prefix+".sym", onnx_output_file=t_prefix+".txt")
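# Illustrative sketch (added): the same recipe generalizes to other unary ONNX
# ops; only the op name changes. "Exp" below is a hypothetical example and is
# not part of this generator.
#
# exp_nodes = [helper.make_node("Exp", ["x"], ["y"])]
# exp_graph = helper.make_graph(exp_nodes, "Exp_graph", inputs, outputs)
# exp_model = helper.make_model(exp_graph, opset_imports=opset)
# onnx.checker.check_model(exp_model)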
| 38.510638
| 92
| 0.745304
|
c4cd6fc371b9b71426040dca2cec523e2480c979
| 16,525
|
py
|
Python
|
ansys/mapdl/core/_commands/solution/solid_constraints.py
|
da1910/pymapdl
|
305b70b30e61a78011e974ff4cb409ee21f89e13
|
[
"MIT"
] | null | null | null |
ansys/mapdl/core/_commands/solution/solid_constraints.py
|
da1910/pymapdl
|
305b70b30e61a78011e974ff4cb409ee21f89e13
|
[
"MIT"
] | null | null | null |
ansys/mapdl/core/_commands/solution/solid_constraints.py
|
da1910/pymapdl
|
305b70b30e61a78011e974ff4cb409ee21f89e13
|
[
"MIT"
] | null | null | null |
def da(self, area="", lab="", value1="", value2="", **kwargs):
"""Defines degree-of-freedom constraints on areas.
APDL Command: DA
Parameters
----------
area
Area on which constraints are to be specified. If ALL, apply to
all selected areas [ASEL]. A component name may also be substituted for AREA.
lab
Symmetry label (see below):
SYMM - Generate symmetry constraints. Requires no Value1 or Value2.
ASYM - Generate antisymmetry constraints. Requires no Value1 or Value2.
ANSYS DOF labels:
UX - Displacement in X direction.
UY - Displacement in Y direction.
UZ - Displacement in Z direction.
ROTX - Rotation about X axis.
ROTY - Rotation about Y axis.
ROTZ - Rotation about Z axis.
HDSP - Hydrostatic pressure.
PRES - Pressure.
TEMP, TBOT, TE2, TE3, . . ., TTOP - Temperature.
MAG - Magnetic scalar potential (see 2 below).
VOLT - Electric scalar potential (see 3 below).
AZ - Magnetic vector potential in Z direction (see 4 below).
CONC - Concentration.
ALL - Applies all appropriate DOF labels except HDSP.
value1
Value of DOF or table name reference on the area. Valid for all
DOF labels. To specify a table, enclose the table name in % signs
(e.g., DA,AREA,TEMP,%tabname%). Use the *DIM command to define a
table.
value2
For MAG and VOLT DOFs:
Notes
-----
For elements SOLID236 and SOLID237, if Lab = AZ and Value1 = 0, this
sets the flux-parallel condition for the edge formulation. (A flux-
normal condition is the natural boundary condition.) Do not use the DA
command to set the edge-flux DOF, AZ to a nonzero value.
If Lab = MAG and Value1 = 0, this sets the flux-normal condition for
the magnetic scalar potential formulations (MSP) (A flux-parallel
condition is the natural boundary condition for MSP.)
If Lab = VOLT and Value1 = 0, the J-normal condition is set (current
density (J) flow normal to the area). (A J-parallel condition is the
natural boundary condition.)
You can transfer constraints from areas to nodes with the DTRAN or
SBCTRAN commands. See the DK command for information about generating
other constraints on areas.
Symmetry and antisymmetry constraints are generated as described for
the DSYM command.
Tabular boundary conditions (VALUE = %tabname%) are available only for
the following degree of freedom labels: Electric (VOLT), Structural
(UX, UY, UZ, ROTX, ROTY, ROTZ), Acoustic (PRES, UX, UY, UZ), and
temperature (TEMP, TBOT, TE2, TE3, . . ., TTOP).
Constraints specified by the DA command can conflict with other
specified constraints. See Resolution of Conflicting Constraint
Specifications in the Basic Analysis Guide for details.
The DA command is also valid in PREP7.
Examples
--------
Select all areas with a z-coordinate of 0, then set value for all
degrees of freedom to be 0 on the selected areas.
>>> mapdl.asel('S', 'LOC', 'Z', 0)
>>> mapdl.da('ALL', 'ALL')
Apply symmetric boundary conditions on area 2.
>>> mapdl.da(2, 'SYMM')
Allow x-displacement on area 2.
>>> mapdl.da(2, 'UX', 1)
"""
command = f"DA,{area},{lab},{value1},{value2}"
return self.run(command, **kwargs)
def dadele(self, area="", lab="", **kwargs):
"""Deletes degree-of-freedom constraints on an area.
APDL Command: DADELE
Parameters
----------
area
Area for which constraints are to be deleted. If ALL, delete for
all selected areas [ASEL]. If AREA = P, graphical picking is
enabled and all remaining command fields are ignored (valid only in
the GUI). You can substitute a component name for AREA.
lab
Valid constraint labels are:
ALL - All constraints.
SYMM - Symmetry constraints.
ASYM - Antisymmetry constraints.
UX - Displacement in X direction.
UY - Displacement in Y direction.
UZ - Displacement in Z direction.
ROTX - Rotation about X axis.
ROTY - Rotation about Y axis.
ROTZ - Rotation about Z axis.
PRES - Pressure.
TEMP, TBOT, TE2, TE3, . . ., TTOP - Temperature.
MAG - Magnetic scalar potential.
VOLT - Electric scalar potential.
AX - Magnetic vector potential in X direction (see notes).
AY - Magnetic vector potential in Y direction.
AZ - Magnetic vector potential in Z direction (see notes).
CONC - Concentration.
Notes
-----
Deletes the degree of freedom constraints at an area (and all
corresponding finite element constraints) previously specified with the
DA command. See the DDELE command for delete details.
If the multiple species labels have been changed to user-defined labels
via the MSSPEC command, use the user-defined labels.
See the DA command for details on element applicability.
Warning:: : On previously meshed areas, all constraints on affected
nodes will be deleted, whether or not they were specified by the DA
command.
This command is also valid in PREP7.
"""
command = f"DADELE,{area},{lab}"
return self.run(command, **kwargs)
def dalist(self, area="", **kwargs):
"""Lists the DOF constraints on an area.
APDL Command: DALIST
Parameters
----------
area
List constraints for this area. If ALL (default), list for all
selected areas [ASEL]. If P1 = P, graphical picking is enabled and
all remaining command fields are ignored (valid only in the GUI).
A component name may also be substituted for AREA.
Notes
-----
Lists the degree of freedom constraints on an area previously specified
with the DA command.
This command is valid in any processor.
"""
command = f"DALIST,{area}"
return self.run(command, **kwargs)
def dk(self, kpoi="", lab="", value="", value2="", kexpnd="", lab2="",
lab3="", lab4="", lab5="", lab6="", **kwargs):
"""Defines DOF constraints at keypoints.
APDL Command: DK
Parameters
----------
kpoi
Keypoint at which constraint is to be specified. If ALL, apply to
all selected keypoints [KSEL]. If KPOI = P, graphical picking is
enabled and all remaining command fields are ignored (valid only in
the GUI). A component name may also be substituted for KPOI.
lab
Valid degree of freedom label. If ALL, use all appropriate labels
except HDSP. Structural labels: UX, UY, or UZ (displacements);
ROTX, ROTY, or ROTZ (rotations); WARP (warping); HDSP (hydrostatic
pressure). Thermal labels: TEMP, TBOT, TE2, TE3, . . ., TTOP
(temperature). Acoustic labels: PRES (pressure); UX, UY, or UZ
(displacements for FSI coupled elements). Electric labels: VOLT
(voltage). Magnetic labels: MAG (scalar magnetic potential); AX,
AY, or AZ (vector magnetic potentials). Diffusion labels: CONC
(concentration).
value
Degree of freedom value or table name reference for tabular
boundary conditions. To specify a table, enclose the table name in
percent signs (%) (e.g., DK,NODE,TEMP,%tabname%). Use the *DIM
command to define a table.
value2
Second degree of freedom value (if any). If the analysis type and
the degree of freedom allow a complex input, VALUE (above) is the
real component and VALUE2 is the imaginary component.
kexpnd
Expansion key:
0 - Constraint applies only to the node at this keypoint.
1 - Flags this keypoint for constraint expansion.
lab2, lab3, lab4, . . . , lab6
Additional degree of freedom labels. The same values are applied
to the keypoints for these labels.
Notes
-----
A keypoint may be flagged using KEXPND to allow its constraints to be
expanded to nodes on the attached solid model entities having similarly
flagged keypoint constraints. Constraints are transferred from
keypoints to nodes with the DTRAN or SBCTRAN commands. The expansion
uses interpolation to apply constraints to the nodes on the lines
between flagged keypoints. If all keypoints of an area or volume
region are flagged and the constraints (label and values) are equal,
the constraints are applied to the interior nodes of the region. See
the D command for a description of nodal constraints.
Tabular boundary conditions (VALUE = %tabname%) are available only for
the following degree of freedom labels: Electric (VOLT), structural
(UX, UY, UZ, ROTX, ROTY, ROTZ), Acoustic (PRES, UX, UY, UZ), and
temperature (TEMP, TBOT, TE2, TE3, . . ., TTOP).
Constraints specified by the DK command can conflict with other
specified constraints. See Resolution of Conflicting Constraint
Specifications in the Basic Analysis Guide for details.
This command is also valid in PREP7.
"""
command = f"DK,{kpoi},{lab},{value},{value2},{kexpnd},{lab2},{lab3},{lab4},{lab5},{lab6}"
return self.run(command, **kwargs)
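    # Illustrative sketch (added): hypothetical DK usage in the spirit of the
    # DA examples above -- pin keypoint 5 in UX and UY.
    #
    # >>> mapdl.dk(5, 'UX', 0)
    # >>> mapdl.dk(5, 'UY', 0)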
def dkdele(self, kpoi="", lab="", **kwargs):
"""Deletes DOF constraints at a keypoint.
APDL Command: DKDELE
Parameters
----------
kpoi
Keypoint for which constraint is to be deleted. If ALL, delete for
all selected keypoints [KSEL]. If KPOI = P, graphical picking is
enabled and all remaining command fields are ignored (valid only in
the GUI). A component name may also be substituted for KPOI.
lab
Valid degree of freedom label. If ALL, use all appropriate labels.
Structural labels: UX, UY, or UZ (displacements); ROTX, ROTY, or
ROTZ (rotations); WARP (warping). Thermal labels: TEMP, TBOT, TE2,
TE3, . . ., TTOP (temperature). Acoustic labels: PRES (pressure);
UX, UY, or UZ (displacements for FSI coupled elements). Electric
label: VOLT (voltage). Magnetic labels: MAG (scalar magnetic
potential); AX, AY, or AZ (vector magnetic potentials). Diffusion
label: CONC (concentration).
Notes
-----
Deletes the degree of freedom constraints (and all corresponding finite
element constraints) at a keypoint. See the DDELE command for details.
This command is also valid in PREP7.
"""
command = f"DKDELE,{kpoi},{lab}"
return self.run(command, **kwargs)
def dklist(self, kpoi="", **kwargs):
"""Lists the DOF constraints at keypoints.
APDL Command: DKLIST
Parameters
----------
kpoi
List constraints for this keypoint. If ALL (default), list for all
selected keypoints [KSEL]. If KPOI = P, graphical picking is
enabled and all remaining command fields are ignored (valid only in
the GUI). A component name may also be substituted for KPOI.
Notes
-----
Listing applies to the selected keypoints [KSEL] and the selected
degree of freedom labels [DOFSEL].
This command is valid in any processor.
"""
command = f"DKLIST,{kpoi}"
return self.run(command, **kwargs)
def dl(self, line="", area="", lab="", value1="", value2="", **kwargs):
"""Defines DOF constraints on lines.
APDL Command: DL
Parameters
----------
line
Line at which constraints are to be specified. If ALL, apply to all
selected lines [LSEL]. If LINE = P, graphical picking is enabled
and all remaining command fields are ignored (valid only in the
GUI). A component name may also be substituted for LINE.
area
Area containing line. The normal to the symmetry or antisymmetry
surface is assumed to lie on this area. Defaults to the lowest
numbered selected area containing the line number.
lab
Symmetry label (see 2):
SYMM - Generate symmetry constraints.
ASYM - Generate antisymmetry constraints.
value1
Value of DOF (real part) or table name reference on the line.
Valid for all DOF labels. To specify a table, enclose the table
name in % signs (e.g., DL,LINE,AREA,TEMP,%tabname%). Use the *DIM
command to define a table.
value2
For VOLT DOFs:
Notes
-----
You can transfer constraints from lines to nodes with the DTRAN or
SBCTRAN commands. See the DK command for information about generating
other constraints at lines.
Symmetry and antisymmetry constraints are generated as described on the
DSYM command.
Setting Lab = VOLT and Value1 = 0 applies the J-normal boundary
condition (current density vector (J) flows normal to the line). No
input is required for the J-parallel condition because it is the
natural boundary condition.
Tabular boundary conditions (Value1 = %tabname%) are available only for
the following degree of freedom labels: Electric (VOLT), Structural
(UX, UY, UZ, ROTX, ROTY, ROTZ), Acoustic (PRES, UX, UY, UZ), and
temperature (TEMP, TBOT, TE2, TE3, . . ., TTOP).
Constraints specified by the DL command can conflict with other
specified constraints. See Resolution of Conflicting Constraint
Specifications in the Basic Analysis Guide for details.
This command is also valid in PREP7.
"""
command = f"DL,{line},{area},{lab},{value1},{value2}"
return self.run(command, **kwargs)
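    # Illustrative sketch (added): hypothetical DL usage -- a symmetry
    # condition on line 3 (the area defaults to the lowest-numbered selected
    # area containing the line) and a fixed UX on line 4.
    #
    # >>> mapdl.dl(3, '', 'SYMM')
    # >>> mapdl.dl(4, '', 'UX', 0)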
def dldele(self, line="", lab="", **kwargs):
"""Deletes DOF constraints on a line.
APDL Command: DLDELE
Parameters
----------
line
Line for which constraints are to be deleted. If ALL, delete for
all selected lines [LSEL]. If LINE = P, graphical picking is
enabled and all remaining command fields are ignored (valid only in
the GUI). A component name may also be substituted for LINE
lab
Constraint label:
ALL - All constraints.
SYMM - Symmetry constraints.
ASYM - Antisymmetry constraints.
UX - Displacement in X direction.
UY - Displacement in Y direction.
UZ - Displacement in Z direction.
ROTX - Rotation about X axis.
ROTY - Rotation about Y axis.
ROTZ - Rotation about Z axis.
WARP - Warping magnitude.
PRES - Pressure.
TEMP, TBOT, TE2, TE3, . . ., TTOP - Temperature.
VOLT - Electric scalar potential.
AX - Magnetic vector potential in X direction.
AY - Magnetic vector potential in Y direction.
AZ - Magnetic vector potential in Z direction.
CONC - Concentration.
Notes
-----
Deletes the degree of freedom constraints (and all corresponding finite
element constraints) on a line previously specified with the DL
command. See the DDELE command for delete details.
Warning:: : On previously meshed lines, all constraints on affected
nodes will also be deleted, whether or not they were specified by the
DL command.
This command is also valid in PREP7.
"""
command = f"DLDELE,{line},{lab}"
return self.run(command, **kwargs)
def dllist(self, line="", **kwargs):
"""Lists DOF constraints on a line.
APDL Command: DLLIST
Parameters
----------
line
List constraints for this line. If ALL (default), list for all
selected lines [LSEL]. If LINE = P, graphical picking is enabled
and all remaining command fields are ignored (valid only in the
GUI). A component name may also be substituted for LINE.
Notes
-----
Lists the degree of freedom constraints on a line previously specified
with the DL command.
This command is valid in any processor.
"""
command = f"DLLIST,{line}"
return self.run(command, **kwargs)
def dtran(self, **kwargs):
"""Transfers solid model DOF constraints to the finite element model.
APDL Command: DTRAN
Notes
-----
Constraints are transferred only from selected solid model entities to
selected nodes. The DTRAN operation is also done if the SBCTRAN
command is issued, and is automatically done upon initiation of the
solution calculations [SOLVE].
This command is also valid in PREP7.
"""
command = f"DTRAN,"
return self.run(command, **kwargs)
| 32.593688
| 93
| 0.657368
|
f3b9678e04facb74be2995f5a4b6ca503802ef45
| 748
|
py
|
Python
|
lakey_finicity/models/birth_date.py
|
jeremydeanlakey/lakey-finicity-python
|
f0b5ae6febb9337f0e28731f631b726fca940d2c
|
[
"MIT"
] | 1
|
2021-02-09T14:44:55.000Z
|
2021-02-09T14:44:55.000Z
|
lakey_finicity/models/birth_date.py
|
jeremydeanlakey/lakey-finicity-python
|
f0b5ae6febb9337f0e28731f631b726fca940d2c
|
[
"MIT"
] | null | null | null |
lakey_finicity/models/birth_date.py
|
jeremydeanlakey/lakey-finicity-python
|
f0b5ae6febb9337f0e28731f631b726fca940d2c
|
[
"MIT"
] | 1
|
2022-01-26T18:09:33.000Z
|
2022-01-26T18:09:33.000Z
|
from dataclasses import dataclass
# https://community.finicity.com/s/article/Report-Consumers
@dataclass
class BirthDate(object):
year: int
month: int
day_of_month: int
def to_padded_string_dict(self) -> dict:
return {
'year': f'{self.year:04}', # The birthday's 4-digit year
'month': f'{self.month:02}', # The birthday's 2-digit month (01 is January)
'dayOfMonth': f'{self.day_of_month:02}', # The birthday's 2-digit day-of-month
}
@staticmethod
def from_dict(data: dict):
year = int(data['year'])
month = int(data['month'])
day_of_month = int(data['dayOfMonth'])
return BirthDate(year=year, month=month, day_of_month=day_of_month)
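# Illustrative sketch (added): round trip through the padded representation.
#
# bd = BirthDate(year=1984, month=3, day_of_month=7)
# bd.to_padded_string_dict()
# # -> {'year': '1984', 'month': '03', 'dayOfMonth': '07'}
# BirthDate.from_dict(bd.to_padded_string_dict()) == bd   # True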
| 31.166667
| 91
| 0.628342
|
de614908b53369726bd04f554ef8a7ea7b791581
| 1,098
|
py
|
Python
|
api/tests/batch_mode_split_10_requests/test_batch_mode_split_10_requests.py
|
zhampel/FakeFinder
|
2891a8649acc1dabdef07554d6acb346dd23dbae
|
[
"Apache-2.0"
] | null | null | null |
api/tests/batch_mode_split_10_requests/test_batch_mode_split_10_requests.py
|
zhampel/FakeFinder
|
2891a8649acc1dabdef07554d6acb346dd23dbae
|
[
"Apache-2.0"
] | null | null | null |
api/tests/batch_mode_split_10_requests/test_batch_mode_split_10_requests.py
|
zhampel/FakeFinder
|
2891a8649acc1dabdef07554d6acb346dd23dbae
|
[
"Apache-2.0"
] | null | null | null |
import requests
import json
def test_batch_mode_selimsef():
url = 'http://localhost:5000/fakefinder/'
# Additional headers.
headers = {'Content-Type': 'application/json' }
# Body
payload = {"batchMode": True,
"alwaysOn": False,
"s3Location": ["s3://ff-inbound-videos/4000.mp4", "s3://ff-inbound-videos/4001.mp4", "s3://ff-inbound-videos/4002.mp4", "s3://ff-inbound-videos/4003.mp4", "s3://ff-inbound-videos/4004.mp4", "s3://ff-inbound-videos/4005.mp4", "s3://ff-inbound-videos/4006.mp4", "s3://ff-inbound-videos/4007.mp4", "s3://ff-inbound-videos/4008.mp4", "s3://ff-inbound-videos/4009.mp4"],
"modelName": "selimsef",
"splitRequests": True,
"numSplitRequests": 10,
}
# convert dict to json string by json.dumps() for body data.
resp = requests.post(url, headers=headers, data=json.dumps(payload,indent=4))
# Validate response headers and body contents, e.g. status code.
assert resp.status_code == 200
# print response full body as text
print(resp.json())
| 39.214286
| 380
| 0.627505
|
736638dd9e49ffff22868dd2c0e8ad8bdc8a08a6
| 381
|
py
|
Python
|
tests/test_util.py
|
redsoxfantom-home-automation/python_lifx_sdk
|
4a979036713c247de0603827e466fe746d16bc04
|
[
"MIT"
] | 29
|
2015-07-07T00:22:39.000Z
|
2021-01-30T21:41:21.000Z
|
tests/test_util.py
|
redsoxfantom-home-automation/python_lifx_sdk
|
4a979036713c247de0603827e466fe746d16bc04
|
[
"MIT"
] | 9
|
2015-07-11T23:06:50.000Z
|
2016-12-08T22:11:49.000Z
|
tests/test_util.py
|
redsoxfantom-home-automation/python_lifx_sdk
|
4a979036713c247de0603827e466fe746d16bc04
|
[
"MIT"
] | 16
|
2015-09-03T13:38:00.000Z
|
2020-11-23T20:27:52.000Z
|
import unittest
import time
from lifx.util import RepeatTimer
class UtilTests(unittest.TestCase):
def test_timer(self):
def trigger():
trigger.counter += 1
trigger.counter = 0
timer = RepeatTimer(0.005, trigger)
timer.start()
time.sleep(0.04)
timer.cancel()
self.assertGreaterEqual(trigger.counter, 6)
| 18.142857
| 51
| 0.622047
|
91199d8b838586b2d4d89e8fdcc36eabdd908736
| 1,476
|
py
|
Python
|
superjob.py
|
Rostwik/Future_salary
|
a42c95c3deaf6d63b81a300a001a45d0b44d5b05
|
[
"MIT"
] | null | null | null |
superjob.py
|
Rostwik/Future_salary
|
a42c95c3deaf6d63b81a300a001a45d0b44d5b05
|
[
"MIT"
] | null | null | null |
superjob.py
|
Rostwik/Future_salary
|
a42c95c3deaf6d63b81a300a001a45d0b44d5b05
|
[
"MIT"
] | null | null | null |
import itertools
import requests
from predict_salary import predict_rub_salary, get_analytics
def get_keyword_statistics(keyword, superjob_header, town_code, category_code):
url = 'https://api.superjob.ru/2.0/vacancies'
jobs = []
salaries = []
for page in itertools.count():
payloads = {
'town': town_code,
'catalogues': category_code,
'page': page,
'count': 20,
'keyword': keyword
}
response = requests.get(url, headers=superjob_header, params=payloads)
response.raise_for_status()
page_response = response.json()
jobs.extend(page_response['objects'])
if not page_response['more']:
break
vacancies_found = page_response['total']
for job in jobs:
if job['currency'] == "rub":
salary = predict_rub_salary(job['payment_from'], job['payment_to'])
if salary:
salaries.append(salary)
return salaries, vacancies_found
def get_superjob_job_statistics(keywords, superjob_token, town_code, category_code):
superjob_header = {'X-Api-App-Id': superjob_token}
job_analysis = {}
for keyword in keywords:
salaries, vacancies_found = get_keyword_statistics(
keyword, superjob_header, town_code, category_code
)
job_analysis[keyword] = get_analytics(
salaries, vacancies_found
)
return job_analysis
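# Illustrative sketch (added): hypothetical driver for the functions above.
# The token is a placeholder and the numeric town/catalogue codes are
# assumptions (SuperJob uses numeric ids for both).
#
# if __name__ == '__main__':
#     stats = get_superjob_job_statistics(
#         ['Python', 'Java'],
#         superjob_token='YOUR_SUPERJOB_TOKEN',
#         town_code=4,
#         category_code=48,
#     )
#     print(stats)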
| 25.894737
| 84
| 0.632114
|
f3b53aa80ddfb3ed4812472438551e7c1623afa1
| 1,454
|
py
|
Python
|
prism/cmds/fun/eightball.py
|
ii-Python/Prism-v3
|
15a43161b41117529c915726e6270259f05d187d
|
[
"MIT"
] | 3
|
2021-11-26T22:08:11.000Z
|
2021-12-23T21:42:22.000Z
|
prism/cmds/fun/eightball.py
|
wannurhadi/Prism-v3
|
514f8d17072bf208c42e68391bce471c7d608269
|
[
"MIT"
] | 1
|
2021-07-07T22:37:10.000Z
|
2021-07-07T22:40:11.000Z
|
prism/cmds/fun/eightball.py
|
wannurhadi/Prism-v3
|
514f8d17072bf208c42e68391bce471c7d608269
|
[
"MIT"
] | 1
|
2021-12-23T21:42:24.000Z
|
2021-12-23T21:42:24.000Z
|
# Copyright 2021-xx iiPython
# Modules
import random
from discord.ext import commands
from discord.commands import Option
# Command class
class Eightball(commands.Cog):
def __init__(self, bot) -> None:
self.bot = bot
self.core = bot.core
self._8ball_responses = [
"It is certain.", "It is decidedly so.", "Without a doubt.", "Yes, definitely.",
"You may rely on it.", "As I see it, yes.", "Most likely.", "Outlook good.",
"Yes.", "Signs point to yes.", "Reply hazy, try again.", "Ask again later.",
"Better not tell you now", "Cannot predict now.", "Concentrate and ask again.",
"Don't count on it.", "My reply is no.", "My sources say no.", "Outlook not so good.",
"Very doubtful."
]
@commands.slash_command(description = "Ask the magic eightball a question.")
async def eightball(self, ctx, *, question: Option(str, "What you want to ask the eightball")) -> any:
response = random.choice(self._8ball_responses)
# Try to format the question
if not question.endswith("?"):
question += "?"
question = question[0].upper() + question[1:]
question = question.replace(" i ", " I ")
# Send response
return await ctx.respond(embed = self.core.embed(title = question, description = f"> {response}"))
# Link
def setup(bot) -> None:
return bot.add_cog(Eightball(bot))
| 36.35
| 106
| 0.608666
|
f5abf485762ec81cba3e638838afba8540ce5fcf
| 4,147
|
py
|
Python
|
qa/rpc-tests/p2p-feefilter.py
|
cenut/ilcoin-master
|
8bfc6f7e5e23bcf35840e1988bf19ec6096be9c2
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/p2p-feefilter.py
|
cenut/ilcoin-master
|
8bfc6f7e5e23bcf35840e1988bf19ec6096be9c2
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/p2p-feefilter.py
|
cenut/ilcoin-master
|
8bfc6f7e5e23bcf35840e1988bf19ec6096be9c2
|
[
"MIT"
] | 1
|
2020-04-10T07:57:53.000Z
|
2020-04-10T07:57:53.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Ilcoin Core developers
# All Rights Reserved. Ilgamos International 2017©
"""Test processing of feefilter messages."""
from test_framework.mininode import *
from test_framework.test_framework import IlcoinTestFramework
from test_framework.util import *
import time
def hashToHex(hash):
return format(hash, '064x')
# Wait up to 60 secs to see if the testnode has received all the expected invs
def allInvsMatch(invsExpected, testnode):
for x in range(60):
with mininode_lock:
if (sorted(invsExpected) == sorted(testnode.txinvs)):
return True
time.sleep(1)
return False
# TestNode: bare-bones "peer". Used to track which invs are received from a node
# and to send the node feefilter messages.
class TestNode(SingleNodeConnCB):
def __init__(self):
SingleNodeConnCB.__init__(self)
self.txinvs = []
def on_inv(self, conn, message):
for i in message.inv:
if (i.type == 1):
self.txinvs.append(hashToHex(i.hash))
def clear_invs(self):
with mininode_lock:
self.txinvs = []
def send_filter(self, feerate):
self.send_message(msg_feefilter(feerate))
self.sync_with_ping()
class FeeFilterTest(IlcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
def setup_network(self):
# Node1 will be used to generate txs which should be relayed from Node0
# to our test node
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-logtimemicros"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-logtimemicros"]))
connect_nodes(self.nodes[0], 1)
def run_test(self):
node1 = self.nodes[1]
node0 = self.nodes[0]
# Get out of IBD
node1.generate(1)
sync_blocks(self.nodes)
# Setup the p2p connections and start up the network thread.
test_node = TestNode()
connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node)
test_node.add_connection(connection)
NetworkThread().start()
test_node.wait_for_verack()
# Test that invs are received for all txs at feerate of 20 sat/byte
node1.settxfee(Decimal("0.00020000"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, test_node))
test_node.clear_invs()
# Set a filter of 15 sat/byte
test_node.send_filter(15000)
# Test that txs are still being received (paying 20 sat/byte)
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, test_node))
test_node.clear_invs()
# Change tx fee rate to 10 sat/byte and test they are no longer received
node1.settxfee(Decimal("0.00010000"))
[node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
sync_mempools(self.nodes) # must be sure node 0 has received all txs
        # Send one transaction from node0 that should be received, so that
        # we can sync the test on receipt (if node1's txs were relayed, they'd
# be received by the time this node0 tx is received). This is
# unfortunately reliant on the current relay behavior where we batch up
# to 35 entries in an inv, which means that when this next transaction
# is eligible for relay, the prior transactions from node1 are eligible
# as well.
node0.settxfee(Decimal("0.00020000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
assert(allInvsMatch(txids, test_node))
test_node.clear_invs()
# Remove fee filter and check that txs are received again
test_node.send_filter(0)
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, test_node))
test_node.clear_invs()
if __name__ == '__main__':
FeeFilterTest().main()
| 37.026786
| 91
| 0.657825
|
d1ca7633c451259985ebcbfa2c15d14d86c01a0d
| 4,754
|
py
|
Python
|
tests/unit/threaded_test_wrapper_test.py
|
DavidWittman/pika
|
6d9896c89ee187ce1a1a5c6e55a1ee0adcc5b538
|
[
"BSD-3-Clause"
] | 2,479
|
2015-01-01T20:06:23.000Z
|
2022-03-31T13:29:19.000Z
|
tests/unit/threaded_test_wrapper_test.py
|
DavidWittman/pika
|
6d9896c89ee187ce1a1a5c6e55a1ee0adcc5b538
|
[
"BSD-3-Clause"
] | 813
|
2015-01-07T07:13:49.000Z
|
2022-03-28T05:05:06.000Z
|
tests/unit/threaded_test_wrapper_test.py
|
DavidWittman/pika
|
6d9896c89ee187ce1a1a5c6e55a1ee0adcc5b538
|
[
"BSD-3-Clause"
] | 763
|
2015-01-10T04:38:33.000Z
|
2022-03-31T07:24:57.000Z
|
"""
Tests for threaded_test_wrapper.py
"""
from __future__ import print_function
import sys
import threading
import time
import unittest
try:
from unittest import mock
except ImportError:
import mock
import pika.compat
from tests.wrappers import threaded_test_wrapper
from tests.wrappers.threaded_test_wrapper import (_ThreadedTestWrapper, run_in_thread_with_timeout)
# Suppress invalid-name, since our test names are descriptive and quite long
# pylint: disable=C0103
# Suppress missing-docstring to allow test method names to be printed by our the
# test runner
# pylint: disable=C0111
class ThreadedTestWrapperSelfChecks(unittest.TestCase):
"""Tests for threaded_test_wrapper.py.
"""
def start(self):
"""Each of the tests in this test case patches this method to run its
own test
"""
raise NotImplementedError
def test_propagation_of_failure_from_test_execution_thread(self):
class SelfCheckExceptionHandling(Exception):
pass
caller_thread_id = threading.current_thread().ident
@run_in_thread_with_timeout
def my_errant_function(*_args, **_kwargs):
if threading.current_thread().ident != caller_thread_id:
raise SelfCheckExceptionHandling()
# Suppress error output by redirecting to stringio_stderr
stringio_stderr = pika.compat.StringIO()
try:
with mock.patch.object(_ThreadedTestWrapper, '_stderr',
stringio_stderr):
with self.assertRaises(AssertionError) as exc_ctx:
my_errant_function()
self.assertIn('raise SelfCheckExceptionHandling()',
exc_ctx.exception.args[0])
expected_tail = 'SelfCheckExceptionHandling\n'
self.assertEqual(exc_ctx.exception.args[0][-len(expected_tail):],
expected_tail)
self.assertIn('raise SelfCheckExceptionHandling()',
stringio_stderr.getvalue())
self.assertEqual(stringio_stderr.getvalue()[-len(expected_tail):],
expected_tail)
except Exception:
try:
print('This stderr was captured from our thread wrapper:\n',
stringio_stderr.getvalue(),
file=sys.stderr)
except Exception: # pylint: disable=W0703
pass
raise
def test_handling_of_test_execution_thread_timeout(self):
# Suppress error output by redirecting to our stringio_stderr object
stringio_stderr = pika.compat.StringIO()
@run_in_thread_with_timeout
def my_sleeper(*_args, **_kwargs):
time.sleep(1.1)
# Redirect _ThreadedTestWrapper error output to our StringIO instance
with mock.patch.object(_ThreadedTestWrapper, '_stderr',
stringio_stderr):
# Patch DEFAULT_TEST_TIMEOUT to much smaller value than sleep in
# my_start()
with mock.patch.object(threaded_test_wrapper,
'DEFAULT_TEST_TIMEOUT',
0.01):
# Redirect start() call from thread to our own my_start()
with self.assertRaises(AssertionError) as exc_ctx:
my_sleeper()
self.assertEqual(len(stringio_stderr.getvalue()), 0)
self.assertIn('The test timed out.', exc_ctx.exception.args[0])
def test_integrity_of_args_and_return_value(self):
args_bucket = []
kwargs_bucket = []
value_to_return = dict()
@run_in_thread_with_timeout
def my_guinea_pig(*args, **kwargs):
args_bucket.append(args)
kwargs_bucket.append(kwargs)
return value_to_return
arg0 = dict()
arg1 = tuple()
kwarg0 = list()
result = my_guinea_pig(arg0, arg1, kwarg0=kwarg0)
self.assertIs(result, value_to_return)
args_ut = args_bucket[0]
self.assertEqual(len(args_ut), 2, repr(args_ut))
self.assertIs(args_ut[0], arg0)
self.assertIs(args_ut[1], arg1)
kwargs_ut = kwargs_bucket[0]
self.assertEqual(len(kwargs_ut), 1, repr(kwargs_ut))
self.assertIn('kwarg0', kwargs_ut, repr(kwargs_ut))
self.assertIs(kwargs_ut['kwarg0'], kwarg0)
def test_skip_test_is_passed_through(self):
@run_in_thread_with_timeout
def my_test_skipper():
raise unittest.SkipTest('I SKIP')
with self.assertRaises(unittest.SkipTest) as ctx:
my_test_skipper()
self.assertEqual(ctx.exception.args[0], 'I SKIP')
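# Illustrative sketch (added): minimal use of the decorator under test; it
# runs the wrapped function on a worker thread with a timeout and re-raises
# failures in the caller's thread.
#
# @run_in_thread_with_timeout
# def my_quick_check():
#     assert 1 + 1 == 2
#
# my_quick_check()   # returns once the worker thread finishes or times out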
| 32.786207
| 99
| 0.628944
|
00135a8c3aa15221063162776956512989d51b7e
| 320
|
py
|
Python
|
lib.py
|
Shikiiii/shikiBot
|
12540ca80d388951323ec121cb554bf603fc253f
|
[
"MIT"
] | null | null | null |
lib.py
|
Shikiiii/shikiBot
|
12540ca80d388951323ec121cb554bf603fc253f
|
[
"MIT"
] | null | null | null |
lib.py
|
Shikiiii/shikiBot
|
12540ca80d388951323ec121cb554bf603fc253f
|
[
"MIT"
] | null | null | null |
import guilded
from guilded.ext import commands
class Cog(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.owners = ["AnbjoWYA", "dxDY9JEd", "4WPbEZwd"]
if self.hidden:
for cmd in self.get_commands():
cmd.hidden = True
def __init_subclass__(cls, hidden: bool = False):
cls.hidden = hidden
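# Illustrative sketch (added): how a subclass would opt into the hidden flag
# handled above. 'Moderation' and its command are hypothetical.
#
# class Moderation(Cog, hidden=True):
#     @commands.command()
#     async def purge(self, ctx, amount: int):
#         ...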
| 21.333333
| 52
| 0.7
|
f13601a9451a26b2ff267829af339e98fac485f2
| 1,916
|
py
|
Python
|
desktop/core/ext-py/amqp-2.4.1/extra/update_comments_from_spec.py
|
maulikjs/hue
|
59ac879b55bb6fb26ecb4e85f4c70836fc21173f
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
desktop/core/ext-py/amqp-2.4.1/extra/update_comments_from_spec.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
desktop/core/ext-py/amqp-2.4.1/extra/update_comments_from_spec.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
from __future__ import absolute_import, unicode_literals
import os
import sys
import re
default_source_file = os.path.join(
os.path.dirname(__file__),
'../amqp/channel.py',
)
RE_COMMENTS = re.compile(
    r'(?P<methodsig>def\s+(?P<mname>[a-zA-Z0-9_]+)\(.*?\)'
    r':\n+\s+""")(?P<comment>.*?)(?=""")',
    re.MULTILINE | re.DOTALL
)
USAGE = """\
Usage: %s <comments-file> <output-file> [<source-file>]\
"""
def update_comments(comments_file, impl_file, result_file):
text_file = open(impl_file, 'r')
source = text_file.read()
comments = get_comments(comments_file)
for def_name, comment in comments.items():
source = replace_comment_per_def(
source, result_file, def_name, comment
)
new_file = open(result_file, 'w+')
new_file.write(source)
def get_comments(filename):
text_file = open(filename, 'r')
whole_source = text_file.read()
comments = {}
all_matches = RE_COMMENTS.finditer(whole_source)
for match in all_matches:
comments[match.group('mname')] = match.group('comment')
# print('method: %s \ncomment: %s' % (
# match.group('mname'), match.group('comment')))
return comments
def replace_comment_per_def(source, result_file, def_name, new_comment):
    regex = (r'(?P<methodsig>def\s+' +
             def_name +
             r'\(.*?\):\n+\s+""".*?\n).*?(?=""")')
# print('method and comment:' + def_name + new_comment)
    result = re.sub(regex, r'\g<methodsig>' + new_comment, source, 0,
re.MULTILINE | re.DOTALL)
return result
def main(argv=None):
if argv is None:
argv = sys.argv
if len(argv) < 3:
print(USAGE % argv[0])
return 1
impl_file = default_source_file
if len(argv) >= 4:
impl_file = argv[3]
update_comments(argv[1], impl_file, argv[2])
if __name__ == '__main__':
sys.exit(main())
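# Illustrative sketch (added): hypothetical invocation matching the USAGE
# string above -- merge docstrings from a spec-derived comments file into
# amqp/channel.py and write the result elsewhere.
#
#   python update_comments_from_spec.py spec_comments.py channel_updated.py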
| 24.883117
| 72
| 0.60595
|
1963ceed30a44400f791762173131f732c2a5564
| 1,077
|
py
|
Python
|
tests/test_lossy.py
|
finalfusion/finalfusion-python
|
f7ab0174a66731d0f71635c45d172f7711f6aec8
|
[
"BlueOak-1.0.0"
] | 5
|
2019-06-13T12:06:59.000Z
|
2020-09-11T11:56:16.000Z
|
tests/test_lossy.py
|
finalfusion/finalfusion-python
|
f7ab0174a66731d0f71635c45d172f7711f6aec8
|
[
"BlueOak-1.0.0"
] | 77
|
2019-03-23T11:32:36.000Z
|
2020-06-30T11:14:21.000Z
|
tests/test_lossy.py
|
danieldk/finalfrontier-python
|
f7ab0174a66731d0f71635c45d172f7711f6aec8
|
[
"BlueOak-1.0.0"
] | 9
|
2019-06-24T10:11:50.000Z
|
2019-12-12T10:02:00.000Z
|
import pytest
from finalfusion import load_text_dims, load_text, load_word2vec
def test_text_dims_broken_utf8(tests_root):
e = load_text_dims(tests_root / "data" / "utf8-incomplete.dims",
lossy=True)
assert e.vocab.words == ["meren", "zee�n", "rivieren"]
with pytest.raises(UnicodeDecodeError):
_ = load_text_dims(tests_root / "data" / "utf8-incomplete.dims",
lossy=False)
def test_text_broken_utf8(tests_root):
e = load_text(tests_root / "data" / "utf8-incomplete.txt", lossy=True)
assert e.vocab.words == ["meren", "zee�n", "rivieren"]
with pytest.raises(UnicodeDecodeError):
_ = load_text(tests_root / "data" / "utf8-incomplete.txt", lossy=False)
def test_w2v_broken_utf8(tests_root):
e = load_word2vec(tests_root / "data" / "utf8-incomplete.bin", lossy=True)
assert e.vocab.words == ["meren", "zee�n", "rivieren"]
with pytest.raises(UnicodeDecodeError):
_ = load_word2vec(tests_root / "data" / "utf8-incomplete.bin",
lossy=False)
| 38.464286
| 79
| 0.651811
|
1ecae9afd987ea18a3f4bdf63e2ff08d667cac09
| 254
|
py
|
Python
|
tests/dB_scripts/removeOverlap.py
|
roberto-arista/TypedBot
|
a9af53d2d551ec761e81787e7e468d966e2bb37e
|
[
"BSD-3-Clause"
] | 2
|
2021-12-03T00:42:44.000Z
|
2021-12-15T14:16:44.000Z
|
tests/dB_scripts/removeOverlap.py
|
roberto-arista/TypedBot
|
a9af53d2d551ec761e81787e7e468d966e2bb37e
|
[
"BSD-3-Clause"
] | null | null | null |
tests/dB_scripts/removeOverlap.py
|
roberto-arista/TypedBot
|
a9af53d2d551ec761e81787e7e468d966e2bb37e
|
[
"BSD-3-Clause"
] | null | null | null |
import drawBot
drawBot.newDrawing()
drawBot.newPage(200, 100)
p = drawBot.BezierPath()
p.oval(5, 5, 70, 70)
p.rect(25, 25, 70, 70)
drawBot.fill(0, 0.3)
drawBot.stroke(0)
drawBot.drawPath(p)
p.removeOverlap()
drawBot.translate(100, 0)
drawBot.drawPath(p)
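# Illustrative sketch (added): persisting the drawing; the output path is a
# placeholder.
#
# drawBot.saveImage("removeOverlap-example.png")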
| 19.538462
| 25
| 0.732283
|
0bd2f125d63bbcb69555c6dbe410955d23dbd53e
| 8,734
|
py
|
Python
|
tests/test_filtsmooth/test_gaussfiltsmooth/test_extendedkalman.py
|
admdev8/probnum
|
792b6299bac247cf8b1b5056756f0f078855d83a
|
[
"MIT"
] | null | null | null |
tests/test_filtsmooth/test_gaussfiltsmooth/test_extendedkalman.py
|
admdev8/probnum
|
792b6299bac247cf8b1b5056756f0f078855d83a
|
[
"MIT"
] | 2
|
2020-12-28T19:37:16.000Z
|
2020-12-28T19:37:31.000Z
|
tests/test_filtsmooth/test_gaussfiltsmooth/test_extendedkalman.py
|
admdev8/probnum
|
792b6299bac247cf8b1b5056756f0f078855d83a
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy.linalg
from probnum.filtsmooth.gaussfiltsmooth import ExtendedKalman
from .filtsmooth_testcases import (
CarTrackingDDTestCase,
OrnsteinUhlenbeckCDTestCase,
PendulumNonlinearDDTestCase,
)
np.random.seed(5472)
VISUALISE = False # show plots or not?
if VISUALISE is True:
import matplotlib.pyplot as plt
class TestExtendedKalmanDiscDisc(CarTrackingDDTestCase):
"""
Try Kalman filtering and smoothing on a discrete setting.
"""
def setUp(self):
super().setup_cartracking()
self.method = ExtendedKalman(self.dynmod, self.measmod, self.initrv)
def test_dynamicmodel(self):
self.assertEqual(self.dynmod, self.method.dynamicmodel)
def test_measurementmodel(self):
self.assertEqual(self.measmod, self.method.measurementmodel)
def test_initialdistribution(self):
self.assertEqual(self.initrv, self.method.initialrandomvariable)
def test_predict(self):
pred, __ = self.method.predict(0.0, self.delta_t, self.initrv)
self.assertEqual(pred.mean.ndim, 1)
self.assertEqual(pred.mean.shape[0], 4)
self.assertEqual(pred.cov.ndim, 2)
self.assertEqual(pred.cov.shape[0], 4)
self.assertEqual(pred.cov.shape[1], 4)
def test_update(self):
data = self.measmod.transition_realization(self.initrv.mean, 0.0)[0].sample()
upd, __, __, __ = self.method.update(0.0, self.initrv, data)
self.assertEqual(upd.mean.ndim, 1)
self.assertEqual(upd.mean.shape[0], 4)
self.assertEqual(upd.cov.ndim, 2)
self.assertEqual(upd.cov.shape[0], 4)
self.assertEqual(upd.cov.shape[1], 4)
def test_filtsmooth(self):
"""
Is the RMSE of the smoother smaller than that of the filter,
which in turn is smaller than that of the raw measurements?
"""
filter_posterior = self.method.filter(self.obs, self.tms)
filtms = filter_posterior.state_rvs.mean
filtcs = filter_posterior.state_rvs.cov
smooth_posterior = self.method.filtsmooth(self.obs, self.tms)
smooms = smooth_posterior.state_rvs.mean
smoocs = smooth_posterior.state_rvs.cov
comp = self.states[1:, :2]
normaliser = np.sqrt(comp.size)
filtrmse = np.linalg.norm(filtms[1:, :2] - comp) / normaliser
smoormse = np.linalg.norm(smooms[1:, :2] - comp) / normaliser
obs_rmse = np.linalg.norm(self.obs[:, :2] - comp) / normaliser
if VISUALISE is True:
plt.title(
"Car tracking trajectory (%.2f " % smoormse
+ "< %.2f < %.2f?)" % (filtrmse, obs_rmse)
)
plt.plot(
self.obs[:, 0], self.obs[:, 1], ".", label="Observations", alpha=0.5
)
plt.plot(filtms[:, 0], filtms[:, 1], "-", label="Filter guess")
plt.plot(smooms[:, 0], smooms[:, 1], "-", label="Smoother guess")
plt.plot(
self.states[:, 0],
self.states[:, 1],
"-",
linewidth=6,
alpha=0.25,
label="Truth",
)
plt.legend()
plt.show()
self.assertLess(smoormse, filtrmse)
self.assertLess(filtrmse, obs_rmse)
class TestExtendedKalmanContDisc(OrnsteinUhlenbeckCDTestCase):
"""
    Try Kalman filtering in a continuous-discrete setting
    on an Ornstein-Uhlenbeck process.
"""
def setUp(self):
super().setup_ornsteinuhlenbeck()
self.method = ExtendedKalman(self.dynmod, self.measmod, self.initrv)
def test_dynamicmodel(self):
self.assertEqual(self.dynmod, self.method.dynamicmodel)
def test_measurementmodel(self):
self.assertEqual(self.measmod, self.method.measurementmodel)
def test_initialdistribution(self):
self.assertEqual(self.initrv, self.method.initialrandomvariable)
def test_predict_shape(self):
pred, __ = self.method.predict(0.0, self.delta_t, self.initrv)
self.assertEqual(pred.mean.shape, (1,))
self.assertEqual(pred.cov.shape, (1, 1))
def test_predict_value(self):
pred, __ = self.method.predict(0.0, self.delta_t, self.initrv)
ah = scipy.linalg.expm(self.delta_t * self.drift)
qh = (
self.q
/ (2 * self.lam)
* (1 - scipy.linalg.expm(2 * self.drift * self.delta_t))
)
expectedmean = np.squeeze(ah @ (self.initrv.mean * np.ones(1)))
expectedcov = np.squeeze(ah @ (self.initrv.cov * np.eye(1)) @ ah.T + qh)
self.assertApproxEqual(expectedmean, pred.mean)
self.assertApproxEqual(expectedcov, pred.cov)
def test_update(self):
data = self.measmod.transition_realization(self.initrv.mean * np.ones(1), 0.0)[
0
].sample()
upd, __, __, __ = self.method.update(0.0, self.initrv, data)
self.assertEqual(upd.mean.shape, (1,))
self.assertEqual(upd.cov.shape, (1, 1))
def test_smoother(self):
"""
        Is the RMSE of the smoother smaller than the RMSE of the filter,
        which in turn is smaller than the RMSE of the raw measurements?
"""
filter_posterior = self.method.filter(self.obs, self.tms)
filtms = filter_posterior.state_rvs.mean
filtcs = filter_posterior.state_rvs.cov
smooth_posterior = self.method.filtsmooth(self.obs, self.tms)
smooms = smooth_posterior.state_rvs.mean
smoocs = smooth_posterior.state_rvs.cov
comp = self.states[1:]
normaliser = np.sqrt(comp.size)
filtrmse = np.linalg.norm(filtms[1:] - comp) / normaliser
smoormse = np.linalg.norm(smooms[1:] - comp) / normaliser
obs_rmse = np.linalg.norm(self.obs - comp) / normaliser
if VISUALISE is True:
plt.title(
"Ornstein Uhlenbeck (%.2f < " % smoormse
+ "%.2f < %.2f?)" % (filtrmse, obs_rmse)
)
plt.plot(self.tms[1:], self.obs[:, 0], ".", label="Observations", alpha=0.5)
plt.plot(self.tms, filtms, "-", label="Filter guess")
plt.plot(self.tms, smooms, "-", label="Smoother guess")
plt.plot(self.tms, self.states, "-", linewidth=6, alpha=0.25, label="Truth")
plt.legend()
plt.show()
self.assertLess(smoormse, filtrmse)
self.assertLess(filtrmse, obs_rmse)
class TestExtendedKalmanPendulum(PendulumNonlinearDDTestCase):
"""
    We test on the pendulum example 5.1 in BFaS (Bayesian Filtering and Smoothing, Särkkä).
"""
def setUp(self):
super().setup_pendulum()
self.method = ExtendedKalman(self.dynamod, self.measmod, self.initrv)
def test_filtsmooth(self):
filter_posterior = self.method.filter(self.obs, self.tms)
filtms = filter_posterior.state_rvs.mean
filtcs = filter_posterior.state_rvs.cov
smooth_posterior = self.method.filtsmooth(self.obs, self.tms)
smooms = smooth_posterior.state_rvs.mean
smoocs = smooth_posterior.state_rvs.cov
comp = self.states[:, 0]
normaliser = np.sqrt(comp.size)
filtrmse = np.linalg.norm(filtms[:, 0] - comp) / normaliser
smoormse = np.linalg.norm(smooms[:, 0] - comp) / normaliser
obs_rmse = np.linalg.norm(self.obs[:, 0] - comp[1:]) / normaliser
if VISUALISE is True:
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.suptitle(
"Noisy pendulum model (%.2f " % smoormse
+ "< %.2f < %.2f?)" % (filtrmse, obs_rmse)
)
ax1.set_title("Horizontal position")
ax1.plot(
self.tms[1:], self.obs[:, 0], ".", alpha=0.25, label="Observations"
)
ax1.plot(
self.tms[1:],
np.sin(self.states)[1:, 0],
"-",
linewidth=4,
alpha=0.5,
label="Truth",
)
ax1.plot(self.tms[1:], np.sin(filtms)[1:, 0], "-", label="Filter")
ax1.plot(self.tms[1:], np.sin(smooms)[1:, 0], "-", label="Smoother")
ax1.set_xlabel("time")
ax1.set_ylabel("horizontal pos. = sin(angular)")
ax1.legend()
ax2.set_title("Angular position")
ax2.plot(
self.tms[1:],
self.states[1:, 0],
"-",
linewidth=4,
alpha=0.5,
label="Truth",
)
ax2.plot(self.tms[1:], filtms[1:, 0], "-", label="Filter")
ax2.plot(self.tms[1:], smooms[1:, 0], "-", label="Smoother")
ax2.set_xlabel("time")
ax2.set_ylabel("angular pos.")
ax2.legend()
plt.show()
self.assertLess(smoormse, filtrmse)
self.assertLess(filtrmse, obs_rmse)
| 36.543933
| 88
| 0.583925
|
63912902c1a6da683bd77ca12fce5a30f70541eb
| 1,406
|
py
|
Python
|
server/app/users/repositories.py
|
aryaniyaps/todos
|
06e18717dfc1139d5ab2fdd71f8baf0d08548c33
|
[
"BSD-3-Clause"
] | 2
|
2021-11-05T05:19:55.000Z
|
2021-11-05T07:07:50.000Z
|
server/app/users/repositories.py
|
aryaniyaps/todos
|
06e18717dfc1139d5ab2fdd71f8baf0d08548c33
|
[
"BSD-3-Clause"
] | 184
|
2021-11-11T04:34:30.000Z
|
2022-03-28T08:25:51.000Z
|
server/app/users/repositories.py
|
aryaniyaps/todos
|
06e18717dfc1139d5ab2fdd71f8baf0d08548c33
|
[
"BSD-3-Clause"
] | null | null | null |
from passlib.hash import bcrypt
from sqlalchemy import select
from app.database.core import db_session
from app.users.entities import User
class UserRepo:
@classmethod
def get_user(cls, user_id: int) -> User | None:
"""
        Get a user with the given ID.
:param user_id: The user's ID.
:return: The user with the given ID.
"""
return db_session.get(User, user_id)
@classmethod
def get_user_by_email(cls, email: str) -> User | None:
"""
        Get a user with the given email.
:param email: The user's email.
:return: The user with the given email.
"""
statement = select(User).filter(User.email == email)
return db_session.scalars(statement).first()
@classmethod
def create_user(cls, email: str, password: str) -> User:
"""
        Create a user.
:param email: The user's email.
:param password: The user's password.
:return: The created user.
"""
user = User(email=email)
user.password = bcrypt.hash(password)
db_session.add(user)
db_session.commit()
return user
@classmethod
def delete_user(cls, user: User) -> None:
"""
Delete the given user.
:param user: The user to delete.
"""
db_session.delete(user)
db_session.commit()
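# Illustrative usage sketch (not part of the original module). It assumes the
# scoped session in app.database.core is already configured; the credentials
# below are made-up placeholders.
if __name__ == "__main__":
    demo_user = UserRepo.create_user(email="demo@example.com", password="hunter2")
    assert UserRepo.get_user_by_email("demo@example.com") is not None
    UserRepo.delete_user(demo_user)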
| 23.433333
| 60
| 0.586771
|
ce62ff348bde77fbc3b1c2ccb094ef74ab9fdeff
| 2,853
|
py
|
Python
|
src/log-analytics-solution/azext_log_analytics_solution/custom.py
|
tilnl/azure-cli-extensions
|
ef9946bbcde34bb51343554a8f2a8dedd1f7d44a
|
[
"MIT"
] | null | null | null |
src/log-analytics-solution/azext_log_analytics_solution/custom.py
|
tilnl/azure-cli-extensions
|
ef9946bbcde34bb51343554a8f2a8dedd1f7d44a
|
[
"MIT"
] | null | null | null |
src/log-analytics-solution/azext_log_analytics_solution/custom.py
|
tilnl/azure-cli-extensions
|
ef9946bbcde34bb51343554a8f2a8dedd1f7d44a
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-statements
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=unused-argument
from azure.cli.core.util import sdk_no_wait
def create_monitor_log_analytics_solution(client,
resource_group_name,
solution_name,
plan_publisher,
plan_product,
workspace_resource_id,
location,
tags=None,
no_wait=False):
body = {
'location': location,
'tags': tags,
'properties': {
"workspace_resource_id": workspace_resource_id
},
"plan": {
"name": solution_name,
"product": plan_product,
"publisher": plan_publisher,
"promotion_code": ""
}
}
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name=resource_group_name,
solution_name=solution_name, parameters=body)
def update_monitor_log_analytics_solution(client,
resource_group_name,
solution_name,
tags=None,
no_wait=False):
return sdk_no_wait(no_wait, client.update, resource_group_name=resource_group_name,
solution_name=solution_name, tags=tags)
def delete_monitor_log_analytics_solution(client,
resource_group_name,
solution_name,
no_wait=False):
return sdk_no_wait(no_wait, client.delete, resource_group_name=resource_group_name, solution_name=solution_name)
def get_monitor_log_analytics_solution(client,
resource_group_name,
solution_name):
return client.get(resource_group_name=resource_group_name, solution_name=solution_name)
def list_monitor_log_analytics_solution(client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list_by_subscription()
| 41.347826
| 116
| 0.514546
|
99b98c85db6971f73c3d7f68fd6951c03998f24e
| 3,102
|
py
|
Python
|
course/migrations/0092_unicode_literals.py
|
68ymtlab/relate
|
0120ca0188a969b3251722241e8d97fda52226b4
|
[
"Unlicense"
] | null | null | null |
course/migrations/0092_unicode_literals.py
|
68ymtlab/relate
|
0120ca0188a969b3251722241e8d97fda52226b4
|
[
"Unlicense"
] | 6
|
2015-08-18T00:13:40.000Z
|
2018-01-31T05:55:13.000Z
|
course/migrations/0092_unicode_literals.py
|
davis68/relate
|
eb40c8c17d4a724a60de3caa3334521a833bad5c
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-08 22:07
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('course', '0091_changes_found_for_py3'),
]
operations = [
migrations.AlterField(
model_name='examticket',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='Creator'),
),
migrations.AlterField(
model_name='flowaccessexception',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='Creator'),
),
migrations.AlterField(
model_name='flowpagevisit',
name='impersonated_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='impersonator', to=settings.AUTH_USER_MODEL, verbose_name='Impersonated by'),
),
migrations.AlterField(
model_name='flowpagevisit',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='visitor', to=settings.AUTH_USER_MODEL, verbose_name='User'),
),
migrations.AlterField(
model_name='flowpagevisitgrade',
name='grader',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='Grader'),
),
migrations.AlterField(
model_name='flowruleexception',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='Creator'),
),
migrations.AlterField(
model_name='flowsession',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='User'),
),
migrations.AlterField(
model_name='gradechange',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='Creator'),
),
migrations.AlterField(
model_name='gradechange',
name='flow_session',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='grade_changes', to='course.FlowSession', verbose_name='Flow session'),
),
migrations.AlterField(
model_name='participationpreapproval',
name='creator',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='Creator'),
),
]
| 45.617647
| 195
| 0.661186
|
691d7d0c2859555ae2558d66a65ec7cca8b04eda
| 387
|
py
|
Python
|
thesis/nn/models/__init__.py
|
morris-frank/unnamed-source-separation
|
e23af1761e8fdd587a67b307eaee800b02f5bccf
|
[
"MIT"
] | 2
|
2021-01-05T11:34:09.000Z
|
2021-01-05T16:06:36.000Z
|
thesis/nn/models/__init__.py
|
morris-frank/unsupervised-source-separation
|
e23af1761e8fdd587a67b307eaee800b02f5bccf
|
[
"MIT"
] | 1
|
2020-05-12T18:52:18.000Z
|
2020-05-12T18:52:18.000Z
|
thesis/nn/models/__init__.py
|
morris-frank/unnamed-source-separation
|
e23af1761e8fdd587a67b307eaee800b02f5bccf
|
[
"MIT"
] | null | null | null |
from abc import ABC
import torch
from torch import nn
from ...utils import _LossLogger
class BaseModel(ABC, nn.Module):
def __init__(self, name: str = ""):
super(BaseModel, self).__init__()
self.ℒ = _LossLogger()
self.name = name
def test(self, *args) -> torch.Tensor:
pass
def infer(self, *args, **kwargs) -> torch.Tensor:
pass
| 19.35
| 53
| 0.614987
|
1d35a0ec0c3e6f6cbc11bdbda03d1ad50bf98ed2
| 6,785
|
py
|
Python
|
examples/agents/q_net.py
|
simasan1017/gymlissyusei
|
7888e52785698a602687be2f70ad72c5bd4f20a6
|
[
"MIT"
] | null | null | null |
examples/agents/q_net.py
|
simasan1017/gymlissyusei
|
7888e52785698a602687be2f70ad72c5bd4f20a6
|
[
"MIT"
] | null | null | null |
examples/agents/q_net.py
|
simasan1017/gymlissyusei
|
7888e52785698a602687be2f70ad72c5bd4f20a6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import copy
import numpy as np
from chainer import cuda, FunctionSet, Variable, optimizers
import chainer.functions as F
class QNet:
# Hyper-Parameters
gamma = 0.99 # Discount factor
    initial_exploration = 10**3 # Initial exploration. original: 5x10^4
    replay_size = 32 # Replay (batch) size
    target_model_update_freq = 10**4 # Target update frequency. original: 10^4
data_size = 10**5 # Data size of history. original: 10^6
hist_size = 1 #original: 4
def __init__(self, use_gpu, enable_controller, dim):
self.use_gpu = use_gpu
self.num_of_actions = len(enable_controller)
self.enable_controller = enable_controller
self.dim = dim
print("Initializing Q-Network...")
hidden_dim = 256
self.model = FunctionSet(
l4=F.Linear(self.dim*self.hist_size, hidden_dim, wscale=np.sqrt(2)),
q_value=F.Linear(hidden_dim, self.num_of_actions,
initialW=np.zeros((self.num_of_actions, hidden_dim),
dtype=np.float32))
)
if self.use_gpu >= 0:
self.model.to_gpu()
self.model_target = copy.deepcopy(self.model)
self.optimizer = optimizers.RMSpropGraves(lr=0.00025, alpha=0.95, momentum=0.95, eps=0.0001)
self.optimizer.setup(self.model.collect_parameters())
# History Data : D=[s, a, r, s_dash, end_episode_flag]
self.d = [np.zeros((self.data_size, self.hist_size, self.dim), dtype=np.uint8),
np.zeros(self.data_size, dtype=np.uint8),
np.zeros((self.data_size, 1), dtype=np.int8),
np.zeros((self.data_size, self.hist_size, self.dim), dtype=np.uint8),
np.zeros((self.data_size, 1), dtype=np.bool)]
def forward(self, state, action, reward, state_dash, episode_end):
num_of_batch = state.shape[0]
s = Variable(state)
s_dash = Variable(state_dash)
q = self.q_func(s) # Get Q-value
# Generate Target Signals
tmp = self.q_func_target(s_dash) # Q(s',*)
if self.use_gpu >= 0:
tmp = list(map(np.max, tmp.data.get())) # max_a Q(s',a)
else:
tmp = list(map(np.max, tmp.data)) # max_a Q(s',a)
max_q_dash = np.asanyarray(tmp, dtype=np.float32)
if self.use_gpu >= 0:
target = np.asanyarray(q.data.get(), dtype=np.float32)
else:
# make new array
target = np.array(q.data, dtype=np.float32)
for i in xrange(num_of_batch):
if not episode_end[i][0]:
tmp_ = reward[i] + self.gamma * max_q_dash[i]
else:
tmp_ = reward[i]
action_index = self.action_to_index(action[i])
target[i, action_index] = tmp_
# TD-error clipping
if self.use_gpu >= 0:
target = cuda.to_gpu(target)
td = Variable(target) - q # TD error
td_tmp = td.data + 1000.0 * (abs(td.data) <= 1) # Avoid zero division
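        # Clip the TD error to [-1, 1]: keep td where |td| <= 1, otherwise fall back
        # to its sign (td / |td|). The +1000.0 term above only shifts the |td| <= 1
        # region, whose quotient is masked out anyway, so the division below never
        # hits a zero denominator.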
td_clip = td * (abs(td.data) <= 1) + td/abs(td_tmp) * (abs(td.data) > 1)
zero_val = np.zeros((self.replay_size, self.num_of_actions), dtype=np.float32)
if self.use_gpu >= 0:
zero_val = cuda.to_gpu(zero_val)
zero_val = Variable(zero_val)
loss = F.mean_squared_error(td_clip, zero_val)
return loss, q
def stock_experience(self, time,
state, action, reward, state_dash,
episode_end_flag):
data_index = time % self.data_size
if episode_end_flag is True:
self.d[0][data_index] = state
self.d[1][data_index] = action
self.d[2][data_index] = reward
else:
self.d[0][data_index] = state
self.d[1][data_index] = action
self.d[2][data_index] = reward
self.d[3][data_index] = state_dash
self.d[4][data_index] = episode_end_flag
def experience_replay(self, time):
if self.initial_exploration < time:
# Pick up replay_size number of samples from the Data
if time < self.data_size: # during the first sweep of the History Data
replay_index = np.random.randint(0, time, (self.replay_size, 1))
else:
replay_index = np.random.randint(0, self.data_size, (self.replay_size, 1))
s_replay = np.ndarray(shape=(self.replay_size, self.hist_size, self.dim), dtype=np.float32)
a_replay = np.ndarray(shape=(self.replay_size, 1), dtype=np.uint8)
r_replay = np.ndarray(shape=(self.replay_size, 1), dtype=np.float32)
s_dash_replay = np.ndarray(shape=(self.replay_size, self.hist_size, self.dim), dtype=np.float32)
episode_end_replay = np.ndarray(shape=(self.replay_size, 1), dtype=np.bool)
for i in xrange(self.replay_size):
s_replay[i] = np.asarray(self.d[0][replay_index[i]], dtype=np.float32)
a_replay[i] = self.d[1][replay_index[i]]
r_replay[i] = self.d[2][replay_index[i]]
s_dash_replay[i] = np.array(self.d[3][replay_index[i]], dtype=np.float32)
episode_end_replay[i] = self.d[4][replay_index[i]]
if self.use_gpu >= 0:
s_replay = cuda.to_gpu(s_replay)
s_dash_replay = cuda.to_gpu(s_dash_replay)
# Gradient-based update
self.optimizer.zero_grads()
loss, _ = self.forward(s_replay, a_replay, r_replay, s_dash_replay, episode_end_replay)
loss.backward()
self.optimizer.update()
def q_func(self, state):
h4 = F.relu(self.model.l4(state))
q = self.model.q_value(h4 / 255.0)
return q
def q_func_target(self, state):
h4 = F.relu(self.model_target.l4(state / 255.0))
q = self.model_target.q_value(h4)
return q
def e_greedy(self, state, epsilon):
s = Variable(state)
q = self.q_func(s)
q = q.data
if np.random.rand() < epsilon:
index_action = np.random.randint(0, self.num_of_actions)
print(" Random"),
else:
if self.use_gpu >= 0:
index_action = np.argmax(q.get())
else:
index_action = np.argmax(q)
print("#Greedy"),
return self.index_to_action(index_action), q
def target_model_update(self):
self.model_target = copy.deepcopy(self.model)
def index_to_action(self, index_of_action):
return self.enable_controller[index_of_action]
def action_to_index(self, action):
return self.enable_controller.index(action)
| 39.678363
| 108
| 0.585999
|
63c2f9b2906f6cad171ec7091cd2f800c4410e44
| 3,526
|
py
|
Python
|
chb/mips/opcodes/MIPSBranch.py
|
kestreltechnology/CodeHawk-Binary
|
aa0b2534e0318e5fb3770ec7b4d78feb0feb2394
|
[
"MIT"
] | null | null | null |
chb/mips/opcodes/MIPSBranch.py
|
kestreltechnology/CodeHawk-Binary
|
aa0b2534e0318e5fb3770ec7b4d78feb0feb2394
|
[
"MIT"
] | null | null | null |
chb/mips/opcodes/MIPSBranch.py
|
kestreltechnology/CodeHawk-Binary
|
aa0b2534e0318e5fb3770ec7b4d78feb0feb2394
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
# Copyright (c) 2020-2021 Henny Sipma
# Copyright (c) 2021-2022 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
from typing import cast, List, Sequence, TYPE_CHECKING
from chb.app.AbstractSyntaxTree import AbstractSyntaxTree
from chb.app.ASTNode import ASTInstruction, ASTExpr, ASTLval
from chb.app.InstrXData import InstrXData
from chb.mips.MIPSDictionaryRecord import mipsregistry
from chb.mips.MIPSOpcode import MIPSOpcode, simplify_result
from chb.mips.MIPSOperand import MIPSOperand
import chb.simulation.SimSymbolicValue as SSV
import chb.simulation.SimUtil as SU
import chb.simulation.SimValue as SV
import chb.util.fileutil as UF
from chb.util.IndexedTable import IndexedTableValue
if TYPE_CHECKING:
from chb.mips.MIPSDictionary import MIPSDictionary
from chb.simulation.SimulationState import SimulationState
@mipsregistry.register_tag("b", MIPSOpcode)
class MIPSBranch(MIPSOpcode):
"""B offset (assembly idiom)"""
def __init__(
self,
mipsd: "MIPSDictionary",
ixval: IndexedTableValue) -> None:
MIPSOpcode.__init__(self, mipsd, ixval)
@property
def target(self) -> MIPSOperand:
return self.mipsd.mips_operand(self.args[0])
def assembly_ast(
self,
astree: AbstractSyntaxTree,
iaddr: str,
bytestring: str,
xdata: InstrXData) -> List[ASTInstruction]:
return []
def annotation(self, xdata: InstrXData) -> str:
return 'goto ' + str(self.target)
# --------------------------------------------------------------------------
# Operation:
# I: target_offset <- sign_extend(offset || 0[2])
# I+1: PC <- PC + target_offset
# --------------------------------------------------------------------------
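    # (The "I+1" step reflects the MIPS branch delay slot: the instruction after
    # the branch executes first, and only then does the PC change, which is why
    # simulate() sets a *delayed* program counter below.)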
def simulate(self, iaddr: str, simstate: "SimulationState") -> str:
tgt = SSV.mk_global_address(
self.target.absolute_address_value, modulename=simstate.modulename)
simstate.increment_programcounter()
simstate.simprogramcounter.set_delayed_programcounter(tgt)
return "goto " + str(tgt)
| 38.747253
| 80
| 0.646909
|
1ff14ebe8a6f41b14f4b103eec4f94eea2f583a3
| 275
|
py
|
Python
|
apps/flasksample/flasksample/utils.py
|
Nebulaworks/orion
|
7819e80b12b555322433373ddb5075173fa60f22
|
[
"BSD-3-Clause"
] | 3
|
2021-07-27T20:07:55.000Z
|
2021-12-03T18:24:15.000Z
|
apps/flasksample/flasksample/utils.py
|
Nebulaworks/orion
|
7819e80b12b555322433373ddb5075173fa60f22
|
[
"BSD-3-Clause"
] | 3
|
2021-06-25T16:54:20.000Z
|
2021-12-07T19:20:43.000Z
|
apps/flasksample/flasksample/utils.py
|
Nebulaworks/orion
|
7819e80b12b555322433373ddb5075173fa60f22
|
[
"BSD-3-Clause"
] | null | null | null |
import socket
def gethostname():
return socket.gethostname()
def getlocaladdress():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # It doesn't matter what we try to connect to, just that we try
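    # (A UDP connect sends no packets; it only selects the outgoing interface,
    # whose address getsockname() then reports.)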
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
| 25
| 66
| 0.690909
|
b1d6436040038010ec3134a9bf458b7dad742030
| 1,530
|
py
|
Python
|
test/tests/dunder_descriptors.py
|
kevinxucs/pyston
|
bdb87c1706ac74a0d15d9bc2bae53798678a5f14
|
[
"Apache-2.0"
] | 1
|
2020-02-06T14:28:45.000Z
|
2020-02-06T14:28:45.000Z
|
test/tests/dunder_descriptors.py
|
kevinxucs/pyston
|
bdb87c1706ac74a0d15d9bc2bae53798678a5f14
|
[
"Apache-2.0"
] | null | null | null |
test/tests/dunder_descriptors.py
|
kevinxucs/pyston
|
bdb87c1706ac74a0d15d9bc2bae53798678a5f14
|
[
"Apache-2.0"
] | 1
|
2020-02-06T14:29:00.000Z
|
2020-02-06T14:29:00.000Z
|
# - descriptors
# Descriptors get processed when fetched as part of a dunder lookup
def f1():
class D(object):
def __init__(self, n):
self.n = n
def __get__(self, obj, cls):
print "__get__()", obj is None, self.n
def desc(*args):
print "desc()", len(args)
return self.n
return desc
def __call__(self):
print "D.call"
return self.n
class C(object):
__hash__ = D(1)
__add__ = D(2)
__init__ = D(None)
print C.__init__()
c = C()
print C.__hash__()
print c.__hash__()
print hash(c)
print c + c
f1()
def f2():
print "\nf2"
class D(object):
def __call__(self, subcl):
print "call", subcl
return object.__new__(subcl)
def get(self, inst, owner):
print "__get__", inst, owner
def new(self):
print "new"
return object.__new__(owner)
return new
class C(object):
__new__ = D()
print type(C())
D.__get__ = get
print type(C())
f2()
def f3():
print "\nf3"
class D(object):
def __call__(self):
print "call"
return None
def get(self, inst, owner):
print "__get__", type(inst), owner
def init():
print "init"
return None
return init
class C(object):
__init__ = D()
print type(C())
D.__get__ = get
print type(C())
f3()
| 20.131579
| 68
| 0.498039
|
610af0a4a293668697bd0be32d1842c9de3abdd7
| 783
|
py
|
Python
|
testing/python.readline/read.py
|
marshallmidden/m4
|
8ff1cb050efdefe6963c6d7f459fd6f3d25eea94
|
[
"BSD-2-Clause"
] | null | null | null |
testing/python.readline/read.py
|
marshallmidden/m4
|
8ff1cb050efdefe6963c6d7f459fd6f3d25eea94
|
[
"BSD-2-Clause"
] | null | null | null |
testing/python.readline/read.py
|
marshallmidden/m4
|
8ff1cb050efdefe6963c6d7f459fd6f3d25eea94
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python3
#-----------------------------------------------------------------------------
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
#-----------------------------------------------------------------------------
import sys
import readline
readline.parse_and_bind('tab: complete')
readline.parse_and_bind('set editing-mode vi')
while True:
try:
sys.stdout.write('ready> ')
sys.stdout.flush()
line = sys.stdin.readline()
if not line:
break
line = line.strip()
if not sys.stdin.isatty():
print('READ>',line)
if line == 'stop':
break
print('ENTERED: "%s"' % line)
except SystemExit:
sys.exit(0)
except KeyboardInterrupt:
break
# elihw
| 24.46875
| 78
| 0.463602
|
c7453694c3c31955251d8dab9380722e2a7cd7e4
| 6,265
|
py
|
Python
|
common/middleware/key_manager.py
|
escudocloud/encswift_server
|
90a5a999c06b40da89d5f785795f80bd9d990640
|
[
"Apache-2.0"
] | null | null | null |
common/middleware/key_manager.py
|
escudocloud/encswift_server
|
90a5a999c06b40da89d5f785795f80bd9d990640
|
[
"Apache-2.0"
] | null | null | null |
common/middleware/key_manager.py
|
escudocloud/encswift_server
|
90a5a999c06b40da89d5f785795f80bd9d990640
|
[
"Apache-2.0"
] | 1
|
2016-09-27T09:08:24.000Z
|
2016-09-27T09:08:24.000Z
|
#!/usr/bin/env python
import os, json, uuid
import base64,time
from Crypto import Random
from Crypto.Cipher import AES,PKCS1_OAEP
from Crypto.Signature import PKCS1_PSS
from Crypto.Hash import SHA256
#from Crypto.PublicKey import RSA
from keystoneauth1.identity import v3
from keystoneauth1 import session
from keystoneclient import client as kc
from barbicanclient import client as bc
import imp
from connection import *
from swiftclient import client
from ecdsa import SigningKey, NIST256p,VerifyingKey
RSA = imp.load_source('Crypto.PublicKey', '/usr/lib/python2.7/dist-packages/Crypto/PublicKey/RSA.py')
meta_conn = client.Connection(user=SWIFT_USER, key=SWIFT_PASS, tenant_name=META_TENANT,
authurl=AUTH_URL, auth_version='2.0')
BLOCK_SIZE = 16
def generate_container_key():
"""
Generate a random AES key for the container
"""
random_bytes = os.urandom(BLOCK_SIZE)
secret = base64.b64encode(random_bytes).decode('utf-8')
id_ = uuid.uuid4()
return id_,secret
def decrypt_KEK(secret,signature, sender, receiver):
"""
Decipher the KEK from the catalog.
Returns:
        The decrypted DEK, or None if signature verification fails.
"""
#sender_pub_key = RSA.importKey(get_publicKey(sender))
# receiver = self.userID
vk = get_verificationKey(sender)
h = SHA256.new()
h.update(secret)
dig = h.digest()
if sender == receiver:
# AES decipher
try:
vk.verify(signature, dig)
master_key = get_masterKey()
unpad = lambda s: s[: -ord(s[len(s) - 1:])]
secret = base64.b64decode(secret)
iv = secret[:BLOCK_SIZE]
cipher = AES.new(master_key, AES.MODE_CBC, iv)
result = unpad(cipher.decrypt(secret[BLOCK_SIZE:]))
return result
except:
#Error in signature
return None
else:
# RSA decipher
receiver_priv_key_rsa = RSA.importKey(get_privateKey())
receiver_priv_key = PKCS1_OAEP.new(receiver_priv_key_rsa)
try:
vk.verify(signature, dig)
result = receiver_priv_key.decrypt(secret)
return result
        except Exception,err:
            # Error in signature
            return None
def encrypt_msg(info, secret, path=False):
"""
Encrypt a message using AES
"""
# padding : guarantee that the value is always MULTIPLE of BLOCK_SIZE
PADDING = '{'
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
encodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))
cipher = AES.new(secret)
encoded = encodeAES(cipher, info)
if path:
# Encoding base32 to avoid paths (names containing slashes /)
encoded = base64.b32encode(encoded)
return encoded
def decrypt_msg(encryptedString, secret, path=False):
"""
Decrypt a message using AES
"""
PADDING = '{'
if path:
encryptedString = base64.b32decode(encryptedString)
decodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)
key = secret
cipher = AES.new(key)
decoded = decodeAES(cipher, encryptedString)
return decoded
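# Illustrative round-trip sketch (not part of the original middleware); the key
# below is a throwaway stand-in, and any BLOCK_SIZE-byte AES key behaves the same.
def _demo_encrypt_roundtrip():
    demo_key = os.urandom(BLOCK_SIZE)
    token = encrypt_msg('container-name', demo_key, path=True)
    assert decrypt_msg(token, demo_key, path=True) == 'container-name'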
def get_masterKey():
"""
Get the user's master key
Returns:
The master key
"""
filename = '/opt/stack/swift/swift/common/middleware/mk.key'
with open(filename, 'r') as f:
master_key = f.read()
return base64.b64decode(master_key)
def get_privateKey():
"""
Get the plain user's private key
Returns:
The plain private key
"""
filename = '/opt/stack/swift/swift/common/middleware/pvt.key'
with open(filename, 'r') as f:
private_key = f.read()
unpad = lambda s: s[:-ord(s[len(s) - 1:])]
private_key = base64.b64decode(private_key)
iv = private_key[:BLOCK_SIZE]
cipher = AES.new(get_masterKey(), AES.MODE_CBC, iv)
return unpad(cipher.decrypt(private_key[BLOCK_SIZE:]))
def get_publicKey(usrID): # TODO: from barbican
"""
Get the user's public key
Returns:
Public key from meta-container (Keys) in meta-tenant
"""
auth = v3.Password(auth_url=AUTH_URL,username=SWIFT_USER,password=SWIFT_PASS,project_name='demo',project_domain_id="Default",user_domain_name='Default')
sess = session.Session(auth=auth)
barbican = bc.Client(session=sess)
keystone = kc.Client(session=sess)
try:
user = keystone.users.get(usrID)
dict_keys = json.loads(user.description)
ref = dict_keys.get('Public_Key','')
ref = "%s/secrets/%s" %(BARBICAN_URL,ref)
secret_node = barbican.secrets.get(ref)
except Exception,err:
return
return secret_node.payload
def get_verificationKey(usrID):
"""
Get the user's verification key
Returns:
Verification key from meta-container (Keys) in meta-tenant
"""
auth = v3.Password(auth_url=AUTH_URL,username=ADMIN_USER,password=ADMIN_KEY,project_name='demo',project_domain_id="Default",user_domain_name="Default")
sess = session.Session(auth=auth)
barbican = bc.Client(session=sess)
keystone = kc.Client(session=sess)
try:
user = keystone.users.get(usrID)
dict_keys = json.loads(user.description)
ref = dict_keys.get('Verification_Key','')
ref = "%s/secrets/%s" %(BARBICAN_URL,ref)
secret_node = barbican.secrets.get(ref)
except Exception,err:
return
a = VerifyingKey.from_pem(secret_node.payload)
return a
def get_signKey(self, usrID):
"""
Get the user's sign key
Returns:
The sign key
"""
filename = '/opt/stack/swift/swift/common/middleware/sk.key'
with open(filename, 'r') as f:
sign_key = f.read()
return SigningKey.from_pem(sign_key)
| 34.048913
| 160
| 0.600798
|
6ad7d0cf00cd62149b458ca82b3c20fee2812ac1
| 3,231
|
py
|
Python
|
chapter12/09 TestFashionShopApp/Storage/FashionShop.py
|
munnep/begin_to_code_with_python
|
3ef14d90785526b6b26d262a7627eee73791d7d0
|
[
"MIT"
] | null | null | null |
chapter12/09 TestFashionShopApp/Storage/FashionShop.py
|
munnep/begin_to_code_with_python
|
3ef14d90785526b6b26d262a7627eee73791d7d0
|
[
"MIT"
] | null | null | null |
chapter12/09 TestFashionShopApp/Storage/FashionShop.py
|
munnep/begin_to_code_with_python
|
3ef14d90785526b6b26d262a7627eee73791d7d0
|
[
"MIT"
] | null | null | null |
import pickle
from Storage.StockItem import StockItem
class FashionShop:
show_instrumentation = False
min_price = 0.5
max_price = 500.0
max_stock_add = 50
def __init__(self):
if FashionShop.show_instrumentation:
print('** FashionShop __init__ called')
self.__stock_dictionary = {}
def save(self, filename):
'''
Saves the fashion shop to the given filename
Data is stored in binary as pickled file
Exceptions will be raised if the save fails
'''
if FashionShop.show_instrumentation:
print('** FashionShop save called')
with open(filename,'wb') as out_file:
pickle.dump(self,out_file)
@staticmethod
def load(filename):
'''
Loads the fashion shop from the given filename
        Data is stored in binary as a pickled file
Exceptions will be raised if the load fails
'''
if FashionShop.show_instrumentation:
print('** FashionShop load called')
with open(filename,'rb') as input_file:
result = pickle.load(input_file)
# Now update the versions of the loaded stock items
for stock_item in result.__stock_dictionary.values():
stock_item.check_version()
return result
def store_new_stock_item(self, stock_item):
'''
Create a new item in the fashion shop
The item is indexed on the stock_ref value
Raises an exception if the item already
exists
'''
if FashionShop.show_instrumentation:
print('** FashionShop store_new_stock_item called')
if stock_item.stock_ref in self.__stock_dictionary:
raise Exception('Item already present')
self.__stock_dictionary[stock_item.stock_ref] = stock_item
def find_stock_item(self, stock_ref):
'''
Gets an item from the stock dictionary
Returns None if there is no item for
this key
'''
if FashionShop.show_instrumentation:
print('** FashionShop find_stock_item called')
if stock_ref in self.__stock_dictionary:
return self.__stock_dictionary[stock_ref]
else:
return None
def __str__(self):
if FashionShop.show_instrumentation:
print('** FashionShop __str__ called')
stock = map(str,self.__stock_dictionary.values())
stock_list = '\n'.join(stock)
template = '''Items in Stock
{0}
'''
return template.format(stock_list)
def find_matching_with_tags(self, search_tags):
'''
Returns the stock items that contain
the search_tags as a subset of their tags
'''
if FashionShop.show_instrumentation:
print('** FashionShop find_matching_tags called', search_tags)
def match_tags(item):
'''
Returns True if the tags in the item
contain the search tags
'''
return search_tags.issubset(item.tags)
return filter(lambda item:search_tags.issubset(item.tags), self.__stock_dictionary.values())
# return filter(match_tags, self.__stock_dictionary.values())
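# Illustrative use of the tag search (not part of the original module); the pickle
# filename below is a made-up placeholder.
#
#   shop = FashionShop.load('fashionshop.pickle')
#   red_dresses = list(shop.find_matching_with_tags({'red', 'dress'}))
#
# find_matching_with_tags returns a lazy filter object, so wrap it in list()
# (or iterate over it) to actually inspect the matches.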
| 31.067308
| 100
| 0.629526
|
480ccb31d09fea68d99d374d3d56ced8de3e19e4
| 1,613
|
py
|
Python
|
fibonacci_drop/__main__.py
|
myxie/fibonacci_drop
|
19c673c615d4587e5d523f6811c7a005574f2437
|
[
"Unlicense"
] | null | null | null |
fibonacci_drop/__main__.py
|
myxie/fibonacci_drop
|
19c673c615d4587e5d523f6811c7a005574f2437
|
[
"Unlicense"
] | null | null | null |
fibonacci_drop/__main__.py
|
myxie/fibonacci_drop
|
19c673c615d4587e5d523f6811c7a005574f2437
|
[
"Unlicense"
] | null | null | null |
# __main__ is not required for DALiuGE components.
import argparse # pragma: no cover
from . import FibonacciAppDrop # pragma: no cover
def main() -> None: # pragma: no cover
"""
The main function executes on commands:
`python -m fibonacci_drop` and `$ fibonacci_drop `.
This is your program's entry point.
You can change this function to do whatever you want.
Examples:
* Run a test suite
* Run a server
* Do some other stuff
* Run a command line application (Click, Typer, ArgParse)
* List all available tasks
* Run an application (Flask, FastAPI, Django, etc.)
"""
parser = argparse.ArgumentParser(
description="fibonacci_drop.",
epilog="Enjoy the fibonacci_drop functionality!",
)
    # This is a required positional argument
parser.add_argument(
"name",
type=str,
help="The username",
default="myxie",
)
    # This is an optional named argument
parser.add_argument(
"-m",
"--message",
type=str,
help="The Message",
default="Hello",
required=False,
)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="Optionally adds verbosity",
)
args = parser.parse_args()
print(f"{args.message} {args.name}!")
if args.verbose:
print("Verbose mode is on.")
print("Executing main function")
comp = FibonacciAppDrop('a','b')
print(comp.run())
print("End of main function")
if __name__ == "__main__": # pragma: no cover
main()
| 26.016129
| 65
| 0.599504
|
05939d5147db76601e28462bbe2955669f6e81be
| 6,023
|
py
|
Python
|
src/webcam_interface.py
|
DavidSpielman/Webcam-Midi-Controller
|
c97f39193b152aa2f5b3d9a1b0cd03d8d79bf2fc
|
[
"MIT"
] | null | null | null |
src/webcam_interface.py
|
DavidSpielman/Webcam-Midi-Controller
|
c97f39193b152aa2f5b3d9a1b0cd03d8d79bf2fc
|
[
"MIT"
] | null | null | null |
src/webcam_interface.py
|
DavidSpielman/Webcam-Midi-Controller
|
c97f39193b152aa2f5b3d9a1b0cd03d8d79bf2fc
|
[
"MIT"
] | null | null | null |
import cv2 as cv
import rospy
from std_msgs.msg import Int16
global ret, frame, flip_frame, boundingBox, interval
# 0 for built-in webcam, 2 for external webcam
cap = cv.VideoCapture(2, cv.CAP_V4L2) # This added VideoCapture API allows camera fps to be changed from initial 5fps
cap.set(cv.CAP_PROP_FRAME_WIDTH, 1280) # 640 x 480 is 480p (30fps on this cam), 1280 x 720 is 720p (7.5 fps on this cam)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, 720)
tracker = cv.legacy.TrackerCSRT_create()
ret, frame = cap.read()
flip_frame = cv.flip(frame, 1) # Mirrors live video for better user experience
boundingBox = cv.selectROI('MIDI Controller', flip_frame, True)
tracker.init(flip_frame, boundingBox)
pub = rospy.Publisher('interval', Int16, queue_size = 5)
rospy.init_node('webcam')
r = rospy.Rate(10) # 10 Hz
interval = 0
inside_box = False
def setup_gui():
global width, height
width = cap.get(cv.CAP_PROP_FRAME_WIDTH)
height = cap.get(cv.CAP_PROP_FRAME_HEIGHT)
thickness = 2
color = (0,255,0)
font = cv.FONT_HERSHEY_DUPLEX
font_size = 4
font_color = (0,255,0)
font_thickness = 4
line_type = cv.LINE_AA
# Rectangles for Note Interval Selection
    top_left = cv.rectangle(flip_frame, (0,0), (int(width/3),int(height/3)), color, thickness)
top_center = cv.rectangle(flip_frame, (int(width/3),0), (int(2*width/3),int(height/3)), color, thickness)
top_right = cv.rectangle(flip_frame, (int(2*width/3),0), (int(width),int(height/3)), color, thickness)
center_left = cv.rectangle(flip_frame, (0,int(height/3)), (int(width/3),int(2*height/3)), color, thickness)
center_right = cv.rectangle(flip_frame, (int(2*width/3),int(height/3)), (int(width),int(2*height/3)), color, thickness)
bottom_left = cv.rectangle(flip_frame, (0,int(2*height/3)), (int(width/3),int(height)), color, thickness)
bottom_right = cv.rectangle(flip_frame, (int(2*width/3),int(2*height/3)), (int(width),int(height)), color, thickness)
# Text Placement for Note Intervals
one = cv.putText(flip_frame,'1',(int(width/8),int(9*height/10)), font, font_size,font_color,font_thickness,line_type)
two = cv.putText(flip_frame,'2',(int(width/8),int(5.5*height/10)), font, font_size,font_color,font_thickness,line_type)
three = cv.putText(flip_frame,'3',(int(width/8),int(2.25*height/10)), font, font_size,font_color,font_thickness,line_type)
four = cv.putText(flip_frame,'4',(int(3.75*width/8),int(2.25*height/10)), font, font_size,font_color,font_thickness,line_type)
five = cv.putText(flip_frame,'5',(int(6.45*width/8),int(2.25*height/10)), font, font_size,font_color,font_thickness,line_type)
six = cv.putText(flip_frame,'6',(int(6.45*width/8),int(5.5*height/10)), font, font_size,font_color,font_thickness,line_type)
seven = cv.putText(flip_frame,'7',(int(6.45*width/8),int(9*height/10)), font, font_size,font_color,font_thickness,line_type)
def showROI(flip_frame, boundingBox):
global center_x, center_y
x, y, w, h = int(boundingBox[0]), int(boundingBox[1]), int(boundingBox[2]), int(boundingBox[3])
center_x = int(x+(w/2))
center_y = int(y+(h/2))
cv.rectangle(flip_frame, (x, y), (x+w, y+h), (255, 0, 0), 5)
dot = cv.circle(flip_frame, (center_x,center_y), 5, (0,0,255), -1)
def interval_selection(): # Uses the coordinates of the dot center to select intervals
global interval, inside_box
if center_x >= 0 and center_x <= int(width/3):
if center_y >= int(2*height/3) and center_y <= int(height):
inside_box = True
interval = 1
#print('Selected Interval 1')
elif center_y >= int(height/3) and center_y <= int(2*height/3):
inside_box = True
interval = 2
#print('Selected Interval 2')
elif center_y >= 0 and center_y <= int(height/3):
inside_box = True
interval = 3
#print('Selected Interval 3')
elif center_x >= int(width/3) and center_x <= int(2*width/3):
if center_y >= 0 and center_y <= int(height/3):
inside_box = True
interval = 4
#print('Selected Interval 4')
elif center_y >= int(height/3) and center_y <= int(height):
inside_box = False
interval = 0
elif center_x >= int(2*width/3) and center_x <= int(width):
if center_y >= 0 and center_y <= int(height/3):
inside_box = True
interval = 5
#print('Selected Interval 5')
elif center_y >= int(height/3) and center_y <= int(2*height/3):
inside_box = True
interval = 6
#print('Selected Interval 6')
elif center_y >= int(2*height/3) and center_y <= int(height):
inside_box = True
interval = 7
#print('Selected Interval 7')
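# Note: the centre column below the top row is the neutral zone; landing there
# clears inside_box and resets the interval to 0.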
if not cap.isOpened():
print('Could not open camera')
exit()
while True:
ret, frame = cap.read()
flip_frame = cv.flip(frame, 1)
if ret == True:
setup_gui()
showROI(flip_frame, boundingBox)
interval_selection()
print(inside_box)
if inside_box == True:
pub.publish(interval)
r.sleep()
else:
pub.publish(interval)
r.sleep()
else:
print('Could not return frame')
cv.imshow('MIDI Controller', flip_frame)
ret, boundingBox = tracker.update(flip_frame)
if cv.waitKey(1) == 27: # Uses the escape key to exit the program (on Ubuntu)
break
    elif cv.waitKey(1) == ord('r'): # Allows the user to reset the ROI when the 'r' key is held
tracker = cv.legacy.TrackerCSRT_create()
ret, frame = cap.read()
flip_frame_updated = cv.flip(frame, 1)
boundingBox_updated = cv.selectROI('MIDI Controller', flip_frame, True)
tracker.init(flip_frame_updated, boundingBox_updated)
showROI(flip_frame_updated, boundingBox_updated)
ret, boundingBox = tracker.update(flip_frame)
print('ROI Reset')
cap.release()
cv.destroyAllWindows()
| 45.285714
| 130
| 0.649676
|
3f5ec75b0d439b59b67f3c8dd3eb6f0d7e996e82
| 3,943
|
py
|
Python
|
twitter_webapp/models.py
|
dsdoris/twitter_webapp
|
de43000b0755b49740ba516f8e74f93dfa9c4a50
|
[
"MIT"
] | null | null | null |
twitter_webapp/models.py
|
dsdoris/twitter_webapp
|
de43000b0755b49740ba516f8e74f93dfa9c4a50
|
[
"MIT"
] | null | null | null |
twitter_webapp/models.py
|
dsdoris/twitter_webapp
|
de43000b0755b49740ba516f8e74f93dfa9c4a50
|
[
"MIT"
] | null | null | null |
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
import pickle
from embedding_as_service_client import EmbeddingClient
from twitter_webapp.services import twitter_api
from sklearn.linear_model import LogisticRegression
db = SQLAlchemy()
migrate = Migrate()
en = EmbeddingClient(host='54.180.124.154', port=8989)
# User Table
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.BigInteger, primary_key=True)
username = db.Column(db.String, nullable=False, unique=True)
full_name = db.Column(db.String, nullable=False)
followers = db.Column(db.BigInteger, default=0)
location = db.Column(db.String)
def __repr__(self):
return f"<User {self.id} {self.username}>"
# Tweet Table
class Tweet(db.Model):
__tablename__ = 'tweet'
id = db.Column(db.BigInteger, primary_key=True)
text = db.Column(db.String, nullable=False)
user = db.relationship('User',backref=db.backref('tweets', lazy=True))
user_id = db.Column(db.BigInteger, db.ForeignKey('user.id'))
embedding = db.Column(db.PickleType)
def __repr__(self):
return f"<Tweet {self.id}>"
# Get User data (all)
def get_userdata():
return User.query.all()
# Update User data
def update_userdata(username, fullname, location):
user = User.query.filter_by(username=username).update({'full_name': fullname, 'location': location})
db.session.commit()
print("update_commit")
return User.query.filter_by(username=username).first()
# Set up Tweet data
def set_tweetdata():
    # Reset the Tweet table (delete existing rows)
Tweet.query.delete()
    # Fetch the User data
user = get_userdata()
    # Store tweet data for each user in the User table
print("update data at Tweet table...")
for user in user:
raw_tweet = twitter_api.api.user_timeline(user_id = user.id, count=50, include_rts=False, exclude_replies=True, tweet_mode="extended")
en_tweet = embedding_tweet(raw_tweet)
for tweet in raw_tweet:
db.session.add(Tweet(id=tweet.id, text=tweet.full_text, user_id=user.id, embedding=en_tweet))
db.session.commit()
return Tweet.query.all()
# Vectorize tweet data
def embedding_tweet(data):
en_list = []
for tweet in data:
en_list.append(tweet.full_text)
return en.encode(texts=en_list)
# Label the training data
def append_to_with_label(to_arr, from_arr, label_arr, label):
for item in from_arr:
to_arr.append(item)
label_arr.append(label)
# Analyze tweet data
def compare_user(user1, user2, word):
    # Load the two users' tweets
userid1 = User.query.filter_by(username=user1).first().id
userid2 = User.query.filter_by(username=user2).first().id
raw_tweet1 = twitter_api.api.user_timeline(user_id = userid1, count=50, include_rts=False, exclude_replies=True, tweet_mode="extended")
raw_tweet2 = twitter_api.api.user_timeline(user_id = userid2, count=50, include_rts=False, exclude_replies=True, tweet_mode="extended")
    # Convert the text to vectors
em_X_1 = embedding_tweet(raw_tweet1) # em_X_1 = db.session.query(Tweet.embedding).filter_by(user_id=userid1).all()
em_X_2 = embedding_tweet(raw_tweet2) # em_X_2 = db.session.query(Tweet.embedding).filter_by(user_id=userid2).all()
Y_1 = user1
Y_2 = user2
X=[]
y=[]
    # Pack the vector data (tweet text) and user names (labels) into single lists
append_to_with_label(X,em_X_1,y,Y_1)
append_to_with_label(X,em_X_2,y,Y_2)
    # Train the model
classifier = LogisticRegression()
classifier.fit(X,y)
PREDICTION_TEXT = word
    # Vectorize the text to be predicted
em_pred_val = en.encode(texts=[PREDICTION_TEXT])
    # Predict with the trained model
pred_result = classifier.predict(em_pred_val)
    return f"The user most likely to say the word '{PREDICTION_TEXT}' is {pred_result}."
# def save_model(model):
# with open(MODEL_PATH, "wb") as file:
# pickle.dump(model, file)
# def load_model():
# with open(MODEL_FILEPATH, "rb") as file:
# loaded_model = pickle.load(file)
#     return loaded_model
| 28.992647
| 142
| 0.695663
|
e90f9e96d23648c3e9d39e5b2426d929e0e30f43
| 618
|
py
|
Python
|
dashboard/migrations/0020_auto_20170124_2107.py
|
jarifibrahim/ashoka-dashboard
|
da975b31ba1508ef8b8cf71cbfd2118b572a322d
|
[
"Apache-2.0"
] | 1
|
2021-11-26T03:41:22.000Z
|
2021-11-26T03:41:22.000Z
|
dashboard/migrations/0020_auto_20170124_2107.py
|
jarifibrahim/ashoka-dashboard
|
da975b31ba1508ef8b8cf71cbfd2118b572a322d
|
[
"Apache-2.0"
] | 3
|
2017-02-04T05:12:47.000Z
|
2017-02-21T14:33:06.000Z
|
dashboard/migrations/0020_auto_20170124_2107.py
|
jarifibrahim/ashoka-dashboard
|
da975b31ba1508ef8b8cf71cbfd2118b572a322d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-24 15:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0019_auto_20170124_2105'),
]
operations = [
migrations.RenameField(
model_name='teamstatus',
old_name='systemic_vision',
new_name='sys_vision',
),
migrations.RenameField(
model_name='teamstatus',
old_name='systemic_vision_comment',
new_name='sys_vision_comment',
),
]
| 23.769231
| 49
| 0.608414
|
58a749b5db3bde6beca3038095412b900fed9bb7
| 1,345
|
py
|
Python
|
WebHost/models.py
|
draguscloud/MultiWorld-Utilities
|
0157f348cd46fce1bc6ff58a7eaec9f0a482f4be
|
[
"MIT"
] | null | null | null |
WebHost/models.py
|
draguscloud/MultiWorld-Utilities
|
0157f348cd46fce1bc6ff58a7eaec9f0a482f4be
|
[
"MIT"
] | null | null | null |
WebHost/models.py
|
draguscloud/MultiWorld-Utilities
|
0157f348cd46fce1bc6ff58a7eaec9f0a482f4be
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from uuid import UUID, uuid4
from pony.orm import *
db = Database()
class Patch(db.Entity):
id = PrimaryKey(int, auto=True)
player = Required(int)
data = Required(buffer, lazy=True)
seed = Optional('Seed')
class Room(db.Entity):
id = PrimaryKey(UUID, default=uuid4)
last_activity = Required(datetime, default=lambda: datetime.utcnow(), index=True)
creation_time = Required(datetime, default=lambda: datetime.utcnow())
owner = Required(UUID, index=True)
commands = Set('Command')
seed = Required('Seed', index=True)
multisave = Optional(Json, lazy=True)
    show_spoiler = Required(int, default=0) # 0 -> never, 1 -> after completion, 2 -> always
timeout = Required(int, default=lambda: 6 * 60 * 60) # seconds since last activity to shutdown
tracker = Optional(UUID, index=True)
last_port = Optional(int, default=lambda: 0)
class Seed(db.Entity):
id = PrimaryKey(UUID, default=uuid4)
rooms = Set(Room)
multidata = Optional(Json, lazy=True)
owner = Required(UUID, index=True)
creation_time = Required(datetime, default=lambda: datetime.utcnow())
patches = Set(Patch)
spoiler = Optional(str, lazy=True)
class Command(db.Entity):
id = PrimaryKey(int, auto=True)
room = Required(Room)
commandtext = Required(str)
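# Illustrative wiring sketch (not part of this module): Pony entities become usable
# only after the Database is bound and mapped, e.g. against a throwaway SQLite store.
#
#   db.bind(provider='sqlite', filename=':memory:')
#   db.generate_mapping(create_tables=True)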
| 31.27907
| 99
| 0.685502
|
e098bd21dbd689a4d7b9fbcef62c41ccc4eb7efc
| 1,278
|
py
|
Python
|
app/core/migrations/0034_auto_20200912_1025.py
|
ig0r45ure/recipe-app-api
|
0654102293d6e58c13c4b7520909eb6c0ddb45f2
|
[
"MIT"
] | null | null | null |
app/core/migrations/0034_auto_20200912_1025.py
|
ig0r45ure/recipe-app-api
|
0654102293d6e58c13c4b7520909eb6c0ddb45f2
|
[
"MIT"
] | null | null | null |
app/core/migrations/0034_auto_20200912_1025.py
|
ig0r45ure/recipe-app-api
|
0654102293d6e58c13c4b7520909eb6c0ddb45f2
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.10 on 2020-09-12 10:25
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0033_formgeneration'),
]
operations = [
migrations.RemoveField(
model_name='formgeneration',
name='process_owner',
),
migrations.RemoveField(
model_name='formgeneration',
name='status',
),
migrations.CreateModel(
name='GenerationStatus',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.PositiveSmallIntegerField(choices=[(0, 'not done'), (1, 'failed'), (2, 'succesfull')], default=0)),
('generation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.FormGeneration')),
('unit', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='core.OrgUnit')),
],
),
migrations.AddField(
model_name='formgeneration',
name='status',
field=models.ManyToManyField(blank=True, to='core.GenerationStatus'),
),
]
| 34.540541
| 133
| 0.594679
|
3c54dd7e6cec5e04e42c7ba5c9204879e155d012
| 17,474
|
py
|
Python
|
tensorflow_datasets/core/load.py
|
harsh020/datasets
|
b4ad3617b279ec65356e696c4c860458621976f6
|
[
"Apache-2.0"
] | 1
|
2020-12-22T17:05:51.000Z
|
2020-12-22T17:05:51.000Z
|
tensorflow_datasets/core/load.py
|
harsh020/datasets
|
b4ad3617b279ec65356e696c4c860458621976f6
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/core/load.py
|
harsh020/datasets
|
b4ad3617b279ec65356e696c4c860458621976f6
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Access registered datasets."""
import difflib
import posixpath
import re
import textwrap
import typing
from typing import Any, Callable, Dict, Iterable, Iterator, List, NoReturn, Optional, Type
from tensorflow_datasets.core import community
from tensorflow_datasets.core import constants
from tensorflow_datasets.core import dataset_builder
from tensorflow_datasets.core import decode
from tensorflow_datasets.core import naming
from tensorflow_datasets.core import read_only_builder
from tensorflow_datasets.core import registered
from tensorflow_datasets.core import splits as splits_lib
from tensorflow_datasets.core.utils import gcs_utils
from tensorflow_datasets.core.utils import py_utils
from tensorflow_datasets.core.utils import read_config as read_config_lib
from tensorflow_datasets.core.utils import type_utils
from tensorflow_datasets.core.utils import version
# pylint: disable=logging-format-interpolation
Tree = type_utils.Tree
TreeDict = type_utils.TreeDict
PredicateFn = Callable[[Type[dataset_builder.DatasetBuilder]], bool]
# Regex matching 'dataset/config/1.3.0'
_FULL_NAME_REG = re.compile(r'^{ds_name}/({config_name}/)?{version}$'.format(
ds_name=r'\w+',
config_name=r'[\w\-\.]+',
version=r'[0-9]+\.[0-9]+\.[0-9]+',
))
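# For example, 'my_dataset/1.0.0' and 'my_dataset/my_config/1.0.0' both match,
# while a bare 'my_dataset' does not.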
# Variable to globally disable community datasets (e.g. inside tests)
COMMUNITY_DATASET_DISABLED = False
def list_builders(
*,
with_community_datasets: bool = True,
) -> List[str]:
"""Returns the string names of all `tfds.core.DatasetBuilder`s."""
datasets = registered.list_imported_builders()
return datasets
def builder_cls(name: str) -> Type[dataset_builder.DatasetBuilder]:
"""Fetches a `tfds.core.DatasetBuilder` class by string name.
Args:
name: `str`, the registered name of the `DatasetBuilder` (the class name
as camel or snake case: `MyDataset` or `my_dataset`).
Returns:
A `tfds.core.DatasetBuilder` class.
Raises:
DatasetNotFoundError: if `name` is unrecognized.
"""
ds_name, kwargs = naming.parse_builder_name_kwargs(name)
if kwargs:
raise ValueError(
'`builder_cls` only accept the `dataset_name` without config, '
f"version or arguments. Got: name='{name}', kwargs={kwargs}"
)
if ds_name.namespace:
raise ValueError(
f'Namespaces not supported for `builder_cls`. Got: {ds_name}'
)
# Imported datasets
try:
cls = registered.imported_builder_cls(ds_name.name)
cls = typing.cast(Type[dataset_builder.DatasetBuilder], cls)
return cls
except registered.DatasetNotFoundError as e:
_reraise_with_list_builders(e, name=ds_name)
def builder(
name: str,
*,
try_gcs: bool = False,
**builder_kwargs: Any
) -> dataset_builder.DatasetBuilder:
"""Fetches a `tfds.core.DatasetBuilder` by string name.
Args:
name: `str`, the registered name of the `DatasetBuilder` (the class name
as camel or snake case: `MyDataset` or `my_dataset`).
This can be either `'dataset_name'` or
`'dataset_name/config_name'` for datasets with `BuilderConfig`s.
As a convenience, this string may contain comma-separated keyword
arguments for the builder. For example `'foo_bar/a=True,b=3'` would use
the `FooBar` dataset passing the keyword arguments `a=True` and `b=3`
(for builders with configs, it would be `'foo_bar/zoo/a=True,b=3'` to
use the `'zoo'` config and pass to the builder keyword arguments `a=True`
and `b=3`).
try_gcs: `bool`, if True, tfds.load will see if the dataset exists on
the public GCS bucket before building it locally.
**builder_kwargs: `dict` of keyword arguments passed to the
`tfds.core.DatasetBuilder`.
Returns:
A `tfds.core.DatasetBuilder`.
Raises:
DatasetNotFoundError: if `name` is unrecognized.
"""
# 'kaggle:my_dataset:1.0.0' -> ('kaggle', 'my_dataset', {'version': '1.0.0'})
name, builder_kwargs = naming.parse_builder_name_kwargs(
name, **builder_kwargs
)
# `try_gcs` currently only support non-community datasets
if (
try_gcs
and not name.namespace
and gcs_utils.is_dataset_on_gcs(str(name))
):
data_dir = builder_kwargs.get('data_dir')
if data_dir:
raise ValueError(
f'Cannot have both `try_gcs=True` and `data_dir={data_dir}` '
'explicitly set'
)
builder_kwargs['data_dir'] = gcs_utils.gcs_path('datasets')
# Community datasets
if name.namespace:
raise NotImplementedError
# First check whether code exists or not (imported datasets)
try:
cls = builder_cls(str(name))
except registered.DatasetNotFoundError as e:
cls = None # Class not found
not_found_error = e # Save the exception to eventually reraise
  # Try loading from files first, when applicable
if _try_load_from_files_first(cls, **builder_kwargs):
try:
b = read_only_builder.builder_from_files(str(name), **builder_kwargs)
return b
except registered.DatasetNotFoundError as e:
pass
# If code exists and loading from files was skipped (e.g. files not found),
# load from the source code.
if cls:
with py_utils.try_reraise(prefix=f'Failed to construct dataset {name}: '):
return cls(**builder_kwargs) # pytype: disable=not-instantiable
# If neither the code nor the files are found, raise DatasetNotFoundError
raise not_found_error
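# A minimal sketch (illustration only): constructing a builder from a name
# string carrying a config, as described in the docstring above. 'foo_bar' and
# its 'zoo' config are the hypothetical names from that docstring; preparing
# the dataset may download data.
def _example_build_dataset() -> dataset_builder.DatasetBuilder:
  """Builds the hypothetical 'foo_bar/zoo' dataset and prepares its files."""
  b = builder('foo_bar/zoo', try_gcs=False)
  b.download_and_prepare()
  return b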
def _try_load_from_files_first(
cls: Optional[Type[dataset_builder.DatasetBuilder]],
**builder_kwargs: Any,
) -> bool:
"""Returns True if files should be used rather than code."""
if set(builder_kwargs) - {'version', 'config', 'data_dir'}:
    return False  # Has extra kwargs, requires the original code.
  elif builder_kwargs.get('version') == 'experimental_latest':
    return False  # Requested version requires the original code
  elif not cls:
    return True  # Code does not exist
  elif 'version' in builder_kwargs:
    return True  # Version explicitly given (unlocks backward compatibility)
elif (
'config' in builder_kwargs
and isinstance(builder_kwargs['config'], str)
and builder_kwargs['config'] not in cls.builder_configs
):
return True # Requested config isn't found in the code
else:
return False # Code exists and no version given, use code.
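# A minimal sketch (illustration only) of the decision helper above, called
# with `cls=None` (no code available); 'custom_kwarg' is a made-up argument
# name used purely to trigger the extra-kwargs branch.
def _example_files_first_decisions() -> List[bool]:
  """Returns [True, False]: files win unless extra kwargs require the code."""
  no_code = _try_load_from_files_first(None)  # True: no code available
  extra_kwargs = _try_load_from_files_first(None, custom_kwarg=1)  # False
  return [no_code, extra_kwargs]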
def load(
name: str,
*,
split: Optional[Tree[splits_lib.Split]] = None,
data_dir: Optional[str] = None,
batch_size: Optional[int] = None,
shuffle_files: bool = False,
download: bool = True,
as_supervised: bool = False,
decoders: Optional[TreeDict[decode.Decoder]] = None,
read_config: Optional[read_config_lib.ReadConfig] = None,
with_info: bool = False,
builder_kwargs: Optional[Dict[str, Any]] = None,
download_and_prepare_kwargs: Optional[Dict[str, Any]] = None,
as_dataset_kwargs: Optional[Dict[str, Any]] = None,
try_gcs: bool = False,
):
# pylint: disable=line-too-long
"""Loads the named dataset into a `tf.data.Dataset`.
`tfds.load` is a convenience method that:
  1. Fetches the `tfds.core.DatasetBuilder` by name:
```python
builder = tfds.builder(name, data_dir=data_dir, **builder_kwargs)
```
  2. Generates the data (when `download=True`):
```python
builder.download_and_prepare(**download_and_prepare_kwargs)
```
  3. Loads the `tf.data.Dataset` object:
```python
ds = builder.as_dataset(
split=split,
as_supervised=as_supervised,
shuffle_files=shuffle_files,
read_config=read_config,
decoders=decoders,
**as_dataset_kwargs,
)
```
See: https://www.tensorflow.org/datasets/overview#load_a_dataset for more
examples.
If you'd like NumPy arrays instead of `tf.data.Dataset`s or `tf.Tensor`s,
you can pass the return value to `tfds.as_numpy`.
**Warning**: calling this function might potentially trigger the download
of hundreds of GiB to disk. Refer to the `download` argument.
Args:
name: `str`, the registered name of the `DatasetBuilder` (the snake case
version of the class name). This can be either `'dataset_name'` or
`'dataset_name/config_name'` for datasets with `BuilderConfig`s.
As a convenience, this string may contain comma-separated keyword
arguments for the builder. For example `'foo_bar/a=True,b=3'` would use
the `FooBar` dataset passing the keyword arguments `a=True` and `b=3`
(for builders with configs, it would be `'foo_bar/zoo/a=True,b=3'` to
use the `'zoo'` config and pass to the builder keyword arguments `a=True`
and `b=3`).
split: Which split of the data to load (e.g. `'train'`, `'test'`,
`['train', 'test']`, `'train[80%:]'`,...). See our
[split API guide](https://www.tensorflow.org/datasets/splits).
If `None`, will return all splits in a `Dict[Split, tf.data.Dataset]`
data_dir: `str`, directory to read/write data. Defaults to the value of
the environment variable TFDS_DATA_DIR, if set, otherwise falls back to
'~/tensorflow_datasets'.
batch_size: `int`, if set, add a batch dimension to examples. Note that
variable length features will be 0-padded. If
`batch_size=-1`, will return the full dataset as `tf.Tensor`s.
shuffle_files: `bool`, whether to shuffle the input files.
Defaults to `False`.
download: `bool` (optional), whether to call
`tfds.core.DatasetBuilder.download_and_prepare`
before calling `tf.DatasetBuilder.as_dataset`. If `False`, data is
expected to be in `data_dir`. If `True` and the data is already in
`data_dir`, `download_and_prepare` is a no-op.
as_supervised: `bool`, if `True`, the returned `tf.data.Dataset`
will have a 2-tuple structure `(input, label)` according to
`builder.info.supervised_keys`. If `False`, the default,
the returned `tf.data.Dataset` will have a dictionary with all the
features.
    decoders: Nested dict of `Decoder` objects which allow customizing the
      decoding. The structure should match the feature structure, but only
customized feature keys need to be present. See
[the guide](https://github.com/tensorflow/datasets/tree/master/docs/decode.md)
for more info.
    read_config: `tfds.ReadConfig`, additional options to configure the
input pipeline (e.g. seed, num parallel reads,...).
with_info: `bool`, if `True`, `tfds.load` will return the tuple
(`tf.data.Dataset`, `tfds.core.DatasetInfo`), the latter containing the
info associated with the builder.
builder_kwargs: `dict` (optional), keyword arguments to be passed to the
`tfds.core.DatasetBuilder` constructor. `data_dir` will be passed
through by default.
    download_and_prepare_kwargs: `dict` (optional), keyword arguments passed to
      `tfds.core.DatasetBuilder.download_and_prepare` if `download=True`. Allows
      control over where to download and extract the cached data. If not set,
      `cache_dir` and `manual_dir` will automatically be deduced from `data_dir`.
as_dataset_kwargs: `dict` (optional), keyword arguments passed to
`tfds.core.DatasetBuilder.as_dataset`.
try_gcs: `bool`, if True, tfds.load will see if the dataset exists on
the public GCS bucket before building it locally.
Returns:
ds: `tf.data.Dataset`, the dataset requested, or if `split` is None, a
`dict<key: tfds.Split, value: tf.data.Dataset>`. If `batch_size=-1`,
these will be full datasets as `tf.Tensor`s.
ds_info: `tfds.core.DatasetInfo`, if `with_info` is True, then `tfds.load`
will return a tuple `(ds, ds_info)` containing dataset information
(version, features, splits, num_examples,...). Note that the `ds_info`
object documents the entire dataset, regardless of the `split` requested.
Split-specific information is available in `ds_info.splits`.
"""
# pylint: enable=line-too-long
if builder_kwargs is None:
builder_kwargs = {}
dbuilder = builder(name, data_dir=data_dir, try_gcs=try_gcs, **builder_kwargs)
if download:
download_and_prepare_kwargs = download_and_prepare_kwargs or {}
dbuilder.download_and_prepare(**download_and_prepare_kwargs)
if as_dataset_kwargs is None:
as_dataset_kwargs = {}
as_dataset_kwargs = dict(as_dataset_kwargs)
as_dataset_kwargs.setdefault('split', split)
as_dataset_kwargs.setdefault('as_supervised', as_supervised)
as_dataset_kwargs.setdefault('batch_size', batch_size)
as_dataset_kwargs.setdefault('decoders', decoders)
as_dataset_kwargs.setdefault('shuffle_files', shuffle_files)
as_dataset_kwargs.setdefault('read_config', read_config)
ds = dbuilder.as_dataset(**as_dataset_kwargs)
if with_info:
return ds, dbuilder.info
return ds
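# A minimal sketch (illustration only) of a typical `load` call mirroring the
# docstring above. The 'mnist' name is a hypothetical example; running this
# may trigger a download to `data_dir`.
def _example_load_supervised():
  """Loads a train split as (input, label) pairs together with its info."""
  ds, ds_info = load(
      'mnist',
      split='train',
      shuffle_files=True,
      as_supervised=True,
      with_info=True,
  )
  return ds, ds_info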
def _get_all_versions(
current_version: version.Version,
extra_versions: Iterable[version.Version],
current_version_only: bool,
) -> Iterable[str]:
"""Returns the list of all current versions."""
# Merge current version with all extra versions
version_list = [current_version]
if not current_version_only:
version_list.extend(extra_versions)
# Filter datasets which do not have a version (version is `None`) as they
  # should not be instantiated directly (e.g. wmt_translate)
return {str(v) for v in version_list if v}
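# A minimal sketch (illustration only) of the version merging above; the
# version numbers are made up.
def _example_all_versions() -> List[str]:
  """Returns ['1.0.0', '2.0.0'] when older supported versions are included."""
  return sorted(_get_all_versions(
      version.Version('2.0.0'),
      [version.Version('1.0.0')],
      current_version_only=False,
  ))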
def _iter_single_full_names(
builder_name: str,
builder_cls: Type[dataset_builder.DatasetBuilder], # pylint: disable=redefined-outer-name
current_version_only: bool,
) -> Iterator[str]:
"""Iterate over a single builder full names."""
if builder_cls.BUILDER_CONFIGS:
for config in builder_cls.BUILDER_CONFIGS:
for v in _get_all_versions(
config.version,
config.supported_versions,
current_version_only=current_version_only,
):
yield posixpath.join(builder_name, config.name, v)
else:
for v in _get_all_versions(
builder_cls.VERSION,
builder_cls.SUPPORTED_VERSIONS,
current_version_only=current_version_only
):
yield posixpath.join(builder_name, v)
def _iter_full_names(
predicate_fn: Optional[PredicateFn],
current_version_only: bool,
) -> Iterator[str]:
"""Yield all registered datasets full_names (see `list_full_names`)."""
for builder_name in registered.list_imported_builders():
builder_cls_ = builder_cls(builder_name)
# Only keep requested datasets
if predicate_fn is not None and not predicate_fn(builder_cls_):
continue
for full_name in _iter_single_full_names(
builder_name,
builder_cls_,
current_version_only=current_version_only,
):
yield full_name
_DEFAULT_PREDICATE_FN = None
def list_full_names(
predicate_fn: Optional[PredicateFn] = _DEFAULT_PREDICATE_FN,
current_version_only: bool = False,
) -> List[str]:
"""Lists all registered datasets full_names.
Args:
predicate_fn: `Callable[[Type[DatasetBuilder]], bool]`, if set, only
returns the dataset names which satisfy the predicate.
current_version_only: If True, only returns the current version.
Returns:
The list of all registered dataset full names.
"""
return sorted(_iter_full_names(
predicate_fn=predicate_fn,
current_version_only=current_version_only,
))
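# A minimal sketch (illustration only): restricting `list_full_names` to
# builders that declare configs, via the `predicate_fn` hook.
def _example_full_names_with_configs() -> List[str]:
  """Lists full names only for builders that define `BUILDER_CONFIGS`."""
  return list_full_names(
      predicate_fn=lambda cls: bool(cls.BUILDER_CONFIGS),
      current_version_only=True,
  )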
def single_full_names(
builder_name: str,
current_version_only: bool = True,
) -> List[str]:
"""Returns the list `['ds/c0/v0',...]` or `['ds/v']` for a single builder."""
return sorted(_iter_single_full_names(
builder_name,
builder_cls(builder_name),
current_version_only=current_version_only, # pytype: disable=wrong-arg-types
))
def is_full_name(full_name: str) -> bool:
"""Returns whether the string pattern match `ds/config/1.2.3` or `ds/1.2.3`.
Args:
full_name: String to check.
Returns:
`bool`.
"""
return bool(_FULL_NAME_REG.match(full_name))
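# A minimal sketch (illustration only) of the patterns accepted above; both
# strings are made up.
def _example_is_full_name() -> List[bool]:
  """Returns [True, False]: only 'ds[/config]/x.y.z' names match."""
  return [is_full_name('mnist/3.0.1'), is_full_name('mnist')]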
def _reraise_with_list_builders(
e: Exception,
name: naming.DatasetName,
) -> NoReturn:
"""Add the list of available builders to the DatasetNotFoundError."""
# Should optimize to only filter through given namespace
all_datasets = list_builders(with_community_datasets=bool(name.namespace))
all_datasets_str = '\n\t- '.join([''] + all_datasets)
error_string = f'Available datasets:{all_datasets_str}\n'
error_string += textwrap.dedent(
"""
Check that:
- if dataset was added recently, it may only be available
in `tfds-nightly`
- the dataset name is spelled correctly
- dataset class defines all base class abstract methods
- the module defining the dataset class is imported
"""
)
# Add close matches
close_matches = difflib.get_close_matches(str(name), all_datasets, n=1)
if close_matches:
    error_string += f'\nDid you mean: {name} -> {close_matches[0]}'
raise py_utils.reraise(e, suffix=error_string)
| 36.787368
| 94
| 0.712487
|
13310f74067bdff30b67d3726eb6d8c5fc654414
| 3,284
|
py
|
Python
|
xero/utils.py
|
bizgro/pyxero
|
904275656959e385bd9fa43e9746ccdad9391aa1
|
[
"BSD-3-Clause"
] | null | null | null |
xero/utils.py
|
bizgro/pyxero
|
904275656959e385bd9fa43e9746ccdad9391aa1
|
[
"BSD-3-Clause"
] | null | null | null |
xero/utils.py
|
bizgro/pyxero
|
904275656959e385bd9fa43e9746ccdad9391aa1
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
import datetime
import re
import six
DATE = re.compile(
r'^(\/Date\((?P<timestamp>-?\d+)((?P<offset_h>[-+]\d\d)(?P<offset_m>\d\d))?\)\/)'
r'|'
r'((?P<year>\d{4})-(?P<month>[0-2]\d)-0?(?P<day>[0-3]\d)'
r'T'
r'(?P<hour>[0-5]\d):(?P<minute>[0-5]\d):(?P<second>[0-6]\d))$'
)
OBJECT_NAMES = {
"Addresses": "Address",
"Attachments": "Attachment",
"Accounts": "Account",
"BankTransactions": "BankTransaction",
"BankTransfers": "BankTransfer",
"BrandingThemes": "BrandingTheme",
"ContactGroups": "ContactGroup",
"ContactPersons": "ContactPerson",
"Contacts": "Contact",
"CreditNotes": "CreditNote",
"Currencies": "Currency",
"Employees": "Employee",
"ExpenseClaims": "ExpenseClaim",
"Invoices": "Invoice",
"Items": "Item",
"Journals": "Journal",
"ManualJournals": "ManualJournal",
"Organisation": "Organisation",
"Overpayments": "Overpayment",
"Payments": "Payment",
"PayrollCalendars": "PayrollCalendar",
"PayRuns": "PayRun",
"Phones": "Phone",
"Prepayments": "Prepayment",
"Projects": "Project",
"ProjectsUsers": "ProjectsUser",
"Receipts": "Receipt",
"RepeatingInvoices": "RepeatingInvoice",
"Reports": "Report",
"TaxComponents": "TaxComponent",
"TaxRates": "TaxRate",
"TrackingCategories": "TrackingCategory",
"Tracking": "TrackingCategory",
"Time": "Time",
"Tasks": "Tasks",
"Users": "User",
"Associations": "Association",
"Files": "File",
"Folders": "Folder",
"Inbox": "Inbox",
"LineItems": "LineItem",
"JournalLines": "JournalLine",
"PurchaseOrders": "PurchaseOrder",
}
def isplural(word):
return word in OBJECT_NAMES.keys()
def singular(word):
return OBJECT_NAMES.get(word)
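# A minimal usage sketch (illustration only): how the plural -> singular
# mapping above is typically consulted when unpacking API response keys.
def _example_singularise(key):
    """Return the singular element name for a plural key, or the key itself."""
    return singular(key) if isplural(key) else key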
def parse_date(string, force_datetime=False):
""" Takes a Xero formatted date, e.g. /Date(1426849200000+1300)/"""
matches = DATE.match(string)
if not matches:
return None
values = dict([
(
k,
v if v[0] in '+-' else int(v)
) for k,v in matches.groupdict().items() if v and int(v)
])
if 'timestamp' in values:
value = datetime.datetime.utcfromtimestamp(0) + datetime.timedelta(
hours=int(values.get('offset_h', 0)),
minutes=int(values.get('offset_m', 0)),
seconds=int(values['timestamp']) / 1000.0
)
return value
# I've made an assumption here, that a DateTime value will not
# ever be YYYY-MM-DDT00:00:00, which is probably bad. I'm not
# really sure how to handle this, other than to hard-code the
# names of the field that are actually Date rather than DateTime.
if len(values) > 3 or force_datetime:
return datetime.datetime(**values)
# Sometimes Xero returns Date(0+0000), so we end up with no
# values. Return None for this case
if not values:
return None
return datetime.date(**values)
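# A minimal sketch (illustration only) of the two date formats handled above;
# the literal values are made up.
def _example_parse_dates():
    """Return a (datetime, date) pair parsed from Xero-style strings."""
    # The MS-JSON form yields a datetime (offset hours are added in).
    ms_format = parse_date('/Date(1426849200000+1300)/')
    # An all-zero time part yields a plain date.
    iso_format = parse_date('2015-03-20T00:00:00')
    return ms_format, iso_format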
def json_load_object_hook(dct):
""" Hook for json.parse(...) to parse Xero date formats.
"""
for key, value in dct.items():
if isinstance(value, six.string_types):
value = parse_date(value)
if value:
dct[key] = value
return dct
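# A minimal sketch (illustration only): wiring the hook into json.loads so
# date strings in a Xero payload come back as date/datetime objects. The
# payload below is made up.
def _example_decode_payload():
    """Return a dict whose 'DueDate' value has been converted to a date."""
    import json
    payload = '{"DueDate": "2015-03-20T00:00:00", "Reference": "INV-001"}'
    return json.loads(payload, object_hook=json_load_object_hook)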
| 28.807018
| 85
| 0.607186
|